Q: Hyperledger fabric-starter-kit customisation

I have followed the basic guide for getting the Hyperledger fabric-starter-kit up and running, which works perfectly. However, I cannot figure out how to change the development directory of app.js without causing an "invalid ELF header" error:
root@104efc36f09e:/user/env# node app
module.js:355
    Module._extensions[extension](this, filename);
    ^
Error: /user/env/node_modules/grpc/src/node/extension_binary/grpc_node.node: invalid ELF header
    at Error (native)
    at Module.load (module.js:355:32)
    at Function.Module._load (module.js:310:12)
    at Module.require (module.js:365:17)
    at require (module.js:384:17)
    at Object.<anonymous> (/user/env/node_modules/grpc/src/node/src/grpc_extension.js:38:15)
    at Module._compile (module.js:460:26)
    at Object.Module._extensions..js (module.js:478:10)
    at Module.load (module.js:355:32)
    at Function.Module._load (module.js:310:12)
root@104efc36f09e:/user/env#
Dockerfile (unchanged):
FROM hyperledger/fabric-peer:latest
WORKDIR $GOPATH/src/github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02
RUN go build
WORKDIR $GOPATH/src/github.com/hyperledger/fabric/examples/sdk/node
RUN npm install hfc
docker-compose.yaml (changed volume to local workdir: ~/Documents/Work/Blockchain/env):
membersrvc:
  container_name: membersrvc
  image: hyperledger/fabric-membersrvc
  command: membersrvc
peer:
  container_name: peer
  image: hyperledger/fabric-peer
  environment:
    - CORE_PEER_ADDRESSAUTODETECT=true
    - CORE_VM_ENDPOINT=unix:///var/run/docker.sock
    - CORE_LOGGING_LEVEL=DEBUG
    - CORE_PEER_ID=vp0
    - CORE_SECURITY_ENABLED=true
    - CORE_PEER_PKI_ECA_PADDR=membersrvc:7054
    - CORE_PEER_PKI_TCA_PADDR=membersrvc:7054
    - CORE_PEER_PKI_TLSCA_PADDR=membersrvc:7054
    - CORE_PEER_VALIDATOR_CONSENSUS_PLUGIN=noops
  # this gives access to the docker host daemon to deploy chaincode in network mode
  volumes:
    - /var/run/docker.sock:/var/run/docker.sock
  # have the peer wait 10 sec for membersrvc to start
  # the following is to run the peer in Developer mode - also set sample DEPLOY_MODE=dev
  command: sh -c "sleep 10; peer node start --peer-chaincodedev"
  #command: sh -c "sleep 10; peer node start"
  links:
    - membersrvc
starter:
  container_name: starter
  image: hyperledger/fabric-starter-kit
  volumes:
    - ~/Documents/Work/Blockchain/env:/user/env
  environment:
    - MEMBERSRVC_ADDRESS=membersrvc:7054
    - PEER_ADDRESS=peer:7051
    - KEY_VALUE_STORE=/tmp/hl_sdk_node_key_value_store
    # set the following to 'dev' if the peer is running in Developer mode
    - DEPLOY_MODE=dev
    - CORE_CHAINCODE_ID_NAME=mycc
    - CORE_PEER_ADDRESS=peer:7051
  # the following command will start the chaincode when this container starts and ready it for deployment by the app
  command: sh -c "sleep 20; /opt/gopath/src/github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02/chaincode_example02"
  stdin_open: true
  tty: true
  links:
    - membersrvc
    - peer
app.js (unchanged):
/*
 * A simple application utilizing the Node.js Client SDK to:
 * 1) Enroll a user
 * 2) User deploys chaincode
 * 3) User queries chaincode
 */
// "HFC" stands for "Hyperledger Fabric Client"
var hfc = require("hfc");

console.log(" **** STARTING APP.JS ****");

// get the addresses from the docker-compose environment
var PEER_ADDRESS = process.env.CORE_PEER_ADDRESS;
var MEMBERSRVC_ADDRESS = process.env.MEMBERSRVC_ADDRESS;

var chain, chaincodeID;

// Create a chain object used to interact with the chain.
// You can name it anything you want as it is only used by the client.
chain = hfc.newChain("mychain");

// Initialize the place to store sensitive private key information
chain.setKeyValStore( hfc.newFileKeyValStore('/tmp/keyValStore') );

// Set the URL to membership services and to the peer
console.log("member services address ="+MEMBERSRVC_ADDRESS);
console.log("peer address ="+PEER_ADDRESS);
chain.setMemberServicesUrl("grpc://"+MEMBERSRVC_ADDRESS);
chain.addPeer("grpc://"+PEER_ADDRESS);

// The following is required when the peer is started in dev mode
// (i.e. with the '--peer-chaincodedev' option)
var mode = process.env['DEPLOY_MODE'];
console.log("DEPLOY_MODE=" + mode);
if (mode === 'dev') {
    chain.setDevMode(true);
    // Deploy will not take long as the chain should already be running
    chain.setDeployWaitTime(10);
} else {
    chain.setDevMode(false);
    // Deploy will take much longer in network mode
    chain.setDeployWaitTime(120);
}
chain.setInvokeWaitTime(10);

// Begin by enrolling the user
enroll();

// Enroll a user.
function enroll() {
    console.log("enrolling user admin ...");
    // Enroll "admin" which is preregistered in the membersrvc.yaml
    chain.enroll("admin", "Xurw3yU9zI0l", function(err, admin) {
        if (err) {
            console.log("ERROR: failed to register admin: %s", err);
            process.exit(1);
        }
        // Set this user as the chain's registrar, which is authorized to register other users.
        chain.setRegistrar(admin);
        var userName = "JohnDoe";
        // registrationRequest
        var registrationRequest = {
            enrollmentID: userName,
            affiliation: "bank_a"
        };
        chain.registerAndEnroll(registrationRequest, function(error, user) {
            if (error) throw Error(" Failed to register and enroll " + userName + ": " + error);
            console.log("Enrolled %s successfully\n", userName);
            deploy(user);
        });
    });
}
// Deploy chaincode
function deploy(user) {
    console.log("deploying chaincode; please wait ...");
    // Construct the deploy request
    var deployRequest = {
        chaincodeName: process.env.CORE_CHAINCODE_ID_NAME,
        fcn: "init",
        args: ["a", "100", "b", "200"]
    };
    // where the chaincode is located; ignored in dev mode
    deployRequest.chaincodePath = "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02";
    // Issue the deploy request and listen for events
    var tx = user.deploy(deployRequest);
    tx.on('complete', function(results) {
        // Deploy request completed successfully
        console.log("deploy complete; results: %j", results);
        // Save the chaincodeID for subsequent requests
        chaincodeID = results.chaincodeID;
        invoke(user);
    });
    tx.on('error', function(error) {
        console.log("Failed to deploy chaincode: request=%j, error=%j", deployRequest, error);
        process.exit(1);
    });
}

// Query chaincode
function query(user) {
    console.log("querying chaincode ...");
    // Construct a query request
    var queryRequest = {
        chaincodeID: chaincodeID,
        fcn: "query",
        args: ["a"]
    };
    // Issue the query request and listen for events
    var tx = user.query(queryRequest);
    tx.on('complete', function (results) {
        console.log("query completed successfully; results=%j", results);
        process.exit(0);
    });
    tx.on('error', function (error) {
        console.log("Failed to query chaincode: request=%j, error=%j", queryRequest, error);
        process.exit(1);
    });
}

// Invoke chaincode
function invoke(user) {
    console.log("invoking chaincode ...");
    // Construct an invoke request
    var invokeRequest = {
        chaincodeID: chaincodeID,
        fcn: "invoke",
        args: ["a", "b", "1"]
    };
    // Issue the invoke request and listen for events
    var tx = user.invoke(invokeRequest);
    tx.on('submitted', function (results) {
        console.log("invoke submitted successfully; results=%j", results);
    });
    tx.on('complete', function (results) {
        console.log("invoke completed successfully; results=%j", results);
        query(user);
    });
    tx.on('error', function (error) {
        console.log("Failed to invoke chaincode: request=%j, error=%j", invokeRequest, error);
        process.exit(1);
    });
}
My goal is to create an authentication service using the HFC so that an Android app can invoke a transaction. Any help would be greatly appreciated.

You installed the node modules on your Mac and then used them inside your Linux Docker image. That is what is causing the problem.
Native npm modules (such as grpc) must be built on the platform where they will be executed. Re-install the node modules in the Linux environment by first deleting node_modules and then running npm install from inside the starter Docker container.
Please consult these questions as well:
NodeJs Google Compute Engine Invalid ELF Header when using 'gcloud' module
"invalid ELF header" when using the nodejs "ref" module on AWS Lambda

Credit to Sufiyan Ghori for pointing this out: the node modules were installed on my host (Mac) and therefore were not compatible with the Linux Docker image I was trying to execute the code within.
SOLUTION:
Delete the node_modules folder from the work directory.
Run npm install hfc@0.6.x from inside the starter Docker container.
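For the follow-up goal of letting an Android app invoke transactions, one option is to wrap the SDK calls from app.js above in a small HTTP service that the app can call. A rough sketch, assuming the same hfc 0.6.x API used in app.js, a peer running in dev mode (so the chaincode ID is simply CORE_CHAINCODE_ID_NAME), and Express as an illustrative web framework (not part of the starter kit):

// Hypothetical REST wrapper around the hfc calls shown in app.js above.
// Express is an illustrative choice; install it from inside the container
// so its native dependencies (if any) match the Linux image.
var express = require("express");
var hfc = require("hfc");

var chain = hfc.newChain("mychain");
chain.setKeyValStore(hfc.newFileKeyValStore("/tmp/keyValStore"));
chain.setMemberServicesUrl("grpc://" + process.env.MEMBERSRVC_ADDRESS);
chain.addPeer("grpc://" + process.env.CORE_PEER_ADDRESS);
chain.setDevMode(true); // assumes the peer runs with --peer-chaincodedev

var app = express();
app.use(express.json());

// The Android app POSTs JSON such as {"args": ["a", "b", "1"]}.
app.post("/invoke", function (req, res) {
    chain.enroll("admin", "Xurw3yU9zI0l", function (err, admin) {
        if (err) return res.status(500).json({ error: String(err) });
        var tx = admin.invoke({
            chaincodeID: process.env.CORE_CHAINCODE_ID_NAME, // "mycc" in dev mode
            fcn: "invoke",
            args: req.body.args
        });
        tx.on("complete", function (results) { res.json(results); });
        tx.on("error", function (error) { res.status(500).json({ error: String(error) }); });
    });
});

app.listen(3000);

In a real service the enrollment would be done once at startup and the member object cached, but this shows the shape of the wrapper.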

Related

createMintToInstruction authorized by PDA works fine on LocalNet, but gives "Account not associated with this Mint" when run on DevNet

I created an automated deployment script that mints 2 million SPL tokens to an address; the mint is owned by the program PDA, as shown in Solana Explorer.
Below is the last step of the deployment script:
export async function mint2e6Tokens(provider: anchor.AnchorProvider, tokenAccount: PublicKey, mint: PublicKey): Promise<void> {
    // get the token account, could be a PDA
    const programKeypair = await createKeypairFromFile(PROGRAM_KEYPAIR_PATH);
    /** Load from PDA */
    let mint_tokens_tx = new Transaction().add(
        createMintToInstruction(
            mint,
            tokenAccount,
            programKeypair.publicKey, // -> I DOUBLE CHECKED, THIS IS 6Z24B3qCrWfWvDo1f2HgxmnBSGhqQes1sHobqMtMxfbP
            2e6,
            [],
            TOKEN_PROGRAM_ID
        )
    );
    // We sign with our programId instead of the wallet because this is a PDA
    // (Program Derived Address)
    await provider.sendAndConfirm(mint_tokens_tx, [programKeypair]);
}
When I run in LocalNet, it works:
Migrating to LocalNet...
RUNNING CUSTOM SCRIPT ====>>>
Payper: EsgJ9ihTEZskWyWpMMPuGVisy5ay76YWgetgTLb3jRmj
Using program 6Z24B3qCrWfWvDo1f2HgxmnBSGhqQes1sHobqMtMxfbP
Program: 6Z24B3qCrWfWvDo1f2HgxmnBSGhqQes1sHobqMtMxfbP
Mint: AVPgrT1y6ZfjGWPyLCWEPZdogRgmEMbrdRbcHNSfAPzF
Campaign: FcAmyEgZsXUKLB6hKufDmLuSBzMzVtPmRPm7vZLStK4U
31999500
However, when I switch to DevNet, it always gives me this error:
Migrating to DevNet...
RUNNING CUSTOM SCRIPT ====>>>
Payper: EsgJ9ihTEZskWyWpMMPuGVisy5ay76YWgetgTLb3jRmj
Using program 6Z24B3qCrWfWvDo1f2HgxmnBSGhqQes1sHobqMtMxfbP
Program: 6Z24B3qCrWfWvDo1f2HgxmnBSGhqQes1sHobqMtMxfbP
Mint: AVPgrT1y6ZfjGWPyLCWEPZdogRgmEMbrdRbcHNSfAPzF
Campaign: FcAmyEgZsXUKLB6hKufDmLuSBzMzVtPmRPm7vZLStK4U
SendTransactionError: failed to send transaction: Transaction simulation failed: Error processing Instruction 0: custom program error: 0x3
    at Connection.sendEncodedTransaction (/code/beens/node_modules/@solana/web3.js/src/connection.ts:4248:13)
    at processTicksAndRejections (node:internal/process/task_queues:95:5)
    at async Connection.sendRawTransaction (/code/beens/node_modules/@solana/web3.js/src/connection.ts:4210:20)
    at async sendAndConfirmRawTransaction (/code/beens/node_modules/@project-serum/anchor/src/provider.ts:288:21)
    at async AnchorProvider.sendAndConfirm (/code/beens/node_modules/@project-serum/anchor/src/provider.ts:148:14) {
  logs: [
    'Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [1]',
    'Program log: Instruction: MintTo',
    'Program log: Error: Account not associated with this Mint',
    'Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 2809 of 200000 compute units',
    'Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA failed: custom program error: 0x3'
  ]
}
The authority is right, so why does Solana complain with Error: Account not associated with this Mint?
It turns out that my versions of Solana, Anchor, and Rust were not compatible with DevNet and/or testnet.
I upgraded Solana to the version recommended for mainnet and it works. One more thing to mention: don't forget to run "solana program close --buffers" before retrying. My current settings are:
root@d4c64206ce03:/code# solana --version
solana-cli 1.10.31 (src:77a40cd8; feat:4192065167)
root@d4c64206ce03:/code# rustc --version
rustc 1.63.0 (4b91a6ea7 2022-08-08)
root@d4c64206ce03:/code# anchor --version
anchor-cli 0.25.0
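A quick way to confirm whether the failure really is an account/mint mismatch on the target cluster (rather than a signing problem) is to read both accounts back from DevNet before minting. A diagnostic sketch, assuming the getAccount and getMint helpers from @solana/spl-token v0.2+:

// Diagnostic sketch (not from the answer above): verify on the target
// cluster that tokenAccount belongs to mint and that the PDA really is the
// mint authority. Token program error 0x3 is exactly this mismatch.
const { Connection, clusterApiUrl } = require("@solana/web3.js");
const { getAccount, getMint } = require("@solana/spl-token");

async function checkMintLinkage(connection, mint, tokenAccount, expectedAuthority) {
    const acc = await getAccount(connection, tokenAccount);
    if (!acc.mint.equals(mint)) {
        throw new Error("token account is for mint " + acc.mint + ", not " + mint);
    }
    const mintInfo = await getMint(connection, mint);
    if (!mintInfo.mintAuthority || !mintInfo.mintAuthority.equals(expectedAuthority)) {
        throw new Error("mint authority is " + mintInfo.mintAuthority + ", expected " + expectedAuthority);
    }
}

// Accounts created on localnet do not exist on DevNet, so run this against
// the DevNet endpoint before sending the MintTo instruction, e.g.:
// await checkMintLinkage(new Connection(clusterApiUrl("devnet")), mint, tokenAccount, programKeypair.publicKey);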

Docker-RabbitMQ-NestJS microservices error 406 PRECONDITION_FAILED

I'm new to Docker and RabbitMQ, and for two days I've been trying to solve an error in my Docker containers, which contain api_client, api_consumer, and RabbitMQ. I've done research and read as many threads about this problem as I could find, but unfortunately nothing helped.
So here is my code:
compose.yml
services:
  api_client:
    build:
      context: ""
      dockerfile: apps/api_client/Dockerfile
    env_file:
      - ./config/.env.local
    restart: always
    ports:
      - "3000:3000"
    depends_on:
      - rabbitmq
  api_consumer:
    build:
      context: ""
      dockerfile: apps/api_consumer/Dockerfile
    env_file:
      - ./config/.env.local
    restart: always
    depends_on:
      - rabbitmq
  rabbitmq:
    image: rabbitmq:3.9.2-management
    container_name: rabbitmq
    hostname: rabbitmq
    volumes:
      - /var/lib/rabbitmq
      - ./rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf
    ports:
      - "5672:5672"
      - "15672:15672"
main.ts (in api_consumer)
async function bootstrap() {
    const app = await NestFactory.createMicroservice<MicroserviceOptions>(
        ApiConsumerModule,
        {
            transport: Transport.RMQ,
            options: {
                queue: 'test_queue',
                urls: ['amqp://guest:guest@rabbitmq:5672'],
                queueOptions: {
                    durable: true
                }
            }
        },
    );
    const AWSAppConfig = app.get(AwsAppconfigLoaderService);
    const Log = new Logger(ApiClientService.name);
    await AWSAppConfig.loadAWSAppConfig()
        .then((_) => {
            Log.log(AWSAppConfig.getAppName());
        })
        .catch((err) => {
            Log.error(
                `Error occurred while downloading AWS Config: ${JSON.stringify(
                    err,
                )}`,
            );
        });
    await app.listen();
}
bootstrap();
api-client.module.ts (in api_client)
@Module({
    imports: [
        ConfigModule.forRoot({
            isGlobal: true,
            load: [AppConfig],
        }),
        ClientsModule.register([{
            name: GET_MATCHED_DEVICES,
            transport: Transport.RMQ,
            options: {
                queue: 'test_queue',
                urls: ['amqp://guest:guest@rabbitmq:5672'],
                queueOptions: {
                    durable: true
                }
            }
        }]),
        AwsAppconfigLoaderModule,
    ],
    controllers: [ApiClientController],
    providers: [ApiClientService],
})
export class ApiClientModule {}
The functionality is simple: when GET on localhost:3000 (api_client) is called, the controller calls return this.client.send('getSample', "hello"), and then in api_consumer the following controller handler should run:
@MessagePattern('getSample')
getSample(data): string {
    Logger.debug(data)
    return "It works!";
}
When all the Docker services start, the first error appears:
Disconnected from RMQ. Trying to reconnect.
{
  "err": {
    "code": 406,
    "classId": 60,
    "methodId": 40
  }
}
And then when I try to access localhost:3000, this error always occurs:
Error: Channel closed by server: 406 (PRECONDITION-FAILED) with message "PRECONDITION_FAILED - fast reply consumer does not exist"
Both errors come from api_client.
What I've tried that didn't help:
- changing durable to false, and removing the durable options completely
- adding noAck
- removing the queue in the admin UI on localhost:15672 (which works fine)
- removing the port from urls in both microservices
- as you can see, the queue options are already the same in both microservices
The strangest thing is that this code worked absolutely fine until I started working on a second compose file (and Dockerfiles) for local (faster) development with volumes. Then these errors suddenly started to occur, and even after I undid all my code changes the errors remained. Because of this I've wiped all my volumes (with docker system prune -a --volumes) many times, but still nothing. My OS is Ubuntu 20.04.
I am completely out of ideas, so I'm writing here in the hope of some help.
The failure occurs because the app cannot connect to RabbitMQ; the problem resides in your Docker Compose setup. Make sure the services share the same Docker network and are able to communicate.
I know it sounds strange, but I had the same problem yesterday, also using a docker-compose file, and without making any changes to it or to the RabbitMQ logic, it broke. I tried many things, and when I changed
return this.client.send('getSample', "hello") and @MessagePattern
to
return this.client.emit('getSample', "hello") and @EventPattern
thinking it wouldn't make sense for that to fix the issue, it actually did.
I suggest you try that and tell me if it works; sorry I can't help you more.
So after 5 days of figuring out what causes the error, I've found that the problem lies in Nest itself. The bug has been documented here. I deleted all the code associated with @nestjs/microservices, switched to this approach using amqplib only, and the request/response functionality finally works.
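For reference, the client side of an amqplib-only request/response pattern looks roughly like the sketch below (queue name and URL taken from the configs above; this illustrates the approach, not the exact code from the linked issue):

// Minimal RPC over amqplib: send to test_queue with a replyTo queue and a
// correlationId, then wait for the matching reply.
// Node 14.17+ assumed for crypto.randomUUID.
const amqp = require("amqplib");
const crypto = require("crypto");

async function rpcRequest(payload) {
    const conn = await amqp.connect("amqp://guest:guest@rabbitmq:5672");
    const ch = await conn.createChannel();
    // exclusive server-named queue for replies
    const { queue: replyQueue } = await ch.assertQueue("", { exclusive: true });
    const correlationId = crypto.randomUUID();

    const reply = new Promise(function (resolve) {
        ch.consume(replyQueue, function (msg) {
            if (msg.properties.correlationId === correlationId) {
                resolve(msg.content.toString());
            }
        }, { noAck: true });
    });

    ch.sendToQueue("test_queue", Buffer.from(JSON.stringify(payload)), {
        correlationId: correlationId,
        replyTo: replyQueue
    });

    const result = await reply;
    await conn.close();
    return result;
}

The consumer side reads from test_queue and publishes its answer to msg.properties.replyTo with the same correlationId.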

Protractor Test Randomly Failing with Error "Failed: ECONNREFUSED connect ECONNREFUSED 127.0.0.1:32572"

My Protractor test randomly fails with the following error:
Failed: ECONNREFUSED connect ECONNREFUSED 127.0.0.1:32572
    at ClientRequest.<anonymous> (C:\jenkins\workspace\QA-E2E\automation\SeleniumFramework\node_modules\selenium-webdriver\http\index.js:238:15)
    at ClientRequest.emit (events.js:223:5)
    at Socket.socketErrorListener (_http_client.js:406:9)
    at Socket.emit (events.js:223:5)
    at emitErrorNT (internal/streams/destroy.js:92:8)
    at emitErrorAndCloseNT (internal/streams/destroy.js:60:3)
    at processTicksAndRejections (internal/process/task_queues.js:81:21)
From: Task: WebDriver.navigate().to(data:text/html,<html></html>)
    at Driver.schedule (C:\jenkins\workspace\QA-E2E\automation\SeleniumFramework\node_modules\selenium-webdriver\lib\webdriver.js:807:17)
    at Navigation.to (C:\jenkins\workspace\QA-E2E\automation\SeleniumFramework\node_modules\selenium-webdriver\lib\webdriver.js:1133:25)
    at Driver.get (C:\jenkins\workspace\QA-E2E\automation\SeleniumFramework\node_modules\selenium-webdriver\lib\webdriver.js:988:28)
    at C:\jenkins\workspace\QA-E2E\automation\SeleniumFramework\node_modules\protractor\built\browser.js:675:32
    at ManagedPromise.invokeCallback_ (C:\jenkins\workspace\QA-E2E\automation\SeleniumFramework\node_modules\selenium-webdriver\lib\promise.js:1376:14)
Node version: v12.14.1
npm version: 6.13.4
protractor version: 5.4.2
webdriver-manager version: 12.1.6
Note : I am also using async/await in my test.
This was happening to me when I had multiple chromedriver.exe processes running in the background.
Terminate all of those background processes, or restart the computer, and you should not see this error.
Some time ago there was a problem where this error was specific to a particular chromedriver version and had to be resolved on the Selenium side. Since chromedriver 80 is a brand new version, I assume it's just a temporary bug which will be fixed some time soon.
Meanwhile, you can try the solution that I used for a while. All it does is mute the error: add this code before exporting your config file.
//////////////////////////////////////////////////////////////////////////////
// temporary workaround to avoid the 'ECONNREFUSED' error; preferably find another solution
// remove this block when https://github.com/SeleniumHQ/selenium/pull/5759 is merged && released
const fs = require("fs"); // required for the file patching below

let httpIndexFile = "node_modules/selenium-webdriver/http/index.js";
fs.readFile(httpIndexFile, "utf8", function (err, data) {
    if (err) {
        throw err;
    }
    let result = data.replace(/\(e.code === 'ECONNRESET'\)/g, "(e.code === 'ECONNRESET' || e.code === 'ECONNREFUSED')");
    console.log(`Patching ${httpIndexFile}`);
    fs.writeFileSync(httpIndexFile, result, "utf8");
});

let chromeFile = "node_modules/selenium-webdriver/chrome.js";
fs.readFile(chromeFile, "utf8", function (err, data) {
    if (err) {
        throw err;
    }
    let result = data.replace(/new http.HttpClient\(url\)/g, "new http.HttpClient(url, new (require('http').Agent)({ keepAlive: true }))");
    console.log(`Patching ${chromeFile}`);
    fs.writeFileSync(chromeFile, result, "utf8");
});
//////////////////////////////////////////////////////////////////////////////
exports.config = {
Again, that was originally a different problem, but the tweak should work the same here.

serverless offline won't run offline: Failed to load resource: net::ERR_CONNECTION_REFUSED

PROBLEM
I cannot get serverless offline to run when not connected to the internet.
serverless.yml
service: my-app

plugins:
  - serverless-offline

# run on port 4000, because the client runs on 3000
custom:
  serverless-offline:
    port: 4000

# app and org for use with dashboard.serverless.com
app: my-app
org: my-org

provider:
  name: aws
  runtime: nodejs10.x

functions:
  getData:
    handler: data-service.getData
    events:
      - http:
          path: data/get
          method: get
          cors: true
          isOffline: true
  saveData:
    handler: data-service.saveData
    events:
      - http:
          path: data/save
          method: put
          cors: true
          isOffline: true
To launch serverless offline, I run serverless offline start in the terminal. This works when I am connected to the internet, but when offline I get the following errors:
Console Error
:4000/data/get:1 Failed to load resource: net::ERR_CONNECTION_REFUSED
20:34:02.820 localhost/:1 Uncaught (in promise) TypeError: Failed to fetch
Terminal Error
FetchError: request to https://api.serverless.com/core/tenants/{tenant}/applications/my-app/profileValue failed, reason: getaddrinfo ENOTFOUND api.serverless.com api.serverless.com:443
Request
I suspect the cause is that I have not set up offline mode correctly per the instruction: "The event object passed to your λs has one extra key: { isOffline: true }. Also, process.env.IS_OFFLINE is true."
Any assistance on how to debug the issue would be much appreciated.
You have probably already fixed it, but the problem is caused by the app and org attributes:
# app and org for use with dashboard.serverless.com
app: my-app
org: my-org
When you use them, serverless will use configuration stored on serverless.com, commonly env vars.
To use env vars locally, you can use the serverless-dotenv-plugin. This way, you don't need to connect to the internet.
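As for the isOffline part quoted in the question: that key is set by serverless-offline on the incoming event rather than configured under the http events. A handler can branch on it like this (a sketch; fetchFromRealBackend is a hypothetical stand-in for whatever the real handler does):

// data-service.js (sketch): serverless-offline passes isOffline on the event
// and sets process.env.IS_OFFLINE, so the handler can avoid network calls locally.
module.exports.getData = async (event) => {
    const offline = (event && event.isOffline) || process.env.IS_OFFLINE === "true";
    const data = offline
        ? { source: "local-stub" }          // stub data while offline
        : await fetchFromRealBackend();     // hypothetical real data source
    return {
        statusCode: 200,
        headers: { "Access-Control-Allow-Origin": "*" }, // matches cors: true
        body: JSON.stringify(data)
    };
};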

ECONNRESET when opening a large number of connections in a small time period

I have a situation where I want to create a large number of entities on Orion. I am using the Docker version of Orion and MongoDB with this docker-compose:
version: "3"
services:
  mongo:
    image: mongo:3.4
    volumes:
      - /data/docker-mongo/db:/data/db
      - /data/docker-mongo/log/mongodb.log:/var/log/mongodb/mongod.log
    command: --nojournal
  orion:
    image: fiware/orion
    volumes:
      - /data/docker-mongo/log/contextBroker.log:/tmp/contextBroker.log
    links:
      - mongo
    ports:
      - "1026:1026"
    command: -dbhost mongo
Now the problem happens when I want to upload 2000 entities (opening a new connection for each; I know it can be done differently, but for now this is the requirement). I successfully create no more than 600 of them (or fewer, never an exact number); the rest fail to create with this error:
"error": {
"errno": "ECONNRESET",
"code": "ECONNRESET",
"syscall": "read"
},
So I assume this issue has something to do with the maxConnections, reqPoolSize, etc. settings in Orion. But in Docker I failed to locate the Orion config file, and I have no way of knowing whether, when I type commands like contextBroker -maxConnections 123456, the setting is accepted by Orion in the Docker container.
Also the Orion log is empty, so I cannot determine what is causing this issue when Orion is running in Docker.
So my main questions:
Can Orion running in Docker be used in the same manner as Orion running on a VM (are there any fallbacks)?
And how do I investigate this problem when Orion is running in Docker? I have read a lot of docs/issues but had no luck (or I missed something).
If you have some advice/solution it would really help.
Thanks
Orion version:
{
  "orion" : {
    "version" : "1.13.0-next",
    "uptime" : "2 d, 15 h, 46 m, 34 s",
    "git_hash" : "ae72acf9e8eeaacaf4eb138f7de37bfee4514c6b",
    "compile_time" : "Fri May 4 10:12:18 UTC 2018",
    "compiled_by" : "root",
    "compiled_in" : "1901fd6bb51a",
    "release_date" : "Fri May 4 10:12:18 UTC 2018",
    "doc" : "https://fiware-orion.readthedocs.org/en/master/"
  }
}
Example error output:
{ Error: socket hang up
    at createHangUpError (_http_client.js:313:15)
    at Socket.socketOnEnd (_http_client.js:416:23)
    at Socket.emit (events.js:187:15)
    at endReadableNT (_stream_readable.js:1090:12)
    at process._tickCallback (internal/process/next_tick.js:63:19) code: 'ECONNRESET' }
error:
{ Error: connect ECONNREFUSED ipofvirtualm:1026
    at TCPConnectWrap.afterConnect [as oncomplete] (net.js:1174:14)
  errno: 'ECONNREFUSED',
  code: 'ECONNREFUSED',
  syscall: 'read',
  address: 'ipofvm',
  port: 1026 },
options:
{ method: 'POST',
  uri: 'http://ip:1026/v2/entities?options=keyValues',
  headers:
   { 'Fiware-Service': 'some service',
     'Fiware-ServicePath': 'some servicepath' },
  body:
   { id: 'F0B935',
     type: 'Transaction',
     refEmitter: 'F0B935',
     refReceiver: '7501JXG',
     refCapturer: 'testtdata',
     date: '12/12/2017 13:25',
     refTransferredResources: 'testtdata',
     transferredLoad: 92 },
  json: true,
  callback: [Function: RP$callback],
  transform: undefined,
  simple: true,
  resolveWithFullResponse: false,
  transform2xxOnly: false },
I am using the request-promise library for making the calls; I tried others and they had the same issue. Since I cannot send you all 2000 responses, I will describe the behaviour: it creates around 30 entities, then the next few (or more) requests return ECONNRESET, then it starts creating again, and so on.
What confuses me is that it is not failing completely: it works, but not as intended. It also seems that Orion closes or hangs up the socket for some period, then it is open again and creates entities as normal, and so on. If you need any more info, ask, and thanks for the quick answer.
Instead of opening a new connection per entity, why don't you use
POST /v2/op/update
and create all the entities in just one batch, or a couple of batches? See some example code at
https://github.com/Fiware/dataModels/blob/master/Weather/WeatherObserved/harvest/spain_weather_observed_harvest.py#L235
With regard to CLI argument passing to the context broker running inside Docker, use the command line in the docker-compose file, e.g.:
command: -dbhost mongo -maxConnections 123456
However, I'm not sure that will help solve the problem, as Orion should handle your use case without any special customization. Looking at the error message (which seems to be about some problem at the TCP layer), I wonder if the Docker networking layer is acting as a bottleneck in some way.
In addition, the suggestion from Jose Manuel Cantera about using POST /v2/op/update is a good idea. It would reduce connection stress at the network layer and may help alleviate the problem.
If you cannot change your update strategy, maybe using an inter-request delay (100-200 ms) could also help.
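For illustration, the batch approach suggested above could look roughly like this with the same request-promise library used in the question (endpoint and headers copied from the error dump; the batch size and delay are assumptions to tune):

// Create entities in batches via POST /v2/op/update instead of one
// connection per entity, with a small inter-request delay between batches.
const rp = require("request-promise");

async function createInBatches(entities, batchSize) {
    batchSize = batchSize || 100; // assumed batch size; tune as needed
    for (let i = 0; i < entities.length; i += batchSize) {
        await rp({
            method: "POST",
            uri: "http://ip:1026/v2/op/update?options=keyValues",
            headers: {
                "Fiware-Service": "some service",
                "Fiware-ServicePath": "some servicepath"
            },
            body: {
                actionType: "append",
                entities: entities.slice(i, i + batchSize)
            },
            json: true
        });
        // pause between batches (100-200 ms), per the suggestion above
        await new Promise(function (resolve) { setTimeout(resolve, 100); });
    }
}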
