Chrome mv3 await sendMessage to background service worker [duplicate] - chrome-extension-manifest-v3

This question already has answers here:
chrome.runtime.onMessage response with async await
(7 answers)
Closed 1 year ago.
I am having an issue of asynchronicity (I believe). sendResponse() in contentscript.js does not wait for getThumbnails() to return.
I am sending a message in popup.js:
chrome.tabs.sendMessage(tabs[0].id, {message: "get_thumbnails", tabUrl: tabs[0].url},
function (respThumbnails) {
const thumbUrl = respThumbnails.payload;
console.log("payload", thumbUrl)
}
);
Then, in contentscript.js I listen for this message:
chrome.runtime.onMessage.addListener(async function(request,sender,sendResponse) {
if(request.message === "get_thumbnails") {
const payload = await getThumbnails();
console.log("thumbPayload after function:", payload)
sendResponse({payload:payload});
}
});
async function getThumbnails() {
let tUrl = null;
var potentialLocations = [
{sel: "meta[property='og:image:secure_url']", attr: "content" },
{sel: "meta[property='og:image']", attr: "content" },
];
for(s of potentialLocations) {
if(tUrl) return
const el = document.querySelector(s.sel);
if(el) {
tUrl = el.getAttribute(s.attr) || null;
}
}
return tUrl;
};
But it is also possible that the problem is coming from my getThumbnails() function, because most of the time payload is null and not undefined. So getThumbnails() might return before it has completely executed.
If this is the case, I have no idea why...
I also tried this code for getThumbnails():
async function getThumbnails() {
let x = await function() {
let tUrl = null;
var potentialLocations = [
{sel: "meta[property='og:image:secure_url']", attr: "content" },
{sel: "meta[property='og:image']", attr: "content" },
];
for(s of potentialLocations) {
if(tUrl) return
const el = document.querySelector(s.sel);
if(el) {
tUrl = el.getAttribute(s.attr) || null;
}
}
return tUrl;
}
return x;
};
But this does not work, it seems to break my code...

The callback of onMessage should return a literal true value (documentation) in order to keep the internal messaging channel open so that sendResponse can work asynchronously.
Problem
Your callback is declared with the async keyword, so it returns a Promise, not a literal true value. The Chrome extensions API doesn't support a Promise as the return value of the onMessage callback (until https://crbug.com/1185241 is fixed), so the Promise is simply ignored, the port is closed immediately, and the caller receives undefined as the response.
Solutions
Remove the async keyword from before (request, sender, sendResponse), then...
Solution 1
Call an async function that can be embedded as an IIFE:
chrome.runtime.onMessage.addListener((request, sender, sendResponse) => {
if (request.message === "get_thumbnails") {
(async () => {
const payload = await getThumbnails();
console.log("thumbPayload after function:", payload)
sendResponse({payload});
})();
return true; // keep the messaging channel open for sendResponse
}
});
Solution 2
Declare a separate async function and call it from the onMessage listener:
chrome.runtime.onMessage.addListener((msg, sender, sendResponse) => {
if (msg.message === "get_thumbnails") {
processMessage(msg).then(sendResponse);
return true; // keep the messaging channel open for sendResponse
}
});
async function processMessage(msg) {
console.log('Processing message', msg);
// .................
return 'foo';
}
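Note that recent Chrome versions also expose the extension APIs as Promises in Manifest V3, so the popup side can be written with await as well. A minimal sketch, assuming a Chrome version new enough that chrome.tabs.query and chrome.tabs.sendMessage return Promises when no callback is passed (the listener must still use one of the solutions above, otherwise the awaited response is undefined):
// popup.js — hypothetical helper, not part of the original question
async function fetchThumbnails() {
  const [tab] = await chrome.tabs.query({ active: true, currentWindow: true });
  const resp = await chrome.tabs.sendMessage(tab.id, {
    message: "get_thumbnails",
    tabUrl: tab.url
  });
  console.log("payload", resp && resp.payload);
}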

Related

Dart Socket one at a time

Hi, I am building a blockchain and am trying to sync a list of connected peers,
but if the following code is called twice from different nodes, the first call is still busy while the second call kicks in. Does anyone know how I could wait for the first message to be complete?
class Peer {
Peer(
{required this.us,
required this.peers,
required this.allPeers,
required this.myPeers});
String us;
Map<String, int> peers;
Map<String, List<Online>> allPeers;
Map<String, List<Online>> myPeers;
List<String>? keep;
Map<String, List<NewNodePeerMessage>> nnpms = {};
// listen should never trigger a response to connect; we will give the address / ip
// so your node two will only sync when a third node joins the network
Future listen() async {
ServerSocket ss =
await ServerSocket.bind(us.split(':')[0], int.parse(us.split(':')[1]));
print('listening on ${us.split(':')[1]}');
// List<ReceivePort> lrp = [];
ss.listen((client) {
// ReceivePort rp = ReceivePort();
utf8.decoder.bind(client).listen((data) async {
final PeerMessage pm =
PeerMessage.fromJson(json.decode(data) as Map<String, dynamic>);
print('recieved msg from ${pm.from}');
switch (pm.type) {
case 'new-node':
{
// rp.listen((_) async {
final NewNodePeerMessage nnpm = NewNodePeerMessage.fromJson(
json.decode(data) as Map<String, dynamic>);
print('msg${nnpm.toJson()}');
peers[pm.code] ??= 3;
allPeers[pm.code] ??= [];
final List<Online> news = [];
print('apl ${allPeers[pm.code]!.length}');
// maybe a list would let me know
// maybe we should have a simple check on the loop bool so that it doesn't start looping when it is adjusting the peers
print(
'allpeers before looping ${allPeers[pm.code]!.map((e) => e.toJson()).toList()}');
for (Online one in allPeers[pm.code]!.where((element) =>
element.address != pm.from &&
element.address != us &&
!nnpm.recieved.contains(element.address))) {
print(
'i am still loopin current one ${one.toJson()} current from ${pm.from}');
try {
final Socket ones = await Socket.connect(
one.address.split(':')[0],
int.parse(one.address.split(':')[1]));
nnpm.recieved.add(us);
nnpm.recieved.add(pm.from);
// if we would only move ones out of the for loop maybe the program won't work at the same point in time
// we could have a bool that keeps track of the msg it is working on, print statement 'you' inside of the msg
// so 8787 triggers 5442 because it has him in the list
// maybe a bool can be added to the list if 'you' is finished with listening
// isBusy knows if it's stuck in the loop; if we write from here we are
// if we write from connect we aren't, or this write could know if it is busy
// isBusy should be from down
// because if we write from here or we write from 'you', isBusy is true and false
// so if we write from below, can isBusy stop us
// if isBusy is true here we can go into listen but on 'you' we can not
ones.write(json.encode(NewNodePeerMessage(
isBusy: false,
max: peers[pm.code]!,
peer: nnpm.peer,
type: 'new-node',
from: us,
code: pm.code,
recieved: nnpm.recieved)
.toJson()));
print('propablywrote ${one.toJson()} from ${nnpm.from}');
ones.listen((ppmru) async {
print('listentedtoppmru ${one.toJson()}');
final PeersPeerMessageResponse ppmr =
peersPeerMessageResponse(ppmru);
print(
'abouttoaddnewonlines ${ppmr.onlines.map((e) => e.toJson())} and from ${ppmr.from}');
// news.add(Online(online: true, address: ppmr.from));
news.addAll(ppmr.onlines);
await ones.close();
});
} catch (err) {
one.online = false;
// break;
}
print('abouttoloopagain ${one.toJson()} from ${nnpm.from}');
}
print('gothereagainactuallydonelooping ${pm.from}');
// it's actually that new-node should only write to the client again if it's the first time, maybe
allPeers[pm.code]!.addAll(news);
allPeers[pm.code]!.removeWhere((element) => !element.online);
print(allPeers[pm.code]!.map((e) => e.toJson()).toList());
/// the problem occurs because of client; the client's response will shut down the base, or we could wrap it inside a try and catch
/// we need to know if this message will send the code up here, or down to print statement 'you', to ones.listen, or to the s that listens
/// one global bool could say it will go down, maybe even when it goes up here to print statement abouttoaddnewonlines
/// how do we know here below whether it will go to the 'you' print statement or down
/// so we need a message from up
allPeers[pm.code]!.add(Online(online: true, address: nnpm.peer));
// print('abouttowriteto ${client.address.address} ${client.port}');
print('abouttorespondto ${pm.from}');
client.write(json.encode(PeersPeerMessageResponse(
isBusy: true,
peer: nnpm.peer,
onlines: allPeers[pm.code]!
.where((element) => element.address != pm.from)
.toList(),
code: pm.code,
from: us)
.toJson()));
// await client.close();
// rp.sendPort.send(null);
// });
client.destroy();
break;
}
case 'new-node-through':
{
break;
}
case 'is-online':
client.write(null);
break;
case 'is-test':
print('recieved');
client.write('irespond');
break;
default:
break;
}
// client.destroy();
}, onDone: () {});
});
}
bool loop = false;
// Future connect(List<dynamic> args) async {
// isBusy should be from down
Future connect(String bootnode, String code) async {
final Socket s = await Socket.connect(
"${bootnode.split(':')[0]}", int.parse(bootnode.split(':')[1]));
print('connected to ${bootnode}');
s.write(json.encode(NewNodePeerMessage(
isBusy: true,
max: 3,
peer: us,
type: 'new-node',
code: code,
from: us,
recieved: []).toJson()));
print('befforelistening');
s.listen((pmmru) async {
print('whatwas first');
PeersPeerMessageResponse ppmr = peersPeerMessageResponse(pmmru);
print(ppmr.toJson());
allPeers[ppmr.code] ??= [];
allPeers[ppmr.code]!.addAll(ppmr.onlines);
allPeers[ppmr.code]!.add(Online(online: true, address: ppmr.from));
print(allPeers);
await s.close();
}, onDone: () {
print('doschopnescheee');
});
// because of up being triggered from this msg we know it will go down, because of the client that writes
// and the client that writes is up there and it m
// final somekindloop;
// if we would just never listen here, would it rely on up; that might solve the problem because up might be busy; we could also ssst the isolate
// s.listen((ppmru) async {
// loop = true;
// // whenever we recieve here the ppmru could have the isbusy instead
// // print('you');
// // isBusy
// // while (!loop) {}
// PeersPeerMessageResponse ppmr = peersPeerMessageResponse(ppmru);
// print(ppmr.toJson());
// allPeers[ppmr.code] ??= [];
// allPeers[ppmr.code]!.addAll(ppmr.onlines);
// allPeers[ppmr.code]!.add(Online(online: true, address: ppmr.from));
// print(allPeers);
// await s.close();
// });
}
PeersPeerMessageResponse peersPeerMessageResponse(Uint8List resp) {
final PeersPeerMessageResponse ppm = PeersPeerMessageResponse.fromJson(
json.decode(String.fromCharCodes(resp).trim()) as Map<String, dynamic>);
return ppm;
}
// Future isOnline(String code) async {
// for (Online p in allPeers[code] ??= []) {
// try {
// final Socket peer = await Socket.connect(
// p.address.split(':')[0], int.parse(p.address.split(':')[1]));
// await peer.close();
// } catch (err) {
// p.ischis = false;
// }
// }
// }
}
It's about the new-node case inside of the switch statement: while the first call is still inside the for loop, the second call does not loop but adds new peers to allPeers, which, because of the first call, results in:
Unhandled exception:
Concurrent modification during iteration: Instance(length:3) of '_GrowableList'.
#0 ListIterator.moveNext (dart:_internal/iterable.dart:336:7)
#1 WhereIterator.moveNext (dart:_internal/iterable.dart:438:22)
#2 Peer.listen.<anonymous closure>.<anonymous closure> (package:gov/peer/peer.dart:180:53)
<asynchronous suspension>
How can I use the onDone event to wait for the first call to be complete?

Getting Puppeteer timeouts often on 'await browser.newpage'

I inherited a script to manage a deploy of Salesforce code to multiple orgs in one go, to ensure all orgs are on the same version. The code is maintained in a GitHub repository and the final step is the update of the main branch, so the deploy has to be successful for all orgs before it updates the main branch. Currently we have 32 orgs for which the deploys run simultaneously (with more to be added).
The final step after the code has deployed successfully is to check all the Salesforce to Salesforce connections and mappings, since all the orgs update a 'hub' org. It is in this step that I've started getting Puppeteer timeouts. Sometimes it completes, sometimes it fails. It seems to be getting worse, in that I have to rerun it 2 or 3 times to get it to pass without timing out. I'm not experienced in Node or Puppeteer or scripts like these, so I don't know how to stop this happening. I've tried increasing the timeout from the default 30000 to 90000, but even then it fails sometimes, so that is not a solution, obviously.
Interestingly, a few of us have also been having problems lately with Chrome being dreadfully slow and timing out just in the browser (we run on the latest version of Chrome), and I read that Puppeteer uses Chrome. I tried googling but haven't found anything that helps me, hence posting this query here.
I would appreciate any help to sort this out because running it multiple times for each deploy is not a viable solution, especially with the length of time it takes to complete.
This is the function from where it sets the timeout.
async function checkDifferencesForConnectionSafely(
argv: Config,
browser: Browser,
connection: Connection,
changes: SubscribedFieldUpdate[]
): Promise<void> {
const page = await browser.newPage();
page.setDefaultNavigationTimeout(90000); // added this but it still times out
try {
console.log(`Checking ${connection.username} -> ${connection.name}`);
await checkDifferencesForConnection(argv, page, connection, changes);
console.log(`Finished ${connection.username} -> ${connection.name}`);
} catch (e) {
console.log(`Failed ${connection.username} -> ${connection.name}`, e);
throw e;
} finally {
await page.close();
}
}
And this is the called function where I believe the timeout happens:
async function checkDifferencesForConnection(
argv: Config,
page: Page,
connection: Connection,
changes: SubscribedFieldUpdate[]
): Promise<void> {
await page.goto(connection.url);
const subscribedObjects = await getSubscribedObjects(page);
for (const object of subscribedObjects) {
await gotoObject(page, object);
const fields = await getSubscribedFields(page);
let changesMade = false;
for (const field of fields) {
field.isStrict = argv.strict;
if (field.selectedValueNeedsUpdate()) {
const newValue = field.newValue();
changes.push({
connection,
connectionObject: object,
connectionField: field,
newValue
});
await selectMapping(page, field, newValue);
changesMade = true;
} else if (!field.value) {
const options = field.options.map((o) => o.name);
throw new Error(
`No value for ${connection.name} -> ${object.name} -> ${field.name}, ` +
`options: ${options.join(", ")}`
);
}
}
if (!argv.skipPicklists) {
if (!argv.dryRun && changesMade) {
await saveSubscribedFields(page);
await gotoObject(page, object);
changesMade = false;
}
const pickListMappings = await getPicklistMappingLinks(page);
for (const pickListMapping of pickListMappings) {
try {
await pickListMapping.click();
} catch (e) {
console.log(
`Failed ${connection.username} -> ${connection.name} -> ${object.name} -> ${pickListMapping.id}`,
e
);
throw e;
}
const picklistValues = await getPicklistValues(page);
for (const picklistValue of picklistValues) {
picklistValue.isStrict = argv.strict;
if (picklistValue.selectedValueNeedsUpdate()) {
const newValue = picklistValue.newValue();
changes.push({
connection,
connectionObject: object,
connectionField: picklistValue,
newValue
});
await selectMapping(page, picklistValue, newValue);
changesMade = true;
}
}
await savePicklistMapping(page);
}
}
if (!argv.dryRun && changesMade) {
await saveSubscribedFields(page);
}
}
}
This is the error thrown (after running 2 hrs 40 min!). It is close to the end of the process, so it has completed most of the org checks at this stage. It doesn't always fail in the same place or on the same org checks, so the timeout is not related to a specific connection.
The full script is here:
import puppeteer, { Browser, Page } from "puppeteer";
import { flatten } from "lodash";
import yargs from "yargs";
import pAll from "p-all";
import {
loginAndGetConnections,
Connection
} from "../page-objects/sf2sf-home.page-object";
import {
getSubscribedObjects,
ConnectionObject
} from "../page-objects/sf2sf-connection.page-object";
import {
SubscribedField,
SubscribedFieldOption,
getSubscribedFields,
gotoObject,
selectMapping,
getPicklistValues,
save as saveSubscribedFields,
getPicklistMappingLinks,
savePicklistMapping
} from "../page-objects/sf2sf-subscribed-fields.page-object";
import { SClusterConfig } from "../s-cluster-config";
class Config {
configFile: string;
clusterConfigFile: string;
dryRun: boolean;
strict: boolean;
concurrency: number;
skipPicklists: boolean;
constructor() {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const argv: any = yargs
.scriptName("publish-connections")
.describe("config-file", "The file configuring the SF2SF sync.")
.alias("config-file", "c")
.default("config-file", "./sf2sf.config.json")
.describe("cluster-config-file", "The file configuring the SF2SF sync.")
.alias("cluster-config-file", "f")
.string("cluster-config-file")
.required("cluster-config-file")
.describe(
"dry-run",
"don't make any changes, just print what you're going to do."
)
.boolean("dry-run")
.default("dry-run", false)
.describe("strict", "Prevents associations from being unassigned")
.boolean("strict")
.default("strict", false)
.number("concurrency")
.default("concurrency", 10)
.describe("skip-picklists", "Skip assigning the picklists")
.boolean("skip-picklists")
.default("skip-picklists", false).argv;
this.configFile = argv["config-file"];
this.clusterConfigFile = argv["cluster-config-file"];
this.dryRun = argv["dry-run"];
this.strict = argv["strict"];
this.concurrency = argv["concurrency"];
this.skipPicklists = argv["skip-picklists"];
}
}
interface SubscribedFieldUpdate {
connection: Connection;
connectionObject: ConnectionObject;
connectionField: SubscribedField;
newValue?: SubscribedFieldOption;
}
async function checkDifferencesForConnection(
argv: Config,
page: Page,
connection: Connection,
changes: SubscribedFieldUpdate[]
): Promise<void> {
await page.goto(connection.url);
const subscribedObjects = await getSubscribedObjects(page);
for (const object of subscribedObjects) {
await gotoObject(page, object);
const fields = await getSubscribedFields(page);
let changesMade = false;
for (const field of fields) {
field.isStrict = argv.strict;
if (field.selectedValueNeedsUpdate()) {
const newValue = field.newValue();
changes.push({
connection,
connectionObject: object,
connectionField: field,
newValue
});
await selectMapping(page, field, newValue);
changesMade = true;
} else if (!field.value) {
const options = field.options.map((o) => o.name);
throw new Error(
`No value for ${connection.name} -> ${object.name} -> ${field.name}, ` +
`options: ${options.join(", ")}`
);
}
}
if (!argv.skipPicklists) {
if (!argv.dryRun && changesMade) {
await saveSubscribedFields(page);
await gotoObject(page, object);
changesMade = false;
}
const pickListMappings = await getPicklistMappingLinks(page);
for (const pickListMapping of pickListMappings) {
try {
await pickListMapping.click();
} catch (e) {
console.log(
`Failed ${connection.username} -> ${connection.name} -> ${object.name} -> ${pickListMapping.id}`,
e
);
throw e;
}
const picklistValues = await getPicklistValues(page);
for (const picklistValue of picklistValues) {
picklistValue.isStrict = argv.strict;
if (picklistValue.selectedValueNeedsUpdate()) {
const newValue = picklistValue.newValue();
changes.push({
connection,
connectionObject: object,
connectionField: picklistValue,
newValue
});
await selectMapping(page, picklistValue, newValue);
changesMade = true;
}
}
await savePicklistMapping(page);
}
}
if (!argv.dryRun && changesMade) {
await saveSubscribedFields(page);
}
}
}
async function checkDifferencesForConnectionSafely(
argv: Config,
browser: Browser,
connection: Connection,
changes: SubscribedFieldUpdate[]
): Promise<void> {
const page = await browser.newPage();
page.setDefaultNavigationTimeout(90000);
try {
console.log(`Checking ${connection.username} -> ${connection.name}`);
await checkDifferencesForConnection(argv, page, connection, changes);
console.log(`Finished ${connection.username} -> ${connection.name}`);
} catch (e) {
console.log(`Failed ${connection.username} -> ${connection.name}`, e);
throw e;
} finally {
await page.close();
}
}
(async (): Promise<void> => {
const argv = new Config();
const { clusterConfigFile, concurrency } = argv;
const clusterConfig = await SClusterConfig.fromPath(clusterConfigFile);
const browser = await puppeteer.launch({});
const connections = flatten(
await pAll(
clusterConfig.usernames.map(
(username) => (): Promise<Connection[]> =>
loginAndGetConnections(browser, username)
),
{ concurrency }
)
).filter((conn) => conn.isActive);
const differences: SubscribedFieldUpdate[] = [];
await pAll(
connections.map(
(connection) => (): Promise<void> =>
checkDifferencesForConnectionSafely(
argv,
browser,
connection,
differences
)
),
{ concurrency }
);
const result = differences.map(
({ connection, connectionObject, connectionField, newValue }) => ({
username: connection.username,
connection: connection.name,
object: connectionObject.name,
field: connectionField.name,
oldValue: (connectionField.value && connectionField.value.name) || "",
newValue: (newValue && newValue.name) || ""
})
);
console.log(JSON.stringify(result, null, " "));
await browser.close();
})();

How to prevent multiple instances in Electron

I do not know if this is possible but I might as well give it a chance and ask.
I'm doing an Electron app and I'd like to know if it is possible to have no more than a single instance at a time.
I have found this gist but I'm not sure how to use it. Can someone shed some light or share a better idea?
var preventMultipleInstances = function(window) {
var socket = (process.platform === 'win32') ? '\\\\.\\pipe\\myapp-sock' : path.join(os.tmpdir(), 'myapp.sock');
net.connect({path: socket}, function () {
var errorMessage = 'Another instance of ' + pjson.productName + ' is already running. Only one instance of the app can be open at a time.'
dialog.showMessageBox(window, {'type': 'error', message: errorMessage, buttons: ['OK']}, function() {
window.destroy()
})
}).on('error', function (err) {
if (process.platform !== 'win32') {
// try to unlink older socket if it exists, if it doesn't,
// ignore ENOENT errors
try {
fs.unlinkSync(socket);
} catch (e) {
if (e.code !== 'ENOENT') {
throw e;
}
}
}
net.createServer(function (connection) {}).listen(socket);
});
}
There is a new API now: requestSingleInstanceLock
const { app } = require('electron')
let myWindow = null
const gotTheLock = app.requestSingleInstanceLock()
if (!gotTheLock) {
app.quit()
} else {
app.on('second-instance', (event, commandLine, workingDirectory) => {
// Someone tried to run a second instance, we should focus our window.
if (myWindow) {
if (myWindow.isMinimized()) myWindow.restore()
myWindow.focus()
}
})
// Create myWindow, load the rest of the app, etc...
app.on('ready', () => {
})
}
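For completeness, a minimal sketch of what the empty 'ready' handler above might contain; the BrowserWindow options and the index.html path are only illustrative:
const { BrowserWindow } = require('electron')

app.on('ready', () => {
  // illustrative only: create the window referenced by the second-instance handler
  myWindow = new BrowserWindow({ width: 800, height: 600 })
  myWindow.loadFile('index.html') // hypothetical entry page
})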
Use the makeSingleInstance function in the app module; there's even an example in the docs. (Note that app.makeSingleInstance is the older API; newer Electron versions replaced it with requestSingleInstanceLock, shown above.)
In case you need the code:
let mainWindow = null;
//to make singleton instance
const isSecondInstance = app.makeSingleInstance((commandLine, workingDirectory) => {
// Someone tried to run a second instance, we should focus our window.
if (mainWindow) {
if (mainWindow.isMinimized()) mainWindow.restore()
mainWindow.focus()
}
})
if (isSecondInstance) {
app.quit()
}

Dartlang server call my function more than once

I'm sending data to the server like this:
save(){
var el = this.parent.nodes;
print(el);print(el.length);
request = new HttpRequest();
if(el.length == 1) print('No lines to save!');
else
{
var opt = el[i].shadowRoot.nodes[0].options[el[i].shadowRoot.nodes[0].selectedIndex].text;
print(this.parent.nodes.length);
for(var i=1; i < el.length; i++)
{
orderLines.add({
'poid': orderHeader[0]['OrderID'],
'ponum': orderHeader[0]['onum'],
'lnum' : i.toString(),
'itmid' :el[i].shadowRoot.nodes[0].value,
'icode' : opt,
'qty': el[i].shadowRoot.nodes[1].value,
'rqty': 0,
'bqty': el[i].shadowRoot.nodes[1].value,
'iqty': 0,
'biqty': el[i].shadowRoot.nodes[1].value,
'price': el[i].shadowRoot.nodes[2].value,
'rdd': orderHeader[0]['rdd'],
'eta': '',
'flag': 0
});
print(orderLines);
request.onReadyStateChange.listen(onData_save);
request.open('POST', host+'/sPO');
request.send(JSON.encode(orderLines));
}
}
}
and my server side function is:
void main() {
connections = new List<WebSocket>();
HttpServer.bind(HOST, PORT).then((HttpServer server) {
print('Server listening on port ${PORT}.');
server.listen((HttpRequest request) {
if (WebSocketTransformer.isUpgradeRequest(request)) {
WebSocketTransformer.upgrade(request).then(handleWS);
} else gotMSG(request);
});
});
}
handleWS(WebSocket ws){
connections.add(ws);
print('Client connected, there are now ${connections.length} client(s) connected.');
ws.listen((String message) {
for (WebSocket connection in connections) {
connection.add(message);
}
},
onDone: () {
connections.remove(ws);
print('Client disconnected, there are now ${connections.length} client(s) connected.');
});
}
void gotMSG(HttpRequest request) {
switch (request.method) {
case 'POST':
handlePost(request);
break;
case 'OPTIONS':
handleOptions(request);
break;
default:
defaultHandler(request);
}
}
void serveRequest(HttpRequest request){
print('Listening for GET and POST on http://$HOST:$PORT');
request.response.statusCode = HttpStatus.FORBIDDEN;
request.response.reasonPhrase = "WebSocket connections only";
request.response.close();
}
void handlePost(HttpRequest req) {
HttpResponse res = req.response;
switch (req.uri.path) {
case '/login': login(req); break;
...
case '/sPO': savePO(req); break;
default: break;
}
}
The /sPO => savePO handler is executed once if the order sent has only one line, but if the order has n lines, the function is executed more than once; I could not find a pattern for that.
In savePO I used the oracledart pub, so I thought something was wrong in it and tried the postgresql pub, but got the same results. The savePO function is:
void savePO(HttpRequest req){
HttpResponse res = req.response;
addCorsHeaders(res);
print('${req.method}: ${req.uri.path}');
Future future() => new Future.value(true);
req.listen((List<int> buffer) {
var theDataLines = JSON.decode(new String.fromCharCodes(buffer));
print(theDataLines);
connect(db).then((conn) {
for (var theData in theDataLines)
conn.execute("""
insert into pol
(poid,ponum,lnum,itmid,icode,qty,rqty,bqty,iqty,biqty,price,rdd, eta, flag)
values (#poid,#ponum,#lnum,#itmid,#icode,#qty,#rqty,#bqty,#iqty,#biqty,#price,
to_timestamp(#rdd,'YYYY-MM-DD'), to_timestamp(#eta,'YYYY-MM-DD'), #flag)
""",
{
'poid': theData['poid'],
'ponum': theData['ponum'],
'lnum' : theData['lnum'],
'itmid' : theData['itmid'],
'icode' : theData['icode'],
'qty': theData['qty'],
'rqty': theData['rqty'],
'bqty': theData['bqty'],
'iqty': theData['iqty'],
'biqty': theData['biqty'],
'price': theData['price'],
'rdd': theData['rdd'].toString(),
'eta': theData['eta'].toString(),
'flag': theData['flag']
})
.then((_)=>conn.query('commit').toList().then((rows) {print('committed');}))
.then((_){
res.write('done');
res.close();
});
}); // END of SQL
}, onError: printError); // End of server listen
} // END of function
I even tried to change the:
case '/sPO': savePO(req); break;
to be
case '/sPO': print(1); break;
then it printed the 1 four times after sending an order of 6 lines!
It's hard for me to see what you are actually trying to accomplish.
The problem is very probably your save() method. You wrote how it behaves, but not much about what you are trying to accomplish.
Why don't you put more lines into one JSON string and post them together in one request?
You create one request instance and call send repeatedly on this one request instance.
You also register the onReadyStateChange handler more than once on the same request object, which results in onData_save being called several times when the event occurs just once.
I think you should either move request = new HttpRequest(); down to just before
request.open('POST', host+'/sPO');
request.send(JSON.encode(orderLines));
or, better, move request.onReadyStateChange.listen(onData_save); up next to request = new HttpRequest();,
add all order lines into one JSON, and call
request.open('POST', host+'/sPO');
request.send(JSON.encode(orderLines));
after the for loop.
Another problem I see is that you do a fire-and-forget. What if the send request fails for some reason?
I would create a sendJSON method that returns a Future (with a Completer that completes on onDone and calls completeError when something goes wrong).
When you want to create more than one request in your save() you can use something like
// create your JSON
var futures = [];
for (var i = 0; i < 5; i++) {
  futures.add(sendJSON(myJson)); // collect the futures returned from sendJSON
}
// execute all futures, wait for all to respond, and then return another future
return Future.wait(futures)
    .then((results) {
  results.forEach((r) {
    // check result
  });
});

How can I access the result of the response of HttpRequest in Dart?

After many attempts to get the content of the response of an HttpRequest, I completely failed to understand why I can't get what I want. I must mention that I can log and manipulate the response only inside onReadyStateChange (onLoad and onLoadEnd give me the same results!), but I really want that value outside the callback.
Here is the part of the code that I'm stuck with:
Map responsData;
req=new HttpRequest()
..open(method,url)
..send(infojson);
req.onReadyStateChange.listen((ProgressEvent e){
if (req.readyState == HttpRequest.DONE ){
if(req.status == 200){
responsData = {'data': req.responseText};
print("data receaved: ${ req.responseText}");
//will log {"data":mydata}
}
if(req.status == 0){
responsData = {'data':'No server'};
print(responsData );
//will log {"data":No server}
}
}
});
//anything here to get responsData won't work
You have to assign an onLoad callback before you call send.
I'm not sure what you mean by "only inside an onReadyStateChange".
Maybe you want to assign the responseText to a variable outside the callback.
Create a method:
Future<String> send(String method, String url, String infojson) {
  var completer = new Completer<String>();
  // var result;
  req = new HttpRequest()
    ..open(method, url)
    ..onLoad.listen((event) {
      // print('Request complete ${event.target.responseText}');
      // result = event.target.responseText;
      completer.complete(event.target.responseText);
    })
    ..send(infojson);
  return completer.future;
}
and call this method like
var result;
send(method, url, infojson).then((e) {
  // result = e;
  print('Request complete ${e}');
});
