I use this script to upload data from Google Sheets. Is there some way to set the maximum number of allowed errors? I want to ignore all errors and upload the data no matter how many occur. I have a lot of different big tables and they have a different format every time.
I'm able to load this data correctly manually (I simply allow 100 or 1000 errors), but this script runs with autodetect: true and doesn't allow any errors. Thanks.
/**
* Function to run from the UI menu.
*
* Uploads the sheets defined in the active sheet into BigQuery.
*/
function runFromUI() {
// Column indices.
const SHEET_URL = 1;
const PROJECT_ID = 2;
const DATASET_ID = 3;
const TABLE_ID = 4;
const APPEND = 5;
const STATUS = 6;
// Get the data range rows, skipping the header (first) row.
let sheet = SpreadsheetApp.getActiveSheet();
let rows = sheet.getDataRange().getValues().slice(1);
// Run the sheetToBigQuery function for every row and write the status.
rows.forEach((row, i) => {
let status = sheetToBigQuery(
row[SHEET_URL],
row[PROJECT_ID],
row[DATASET_ID],
row[TABLE_ID],
row[APPEND],
);
sheet.getRange(i+2, STATUS+1).setValue(status);
});
}
/**
* Uploads a single sheet to BigQuery.
*
* @param {string} sheetUrl - The Google Sheet URL containing the data to upload.
* @param {string} projectId - Google Cloud Project ID.
* @param {string} datasetId - BigQuery Dataset ID.
* @param {string} tableId - BigQuery Table ID.
* @param {boolean} append - Appends to the BigQuery table if true, otherwise replaces the content.
*
* @return {string} status - Returns the status of the job.
*/
function sheetToBigQuery(sheetUrl, projectId, datasetId, tableId, append) {
try {
createDatasetIfDoesntExist(projectId, datasetId);
} catch (e) {
return `${e}: Please verify your "Project ID" exists and you have permission to edit BigQuery`;
}
let sheet;
try {
sheet = openSheetByUrl(sheetUrl);
} catch (e) {
return `${e}: Please verify the "Sheet URL" is pasted correctly`;
}
// Get the values from the sheet's data range as a matrix of values.
let rows = sheet.getDataRange().getValues();
// Normalize the headers (first row) to valid BigQuery column names.
// https://cloud.google.com/bigquery/docs/schemas#column_names
rows[0] = rows[0].map((header) => {
header = header.toLowerCase().replace(/[^\w]+/g, '_');
if (header.match(/^\d/))
header = '_' + header;
return header;
});
// Create the BigQuery load job config. For more information, see:
// https://developers.google.com/apps-script/advanced/bigquery
let loadJob = {
configuration: {
load: {
destinationTable: {
projectId: projectId,
datasetId: datasetId,
tableId: tableId
},
autodetect: true, // Infer schema from contents.
writeDisposition: append ? 'WRITE_APPEND' : 'WRITE_TRUNCATE',
}
}
};
// BigQuery load jobs can only load files, so we need to transform our
// rows (matrix of values) into a blob (file contents as string).
// For convenience, we convert the rows into a CSV data string.
// https://cloud.google.com/bigquery/docs/loading-data-local
let csvRows = rows.map(values =>
// We use JSON.stringify() to add "quotes to strings",
// but leave numbers and booleans without quotes.
// If a string itself contains quotes ("), JSON escapes them with
// a backslash as \" but the CSV format expects them to be
// escaped as "", so we replace all the \" with "".
values.map(value => JSON.stringify(value).replace(/\\"/g, '""'))
);
let csvData = csvRows.map(values => values.join(',')).join('\n');
let blob = Utilities.newBlob(csvData, 'application/octet-stream');
// Run the BigQuery load job.
try {
BigQuery.Jobs.insert(loadJob, projectId, blob);
} catch (e) {
return e;
}
Logger.log(
'Load job started. Click here to check your jobs: ' +
`https://console.cloud.google.com/bigquery?project=${projectId}&page=jobs`
);
// The status of a successful run contains the timestamp.
return `Last run: ${Utilities.formatDate(new Date(), SpreadsheetApp.getActive().getSpreadsheetTimeZone(), "yyyy-MM-dd HH:mm")}`;
}
/**
* Creates a dataset if it doesn't exist, otherwise does nothing.
*
* @param {string} projectId - Google Cloud Project ID.
* @param {string} datasetId - BigQuery Dataset ID.
*/
function createDatasetIfDoesntExist(projectId, datasetId) {
try {
BigQuery.Datasets.get(projectId, datasetId);
} catch (err) {
let dataset = {
datasetReference: {
projectId: projectId,
datasetId: datasetId,
},
};
BigQuery.Datasets.insert(dataset, projectId);
Logger.log(`Created dataset: ${projectId}:${datasetId}`);
}
}
/**
* Opens the spreadsheet sheet (tab) with the given URL.
*
* @param {string} sheetUrl - Google Sheet URL.
*
* @returns {Sheet} - The sheet corresponding to the URL.
*
* @throws Throws an error if the sheet doesn't exist.
*/
function openSheetByUrl(sheetUrl) {
// Extract the sheet (tab) ID from the Url.
let sheetIdMatch = sheetUrl.match(/gid=(\d+)/);
let sheetId = sheetIdMatch ? sheetIdMatch[1] : null;
// From the open spreadsheet, get the sheet (tab) that matches the sheetId.
let spreadsheet = SpreadsheetApp.openByUrl(sheetUrl);
let sheet = spreadsheet.getSheets().filter(sheet => sheet.getSheetId() == sheetId)[0];
if (!sheet)
throw 'Sheet tab ID does not exist';
return sheet;
}
If you want to set the number of maximum allowed errors, you can use the maxBadRecords parameter in your load configuration. If you want BigQuery to ignore values that don't match the schema instead of failing, you can set ignoreUnknownValues to true.
let loadJob = {
configuration: {
load: {
destinationTable: {
projectId: projectId,
datasetId: datasetId,
tableId: tableId
},
autodetect: true, // Infer schema from contents.
// maxBadRecords: 1000, // tolerate up to 1000 bad records before the job fails
ignoreUnknownValues: true, // ignore values not matching the schema; use one or the other
writeDisposition: append ? 'WRITE_APPEND' : 'WRITE_TRUNCATE',
}
}
};
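If the allowed error count should vary per table, one option (my addition, not part of the original script) is to read it from an extra column in the control sheet and pass it through to the load config. A minimal sketch, where maxBadRecords is a hypothetical extra argument threaded through sheetToBigQuery:
// Sketch only: maxBadRecords would come from a hypothetical
// "Max Bad Records" column in the control sheet, read in runFromUI.
function buildLoadConfig(projectId, datasetId, tableId, append, maxBadRecords) {
  return {
    configuration: {
      load: {
        destinationTable: { projectId: projectId, datasetId: datasetId, tableId: tableId },
        autodetect: true,
        maxBadRecords: maxBadRecords || 0, // BigQuery's default is 0
        writeDisposition: append ? 'WRITE_APPEND' : 'WRITE_TRUNCATE',
      }
    }
  };
}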
References:
BigQuery v2 | Job Configuration Load
I've tried to write an Ads script that returns all the child accounts which had zero impressions in the latest available hour (3 hours ago).
However, the script returns false negatives: there was an account with zero impressions, but the script flagged it as non-zero.
What am I missing?
function main() {
Logger.log("now.getHours(); = "+new Date().getHours());
var past = new Date(new Date().getTime() - HOURS_BACK * 3600 * 1000);
var pastHour = past.getHours();
var pastDateStr = getDateStringInTimeZone(past, 'yyyy-MM-dd');
query = "SELECT customer.id, metrics.impressions, segments.hour FROM customer WHERE metrics.impressions = 0 AND segments.hour = " + pastHour + " AND segments.date = '" + pastDateStr + "'";
Logger.log("query " + query);
updateAccountsInParallel();
}
function updateAccountsInParallel() {
// You can use this approach when you have a large amount of processing
// to do in each of your client accounts.
// Select the accounts to be processed. You can process up to 50 accounts.
var accountSelector = AdsManagerApp.accounts();
// Process the account in parallel. The callback method is optional.
accountSelector.executeInParallel('processAccount', 'allFinished', query);
}
/**
* Process one account at a time. This method is called by the executeInParallel
* method call in updateAccountsInParallel function for every account that
* it processes.
*/
function processAccount(query) {
// executeInParallel will automatically switch context to the account being
// processed, so all calls to AdsApp will apply to the selected account.
var customerId = AdsApp.currentAccount();
if (excludedAccountIds.includes(customerId)) return null;
var currentZeroImpressionRows = AdsApp.report(query, { apiVersion: 'v10' });
var rows = currentZeroImpressionRows.rows();
var accounts = [];
while (rows.hasNext()) {
var row = rows.next();
Logger.log(JSON.stringify(row));
accounts = accounts.push(row["customer.id"] + " " + row["customer.descriptive_name"]);
}
// Optional: return a string value. If you have a more complex JavaScript
// object to return from this method, use JSON.stringify(value). This value
// will be passed on to the callback method, if specified, in the
// executeInParallel method call.
return accounts.length > 0 ? account.getCustomerId() + " " + account.getName() : null;
}
/**
* Post-process the results from processAccount. This method will be called
* once all the accounts have been processed by the executeInParallel method
* call.
*
* @param {Array.<ExecutionResult>} results An array of ExecutionResult objects,
* one for each account that was processed by the executeInParallel method.
*/
function allFinished(results) {
var todayZeroCostAccounts = [];
for (var i = 0; i < results.length; i++) {
// Get the ExecutionResult for an account.
var result = results[i];
//Logger.log('Customer ID: %s; status = %s.', result.getCustomerId(),
// result.getStatus());
// Check the execution status. This can be one of ERROR, OK, or TIMEOUT.
if (result.getStatus() == 'ERROR') {
Logger.log("-- Failed with error: '%s'.", result.getError());
} else if (result.getStatus() == 'OK') {
// This is the value you returned from processAccount method. If you
// used JSON.stringify(value) in processAccount, you can use
// JSON.parse(text) to reconstruct the JavaScript object.
var retval = result.getReturnValue();
if (retval != null) {
Logger.log('%s had 0 impressions in that hour.', result.getCustomerId());
todayZeroCostAccounts.push(retval);
}
else
{
Logger.log('%s had positive impressions in that hour.', result.getCustomerId());
}
} else {
// Handle timeouts here.
}
}
}
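A note on likely culprits in processAccount (my reading, not verified against your data): Array.prototype.push returns the new length, so accounts = accounts.push(...) replaces the array with a number after the first row, and the final return references account, which is never defined in that function. Also, AdsApp.currentAccount() returns an account object, not an ID, so comparing it against excludedAccountIds will never match. A sketch with those assumptions corrected:
function processAccount(query) {
  // executeInParallel switches context to the account being processed.
  var account = AdsApp.currentAccount();
  // Compare IDs, not the account object itself.
  if (excludedAccountIds.includes(account.getCustomerId())) return null;
  var rows = AdsApp.report(query, { apiVersion: 'v10' }).rows();
  var matches = [];
  while (rows.hasNext()) {
    var row = rows.next();
    // push() returns the new length; don't assign its result back.
    // customer.descriptive_name must also be added to the SELECT clause.
    matches.push(row['customer.id'] + ' ' + row['customer.descriptive_name']);
  }
  // The query already filters for zero impressions, so any row is a match.
  return matches.length > 0 ? account.getCustomerId() + ' ' + account.getName() : null;
}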
I'm trying to recover the history of a single asset. The model is defined as follows:
namespace org.example.basic
asset SampleAsset identified by assetId {
o String assetId
--> SampleParticipant owner
o String value
}
participant SampleParticipant identified by participantId {
o String participantId
o String firstName
o String lastName
}
transaction GetAssetHistory {
o String assetId
}
event SampleEvent {
--> SampleAsset asset
o String oldValue
o String newValue
}
I generate a single participant and a new asset referencing that participant, and I proceed to update the asset's value field. Reading about asset updates, I found the following:
async function getAssetHistory(tx) {
//How can I get a single asset history using the tx.assetId value??
let historian = await businessNetworkConnection.getHistorian();
let historianRecords = await historian.getAll();
console.log(prettyoutput(historianRecords));
}
When I deploy the .bna and call the function, I get a dump of all the historian records (screenshot omitted), not the history of a single asset.
In other functions I use the Runtime API, but I don't know whether businessNetworkConnection is a Runtime API call.
Any idea how I can get the history of a single asset? Is there any example on the internet?
UPDATE:
I changed the way I recover a particular asset's history, as follows.
In the JS file:
/**
* Sample read-only transaction
* @param {org.example.trading.MyPartHistory} tx
* @returns {org.example.trading.Trader[]} All transactions
* @transaction
*/
async function participantHistory(tx) {
console.log('1');
const partId = tx.tradeId; // must match "o String tradeId" in the model
console.log('2');
const nativeSupport = tx.nativeSupport;
// const partRegistry = await getParticipantRegistry('org.example.trading.Trader')
console.log('3');
const nativeKey = getNativeAPI().createCompositeKey('Asset:org.example.trading.Trader', [partId]);
console.log('4');
const iterator = await getNativeAPI().getHistoryForKey(nativeKey);
let results = [];
let res = {done : false};
while (!res.done) {
res = await iterator.next();
if (res && res.value && res.value.value) {
let val = res.value.value.toString('utf8');
if (val.length > 0) {
console.log("#debug val is " + val );
results.push(JSON.parse(val));
}
}
if (res && res.done) {
try {
iterator.close();
}
catch (err) {
}
}
}
var newArray = [];
for (const item of results) {
newArray.push(getSerializer().fromJSON(item));
}
console.log("#debug the results to be returned are as follows: ");
return newArray; // returns something to my NodeJS client (called via REST API)
}
In the model file:
@commit(false)
@returns(Trader[])
transaction MyPartHistory {
o String tradeId
}
I create a single asset and then update it with other values. But when I call MyPartHistory I get the following message:
Error: Native API not available in web runtime
Use of the native API is only available when you are running your business network in a real Fabric environment. You can't use it in the online Playground environment. You will have to set up a real Fabric environment and then run Playground locally, connecting to that Fabric, in order to test your business network.
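If you need something that works from a client without the native API, one rough alternative is to filter the historian records client-side. This is an untested sketch: it assumes your update transaction emits the SampleEvent from the model above, and it relies on the eventsEmitted field that Composer's system model defines on HistorianRecord:
const { BusinessNetworkConnection } = require('composer-client');

// Sketch: fetch all historian records and keep those whose emitted
// events reference the one asset we care about.
async function singleAssetHistory(cardName, assetId) {
  const connection = new BusinessNetworkConnection();
  await connection.connect(cardName);
  const historian = await connection.getHistorian();
  const records = await historian.getAll();
  return records.filter(record =>
    (record.eventsEmitted || []).some(event =>
      event.asset && event.asset.getIdentifier() === assetId));
}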
I have a table that contains a collection of files.
My use case is that when a user clicks on a row in that table, an event is fired that calls a service and fetches JSON data. The JSON data changes whenever the user clicks on a different row, so I won't have fixed properties or keys (as in key-value pairs); I just get the JSON data in the form of a 2D string array.
My task is to grab this data and export it into an Excel document.
I was able to find a solution to this problem and I am posting it here. Here are the links I referred to in order to make this work:
https://stackblitz.com/edit/angular6-export-xlsx
https://medium.com/@madhavmahesh/exporting-an-excel-file-in-angular-927756ac9857
The first thing to do is install the dependencies (the service below imports both):
npm install xlsx file-saver
Then create a service -->
import { Injectable } from '@angular/core';
import * as FileSaver from 'file-saver';
import * as XLSX from 'xlsx';
const EXCEL_TYPE =
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet;charset=UTF-8';
const EXCEL_EXTENSION = '.xlsx';
@Injectable()
export class ExcelService {
constructor() {}
public exportAsExcelFile(json: any[], excelFileName: string): void {
const worksheet: XLSX.WorkSheet = XLSX.utils.json_to_sheet(json);
console.log('worksheet', worksheet);
const workbook: XLSX.WorkBook = { Sheets: { data: worksheet }, SheetNames: ['data'] };
const excelBuffer: any = XLSX.write(workbook, { bookType: 'xlsx', type: 'array' });
// const excelBuffer: any = XLSX.write(workbook, { bookType: 'xlsx', type: 'buffer' });
this.saveAsExcelFile(excelBuffer, excelFileName);
}
private saveAsExcelFile(buffer: any, fileName: string): void {
const data: Blob = new Blob([buffer], {
type: EXCEL_TYPE
});
FileSaver.saveAs(data, fileName + '_export_' + new Date().getTime() + EXCEL_EXTENSION);
}
}
Import this service in your module (app.module.ts or wherever your component is declared). Make sure you add the service to the 'providers' array.
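For reference, a minimal registration might look like this (module and component names are illustrative):
import { NgModule } from '@angular/core';
import { BrowserModule } from '@angular/platform-browser';
import { AppComponent } from './app.component';
import { ExcelService } from './excel.service';

@NgModule({
  declarations: [AppComponent],
  imports: [BrowserModule],
  providers: [ExcelService], // makes ExcelService available for injection
  bootstrap: [AppComponent],
})
export class AppModule {}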
Then in your TS file, add this function -->
exportAsXLSX(): void {
  this.someService.getFileData(this.clientFile).subscribe(
    (res: any) => {
      if (res.data) {
        this.fileData = res.data;
        // Export inside the subscription so the data has actually arrived.
        this.excelService.exportAsExcelFile(this.fileData, 'sample');
      }
    },
    err => {
      let someMsg = 'Error exporting file';
      if (err.status === 409) {
        someMsg =
          err.error.error.errorMessage.length > 0
            ? err.error.error.errorMessage
            : someMsg;
      }
      this.toastService.error(someMsg);
    }
  );
}
Here, this.fileData is an array of type any, defined as fileData: any[] in my component. It's totally up to you how you get data into this array; the main thing is to pass it to the function that converts it to an Excel sheet.
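One caveat from me: XLSX.utils.json_to_sheet expects an array of objects and uses their keys as column headers. If, as in the question, the data arrives as a 2D string array, SheetJS also provides XLSX.utils.aoa_to_sheet, which takes an array of arrays directly. A variant of the service method might look like this:
// Sketch: same flow as exportAsExcelFile, but built from an array-of-arrays.
public exportArrayAsExcelFile(rows: string[][], excelFileName: string): void {
  const worksheet: XLSX.WorkSheet = XLSX.utils.aoa_to_sheet(rows);
  const workbook: XLSX.WorkBook = { Sheets: { data: worksheet }, SheetNames: ['data'] };
  const excelBuffer: any = XLSX.write(workbook, { bookType: 'xlsx', type: 'array' });
  this.saveAsExcelFile(excelBuffer, excelFileName);
}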
Now that you have your service written and your component has a function for it, it's time to wire this up to the HTML file.
<div class="ui-helper-clearfix">
<button
type="button"
pButton
icon="fa fa-file-excel-o"
iconPos="left"
label="Export To Excel"
(click)="exportAsXLSX()"
style="float:left"
></button>
</div>
When you click the button, the data is exported into an Excel document.
In Hyperledger Composer v0.19.12, I am trying to use the @returns decorator to return an asset. When I call the function through the REST API, I get a successful transaction (200 return code) but do not get the Account object in the response body. Below are the transaction as defined in the data model file, the associated transaction function, and the response body from the REST API call. The Account object is defined in the same model file.
I expect to get an Account JSON object back. What am I doing wrong?
Transaction model
/*
Read only transaction to load account
*/
@commit(false)
@returns(Account)
transaction LoadAccountTx {
o String accountId
}
Transaction function
/**
* function to load an account
* @param {org.scsvault.history.LoadAccountTx} loadAccountTx
* @returns {org.scsvault.history.Account} The resulting account
* @transaction
*/
async function loadAccount(loadAccountTx)
{
var i = 2;
var factory = getFactory();
var NS = 'org.scsvault.history';
var account = factory.newResource(NS, 'Account', 'ACCOUNT_1');
account.accountType = 'CREDITCARD';
account.balance = 100;
account.openingbalance = 1000;
account.opendate = new Date(2017, i, i);
if (i % 2) {
account.approvalStatus = 'REQUEST_PENDING';
}
else {
account.approvalStatus = 'CREATE';
}
account.status = 'PENDING_APPROVAL';
account.creditlimit = i * 1000;
account.term_months = i;
account.encryptedDescription = account.accountType + ' from Chase';
account.apr = i;
return account;
}
Response Body:
{
"$class": "org.scsvault.history.LoadAccountTx",
"accountId": "ACCOUNT_1",
"transactionId": "09c9eb722fe3adda41fe0a4d1060ab4efff4c2ca9ad817a763dae81374123b4c"
}
EDIT:
To test further, I changed the code above to return a simple string value, and I do not receive the test string back through the REST API.
@returns(String)
transaction LoadAccountTx {
o String accountId
}
/**
* function to load an account
* @param {org.scsvault.history.LoadAccountTx} loadAccountTx
* @returns {String} (https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/string)
* @transaction
*/
async function loadAccount(loadAccountTx)
{
return "This is a test string";
}
Just adding to what @nicolapaoli wrote: this is fixed in Hyperledger Composer release v0.19.13; FYI, you do get the return value.
I had a very similar issue. I've just opened an issue with a general example on GitHub here, with a reference to this question and to the message on Rocket.Chat as well. I hope this will be fixed soon.
I figured that if the devtools can list all created IndexedDB databases, then there should be an API to retrieve them...?
Does anyone know how I can get a list of database names with the help of the Firefox SDK?
I dug into the code and looked at the source. Unfortunately, there wasn't any convenient API that would pull out all the databases for one host.
The way the devtools do it is to look around in the user's profile folder for .sqlite files, then run a SQL query against each one (multiple times, in case there is an ongoing transaction) asking for the database name.
It came down to this piece of code:
// stripped-down version of: https://dxr.mozilla.org/mozilla-central/source/devtools/server/actors/storage.js
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
"use strict";
const {async} = require("resource://gre/modules/devtools/async-utils");
const { setTimeout } = require("sdk/timers");
const promise = require("sdk/core/promise");
// A RegExp for characters that cannot appear in a file/directory name. This is
// used to sanitize the host name for indexed db to lookup whether the file is
// present in <profileDir>/storage/default/ location
const illegalFileNameCharacters = [
"[",
// Control characters \001 to \036
"\\x00-\\x24",
// Special characters
"/:*?\\\"<>|\\\\",
"]"
].join("");
const ILLEGAL_CHAR_REGEX = new RegExp(illegalFileNameCharacters, "g");
var OS = require("resource://gre/modules/osfile.jsm").OS;
var Sqlite = require("resource://gre/modules/Sqlite.jsm");
/**
* An async method equivalent to setTimeout but using Promises
*
* #param {number} time
* The wait time in milliseconds.
*/
function sleep(time) {
let deferred = promise.defer();
setTimeout(() => {
deferred.resolve(null);
}, time);
return deferred.promise;
}
var indexedDBHelpers = {
/**
* Fetches all the databases and their metadata for the given `host`.
*/
getDBNamesForHost: async(function*(host) {
let sanitizedHost = indexedDBHelpers.getSanitizedHost(host);
let directory = OS.Path.join(OS.Constants.Path.profileDir, "storage",
"default", sanitizedHost, "idb");
let exists = yield OS.File.exists(directory);
if (!exists && host.startsWith("about:")) {
// try for moz-safe-about directory
sanitizedHost = indexedDBHelpers.getSanitizedHost("moz-safe-" + host);
directory = OS.Path.join(OS.Constants.Path.profileDir, "storage",
"permanent", sanitizedHost, "idb");
exists = yield OS.File.exists(directory);
}
if (!exists) {
return [];
}
let names = [];
let dirIterator = new OS.File.DirectoryIterator(directory);
try {
yield dirIterator.forEach(file => {
// Skip directories.
if (file.isDir) {
return null;
}
// Skip any non-sqlite files.
if (!file.name.endsWith(".sqlite")) {
return null;
}
return indexedDBHelpers.getNameFromDatabaseFile(file.path).then(name => {
if (name) {
names.push(name);
}
return null;
});
});
} finally {
dirIterator.close();
}
return names;
}),
/**
* Removes any illegal characters from the host name to make it a valid file
* name.
*/
getSanitizedHost: function(host) {
return host.replace(ILLEGAL_CHAR_REGEX, "+");
},
/**
* Retrieves the proper indexed db database name from the provided .sqlite
* file location.
*/
getNameFromDatabaseFile: async(function*(path) {
let connection = null;
let retryCount = 0;
// Content pages might have an open transaction for the same indexedDB
// this sqlite file belongs to. In that case, Sqlite.openConnection will
// throw, so we retry for a while to see if the lock is removed.
while (!connection && retryCount++ < 25) {
try {
connection = yield Sqlite.openConnection({ path: path });
} catch (ex) {
// Continuously retrying is overkill, so wait 100ms before the next try.
yield sleep(100);
}
}
if (!connection) {
return null;
}
let rows = yield connection.execute("SELECT name FROM database");
if (rows.length != 1) {
return null;
}
let name = rows[0].getResultByName("name");
yield connection.close();
return name;
})
};
module.exports = indexedDBHelpers.getDBNamesForHost;
If anyone wants to use this, here is how you would use it:
var getDBNamesForHost = require("./getDBNamesForHost");
getDBNamesForHost("http://example.com").then(names => {
console.log(names);
});
I think it would be cool if someone were to build an add-on that adds indexedDB.mozGetDatabaseNames, working the same way as indexedDB.webkitGetDatabaseNames. I'm not doing that... I'll leave it up to you if you want. It would be a great dev tool to have ;)