having trouble getting reports to run on superset, running latest from /docker
I can set reports from the ui, but nothing happens afterwards.
I see the beat worker is running and waking up every 5 minutes, I also see in the main worker logs:
[2021-02-07 20:01:00,071: DEBUG/MainProcess] TaskPool: Apply <function _fast_trace_task at 0x7f03c6533830> (args:('email_reports.schedule_hourly', 'f1a887b8-4cb6-4d99-aba2-d571f56fcf1c', {'lang': None, 'task': 'email_reports.schedule_hourly', 'id': 'f1a887b8-4cb6-4d99-aba2-d571f56fcf1c', 'root_id': None, 'parent_id': None, 'group': None, 'meth': None, 'shadow': None, 'eta': None, 'expires': None, 'retries': 0, 'timelimit': [None, None], 'argsrepr': None, 'kwargsrepr': None, 'origin': None, 'reply_to': 'feca0569-95f8-39e2-893e-f2f29b904c06', 'correlation_id': 'f1a887b8-4cb6-4d99-aba2-d571f56fcf1c', 'hostname': 'celery#ebb95abd0849', 'delivery_info': {'exchange': '', 'routing_key': 'celery', 'priority': 0, 'redelivered': None}, 'args': [], 'kwargs': {}}, ([], {}, {'callbacks': None, 'errbacks': None, 'chord': None, 'chain': None}), None, None) kwargs:{})
[2021-02-07 20:01:00,097: DEBUG/MainProcess] Task accepted: email_reports.schedule_hourly[f1a887b8-4cb6-4d99-aba2-d571f56fcf1c] pid:17
[2021-02-07 20:01:00,221: INFO/ForkPoolWorker-1] Task email_reports.schedule_hourly[f1a887b8-4cb6-4d99-aba2-d571f56fcf1c] succeeded in 0.1267093000060413s: None
my config:
docker-compose.yaml
added:
superset-beat:
image: *superset-image
container_name: superset_beat
command: ["/app/docker/docker-bootstrap.sh", "beat"]
env_file: docker/.env
restart: unless-stopped
depends_on: *superset-depends-on
user: "root"
volumes: *superset-volumes
docker-bootstrap.sh
modified to:
if [[ "${1}" == "worker" ]]; then
echo "Starting Celery worker..."
celery worker --app=superset.tasks.celery_app:app --pool=prefork -O fair -c 4 -l DEBUG
elif [[ "${1}" == "beat" ]]; then
echo "Starting Celery beat..."
celery beat --app=superset.tasks.celery_app:app -l DEBUG
elif [[ "${1}" == "app" ]]; then
echo "Starting web app..."
flask run -p 8088 --with-threads --reload --debugger --host=0.0.0.0
fi
superset_config.py
modified to:
RESULTS_BACKEND = RedisCache(
host=REDIS_HOST, port=REDIS_PORT)
FEATURE_FLAGS = {
"ALERT_REPORTS": True,
}
class CeleryConfig(object):
BROKER_URL = f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_CELERY_DB}"
CELERY_IMPORTS = (
"superset.sql_lab",
"superset.tasks",
)
CELERY_RESULT_BACKEND = f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_RESULTS_DB}"
CELERY_ANNOTATIONS = {
"tasks.add": {
"rate_limit": "10/s",
},
"sql_lab.get_sql_results": {
"rate_limit": "100/s",
},
"email_reports.send": {
"rate_limit": "1/s",
"time_limit": 120,
"soft_time_limit": 150,
"ignore_result": True,
},
}
CELERYBEAT_SCHEDULE = {
"email_reports.schedule_hourly": {
"task": "email_reports.schedule_hourly",
"schedule": crontab(minute=1, hour="*"),
},
}
CELERY_TASK_PROTOCOL = 1
CACHE_CONFIG = {
"CACHE_TYPE": "redis",
"CACHE_DEFAULT_TIMEOUT": 60 * 60 * 24, # 1 day default (in secs)
"CACHE_KEY_PREFIX": "superset_results",
"CACHE_REDIS_URL": f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_RESULTS_DB}",
}
CELERY_CONFIG = CeleryConfig
SQLLAB_CTAS_NO_LIMIT = True
ENABLE_SCHEDULED_EMAIL_REPORTS = True
EMAIL_NOTIFICATIONS = True
SMTP_HOST = "smtp.gmail.com"
SMTP_STARTTLS = True
SMTP_SSL = True
SMTP_USER = "df.team.test#gmail.com"
SMTP_PORT = 465
SMTP_PASSWORD = os.environ.get("SMTP_PASSWORD")
SMTP_MAIL_FROM = "df.team.test#gmail.com"
You should have the alerts and reports scheduler (in case you want both alerts and reports functionality) set in CELERYBEAT_SCHEDULE. Here is an example config:
CELERYBEAT_SCHEDULE = {
'email_reports.schedule_hourly': {
'task': 'email_reports.schedule_hourly',
'schedule': crontab(minute=1, hour='*'),
},
'alerts.schedule_check': {
'task': 'alerts.schedule_check',
'schedule': crontab(minute='*', hour='*'),
},
'reports.scheduler': {
'task': 'reports.scheduler',
'schedule': crontab(minute='*', hour='*'),
},
'reports.prune_log': {
'task': 'reports.prune_log',
'schedule': crontab(minute=0, hour=0),
},
'cache-warmup-hourly': {
'task': 'cache-warmup',
'schedule': crontab(minute='*/30', hour='*'),
'kwargs': {
'strategy_name': 'top_n_dashboards',
'top_n': 10,
'since': '7 days ago',
},
},
}
Related
The documentation flake-utils has the following example as a doc
eachSystem [ system.x86_64-linux ] (system: { hello = 42; })
# => { hello = { x86_64-linux = 42; }; }
eachSystem allSystems (system: { hello = 42; })
# => {
hello.aarch64-darwin = 42,
hello.aarch64-genode = 42,
hello.aarch64-linux = 42,
...
hello.x86_64-redox = 42,
hello.x86_64-solaris = 42,
hello.x86_64-windows = 42
}
as far as I can tell, one has to
> nix repl
nix-repl> e = builtins.getFlake("github:numtide/flake-utils")
nix-repl> with e.outputs.lib;
eachSystem [ system.x86_64-linux ] (system: { hello = 42; })
to get a result value (one can also do :a e.outputs.lib to "Add attributes from resulting set to scope" and not use the with ..; line )
{ hello = { ... }; }
Is there a way to "eagerly print" the value ?
What you are looking for is :p:
> { a = 3; b = [ 1 2 3 ]; }
{ a = 3; b = [ ... ]; }
> :p { a = 3; b = [ 1 2 3 ]; }
{ a = 3; b = [ 1 2 3 ]; }
>
Very weird "crash". I am using
oscillator = AKOperationGenerator(channelCount: 2, operations: { parameters in
let portamentoOp = parameters[0].portamento(halfDuration: parameters[1])
let sineOp = AKOperation.sineWave(frequency: portamentoOp, amplitude: parameters[2])
let sawOp = AKOperation.sawtoothWave(frequency: portamentoOp, amplitude: parameters[2])
return [sawOp, sineOp] //saw on left, sine on right.
})
oscillator?.parameters = [440.0, 0.001, 1.0, 0.99]
To generate tones in my app, however after a couple of minutes of "playing" it just stops: no crash, no error, nothing. The only thing I can do is unplug the headphones and plug them back in, set up audio again, and it starts.
This is logs from device:
default 20:32:42.906934+1200 mediaserverd CAReportingClient.mm:470:-[CAReportingClient sendMessage:category:type:reporters:]: message {
"inf_frames" = 0;
"issue_type" = NonFinite;
"nan_frames" = 1083;
node = "PreDSP - { [ type: phpw; conn: 1; rout: 1 ] }";
"rtaid_client" = VA;
}: (
133143986217
)
default 20:32:42.907327+1200 mediaserverd AudioIssueDetectorManager.cpp:250:HandleAudioIssue: RTAID [ use_case=Generic issue_type=NonFinites clientID=VA node=PreDSP - { [ type: phpw; conn: 1; rout: 1 ] } ] -- [ nan_frames=1083 inf_frames=0 ]
default 20:32:42.909321+1200 rtcreportingd sendMessageWithDictionary: Sending message for session 41
default 20:32:42.909588+1200 rtcreportingd <private>(sendMessage): realtime is 0
default 20:32:42.909701+1200 rtcreportingd <private>(sendMessage): Batching 3, 1, 1
default 20:32:42.910209+1200 mediaserverd CAReportingClient.mm:470:-[CAReportingClient sendMessage:category:type:reporters:]: message {
"inf_frames" = 0;
"issue_type" = NonFinite;
"nan_frames" = 1083;
node = "PostDSP - { [ type: phpw; conn: 1; rout: 1 ] }";
"rtaid_client" = VA;
}: (
133143986217
)
default 20:32:42.911062+1200 mediaserverd AudioIssueDetectorManager.cpp:250:HandleAudioIssue: RTAID [ use_case=Generic issue_type=NonFinites clientID=VA node=PostDSP - { [ type: phpw; conn: 1; rout: 1 ] } ] -- [ nan_frames=1083 inf_frames=0 ]
default 20:32:42.913038+1200 rtcreportingd sendMessageWithDictionary: Sending message for session 41
default 20:32:42.913957+1200 rtcreportingd <private>(sendMessage): realtime is 0
default 20:32:42.914127+1200 rtcreportingd <private>(sendMessage): Batching 3, 1, 1
default 20:32:42.935883+1200 sharingd Identification starting maintenance timer
default 20:32:42.937060+1200 rapportd BLE device changed: SFDevice ID a6207782-86f9-e3fe-2ca2-fc6f74e80bd4, IDS B710183A, RSSI -47 (-44)*N, Nm 'Tomas’s MacBook Pro', Md 'MacBookPro16,1', DuetSync, PairedBT, PairedSys Conjectured, rapportID B710183A-9C32-45F2-B698-72BBC69FED93, WiFiP2P, DF 0x88 < MyiCloud AirDrop >
default 20:32:42.938332+1200 sharingd CompanionLink changed: IDS B710183A, Nm 'Tomas’s MacBook Pro', Md 'MacBookPro16,1'
default 20:32:43.009206+1200 powerlogHelperd {"msg":"CLCopyAppsUsingLocation", "event":"activity"}
default 20:32:43.038177+1200 nsurlsessiond Triggering periodic update to powerlog for client <private>
default 20:32:43.260560+1200 DTServiceHub Heartbeat
I am working on hyperledger blockchain. I am running 4 organizations network, I have developed docker-compose.yml file. When is start docker-compose all containers are starting but orderer.example.com container is not starting i.e., the container is exiting with code 2.
Below is the orderer.example.com docker-compose configuration details and logs of the orderer.example.com container.
docker-compose configuration details
###########################################
# Orderer Docker Container Config
###########################################
orderer.example.com:
container_name: orderer.example.com
image: hyperledger/fabric-orderer
environment:
- ORDERER_GENERAL_LOGLEVEL=debug
- FABRIC_LOGGING_SPEC=info
- ORDERER_GENERAL_LISTENADDRESS=0.0.0.0
- ORDERER_GENERAL_GENESISMETHOD=file
- OERDERER_GENERAL_GENESISFILE=/etc/hyperledger/configtx/genesis.block
- ORDERER_GENERAL_LOCALMSPID=OrdererMSP
- ORDERER_GENERAL_LOCALMSPDIR=/etc/hyperledger/msp/orderer/msp
working_dir: /opt/gopath/src/github.com/hyperledger/fabric/orderer
command: orderer
ports:
- 7050:7050
volumes:
- ./channel-artifacts/:/etc/hyperledger/configtx
- ./crypto-config/ordererOrganizations/example.com/orderers/OrdererPeer.example.com/:/etc/hyperledger/msp/orderer
- ./crypto-config/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/:/etc/hyperledger/msp/peerOrg1/peer0
- ./crypto-config/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/:/etc/hyperledger/msp/peerOrg2/peer0
- ./crypto-config/peerOrganizations/org3.example.com/peers/peer0.org3.example.com/:/etc/hyperledger/msp/peerOrg3/peer0
- ./crypto-config/peerOrganizations/org4.example.com/peers/peer0.org4.example.com/:/etc/hyperledger/msp/peerOrg4/peer0
networks:
- basic
Error logs of orderer.example.com container
2019-10-18 08:08:51.429 UTC [localconfig] completeInitialization -> INFO 001 Kafka.Version unset, setting to 0.10.2.0
2019-10-18 08:08:51.556 UTC [orderer.common.server] prettyPrintStruct -> INFO 002 Orderer config values:
General.LedgerType = "file"
General.ListenAddress = "0.0.0.0"
General.ListenPort = 7050
General.TLS.Enabled = false
General.TLS.PrivateKey = "/etc/hyperledger/fabric/tls/server.key"
General.TLS.Certificate = "/etc/hyperledger/fabric/tls/server.crt"
General.TLS.RootCAs = [/etc/hyperledger/fabric/tls/ca.crt]
General.TLS.ClientAuthRequired = false
General.TLS.ClientRootCAs = []
General.Cluster.ListenAddress = ""
General.Cluster.ListenPort = 0
General.Cluster.ServerCertificate = ""
General.Cluster.ServerPrivateKey = ""
General.Cluster.ClientCertificate = ""
General.Cluster.ClientPrivateKey = ""
General.Cluster.RootCAs = []
General.Cluster.DialTimeout = 5s
General.Cluster.RPCTimeout = 7s
General.Cluster.ReplicationBufferSize = 20971520
General.Cluster.ReplicationPullTimeout = 5s
General.Cluster.ReplicationRetryTimeout = 5s
General.Cluster.ReplicationBackgroundRefreshInterval = 5m0s
General.Cluster.ReplicationMaxRetries = 12
General.Cluster.SendBufferSize = 10
General.Cluster.CertExpirationWarningThreshold = 168h0m0s
General.Cluster.TLSHandshakeTimeShift = 0s
General.Keepalive.ServerMinInterval = 1m0s
General.Keepalive.ServerInterval = 2h0m0s
General.Keepalive.ServerTimeout = 20s
General.ConnectionTimeout = 0s
General.GenesisMethod = "file"
General.GenesisProfile = "SampleInsecureSolo"
General.SystemChannel = "test-system-channel-name"
General.GenesisFile = "/etc/hyperledger/fabric/genesisblock"
General.Profile.Enabled = false
General.Profile.Address = "0.0.0.0:6060"
General.LocalMSPDir = "/etc/hyperledger/msp/orderer/msp"
General.LocalMSPID = "OrdererMSP"
General.BCCSP.ProviderName = "SW"
General.BCCSP.SwOpts.SecLevel = 256
General.BCCSP.SwOpts.HashFamily = "SHA2"
General.BCCSP.SwOpts.Ephemeral = false
General.BCCSP.SwOpts.FileKeystore.KeyStorePath = "/etc/hyperledger/msp/orderer/msp/keystore"
General.BCCSP.SwOpts.DummyKeystore =
General.BCCSP.SwOpts.InmemKeystore =
General.BCCSP.PluginOpts =
General.Authentication.TimeWindow = 15m0s
General.Authentication.NoExpirationChecks = false
FileLedger.Location = "/var/hyperledger/production/orderer"
FileLedger.Prefix = "hyperledger-fabric-ordererledger"
RAMLedger.HistorySize = 1000
Kafka.Retry.ShortInterval = 5s
Kafka.Retry.ShortTotal = 10m0s
Kafka.Retry.LongInterval = 5m0s
Kafka.Retry.LongTotal = 12h0m0s
Kafka.Retry.NetworkTimeouts.DialTimeout = 10s
Kafka.Retry.NetworkTimeouts.ReadTimeout = 10s
Kafka.Retry.NetworkTimeouts.WriteTimeout = 10s
Kafka.Retry.Metadata.RetryMax = 3
Kafka.Retry.Metadata.RetryBackoff = 250ms
Kafka.Retry.Producer.RetryMax = 3
Kafka.Retry.Producer.RetryBackoff = 100ms
Kafka.Retry.Consumer.RetryBackoff = 2s
Kafka.Verbose = false
Kafka.Version = 0.10.2.0
Kafka.TLS.Enabled = false
Kafka.TLS.PrivateKey = ""
Kafka.TLS.Certificate = ""
Kafka.TLS.RootCAs = []
Kafka.TLS.ClientAuthRequired = false
Kafka.TLS.ClientRootCAs = []
Kafka.SASLPlain.Enabled = false
Kafka.SASLPlain.User = ""
Kafka.SASLPlain.Password = ""
Kafka.Topic.ReplicationFactor = 3
Debug.BroadcastTraceDir = ""
Debug.DeliverTraceDir = ""
Consensus = map[WALDir:/var/hyperledger/production/orderer/etcdraft/wal SnapDir:/var/hyperledger/production/orderer/etcdraft/snapshot]
Operations.ListenAddress = "127.0.0.1:8443"
Operations.TLS.Enabled = false
Operations.TLS.PrivateKey = ""
Operations.TLS.Certificate = ""
Operations.TLS.RootCAs = []
Operations.TLS.ClientAuthRequired = false
Operations.TLS.ClientRootCAs = []
Metrics.Provider = "disabled"
Metrics.Statsd.Network = "udp"
Metrics.Statsd.Address = "127.0.0.1:8125"
Metrics.Statsd.WriteInterval = 30s
Metrics.Statsd.Prefix = ""
panic: unable to bootstrap orderer. Error reading genesis block file: open /etc/hyperledger/fabric/genesisblock: no such file or directory
goroutine 1 [running]:
github.com/hyperledger/fabric/orderer/common/bootstrap/file.(*fileBootstrapper).GenesisBlock(0xc0002bb2b0, 0xc0002bb2b0)
/opt/gopath/src/github.com/hyperledger/fabric/orderer/common/bootstrap/file/bootstrap.go:39 +0x1d0
github.com/hyperledger/fabric/orderer/common/server.extractBootstrapBlock(0xc000444900, 0x0)
/opt/gopath/src/github.com/hyperledger/fabric/orderer/common/server/main.go:532 +0x1bd
github.com/hyperledger/fabric/orderer/common/server.Start(0x1018e03, 0x5, 0xc000444900)
/opt/gopath/src/github.com/hyperledger/fabric/orderer/common/server/main.go:96 +0x43
github.com/hyperledger/fabric/orderer/common/server.Main()
/opt/gopath/src/github.com/hyperledger/fabric/orderer/common/server/main.go:91 +0x1ce
main.main()
/opt/gopath/src/github.com/hyperledger/fabric/orderer/main.go:15 +0x20
When I got this error I made changes to the code follows as below:
environment:
- OERDERER_GENERAL_GENESISFILE=/etc/hyperledger/fabric/genesis.block
volumes:
- ./channel-artifacts/:/etc/hyperledger/fabric
But then I got the same error; below are the error logs:
2019-10-18 07:49:44.841 UTC [orderer.common.server] Main -> ERRO 001 failed to parse config: Error reading configuration: Unsupported Config Type ""
Please help me with this issue, and please ignore any indentation errors.
Its weird,
According to your yaml manifest
Supplied path is:
- OERDERER_GENERAL_GENESISFILE=/etc/hyperledger/configtx/genesis.block
Error log states:
Error reading genesis block file: open /etc/hyperledger/fabric/genesisblock: no such file or directory
You seem to be missing something; please upload the configuration to GitHub
so that I can have a look.
In your volume, you have to specify your path to block.genesis
And you have to give the same link in environment genesis file path
environment:
- OERDERER_GENERAL_GENESISFILE=/var/hyperledger/orderer/orderer.genesis.pb
volumes:
- ./channel-artifacts/:/etc/hyperledger/fabric
- ./orderer/genesis_block.pb:/var/hyperledger/orderer/orderer.genesis.pb
Please check the genesis block path. Docker is not able to locate the file
I am able to solve this issue by again generating the cryptography files in the folder "channel-artifacts" and "crypto-config".
1)
generating the cryptography files
By again Running Configtxgen to create artifacts
Renaming the misspelled environment variable (note the extra "E"): OERDERER_GENERAL_GENESISFILE >> ORDERER_GENERAL_GENESISFILE
I want to view the probabilities of each test image, so I modified the code(cifar10_eval.py) like this
def eval_once(saver, summary_writer, logits, labels, top_k_op, summary_op):
...............
while step < num_iter and not coord.should_stop():
result1, result2 = sess.run([logits, labels])
print('Step:', step, 'result',result1, 'Label:', result2)
...............
and I run the python code like this.
# python cifar10_eval.py --batch_size=1 --run_once=True
The screen results are like this
Step: 0 result [[ 0.01539493 -0.00109618 -0.00364288 -0.00898853 -0.00086198 0.00587899 0.00981337 -0.00785329 -0.00282823 -0.00171288]] Label: [4]
Step: 1 result [[ 0.01539471 -0.00109601 -0.00364273 -0.00898863 -0.00086192 0.005879 0.00981339 -0.00785322 -0.00282811 -0.00171296]] Label: [7]
Step: 2 result [[ 0.01539475 -0.00109617 -0.00364274 -0.00898876 -0.00086183 0.00587886 0.00981328 -0.00785333 -0.00282814 -0.00171295]] Label: [8]
Step: 3 result [[ 0.01539472 -0.00109597 -0.00364275 -0.0089886 -0.00086183 0.00587902 0.00981344 -0.00785326 -0.00282817 -0.00171299]] Label: [4]
Step: 4 result [[ 0.01539488 -0.00109631 -0.00364294 -0.00898863 -0.00086199 0.00587896 0.00981327 -0.00785329 -0.00282809 -0.00171307]] Label: [0]
Step: 5 result [[ 0.01539478 -0.00109607 -0.00364292 -0.00898858 -0.00086194 0.00587904 0.00981335 -0.0078533 -0.00282818 -0.00171321]] Label: [4]
Step: 6 result [[ 0.01539493 -0.00109627 -0.00364277 -0.00898873 -0.0008618 0.00587892 0.00981339 -0.00785325 -0.00282807 -0.00171289]] Label: [9]
Step: 7 result [[ 0.01539504 -0.00109619 -0.0036429 -0.00898865 -0.00086194 0.00587894 0.0098133 -0.00785331 -0.00282818 -0.00171294]] Label: [4]
Step: 8 result [[ 0.01539493 -0.00109627 -0.00364286 -0.00898867 -0.00086183 0.00587899 0.00981332 -0.00785329 -0.00282825 -0.00171283]] Label: [8]
Step: 9 result [[ 0.01539495 -0.00109617 -0.00364286 -0.00898852 -0.00086186 0.0058789 0.00981337 -0.00785326 -0.00282827 -0.00171287]] Label: [9]
The Label values seem to be good, but the logits outputs seem to be the same values!
Why?
Anyone can tell me the reason?
This is new cifar10_eval.py source code.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import time
import numpy as np
import tensorflow as tf
#from tensorflow.models.image.cifar10 import cifar10
import cifar10
# Command-line flags for the evaluation script (TF1 tf.app.flags).
FLAGS = tf.app.flags.FLAGS
# Where TensorBoard event logs for this eval run are written.
tf.app.flags.DEFINE_string('eval_dir', '/tmp/cifar10_eval',
                           """Directory where to write event logs.""")
# Which split to evaluate: the test images or a held-out slice of train.
tf.app.flags.DEFINE_string('eval_data', 'test',
                           """Either 'test' or 'train_eval'.""")
# Checkpoints produced by the training script are read from here.
tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/cifar10_train',
                           """Directory where to read model checkpoints.""")
# Only used when run_once is False: delay between evaluation passes.
tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5,
                            """How often to run the eval.""")
# Number of examples pulled from the input queue per evaluation pass.
tf.app.flags.DEFINE_integer('num_examples', 10000,
                            """Number of examples to run.""")
tf.app.flags.DEFINE_boolean('run_once', True,
                            """Whether to run eval only once.""")
def eval_once(saver, summary_writer, logits, labels, top_k_op, summary_op):
  """Restore the latest checkpoint and run a single evaluation pass.

  Prints the raw logits and label for every step, then a precision summary.

  Args:
    saver: tf.train.Saver restoring the moving-average shadow variables.
    summary_writer: tf.train.SummaryWriter (kept for interface compatibility;
      not written to in this modified version).
    logits: inference output tensor, shape (batch_size, num_classes).
    labels: ground-truth label tensor, shape (batch_size,).
    top_k_op: tf.nn.in_top_k(logits, labels, 1) boolean tensor.
    summary_op: merged summary op (kept for interface compatibility; unused).

  Returns:
    None. Returns early if no checkpoint is found.
  """
  with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
      # Restores from checkpoint.
      saver.restore(sess, ckpt.model_checkpoint_path)
      # Checkpoint path looks like ".../model.ckpt-12345"; the trailing
      # number is the global step. (Not used below, kept for parity with
      # the upstream tutorial script.)
      global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
    else:
      print('No checkpoint file found')
      return

    # Start the queue runners that feed the input pipeline.
    coord = tf.train.Coordinator()
    try:
      threads = []
      for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
        threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                         start=True))

      num_iter = FLAGS.num_examples
      total_sample_count = FLAGS.num_examples
      print(num_iter, FLAGS.batch_size, total_sample_count)
      true_count = 0  # Counts the number of correct predictions.
      step = 0
      time.sleep(1)
      while step < num_iter and not coord.should_stop():
        # BUG FIX: evaluate top_k_op in the SAME run() call as logits/labels
        # (one run() per batch keeps the three values consistent) and
        # accumulate it; previously top_k_op was never run, so true_count
        # stayed 0 and the reported precision was always 0.
        result1, result2, predictions = sess.run([logits, labels, top_k_op])
        print('Step:', step, 'result', result1, 'Label:', result2)
        true_count += np.sum(predictions)
        step += 1

      # max(step, 1) guards against ZeroDivisionError when num_examples == 0
      # or the coordinator stops before the first batch.
      precision = true_count / max(step, 1)
      # FIX: output label was misspelled 'Accurcy:'.
      print('Summary -- Step:', step, 'Accuracy:', true_count * 100.0 / max(step, 1))
      print('%s: total:%d true:%d precision # 1 = %.3f' % (datetime.now(), total_sample_count, true_count, precision))
    except Exception as e:  # pylint: disable=broad-except
      coord.request_stop(e)

    coord.request_stop()
    coord.join(threads, stop_grace_period_secs=10)
def evaluate():
  """Build the CIFAR-10 evaluation graph and run eval_once.

  Loops forever (sleeping eval_interval_secs between passes) unless
  FLAGS.run_once is set.
  """
  with tf.Graph().as_default():
    # Input pipeline: evaluate the test split when eval_data == 'test',
    # otherwise a held-out slice of the training data.
    use_test_split = FLAGS.eval_data == 'test'
    images, labels = cifar10.inputs(eval_data=use_test_split, )

    # Forward pass producing per-class logits for each input image.
    logits = cifar10.inference(images)

    # Boolean per-example tensor: was the true label the top prediction?
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    # Evaluation restores the exponential-moving-average shadow values of
    # the trained variables rather than the raw variables.
    ema = tf.train.ExponentialMovingAverage(cifar10.MOVING_AVERAGE_DECAY)
    saver = tf.train.Saver(ema.variables_to_restore())

    # Merged summaries plus a writer that also records the graph.
    summary_op = tf.merge_all_summaries()
    summary_writer = tf.train.SummaryWriter(
        FLAGS.eval_dir,
        graph_def=tf.get_default_graph().as_graph_def())

    while True:
      eval_once(saver, summary_writer, logits, labels, top_k_op, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
def main(argv=None): # pylint: disable=unused-argument
  """Entry point invoked by tf.app.run(): prepare directories, then evaluate."""
  # Downloads the CIFAR-10 dataset if it is not already present.
  cifar10.maybe_download_and_extract()
  # Start each run with a fresh event-log directory so TensorBoard does not
  # mix data from earlier evaluations. Order matters: delete, then recreate.
  if tf.gfile.Exists(FLAGS.eval_dir):
    tf.gfile.DeleteRecursively(FLAGS.eval_dir)
  tf.gfile.MakeDirs(FLAGS.eval_dir)
  print('Evaluate Start')
  evaluate()

if __name__ == '__main__':
  # tf.app.run() parses the flags defined above and calls main().
  tf.app.run()
I have trained with 1k steps(accuracy 10% or so).
But after I trained with 100k steps(accuracy 86% or so), the result is very good
.................
Step: 9991 result: [[ 1.30259633 0.71064955 -2.6035285 -1.30183697 -4.1291523 -3.00246906 0.30873945 -4.02916574 13.05054665 -0.42556083]] Label: 8
Step: 9992 result: [[-1.05670786 -1.86572766 0.28350741 1.78929067 0.03841069 1.23079467 2.97172165 -1.18722486 -1.17184007 -1.02505279]] Label: 6
Step: 9993 result: [[ 1.50454926 2.34122658 -3.45632267 -0.55308843 -4.35214806 -2.28931832 -1.74908364 -4.71527719 11.44062901 1.72015083]] Label: 8
Step: 9994 result: [[ 1.96891284 -2.57139373 0.29864013 1.30923986 1.72708285 0.95571399 -0.49331608 0.49454236 -2.26134181 -1.39561605]] Label: 0
Step: 9995 result: [[-0.65523863 1.58577776 0.13226865 1.43122363 -2.34669352 0.18927786 -2.51019335 -1.70729315 -0.21297894 4.06098557]] Label: 9
Step: 9996 result: [[-2.17944765 -3.22895575 2.29571438 2.63287306 0.46685112 4.42715979 -0.76104468 2.39603662 -3.21783161 -2.8433671 ]] Label: 2
Step: 9997 result: [[ 4.26957560e+00 1.95574760e-03 1.91038296e-01 -8.00723195e-01 -2.36319876e+00 -2.12906289e+00 -3.35138845e+00 7.97132492e-01 6.60009801e-01 2.73786736e+00]] Label: 0
Step: 9998 result: [[ 0.42694128 -2.07150149 0.47749567 2.62247086 1.11608386 3.05186462 -0.19805858 0.03386561 -2.87092948 -2.59781456]] Label: 5
Step: 9999 result: [[ 0.23629765 -3.21540785 1.01075113 0.46802399 3.44423246 0.25743011 4.71304989 -1.12128389 -3.07727337 -2.7076664 ]] Label: 6
2016-04-09 00:32:49.861650 Total:10000 True:8631: precision # 1 = 0.863
I know MonkeyRunner is kind of deprecated, but I still have a weird question.
If I open Settings via touch events from Monkeyrunner Users option is not there,
If I do it as follows same thing.
FLAG_ACTIVITY_NEW_TASK = 0x10000000
package = 'com.android.settings'
activity='.Settings'
runComponent = package + '/' + activity
device.startActivity(component=runComponent, flags=FLAG_ACTIVITY_NEW_TASK)
Does anyone know why, or how to get Users to display or open using MonkeyRunner?
Running Android v6.0.1.
Thanks,
Eugene
You can use AndroidViewClient/culebra to do it.
This is a culebra script slightly modified to check for Users while scrolling, but most of it is generated using Culebra GUI:
#! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (C) 2013-2014 Diego Torres Milano
Created on 2016-01-11 by Culebra v11.0.8
__ __ __ __
/ \ / \ / \ / \
____________________/ __\/ __\/ __\/ __\_____________________________
___________________/ /__/ /__/ /__/ /________________________________
| / \ / \ / \ / \ \___
|/ \_/ \_/ \_/ \ o \
\_____/--<
#author: Diego Torres Milano
#author: Jennifer E. Swofford (ascii art snake)
'''
import re
import sys
import os
import unittest
from com.dtmilano.android.viewclient import ViewClient, CulebraTestCase
TAG = 'CULEBRA'
class CulebraTests(CulebraTestCase):
    """Culebra-generated UI test: opens Settings from Quick Settings and
    flings through the dashboard looking for a 'Users' entry."""

    # FIX: the original paste had '#classmethod' — the '@' was mangled to '#'
    # (the same artifact appears elsewhere in this dump). Without the
    # decorator, unittest's cls.setUpClass() call raises TypeError and the
    # test never runs.
    @classmethod
    def setUpClass(cls):
        # Connection / ViewClient options as emitted by the Culebra GUI.
        cls.kwargs1 = {'ignoreversioncheck': False, 'verbose': False, 'ignoresecuredevice': False}
        cls.kwargs2 = {'forceviewserveruse': False, 'useuiautomatorhelper': False, 'ignoreuiautomatorkilled': True, 'autodump': False, 'startviewserver': True, 'compresseddump': True}
        cls.options = {'start-activity': None, 'concertina': False, 'device-art': None, 'use-jar': False, 'multi-device': False, 'unit-test-class': True, 'save-screenshot': None, 'use-dictionary': False, 'glare': False, 'dictionary-keys-from': 'id', 'scale': 0.5, 'find-views-with-content-description': True, 'window': -1, 'orientation-locked': None, 'save-view-screenshots': None, 'find-views-by-id': True, 'log-actions': False, 'use-regexps': False, 'null-back-end': False, 'auto-regexps': None, 'do-not-verify-screen-dump': True, 'verbose-comments': False, 'gui': True, 'find-views-with-text': True, 'prepend-to-sys-path': False, 'install-apk': None, 'drop-shadow': False, 'output': None, 'unit-test-method': None, 'interactive': False}
        cls.sleep = 5

    def setUp(self):
        super(CulebraTests, self).setUp()

    def tearDown(self):
        super(CulebraTests, self).tearDown()

    def preconditions(self):
        # Delegate to the framework's checks; extend here if app-specific
        # preconditions are ever needed.
        if not super(CulebraTests, self).preconditions():
            return False
        return True

    def testSomething(self):
        """Open Settings via Quick Settings, then fling the dashboard up to
        10 times until the 'Users' item is visible; fail otherwise."""
        if not self.preconditions():
            self.fail('Preconditions failed')
        _s = CulebraTests.sleep
        _v = CulebraTests.verbose
        self.vc.dump(window=-1)
        # Pull down Quick Settings and tap the gear to open Settings.
        self.vc.uiDevice.openQuickSettings()
        self.vc.sleep(_s)
        self.vc.dump(window=-1)
        self.vc.findViewWithContentDescriptionOrRaise(u'''Settings''').touch()
        self.vc.sleep(_s)
        self.vc.dump(window=-1)
        users = None
        com_android_settings___id_dashboard = self.vc.findViewByIdOrRaise("com.android.settings:id/dashboard")
        # Start from the top of the scrollable dashboard list.
        com_android_settings___id_dashboard.uiScrollable.flingToBeginning()
        attempts = 10
        while attempts > 0:
            users = self.vc.findViewWithText(u'Users')
            if users:
                break
            com_android_settings___id_dashboard.uiScrollable.flingForward()
            self.vc.dump(window=-1)
            self.vc.sleep(_s)
            # Re-find the dashboard view: the old reference is stale after
            # a new dump.
            com_android_settings___id_dashboard = self.vc.findViewByIdOrRaise("com.android.settings:id/dashboard")
            attempts -= 1
        if not users:
            self.fail("Users not found")

if __name__ == '__main__':
    CulebraTests.main()