I get an encrypted base64 string from Python (AES-256 CBC). But when I try to decrypt it in iOS with Swift, the decrypted string comes back as nil.
# coding=utf-8
import base64
from random import choice
from string import letters

try:
    from Crypto import Random
    from Crypto.Cipher import AES
except ImportError:
    import crypto
    import sys
    sys.modules['Crypto'] = crypto
    from crypto.Cipher import AES
    from crypto import Random


class AESCipher(object):
    def __init__(self, key):
        self.bs = 32
        self.key = key

    def encrypt(self, raw):
        _raw = raw
        raw = self._pad(raw)
        print raw, ';'
        print _raw, ';'
        iv = "".join([choice(letters[:26]) for i in xrange(16)])
        print " iv :", iv
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return base64.b64encode(iv + cipher.encrypt(raw))

    def decrypt(self, enc):
        enc = base64.b64decode(enc)
        iv = enc[:AES.block_size]
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return self._unpad(cipher.decrypt(enc[AES.block_size:])).decode('utf-8')

    def _pad(self, s):
        a = (self.bs - len(s) % self.bs)
        b = chr(self.bs - len(s) % self.bs)
        return s + a * b

    @staticmethod
    def _unpad(s):
        return s[:-ord(s[len(s) - 1:])]


def encrypt(k, t):
    o = AESCipher(k)
    return o.encrypt(t)


def decrypt(k, t):
    o = AESCipher(k)
    return o.decrypt(t)


def main():
    k = "qwertyuiopasdfghjklzxcvbnmqwerty"
    s1 = "Hello World!"
    d2 = encrypt(k, s1)
    print " Password :", k
    print "Encrypted :", d2
    print "    Plain :", decrypt(k, d2)


if __name__ == '__main__':
    main()
iOS
Here I use the AES256CBC library: https://github.com/SwiftyBeaver/AES256CBC
let decrypted = AES256CBC.decryptString("Ymdqc3ZqdmZ1cXdsZG1sZenhgr4Xt0+ceARYRh1n40QkNDV/dyKbQjYLcbiXBBeO", password: "qwertyuiopasdfghjklzxcvbnmqwerty")
print("decrypted: \(String(describing: decrypted))") // here I get nil
When the Python script runs I get these logs:
iv : bgjsvjvfuqwldmle
Password : qwertyuiopasdfghjklzxcvbnmqwerty
Encrypted : Ymdqc3ZqdmZ1cXdsZG1sZenhgr4Xt0+ceARYRh1n40QkNDV/dyKbQjYLcbiXBBeO
Plain : Hello World!
I don't know why the Python and iOS AES results don't match. If anyone has solved this issue, please post an answer below. Thanks in advance.
I think you need to decode your base64 string first.
Maybe this will work (I just googled it; I'm not an iOS dev, so apologies for any errors):
let decodedData = Data(base64Encoded: "Ymdqc3ZqdmZ1cXdsZG1sZenhgr4Xt0+ceARYRh1n40QkNDV/dyKbQjYLcbiXBBeO")
let decodedString = String(data: decodedData ?? Data(), encoding: .utf8)
let decrypted = AES256CBC.decryptString(decodedString ?? "", password: "qwertyuiopasdfghjklzxcvbnmqwerty")
print("decrypted: \(String(describing: decrypted))")
It's also stated in the documentation
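For reference, the string produced by the Python code above is base64(iv + ciphertext): the first 16 bytes after base64 decoding are the IV, and the padding uses a 32-byte block size rather than AES's usual 16. A minimal Python sketch (assuming PyCryptodome or PyCrypto is installed) that splits the logged payload apart, which may help when reproducing the same steps on the iOS side:

import base64
from Crypto.Cipher import AES

key = b"qwertyuiopasdfghjklzxcvbnmqwerty"    # 32 bytes -> AES-256
payload = base64.b64decode(
    "Ymdqc3ZqdmZ1cXdsZG1sZenhgr4Xt0+ceARYRh1n40QkNDV/dyKbQjYLcbiXBBeO")

iv, ciphertext = payload[:16], payload[16:]   # 16-byte IV prefix, rest is ciphertext
padded = AES.new(key, AES.MODE_CBC, iv).decrypt(ciphertext)
plain = padded[:-ord(padded[-1:])]            # strip the custom 32-byte-block padding
print(iv, plain)                              # should print b'bgjsvjvfuqwldmle' b'Hello World!' if the format matches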
I was trying to deploy an ML model using Node.js with the help of the ChildProcess package. While running __predict(), it takes too long and ends with a code 1 error.
Here I share all the related code needed to diagnose the issue.
Model Python code -->
import keras
import time

start = time.time()
encoder = keras.models.load_model('enc', compile=False)
decoder = keras.models.load_model('dec', compile=False)

import numpy as np
from flask import Flask, request, jsonify, render_template
import tensorflow as tf
import pickle
import string
import re
from keras_preprocessing.sequence import pad_sequences


def initialize_hidden_state():
    return tf.zeros((1, 1024))


eng_tokenizer, hin_tokenizer = pickle.load(open('tokenizer.pkl', 'rb'))


def clean(text):
    text = text.lower()
    special_char = set(string.punctuation + '।')  # Set of all special characters
    # Remove all the special characters
    text = ''.join(word for word in text if word not in special_char)
    seq = eng_tokenizer.texts_to_sequences([text])
    seq = pad_sequences(seq, maxlen=23, padding='post')
    return seq


def __predict(data):
    # Get the data from the POST request.
    # data = request.get_json(force=True)
    clean_input = clean(data)
    # Make prediction using model loaded from disk as per the data.
    hidden_enc = initialize_hidden_state()
    enc_out, enc_hidden = encoder(clean_input, hidden_enc)
    result = ''
    dec_hidden = enc_hidden
    dec_input = tf.expand_dims(hin_tokenizer.texts_to_sequences(['<Start>'])[0], 0)
    # ------------------------------------------------------------------
    for t in range(25):
        predictions, dec_hidden, attention_weights = decoder(dec_input, dec_hidden, enc_out)
        predicted_id = tf.argmax(predictions[0]).numpy()
        x = hin_tokenizer.sequences_to_texts([[predicted_id]])[0]
        if x == 'end':
            break
        result += x + ' '
        # the predicted ID is fed back into the model
        dec_input = tf.expand_dims([predicted_id], 0)
    CLEANR = re.compile(r"([A-Za-z])", re.DOTALL)
    result = re.sub(CLEANR, '', result)
    return result


# import json
# with open('data.json', 'r') as openfile:
#     json_object = json.load(openfile).get('data')

data = __predict("file")
end = time.time()
# print(start-end)
data1 = data + "abcd"
print(data1)
# print("abcd")
# dictionary = {
#     "data": data,
# }
# json_object = json.dumps(dictionary, indent=2)
# with open("result.json", "w") as outfile:
#     outfile.write(json_object)
When I only run print("abcd") or print(start-end), it gives a result and exits with code 0. But when I print the prediction (data1) it gives no result and exits with code 1.
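Since the Node.js parent only listens to stdout, an exception raised inside __predict() makes the script exit with code 1 while its traceback goes to stderr and is never shown. As a debugging sketch (not part of the original script), the call could be wrapped so the traceback is printed to stdout and surfaces in the parent's log:

import sys
import traceback

try:
    data = __predict("file")
    print(data + "abcd")
except Exception:
    # send the full traceback to stdout so the Node.js parent can log it
    traceback.print_exc(file=sys.stdout)
    sys.exit(1)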
Here is the childProcess code -->
app.get('/', (req, res) => {
  let dataToSend
  let largeDataSet = []
  // spawn new child process to call the python script
  const python = spawn('python', ['app.py'])
  // console.log(python);

  // collect data from script
  python.stdout.on('data', function (data) {
    console.log('Pipe data from python script ...')
    // dataToSend = data;
    largeDataSet.push(data)
  })

  // in the close event we are sure that the stream from the child process is closed
  python.on('close', (code) => {
    console.log(`child process close all stdio with code ${code}`)
    // send data to browser
    // largeDataSet = []
    console.log(largeDataSet.join(''));
    res.send(largeDataSet.join(''))
  })
})
Here is the error --->
child process close all stdio with code 1
Please help. I tried to understand the problem but failed badly, even at just understanding it.
Thanks in advance!
I use Baidu IoT Core, with two devices as clients and a database (TSDB) as the server. The target function: one client sends an image to the database, then the database transmits it to the other client. I got help from How can I publish a file using Mosquitto in python?
but it still doesn't work.
send image
import paho.mqtt.client as mqtt
import json
import base64
import cv2

HOST = '************'
PORT = 1883
client_id = '************'
username = '***********'
password = '******************'
topic = '******'

# obj = userdata
# mqttc = client
def on_connect(mqttc, obj, flags, rc):
    print("rc: " + str(rc))

def on_message(mqttc, obj, msg):
    print(msg.topic + " " + str(msg.qos) + " " + str(msg.payload.decode('utf-8')))

def on_publish(mqttc, obj, mid):
    print("mid: " + str(mid))

def on_subscribe(mqttc, obj, mid, granted_qos):
    print("Subscribed: " + str(mid) + " " + str(granted_qos))

def on_log(mqttc, obj, level, string):
    print(string)

def on_disconnect(mqttc, obj, rc):
    print("unsuccessful connect %s" % rc)

mqttc = mqtt.Client(client_id)
mqttc.username_pw_set(username, password)  # thanks for the correction; I had forgotten to connect to the broker, but the problem remains
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
mqttc.on_disconnect = on_disconnect
# Uncomment to enable debug messages
mqttc.on_log = on_log
mqttc.connect(HOST, PORT, 60)

image = 'E:/_TempPhoto/0.jpg'
print(type(image))

def imageToStr(image):
    with open(image, 'rb') as f:
        image_byte = base64.b64encode(f.read())
    print(type(image_byte))
    image_str = image_byte.decode('ascii')  # bytes to str
    print(type(image_str))
    return image_str

image1 = imageToStr(image)

data = {
    "engineeringdata": {
        "date": 12,
        "value": "59.3;98.5",
        "image": image1
    }
}
json_mod = json.dumps(data)
mqttc.publish(topic, json_mod, 0)
mqttc.loop_forever()
receive image
import paho.mqtt.client as mqtt
import cv2
import numpy as np
import json
import base64

HOST = '*********'
PORT = 1883
client_id = '************'
username = '*****************'
password = '******************'
topic = '***************'

def on_connect(client, userdata, flags, rc):
    print("Connected with result code: " + str(rc))

def strToImage(str, filename):
    image_str = str.encode('ascii')
    image_byte = base64.b64decode(image_str)
    image_json = open(filename, 'wb')
    image_json.write(image_byte)  # save the image to the given file
    image_json.close()

def on_message(client, userdata, msg):
    print(msg.topic + " " + str(msg.payload))
    strToImage(str(msg.payload), 'E:/_TempPhoto/restore001.jpg')

client = mqtt.Client(client_id)
client.username_pw_set(username, password)
client.on_connect = on_connect
client.connect(HOST, PORT, 60)
client.subscribe(topic, 0)
client.on_message = on_message
client.loop_forever()

# while True:
#     client.loop(15)
#     time.sleep(2)
This is the send script's message and log. I omitted part of the printed image data, replacing it with '......':
Sending CONNECT (u1, p1, wr0, wq0, wf0, c1, k60) client_id=b'client_for_test'
<class 'str'>
<class 'bytes'>
<class 'str'>
Sending PUBLISH (d0, q0, r0, m1), 'b'$iot/client_for_test/user/fortest'', ... (102800 bytes)
mid: 1
Received CONNACK (0, 0)
rc: 0
Sending PINGREQ
Received PINGRESP
Sending PINGREQ
Received PINGRESP
This is the receive script's printed message (it seems right):
Connected with result code: 0
My error case is 5, and I didn't receive an image, even though the send seems to succeed.
Maybe it's because the send script publishes once and exits, but the receive script can't receive it correctly.
I just need the simplest possible code for two clients to send and receive an image through MQTT, please!
Give me a hand!
Thanks for the help. The previous problems have been solved, but there is new trouble: with the edited code above I still can't receive the image.
Please correct my code.
You can encode the image to base64 before publishing it, so the topic's content will be a string. Example:
import base64

with open("t.png", "rb") as imageFile:
    str = base64.b64encode(imageFile.read())
    print str
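On the receiving side the payload then has to be base64-decoded back into bytes before it is written to disk. A minimal sketch of that matching step (the output filename is just a placeholder):

import base64

def on_message(client, userdata, msg):
    # msg.payload is the base64 text that was published; decode it back to raw bytes
    image_bytes = base64.b64decode(msg.payload)
    with open("received.jpg", "wb") as f:
        f.write(image_bytes)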
Publisher.py
import paho.mqtt.publish as publish

f = open("myimage.jpg", "rb")   # read the image as raw bytes
content = f.read()
f.close()
mybyteArray = bytearray(content)
mqttc.publish(topic, mybyteArray, 1)   # mqttc is assumed to be a connected mqtt.Client
Receiver.py
def on_message(client, userdata, msg):
    f = open('myReceivedImage.jpg', 'wb')   # binary mode, since msg.payload is raw bytes
    f.write(msg.payload)
    f.close()
I'm using google-cloud-dataflow / Cloud Composer for CSV to Avro, and everything works in my local environment. When trying to read the .avsc file that contains the Avro schema from a Cloud Storage bucket, I keep getting:
IOError: [Errno 2] No such file or directory: 'gs://my-bucket/xxx.avsc'
Code:
from __future__ import absolute_import
import argparse
import logging
import ntpath
import re
import avro.schema
import apache_beam as beam
from apache_beam.options import pipeline_options
from apache_beam.options.pipeline_options import SetupOptions
from datetime import datetime


class RowTransformer(object):
    def __init__(self, delimiter, header, filename):
        self.delimiter = delimiter
        self.keys = re.split(',', header)
        self.filename = filename

    def parse(self, row):
        self.load_dt = datetime.utcnow()
        split_row = row.split(self.delimiter)
        # Need to cast anything that is not a string into proper type
        split_row[8] = float('0' if not split_row[8] else split_row[8])
        split_row[9] = float('0' if not split_row[9] else split_row[9])
        split_row[10] = float('0' if not split_row[10] else split_row[10])
        split_row[11] = float('0' if not split_row[11] else split_row[11])
        split_row[12] = float('0' if not split_row[12] else split_row[12])
        split_row[13] = float('0' if not split_row[13] else split_row[13])
        split_row[14] = float('0' if not split_row[14] else split_row[14])
        split_row[15] = float('0' if not split_row[15] else split_row[15])
        split_row[16] = float('0' if not split_row[16] else split_row[16])
        split_row[17] = float('0' if not split_row[17] else split_row[17])
        split_row[18] = str('0' if not split_row[18] else split_row[18])
        split_row[19] = str('0' if not split_row[19] else split_row[19])
        split_row.append(self.filename)
        split_row.append(self.load_dt.strftime('%Y-%m-%d %H:%M:%S.%f'))
        decode_row = [i.decode('UTF-8') if isinstance(i, basestring) else i for i in split_row]
        row = dict(zip(self.keys, decode_row))
        return row


def run(argv=None):
    """The main function which creates the pipeline and runs it."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', dest='input', required=False,
                        help='Input file to read. This can be a local file or '
                             'a file in a Google Storage Bucket.',
                        default='gs://my-bucket/receive/xxx.txt')
    parser.add_argument('--output', dest='output', required=False,
                        help='Output Avro to Cloud Storage',
                        default='gs://my-bucket/')
    parser.add_argument('--schema', dest='schema', required=False,
                        help='Avro Schema',
                        default='gs://my-bucket/xxx.avsc')
    parser.add_argument('--delimiter', dest='delimiter', required=False,
                        help='Delimiter to split input records.',
                        default='|')
    parser.add_argument('--fields', dest='fields', required=False,
                        help='list of field names expected',
                        default='Col1,Col2...etc')
    known_args, pipeline_args = parser.parse_known_args(argv)

    row_transformer = RowTransformer(delimiter=known_args.delimiter,
                                     header=known_args.fields,
                                     filename=ntpath.basename(known_args.input))

    p_opts = pipeline_options.PipelineOptions(pipeline_args)
    with beam.Pipeline(options=p_opts) as pipeline:
        schema_file = avro.schema.parse(open(known_args.schema, "rb").read())
        rows = pipeline | "Read from text file" >> beam.io.ReadFromText(known_args.input, skip_header_lines=1)
        dict_records = rows | "Convert to Avro" >> beam.Map(lambda r: row_transformer.parse(r))
        dict_records | "Write to Cloud Storage as Avro" >> beam.io.avroio.WriteToAvro(known_args.output, schema=schema_file)


run()
The error comes from the plain open() call on the schema path: the built-in open() only reads local files. You need to use the apache_beam.io.gcp.gcsio module to open gs:// paths, see https://beam.apache.org/documentation/sdks/pydoc/2.6.0/apache_beam.io.gcp.gcsio.html
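A minimal sketch of reading the schema through GcsIO rather than the built-in open() (same bucket path as in the question; assumes the Beam GCP extras are installed):

import avro.schema
from apache_beam.io.gcp.gcsio import GcsIO

# open the .avsc file through the GCS filesystem instead of the local one
with GcsIO().open('gs://my-bucket/xxx.avsc') as f:
    schema_file = avro.schema.parse(f.read())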
I am trying to extract Twitter data using the REST API in Zeppelin. I tried both registerAsTable and registerTempTable, and neither works. Please help me resolve the error. I'm getting the error below while executing the Zeppelin tutorial code:
error: value registerAsTable is not a member of org.apache.spark.rdd.RDD[Tweet] ).foreachRDD(rdd=> rdd.registerAsTable("tweets")
An RDD cannot be registered as a table, whereas a DataFrame can. You can convert your RDD into a DataFrame and then register the resulting DataFrame as a temp table or table.
You can convert an RDD into a DataFrame as below:
val sqlContext = new SQLContext(sc)
import sqlContext.implicits._
rdd.toDF()
Refer to How to convert rdd object to dataframe in spark and http://spark.apache.org/docs/latest/sql-programming-guide.html
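If you run the same thing from the PySpark interpreter, the conversion looks like this (a sketch only; the sample RDD below is made up for illustration):

from pyspark.sql import Row, SQLContext

sqlContext = SQLContext(sc)   # sc is the SparkContext Zeppelin provides

# a made-up RDD standing in for the tweet stream
rdd = sc.parallelize([Row(createdAt=1496606385, text="hello world")])

df = rdd.toDF()                     # the RDD itself cannot be registered as a table
df.registerTempTable("tweets")      # but the resulting DataFrame can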
In the Zeppelin interpreters, add the external dependency org.apache.bahir:spark-streaming-twitter_2.11:2.0.0 from the GUI, and after that run the following using Spark 2.0.1:
import org.apache.spark._
import org.apache.spark.streaming._
import org.apache.spark.streaming.StreamingContext._
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.storage.StorageLevel
import scala.io.Source
//import org.apache.spark.Logging
import java.io.File
import org.apache.log4j.Logger
import org.apache.log4j.Level
import sys.process.stringSeqToProcess
import scala.collection.mutable.HashMap

/** Configures the OAuth credentials for accessing Twitter */
def configureTwitterCredentials(apiKey: String, apiSecret: String, accessToken: String, accessTokenSecret: String) {
  val configs = new HashMap[String, String] ++= Seq(
    "apiKey" -> apiKey, "apiSecret" -> apiSecret, "accessToken" -> accessToken, "accessTokenSecret" -> accessTokenSecret)
  println("Configuring Twitter OAuth")
  configs.foreach { case (key, value) =>
    if (value.trim.isEmpty) {
      throw new Exception("Error setting authentication - value for " + key + " not set")
    }
    val fullKey = "twitter4j.oauth." + key.replace("api", "consumer")
    System.setProperty(fullKey, value.trim)
    println("\tProperty " + fullKey + " set as [" + value.trim + "]")
  }
  println()
}

// Configure Twitter credentials; the following config values will not work, they are only placeholders
val apiKey = "7AVLnhssAqumpgY6JtMa59w6Tr"
val apiSecret = "kRLstZgz0BYazK6nqfMkPvtJas7LEqF6IlCp9YB1m3pIvvxrRZl"
val accessToken = "79438845v6038203392-CH8jDX7iUSj9xmQRLpHqLzgvlLHLSdQ"
val accessTokenSecret = "OXUpYu5YZrlHnjSacnGJMFkgiZgi4KwZsMzTwA0ALui365"
configureTwitterCredentials(apiKey, apiSecret, accessToken, accessTokenSecret)

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming._
import org.apache.spark.streaming.twitter._
import org.apache.spark.SparkContext._

val ssc = new StreamingContext(sc, Seconds(2))
val tweets = TwitterUtils.createStream(ssc, None)
val twt = tweets.window(Seconds(10))
//twt.print

val sqlContext = new org.apache.spark.sql.SQLContext(sc)
import sqlContext.implicits._

case class Tweet(createdAt: Long, text: String)
val tweet = twt.map(status =>
  Tweet(status.getCreatedAt().getTime() / 1000, status.getText())
)
tweet.foreachRDD(rdd => rdd.toDF.registerTempTable("tweets"))

ssc.start()
//ssc.stop()
After that, run some queries against the table in another Zeppelin cell:
%sql select createdAt, text from tweets limit 50
val data = sc.textFile("/FileStore/tables/uy43p2971496606385819/testweet.json");
//convert RDD to DF
val inputs= data.toDF();
inputs.createOrReplaceTempView("tweets");
How do I write the output of my code to a CSV?
Here is what I'm trying. The frequency analysis works, but I can't get the CSV to write. I'm pretty new to Python, so I'm sure I'm doing something wrong.
# This Python file uses the following encoding: utf-8
import os, sys
import re
import csv

filename = 'TweetsCSV_ORIGINAL.txt'

word_list = re.split('\s+', file(filename).read().lower())
print 'Words in text:', len(word_list)

freq_dic = {}
punctuation = re.compile(r'[.?!,":;]')
for word in word_list:
    word = punctuation.sub("", word)
    try:
        freq_dic[word] += 1
    except:
        freq_dic[word] = 1

print 'Unique words:', len(freq_dic)

freq_list = freq_dic.items()
freq_list.sort()
for word, freq in freq_list:
    print word, freq

# write to CSV
res = [word, freq]

csvfile = "tweetfreq.csv"

# Assuming res is a flat list
with open(csvfile, "w") as output:
    writer = csv.writer(output, lineterminator='\n')
    for val in res:
        writer.writerow([val])
This snippet will append a line to the end of your CSV file.
with open('tweetfreq.csv', 'a') as csvfile:
    csv_writer = csv.writer(csvfile)
    csv_writer.writerow([word, freq])
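If the goal is to write every (word, freq) pair rather than only the last one, the writerow call can live inside the loop over the frequency list. A minimal sketch reusing freq_dic from the question:

import csv

# write one CSV row per (word, frequency) pair
with open("tweetfreq.csv", "w") as output:
    writer = csv.writer(output, lineterminator='\n')
    for word, freq in sorted(freq_dic.items()):
        writer.writerow([word, freq])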