Use Kivy app while Excel file is being built

So I am trying to create a Kivy app that allows a user to control and monitor various hardware components. Part of the code builds and continuously updates an Excel worksheet that imports temperature readings from the hardware's comm port, along with a time-stamp. I have been able to implement all of this so far, but I am unable to interact with the Kivy app while the Excel worksheet is being built/updated (i.e. while my hardware test is underway), which leaves me unable to use the app's features while the test is running (such as the 'Pause' or 'Abort' buttons) until the worksheet is no longer being altered. So my question is: is it possible to export to an Excel file while simultaneously being able to use the Kivy app? And if so, how?
This is part of my code that sets up the Excel worksheet. Thank you in advance!
from kivy.app import App
from kivy.uix.screenmanager import Screen
from openpyxl import Workbook, load_workbook
import time

class HomeScreen(Screen):

    def build(self):
        return HomeScreen()

    def RunExcelFile(self):
        wb = Workbook()
        ws = wb.active
        a = 0
        i = 2
        while a < 5:
            ws['A1'].value = 'Time'
            ws['B1'].value = 'Batch 1'
            ws['C1'].value = 'Batch 2'
            column = 'A'
            row = i
            time_cell = column + str(row)
            t = time.localtime()
            ws[time_cell].value = time.asctime(t)
            a = a + 1
            i = i + 1
            time.sleep(1)
            wb.save("scatter.xlsx")

If you are doing some background job without touching widgets or properties, you can use the threading module without problems. Otherwise, you would need to use the @mainthread decorator or Clock.
import time
import threading

from kivy.uix.screenmanager import Screen

class HomeScreen(Screen):

    def run_excel_file(self):
        def job():
            for i in range(5):
                print(i)
                time.sleep(1)
            print('job done')
        threading.Thread(target=job).start()
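If the background job does need to update the UI (for example a status label while the spreadsheet is being written), a minimal sketch along the same lines might look like the following. It assumes a Label with id status_label in the kv file; the _excel_job and _update_status names are just illustrative, and the openpyxl loop is a simplified version of the one in the question:

import time
import threading

from kivy.clock import mainthread
from kivy.uix.screenmanager import Screen
from openpyxl import Workbook

class HomeScreen(Screen):

    def run_excel_file(self):
        # Run the spreadsheet loop in a background thread so the Kivy
        # event loop (and the Pause/Abort buttons) stays responsive.
        threading.Thread(target=self._excel_job, daemon=True).start()

    def _excel_job(self):
        wb = Workbook()
        ws = wb.active
        ws['A1'].value = 'Time'
        for row in range(2, 7):
            ws['A' + str(row)].value = time.asctime(time.localtime())
            wb.save("scatter.xlsx")
            self._update_status("Logged row %d" % row)
            time.sleep(1)
        self._update_status("job done")

    @mainthread
    def _update_status(self, text):
        # Widget and property changes must happen on the main thread,
        # which is what the @mainthread decorator guarantees.
        self.ids.status_label.text = text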

Related

How to Select checkbox in Appium using Python if there is no unique ID/Text/Class name

[Screenshots: the elements I need to click, and the element locator]
Hi, I am trying to click a checkbox. Below is my code:
from appium import webdriver
import time

from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By

desired_cap = dict(
    platformName="Android",
    platformVersion="11",
    deviceName="1234567",
    appPackage="io.appium.android.apis",
    appActivity="io.appium.android.apis.ApiDemos"
)

driver = webdriver.Remote("http://127.0.0.1:4723/wd/hub", desired_cap)
driver.find_element(By.XPATH, "//android.widget.TextView[@text='Accessibility']").click()
driver.find_element(By.XPATH, "//android.widget.TextView[@text='Accessibility Node Querying']").click()
time.sleep(2)

chk = driver.find_elements(By.XPATH, "//android.widget.CheckBox")
for i in chk:
    if i == 2:
        i.click()
With the code above, if I just call i.click() I am able to click all the checkboxes, but I have not found a way to click a single checkbox.
Select the array of elements and tap on the one at the index you want:
elements = self.driver.find_elements(*locator)
elements[i].click()
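Applied to the question's code, a minimal sketch (which checkbox to tap is an assumption here; index 2 picks the third one) would index into the list returned by find_elements instead of comparing each element to a number:

from selenium.webdriver.common.by import By

# find_elements returns a list of matching elements; pick one by
# position instead of testing `i == 2` on the element object itself.
checkboxes = driver.find_elements(By.XPATH, "//android.widget.CheckBox")
if len(checkboxes) > 2:
    checkboxes[2].click()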

Monitor a changing file in Python

How do I create a program that monitors a file (for example, a text file)? When something new is written to the file, the program should report that something was added; when part of the text is deleted, it should report that something was deleted.
It should also print to the console exactly which words were deleted or added.
Explanation
I use watchdog to follow the file.
On instantiation of the handler, I read the file's size.
When the file is modified, watchdog calls the on_modified function.
When this method is called, I compare the file's current size to its previous size to determine if the change was additive or subtractive.
You have a few other options when it comes to tracking the file. For example, you could also compare:
the number of lines
the number of words
the number of characters
the exact contents of the file
import os
import time

from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

class EventHandler(FileSystemEventHandler):
    def __init__(self, file_path_to_watch):
        self.file_path_to_watch = file_path_to_watch
        self._file_size = self._read_file_size()

    def _read_file_size(self):
        return os.path.getsize(self.file_path_to_watch)

    def _print_change(self, new_file_size):
        if new_file_size > self._file_size:
            print('File modified with additions')
        elif new_file_size < self._file_size:
            print('File modified with deletions')

    def on_modified(self, event):
        if event.src_path != self.file_path_to_watch:
            return
        new_file_size = self._read_file_size()
        self._print_change(new_file_size)
        self._file_size = new_file_size

if __name__ == "__main__":
    file_to_watch = '/path/to/watch.txt'
    event_handler = EventHandler(file_to_watch)
    observer = Observer()
    observer.schedule(event_handler, path=file_to_watch, recursive=False)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
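The list above mentions comparing the exact contents of the file as another option; that is also the way to report exactly which words were added or removed, as the question asks. A rough sketch under that approach (the WordDiffHandler name, the _read_words helper and the utf-8 encoding are assumptions, not part of the original answer) could keep the previous word list and print the difference:

import collections

from watchdog.events import FileSystemEventHandler

class WordDiffHandler(FileSystemEventHandler):
    def __init__(self, file_path_to_watch):
        self.file_path_to_watch = file_path_to_watch
        self._words = self._read_words()

    def _read_words(self):
        # Read the whole file and split it into words.
        with open(self.file_path_to_watch, encoding='utf-8') as f:
            return f.read().split()

    def on_modified(self, event):
        if event.src_path != self.file_path_to_watch:
            return
        new_words = self._read_words()
        # Multiset difference: words that now appear more often than
        # before were added, words that appear less often were removed.
        old_counts = collections.Counter(self._words)
        new_counts = collections.Counter(new_words)
        added = list((new_counts - old_counts).elements())
        removed = list((old_counts - new_counts).elements())
        if added:
            print('Added words:', added)
        if removed:
            print('Removed words:', removed)
        self._words = new_words

It can be scheduled on an Observer exactly like the EventHandler above.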

Saving SEC 10-K annual report text to files (trouble with decoding)

I am trying to bulk-download the text visible to the "end-user" from 10-K SEC EDGAR reports (I don't care about tables) and save it in a text file. I found the code below on YouTube, however I am facing 2 challenges:
I am not sure if I am capturing all the text, and when I print the text fetched from the URL below, I get very strange output (special characters, e.g. at the very end of the print-out).
I can't seem to save the text to .txt files; I'm not sure if this is due to encoding (I am entirely new to programming).
import re
import requests
import unicodedata

from bs4 import BeautifulSoup

def restore_windows_1252_characters(restore_string):
    def to_windows_1252(match):
        try:
            return bytes([ord(match.group(0))]).decode('windows-1252')
        except UnicodeDecodeError:
            # No character at the corresponding code point: remove it.
            return ''
    return re.sub(r'[\u0080-\u0099]', to_windows_1252, restore_string)

# define the url to the specific html_text file
new_html_text = r"https://www.sec.gov/Archives/edgar/data/796343/0000796343-14-000004.txt"

# grab the response
response = requests.get(new_html_text)
page_soup = BeautifulSoup(response.content, 'html5lib')
page_text = page_soup.html.body.get_text(' ', strip=True)

# normalize the text, remove characters. Additionally, restore missing windows characters.
page_text_norm = restore_windows_1252_characters(unicodedata.normalize('NFKD', page_text))

# print: this works, however it gives me weird special characters (e.g. at the very end)
print(page_text_norm)

# save to file: this only gives me an empty text file
with open('testfile.txt', 'w') as file:
    file.write(page_text_norm)
Try this. If you take the data you expect as an example, it will be easier for people to understand your needs.
from simplified_scrapy import SimplifiedDoc,req,utils
url = 'https://www.sec.gov/Archives/edgar/data/796343/0000796343-14-000004.txt'
html = req.get(url)
doc = SimplifiedDoc(html)
# text = doc.body.text
text = doc.body.unescape() # Converting HTML entities
utils.saveFile("testfile.txt",text)
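Regarding the empty testfile.txt in the question's own code: if the failure comes from the platform's default encoding, one small change worth trying (the utf-8 choice is an assumption; page_text_norm is the variable from the question's code) is to pass an explicit encoding when opening the file:

# Write with an explicit encoding so characters outside the platform
# default (e.g. cp1252 on Windows) don't raise UnicodeEncodeError.
with open('testfile.txt', 'w', encoding='utf-8') as file:
    file.write(page_text_norm)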

Best tool for rendering short items of text in wxPython and Kivy

I have a multi-platform app running on desktop (wxPython) and mobile (kivy). In it I want to render small areas of variable text in a window in the app. The text will depend on the state of the app. I am happy to use rtf, html or reStructuredText. I need to use the same source for the text on each platform.
A typical example of a text snippet would be:
Heading
=======
1. With 24 widgets pull a **long** one;
2. with fewer, push a **wide** one.
Which would render as:
Heading
With 24 widgets pull a long one;
with fewer, push a wide one.
My question is: which format should I use?
My preference would be reStructuredText. There appears to be a Kivy widget to support this, but nothing in wxPython.
One solution is to use the docutils package.
This will take reStructuredText and output it as html. I can then use the wxPython wx.html control to display the output.
import wx
import wx.html as wxhtml

from docutils.core import publish_string

class MainFrame(wx.Frame):
    def __init__(self, *args, **kwargs):
        wx.Frame.__init__(self, *args, **kwargs)
        self.panel = MainPanel(self)
        sizer = wx.BoxSizer()
        sizer.Add(self.panel)
        self.SetSizerAndFit(sizer)
        self.Show()
        input_string = ("Heading\n"
                        "=======\n"
                        "\n"
                        "1. With 24 widgets pull a **long** one;\n"
                        "2. with fewer, push a **wide** one.\n")
        self.display_rst(input_string)

    def display_rst(self, rst):
        html = publish_string(rst, writer_name='html')
        self.panel.html.SetPage(html)

class MainPanel(wx.Panel):
    def __init__(self, parent, *args, **kwargs):
        wx.Panel.__init__(self, parent, *args, **kwargs)
        display_style = wx.VSCROLL | wx.HSCROLL | wx.TE_READONLY | wx.BORDER_SIMPLE
        self.html = wxhtml.HtmlWindow(self, -1, size=(300, 200),
                                      style=display_style)
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(self.html)
        self.SetSizer(sizer)

if __name__ == "__main__":
    SCREEN_APP = wx.App()
    MAIN_FRAME = MainFrame(None, title="Display HTML")
    SCREEN_APP.MainLoop()
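On the Kivy side, a minimal sketch of the same snippet rendered with Kivy's RstDocument widget (the widget alluded to above as the apparent reStructuredText option; the RstApp name is illustrative) could look like this, so the same source string is reused on both platforms:

from kivy.app import App
from kivy.uix.rst import RstDocument

RST_SOURCE = ("Heading\n"
              "=======\n"
              "\n"
              "1. With 24 widgets pull a **long** one;\n"
              "2. with fewer, push a **wide** one.\n")

class RstApp(App):
    def build(self):
        # RstDocument renders reStructuredText directly, so the snippet
        # used for the wxPython/docutils path is displayed as-is.
        return RstDocument(text=RST_SOURCE)

if __name__ == "__main__":
    RstApp().run()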

zipline backtesting using non-US (European) intraday data

I'm trying to get zipline working with non-US, intraday data, that I've loaded into a pandas DataFrame:
BARC HSBA LLOY STAN
Date
2014-07-01 08:30:00 321.250 894.55 112.105 1777.25
2014-07-01 08:32:00 321.150 894.70 112.095 1777.00
2014-07-01 08:34:00 321.075 894.80 112.140 1776.50
2014-07-01 08:36:00 321.725 894.80 112.255 1777.00
2014-07-01 08:38:00 321.675 894.70 112.290 1777.00
I've followed the moving-averages tutorial here, replacing "AAPL" with my own symbol code and the history calls with "1m" data instead of "1d".
Then I do the final call using algo_obj.run(DataFrameSource(mydf)), where mydf is the dataframe above.
However there are all sorts of problems arising related to TradingEnvironment. According to the source code:
# This module maintains a global variable, environment, which is
# subsequently referenced directly by zipline financial
# components. To set the environment, you can set the property on
# the module directly:
# from zipline.finance import trading
# trading.environment = TradingEnvironment()
#
# or if you want to switch the environment for a limited context
# you can use a TradingEnvironment in a with clause:
# lse = TradingEnvironment(bm_index="^FTSE", exchange_tz="Europe/London")
# with lse:
# the code here will have lse as the global trading.environment
# algo.run(start, end)
However, using the context doesn't seem to fully work. I still get errors, for example stating that my timestamps are before the market open (and indeed, looking at trading.environment.open_and_close, the times are for the US market).
My question is, has anybody managed to use zipline with non-US, intra-day data? Could you point me to a resource and ideally example code on how to do this?
n.b. I've seen there are some tests on GitHub that seem related to the trading calendars (tradingcalendar_lse.py, tradingcalendar_tse.py, etc.), but these appear to only handle data at the daily level. I would need to fix:
open/close times
reference data for the benchmark
and probably more ...
I've got this working after fiddling around with the tutorial notebook. Code sample below. It uses the DataFrame mid, as described in the original question. A few points bear mentioning:
Trading calendar: I create one manually and assign it to trading.environment, using non_working_days from tradingcalendar_lse.py. Alternatively you could create one that fits your data exactly (although that could be a problem for out-of-sample data). There are two fields you need to define: trading_days and open_and_closes.
sim_params: There is a problem with the default start/end values because they aren't timezone-aware, so you must create a sim_params object and pass timezone-aware start/end parameters.
Also, run() must be called with the argument overwrite_sim_params=False, as calculate_first_open/close raise timestamp errors otherwise.
I should mention that it's also possible to pass pandas Panel data, with fields open, high, low, close, price and volume in the minor_axis. In that case those fields are mandatory, otherwise errors are raised.
Note that this code only produces a daily summary of the performance. I'm sure there must be a way to get the result at minute resolution (I thought this was set by emission_rate, but apparently it isn't). If anybody knows, please comment and I'll update the code.
Also, I'm not sure what the API call is to run 'analyze' (when using the %%zipline magic in IPython, as in the tutorial, the analyze() method is called automatically; how do I do this manually?).
import pytz
import pandas as pd

from datetime import datetime
from zipline.algorithm import TradingAlgorithm
from zipline.utils import tradingcalendar
from zipline.utils import tradingcalendar_lse
from zipline.finance.trading import TradingEnvironment
from zipline.api import order_target, record, symbol, history, add_history
from zipline.finance import trading

def initialize(context):
    # Register 2 histories that track daily prices,
    # one with a 100 window and one with a 300 day window
    add_history(10, '1m', 'price')
    add_history(30, '1m', 'price')
    context.i = 0

def handle_data(context, data):
    # Skip first 30 mins to get full windows
    context.i += 1
    if context.i < 30:
        return
    # Compute averages
    # history() has to be called with the same params
    # from above and returns a pandas dataframe.
    short_mavg = history(10, '1m', 'price').mean()
    long_mavg = history(30, '1m', 'price').mean()
    sym = symbol('BARC')
    # Trading logic
    if short_mavg[sym] > long_mavg[sym]:
        # order_target orders as many shares as needed to
        # achieve the desired number of shares.
        order_target(sym, 100)
    elif short_mavg[sym] < long_mavg[sym]:
        order_target(sym, 0)
    # Save values for later inspection
    record(BARC=data[sym].price,
           short_mavg=short_mavg[sym],
           long_mavg=long_mavg[sym])

def analyze(context, perf):
    perf["pnl"].plot(title="Strategy P&L")

# Create algorithm object passing in initialize and
# handle_data functions

# This is needed to handle the correct calendar. Assume that market data has the right index for tradeable days.
# Passing in env_trading_calendar=tradingcalendar_lse doesn't appear to work, as it doesn't implement open_and_closes
trading.environment = TradingEnvironment(bm_symbol='^FTSE', exchange_tz='Europe/London')
#trading.environment.trading_days = mid.index.normalize().unique()
trading.environment.trading_days = pd.date_range(
    start=mid.index.normalize()[0],
    end=mid.index.normalize()[-1],
    freq=pd.tseries.offsets.CDay(holidays=tradingcalendar_lse.non_trading_days))
trading.environment.open_and_closes = pd.DataFrame(
    index=trading.environment.trading_days,
    columns=["market_open", "market_close"])
trading.environment.open_and_closes.market_open = (
    trading.environment.open_and_closes.index + pd.to_timedelta(60 * 7, unit="T")).to_pydatetime()
trading.environment.open_and_closes.market_close = (
    trading.environment.open_and_closes.index + pd.to_timedelta(60 * 15 + 30, unit="T")).to_pydatetime()

from zipline.utils.factory import create_simulation_parameters
sim_params = create_simulation_parameters(
    # Bug in code doesn't set tz if these are not specified
    # (finance/trading.py:SimulationParameters.calculate_first_open[close])
    start=pd.to_datetime("2014-07-01 08:30:00").tz_localize("Europe/London").tz_convert("UTC"),
    end=pd.to_datetime("2014-07-24 16:30:00").tz_localize("Europe/London").tz_convert("UTC"),
    data_frequency="minute",
    emission_rate="minute",
    sids=["BARC"])

algo_obj = TradingAlgorithm(initialize=initialize,
                            handle_data=handle_data,
                            sim_params=sim_params)

# Run algorithm
perf_manual = algo_obj.run(mid, overwrite_sim_params=False)  # overwrite == True calls calculate_first_open[close] (see above)
@Luciano: you can add analyze(None, perf_manual) at the end of your code to run the analyze process automatically.
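For example, a short follow-up along those lines (assuming matplotlib is installed; perf_manual is the performance DataFrame returned by algo_obj.run above):

import matplotlib.pyplot as plt

# Nothing calls analyze() automatically outside the %%zipline magic,
# so invoke it by hand and show the resulting plot.
analyze(None, perf_manual)
plt.show()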
