getting 'error loading layout' while running dash app - machine-learning

I am getting 'Error loading layout' while running my Dash app. I have tried every solution I could find, including uninstalling and reinstalling the dash, plotly, and pandas libraries, but I still get the same error.
terminal:
Serving Flask app "app" (lazy loading)
Environment: production
WARNING: This is a development server. Do not use it in a production deployment.
Use a production WSGI server instead.
Debug mode: on
# code for stock visualization and prediction app
import dash
import dash_core_components as dcc
import dash_html_components as html
from datetime import datetime as dt
from datetime import date
from dash.dependencies import Input, Output, State
from flask.helpers import url_for
import yfinance as yf
import pandas as pd
import plotly.graph_objs as go
import plotly.express as px

app = dash.Dash()
server = app.server

app.layout = html.Div(
    className="container",
    children=[
        # input section
        html.Div(children=[
            html.H1("Welcome to stock dash app!", className="start", style={'color': '#ffffff'}),
            html.Div(children=[
                html.Div(children=[
                    dcc.Input(id="stock_code", type='text', placeholder="Enter Stock Code", className="inputbox"),
                    html.Button('Submit', id='submit_button', className="submit_button", style={'color': '#fffbf5'})
                ]),
                html.Div(children=[
                    # Date range picker input
                    dcc.DatePickerRange(
                        id='date_range_picker',
                        min_date_allowed=date(1990, 12, 31),
                        initial_visible_month=date(2021, 12, 3),
                        end_date=dt.now,
                    )
                ]),
                html.Div(
                    className='button',
                    children=[
                        # Stock price button
                        html.Button('Stock Price', id='Stock_price_button', className="stock_p_b", style={'color': '#fffbf5'}),
                        html.Button('Forcast', id='Forcast_button', className="forecast_b", style={'color': '#fffbf5'}),
                    ]
                ),
                html.Div(children=[
                    dcc.Input(id="days", type='number', placeholder="Number of Days", className="inputbox"),
                    html.Button('Indicator', id='Indicator_button', className="indicator_b", style={'color': '#fffbf5'})
                ])
            ])
        ], className='nav'),
        html.Div(
            [
                html.Div(
                    children=[
                        html.Img('', id='logo', className='company_logo'),
                        html.H1('', id='c_name', className='company_name', style={'color': '#064635'})
                    ],
                    className="header"),
                html.Div(  # Description
                    html.P('', id='description', className='c_desc', style={'color': '#064635'})
                ),
                html.Div(id='stock_graph'),
                html.Div(id='indicator_graph'),
                html.Div(id='forcast_graph')
            ],
            className="content"
        )
    ]
)
@app.callback(
    [
        Output('logo', 'children'),
        Output('c_name', 'children'),
        Output('description', 'children')
    ],
    [
        Input('stock_code', 'value'),
    ],
    State(component_id='submit_button', component_property='n_clicks')
)
def update_data(arg1, arg2):
    ticker = yf.Ticker(arg1)
    inf = ticker.info
    df = pd.DataFrame().from_dict(inf, orient="index").T
    img_url = ticker.info['logo_url'],
    name = ticker.info['shortName'],
    des = ticker.info['longBusinessSummary']
    return img_url, name, des


if __name__ == '__main__':
    app.run_server(debug=True)
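
For comparison, here is a stripped-down Dash app with the same moving parts (a layout plus one callback) that loads without the 'Error loading layout' message. It is only a sketch for narrowing the problem down, not a fix for the code above; the echo id and callback are made up for illustration.

import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output

app = dash.Dash(__name__)

# Keep the layout to plain components and JSON-serializable property values;
# if serving the layout fails, the page typically shows "Error loading layout".
app.layout = html.Div([
    dcc.Input(id='stock_code', type='text', placeholder='Enter Stock Code'),
    html.Div(id='echo'),
])


@app.callback(Output('echo', 'children'), [Input('stock_code', 'value')])
def echo_value(value):
    # Dash calls this once on page load with value=None.
    return value or ''


if __name__ == '__main__':
    app.run_server(debug=True)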

Related

Scrapy Spider not returning any results

I am trying to build a scraper with Scrapy. My overall goal is to scrape the webpages of a website and return a list of links for all downloadable documents of the different pages.
Somehow my code returns only None. I am not sure what the cause of this could be. Thank you for your help in advance. Please note that robots.txt does not cause this issue.
import re
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from w3lib.url import url_query_cleaner


def processlinks(links):
    for link in links:
        link.url = url_query_cleaner(link.url)
        yield link


class ExampleCrawler(CrawlSpider):
    name = 'example'
    allowed_domains = ['www.example.com']
    start_urls = ["https://example.com/"]
    rules = (
        Rule(
            LinkExtractor(
                deny=[
                    re.escape('https://www.example.com/offsite'),
                    re.escape('https://www.example.com/whitelist-offsite'),
                ],
            ),
            process_links=processlinks,
            callback='parse_links',
            follow=False
        ),
    )

    def parse_links(self, response):
        html = response.body
        links = scrapy.Selector(text=html).xpath('//@href').extract()
        documents = []
        for link in links:
            absolute_url = urljoin(response.url, link)
            documents.append(absolute_url)
        return documents
I expected to receive a list containing all document download links for all webpages of the website. I only got a None value returned. It seems like the parse_links method does not get called.
There were a few logical and technical issues in the code. I have made changes to it; the details are below.
Your site redirects to another site, so you need to update the allowed domains and add www.iana.org to them:
allowed_domains = ['www.example.com', 'www.iana.org']
Secondly, in Scrapy a callback can't return a plain list or string; it should return a Request or an item in the form of a dictionary. See the final code below.
import re
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from w3lib.url import url_query_cleaner
from urllib.parse import urljoin
import scrapy


def processlinks(links):
    for link in links:
        link.url = url_query_cleaner(link.url)
        yield link


class ExampleCrawler(CrawlSpider):
    name = 'example'
    allowed_domains = ['www.example.com', 'www.iana.org']
    start_urls = ["https://example.com/"]
    rules = (
        Rule(
            LinkExtractor(
                deny=[
                    re.escape('https://www.example.com/offsite'),
                    re.escape('https://www.example.com/whitelist-offsite'),
                ],
            ),
            process_links=processlinks,
            callback='parse_links',
            follow=False
        ),
    )

    def parse_links(self, response):
        html = response.body
        links = scrapy.Selector(text=html).xpath('//@href').extract()
        documents = []
        for link in links:
            absolute_url = urljoin(response.url, link)
            documents.append(absolute_url)
        return {"document": documents}

Django admin page appears to be blank

I wondered if someone could help me work out why my admin page appears to be blank. I am able to log in successfully with my superuser. For example, when I go to localhost:8000/admin/ the page returns a login form, but when I click 'login' the resulting page is blank. I have already checked that 'django.contrib.admin' is installed in every urls file. I have registered the models in the Django admin.py files. I have also reinstalled Django.
INSTALLED_APPS = [
    'HealthcareProject4',
    'HealthcareProject4.Users2',
    'HealthcareProject4.Admin',
    'HealthcareProject4.CellularModelling',
    'HealthcareProject4.Conditions',
    'HealthcareProject4.Drugs',
    'HealthcareProject4.Events',
    'HealthcareProject4.Games',
    'HealthcareProject4.GetInvolved',
    'HealthcareProject4.News',
    'HealthcareProject4.Outcomes',
    'HealthcareProject4.Population',
    'HealthcareProject4.Systems',
    'HealthcareProject4.Trusts',
    'HealthcareProject4.Logging',
    'HealthcareProject4.Messaging',
    # Add your apps here to enable them
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Other apps #
    # 'highcharts'
]
urls.py
app_name = "Conditions"
from datetime import datetime
from django.urls import path
from django.contrib import admin
from django.contrib.auth.views import LoginView, LogoutView
from django.conf.urls import include
from django.conf.urls import url
from HealthcareProject4.Conditions import views
urlpatterns = [
    url('^admin/', admin.site.urls),
    url('^home/', views.home, name='home'),
    url('^asthma/', views.asthma, name='Asthma'),
    url('^atrialfibrillation', views.atrialfibrillation, name='AtrialFibrillation'),
    url('^bloodpressure/', views.bloodpressure, name='BloodPressure'),
    url('^cancer/', views.cancer, name='Cancer'),
    url('^cervicalscreening/', views.cervicalscreening, name='CervicalScreening'),
    url('^chronicheartdisease/', views.chronicheartdisease, name='ChronicHeartDisease'),
    url('^chronickidneydisease/', views.chronickidneydisease, name='ChronicKidneyDisease'),
    url('^copd/', views.copd, name='COPD'),
    url('^cvd/', views.cvd, name='CVD'),
    url('^dementia/', views.dementia, name='Dementia'),
    url('^depression/', views.depression, name='Depression'),
    url('^diabetes/', views.diabetes, name='Diabetes'),
    url('^epilepsy/', views.epilepsy, name='Epilepsy'),
    url('^heartfailure/', views.heartfailure, name='HeartFailure'),
    url('^hypertension/', views.hypertension, name='Hypertension'),
    url('^learningdisability/', views.learningdisability, name='LearningDisability'),
    url('^mentalhealth/', views.mentalhealth, name='MentalHealth'),
    url('^obesity/', views.obesity, name='Obesity'),
    url('^osteoporosis/', views.osteoporosis, name='Osteoporosis'),
    url('^palliativecare/', views.palliativecare, name='PalliativeCare'),
    url('^rheumatoidarthritis/', views.rheumatoidarthritis, name='RhemuatoidArthritis'),
    url('^smoking/', views.smoking, name='Smoking'),
    url('^stroke/', views.stroke, name='Stroke'),
]
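
For reference, registering a model in one of the apps' admin.py files usually looks like the sketch below; the Condition model name here is only a placeholder, not something taken from the project above.

# admin.py (illustrative sketch; "Condition" is a placeholder model name)
from django.contrib import admin
from .models import Condition

admin.site.register(Condition)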

Mysql 8 - load geojson values using load data infile

How can I import GeoJSON values into a MySQL column using LOAD DATA INFILE? For example, I have a file as follows (simplified for brevity):
{"type":"FeatureCollection", "features": [
{"type":"Feature","geometry":{"type":"Polygon","coordinates":
[
[
[31.287890625000017,-22.40205078125001],
[31.429492187500017,-22.298828125],
[32.37109375,-21.33486328125001]
]
] },"properties":{"NAME":"Zimbabwe"}
},
{"type":"Feature","geometry":{"type":"Polygon","coordinates":
[
[
[30.39609375,-15.64306640625],
[30.3505859375,-15.349707031250006],
[30.231835937500023,-14.990332031250006]
]
]},"properties":{"NAME":"Zambia"}
}
]
}
Currently when I do the following:
LOAD DATA LOCAL INFILE 'C:/Users/Downloads/countries.geojson' INTO TABLE countries (geo_json);
I get error:
Invalid JSON text: "Invalid value." at position 2668 in value for column 'countries.geo_json'
How can I load each feature into a table that has a JSON column called geo_json? I then want to extract the name from each feature and add it to a name column.
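
One approach that should work, sketched below, is to first split the FeatureCollection into one feature per line, since LOAD DATA INFILE reads the file line by line rather than parsing it as a single JSON document (the features.jsonl file name is just an assumption):

# Sketch: write each feature of the FeatureCollection on its own line,
# so LOAD DATA INFILE can load one feature per row into the geo_json column.
import json

with open('countries.geojson', encoding='utf-8') as src:
    collection = json.load(src)

with open('features.jsonl', 'w', encoding='utf-8') as out:
    for feature in collection['features']:
        # json.dumps emits the feature as a single line with no raw newlines
        out.write(json.dumps(feature) + '\n')

After that, something like LOAD DATA LOCAL INFILE 'features.jsonl' INTO TABLE countries (geo_json); should accept one feature per row (adding FIELDS ESCAPED BY '' if backslashes in the JSON get mangled), and the name column could then be filled with UPDATE countries SET name = geo_json->>'$.properties.NAME'; (both statements are untested sketches).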

urllib.request.urlopen(url) how to use this function with ip address?

I'm working with Python 3, testing page load times, so I created a local Apache server for comparison. The problem is that I use the urllib.request.urlopen(url) function, which doesn't seem to let me use my own IP address. Is there anything that would help me get the page using only an IP address? Here's the code I'm working on:
start_loadf = time.time()
nf = urllib.request.urlopen(url) ##// I want here to be something like 192.168.1.2
page = nf.read()
end_loadf = time.time()
nf.close()
reading_time = format(end_loadf-start_loadf,'.3f')
print("Kaynaktan alinan ilk okuma suresi : ", reading_time , "sn.")
I solved the problem when I looked into urllib more closely. What I actually need is urllib2, but because I'm using Python 3.4 I shouldn't import urllib itself, since that makes Python use the urllib part rather than urllib2. After importing only urllib.request and writing the URL as http://192.168.1.2 instead of 192.168.1.2, it works fine.
import urllib.request
import time
import socket
nf = urllib.request.urlopen("http://192.168.1.2")
start_loadf = time.time()
page = nf.read()
end_loadf = time.time()
nf.close()
reading_time = format(end_loadf-start_loadf,'.3f')
print("Kaynaktan alinan ilk okuma suresi : ", reading_time , "sn.")

How to programmatically get holdings of an ETF

I am looking for a way to get the holdings list of an ETF via a web service such as Yahoo Finance. So far, YQL has not yielded the desired results.
As an example, ZUB.TO is an ETF that has holdings; here is a list of the holdings. Querying yahoo.finance.quotes does not return the proper information; see the result.
Is there another table somewhere that would contain the holdings?
Downloading from Yahoo Finance may not be working, and may not work at all.
Instead, how about using the export endpoints the ETF providers already offer for downloading the holdings as Excel or CSV files?
Use the "append_df_to_excel" helper (from the link below) as a file to import, and then use the code below to build an Excel file with the holdings of all 11 Sector SPDRs provided by SSgA (State Street Global Advisors).
Personally I use this for doing breadth analysis.
import pandas as pd
import append_to_excel
# https://stackoverflow.com/questions/20219254/how-to-write-to-an-existing-excel-file-without-overwriting-data-using-pandas

##############################################################################
# Author: Salil Gangal
# Posted on: 08-JUL-2018
# Forum: Stack Overflow
##############################################################################

output_file = 'C:\my_python\SPDR_Holdings.xlsx'
base_url = "http://www.sectorspdr.com/sectorspdr/IDCO.Client.Spdrs.Holdings/Export/ExportExcel?symbol="

data = {
    'Ticker': ['XLC', 'XLY', 'XLP', 'XLE', 'XLF', 'XLV', 'XLI', 'XLB', 'XLRE', 'XLK', 'XLU'],
    'Name': ['Communication Services', 'Consumer Discretionary', 'Consumer Staples', 'Energy', 'Financials', 'Health Care', 'Industrials', 'Materials', 'Real Estate', 'Technology', 'Utilities']
}

spdr_df = pd.DataFrame(data)
print(spdr_df)

for i, row in spdr_df.iterrows():
    url = base_url + row['Ticker']
    df_url = pd.read_excel(url)
    header = df_url.iloc[0]
    holdings_df = df_url[1:]
    holdings_df.set_axis(header, axis='columns', inplace=True)
    print("\n\n", row['Ticker'], "\n")
    print(holdings_df)
    append_df_to_excel(output_file, holdings_df, sheet_name=row['Ticker'], index=False)
Image of Excel file generated for SPDRs
