Django admin page appears to be blank - django-admin

I'm hoping someone can help me work out why my admin page appears blank. I can log in successfully with my superuser: when I go to localhost:8000/admin/ the page returns a login form, but after I click 'Login' the resulting page is blank. I have already checked that 'django.contrib.admin' is in INSTALLED_APPS and that the admin URLs are included in every urls.py. I have registered the models in the admin.py files, and I have also reinstalled Django.
settings.py
INSTALLED_APPS = [
    'HealthcareProject4',
    'HealthcareProject4.Users2',
    'HealthcareProject4.Admin',
    'HealthcareProject4.CellularModelling',
    'HealthcareProject4.Conditions',
    'HealthcareProject4.Drugs',
    'HealthcareProject4.Events',
    'HealthcareProject4.Games',
    'HealthcareProject4.GetInvolved',
    'HealthcareProject4.News',
    'HealthcareProject4.Outcomes',
    'HealthcareProject4.Population',
    'HealthcareProject4.Systems',
    'HealthcareProject4.Trusts',
    'HealthcareProject4.Logging',
    'HealthcareProject4.Messaging',
    # Add your apps here to enable them
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Other apps #
    #'highcharts'
]
urls.py
app_name = "Conditions"

from datetime import datetime
from django.urls import path
from django.contrib import admin
from django.contrib.auth.views import LoginView, LogoutView
from django.conf.urls import include
from django.conf.urls import url

from HealthcareProject4.Conditions import views

urlpatterns = [
    url('^admin/', admin.site.urls),
    url('^home/', views.home, name='home'),
    url('^asthma/', views.asthma, name='Asthma'),
    url('^atrialfibrillation', views.atrialfibrillation, name='AtrialFibrillation'),
    url('^bloodpressure/', views.bloodpressure, name='BloodPressure'),
    url('^cancer/', views.cancer, name='Cancer'),
    url('^cervicalscreening/', views.cervicalscreening, name='CervicalScreening'),
    url('^chronicheartdisease/', views.chronicheartdisease, name='ChronicHeartDisease'),
    url('^chronickidneydisease/', views.chronickidneydisease, name='ChronicKidneyDisease'),
    url('^copd/', views.copd, name='COPD'),
    url('^cvd/', views.cvd, name='CVD'),
    url('^dementia/', views.dementia, name='Dementia'),
    url('^depression/', views.depression, name='Depression'),
    url('^diabetes/', views.diabetes, name='Diabetes'),
    url('^epilepsy/', views.epilepsy, name='Epilepsy'),
    url('^heartfailure/', views.heartfailure, name='HeartFailure'),
    url('^hypertension/', views.hypertension, name='Hypertension'),
    url('^learningdisability/', views.learningdisability, name='LearningDisability'),
    url('^mentalhealth/', views.mentalhealth, name='MentalHealth'),
    url('^obesity/', views.obesity, name='Obesity'),
    url('^osteoporosis/', views.osteoporosis, name='Osteoporosis'),
    url('^palliativecare/', views.palliativecare, name='PalliativeCare'),
    url('^rheumatoidarthritis/', views.rheumatoidarthritis, name='RhemuatoidArthritis'),
    url('^smoking/', views.smoking, name='Smoking'),
    url('^stroke/', views.stroke, name='Stroke'),
]
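
As an aside, django.conf.urls.url() was deprecated in Django 3.1 and removed in 4.0; on a recent Django the same routes would be declared with path() (or re_path() for regexes). A minimal sketch of the first few routes, assuming the same view names:
from django.contrib import admin
from django.urls import path

from HealthcareProject4.Conditions import views

urlpatterns = [
    path('admin/', admin.site.urls),
    path('home/', views.home, name='home'),
    path('asthma/', views.asthma, name='Asthma'),
    # ...the remaining condition routes follow the same pattern
]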

Related

Scrapy Spider not returning any results

I am trying to build a scraper with Scrapy. My overall goal is to scrape the webpages of a website and return a list of links to all downloadable documents on the different pages.
Somehow my code returns only None, and I am not sure what the cause could be. Thank you for your help in advance. Please note that the robots.txt is not causing this issue.
import re
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from w3lib.url import url_query_cleaner

def processlinks(links):
    for link in links:
        link.url = url_query_cleaner(link.url)
        yield link

class ExampleCrawler(CrawlSpider):
    name = 'example'
    allowed_domains = ['www.example.com']
    start_urls = ["https://example.com/"]
    rules = (
        Rule(
            LinkExtractor(
                deny=[
                    re.escape('https://www.example.com/offsite'),
                    re.escape('https://www.example.com/whitelist-offsite'),
                ],
            ),
            process_links=processlinks,
            callback='parse_links',
            follow=False
        ),
    )

    def parse_links(self, response):
        html = response.body
        links = scrapy.Selector(text=html).xpath('//#href').extract()
        documents = []
        for link in links:
            absolute_url = urljoin(response.url, link)
            documents.append(absolute_url)
        return documents
I expected to receive a list containing all document download links for all webpages of the website, but I only got a None value returned. It seems like the parse_links method does not get called.
There were a few logical and technical issues in the code; I have made the changes below.
First, the site redirects to another domain, so you need to update the allowed domains and add www.iana.org:
allowed_domains = ['www.example.com', 'www.iana.org']
Secondly, in Scrapy a callback can't return a plain list or string; it should return a Request or an item in the form of a dictionary. See the last line of the code.
import re
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from w3lib.url import url_query_cleaner
from urllib.parse import urljoin
import scrapy

def processlinks(links):
    # strip query strings so the same page is not queued twice
    for link in links:
        link.url = url_query_cleaner(link.url)
        yield link

class ExampleCrawler(CrawlSpider):
    name = 'example'
    allowed_domains = ['www.example.com', 'www.iana.org']
    start_urls = ["https://example.com/"]
    rules = (
        Rule(
            LinkExtractor(
                deny=[
                    re.escape('https://www.example.com/offsite'),
                    re.escape('https://www.example.com/whitelist-offsite'),
                ],
            ),
            process_links=processlinks,
            callback='parse_links',
            follow=False
        ),
    )

    def parse_links(self, response):
        html = response.text  # Selector(text=...) expects a str, not bytes
        # '//a/@href' selects the href attribute of every anchor tag
        links = scrapy.Selector(text=html).xpath('//a/@href').extract()
        documents = []
        for link in links:
            absolute_url = urljoin(response.url, link)
            documents.append(absolute_url)
        # callbacks must yield/return items (e.g. dicts) or Requests, not a bare list
        return {"document": documents}
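
As a usage note, the spider above can be run without a full Scrapy project via the runspider command, writing the yielded items to a JSON feed (the module filename here is just an example):
scrapy runspider example_crawler.py -o documents.json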

beautifulsoup returns only partial urls for some websites

from bs4 import BeautifulSoup, SoupStrainer
import requests

def get_url(url):
    page = requests.get(url.format())
    data = page.text
    soup = BeautifulSoup(data)
    for link in soup.find_all('a'):
        print(link.get('href'))
That's the base code, and when I request
# get_url("https://www.marie-claire.es/moda")
get_url("http://spanish.xinhuanet.com/")
xinhua returns full URLs, but the other website only returns partial hyperlinks (relative paths without the scheme and domain).
I am not sure why I have this issue or how to solve it.
Has anyone had a similar issue, or an idea how to solve it?
I suspect that you're looking for urljoin here:
from bs4 import BeautifulSoup, SoupStrainer
import requests
from urllib.parse import urljoin

def get_url(url):
    page = requests.get(url.format())
    data = page.text
    soup = BeautifulSoup(data)
    for link in soup.find_all('a'):
        # resolve relative hrefs against the page URL
        print(urljoin(url, link.get('href')))
You might also consider
for link in set(soup.find_all('a')):
to avoid duplicates in your result.
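
To see why urljoin fixes this, here is a minimal illustration (the relative path is made up for the example): relative hrefs are resolved against the base page URL, while already-absolute hrefs pass through unchanged.
from urllib.parse import urljoin

base = "https://www.marie-claire.es/moda"
print(urljoin(base, "/moda/vestidos"))           # https://www.marie-claire.es/moda/vestidos
print(urljoin(base, "http://spanish.xinhuanet.com/"))  # absolute URLs come back as-is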

Robots.txt flexibility with top level domains

The only problem I have left with this web crawler is making it so that, when the top-level domain changes, say from imdb to youtube, it switches from following the disallow rules of imdb's robots.txt to youtube's. I believe it can all be fixed just by how the variables are declared at the beginning.
import urllib.request
import urllib.parse
from bs4 import BeautifulSoup
import re

re.IGNORECASE = True

# SourceUrl
url = "http://www.imdb.com"
urls = [url]
visited = [url]
robotsUrl = url + '/robots.txt'

while len(urls) < 250000:
    try:
        htmltext = urllib.request.urlopen(urls[0]).read()
        robots = urllib.request.urlopen(robotsUrl).read()
        disallowList = re.findall(b'Disallow\:\s*([a-zA-Z0-9\*\-\/\_\?\.\%\:\&]+)', robots)
    except:
        print(urls[0])
    sourceCode = BeautifulSoup(htmltext, "html.parser")
    urls.pop(0)
    print(len(urls))
    for link in sourceCode.findAll('a', href=True):
        if "http://" not in link['href']:
            link['href'] = urllib.parse.urljoin(url, link['href'])
        in_disallow = False
        for i in range(len(disallowList)):
            if (disallowList[i]).upper().decode() in link['href'].upper():
                in_disallow = True
                break
        if not in_disallow:
            if link['href'] not in visited:
                urls.append(link['href'])
                visited.append(link['href'])

print(visited)
As long as the domain names used inside your robots.txt match the one in the URL of the robots.txt itself, it is all fine. In other words, you can replace yoursite.imdb with yoursite.youtube in all URLs.
Update
Say you have a sitemap declared in your robots.txt; then it should have the same TLD:
http://www.yoursite.imdb/robots.txt
should contain:
sitemap: http://www.yoursite.imdb/sitemap1.xml (not .youtube)
Otherwise, for directives such as Allow or Disallow, there is no impact, since the TLD does not appear in the paths.
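
As a side note, here is a sketch of how the crawler could re-read robots.txt whenever it crosses to a new domain, using the standard library's urllib.robotparser instead of hand-rolled regex parsing (the cache dict and helper name are my own):
from urllib.parse import urlparse
from urllib import robotparser

_parsers = {}  # one parser per scheme://host

def can_fetch(url, user_agent="*"):
    root = "{0}://{1}".format(*urlparse(url)[:2])
    if root not in _parsers:
        rp = robotparser.RobotFileParser(root + "/robots.txt")
        rp.read()  # download and parse that domain's robots.txt
        _parsers[root] = rp
    return _parsers[root].can_fetch(user_agent, url)

# in the crawl loop, only enqueue links the target domain allows:
# if can_fetch(link['href']) and link['href'] not in visited: ...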

Jira: creating one issue but having it in two projects

I need to be able to create a Jira issue, but when selecting a project to add the issue to, I need to be able to add it to another project as well.
So what I have is the same issue, which can be worked on by both project teams.
I don't need clone-and-link, as that results in two issues being created and needing to be maintained.
Does anyone have any ideas on how I might go about this? Develop a new plugin, perhaps?
This can be done using the Script Runner add-on: just add a script to the Create transition that creates the same issue in the other project.
Code for the script (change all IDs to your own):
from com.atlassian.jira.util import ImportUtils
from com.atlassian.jira import ManagerFactory
from com.atlassian.jira.issue import MutableIssue
from com.atlassian.jira import ComponentManager
from com.atlassian.jira.issue.link import DefaultIssueLinkManager
from org.ofbiz.core.entity import GenericValue

# get manager objects
issueManager = ComponentManager.getInstance().getIssueManager()
issueFactory = ComponentManager.getInstance().getIssueFactory()
authenticationContext = ComponentManager.getInstance().getJiraAuthenticationContext()
issueLinkManager = ComponentManager.getInstance().getIssueLinkManager()
customFieldManager = ComponentManager.getInstance().getCustomFieldManager()
userUtil = ComponentManager.getInstance().getUserUtil()
projectMgr = ComponentManager.getInstance().getProjectManager()
customFieldExample = customFieldManager.getCustomFieldObjectByName("custom Field Example")

# define the new issue
issueObject = issueFactory.getIssue()
issueObject.setProject(projectMgr.getProject(10000))  # set which project
issueObject.setIssueTypeId("1")  # which issue type

# set issue attributes
issueObject.setSummary("[copy from ...] " + issue.getSummary())
issueObject.setAssignee(userUtil.getUserObject("John"))
issueObject.setReporter(issue.getAssignee())
issueObject.setDescription(issue.getDescription())
# 'customer_email' must be another CustomField object looked up the same way as
# customFieldExample, e.g. customFieldManager.getCustomFieldObjectByName("Customer Email")
issueObject.setCustomFieldValue(customFieldExample, issue.getCustomFieldValue(customer_email))
issueObject.setComponents(issue.getComponents())

# create the new issue
newIssue = issueManager.createIssue(authenticationContext.getUser(), issueObject)

# link the parent issue to the new one (change to your link id)
issueLinkManager.createIssueLink(issueObject.getId(), issue.getId(), 10003, 1, authenticationContext.getUser())

# update search indexes
ImportUtils.setIndexIssues(True)
ComponentManager.getInstance().getIndexManager().reIndex(newIssue)
ImportUtils.setIndexIssues(False)
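
Note that this script targets an old Jira API; on newer Jira Server versions ComponentManager was superseded by ComponentAccessor, so (as far as I know) the manager lookups would become something like:
from com.atlassian.jira.component import ComponentAccessor

issueManager = ComponentAccessor.getIssueManager()
issueFactory = ComponentAccessor.getIssueFactory()
customFieldManager = ComponentAccessor.getCustomFieldManager()
authenticationContext = ComponentAccessor.getJiraAuthenticationContext()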

Django: How to get yesterday's queryset in class-based views?

I use TodayArchiveView (http://ccbv.co.uk/projects/Django/1.4/django.views.generic.dates/TodayArchiveView/).
By the way, if TodayArchiveView finds no objects for today, TodayArchiveView.as_view() raises Http404. In that situation, I want to fall back to yesterday's data in the current model (table).
What should I do?
Some code here:
urls.py
from django.conf.urls import patterns, include, url
from crm.views import *

urlpatterns = patterns('',
    (r'^workDailyRecord/$', workDailyRecord),
)
views.py
from django.views.generic import TodayArchiveView
from django.http import Http404
from crm.models import *

def workDailyRecord(request):
    if request.method == 'GET':
        tView.as_view()  # I want to call the class-based generic view at this line.
    else:
        try:
            return tView.as_view()(request)
        except Http404:
            # I want to push yesterday's data from the 'WorkDailyRecord' model
            pass

class tView(TodayArchiveView):
    model = WorkDailyRecord
    context_object_name = 'workDailyRecord'
    date_field = 'date'
    template_name = "workDailyRecord.html"
models.py
from django.db import models

class WorkDailyRecord(models.Model):
    date = models.DateTimeField(auto_now_add=True)
    contents = models.TextField()
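
One possible approach is to subclass TodayArchiveView, catch the Http404 it raises for an empty day, and re-dispatch a DayArchiveView for yesterday. A minimal sketch on current Django/Python 3 (the question targets Django 1.4, so the super() call would need adjusting); the class names here are my own:
from datetime import date, timedelta

from django.http import Http404
from django.views.generic.dates import DayArchiveView, TodayArchiveView

from crm.models import WorkDailyRecord

class WorkDayView(DayArchiveView):
    model = WorkDailyRecord
    date_field = 'date'
    context_object_name = 'workDailyRecord'
    template_name = "workDailyRecord.html"
    month_format = '%m'  # accept the numeric month passed below

class WorkTodayView(TodayArchiveView):
    model = WorkDailyRecord
    date_field = 'date'
    context_object_name = 'workDailyRecord'
    template_name = "workDailyRecord.html"

    def get(self, request, *args, **kwargs):
        try:
            return super().get(request, *args, **kwargs)
        except Http404:
            # no records for today: render yesterday's archive instead
            y = date.today() - timedelta(days=1)
            return WorkDayView.as_view()(
                request, year=str(y.year), month='%02d' % y.month, day='%02d' % y.day)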
