ObjectModel accessing incoming relations - neo4j

I have two nodes, A and B, with a directed relation from A to B.
Thus A has a ConnectedTo attribute of type RelatedTo. However, I want to iterate over all B nodes and access the incoming relations from A.
How can I do this?
I tried adding a ConnectedTo attribute of type RelatedFrom to B, but when querying the graph I get ValueError: Invalid identifier.
class A(GraphObject):
    __primarykey__ = "hash"

    hash = Property()
    ConnectedTo = RelatedTo('B')

    def __init__(self, hash):
        self.hash = hash

class B(GraphObject):
    __primarykey__ = "hash"

    hash = Property()
    ConnectedTo = RelatedFrom('A')

    def __init__(self, hash):
        self.hash = hash
>>> a = A("testA")
>>> b = B("testB")
>>> a.ConnectedTo.add(b)
>>> graph.push(a)
>>> graph.push(b)
>>> test = B.select(graph).first()
Results in error:
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/usr/local/lib/python2.7/dist-packages/py2neo/ogm.py", line 442, in first
    return self._object_class.wrap(super(GraphObjectSelection, self).first())
  File "/usr/local/lib/python2.7/dist-packages/py2neo/ogm.py", line 344, in wrap
    _ = getattr(inst, attr)
  File "/usr/local/lib/python2.7/dist-packages/py2neo/ogm.py", line 90, in __get__
    related[key] = RelatedObjects(cog.node, self.direction, self.relationship_type, self.related_class)
  File "/usr/local/lib/python2.7/dist-packages/py2neo/ogm.py", line 135, in __init__
    self.__relationship_pattern = "(a)<-[_:%s]-(b)" % cypher_escape(relationship_type)
  File "/usr/local/lib/python2.7/dist-packages/py2neo/database/cypher.py", line 221, in cypher_escape
    writer.write_identifier(identifier)
  File "/usr/local/lib/python2.7/dist-packages/py2neo/database/cypher.py", line 78, in write_identifier
    raise ValueError("Invalid identifier")
ValueError: Invalid identifier

The solution was easier than expected:
class TestA(GraphObject):
    __primarykey__ = "hash"

    hash = Property()
    CONNECTEDTO = RelatedTo('TestB')

    def __init__(self, hash):
        self.hash = hash

class TestB(GraphObject):
    __primarykey__ = "hash"

    hash = Property()
    CONNECTEDTO = RelatedFrom('TestA', "CONNECTEDTO")

    def __init__(self, hash):
        self.hash = hash
>>> a = TestA("testA")
>>> b = TestB("testB")
>>> a.CONNECTEDTO.add(b)
>>> graph.push(a)
>>> graph.push(b)
>>> test = TestB.select(graph).first()
>>> list(test.CONNECTEDTO)
[ TestA ]
The important part is RelatedFrom('TestA', 'CONNECTEDTO'): you have to specify what the incoming relationship type is called.
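As an aside (a sketch, not part of the original answer): py2neo's RelatedTo also accepts an explicit relationship type as its second argument, so both ends can name the type they share instead of relying on the attribute name.

class TestA(GraphObject):
    __primarykey__ = "hash"

    hash = Property()
    # Name the outgoing relationship type explicitly as well.
    CONNECTEDTO = RelatedTo('TestB', "CONNECTEDTO")

    def __init__(self, hash):
        self.hash = hash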

blank() got an unexpected keyword argument 'disable' for TextLMDataBunch

I changed the version of fastai to 1.0.60 to support the functions TextLMDataBunch and TextClasDataBunch.
# Language model data
data_lm = TextLMDataBunch.from_df(train_df = df_trn, valid_df = df_val, path = "")
# Classifier model data
data_clas = TextClasDataBunch.from_df(train_df = df_trn, valid_df = df_val, path = "", vocab=data_lm.train_ds.vocab, bs=32)
and I am getting this error when I run the code:
Traceback (most recent call last):
  File "/usr/lib/python3.8/concurrent/futures/process.py", line 239, in _process_worker
    r = call_item.fn(*call_item.args, **call_item.kwargs)
  File "/usr/lib/python3.8/concurrent/futures/process.py", line 198, in _process_chunk
    return [fn(*args) for args in chunk]
  File "/usr/lib/python3.8/concurrent/futures/process.py", line 198, in <listcomp>
    return [fn(*args) for args in chunk]
  File "/usr/local/lib/python3.8/dist-packages/fastai/text/transform.py", line 112, in _process_all_1
    tok = self.tok_func(self.lang)
  File "/usr/local/lib/python3.8/dist-packages/fastai/text/transform.py", line 25, in __init__
    self.tok = spacy.blank(lang, disable=["parser","tagger","ner"])
TypeError: blank() got an unexpected keyword argument 'disable'
"""

The above exception was the direct cause of the following exception:

TypeError                                 Traceback (most recent call last)
<ipython-input-12-6aea115a7181> in <module>
      1 # Language model data
----> 2 data_lm = TextLMDataBunch.from_df(train_df = df_trn, valid_df = df_val, path = "")
      3 # Classifier model data
      4 data_clas = TextClasDataBunch.from_df(train_df = df_trn, valid_df = df_val, path = "", vocab=data_lm.train_ds.vocab, bs=32)

15 frames
/usr/local/lib/python3.8/dist-packages/fastai/text/transform.py in __init__()
     23     "Wrapper around a spacy tokenizer to make it a `BaseTokenizer`."
     24     def __init__(self, lang:str):
---> 25         self.tok = spacy.blank(lang, disable=["parser","tagger","ner"])
     26
     27     def tokenizer(self, t:str) -> List[str]:

TypeError: blank() got an unexpected keyword argument 'disable'
The fastai version cannot be updated because, if it were, the functions TextLMDataBunch and TextClasDataBunch would no longer be supported.
What modifications should I make to this code?
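Not a definitive answer, but a sketch of one workaround, under the assumption that the error comes from spaCy 3.x dropping the disable keyword from spacy.blank() and that the class the traceback points at is fastai.text.transform.SpacyTokenizer in fastai 1.0.60. Either pin spaCy below 3.0, or patch the tokenizer before building the DataBunches:

# Sketch: patch fastai 1.0.60's SpacyTokenizer so it stops passing `disable`
# to spacy.blank(), which spaCy 3.x no longer accepts. Assumes the default
# fork start method on Linux, so the tokenizer worker processes inherit the patch.
import spacy
from fastai.text import transform

def _patched_init(self, lang):
    # spacy.blank() already returns an empty pipeline, so there is nothing to disable.
    self.tok = spacy.blank(lang)

transform.SpacyTokenizer.__init__ = _patched_init

# Then build the DataBunches as before.
data_lm = TextLMDataBunch.from_df(train_df=df_trn, valid_df=df_val, path="")
data_clas = TextClasDataBunch.from_df(train_df=df_trn, valid_df=df_val, path="",
                                      vocab=data_lm.train_ds.vocab, bs=32)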

ValueError Uploading Multiple Files to Model with M2M Field

I have a Django 3.2, Python 3.6 website. I am having issues uploading multiple files to a model that also has an M2M field. I get an error in the save_related method at the indicated line:
ValueError: "<Image: title>" needs to have a value for field "image_id" before this many-to-many relationship can be used.
I have used this same method to upload multiple files to models without an M2M field, so I am not sure where I am going wrong.
models.py
class Tags(models.Model):
    tag_id = models.AutoField(primary_key=True)
    tag_name = models.CharField(max_length=255)

class Image(models.Model):
    image_id = models.AutoField(primary_key=True)
    title = models.CharField(max_length=255)
    description = models.TextField()
    original_image = models.ImageField('original_image', upload_to=settings.ORIGINAL_IMAGE_PATH,)
    exif_data = models.JSONField(default=dict)
    computed_sha256 = models.CharField(editable=False, max_length=64, default="foobar")
    tags = models.ManyToManyField(Tags, blank=True)
admin.py
class ImageForm(forms.ModelForm):
    original_image = forms.ImageField(widget=forms.FileInput(attrs={'multiple': True}))

class ImageAdmin(admin.ModelAdmin):
    form = ImageForm

    class Meta:
        model = Image
        fields = '__all__'

    def save_related(self, request, form, *args, **kwargs):
        tags = form.cleaned_data.pop('tags', ())
        image = form.instance
        for tag in tags:
            image.tags.add(tag)  # error occurs here
        super(ImageAdmin, self).save_related(request, form, *args, **kwargs)

    def save_model(self, request, obj, form, change):
        if form.is_valid():
            if not change:
                # Uploading one or more images
                files = request.FILES.getlist('original_image')
                for f in files:
                    image = Image()
                    if "Title" not in form.cleaned_data:
                        form.cleaned_data['Title'] = clean_title(f.name)
                    image.computed_sha256 = image_processing_utils.compute_sha256(f)
                    image.original_image = f
                    image.description = form.cleaned_data['description']
                    image.exif_data = image_processing_utils.read_exif_data(f)
                    image.save()
            else:
                pass
I could not find a way to upload multiple files to a model with an M2M field, so I punted and took the M2M field out of the model.
models.py
class Image(models.Model):
    image_id = models.AutoField(primary_key=True)
    title = models.CharField(max_length=255, blank=True)
    description = models.TextField()
    original_image = models.FileField('original_image', upload_to=settings.ORIGINAL_IMAGE_PATH,)
    exif_data = models.JSONField(default=dict)
    computed_sha256 = models.CharField(editable=False, max_length=64, default="foobar")

    def __str__(self):
        return self.title

    class Meta:
        db_table = 'Image'

class ImageTags(models.Model):
    image_id = models.ForeignKey(Image, on_delete=models.CASCADE)
    tag_id = models.ForeignKey(Tags, on_delete=models.CASCADE)

    class Meta:
        db_table = 'ImageTags'
admin.py
class ImageAdminForm(forms.ModelForm):
    original_image = forms.ImageField(widget=forms.ClearableFileInput(attrs={'multiple': True}))

    def __init__(self, *args, **kwargs):
        super(ImageAdminForm, self).__init__(*args, **kwargs)
        tag_choices = Tags.objects.values_list('tag_id', 'tag_name')
        self.fields['tags'] = forms.MultipleChoiceField(choices=tag_choices, widget=forms.SelectMultiple, required=False)

class ImageAdmin(admin.ModelAdmin):
    list_display = ('image_id', 'title', 'description', 'views', 'original_image', 'get_tags', 'exif_data', 'created', 'updated')
    readonly_fields = ('thumb_image', 'album_image', 'display_image', 'exif_data', 'views', )
    form = ImageAdminForm

    class Meta:
        model = Image
        fields = '__all__'

    fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('title', 'description', 'original_image',)
        }),
    )

    def get_form(self, request, obj=None, **kwargs):
        # https://stackoverflow.com/questions/1057252/how-do-i-access-the-request-object-or-any-other-variable-in-a-forms-clean-met
        logger.debug("get_form START")
        kwargs['fields'] = flatten_fieldsets(self.fieldsets)
        form = super(ImageAdmin, self).get_form(request, obj, **kwargs)
        form.request_obj = request
        logger.debug("get_form END")
        return form

    def get_fieldsets(self, request, obj=None):
        logger.debug("get_fieldsets START")
        import copy
        fieldsets = copy.deepcopy(super(ImageAdmin, self).get_fieldsets(request, obj))
        logger.debug("1 fieldsets=%s" % fieldsets)
        change_page_fieldset = list(fieldsets[0][1]['fields'])
        logger.debug("1 change_page_fieldset=%s" % change_page_fieldset)
        #if obj:
        if 'tags' not in change_page_fieldset:
            change_page_fieldset.append('tags')
        logger.debug('2 change_page_fieldset=%s' % change_page_fieldset)
        fieldsets[0][1]['fields'] = tuple(change_page_fieldset)
        logger.debug('2 fieldsets=%s' % fieldsets)
        return fieldsets

    def get_tags(self, obj):
        tag_ids = list(ImageTags.objects.filter(image_id=obj).values_list("tag_id", flat=True))
        tag_names = list(Tags.objects.filter(tag_id__in=tag_ids).values_list('tag_name', flat=True))
        return ", ".join([t for t in tag_names])

    def save_model(self, request, obj, form, change):
        logger.debug("save_model START")
        logger.debug("obj=%s, change=%s, valid=%s" % (obj, change, form.is_valid()))
        logger.debug("changed fields=%s" % form.changed_data)
        logger.debug("obj.original_image=%s" % obj.original_image)
        if utils.is_celery_working():
            if form.is_valid():
                if not change:
                    # Uploading one or more images
                    logger.debug("\tvalid form")
                    logger.debug("form.cleaned_data=%s", form.cleaned_data)
                    logger.debug("files=%s" % request.FILES.getlist('original_image'))
                    files = request.FILES.getlist('original_image')
                    for f in files:
                        image = Image()
                        if not form.cleaned_data['title']:
                            image.title = clean_title(f.name)
                        else:
                            image.title = form.cleaned_data['title']
                        logger.debug("form.cleaned_data['title']=%s" % form.cleaned_data['title'])
                        logger.debug("f=%s" % f)
                        image.original_image = f
                        image.description = form.cleaned_data['description']
                        image.save()
                        # save the tags
                        tags = form.cleaned_data['tags']
                        for tag in tags:
                            ImageTags.objects.create(tag_id_id=int(tag), image_id_id=image.pk)
                    #super().save_model(request, obj, form, change)
                else:
                    # processing a change form, so redo all the fields
                    pass
                    #super().save_model(request, obj, form, change)
            else:
                # error - form is invalid
                pass
        else:
            # error - celery not working
            pass
        logger.debug("save_model END")

How to get rid of placements (SERVER or CLIENTS) so that I can transform float32@SERVER to float32?

I am trying to do the learning rate decay challenge of the Building Your Own Federated Learning Algorithm tutorial. I have used the following code:
import nest_asyncio
nest_asyncio.apply()

import collections
import attr
import functools
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff

np.random.seed(0)

emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()

NUM_CLIENTS = 10
BATCH_SIZE = 20
initial_lr = 0.01
decay_rate = 0.0005
minimum_lr = initial_lr / 2

def preprocess(dataset):
    def batch_format_fn(element):
        return (tf.reshape(element['pixels'], [-1, 784]),
                tf.reshape(element['label'], [-1, 1]))
    return dataset.batch(BATCH_SIZE).map(batch_format_fn)

client_ids = np.random.choice(emnist_train.client_ids,
                              size=NUM_CLIENTS, replace=False)
federated_train_data = [preprocess(emnist_train.create_tf_dataset_for_client(x))
                        for x in client_ids]

def create_keras_model():
    return tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(input_shape=(784,)),
        tf.keras.layers.Dense(10, kernel_initializer='zeros'),
        tf.keras.layers.Softmax(),
    ])

def model_fn():
    keras_model = create_keras_model()
    return tff.learning.from_keras_model(
        keras_model,
        input_spec=federated_train_data[0].element_spec,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

@tf.function
def client_update(model, dataset, server_weights, client_optimizer):
    client_weights = model.trainable_variables
    tf.nest.map_structure(lambda x, y: x.assign(y),
                          client_weights, server_weights)
    for batch in dataset:
        with tf.GradientTape() as tape:
            outputs = model.forward_pass(batch)
        grads = tape.gradient(outputs.loss, client_weights)
        grads = tf.clip_by_global_norm(grads, 5.0)[0]
        grads_and_vars = zip(grads, client_weights)
        client_optimizer.apply_gradients(grads_and_vars)
    return client_weights

@tf.function
def server_update(model, mean_client_weights):
    model_weights = model.trainable_variables
    tf.nest.map_structure(lambda x, y: x.assign(y),
                          model_weights, mean_client_weights)
    return model_weights

@tff.tf_computation
def server_init():
    model = model_fn()
    return model.trainable_variables

@tff.federated_computation
def initialize_fn():
    return [tff.federated_value(server_init(), tff.SERVER), tff.federated_value(initial_lr, tff.SERVER)]
    #return tff.federated_value([server_init(), initial_lr], tff.SERVER)

whimsy_model = model_fn()
tf_dataset_type = tff.SequenceType(whimsy_model.input_spec)
str(tf_dataset_type)

model_weights_type = server_init.type_signature.result
str(model_weights_type)

@tff.tf_computation(tf_dataset_type, model_weights_type, tf.float32)
def client_update_fn(tf_dataset, server_weights, LR):
    model = model_fn()
    client_optimizer = tf.keras.optimizers.SGD(learning_rate=LR)
    return client_update(model, tf_dataset, server_weights, client_optimizer)

@tff.tf_computation(model_weights_type)
def server_update_fn(mean_client_weights):
    model = model_fn()
    return server_update(model, mean_client_weights)

federated_server_type = tff.FederatedType(model_weights_type,
                                          tff.SERVER)
federated_dataset_type = tff.FederatedType(tf_dataset_type,
                                           tff.CLIENTS)
#federated_server_type_with_LR = tff.FederatedType([model_weights_type, tff.to_type((tf.float32))], tff.SERVER)
federated_server_type_with_LR = [tff.FederatedType(model_weights_type, tff.SERVER),
                                 tff.FederatedType(tff.to_type((tf.float32)), tff.SERVER)]

@tf.function
def decay_lr(lr):
    if lr - decay_rate > minimum_lr:
        return lr - decay_rate
    else:
        return minimum_lr

@tff.tf_computation(tf.float32)
def decay_lr_fn(lr):
    return decay_lr(lr)

@tff.federated_computation(federated_server_type_with_LR, federated_dataset_type)
def next_fn(server_weights_and_LR, federated_dataset):
    server_weights = server_weights_and_LR[0]
    #LR_SERVER = server_weights_and_LR[1]
    #LR_CLIENTS = tff.federated_broadcast(server_weights_and_LR[1])
    LR = server_weights_and_LR[1]
    LR_NEW = tff.federated_map(decay_lr_fn, LR)
    LR_NEW_CLIENTS = tff.federated_broadcast(LR_NEW)

    # Broadcast the server weights to the clients
    server_weights_at_client = tff.federated_broadcast(server_weights)

    # Each client computes their updated weights
    client_weights = tff.federated_map(
        client_update_fn, (federated_dataset, server_weights_at_client, LR_NEW_CLIENTS))

    # The server averages are updated
    mean_client_weights = tff.federated_mean(client_weights)

    # The server update
    server_weights = tff.federated_map(server_update_fn, mean_client_weights)

    #return server_weights_and_LR
    return [server_weights, LR_NEW]

federated_algorithm = tff.templates.IterativeProcess(
    initialize_fn=initialize_fn,
    next_fn=next_fn)

sorted_client_ids = sorted(emnist_test.client_ids)
sorted_client_ids2 = sorted_client_ids[0:100]

def data(client, source=emnist_test):
    return preprocess(source.create_tf_dataset_for_client(client))

central_emnist_test = (tf.data.Dataset.from_tensor_slices(
    [data(client) for client in sorted_client_ids2])).flat_map(lambda x: x)

def evaluate(server_state):
    keras_model = create_keras_model()
    keras_model.compile(
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]
    )
    keras_model.set_weights(server_state)
    keras_model.evaluate(central_emnist_test)

server_state = federated_algorithm.initialize()
evaluate(server_state[0])

for round in range(15):
    print(round)
    #server_state_temp = federated_algorithm.next(server_state, federated_train_data)
    #server_state = [server_state_temp[0], decaying_lr(round)]
    server_state = federated_algorithm.next(server_state, federated_train_data)
    print(server_state[1])

evaluate(server_state[0])
This code works just fine, but I want to add the learning rate definition to the server_init() function, so basically have the following:
@tff.tf_computation
def server_init():
    model = model_fn()
    return [model.trainable_variables, initial_lr]

@tff.federated_computation
def initialize_fn():
    return tff.federated_value(server_init(), tff.SERVER)
But doing so leads to the following problem:
The return type of `initialize_fn` must be assignable to the first input argument of `next_fn`, but:
`initialize_fn` returned type:
<<float32[784,10],float32[10]>,float32>@SERVER
and the first input argument of `next_fn` is:
<server_weights_and_LR=<<float32[784,10],float32[10]>@SERVER,float32@SERVER>,federated_dataset={<float32[?,784],int32[?,1]>*}@CLIENTS>
The problem is that the return [server_weights, LR_NEW] code at the end of next_fn() has type <<float32[784,10],float32[10]>@SERVER,float32@SERVER>. Both server_weights and LR_NEW already have @SERVER placement. Currently
@tff.tf_computation
def server_init():
    model = model_fn()
    return model.trainable_variables

@tff.federated_computation
def initialize_fn():
    return [tff.federated_value(server_init(), tff.SERVER), tff.federated_value(initial_lr, tff.SERVER)]
also returns <<float32[784,10],float32[10]>@SERVER,float32@SERVER>.
But as I said, I want to change that part. To do so, I want to remove the placements of server_weights and LR_NEW in next_fn and apply the placement to the list containing both of them. How can I do that?
Also, does anyone have a "cleaner" solution to this challenge?
EDIT:
I just want to clarify that the input-output matching for initialize and next is "cyclic": we need a match between the output of initialize and the first input of next, but also between the output of next and its own first input argument.
The first return argument of `next_fn` must be assignable to its first input argument, but found
`next_fn` which returns type:
<<float32[784,10],float32[10]>@SERVER,float32@SERVER>
which does not match its first input argument:
<<float32[784,10],float32[10]>,float32>@SERVER
The problem in your code is the manual creation of federated_server_type_with_LR.
In the type system, <A@SERVER, B@SERVER> is different from <A, B>@SERVER. You can convert the former to the latter with tff.federated_zip(), which promotes the placement to the top level.
Two solutions:
(1) Modify the decorator of next_fn to be @tff.federated_computation(tff.federated_zip(federated_server_type_with_LR), federated_dataset_type)
(2) [preferred, to avoid this kind of issue] Do not create the type manually, and read it from initialize_fn instead. The decorator would be @tff.federated_computation(initialize_fn.type_signature.result, federated_dataset_type)
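To make that concrete, here is a rough sketch assuming the rest of the posted code stays unchanged (in particular model_weights_type, which is derived from server_init, and indexing into a SERVER-placed tuple, which recent TFF releases support): zip the two SERVER-placed values in initialize_fn, build next_fn's decorator from initialize_fn's result type, and zip again on the way out of next_fn so all three types line up.

@tff.federated_computation
def initialize_fn():
    # federated_zip turns <weights@SERVER, float32@SERVER> into <weights, float32>@SERVER.
    return tff.federated_zip(
        (tff.federated_value(server_init(), tff.SERVER),
         tff.federated_value(initial_lr, tff.SERVER)))

@tff.federated_computation(initialize_fn.type_signature.result,
                           federated_dataset_type)
def next_fn(server_weights_and_LR, federated_dataset):
    server_weights = server_weights_and_LR[0]
    LR = server_weights_and_LR[1]
    LR_NEW = tff.federated_map(decay_lr_fn, LR)

    server_weights_at_client = tff.federated_broadcast(server_weights)
    client_weights = tff.federated_map(
        client_update_fn,
        (federated_dataset, server_weights_at_client, tff.federated_broadcast(LR_NEW)))
    mean_client_weights = tff.federated_mean(client_weights)
    new_server_weights = tff.federated_map(server_update_fn, mean_client_weights)

    # Zip again so the output type matches the (zipped) input type exactly.
    return tff.federated_zip((new_server_weights, LR_NEW))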

Use the ordered output parameter and specify a serialization key with marshmallow-sqlalchemy

I'm using marshmallow 3.4.0 and marshmallow-sqlalchemy 0.22.2.
I have this SQLAlchemy model and Marshmallow schema:
class Zone(Base):
    zone_id = Column(Integer, primary_key=True)
    name = Column(String(65))
    location_id = Column(Integer, ForeignKey('locations.location_id'))
    location = relationship("Location", lazy="joined")

class ZoneSchema(SQLAlchemySchema):
    class Meta:
        model = Zone
        fields = ("zone_id", "name", "location")
        ordered = True
        load_instance = True

    zone_id = auto_field(data_key="id")
    location = fields.Nested(LocationSchema)
If I remove
zone_id = auto_field(data_key="id")
the fields are ordered as requested.
If I remove
fields = ("zone_id", "name", "location")
ordered = True
the key is set to id as requested.
If I run the code above, I get this error:
File "[...]/models_schema.py", line 30, in <module>
class ZoneSchema(SQLAlchemySchema):
File "/venvs/backend-api/lib/python3.8/site-packages/marshmallow/schema.py", line 107, in __new__
cls_fields = _get_fields(attrs, base.FieldABC, pop=True, ordered=ordered)
File "/venvs/backend-api/lib/python3.8/site-packages/marshmallow/schema.py", line 57, in _get_fields
fields.sort(key=lambda pair: pair[1]._creation_index)
File "/venvs/backend-api/lib/python3.8/site-packages/marshmallow/schema.py", line 57, in <lambda>
fields.sort(key=lambda pair: pair[1]._creation_index)
AttributeError: 'SQLAlchemyAutoField' object has no attribute '_creation_index'
Could someone explain to me how I can access the zone_id field and add the "data_key" property?
As a last resort, I read the documentation and I managed to find an answer by myself.
class ZoneSchema(SQLAlchemySchema):
    class Meta:
        model = Zone
        fields = ("zone_id", "name", "location")
        ordered = True
        load_instance = True

    location = fields.Nested(LocationSchema)

    @pre_dump
    def set_data_key_for_zone_id(self, data, many, **kwargs):
        self.fields.get("zone_id").data_key = "id"
        return data
As you can see, I kept the ordered = True part and used the @pre_dump decorator to access the zone_id field and set the data_key attribute.
If there is a better way to do this, please let me/us know!
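A possibly cleaner alternative, sketched but not verified against marshmallow-sqlalchemy 0.22.2: with SQLAlchemySchema only declared fields are serialized, and with ordered = True they keep their declaration order, so dropping Meta.fields may let auto_field(data_key="id") and the ordering coexist.

class ZoneSchema(SQLAlchemySchema):
    class Meta:
        model = Zone
        ordered = True       # output follows declaration order
        load_instance = True

    # Declaration order doubles as output order, so Meta.fields is not needed.
    zone_id = auto_field(data_key="id")
    name = auto_field()
    location = fields.Nested(LocationSchema)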

wxPython: write SQL command results to outputbox

I'm trying to get back into Python and I'm once again stuck with this problem I've had before of making objects accessible to one another. In this simple example I am displaying a panel with a button and a text box. Clicking the button calls a function which queries a database and returns a cursor with the retrieved data. I need to make it so that either the lookupSQL function or the ShowClientData function can write this output, in a loop, to the text box. The text box (outputBox) is currently unknown to any other functions. How do I make it so that the other functions know what it is?
import wx
import pypyodbc

conn = pypyodbc.connect(driver='{SQL Server}', server='.', database='TheDB', uid='sa', pwd='Pass')

class Audit(wx.Frame):
    def __init__(self, *args, **kwargs):
        super(Audit, self).__init__(*args, **kwargs)
        self.InitUI()

    def InitUI(self):
        panel = wx.Panel(self)
        hbox = wx.BoxSizer()
        sizer = wx.GridSizer(6, 1, 2, 2)
        btn1 = wx.Button(panel, label='Clients')
        # btn2 through btn6 are referenced below but not shown in this snippet
        outputBox = wx.TextCtrl(panel, -1, style=wx.TE_MULTILINE | wx.TE_READONLY | wx.HSCROLL)
        sizer.AddMany([btn1, btn2, btn3, btn4, btn5, btn6])
        hbox.Add(sizer, 0, wx.ALL, 15)
        hbox.Add(outputBox, 1, wx.EXPAND)
        panel.SetSizer(hbox)
        btn1.Bind(wx.EVT_BUTTON, self.ShowClientData)
        self.SetSize((800, 600))
        self.SetTitle('Audit View')
        self.Centre()
        self.Show(True)

    def ShowClientData(self, event):
        SQL = 'select * from V_UpdatedClient'
        recursor = lookupSQL(SQL)
        for row in recursor:
            rChange = row[0]
            rItemType = row[1]
            rPK = row[2]
            rItemCode = row[3]
            rFieldName = row[4]
            rOldValue = row[5]
            rNewValue = row[6]
            rUpdateDate = row[7]
            rUserName = row[8]
            print('%s %s %s %s %s %s %s %s %s' % (rChange, rItemType, rPK, rItemCode, rFieldName, rOldValue, rNewValue, rUpdateDate, rUserName))

def lookupSQL(SQLString):
    cursor = conn.cursor()
    cursor.execute(SQLString)
    return cursor
    cursor.close()  # never reached; the function returns on the line above

def main():
    ex = wx.App()
    Audit(None)
    ex.MainLoop()

if __name__ == '__main__':
    main()
What you are looking for is called data attributes.
self.outputBox = wx.TextCtrl(panel, -1, style = wx.TE_MULTILINE|wx.TE_READONLY|wx.HSCROLL)
And then within ShowClientData you can write
self.outputBox.AppendText("some text")
As long as you have that self reference, you can access its attributes.
Edit:
When you make the above change, you can't refer to the text box as just outputBox anymore; you should instead access it via self:
hbox.Add(self.outputBox, 1, wx.EXPAND)
Declaring it as a global is very bad!
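Putting it together, a small sketch of ShowClientData appending each row to the text box instead of printing it. This assumes the self.outputBox change above and lives inside the Audit class:

    def ShowClientData(self, event):
        SQL = 'select * from V_UpdatedClient'
        recursor = lookupSQL(SQL)
        for row in recursor:
            # Join all columns of the row and append them as one line.
            self.outputBox.AppendText(' '.join(str(col) for col in row) + '\n')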
