I am getting a ValueError:
The 'specs' argument to make_subplots must be a 2D list of dictionaries with dimensions (1 x 1).
Received value of type <class 'list'>: [[{'secondary_y': False}], [{'secondary_y': True}], [{'colspan': 1}, None]]
I referred to the existing post plotly subplots issue with specs, value error and followed the same approach, but the error still persists.
Below is the code snippet:
import talib as ta
import yfinance as yf
import pandas as pd
import plotly.io as pio
pio.renderers.default='browser'
import plotly.graph_objects as go
from plotly.subplots import make_subplots
'''
Extracting the data
'''
VIP = yf.Ticker('VIPIND.NS')
df = VIP.history(period="max")
df.reset_index(inplace = True)
df['Date'] = pd.to_datetime(df['Date'])
'''
Creating the technical indicators
'''
df['EMA_Close'] = ta.EMA(df.Close,100)
df['MA_Close'] = ta.MA(df.Close,60)
df['MACD'],df['MACDsig'],df['MACDhist']=ta.MACD(df.Close,30,60,15)
'''
###############################
Creating Plots
###############################
'''
'''
Declaring subplots
'''
fig = make_subplots(rows=2, cols=1)#, shared_xaxes=True,print_grid=True)
fig = make_subplots(specs=[[{"secondary_y": False}],[{"secondary_y": True}],[{"colspan": 1}, None]])
'''
Plotting the first row with OHLC, EMA and MA lines
'''
fig.add_trace(go.Candlestick(x=df["Date"], open=df["Open"], high=df["High"],
low=df["Low"], close=df["Close"], name="OHLC",showlegend=True),
row=1, col=1,secondary_y=False)
fig.add_trace(go.Scatter(x=df['Date'], y=df['EMA_Close'], showlegend=True,
name="EMA Close",line=dict(color="MediumPurple")
), row=1, col=1,secondary_y=False)
fig.add_trace(go.Scatter(x=df['Date'], y=df['MA_Close'], showlegend=True,
name="MA Close",line=dict(color="Orange")
), row=1, col=1,secondary_y=False)
'''
Plotting the second row with MACD & MACDSig lines and MACDHist as histogram/bar
'''
fig.add_trace(go.Bar(x=df.Date,
y=df['MACDhist'],showlegend=True,name="MACD Hist",marker=dict(color='black')
), row=2, col=1,secondary_y=False)
fig.add_trace(go.Scatter(x=df['Date'], y=df['MACDsig'], showlegend=True,
name="MACD Signal",line=dict(color="MediumPurple")
), row=2, col=1,secondary_y=True)
fig.add_trace(go.Scatter(x=df['Date'], y=df['MACD'], showlegend=True,
name="MACD",line=dict(color="red")
), row=2, col=1,secondary_y=True)
'''
Updating the layout of the plot
'''
fig.update(layout_xaxis_rangeslider_visible=False)
fig.update_layout(height=600, width=1250)
fig.update_layout(
title='OHLC and Volume',
yaxis_title='Prices (Rs)',
xaxis_title='Dates')
fig.update_layout(template="plotly_white")
fig.update_layout(
margin=dict(l=20, r=20, t=40,b=20),)
# Providing desired Fonts for the plots
fig.update_layout(
font_family="Courier New",
font_color="blue",
title_font_family="Times New Roman",
title_font_color="red",
legend_title_font_color="green")
fig.show()
Requesting guidance on where I am going wrong.
Regards
Sudhir
You are getting the error because the dimensions of your specs do not match the number of rows and cols you defined in your subplot. You have 2 rows and 1 col, which means your specs must be a list with 2x1 shape (i.e. a list of two lists). Here is an example:
specs=[[{"secondary_y": True, "colspan": X, "rowspan": X, "b": 0.05, etc}],
       [{"secondary_y": False}]]
Also, keep in mind that the maximum value colspan can take is the value you define for the cols parameter. Finally, if you need to pass more settings for each subplot, you can simply add them inside the corresponding dictionary.
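Concretely, for the code in the question (2 rows, 1 col, with the secondary y-axis on the second row), a minimal corrected call might look like the sketch below. The ValueError arises because the second make_subplots call omits rows and cols (so they default to 1 x 1) while passing a three-row specs list:
from plotly.subplots import make_subplots
# a single make_subplots call: the specs list must be 2x1 to match rows=2, cols=1
fig = make_subplots(rows=2, cols=1, shared_xaxes=True,
                    specs=[[{"secondary_y": False}],
                           [{"secondary_y": True}]])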
import numpy as np
import rasterio
from rasterio.plot import show
b3 = rasterio.open(r'D:\Rice\Day_1\Micasense\T1\S1\IMG_0370_3.tif')  # raw strings avoid backslash escapes
b4 = rasterio.open(r'D:\Rice\Day_1\Micasense\T1\S1\IMG_0370_4.tif')
red = b3.read(1).astype('float64')
nir = b4.read(1).astype('float64')
np.seterr(divide='ignore', invalid='ignore')
check = np.logical_or ( red > 0, nir > 0 )
ndvi = np.where ( check, (nir - red ) / ( nir + red ), -999 )
ndvi
array([[0.52380952, 0.56526611, 0.53551913, ..., 0.44277822, 0.4546773 ,
0.49622926],
[0.51214361, 0.57994723, 0.5954023 , ..., 0.4851632 , 0.48997135,
0.46975355],
[0.48920086, 0.54266958, 0.56402621, ..., 0.48092745, 0.45281639,
0.49479941],
...,
[0.15959253, 0.40617935, 0.42018072, ..., 0.30028599, 0.22739726,
0.22033898],
[0.30884808, 0.40158616, 0.41691395, ..., 0.23501427, 0.25222552,
0.22693727],
[0.40364188, 0.45576208, 0.45708304, ..., 0.32093023, 0.32592593,
0.30381944]])
import datetime
import pyexcel as pe
data = [[0.52380952], [0.56526611], [0.53551913], [0.44277822], [0.4546773]]
pe.save_as(array=data, dest_file_name="P (21).csv")
import pyexcel
pyexcel.save_as(array=ndvi, dest_file_name=r"D:\Rice\Day_1\T (1)\S_1\P (21).csv", dest_delimiter=':')
At first, I want to mention that the Micasense images were taken from a ground-based structure, not a UAV. I used the above code to extract the vegetation index (NDVI) from a single image set (one red and one NIR image). I have 21 sets like this in a folder, and now I need
to process the image sets in batch,
extract the vegetation values separately and save them in an Excel file, and if possible I need only the mean of the filtered values,
for example, only the mean of the values above 0.3 or something like that.
Thanks and Regards
Haque Md Asrakul
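For the batch-processing part, here is a minimal sketch assuming the filename pattern from the snippet above (IMG_xxxx_3.tif for the red band and IMG_xxxx_4.tif for the NIR band), a 0.3 threshold, and CSV output via pandas rather than pyexcel; the folder path and output filename are placeholders:
import glob, os
import numpy as np
import pandas as pd
import rasterio
folder = r'D:\Rice\Day_1\Micasense\T1\S1'  # placeholder folder holding the 21 image sets
np.seterr(divide='ignore', invalid='ignore')
rows = []
for red_path in sorted(glob.glob(os.path.join(folder, 'IMG_*_3.tif'))):
    nir_path = red_path[:-5] + '4.tif'  # matching NIR band of the same set
    red = rasterio.open(red_path).read(1).astype('float64')
    nir = rasterio.open(nir_path).read(1).astype('float64')
    check = np.logical_or(red > 0, nir > 0)
    ndvi = np.where(check, (nir - red) / (nir + red), -999)
    filtered = ndvi[ndvi > 0.3]  # keep only the values above the threshold
    rows.append({'image': os.path.basename(red_path),
                 'mean_ndvi_above_0.3': filtered.mean() if filtered.size else np.nan})
pd.DataFrame(rows).to_csv(os.path.join(folder, 'ndvi_means.csv'), index=False)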
My 'X' data is a pandas DataFrame of time series. I extracted features of the X data using tsfresh and tried to apply the LightGBM algorithm to classify the data into 0 (Bad) and 1 (Good). But it throws an error. The columns of my X data are:
Index(['0__ratio_beyond_r_sigma__r_1',
       '0__change_quantiles__f_agg_"mean"__isabs_True__qh_0.8__ql_0.0',
       '0__cwt_coefficients__coeff_1__w_20__widths_(2, 5, 10, 20)',
       '0__cwt_coefficients__coeff_1__w_10__widths_(2, 5, 10, 20)',
       '0__change_quantiles__f_agg_"var"__isabs_False__qh_0.8__ql_0.0',
       '0__change_quantiles__f_agg_"mean"__isabs_True__qh_0.4__ql_0.0',
       '0__change_quantiles__f_agg_"mean"__isabs_True__qh_0.8__ql_0.6',
       '0__change_quantiles__f_agg_"mean"__isabs_False__qh_0.4__ql_0.0',
       '0__fft_coefficient__attr_"real"__coeff_3',
       '0__change_quantiles__f_agg_"mean"__isabs_True__qh_1.0__ql_0.0',
       ...
       '0__quantile__q_0.4', '0__fft_coefficient__attr_"imag"__coeff_39',
       '0__large_standard_deviation__r_0.2',
       '0__cwt_coefficients__coeff_13__w_10__widths_(2, 5, 10, 20)',
       '0__fourier_entropy__bins_10',
       '0__fft_coefficient__attr_"angle"__coeff_9',
       '0__fft_coefficient__attr_"imag"__coeff_17',
       '0__fft_coefficient__attr_"angle"__coeff_92', '0__maximum',
       '0__fft_coefficient__attr_"imag"__coeff_32'],
      dtype='object', length=225)
My code is:
import lightgbm as lgb
from sklearn.metrics import confusion_matrix  # used below
import seaborn as sns
d_train = lgb.Dataset(X_train, label=y_train)
lgbm_params = {'learning_rate':0.05, 'boosting_type':'dart',
'objective':'binary',
'metric':['auc', 'binary_logloss'],
'num_leaves':100,
'max_depth':10}
clf = lgb.train(lgbm_params, d_train, 50)
y_pred_lgbm=clf.predict(X_test)
for i in range(0, X_test.shape[0]):
    if y_pred_lgbm[i] >= .5:
        y_pred_lgbm[i] = 1
    else:
        y_pred_lgbm[i] = 0
cm_lgbm = confusion_matrix(y_test, y_pred_lgbm)
sns.heatmap(cm_lgbm, annot=True)
I tried the code below to rename my columns, but it does not work.
import re
X = X.rename(columns=lambda u: re.sub('[^A-Za-z0-9_]+', '', u))
After applying that rename function, the columns look as below:
Index(['0__ratio_beyond_r_sigma__r_1',
'0__change_quantiles__f_agg_mean__isabs_True__qh_08__ql_00',
'0__cwt_coefficients__coeff_1__w_20__widths_251020',
'0__cwt_coefficients__coeff_1__w_10__widths_251020',
'0__change_quantiles__f_agg_var__isabs_False__qh_08__ql_00',
'0__change_quantiles__f_agg_mean__isabs_True__qh_04__ql_00',
'0__change_quantiles__f_agg_mean__isabs_True__qh_08__ql_06',
'0__change_quantiles__f_agg_mean__isabs_False__qh_04__ql_00',
'0__fft_coefficient__attr_real__coeff_3',
'0__change_quantiles__f_agg_mean__isabs_True__qh_10__ql_00',
...
'0__quantile__q_04', '0__fft_coefficient__attr_imag__coeff_39',
'0__large_standard_deviation__r_02',
'0__cwt_coefficients__coeff_13__w_10__widths_251020',
'0__fourier_entropy__bins_10',
'0__fft_coefficient__attr_angle__coeff_9',
'0__fft_coefficient__attr_imag__coeff_17',
'0__fft_coefficient__attr_angle__coeff_92', '0__maximum',
'0__fft_coefficient__attr_imag__coeff_32'],
dtype='object', length=225)
What should I do to get rid of this error?
You can't put special symbols (like quotes, commas, or brackets) in column names, or LightGBM will report this kind of error; it does not support special JSON characters in feature names.
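To make that concrete, here is a hedged sketch of the usual fix (X, X_train, X_test are the names from the question; the de-duplication step is an assumption, since collapsing symbols can make two formerly distinct names identical, which LightGBM also rejects):
import re
# replace anything outside [A-Za-z0-9_] in the tsfresh feature names
clean = [re.sub(r'[^A-Za-z0-9_]+', '_', c) for c in X.columns]
# de-duplicate the sanitized names
counts, unique = {}, []
for c in clean:
    n = counts.get(c, 0)
    unique.append(c if n == 0 else '{}_{}'.format(c, n))
    counts[c] = n + 1
X.columns = unique
# re-split into X_train/X_test *after* renaming so lgb.Dataset sees the clean names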
I would like to use dask.array.map_overlap to deal with the scipy interpolation function. However, I keep meeting errors that I cannot understand, and I am hoping someone can explain them to me.
Here is the error message I have received if I want to run .compute().
ValueError: could not broadcast input array from shape (1070,0) into shape (1045,0)
To resolve the issue, I started to use .to_delayed() to check each partition's output, and this is what I found.
Following is my python code.
Step 1. Load the netCDF file through xarray, then convert to dask.array with chunk size (400, 400).
import numpy as np
import pandas as pd
import xarray as xr
import dask.array as da
df = xr.open_dataset('./Brazil Sentinal2 Tile/' + data_file +'.nc')
lon, lat = df['lon'].data, df['lat'].data
slon = da.from_array(df['lon'], chunks=(400,400))
slat = da.from_array(df['lat'], chunks=(400,400))
data = da.from_array(df.isel(band=0).__xarray_dataarray_variable__.data, chunks=(400,400))
Step 2. Declare a function for da.map_overlap to use.
# imports inferred from usage (an assumption): shapely for the bounding box,
# h3 for the hex grid, and scipy's LinearNDInterpolator as lNDI
from shapely.geometry import box, mapping
from shapely.ops import transform
from scipy.interpolate import LinearNDInterpolator as lNDI
import h3

def sumsum2(lon, lat, data, hex_res=10):
    hex_col = 'hex' + str(hex_res)
    lon_max, lon_min = lon.max(), lon.min()
    lat_max, lat_min = lat.max(), lat.min()
    b = box(lon_min, lat_min, lon_max, lat_max, ccw=True)
    b = transform(lambda x, y: (y, x), b)
    b = mapping(b)
    target_df = pd.DataFrame(h3.polyfill(b, hex_res), columns=[hex_col])
    target_df['lat'] = target_df[hex_col].apply(lambda x: h3.h3_to_geo(x)[0])
    target_df['lon'] = target_df[hex_col].apply(lambda x: h3.h3_to_geo(x)[1])
    tlon, tlat = target_df[['lon', 'lat']].values.T
    abc = lNDI(points=(lon.ravel(), lat.ravel()),
               values=data.ravel())(tlon, tlat)
    target_df['out'] = abc
    print(np.stack([tlon, tlat, abc], axis=1).shape)
    return np.stack([tlon, tlat, abc], axis=1)
Step 3. Apply the da.map_overlap
b = da.map_overlap(sumsum2, slon[:1200,:1200], slat[:1200,:1200], data[:1200,:1200], depth=10, trim=True, boundary=None, align_arrays=False, dtype='float64',
)
Step 4. Use to_delayed() to test the output shapes.
print(b.to_delayed().flatten()[0].compute().shape, )
print(b.to_delayed().flatten()[1].compute().shape)
(1065, 3)
(1045, 0)
(1090, 3)
(1070, 0)
which is saying that some of the outputs from da.map_overlap come back with zero columns (the (1045, 0) and (1070, 0) shapes), while inside sumsum2 the output I am preparing is 2-D with three columns (the (1065, 3) and (1090, 3) shapes).
In addition, if I turn off the trim argument, which is
c = da.map_overlap(sumsum2,
slon[:1200,:1200],
slat[:1200,:1200],
data[:1200,:1200],
depth=10,
trim=False,
boundary=None,
align_arrays=False,
dtype='float64',
)
print(c.to_delayed().flatten()[0].compute().shape, )
print(c.to_delayed().flatten()[1].compute().shape)
The output becomes
(1065, 3)
(1065, 3)
(1090, 3)
(1090, 3)
This seems to be saying that when trim=True, everything gets cut out?
because...
#-- print out the values
b.to_delayed().flatten()[0].compute()[:10,:]
(1065, 3)
array([], shape=(1045, 0), dtype=float64)
while...
#-- print out the values
c.to_delayed().flatten()[0].compute()[:10,:]
array([[ -47.83683837, -18.98359832, 1395.01848583],
[ -47.8482856 , -18.99038681, 2663.68391094],
[ -47.82800624, -18.99207069, 1465.56517187],
[ -47.81897323, -18.97919009, 2769.91556363],
[ -47.82066663, -19.00712956, 1607.85927095],
[ -47.82696896, -18.97167714, 2110.7516765 ],
[ -47.81562653, -18.98302933, 2662.72112163],
[ -47.82176881, -18.98594465, 2201.83205114],
[ -47.84567 , -18.97512514, 1283.20631652],
[ -47.84343568, -18.97270783, 1282.92117225]])
Any thoughts for this?
Thank You.
I guess I got the answer. Please let me know if I am wrong.
The reason I cannot use trim=True is that I change the shape of the output array (after surfing the internet, I noticed that the shape of the output array should be the same as the shape of the input array). Since I change the shape, dask has no idea how to trim it, so it returns an empty array to me (weird).
Using trim=False instead is fine, since I never asked for the buffer zone to be cut out, and the return values are now output correctly (although I still don't know why dask cannot concatenate the chunked arrays; I believe that is also related to the shape).
The solution is to use the delayed function on da.concatenate, which is
from dask import delayed
delayed(da.concatenate)([c.to_delayed().flatten()[idx] for idx in range(len(c.to_delayed().flatten()))])
In this case, we are not relying on the concat step in map_overlap but use our own concat to combine the outputs we want.
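As a concrete sketch of that workaround (using the trim=False array c from above, and substituting a plain NumPy concatenate, since each delayed chunk evaluates to an ordinary (n_i, 3) NumPy array):
import numpy as np
from dask import compute
# evaluate every chunk's output, then stitch the pieces together ourselves
parts = compute(*c.to_delayed().flatten())
combined = np.concatenate([p for p in parts if p.size], axis=0)
print(combined.shape)  # (total number of hex centers, 3)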
I created a custom layer in Python so that I can feed the data directly, but I noticed it runs extremely slowly and the GPU usage is at most 1% (the memory is allocated, i.e. I can see that when I run the script, it allocates 2100MB of VRAM, and terminating the training frees around 1GB).
I'm not sure if this is expected behavior or if I'm doing something wrong.
Here is the script I wrote (based on this former PR):
import json
import caffe
import numpy as np
from random import shuffle
from PIL import Image
class MyDataLayer(caffe.Layer):
    """
    This is a simple datalayer for training a network on CIFAR10.
    """
    def setup(self, bottom, top):
        self.top_names = ['data', 'label']
        # === Read input parameters ===
        params = eval(self.param_str)
        # Check the parameters for validity.
        check_params(params)
        # store input as class variables
        self.batch_size = params['batch_size']
        # Create a batch loader to load the images.
        self.batch_loader = BatchLoader(params, None)
        # === reshape tops ===
        # since we use a fixed input image size, we can shape the data layer
        # once. Else, we'd have to do it in the reshape call.
        top[0].reshape(self.batch_size, 3, params['im_height'], params['im_width'])
        # this is for our label, since we only have one label we set this to 1
        top[1].reshape(self.batch_size, 1)
        print_info("MyDataLayer", params)

    def forward(self, bottom, top):
        """
        Load data.
        """
        for itt in range(self.batch_size):
            # Use the batch loader to load the next image.
            im, label = self.batch_loader.load_next_image()
            # Add directly to the caffe data layer
            top[0].data[itt, ...] = im
            top[1].data[itt, ...] = label

    def reshape(self, bottom, top):
        """
        There is no need to reshape the data, since the input is of fixed size
        (rows and columns)
        """
        pass

    def backward(self, top, propagate_down, bottom):
        """
        These layers do not back propagate
        """
        pass

class BatchLoader(object):
    """
    This class abstracts away the loading of images.
    Images can either be loaded singly, or in a batch. The latter is used for
    the asynchronous data layer to preload batches while other processing is
    performed.
    labels:
        the format is like:
        png_data_batch_1/leptodactylus_pentadactylus_s_000004.png 6
        png_data_batch_1/camion_s_000148.png 9
        png_data_batch_1/tipper_truck_s_001250.png 9
    """
    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.image_root = params['image_root']
        self.im_shape = [params['im_height'], params['im_width']]
        # get list of images and their labels.
        self.image_labels = params['label']
        # getting the list of all image filenames along with their labels
        self.imagelist = [line.rstrip('\n\r') for line in open(self.image_labels)]
        self._cur = 0  # current image
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer()
        print ("BatchLoader initialized with {} images".format(len(self.imagelist)))

    def load_next_image(self):
        """
        Load the next image in a batch.
        """
        # Did we finish an epoch?
        if self._cur == len(self.imagelist):
            self._cur = 0
            shuffle(self.imagelist)
        # Load an image
        image_and_label = self.imagelist[self._cur]  # Get the image index
        # read the image filename (everything before the trailing space and label)
        image_file_name = image_and_label.rsplit(' ', 1)[0]
        # load the image
        im = np.asarray(Image.open(self.image_root + '/' + image_file_name))
        # im = scipy.misc.imresize(im, self.im_shape)  # resize
        # do a simple horizontal flip as data augmentation
        flip = np.random.choice(2)*2-1
        im = im[:, ::flip, :]
        # Load and prepare ground truth
        # read the label (the digit after the final space) as a plain integer
        label = int(image_and_label.rsplit(' ', 1)[1])
        # convert to onehot encoded vector
        # fix: caffe automatically converts the label into one hot encoded vector. so we only need to simply use the decimal number (i.e. the plain label number)
        # one_hot_label = np.eye(10)[label]
        self._cur += 1
        return self.transformer.preprocess(im), label

def check_params(params):
    """
    A utility function to check the parameters for the data layers.
    """
    required = ['batch_size', 'image_root', 'im_width', 'im_height', 'label']
    for r in required:
        assert r in params.keys(), 'Params must include {}'.format(r)

def print_info(name, params):
    """
    Output some info regarding the class
    """
    print ("{} initialized for root: {}, with bs: {}, im_shape: ({}, {}), label file: {}.".format(
        name,
        params['image_root'],
        params['batch_size'],
        params['im_height'],
        params['im_width'],
        params['label']))

class SimpleTransformer:
    """
    SimpleTransformer is a simple class for preprocessing and deprocessing
    images for caffe.
    """
    def __init__(self, mean=[125.30, 123.05, 114.06]):
        self.mean = np.array(mean, dtype=np.float32)
        self.scale = 1.0

    def set_mean(self, mean):
        """
        Set the mean to subtract for centering the data.
        """
        self.mean = mean

    def set_scale(self, scale):
        """
        Set the data scaling.
        """
        self.scale = scale

    def preprocess(self, im):
        """
        preprocess() emulates the pre-processing occurring in the vgg16 caffe
        prototxt.
        """
        im = np.float32(im)
        im = im[:, :, ::-1]  # change to BGR
        im -= self.mean
        im *= self.scale
        im = im.transpose((2, 0, 1))
        return im

    def deprocess(self, im):
        """
        inverse of preprocess()
        """
        im = im.transpose(1, 2, 0)
        im /= self.scale
        im += self.mean
        im = im[:, :, ::-1]  # change to RGB
        return np.uint8(im)
And in my train_test.prototxt file I have:
name: "CIFAR10_SimpleTest_PythonLayer"
layer {
name: 'MyPythonLayer'
type: 'Python'
top: 'data'
top: 'label'
include {
phase: TRAIN
}
python_param {
#the python script filename
module: 'mypythonlayer'
#the class name
layer: 'MyDataLayer'
#needed parameters in json
param_str: '{"phase":"TRAIN", "batch_size":10, "im_height":32, "im_width":32, "image_root": "G:/Caffe/examples/cifar10/testbed/Train and Test using Pycaffe", "label": "G:/Caffe/examples/cifar10/testbed/Train and Test using Pycaffe/train_cifar10.txt"}'
}
}
layer {
name: 'MyPythonLayer'
type: 'Python'
top: 'data'
top: 'label'
include {
phase: TEST
}
python_param {
#the python script filename
module: 'mypythonlayer'
#the class name
layer: 'MyDataLayer'
#needed parameters in json
param_str: '{"phase":"TEST", "batch_size":10, "im_height":32, "im_width":32, "image_root": "G:/Caffe/examples/cifar10/testbed/Train and Test using Pycaffe", "label": "G:/Caffe/examples/cifar10/testbed/Train and Test using Pycaffe/test_cifar10.txt"}'
}
}
What's wrong here?
Your data layer is not efficient enough and it takes most of the training time (you should try caffe time ... to get more detailed profiling). At each forward pass you are waiting for the python layer to read batch_size images from disk, one after the other. This can take forever.
You should consider using multiprocessing to perform the reading in the background while the net is processing the previous batches: this should give you good CPU/GPU utilization.
See this example for multiprocessing python data layer.
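A minimal sketch of that idea, assuming the BatchLoader class from the question (the wrapper below is illustrative, not part of caffe; on Windows, multiprocessing additionally needs the usual picklability and __main__-guard caveats):
import multiprocessing as mp
class PrefetchingLoader(object):
    """Reads batches in a background process while the net trains."""
    def __init__(self, params, queue_size=8):
        self._queue = mp.Queue(maxsize=queue_size)
        self._proc = mp.Process(target=self._fill, args=(params,))
        self._proc.daemon = True  # the producer dies with the parent process
        self._proc.start()
    def _fill(self, params):
        loader = BatchLoader(params, None)
        while True:  # blocks when the queue is full, so memory stays bounded
            batch = [loader.load_next_image() for _ in range(params['batch_size'])]
            self._queue.put(batch)
    def next_batch(self):
        return self._queue.get()  # blocks only if the producer fell behind
forward() would then copy a prefetched batch into top[0].data/top[1].data instead of touching the disk, so the GPU no longer waits on I/O for every batch.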
Python layers are executed on the CPU, not the GPU, so training is slow because data has to keep moving between the CPU and GPU. That's also why you see low GPU usage: the GPU is waiting on the CPU to execute the python layer.
I've run into an issue with the TabularAdapter in the TraitsUI package...
I've been trying to figure this out on my own for much too long now, so I wanted to ask the experts here for some friendly advice :)
I'm going to add a piece of my program that illustrates my problem(s), and I'm hoping someone can look it over and say 'Ah Ha!...Here's your problem' (my fingers are crossed).
Basically, I can use the TabularAdapter to produce a table editor for an array of dtypes, and it works just fine except:
1) whenever I change the # of elements (identified as 'Number of fractures:'), the array gets resized, but the table doesn't reflect the change until after I click on one of the elements. What I'd like to happen is that the # of rows (fractures) changes after I release the # of fractures slider. Is this doable?
2) The second issue I have is that if the array gets resized before it's displayed by .configure_traits() (by the notifier when Number_Of_Fractures gets modified), I can shrink the size of the array, but I can't increase it over the new size.
2b) I thought I'd found a way to have the table editor display the full array even when it's increased over the 5 set in the code (just before calling .configure_traits()), but I was fooled :( I tried adding another Group() in front of the vertical_fracture_group so the table wasn't the first thing to display, which more closely emulates my entire program. When I did this, I was locked into the new, smaller size of the array, and I could no longer increase its size to my maximum of 15. I'm modifying the code to reflect this issue.
Here's my sample code:
# -*- coding: utf-8 -*-
"""
This is a first shot at developing a ****** User Interface using Canopy by
Enthought. Canopy is a distribution of the Python language which has a lot of
scientific and engineering features 'built-in'.
"""
#-- Imports --------------------------------------------------------------------
from traitsui.api import TabularEditor
from traitsui.tabular_adapter import TabularAdapter
from numpy import zeros, dtype
from traits.api import HasTraits, Range
from traitsui.api import View, Group, Item
#-- FileDialogDemo Class -------------------------------------------------------
max_cracks = 15 #maximum number of Fracs/cracks to allow
class VertFractureAdapter(TabularAdapter):
    columns = [('Frac #', 0), ('X Cen', 1), ('Y Cen', 2), ('Z Cen', 3),
               ('Horiz', 4), ('Vert', 5), ('Angle', 6)]

class SetupDialog(HasTraits):
    Number_Of_Fractures = Range(1, max_cracks)  # line 277
    vertical_frac_dtype = dtype([('Fracture', 'int'), ('x', 'float'), ('y', 'float'),
                                 ('z', 'float'), ('Horiz Length', 'float'), ('Vert Length', 'float'),
                                 ('z-axis Rotation, degrees', 'float')])
    vertical_frac_array = zeros((max_cracks), dtype=vertical_frac_dtype)
    vertical_fracture_group = Group(
        Item(name='vertical_frac_array',
             show_label=False,
             editor=TabularEditor(adapter=VertFractureAdapter()),
             width=0.5,
             height=0.5,
             )
    )
    #-- THIS is the actual 'View' that gets put on the screen
    view = View(
        # Note: when this group 'displays' before the one with the table, I'm 'locked' into my new maximum table display size of 8 (not my original/desired maximum of 15)
        Group(
            Item(name='Number_Of_Fractures'),
        ),
        # Note: if I place this Group() first, my table is free to grow to its maximum of 15
        Group(
            Item(name='Number_Of_Fractures'),
            vertical_fracture_group,
        ),
        width=0.60,
        height=0.50,
        title='****** Setup',
        resizable=True,
    )

    #-- Traits Event Handlers --------------------------------------------------
    def _Number_Of_Fractures_changed(self):
        """ Handles resizing arrays if/when the number of Fractures is changed"""
        print "I've changed the # of Fractures to " + repr(self.Number_Of_Fractures)
        #if not self.user_StartingUp:
        self.vertical_frac_array.resize(self.Number_Of_Fractures, refcheck=False)
        for crk in range(self.Number_Of_Fractures):
            self.vertical_frac_array[crk]['Fracture'] = crk+1
            self.vertical_frac_array[crk]['x'] = crk
            self.vertical_frac_array[crk]['y'] = crk
            self.vertical_frac_array[crk]['z'] = crk

# Run the program (if invoked from the command line):
if __name__ == '__main__':
    # Create the dialog:
    fileDialog = SetupDialog()
    fileDialog.configure_traits()
    fileDialog.Number_Of_Fractures = 8
In my discussion with Chris below, he made some suggestions that so far haven't worked for me :( Following is my 'current' version of this test code so Chris (or anyone else who wishes to chime in) can see if I'm making some glaring error.
# -*- coding: utf-8 -*-
"""
This is a first shot at developing a ****** User Interface using Canopy by
Enthought. Canopy is a distribution of the Python language which has a lot of
scientific and engineering features 'built-in'.
"""
#-- Imports --------------------------------------------------------------------
from traitsui.api import TabularEditor
from traitsui.tabular_adapter import TabularAdapter
from numpy import zeros, dtype
from traits.api import HasTraits, Range, Array, List
from traitsui.api import View, Group, Item
#-- FileDialogDemo Class -------------------------------------------------------
max_cracks = 15 #maximum number of Fracs/cracks to allow
class VertFractureAdapter(TabularAdapter):
    columns = [('Frac #', 0), ('X Cen', 1), ('Y Cen', 2), ('Z Cen', 3),
               ('Horiz', 4), ('Vert', 5), ('Angle', 6)]
    even_bg_color = 0xf4f4f4  # very light gray

class SetupDialog(HasTraits):
    Number_Of_Fractures = Range(1, max_cracks)  # line 277
    dummy = Range(1, max_cracks)
    vertical_frac_dtype = dtype([('Fracture', 'int'), ('x', 'float'), ('y', 'float'),
                                 ('z', 'float'), ('Horiz Length', 'float'), ('Vert Length', 'float'),
                                 ('z-axis Rotation, degrees', 'float')])
    vertical_frac_array = Array(dtype=vertical_frac_dtype)
    vertical_fracture_group = Group(
        Item(name='vertical_frac_array',
             show_label=False,
             editor=TabularEditor(adapter=VertFractureAdapter()),
             width=0.5,
             height=0.5,
             )
    )
    #-- THIS is the actual 'View' that gets put on the screen
    view = View(
        Group(
            Item(name='dummy'),
        ),
        Group(
            Item(name='Number_Of_Fractures'),
            vertical_fracture_group,
        ),
        width=0.60,
        height=0.50,
        title='****** Setup',
        resizable=True,
    )

    #-- Traits Event Handlers --------------------------------------------------
    def _Number_Of_Fractures_changed(self, old, new):
        """ Handles resizing arrays if/when the number of Fractures is changed"""
        print "I've changed the # of Fractures to " + repr(self.Number_Of_Fractures)
        vfa = self.vertical_frac_array
        vfa.resize(self.Number_Of_Fractures, refcheck=False)
        for crk in range(self.Number_Of_Fractures):
            vfa[crk]['Fracture'] = crk+1
            vfa[crk]['x'] = crk
            vfa[crk]['y'] = crk
            vfa[crk]['z'] = crk
        self.vertical_frac_array = vfa

# Run the program (if invoked from the command line):
if __name__ == '__main__':
    # Create the dialog:
    fileDialog = SetupDialog()
    # put the actual dialog up...if I put it up 'first' and then resize the array, I seem to get my full range back :)
    fileDialog.configure_traits()
    #fileDialog.Number_Of_Fractures = 8
There are two details of the code that are causing the problems you describe. First, vertical_frac_array is not a trait, so the tabular editor cannot monitor it for changes. Hence, the table only refreshes when you manually interact with it. Second, traits does not monitor the contents of an array for changes, but rather the identity of the array. So, resizing and assigning values into the array will not be detected.
One way to fix this is to first make vertical_frac_array an Array trait, e.g. vertical_frac_array = Array(dtype=vertical_frac_dtype). Then, inside of _Number_Of_Fractures_changed, do not resize vertical_frac_array and modify it in place. Instead, copy vertical_frac_array, resize it, modify the contents, and then reassign the manipulated copy back to vertical_frac_array. This way the table will see that the identity of the array has changed and will refresh the view.
Another option is to make vertical_frac_array a List instead of an Array. This avoids the copy-and-reassign trick above because traits does monitor the content of lists.
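For completeness, a minimal sketch of the List variant (the class name is hypothetical; rows are plain tuples, which the adapter's integer column indices can index into):
from traits.api import HasTraits, List, Range
class SetupDialogList(HasTraits):
    Number_Of_Fractures = Range(1, max_cracks)
    # a List trait: traits monitors the list's contents, so no
    # copy-and-reassign trick is needed when the rows change
    vertical_frac_rows = List()
    def _Number_Of_Fractures_changed(self):
        rows = [(crk + 1, float(crk), float(crk), float(crk), 0.0, 0.0, 0.0)
                for crk in range(self.Number_Of_Fractures)]
        self.vertical_frac_rows[:] = rows  # in-place slice assignment fires items events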
Edit
My solution is below. Instead of resizing the vertical_frac_array whenever Number_Of_Fractures changes, I recreate the array. I also provide a default value for vertical_frac_array via the _vertical_frac_array_default method. (I removed some unnecessary code from the view as well.)
# -*- coding: utf-8 -*-
"""
This is a first shot at developing a ****** User Interface using Canopy by
Enthought. Canopy is a distribution of the Python language which has a lot of
scientific and engineering features 'built-in'.
"""
#-- Imports --------------------------------------------------------------------
from traitsui.api import TabularEditor
from traitsui.tabular_adapter import TabularAdapter
from numpy import dtype, zeros
from traits.api import HasTraits, Range, Array
from traitsui.api import View, Item
#-- FileDialogDemo Class -------------------------------------------------------
max_cracks = 15 #maximum number of Fracs/cracks to allow
vertical_frac_dtype = dtype([('Fracture', 'int'), ('x', 'float'), ('y', 'float'),
('z', 'float'), ('Horiz Length', 'float'), ('Vert Length', 'float')
, ('z-axis Rotation, degrees', 'float')])
class VertFractureAdapter(TabularAdapter):
    columns = [('Frac #', 0), ('X Cen', 1), ('Y Cen', 2), ('Z Cen', 3),
               ('Horiz', 4), ('Vert', 5), ('Angle', 6)]

class SetupDialog(HasTraits):
    Number_Of_Fractures = Range(1, max_cracks)  # line 277
    vertical_frac_array = Array(dtype=vertical_frac_dtype)

    view = View(
        Item('Number_Of_Fractures'),
        Item(
            'vertical_frac_array',
            show_label=False,
            editor=TabularEditor(
                adapter=VertFractureAdapter(),
            ),
            width=0.5,
            height=0.5,
        ),
        width=0.60,
        height=0.50,
        title='****** Setup',
        resizable=True,
    )

    #-- Traits Defaults -------------------------------------------------------
    def _vertical_frac_array_default(self):
        """ Creates the default value of the `vertical_frac_array`. """
        return self._calculate_frac_array()

    #-- Traits Event Handlers -------------------------------------------------
    def _Number_Of_Fractures_changed(self):
        """ Update `vertical_frac_array` when `Number_Of_Fractures` changes """
        print "I've changed the # of Fractures to " + repr(self.Number_Of_Fractures)
        #if not self.user_StartingUp:
        self.vertical_frac_array = self._calculate_frac_array()

    #-- Private Interface -----------------------------------------------------
    def _calculate_frac_array(self):
        arr = zeros(self.Number_Of_Fractures, dtype=vertical_frac_dtype)
        for crk in range(self.Number_Of_Fractures):
            arr[crk]['Fracture'] = crk+1
            arr[crk]['x'] = crk
            arr[crk]['y'] = crk
            arr[crk]['z'] = crk
        return arr

# Run the program (if invoked from the command line):
if __name__ == '__main__':
    # Create the dialog:
    fileDialog = SetupDialog()
    fileDialog.configure_traits()