Extracting averaged nodal stresses from an ODB file and node set using Python (Abaqus)

What I am trying to do:
I want to extract nodal displacements (U2) and stresses (S22), which have been transformed into a local coordinate system, in order to animate their variation with each frame of a given analysis step.
Extracting history outputs and the nodal displacement field is OK. However, getting the stresses seems more cumbersome.
[Animation]
Problems:
The stress field requested at position=NODAL is not node-averaged. How do I obtain node-averaged values?
The nodal stress output also seems somewhat random, even though I request a subset via region = nodeSet. It looks like I am only accessing half of the non-averaged nodal stresses along the path node set I have defined.
I have posted the entire script, but where it goes wrong is the for loop over the frames in the step: for frameNo in range(0,nFrames):
Any input or ideas would be greatly appreciated.
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from odbAccess import openOdb
from abaqusConstants import CARTESIAN, NODAL

odb = openOdb(path="PostJob.odb")
partName = odb.rootAssembly.instances.keys()
print('AVAILABLE PARTS AND PART NAMES IN THE MODEL', partName)
nSets = len(odb.rootAssembly.instances[partName[0]].nodeSets.keys())
nSetKeys = odb.rootAssembly.instances[partName[0]].nodeSets.keys()
print('AVAILABLE SETS IN THE PART ',partName[0],'sets',nSetKeys)
print('NO. OF SETS IN PART INSTANCE',nSets)
# ------------------------------------------------------------------------
# FORCE-DISPLACEMENT CURVES ----------------------------------------------
# ------------------------------------------------------------------------
# Extract U1 and RF1 from the history output
U1data = odb.steps['CZ Analysis'].historyRegions['Node ASSEMBLY.1'].historyOutputs['U1'].data
RF1data = odb.steps['CZ Analysis'].historyRegions['Node ASSEMBLY.1'].historyOutputs['RF1'].data
# The data is in the form of tuples, which we convert to numpy arrays instead.
U1data=np.asarray(U1data)
RF1data = np.array(RF1data)
# Plotting the force-displacement curve
plt.plot(U1data[:,1],RF1data[:,1])
plt.ylabel('Force: RF1 in [N]')
plt.xlabel('Displacement: U1 in [mm]')
plt.xlim((0,2))
plt.title('Force-Displacement Curve')
#plt.show()
# ------------------------------------------------------------------------
# INTERFACE TRACTIONS ----------------------------------------------------
# ------------------------------------------------------------------------
# Access the test node path and create a node path in the visualization module
alpha = 10.0*math.pi/180  # angle with the X-axis
# Perform a coordinate transformation on the nodal coordinates by using the transformation matrix T
R = np.array([[math.cos(alpha), -math.sin(alpha), 0],
              [math.sin(alpha),  math.cos(alpha), 0],
              [0,                0,               1]])
# R = [cos alpha, -sin alpha, 0; sin alpha, cos alpha, 0; 0, 0, 1]
print(R)
e1 = np.array([1,0,0])
e2 = np.array([0,1,0])
e3 = np.array([0,0,1])
e1p = np.dot(R, e1.transpose())
e2p = np.dot(R, e2.transpose())
e3p = np.dot(R, e3.transpose())
origin=[0,0,0]
#plt.figure()
origin = np.array([0, 0, 0])
#plt.quiver([e1p[0],e2p[0],e3p[0]],[e1p[1],e2p[1],e3p[1]], color=['r','g','b'],scale=21)
# Create a new coordinate system
coordSys = odb.rootAssembly.DatumCsysByThreePoints(name='Bond-CSYS', coordSysType=CARTESIAN,
                                                   origin=(0.0, 0.0, 0.0),
                                                   point1=tuple(e1p),
                                                   point2=tuple(e2p))
# Extract the node set and stress field of the node path.
instance = odb.rootAssembly.instances['ADHESIVE']
bondPath = instance.nodeSets['MIDNODESET']
allBondPathNodes = bondPath.nodes[0:-1]
nNodes = len(bondPath.nodes)
print('NUMBER OF NODES IN BONDLINE PATH FOUND FROM MIDNODESET', nNodes)
nodeCoord = np.zeros([3,nNodes])
# Transform the coordinates into the local coordinates of the bondline
jj = -1
for node in allBondPathNodes:
    jj += 1
    nodeCoord[0, jj] = node.coordinates[0]
    nodeCoord[1, jj] = node.coordinates[1]
    nodeCoord[2, jj] = node.coordinates[2]
# plt.figure()
# plt.plot(nodeCoord[0,:],nodeCoord[1,:])
# Transform the coordinates into the local bondline coordinate system
transNodeCoord = np.dot(np.linalg.inv(R),nodeCoord)
# Test coordinates by plotting the line
#plt.plot(transNodeCoord[0,:],transNodeCoord[1,:])
# EXTRACT RELEVANT FIELDS ------
# Create a for loop to loop over the number of frames in the odb:
step = odb.steps['CZ Analysis']
nFrames = len(step.frames)
print("No. of Frames", nFrames)
# Initialize arrays outside for loop
U11 = np.zeros([nNodes,nFrames])
U22 = np.zeros([nNodes,nFrames])
t11 = np.zeros([nNodes,nFrames])
t22 = np.zeros([nNodes,nFrames])
t12 = np.zeros([nNodes,nFrames])
for frameNo in range(0,nFrames):
    # Current frame no in analysis step 'CZ Analysis'
    frame = step.frames[frameNo]
    # Get the stress and displacement field for the current frame
    stressField = frame.fieldOutputs['S']
    displacement = frame.fieldOutputs['U']
    # Transform the displacement and stress field to local coordinate system
    transfStressField = stressField.getTransformedField(datumCsys=coordSys)
    transfDisplacementField = displacement.getTransformedField(datumCsys=coordSys)
    # Extract nodal field results for subset region = bondPath
    pathStressField = transfStressField.getSubset(region=bondPath, position=NODAL)
    pathDisplacementField = transfDisplacementField.getSubset(region=bondPath)
    # Extract displacement field U1 component
    dispU1 = pathDisplacementField.getScalarField(componentLabel='U1')
    dispU2 = pathDisplacementField.getScalarField(componentLabel='U2')
    # Loop over nodal values and append them to np.array
    jj = -1
    for u, v, stress in zip(dispU1.values, dispU2.values, transfStressField.values):
        jj += 1
        U11[jj][frameNo] = u.data
        U22[jj][frameNo] = v.data
        t11[jj][frameNo] = stress.data[0]
        t22[jj][frameNo] = stress.data[1]
        t12[jj][frameNo] = stress.data[3]
        #print('Displacement node label', u.nodeLabel, v.nodeLabel, 'Stress object node label', stress.nodeLabel)
        #print("Node Label", v.nodeLabel, "Displacement Value", v.data, "S11", stress.data[0], "S22", stress.data[1], "S12", stress.data[3])
        #print('POSITION: ', v.position, "Coordinate", node.coordinates, "Displacement Value", v.data)
fig, axs = plt.subplots(2)
axs[1].set_xlabel('x-coordinate')
def animate(i):
    axs[0].clear()
    axs[1].clear()
    #ax.plot(transNodeCoord[0,:], t22[:,i]) # stresses are discontinuous because they are specified at nodes
    #ax.set_title('Displacement: U2', 'Frame No:', i)
    axs[0].set_title('Displacement: U2 , Frame No: {}'.format(i))
    axs[0].plot(np.flip(transNodeCoord[0,:]), U22[:,i])  # Displacements are continuous, but look weird.
    axs[1].set_title('Traction: T22 , Frame No: {}'.format(i))
    axs[1].plot(np.flip(transNodeCoord[0,:]), t22[:,i])
    #ax.set_xlim([0, transNodeCoord[-1,:]])

# Create animation
ani1 = FuncAnimation(fig, animate, frames=32, interval=500, blit=False, repeat=True)
np.set_printoptions(threshold=np.inf)

Abaqus calculates stress at the integration points located inside each 3D element (their number and position depend on the element type). After the stress is calculated, it is extrapolated to the nodes of the element.
Thus, each node gets one value from every element sharing that node. If the mesh is not fine enough, this extrapolation can produce large differences between the values a node receives from adjacent elements. By default, Abaqus averages these values for visualization purposes.
For your task, you can export the values at the nodes and perform the averaging yourself. Alternatively, use results obtained at the element centroid or at the integration points and pair these values with the corresponding coordinates. After obtaining the stress-vs-X-coordinate and strain-vs-X-coordinate data sets, you can interpolate one of them so the X-coordinates of both match.
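For the first option (doing the averaging yourself), one way is to request the field at ELEMENT_NODAL positions and average all contributions that share a node label. Below is a minimal sketch of that idea, reusing transfStressField and bondPath from the question's script; it uses a plain arithmetic mean with no weighting, so treat it as an illustration rather than a drop-in fix.
from collections import defaultdict
from abaqusConstants import ELEMENT_NODAL

# Element-nodal stresses: one value per (element, node) pair, i.e. the
# un-averaged contributions that Abaqus/Viewer would normally average.
elemNodalS = transfStressField.getSubset(region=bondPath, position=ELEMENT_NODAL)

# Collect every S22 contribution per node label, then average.
contributions = defaultdict(list)
for v in elemNodalS.values:
    contributions[v.nodeLabel].append(v.data[1])  # index 1 = S22 component

avgS22 = {label: sum(vals) / len(vals) for label, vals in contributions.items()}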

Related

Why does my feature map seem incorrect when the class prediction is correct?

import numpy as np
import cv2
import torch
import matplotlib.pyplot as plt
from PIL import Image
from torchvision import transforms
from torchvision.models import resnet50, ResNet50_Weights
from torchvision.models.feature_extraction import create_feature_extractor
# Data processing
preprocess = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]
    )])
image_path = './data/test_images/anemone.jpg'
image = Image.open(image_path).convert('RGB')
img_processed = preprocess(image)
batch_img_cat_tensor = torch.unsqueeze(img_processed, 0)
# Model initialization
resnet50_model = resnet50(weights=ResNet50_Weights.IMAGENET1K_V2)
# Eval model for predictions
resnet50_model.eval()
# Creating feature extractor (Detailed example here: https://pytorch.org/blog/FX-feature-extraction-torchvision/)
feature_extractor = create_feature_extractor(resnet50_model,
                                             return_nodes=['layer4.2.conv3', 'fc'])
# Forward pass
out = feature_extractor(batch_img_cat_tensor)
pred = torch.argmax(out['fc'])
# Transforming last conv output to numpy and reshaping it so that the channels would be last
last_conv_output = torch.squeeze(out['layer4.2.conv3'])
last_conv_output = torch.reshape(last_conv_output, (7, 7, -1))
last_conv_output = last_conv_output.detach().numpy()
last_conv_output = last_conv_output.astype(np.uint8)
# Calculating the upscale factors for last conv output
width_factor = int(image.size[0] / last_conv_output.shape[0])
height_factor = int(image.size[1] / last_conv_output.shape[1])
# Getting the shapes of the last conv output
last_conv_w, last_conv_h, n_channels = last_conv_output.shape
# Calculate the upscaled height and width
upscaled_h = last_conv_h * height_factor
upscaled_w = last_conv_w * width_factor
# Upscaling the last_conv_output so that it could be "masked" with original image
upsampled_last_conv_output = np.zeros((upscaled_h, upscaled_w, n_channels))
upsampled_last_conv_output = []
for x in range(0, n_channels, 512):
    upsampled_last_conv_output.append(cv2.resize(last_conv_output[:, :, x:x+512], (upscaled_w, upscaled_h), cv2.INTER_CUBIC))
upsampled_last_conv_output = np.concatenate(upsampled_last_conv_output, axis=2)
# Getting the weights of the predicted class
last_layer_weights = resnet50_model.fc.weight.T
last_layer_weights_for_pred = last_layer_weights[:, pred]
# Dot multiplying the upsampled_last_conv_output with last_layer_weights_for_pred
upsampled_last_conv_output = upsampled_last_conv_output.reshape((-1, 2048))
heat_map = np.dot(upsampled_last_conv_output,
                  last_layer_weights_for_pred.detach().numpy()).reshape(upscaled_h, upscaled_w)
# Plotting the results
fig, ax = plt.subplots()
ax.imshow(image)
ax.imshow(heat_map, cmap='jet', alpha=0.5)
ax.set_title(prediction)
I have followed the tutorial from here: https://www.youtube.com/watch?v=GiyldmoYe_M&t=665s&ab_channel=DigitalSreeni
The main problem with this is that I get the feature map that looks like this:
As you can see, it looks like the model reacts to multiple areas of the image, and no matter which image I use, the strongest response is always in the middle.
P.S. If you think this question should be posted on the AI Stack Exchange, please let me know.
I found an error I made. After creating the heat map with
heat_map = np.dot(upsampled_last_conv_output, last_layer_weights_for_pred.detach().numpy()).reshape(upscaled_h, upscaled_w)
I also had to apply this:
heat_map = heat_map - np.min(heat_map)
heat_map = heat_map / np.max(heat_map)
Since I normalized the image, the generated heat map was also normalized, so I needed to 'denormalize' it back to its original value range.
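For what it's worth, here is a tiny self-contained sketch of that min-max rescale, with a synthetic array standing in for the real heat map (the CAM inputs are not reproduced here):
import numpy as np

def minmax_rescale(a):
    # Linearly rescale an array into [0, 1] so imshow's colormap spans the full range.
    a = a - np.min(a)
    return a / np.max(a)

heat_map = np.random.randn(224, 224) * 7.0 - 3.0  # stand-in for the real CAM output
heat_map = minmax_rescale(heat_map)
print(heat_map.min(), heat_map.max())  # -> 0.0 1.0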

How to write an optimiser for StyleGAN2 interpolation?

I would like to interpolate two images using StyleGAN2-ADA-PyTorch from NVLabs. For simplicity: given two images of different people, I want to create a third image depicting a third person, with the body from the first image and the head from the second. I also have the corresponding w-vectors for the two images ready at hand.
# G is a generative model in line with StyleGAN2, trained to output 512x512 images.
# Latents shape is [1, 16, 512]
G = G.eval().requires_grad_(False).to(device) # type: ignore
num_ws = G.mapping.num_ws # 16
w_dim = G.mapping.w_dim # 512
# Segmentation network is used to extract important parts from images
segmentation_dnn = segmentation_dnn.to(device)
# Source images are represented as latent vectors. I use G to generate actual images:
image_body = image_from_output(G.synthesis(w_body, noise_mode='const'))
image_head = image_from_output(G.synthesis(w_head, noise_mode='const'))
# Custom function is applied to source images, creating masked images.
# In masked images, only head or body is present (and the rest is filled with white pixels)
image_body_masked = apply_segmentation_mask(image_body, segmentation_dnn, select='body')
image_head_masked = apply_segmentation_mask(image_head, segmentation_dnn, select='head')
In order to compare the similarity of any two images, I use VGGLoss:
# VGG16 is used as a feature extractor to evaluate image similarity
url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
with dnnlib.util.open_url(url) as f:
    vgg16 = torch.jit.load(f).eval().to(device)

class VGGLoss(nn.Module):
    def __init__(self, device, vgg):
        super().__init__()
        self.vgg = vgg
        for param in self.parameters():
            param.requires_grad = False
        self.criterion = nn.L1Loss().to(device)

    def forward(self, source, target):
        loss = 0
        source_features = self.vgg(source, resize_images=False, return_lpips=True)
        target_features = self.vgg(target, resize_images=False, return_lpips=True)
        loss += self.criterion(source, target)
        return loss

vgg_loss = VGGLoss(device, vgg=vgg16)
Now, I want to interpolate image_body and image_head, creating image_target.
To do this, I need to find the latent representation of image_target in the latent space of StyleGAN2.
Crudely, we can optimize for a coefficient query_opt that partially includes latents from image_body and image_head: w_target = w_body + (query_opt * (w_head - w_person))
query_opt = torch.randn([1, num_ws, 1], dtype=torch.float32, device=device, requires_grad=True)
optimizer = torch.optim.Adam(query_opt, betas=(0.9, 0.999), lr=initial_learning_rate)

w_out = []
for step in num_steps:
    # Learning rate schedule.
    t = step / num_steps
    lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length)
    lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)
    lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length)
    lr = initial_learning_rate * lr_ramp
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

    # Synth image from w_target using query_opt.
    # This interpolation formula is an important step, and I think my math might be out of order up here
    w_target = w_body + (query_opt * (w_head - w_person))
    image_target = image_from_output(G.synthesis(ws, noise_mode='const'))
    image_target_body_masked = apply_segmentation_mask(image_target, segmentation_dnn, select='body')
    image_target_head_masked = apply_segmentation_mask(image_target, segmentation_dnn, select='head')
    loss = vgg_loss(image_body_masked, image_target_body_masked) + vgg_loss(image_head_masked, image_target_head_masked)

    # Step
    optimizer.zero_grad(set_to_none=True)
    loss.backward()
    optimizer.step()
    logprint(f'step {step+1:>4d}/{num_steps}: loss {float(loss):<5.2f}')

    # Save current w_target
    w_out[step] = w_target.detach()
I can't figure out how to make my optimizer actually target query_opt in such a way that the combined VGGLoss is actually minimized. I must be missing something in my PyTorch code, or maybe even in the main interpolation formula.

Can I use results of MCA for clustering using K-means, DBScan or GMM?

I'm working on a problem where all the variables are categorical, and I applied MCA. When I visualize the MCA results together with the clusters obtained through k-modes (applied independently of MCA), the clusters overlap with each other. I was wondering whether, instead of applying k-modes, I should simply take the MCA components and apply K-means or another clustering algorithm to those components. Does that make sense?
I don't think K-means allows overlapping. Each sample is assigned to the closest cluster only, not to all of them, so there is no overlap. Check out the code sample below.
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import Voronoi

def voronoi_finite_polygons_2d(vor, radius=None):
    """
    Reconstruct infinite voronoi regions in a 2D diagram to finite
    regions.

    Parameters
    ----------
    vor : Voronoi
        Input diagram
    radius : float, optional
        Distance to 'points at infinity'.

    Returns
    -------
    regions : list of tuples
        Indices of vertices in each revised Voronoi regions.
    vertices : list of tuples
        Coordinates for revised Voronoi vertices. Same as coordinates
        of input vertices, with 'points at infinity' appended to the
        end.
    """
    if vor.points.shape[1] != 2:
        raise ValueError("Requires 2D input")

    new_regions = []
    new_vertices = vor.vertices.tolist()

    center = vor.points.mean(axis=0)
    if radius is None:
        radius = vor.points.ptp().max()*2

    # Construct a map containing all ridges for a given point
    all_ridges = {}
    for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):
        all_ridges.setdefault(p1, []).append((p2, v1, v2))
        all_ridges.setdefault(p2, []).append((p1, v1, v2))

    # Reconstruct infinite regions
    for p1, region in enumerate(vor.point_region):
        vertices = vor.regions[region]

        if all([v >= 0 for v in vertices]):
            # finite region
            new_regions.append(vertices)
            continue

        # reconstruct a non-finite region
        ridges = all_ridges[p1]
        new_region = [v for v in vertices if v >= 0]

        for p2, v1, v2 in ridges:
            if v2 < 0:
                v1, v2 = v2, v1
            if v1 >= 0:
                # finite ridge: already in the region
                continue

            # Compute the missing endpoint of an infinite ridge
            t = vor.points[p2] - vor.points[p1]  # tangent
            t /= np.linalg.norm(t)
            n = np.array([-t[1], t[0]])  # normal

            midpoint = vor.points[[p1, p2]].mean(axis=0)
            direction = np.sign(np.dot(midpoint - center, n)) * n
            far_point = vor.vertices[v2] + direction * radius

            new_region.append(len(new_vertices))
            new_vertices.append(far_point.tolist())

        # sort region counterclockwise
        vs = np.asarray([new_vertices[v] for v in new_region])
        c = vs.mean(axis=0)
        angles = np.arctan2(vs[:,1] - c[1], vs[:,0] - c[0])
        new_region = np.array(new_region)[np.argsort(angles)]

        # finish
        new_regions.append(new_region.tolist())

    return new_regions, np.asarray(new_vertices)

# make up data points
np.random.seed(1234)
points = np.random.rand(15, 2)

# compute Voronoi tesselation
vor = Voronoi(points)

# plot
regions, vertices = voronoi_finite_polygons_2d(vor)
print("--")
print(regions)
print("--")
print(vertices)

# colorize
for region in regions:
    polygon = vertices[region]
    plt.fill(*zip(*polygon), alpha=0.4)

plt.plot(points[:,0], points[:,1], 'ko')
plt.axis('equal')
plt.xlim(vor.min_bound[0] - 0.1, vor.max_bound[0] + 0.1)
plt.ylim(vor.min_bound[1] - 0.1, vor.max_bound[1] + 0.1)
I think some clustering algorithms actually do allow overlapping. Do a Google search and you will find what you are looking for.
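As for the idea in the question itself (K-means on MCA components), here is a minimal sketch of that pipeline; it uses one-hot encoding plus TruncatedSVD as a rough stand-in for MCA, and the toy column names are made up, so treat it only as an illustration of the workflow.
import pandas as pd
from sklearn.decomposition import TruncatedSVD
from sklearn.cluster import KMeans

# Toy categorical data (hypothetical columns)
df = pd.DataFrame({
    'color': ['red', 'blue', 'blue', 'green', 'red', 'green'],
    'shape': ['box', 'box', 'ball', 'ball', 'ball', 'box'],
    'size':  ['S', 'M', 'L', 'S', 'M', 'L'],
})

# One-hot encode the categories and reduce to a few continuous components.
# (A proper MCA applies correspondence-analysis scaling to this indicator
# matrix; TruncatedSVD is only a crude stand-in here.)
indicator = pd.get_dummies(df)
components = TruncatedSVD(n_components=2, random_state=0).fit_transform(indicator)

# K-means on the continuous components; each row gets exactly one label,
# which is why the resulting partition cannot overlap.
labels = KMeans(n_clusters=2, n_init=10, random_state=0).fit_predict(components)
print(labels)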
Hope that helps.

How can I change the max sequence length in a Tensorflow RNN Model?

I am currently trying to adapt my TensorFlow classifier, which can tag a sequence of words as positive or negative, to handle much longer sequences without retraining. My model is an RNN with a max sequence length of 210. One input is one word (300-dim); I vectorised the words with Google's word2vec, so I can feed a sequence of at most 210 words. My question is: how can I change the max sequence length to, for example, 3000, for classifying movie reviews?
My working model with a fixed max sequence length of 210 (tf_version: 1.1.0):
n_chunks = 210
chunk_size = 300
x = tf.placeholder("float", [None, n_chunks, chunk_size])
y = tf.placeholder("float", None)
seq_length = tf.placeholder("int64", None)

with tf.variable_scope("rnn1"):
    lstm_cell = tf.contrib.rnn.LSTMCell(rnn_size,
                                        state_is_tuple=True)
    lstm_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell,
                                              input_keep_prob=0.8)
    outputs, _ = tf.nn.dynamic_rnn(lstm_cell, x, dtype=tf.float32,
                                   sequence_length=self.seq_length)
    fc = tf.contrib.layers.fully_connected(outputs, 1000,
                                           activation_fn=tf.nn.relu)
    output = tf.contrib.layers.flatten(fc)
    #*1
    logits = tf.contrib.layers.fully_connected(output, self.n_classes,
                                               activation_fn=None)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        logits=logits, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(cost)
...
#train
#train_x padded to fit (batch_size*n_chunks*chunk_size)
sess.run([optimizer, cost], feed_dict={x: train_x, y: train_y,
                                       seq_length: seq_length})
#predict:
...
pred = tf.nn.softmax(logits)
pred = sess.run(pred, feed_dict={x: word_vecs, seq_length: sq_l})
Modifications I have already tried:
1. Replacing n_chunks with None and simply feeding the data in:
x = tf.placeholder(tf.float32, [None,None,300])
#model fails to build
#ValueError: The last dimension of the inputs to `Dense` should be defined.
#Found `None`.
# at *1
...
#all entries in word_vecs still have the same length, for example
#3000(batch_size*3000(!= n_chunks)*300)
pred = tf.nn.softmax(logits)
pred = sess.run(pred,feed_dict={x:word_vecs, seq_length:sq_l})
2. Changing x and then restoring the old model:
x = tf.placeholder(tf.float32, [None, n_chunks*10, chunk_size])
...
saver = tf.train.Saver(tf.all_variables(), reshape=True)
saver.restore(sess,"...")
#fails as well:
#InvalidArgumentError (see above for traceback): Input to reshape is a
#tensor with 420000 values, but the requested shape has 840000
#[[Node: save/Reshape_5 = Reshape[T=DT_FLOAT, Tshape=DT_INT32,
#_device="/job:localhost/replica:0/task:0/cpu:0"](save/RestoreV2_5,
#save/Reshape_5/shape)]]
# run prediction
If it is possible, could you please provide a working example or explain why it isn't?
I am just wondering: why not simply assign n_chunks a value of 3000?
In your first attempt, you cannot use two Nones, since TensorFlow cannot infer the size of each dimension. The first dimension is set to None because it is contingent upon the batch size. In your second attempt, you change only one place, and the other places where n_chunks is used may conflict with the x placeholder.
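Here is a minimal sketch of that suggestion, assuming the graph is rebuilt with n_chunks = 3000 (the helper name pad_or_truncate is illustrative, not from the post): every review is padded or truncated to the fixed length, and the true length is fed through seq_length so dynamic_rnn ignores the padded steps.
import numpy as np

n_chunks = 3000   # new fixed maximum number of words per review
chunk_size = 300  # word2vec dimension, as in the question

def pad_or_truncate(word_vecs):
    # word_vecs: array of shape (seq_len, chunk_size).
    # Returns a (n_chunks, chunk_size) array plus the true length to feed as seq_length.
    out = np.zeros((n_chunks, chunk_size), dtype=np.float32)
    n = min(len(word_vecs), n_chunks)
    out[:n] = word_vecs[:n]
    return out, n

# Example: a review with 2500 words
review = np.random.rand(2500, chunk_size).astype(np.float32)
padded, true_len = pad_or_truncate(review)
# feed_dict = {x: padded[np.newaxis], seq_length: [true_len]}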

Generating a spectrogram for a sequence of 2D movie frames

I have some data that consists of a sequence of video frames which represent changes in luminance over time relative to a moving baseline. In these videos there are two kinds of 'event' that can occur - 'localised' events, which consist of luminance changes in small groups of clustered pixels, and contaminating 'diffuse' events, which affect most of the pixels in the frame:
I'd like to be able to isolate local changes in luminance from diffuse events. I'm planning on doing this by subtracting an appropriately low-pass filtered version of each frame. In order to design an optimal filter, I'd like to know which spatial frequencies of my frames are modulated during diffuse and local events, i.e. I'd like to generate a spectrogram of my movie over time.
I can find lots of information about generating spectrograms for 1D data (e.g. audio), but I haven't come across much on generating spectrograms for 2D data. What I've tried so far is to generate a 2D power spectrum from the Fourier transform of the frame, then perform a polar transformation about the DC component and then average across angles to get a 1D power spectrum:
I then apply this to every frame in my movie, and generate a raster plot of spectral power over time:
Does this seem like a sensible approach to take? Is there a more 'standard' approach to doing spectral analysis on 2D data?
Here's my code:
import numpy as np
# from pyfftw.interfaces.scipy_fftpack import fft2, fftshift, fftfreq
from scipy.fftpack import fft2, fftshift, fftfreq
from matplotlib import pyplot as pp
from matplotlib.colors import LogNorm
from scipy.signal import windows
from scipy.ndimage.interpolation import map_coordinates
def compute_2d_psd(img, doplot=True, winfun=windows.hamming, winfunargs={}):

    nr, nc = img.shape
    win = make2DWindow((nr, nc), winfun, **winfunargs)

    f2 = fftshift(fft2(img*win))
    psd = np.abs(f2*f2)
    pol_psd = polar_transform(psd, centre=(nr//2, nc//2))

    mpow = np.nanmean(pol_psd, 0)
    stdpow = np.nanstd(pol_psd, 0)

    freq_r = fftshift(fftfreq(nr))
    freq_c = fftshift(fftfreq(nc))
    pos_freq = np.linspace(0, np.hypot(freq_r[-1], freq_c[-1]),
                           pol_psd.shape[1])

    if doplot:
        fig, ax = pp.subplots(2, 2)

        im0 = ax[0,0].imshow(img*win, cmap=pp.cm.gray)
        ax[0,0].set_axis_off()
        ax[0,0].set_title('Windowed image')

        lnorm = LogNorm(vmin=psd.min(), vmax=psd.max())
        ax[0,1].set_axis_bgcolor('k')
        im1 = ax[0,1].imshow(psd, extent=(freq_c[0], freq_c[-1],
                             freq_r[0], freq_r[-1]), aspect='auto',
                             cmap=pp.cm.hot, norm=lnorm)
        # cb1 = pp.colorbar(im1, ax=ax[0,1], use_gridspec=True)
        # cb1.set_label('Power (A.U.)')
        ax[0,1].set_title('2D power spectrum')

        ax[1,0].set_axis_bgcolor('k')
        im2 = ax[1,0].imshow(pol_psd, cmap=pp.cm.hot, norm=lnorm,
                             extent=(pos_freq[0], pos_freq[-1], 0, 360),
                             aspect='auto')
        ax[1,0].set_ylabel('Angle (deg)')
        ax[1,0].set_xlabel('Frequency (cycles/px)')
        # cb2 = pp.colorbar(im2, ax=(ax[0,1],ax[1,1]), use_gridspec=True)
        # cb2.set_label('Power (A.U.)')
        ax[1,0].set_title('Polar-transformed power spectrum')

        ax[1,1].hold(True)
        # ax[1,1].fill_between(pos_freq, mpow - stdpow, mpow + stdpow,
        #                      color='r', alpha=0.3)
        ax[1,1].axvline(0, c='k', ls='--', alpha=0.3)
        ax[1,1].plot(pos_freq, mpow, lw=3, c='r')
        ax[1,1].set_xlabel('Frequency (cycles/px)')
        ax[1,1].set_ylabel('Power (A.U.)')
        ax[1,1].set_yscale('log')
        ax[1,1].set_xlim(-0.05, None)
        ax[1,1].set_title('1D power spectrum')

        fig.tight_layout()

    return mpow, stdpow, pos_freq

def make2DWindow(shape, winfunc, *args, **kwargs):
    assert callable(winfunc)
    r, c = shape
    rvec = winfunc(r, *args, **kwargs)
    cvec = winfunc(c, *args, **kwargs)
    return np.outer(rvec, cvec)

def polar_transform(image, centre=(0,0), n_angles=None, n_radii=None):
    """
    Polar transformation of an image about the specified centre coordinate
    """
    shape = image.shape
    if n_angles is None:
        n_angles = shape[0]
    if n_radii is None:
        n_radii = shape[1]
    theta = -np.linspace(0, 2*np.pi, n_angles, endpoint=False).reshape(-1, 1)
    d = np.hypot(shape[0]-centre[0], shape[1]-centre[1])
    radius = np.linspace(0, d, n_radii).reshape(1, -1)
    x = radius * np.sin(theta) + centre[0]
    y = radius * np.cos(theta) + centre[1]
    # nb: map_coordinates can give crazy negative values using higher order
    # interpolation, which introduce nans when you take the log later on
    output = map_coordinates(image, [x, y], order=1, cval=np.nan,
                             prefilter=True)
    return output
I believe that the approach you describe is in general the best way to do this analysis.
However, I did spot an error in your code:
np.abs(f2*f2)
is not the PSD of the complex array f2; you need to multiply f2 by its complex conjugate instead of by itself (|f2^2| is not the same as |f2|^2).
Instead you should do something like
(f2*np.conjugate(f2)).astype(float)
Or, more cleanly:
np.abs(f2)**2.
The oscillations in the 2D power spectrum are a tell-tale sign of this kind of error (I've done this before myself!).
