(There is a solid line at C and a faint line at T)
I want to detect the line at T. Currently I am using OpenCV to locate the QR code and rotate the image until the QR code is upright. Then I calculate the approximate locations of the C and T marks from the coordinates of the QR code. My code then scans down along the y axis and checks for differences in the green and blue values.
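A simplified sketch of that check (the filename, coordinates, and threshold here are made up purely for illustration):

import cv2

# image has already been rotated so the QR code is upright
img = cv2.imread("strip.jpg")  # placeholder filename

# approximate strip column and row range of the C/T region,
# derived from the QR code coordinates (placeholder values)
strip_x, y_start, y_end = 120, 200, 320

for y in range(y_start, y_end):
    b, g, r = img[y, strip_x]
    # a reddish line shows up as a drop in the green and blue values
    if int(r) - int(g) > 20 and int(r) - int(b) > 20:  # threshold is a guess
        print("possible line at row", y)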
My problem is that even a T line as faint as the one shown should be regarded as positive. How can I improve the detection?
I cropped out just the white strip since I assume you have a way of finding it already. Since we're looking for red, I converted to the LAB colorspace and looked at the "a" channel.
Note: all images of the strip have been transposed (np.transpose) for viewing convenience; it's not done that way in the code.
The "a" channel:
I did a linear reframe to improve the contrast
The image is super noisy. I'm not sure whether this is from the camera or the JPEG compression. I averaged each row to smooth out some of the nonsense.
I graphed the intensities (the x values are the row indices).
I used a mean filter to smooth out the graph.
I ran a mountain climber algorithm to look for peaks and valleys
And then I filtered for peaks with a climb greater than 10 (the second highest peak has a climb of 25.5, the third highest is 4.4).
Using these peaks we can determine that there are two lines and they are (about) here:
import cv2
import numpy as np
import matplotlib.pyplot as plt
# returns direction of gradient
# 1 if positive, -1 if negative, 0 if flat
def getDirection(one, two):
    dx = two - one;
    if dx == 0:
        return 0;
    if dx > 0:
        return 1;
    return -1;

# detects and returns peaks and valleys
def mountainClimber(vals, minClimb):
    # init trackers
    last_valley = vals[0];
    last_peak = vals[0];
    last_val = vals[0];
    last_dir = getDirection(vals[0], vals[1]);

    # get climbing
    peak_valley = []; # index, height, climb (positive for peaks, negative for valleys)
    for a in range(1, len(vals)):
        # get current direction
        sign = getDirection(last_val, vals[a]);
        last_val = vals[a];

        # if not equal, check gradient
        if sign != 0:
            if sign != last_dir:
                # change in gradient, record peak or valley
                if last_dir > 0:
                    # peak
                    last_peak = vals[a];
                    climb = last_peak - last_valley;
                    climb = round(climb, 2);
                    peak_valley.append([a, vals[a], climb]);
                else:
                    # valley
                    last_valley = vals[a];
                    climb = last_valley - last_peak;
                    climb = round(climb, 2);
                    peak_valley.append([a, vals[a], climb]);
                # change direction
                last_dir = sign;

    # filter out very small climbs
    filtered_pv = [];
    for dot in peak_valley:
        if abs(dot[2]) > minClimb:
            filtered_pv.append(dot);
    return filtered_pv;

# run a mean filter over the graph values
def meanFilter(vals, size):
    fil = [];
    filtered_vals = [];
    for val in vals:
        fil.append(val);

        # check if full
        if len(fil) >= size:
            # pop front
            fil = fil[1:];
            filtered_vals.append(sum(fil) / size);
    return filtered_vals;

# averages each row (also gets graph values while we're here)
def smushRows(img):
    vals = [];
    h,w = img.shape[:2];
    for y in range(h):
        ave = np.average(img[y, :]);
        img[y, :] = ave;
        vals.append(ave);
    return vals;

# linear reframe [min1, max1] -> [min2, max2]
def reframe(img, min1, max1, min2, max2):
    copy = img.astype(np.float32);
    copy -= min1;
    copy /= (max1 - min1);
    copy *= (max2 - min2);
    copy += min2;
    return copy.astype(np.uint8);
# load image
img = cv2.imread("strip.png");
# resize
scale = 2;
h,w = img.shape[:2];
h = int(h*scale);
w = int(w*scale);
img = cv2.resize(img, (w,h));
# lab colorspace
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB);
l,a,b = cv2.split(lab);
# stretch contrast
low = np.min(a);
high = np.max(a);
a = reframe(a, low, high, 0, 255);
# smush and get graph values
vals = smushRows(a);
# filter and round values
mean_filter_size = 20;
filtered_vals = meanFilter(vals, mean_filter_size);
for ind in range(len(filtered_vals)):
    filtered_vals[ind] = round(filtered_vals[ind], 2);
# get peaks and valleys
pv = mountainClimber(filtered_vals, 1);
# pull x and y values
pv_x = [ind[0] for ind in pv];
pv_y = [ind[1] for ind in pv];
# find big peaks
big_peaks = [];
for dot in pv:
    if dot[2] > 10: # climb filter size
        big_peaks.append(dot);
print(big_peaks);
# make plot points for the two best
tops_x = [dot[0] for dot in big_peaks];
tops_y = [dot[1] for dot in big_peaks];
# plot
x = [index for index in range(len(filtered_vals))];
fig, ax = plt.subplots()
ax.plot(x, filtered_vals);
ax.plot(pv_x, pv_y, 'og');
ax.plot(tops_x, tops_y, 'vr');
plt.show();
# draw on original image
h,w = img.shape[:2];
for dot in big_peaks:
    y = int(dot[0] + mean_filter_size / 2.0); # adjust for mean filter cutting
    cv2.line(img, (0, y), (w,y), (100,200,0), 2);
# show
cv2.imshow("a", a);
cv2.imshow("strip", img);
cv2.waitKey(0);
Edit:
I was wondering why the lines seemed so far off, and then I realized that I had forgotten to account for the fact that meanFilter reduces the size of the list (it cuts from the front and back). I've updated the code to take that into account.
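(For example, with mean_filter_size = 20, a peak found at filtered index i corresponds to roughly row i + 10 of the original strip, which is what the y = int(dot[0] + mean_filter_size / 2.0) adjustment accounts for.)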
I am currently trying to simulate an optical flow using the following equation:
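From the update step in the code below, x = x .- (v∇I .+ I∇v), the equation is the continuity (advection) equation

∂I/∂t = -(v · ∇I + I ∇·v) = -∇·(I v),

discretized as I(x, y, t+Δt) = I(x, y, t) - Δt (v · ∇I + I ∇·v), with Δt = 1.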
Below is a basic example with a 7x7 image in which the central pixel is illuminated. The velocity I am applying is a uniform x-velocity of 2.
using Interpolations
using PrettyTables
# Generate grid
nx = 7 # Image will be 7x7 pixels
x = zeros(nx, nx)
yy = repeat(1:nx, 1, nx) # grid of y-values
xx = yy' # grid of x-values
# In this example x is the image I in the above equation
x[(nx-1)÷2 + 1, (nx-1)÷2 + 1] = 1.0 # set central pixel equal to 1
# Initialize velocity
velocity = 2;
vx = velocity .* ones(nx, nx); # vx=2
vy = 0.0 .* ones(nx, nx); # vy=0
for t in 1:1
    # create 2d grid interpolator of the image
    itp = interpolate((collect(1:nx), collect(1:nx)), x, Gridded(Linear()));
    # create 2d grid interpolators of vx and vy
    itpvx = interpolate((collect(1:nx), collect(1:nx)), vx, Gridded(Linear()));
    itpvy = interpolate((collect(1:nx), collect(1:nx)), vy, Gridded(Linear()));
    ∇I_x = Array{Float64}(undef, nx, nx); # Initialize array for ∇I_x
    ∇I_y = Array{Float64}(undef, nx, nx); # Initialize array for ∇I_y
    ∇vx_x = Array{Float64}(undef, nx, nx); # Initialize array for ∇vx_x
    ∇vy_y = Array{Float64}(undef, nx, nx); # Initialize array for ∇vy_y
    for i=1:nx
        for j=1:nx
            # gradient of image in x and y directions
            Gx = Interpolations.gradient(itp, i, j);
            ∇I_x[i, j] = Gx[2];
            ∇I_y[i, j] = Gx[1];
            Gvx = Interpolations.gradient(itpvx, i, j) # gradient of vx in both directions
            Gvy = Interpolations.gradient(itpvy, i, j) # gradient of vy in both directions
            ∇vx_x[i, j] = Gvx[2];
            ∇vy_y[i, j] = Gvy[1];
        end
    end
    v∇I = (vx .* ∇I_x) .+ (vy .* ∇I_y) # v dot ∇I
    I∇v = x .* (∇vx_x .+ ∇vy_y) # I dot ∇v
    x = x .- (v∇I .+ I∇v) # I(x, y, t+dt)
    pretty_table(x)
end
What I expect is that the illuminated pixel in x will shift two pixels to the right in x_predicted. What I am seeing is the following:
The original illuminated pixel's value is moved to the neighboring pixel twice, rather than being shifted two pixels to the right; i.e. the neighboring pixel goes from 0 to 2 and the original pixel goes from 1 to -1. I'm not sure if I'm messing up the equation or if I'm thinking of velocity in the wrong way here. Any ideas?
Without looking into it too deeply, I think there are a couple of potential issues here:
Violation of the Courant Condition
The code you originally posted (I've edited it now) simulates a single timestep. I would not expect a cell 2 units away from your source cell to be activated in a single timestep. Doing so would violate the Courant condition. From Wikipedia:
The principle behind the condition is that, for example, if a wave is moving across a discrete spatial grid and we want to compute its amplitude at discrete time steps of equal duration, then this duration must be less than the time for the wave to travel to adjacent grid points.
The Courant condition requires that uΔt/Δx <= 1 (for an explicit time-marching solver such as the one you've implemented). Plugging in u=2, Δt=1, Δx=1 gives 2, which is greater than 1, so you have a mathematical problem. The general way of fixing this problem is to make Δt smaller. You probably want something like:
x = x .- Δt*(v∇I .+ I∇v) # I(x, y, t+dt)
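For example, with u = 2 and Δx = 1, the condition uΔt/Δx <= 1 forces Δt <= 0.5, so advancing the solution by one time unit (over which the bright pixel should move u*t = 2 cells) takes at least 1/0.5 = 2 sub-steps, e.g. for t in 1:2 with Δt = 0.5.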
Missing gradients?
I'm a little concerned about what's going on here:
Gvx = Interpolations.gradient(itpvx, i, j) # gradient of vx in both directions
Gvy = Interpolations.gradient(itpvy, i, j) # gradient of vy in both directions
∇vx_x[i, j] = Gvx[2];
∇vy_y[i, j] = Gvy[1];
You're able to pull two gradients out of both Gvx and Gvy, but you're only using one from each of them. Does that mean you're throwing information away?
https://scicomp.stackexchange.com/ is likely to provide better help with this.
I want to separate the handwriting from the background as perfectly as possible in images like the following:
At first it looks like one could separate the pixels by color, but plotting the pixels by brightness and relative blue content does not give a clear separation:
Using the above separating lines to keep only the pixels in the upper left area (and set the other pixels to white) we get the following result:
The handwriting is not fully extracted, and the (printed) numbers start to appear. So it does not seem possible to improve the result just by adjusting the separating lines. Are there any other possibilities to improve the result?
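For reference, a minimal sketch of how such a brightness / relative-blue scatter can be produced (the filename and the definition of "relative blue content" as B/(R+G+B) are assumptions):

import cv2
import numpy as np
import matplotlib.pyplot as plt

img = cv2.imread("writing.png").astype(np.float32)
b, g, r = cv2.split(img)

brightness = (r + g + b) / 3.0
relative_blue = b / (r + g + b + 1e-6)  # avoid division by zero on black pixels

plt.scatter(brightness.ravel(), relative_blue.ravel(), s=1, alpha=0.05)
plt.xlabel("brightness")
plt.ylabel("relative blue content")
plt.show()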
This is as far as I could get with simple techniques. I'm using thresholding to get the letters plus bits of numbers and contours to filter out the little number bits. I also end up losing the dots on the i's doing this. If you have control over the handwriting, it'd be a lot easier and cleaner to separate out red ink since the black numbers have some blue in them.
k-means clustering might get you better results, but I've forgotten how to do that in OpenCV :p (a rough sketch is included after the code below).
import cv2
import numpy as np
# load image
img = cv2.imread("writing.png");
# convert to lab colorspace
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB);
l, a, b = cv2.split(lab);
# threshold on b channel
done = False;
low = 0;
high = 124; # [0, 124, 8] b-channel
size = 8;
while not done:
    # copy image
    copy = b.copy();

    # threshold
    thresh = cv2.inRange(copy, low, high);

    # contours
    _, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE);

    # filter contours by size
    big_cntrs = [];
    marked = img.copy();
    for contour in contours:
        area = cv2.contourArea(contour);
        if area > size:
            big_cntrs.append(contour);
    cv2.drawContours(marked, big_cntrs, -1, (0, 255, 0), 3);

    # show
    cv2.imshow("original", img);
    cv2.imshow("marked", marked);
    cv2.imshow("thresh", thresh);
    key = cv2.waitKey(1);

    # check keypress
    done = key == ord('z');
    if key == ord('d'):
        high += 1;
    if key == ord('a'):
        high -= 1;
    if key == ord('w'):
        low += 1;
    if key == ord('s'):
        low -= 1;
    if key == ord('e'):
        size += 1;
    if key == ord('q'):
        size -= 1;
    print([low, high, size]);
# create a mask of the contoured image
mask = np.zeros_like(thresh);
mask = cv2.drawContours(mask, big_cntrs, -1, 255, -1);
cv2.imshow("Mask", mask);
cv2.waitKey(0);
cv2.imwrite("masked.png", mask);
I have tried to extract the dark line inside very noisy images without success. Some tips?
My current steps for the first example:
1) Clahe: with clip_limit = 10 and grid_size = (8,8)
2) Box Filter: with size = (5,5)
3) Inverted Image: 255 - image
4) Threshold: when inverted_image < 64
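For reference, a minimal OpenCV sketch of those four steps with the parameter values listed above (the filename is a placeholder):

import cv2
import numpy as np

img = cv2.imread("noisy_line.png", cv2.IMREAD_GRAYSCALE)  # placeholder filename

# 1) CLAHE with clip_limit = 10 and grid_size = (8, 8)
clahe = cv2.createCLAHE(clipLimit=10, tileGridSize=(8, 8))
eq = clahe.apply(img)

# 2) box filter with size = (5, 5)
blurred = cv2.blur(eq, (5, 5))

# 3) inverted image: 255 - image
inverted = 255 - blurred

# 4) threshold: keep the pixels where inverted_image < 64
mask = np.where(inverted < 64, 255, 0).astype(np.uint8)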
UPDATE
I have performed some preprocessing steps to improve the quality of the tested images. I adjusted my ROI mask to crop the top and bottom (because they have low intensities) and added an illumination correction to make the line easier to see. The current images are below:
Even though the images are noisy, you are only looking for straight lines oriented towards the top of the image. So why not use some kind of matched filter with morphological operations?
EDIT: I have modified it.
1) Use a median filter along the x and y axes, and normalize the images.
2) Apply a matched filter with all possible orientations of lines.
% im=imread('JwXON.png');
% im=imread('Fiy72.png');
% im=imread('Ya9AN.png');
im=imread('OcgaIt8.png');
imOrig=im;
matchesx = fl(im, 1);
matchesy = fl(im, 0);
matches = matchesx + matchesy;
[x, y] = find(matches);
figure(1);
imagesc(imOrig), axis image
hold on, plot(y, x, 'r.', 'MarkerSize',5)
colormap gray
%----------
function matches = fl(im, direc)
if size(im,3)~=1
    im = double(rgb2gray(im));
else
    im=double(im);
end
[n, m] = size(im);
mask = bwmorph(imfill(im>0,'holes'),'thin',10);
indNaN=find(im==0); im=255-im; im(indNaN)=0;
N = n - numel(find(im(:,ceil(m/2))==0));
N = ceil(N*0.8); % possible line length
% Normalize the image with median filter
if direc
    background= medfilt2(im,[1,30],'symmetric');
    thetas = 31:149;
else
    background= medfilt2(im,[30,1],'symmetric');
    thetas = [1:30 150:179];
end
normIm = im - background;
normIm(normIm<0)=0;
% initialize matched filter result
matches=im*0;
% search for different angles of lines
for theta=thetas
    normIm2 = imclose(normIm>0,strel('line',5,theta));
    normIm3 = imopen(normIm2>0,strel('line',N,theta));
    matches = matches + normIm3;
end
% eliminate false alarms
matches = imclose(matches,strel('disk',2));
matches = matches>3 & mask;
matches = bwareaopen(matches,100);
I have a processed image with text in it and I want to find the coordinates of lines which would touch the edges of the text field, but would not cross it, and would stretch through the whole side of the text. The image below shows what I need (the red lines I drew show an example of the coordinates I want to find on a raw image):
It is not so straightforward: I can't just find the edges of the processed field of text (upper left, upper right, and so on), because it may be, for example, the start of a paragraph (this is just an example of a possible scenario):
The sides of the text form straight lines; it is only the top and bottom edges that may be curved, so that could make things easier.
What is the best way to do this?
Any method I can think of is either impractical, inefficient, or likely to give false results.
The raw image in case someone needs for processing:
The idea is to find the convex hull of all of the text. After we find the convex hull we find its sides. If a side has a big change in its y coordinate and a small change in the x coordinate (i.e. the line has a high slope), we will consider it a side line.
The resulting image:
The code:
import cv2
import numpy as np
def getConvexCoord(convexH, ind):
    yLines = []
    xLine = []
    for index in range(len(ind[0])):
        convexIndex = ind[0][index]
        # Get point
        if convexIndex == len(convexH) - 1:
            p0 = convexH[0]
            p1 = convexH[convexIndex]
        else:
            p0 = convexH[convexIndex]
            p1 = convexH[convexIndex + 1]
        # Add y coordinate
        yLines.append(p0[0, 1])
        yLines.append(p1[0, 1])
        xLine.append(p0[0, 0])
        xLine.append(p1[0, 0])
    return yLines,xLine

def filterLine(line):
    sortX = sorted(line)
    # Find the median
    xMedian = np.median(sortX)
    while ((sortX[-1] - sortX[0]) > I.shape[0]):
        # Find out which is farther from the median and discard
        lastValueDistance = np.abs(xMedian - sortX[-1])
        firstValueDistance = np.abs(xMedian - sortX[0])
        if lastValueDistance > firstValueDistance:
            # Discard last
            del sortX[-1]
        else:
            # Discard first
            del sortX[0]
    # Now return minX and maxX
    return max(sortX),min(sortX)
# Read image
Irgb = cv2.imread('text.jpg')
I = Irgb[:,:,0]
# Threshold
ret, Ithresh = cv2.threshold(I,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# Find the convex hull of the text
textPixels = np.nonzero(Ithresh)
textPixels = zip(textPixels[1],textPixels[0])
convexH = cv2.convexHull(np.asarray(textPixels))
# Find the side edges in the convex hull
m = []
for index in range((len(convexH))-1):
    # Calculate the angle of the line
    point0 = convexH[index]
    point1 = convexH[index+1]
    if (point1[0,0]-point0[0,0]) == 0:
        m.append(90)
    else:
        m.append(float((point1[0,1]-point0[0,1]))/float((point1[0,0]-point0[0,0])))
# Final line
point0 = convexH[index+1]
point1 = convexH[0]
if (point1[0,0]-point0[0,0]) == 0:
    m.append(90)
else:
    m.append(np.abs(float((point1[0,1]-point0[0,1]))/float((point1[0,0]-point0[0,0]))))
# Take all the lines with the big m
ind1 = np.where(np.asarray(m)>1)
ind2 = np.where(np.asarray(m)<-1)
# For both lines find min Y an max Y
yLines1,xLine1 = getConvexCoord(convexH,ind1)
yLines2,xLine2 = getConvexCoord(convexH,ind2)
yLines = yLines1 + yLines2
# Filter xLines. If the difference between the min and the max is more than 1/2 the size of the image, we filter it out
minY = np.min(np.asarray(yLines))
maxY = np.max(np.asarray(yLines))
maxX1,minX1 = filterLine(xLine1)
maxX2,minX2 = filterLine(xLine2)
# Change final lines to have minY and maxY
line1 = ((minX1,minY),(maxX1,maxY))
line2 = ((maxX2,minY),(minX2,maxY))
# Plot lines
IrgbWithLines = Irgb
cv2.line(IrgbWithLines,line1[0],line1[1],(0, 0, 255),2)
cv2.line(IrgbWithLines,line2[0],line2[1],(0, 0, 255),2)
Remarks:
The algorithm assumes that the y coordinate change is bigger than the x coordinate change. This will not be true for very high perspective distortions (45 degrees). In this case maybe you should use k-means on the slopes and take the group with the higher slopes as the vertical lines.
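A rough sketch of that fallback, clustering the absolute slopes computed in the code above into two groups with cv2.kmeans (untested, just to illustrate the idea):

import cv2
import numpy as np

# m is the list of edge slopes computed from the convex hull above
slopes = np.abs(np.array(m, dtype=np.float32)).reshape(-1, 1)

criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
_, labels, centers = cv2.kmeans(slopes, 2, None, criteria, 10,
                                cv2.KMEANS_RANDOM_CENTERS)

# the cluster with the larger mean |slope| holds the (near-)vertical side edges
vertical_cluster = int(np.argmax(centers))
vertical_edge_indices = np.where(labels.ravel() == vertical_cluster)[0]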
The lines marked in red colour on the sides could be found using an image closing operation.
Below is the MATLAB output after an imclose operation with a square structuring element of size 4.
The MATLAB code is as follows:
I = rgb2gray(imread('image.jpg'));
imshow(I); title('image');
Ibinary = im2bw(I);
figure,imshow(Ibinary);
se = strel('square',4);
Iclose = imclose(Ibinary,se);
figure,imshow(Iclose); title('side lines');
I'm using Hough Lines to do corner detection for this image. I plan to find the intersections of the lines as the corners.
This is the image.
Unfortunately, Hough returns lots of lines for each line I expect.
How do I tune the Hough Lines so that there are only four lines, each corresponding to an actual line in the image?
OpenCV's Hough transform could really use some better non-maximum suppression. Without it, you get this phenomenon of duplicate lines. Unfortunately I know of no easy way to tune that, besides reimplementing your own Hough transform. (Which is a valid option: the Hough transform is fairly simple.)
Fortunately it is easy to fix in post-processing:
For the non-probabilistic Hough transform, OpenCV will return the lines in order of their confidence, with the strongest line first. So simply take the first four lines that differ strongly in either rho or theta.
so, add the first line found by HoughLines into a new List: strong_lines
for each line found by HoughLines:
test whether its rho and theta are close to any strong_line (e.g. rho is within 50 pixels and theta is within 10° of the other line)
if not, put it into the list of strong_lines
if you have found 4 strong_lines, break
I implemented the approach described by HugoRune and thought I would share my code as an example of how I implemented it. I used a tolerance of 5 degrees and 10 pixels.
strong_lines = np.zeros([4,1,2])
minLineLength = 2
maxLineGap = 10
lines = cv2.HoughLines(edged,1,np.pi/180,10, minLineLength, maxLineGap)
n2 = 0
for n1 in range(0,len(lines)):
    for rho,theta in lines[n1]:
        if n1 == 0:
            strong_lines[n2] = lines[n1]
            n2 = n2 + 1
        else:
            if rho < 0:
                rho*=-1
                theta-=np.pi
            closeness_rho = np.isclose(rho,strong_lines[0:n2,0,0],atol = 10)
            closeness_theta = np.isclose(theta,strong_lines[0:n2,0,1],atol = np.pi/36)
            closeness = np.all([closeness_rho,closeness_theta],axis=0)
            if not any(closeness) and n2 < 4:
                strong_lines[n2] = lines[n1]
                n2 = n2 + 1
EDIT: The code was updated to reflect the comment regarding a negative rho value
Collect the intersections of all lines:
for (int i = 0; i < lines.size(); i++)
{
    for (int j = i + 1; j < lines.size(); j++)
    {
        cv::Point2f pt = computeIntersectionOfTwoLine(lines[i], lines[j]);
        if (pt.x >= 0 && pt.y >= 0 && pt.x < image.cols && pt.y < image.rows)
        {
            corners.push_back(pt);
        }
    }
}
You can google the algorithm to find the intersection of two lines.
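computeIntersectionOfTwoLine is not shown above; for two lines in the (rho, theta) form returned by HoughLines, a minimal Python sketch of that computation could be:

import numpy as np

def intersection(line1, line2):
    # each line satisfies x*cos(theta) + y*sin(theta) = rho
    rho1, theta1 = line1
    rho2, theta2 = line2
    A = np.array([[np.cos(theta1), np.sin(theta1)],
                  [np.cos(theta2), np.sin(theta2)]])
    b = np.array([rho1, rho2])
    if abs(np.linalg.det(A)) < 1e-10:
        return None  # (nearly) parallel lines, no usable intersection
    x, y = np.linalg.solve(A, b)
    return (x, y)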
Once you collect all the intersection points, you can easily determine the min and max, which will give you the top-left and bottom-right points. From these two points you can easily get the rectangle.
Refer to these two links: Sorting 2d point array to find out four corners and http://opencv-code.com/tutorials/automatic-perspective-correction-for-quadrilateral-objects/
Here is a complete solution written in Python 2.7.x using OpenCV 2.4.
It is based on ideas from this thread.
Method: Detect all lines. Assume that the Hough function returns highest ranked lines first. Filter the lines to keep those that are separated by some minimum distance and/or angle.
Image of all Hough lines:
https://i.ibb.co/t3JFncJ/all-lines.jpg
Filtered lines:
https://i.ibb.co/yQLNxXT/filtered-lines.jpg
Code:
http://codepad.org/J57oVIzs
"""
Detect the best 4 lines for a rounded rectangle.
"""
import numpy as np
import cv2
input_image = cv2.imread("image.jpg")
def drawLines(img, lines):
    """
    Draw lines on an image
    """
    for line in lines:
        for rho,theta in line:
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a*rho
            y0 = b*rho
            x1 = int(x0 + 1000*(-b))
            y1 = int(y0 + 1000*(a))
            x2 = int(x0 - 1000*(-b))
            y2 = int(y0 - 1000*(a))
            cv2.line(img, (x1,y1), (x2,y2), (0,0,255), 1)
input_image_grey = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)
edged = input_image_grey
rho = 1 # 1 pixel
theta = 1.0*0.017 # 1 degree
threshold = 100
lines = cv2.HoughLines(edged, rho, theta, threshold)
# Fix negative angles
num_lines = lines.shape[1]
for i in range(0, num_lines):
    line = lines[0,i,:]
    rho = line[0]
    theta = line[1]
    if rho < 0:
        rho *= -1.0
        theta -= np.pi
    line[0] = rho
    line[1] = theta
# Draw all Hough lines in red
img_with_all_lines = np.copy(input_image)
drawLines(img_with_all_lines, lines)
cv2.imshow("Hough lines", img_with_all_lines)
cv2.waitKey()
cv2.imwrite("all_lines.jpg", img_with_all_lines)
# Find 4 lines with unique rho & theta:
num_lines_to_find = 4
filtered_lines = np.zeros([1, num_lines_to_find, 2])
if lines.shape[1] < num_lines_to_find:
    print("ERROR: Not enough lines detected!")
# Save the first line
filtered_lines[0,0,:] = lines[0,0,:]
print("Line 1: rho = %.1f theta = %.3f" % (filtered_lines[0,0,0], filtered_lines[0,0,1]))
idx = 1 # Index to store the next unique line
# Initialize all rows the same
for i in range(1,num_lines_to_find):
    filtered_lines[0,i,:] = filtered_lines[0,0,:]
# Filter the lines
num_lines = lines.shape[1]
for i in range(0, num_lines):
    line = lines[0,i,:]
    rho = line[0]
    theta = line[1]

    # For this line, check which of the existing 4 it is similar to.
    closeness_rho = np.isclose(rho, filtered_lines[0,:,0], atol = 10.0) # 10 pixels
    closeness_theta = np.isclose(theta, filtered_lines[0,:,1], atol = np.pi/36.0) # 10 degrees
    similar_rho = np.any(closeness_rho)
    similar_theta = np.any(closeness_theta)
    similar = (similar_rho and similar_theta)

    if not similar:
        print("Found a unique line: %d rho = %.1f theta = %.3f" % (i, rho, theta))
        filtered_lines[0,idx,:] = lines[0,i,:]
        idx += 1

    if idx >= num_lines_to_find:
        print("Found %d unique lines!" % (num_lines_to_find))
        break
# Draw filtered lines
img_with_filtered_lines = np.copy(input_image)
drawLines(img_with_filtered_lines, filtered_lines)
cv2.imshow("Filtered lines", img_with_filtered_lines)
cv2.waitKey()
cv2.imwrite("filtered_lines.jpg", img_with_filtered_lines)
The above approach (proposed by @HugoRune and implemented by @Onamission21) is correct but has a little bug. cv2.HoughLines may return a negative rho with theta up to pi. Notice, for example, that the line (r0, 0) is very close to the line (-r0, pi - epsilon), but they would not be matched by the above closeness test.
I simply treated negative rhos by applying rho *= -1, theta -= pi before the closeness calculations.
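For example, a line returned as (rho, theta) = (-100, pi - 0.02) becomes (100, -0.02) after that correction, and the closeness test then correctly matches it against a line at (100, 0).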