Rough Edges With Lanczos Resampling in Golang

I've been writing some basic methods to resize images in Golang. I've seen several posts about resizing images, but for the life of me I can't figure out what I'm missing...
Essentially, my issue is that when I resize an image in Golang, my results have a lot of aliasing.
I've tried iteratively downsampling the image, but that didn't yield much of an improvement.
Here's my code:
func resize(original image.Image, edgeSize int, filterSize int) image.Image {
    oldBounds := original.Bounds()
    if oldBounds.Dx() < edgeSize && oldBounds.Dy() < edgeSize {
        // No resize necessary
        return original
    }

    threshold := edgeSize * 4 / 3
    if oldBounds.Dx() > threshold || oldBounds.Dy() > threshold {
        fmt.Println("Upstream")
        original = customResizeImageToFitBounds(original, threshold, filterSize)
        oldBounds = original.Bounds()
    }

    newBounds := getNewBounds(oldBounds, edgeSize)
    resized := image.NewRGBA(newBounds)

    var ratioX = float64(oldBounds.Dx()) / float64(newBounds.Dx())
    var ratioY = float64(oldBounds.Dy()) / float64(newBounds.Dy())

    for x := 0; x < newBounds.Dx(); x++ {
        for y := 0; y < newBounds.Dy(); y++ {
            sourceX := ratioX * float64(x)
            minX := int(math.Floor(sourceX))
            sourceY := ratioY * float64(y)
            minY := int(math.Floor(sourceY))

            sampleSize := filterSize<<1 + 1
            var xCoeffs = make([]float64, sampleSize)
            var yCoeffs = make([]float64, sampleSize)

            // Compute the filter weights, then normalize so each axis sums to 1
            var sumX = 0.0
            var sumY = 0.0
            for i := 0; i < sampleSize; i++ {
                xCoeffs[i] = lanczos(filterSize, sourceX-float64(minX+i-filterSize))
                yCoeffs[i] = lanczos(filterSize, sourceY-float64(minY+i-filterSize))
                sumX += xCoeffs[i]
                sumY += yCoeffs[i]
            }
            for i := 0; i < sampleSize; i++ {
                xCoeffs[i] /= sumX
                yCoeffs[i] /= sumY
            }

            rgba := make([]float64, 4)
            for i := 0; i < sampleSize; i++ {
                if yCoeffs[i] == 0.0 {
                    continue
                }
                currY := minY + i - filterSize

                rgbaRow := make([]float64, 4)
                for j := 0; j < sampleSize; j++ {
                    if xCoeffs[j] == 0.0 {
                        continue
                    }
                    currX := minX + j - filterSize // column offset within the sample window
                    rij, gij, bij, aij := original.At(
                        clamp(currX, currY, oldBounds)).RGBA()
                    rgbaRow[0] += float64(rij) * xCoeffs[j]
                    rgbaRow[1] += float64(gij) * xCoeffs[j]
                    rgbaRow[2] += float64(bij) * xCoeffs[j]
                    rgbaRow[3] += float64(aij) * xCoeffs[j]
                }
                rgba[0] += rgbaRow[0] * yCoeffs[i]
                rgba[1] += rgbaRow[1] * yCoeffs[i]
                rgba[2] += rgbaRow[2] * yCoeffs[i]
                rgba[3] += rgbaRow[3] * yCoeffs[i]
            }

            rgba[0] = clampRangeFloat(0, rgba[0], 0xFFFF)
            rgba[1] = clampRangeFloat(0, rgba[1], 0xFFFF)
            rgba[2] = clampRangeFloat(0, rgba[2], 0xFFFF)
            rgba[3] = clampRangeFloat(0, rgba[3], 0xFFFF)

            var rgbaF [4]uint64
            rgbaF[0] = (uint64(math.Floor(rgba[0]+0.5)) * 0xFF) / 0xFFFF
            rgbaF[1] = (uint64(math.Floor(rgba[1]+0.5)) * 0xFF) / 0xFFFF
            rgbaF[2] = (uint64(math.Floor(rgba[2]+0.5)) * 0xFF) / 0xFFFF
            rgbaF[3] = (uint64(math.Floor(rgba[3]+0.5)) * 0xFF) / 0xFFFF

            rf := uint8(clampRangeUint(0, uint32(rgbaF[0]), 255))
            gf := uint8(clampRangeUint(0, uint32(rgbaF[1]), 255))
            bf := uint8(clampRangeUint(0, uint32(rgbaF[2]), 255))
            af := uint8(clampRangeUint(0, uint32(rgbaF[3]), 255))

            resized.Set(x, y, color.RGBA{R: rf, G: gf, B: bf, A: af})
        }
    }
    return resized
}
// Machine epsilon
var epsilon = math.Nextafter(1.0, 2.0) - 1

func lanczos(filterSize int, x float64) float64 {
    x = math.Abs(x)
    fs := float64(filterSize)
    if x < epsilon {
        return 1.0
    }
    if x > fs {
        return 0
    }
    piX := math.Pi * x
    piXOverFS := piX / fs
    return (math.Sin(piX) / piX) * (math.Sin(piXOverFS) / piXOverFS)
}
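As a quick sanity check on the kernel (assuming the lanczos function above is in the same package): Lanczos3 should return 1 at x = 0, fall off in between, and hit 0 at every other integer offset up to the filter size.

package main

import "fmt"

func main() {
    // expected: +1.0000 at 0.0, ~0 at 1.0, 2.0, and 3.0, nonzero in between
    for x := 0.0; x <= 3.0; x += 0.5 {
        fmt.Printf("lanczos(3, %.1f) = %+.4f\n", x, lanczos(3, x))
    }
}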
It isn't particularly performant; I want to get a good-quality result before I look at optimization.
Does anyone with experience in image resampling see anything potentially problematic here?
For reference, here is my source image:
Here is my result:
Here is my result if I remove the recursive call:
Here is the result using RMagick/ImageMagick through Ruby (what I'm shooting for):
Does anyone have advice for how I can get a smoother downscale result?
This particular example is a pretty drastic downscale, but RMagick was able to downscale it very quickly with great quality, so it must be possible.
I'm told that Lanczos3 resampling yields good results, and that's what I'm trying to use here - I'm just not sure whether my implementation is correct.
Also, as a side note: the 0xFF / 0xFFFF conversion is because Go's At method returns RGBA values in the range [0, 0xFFFF] ([0, 65535]), but Set takes a color initialized with values in the range [0, 0xFF] ([0, 255]).
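A quick illustration of that conversion: taking the top 8 bits with a shift is the usual idiom, and it agrees with the multiply-and-divide form above to within one count. A minimal sketch:

// to8Bit converts a 16-bit channel value, as returned by image.Image's
// RGBA method, to an 8-bit value by keeping the most significant byte.
func to8Bit(v uint32) uint8 {
    return uint8(v >> 8)
}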
For now, I'm more concerned with quality than performance.

Alright, I think I've found one way to solve the aliasing problem. Instead of using Lanczos3, I used bilinear interpolation to resample the source image at a size slightly larger than the target (edgeSize = 1080), Gaussian-blurred the image, then scaled the image down to the target size (edgeSize = 600), this time with bicubic interpolation. This gave me results just about the same as the ones RMagick was giving me.
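For reference, a sketch of that multi-stage pipeline using the golang.org/x/image/draw package (draw.BiLinear and draw.CatmullRom are real scalers in that package; gaussianBlur is a hypothetical helper, and getNewBounds is the helper from the question):

package main

import (
    "image"

    "golang.org/x/image/draw"
)

// resizeSmooth sketches the approach described above: bilinear resample to an
// intermediate size, blur, then a bicubic (Catmull-Rom) downscale to the target.
func resizeSmooth(src image.Image, midEdge, dstEdge int) image.Image {
    // getNewBounds is the question's helper for computing target bounds.
    mid := image.NewRGBA(getNewBounds(src.Bounds(), midEdge))
    draw.BiLinear.Scale(mid, mid.Bounds(), src, src.Bounds(), draw.Over, nil)

    // gaussianBlur is hypothetical; any separable Gaussian blur works here.
    blurred := gaussianBlur(mid)

    dst := image.NewRGBA(getNewBounds(blurred.Bounds(), dstEdge))
    draw.CatmullRom.Scale(dst, dst.Bounds(), blurred, blurred.Bounds(), draw.Over, nil)
    return dst
}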

Related

Convolution in Golang

I want to execute a convolution on an image.
The original image is:
To check, I ran the convolution in GIMP with this matrix:
1 1 1
1 1 1
1 1 1
and divisor 9, and I obtain:
When I execute my algorithm, I obtain:
My algorithm is:
func Convolution(img *image.Image, matrice [][]int) *image.NRGBA {
    imageRGBA := image.NewNRGBA((*img).Bounds())
    w := (*img).Bounds().Dx()
    h := (*img).Bounds().Dy()
    sumR := 0
    sumB := 0
    sumG := 0
    var r uint32
    var g uint32
    var b uint32
    for y := 0; y < h; y++ {
        for x := 0; x < w; x++ {
            for i := -1; i <= 1; i++ {
                for j := -1; j <= 1; j++ {
                    var imageX int
                    var imageY int
                    imageX = x + i
                    imageY = y + j
                    r, g, b, _ = (*img).At(imageX, imageY).RGBA()
                    sumG = (sumG + (int(g) * matrice[i+1][j+1]))
                    sumR = (sumR + (int(r) * matrice[i+1][j+1]))
                    sumB = (sumB + (int(b) * matrice[i+1][j+1]))
                }
            }
            imageRGBA.Set(x, y, color.NRGBA{
                uint8(min(sumR/9, 255)),
                uint8(min(sumG/9, 255)),
                uint8(min(sumB/9, 255)),
                255,
            })
            sumR = 0
            sumB = 0
            sumG = 0
        }
    }
    return imageRGBA
}
Where is the error?
Thank you for your help.
r, g, and b are uint32 values, each containing 16 bits of color information, which is always greater than 255 if it started as a non-zero 8-bit value.
You can't operate on the RGBA values and then simply truncate them to uint8; that gives a useless result, because the least significant bits are just fractional parts of the 8-bit values.
Compare the candidate value against the 16-bit maximum, 65535, and shift it right by 8 bits before truncating, to keep the 8 most significant bits:
uint8(min(sumR/9, 0xffff) >> 8),
uint8(min(sumG/9, 0xffff) >> 8),
uint8(min(sumB/9, 0xffff) >> 8),
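Putting that together, a corrected version of the question's function might look like the sketch below (same 3x3 kernel and divisor 9; the arithmetic stays in the 16-bit range that RGBA() returns, and each channel is shifted down to 8 bits at the end):

package main

import (
    "image"
    "image/color"
)

// Convolution applies a 3x3 kernel with divisor 9, working in the 16-bit
// channel range returned by RGBA() and shifting down to 8 bits only when
// writing the output pixel.
func Convolution(img image.Image, kernel [3][3]int) *image.NRGBA {
    bounds := img.Bounds()
    out := image.NewNRGBA(bounds)
    for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
        for x := bounds.Min.X; x < bounds.Max.X; x++ {
            var sumR, sumG, sumB int
            for j := -1; j <= 1; j++ {
                for i := -1; i <= 1; i++ {
                    r, g, b, _ := img.At(x+i, y+j).RGBA() // 16-bit channels
                    sumR += int(r) * kernel[j+1][i+1]
                    sumG += int(g) * kernel[j+1][i+1]
                    sumB += int(b) * kernel[j+1][i+1]
                }
            }
            out.Set(x, y, color.NRGBA{
                R: uint8(min(sumR/9, 0xFFFF) >> 8), // clamp in 16-bit range,
                G: uint8(min(sumG/9, 0xFFFF) >> 8), // then keep the top byte
                B: uint8(min(sumB/9, 0xFFFF) >> 8),
                A: 255,
            })
        }
    }
    return out
}

func min(a, b int) int {
    if a < b {
        return a
    }
    return b
}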

Multi-Otsu (multi-thresholding) with OpenCV

I am trying to carry out multi-level thresholding with Otsu's method. The approach I am currently using actually maximises the between-class variance, and I have managed to get the same threshold value as the one given by the OpenCV library. However, that is just from running Otsu's method once.
Documentation on how to do multi-level thresholding, or rather recursive thresholding, is rather limited. Where do I go after obtaining the original Otsu value? I'd appreciate some hints. I've been playing around with the code, adding one external for loop, but the next value calculated is always 254 for any given image :(
My code, if needed:
// compute histogram first
cv::Mat imageh; // image converted to grayscale for histogram purposes
//imageh = image; // to delete and uncomment below;
cv::cvtColor(image, imageh, CV_BGR2GRAY);

int histSize[1] = {256};         // number of bins
float hranges[2] = {0.0, 256.0}; // min and max pixel value
const float* ranges[1] = {hranges};
int channels[1] = {0};           // only 1 channel used
cv::MatND hist;

// Compute histogram
calcHist(&imageh, 1, channels, cv::Mat(), hist, 1, histSize, ranges);

IplImage* im = new IplImage(imageh); // assign the image to an IplImage pointer
IplImage* finalIm = cvCreateImage(cvSize(im->width, im->height), IPL_DEPTH_8U, 1);
double otsuThreshold = cvThreshold(im, finalIm, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
cout << "opencv otsu gives " << otsuThreshold << endl;

int totalNumberOfPixels = imageh.total();
cout << "total number of Pixels is " << totalNumberOfPixels << endl;

float sum = 0;
for (int t = 0; t < 256; t++)
{
    sum += t * hist.at<float>(t);
}
cout << "sum is " << sum << endl;

float sumB = 0;   // sum of background
int wB = 0;       // weight of background
int wF = 0;       // weight of foreground
float varMax = 0;
int threshold = 0;

// iterate to find the threshold that maximises the between-class variance
for (int t = 0; t < 256; t++)
{
    wB += hist.at<float>(t);       // Weight Background
    if (wB == 0) continue;
    wF = totalNumberOfPixels - wB; // Weight Foreground
    if (wF == 0) break;
    sumB += (float)(t * hist.at<float>(t));
    float mB = sumB / wB;          // Mean Background
    float mF = (sum - sumB) / wF;  // Mean Foreground
    // Calculate Between Class Variance
    float varBetween = (float)wB * (float)wF * (mB - mF) * (mB - mF);
    // Check if new maximum found
    if (varBetween > varMax) {
        varMax = varBetween;
        threshold = t;
    }
}
cout << "threshold value is: " << threshold;
To extend Otsu's thresholding method to multi-level thresholding, the between-class variance equation becomes a sum over all classes.
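In symbols, for the two-threshold case implemented in the code below (with $\omega_k$ and $\mu_k$ the weight and mean of class $k$, and $\mu_T$ the global mean):

\sigma_B^2(t_1, t_2) = \omega_0(\mu_0 - \mu_T)^2 + \omega_1(\mu_1 - \mu_T)^2 + \omega_2(\mu_2 - \mu_T)^2

and the optimal thresholds are the pair $t_1 < t_2$ that maximises $\sigma_B^2$.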
Please check out Deng-Yuan Huang, Ta-Wei Lin, Wu-Chih Hu, "Automatic Multilevel Thresholding Based on Two-Stage Otsu's Method with Cluster Determination by Valley Estimation", Int. Journal of Innovative Computing, 2011, 7:5631-5644 for more information.
http://www.ijicic.org/ijicic-10-05033.pdf
Here is my C# implementation of Otsu Multi for 2 thresholds:
/* Otsu (1979) - multi */
Tuple < int, int > otsuMulti(object sender, EventArgs e) {
//image histogram
int[] histogram = new int[256];
//total number of pixels
int N = 0;
//accumulate image histogram and total number of pixels
foreach(int intensity in image.Data) {
if (intensity != 0) {
histogram[intensity] += 1;
N++;
}
}
double W0K, W1K, W2K, M0, M1, M2, currVarB, optimalThresh1, optimalThresh2, maxBetweenVar, M0K, M1K, M2K, MT;
optimalThresh1 = 0;
optimalThresh2 = 0;
W0K = 0;
W1K = 0;
M0K = 0;
M1K = 0;
MT = 0;
maxBetweenVar = 0;
for (int k = 0; k <= 255; k++) {
MT += k * (histogram[k] / (double) N);
}
for (int t1 = 0; t1 <= 255; t1++) {
W0K += histogram[t1] / (double) N; //Pi
M0K += t1 * (histogram[t1] / (double) N); //i * Pi
M0 = M0K / W0K; //(i * Pi)/Pi
W1K = 0;
M1K = 0;
for (int t2 = t1 + 1; t2 <= 255; t2++) {
W1K += histogram[t2] / (double) N; //Pi
M1K += t2 * (histogram[t2] / (double) N); //i * Pi
M1 = M1K / W1K; //(i * Pi)/Pi
W2K = 1 - (W0K + W1K);
M2K = MT - (M0K + M1K);
if (W2K <= 0) break;
M2 = M2K / W2K;
currVarB = W0K * (M0 - MT) * (M0 - MT) + W1K * (M1 - MT) * (M1 - MT) + W2K * (M2 - MT) * (M2 - MT);
if (maxBetweenVar < currVarB) {
maxBetweenVar = currVarB;
optimalThresh1 = t1;
optimalThresh2 = t2;
}
}
}
return new Tuple(optimalThresh1, optimalThresh2);
}
And this is the result I got by thresholding an image scan of soil with the above code:
(T1 = 110, T2 = 147).
Otsu's original paper, "Nobuyuki Otsu, A Threshold Selection Method from Gray-Level Histograms, IEEE Transactions on Systems, Man, and Cybernetics, 1979, 9:62-66", also briefly mentions the extension to multithresholding.
https://engineering.purdue.edu/kak/computervision/ECE661.08/OTSU_paper.pdf
Hope this helps.
Here is a simple, general approach for n thresholds in Python (3.x):
# developed by- SUJOY KUMAR GOSWAMI
# source paper- https://people.ece.cornell.edu/acharya/papers/mlt_thr_img.pdf
import cv2
import numpy as np
import math

img = cv2.imread('path-to-image')
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
a = 0
b = 255
n = 6    # number of thresholds (better to choose an even value)
k = 0.7  # free variable to take any positive value
T = []   # list which will contain the 'n' thresholds

def sujoy(img, a, b):
    if a > b:
        s = -1
        m = -1
        return m, s
    img = np.array(img)
    t1 = (img >= a)
    t2 = (img <= b)
    X = np.multiply(t1, t2)
    Y = np.multiply(img, X)
    s = np.sum(X)
    m = np.sum(Y) / s
    return m, s

for i in range(int(n / 2 - 1)):
    img = np.array(img)
    t1 = (img >= a)
    t2 = (img <= b)
    X = np.multiply(t1, t2)
    Y = np.multiply(img, X)
    mu = np.sum(Y) / np.sum(X)
    Z = Y - mu
    Z = np.multiply(Z, X)
    W = np.multiply(Z, Z)
    sigma = math.sqrt(np.sum(W) / np.sum(X))
    T1 = mu - k * sigma
    T2 = mu + k * sigma
    x, y = sujoy(img, a, T1)
    w, z = sujoy(img, T2, b)
    T.append(x)
    T.append(w)
    a = T1 + 1
    b = T2 - 1
    k = k * (i + 1)

T1 = mu
T2 = mu + 1
x, y = sujoy(img, a, T1)
w, z = sujoy(img, T2, b)
T.append(x)
T.append(w)
T.sort()
print(T)
For the full paper and more information, visit this link.
I've written an example of how Otsu thresholding works in Python before. You can see the source code here: https://github.com/subokita/Sandbox/blob/master/otsu.py
In the example there are 2 variants: otsu2(), which is the optimised version as seen on the Wikipedia page, and otsu(), which is a more naive implementation based on the algorithm description itself.
If you are okay with reading Python code (in this case, it's pretty simple, almost pseudocode-like), you might want to look at otsu() in the example and modify it. Porting it to C++ is not hard either.
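To make that concrete, here is a sketch of the naive single-threshold version ported to Go instead (my own port, following the same between-class-variance loop as the C++ snippet in the question, not the linked code):

package main

import "fmt"

// otsu returns the threshold that maximises the between-class variance
// for a 256-bin grayscale histogram.
func otsu(hist [256]int) int {
    total := 0
    var sum float64
    for i, c := range hist {
        total += c
        sum += float64(i) * float64(c)
    }

    var sumB, wB float64
    bestT, bestVar := 0, -1.0
    for t := 0; t < 256; t++ {
        wB += float64(hist[t]) // background weight
        if wB == 0 {
            continue
        }
        wF := float64(total) - wB // foreground weight
        if wF == 0 {
            break
        }
        sumB += float64(t) * float64(hist[t])
        mB := sumB / wB         // background mean
        mF := (sum - sumB) / wF // foreground mean
        varBetween := wB * wF * (mB - mF) * (mB - mF)
        if varBetween > bestVar {
            bestVar = varBetween
            bestT = t
        }
    }
    return bestT
}

func main() {
    // toy bimodal histogram: two clusters around 50 and 200
    var hist [256]int
    for i := 40; i < 60; i++ {
        hist[i] = 100
    }
    for i := 190; i < 210; i++ {
        hist[i] = 100
    }
    fmt.Println("threshold:", otsu(hist)) // expect a value between the clusters
}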
@Antoni4 gives the best answer in my opinion, and it's very straightforward to increase the number of levels.
This is for three-level thresholding:
#include "Shadow01-1.cuh"
void multiThresh(double &optimalThresh1, double &optimalThresh2, double &optimalThresh3, cv::Mat &imgHist, cv::Mat &src)
{
double W0K, W1K, W2K, W3K, M0, M1, M2, M3, currVarB, maxBetweenVar, M0K, M1K, M2K, M3K, MT;
unsigned char *histogram = (unsigned char*)(imgHist.data);
int N = src.rows*src.cols;
W0K = 0;
W1K = 0;
M0K = 0;
M1K = 0;
MT = 0;
maxBetweenVar = 0;
for (int k = 0; k <= 255; k++) {
MT += k * (histogram[k] / (double) N);
}
for (int t1 = 0; t1 <= 255; t1++)
{
W0K += histogram[t1] / (double) N; //Pi
M0K += t1 * (histogram[t1] / (double) N); //i * Pi
M0 = M0K / W0K; //(i * Pi)/Pi
W1K = 0;
M1K = 0;
for (int t2 = t1 + 1; t2 <= 255; t2++)
{
W1K += histogram[t2] / (double) N; //Pi
M1K += t2 * (histogram[t2] / (double) N); //i * Pi
M1 = M1K / W1K; //(i * Pi)/Pi
W2K = 1 - (W0K + W1K);
M2K = MT - (M0K + M1K);
if (W2K <= 0) break;
M2 = M2K / W2K;
W3K = 0;
M3K = 0;
for (int t3 = t2 + 1; t3 <= 255; t3++)
{
W2K += histogram[t3] / (double) N; //Pi
M2K += t3 * (histogram[t3] / (double) N); // i*Pi
M2 = M2K / W2K; //(i*Pi)/Pi
W3K = 1 - (W1K + W2K);
M3K = MT - (M1K + M2K);
M3 = M3K / W3K;
currVarB = W0K * (M0 - MT) * (M0 - MT) + W1K * (M1 - MT) * (M1 - MT) + W2K * (M2 - MT) * (M2 - MT) + W3K * (M3 - MT) * (M3 - MT);
if (maxBetweenVar < currVarB)
{
maxBetweenVar = currVarB;
optimalThresh1 = t1;
optimalThresh2 = t2;
optimalThresh3 = t3;
}
}
}
}
}
@Guilherme Silva: your code has a bug.
You must replace:
W3K = 0;
M3K = 0;
with
W2K = 0;
M2K = 0;
and
W3K = 1 - (W1K + W2K);
M3K = MT - (M1K + M2K);
with
W3K = 1 - (W0K + W1K + W2K);
M3K = MT - (M0K + M1K + M2K);
;-)
Regards
EDIT(1): [Toby Speight]
I discovered this bug by applying the effect to the same picture at different resolutions (sizes) and seeing that the output results differed far too much from each other, even when the resolution changed only a little.
W3K and M3K must be the totals minus the previous WKs and MKs.
(I reasoned about this by code similarity with the version with one level less.)
Due to my limited English I cannot explain it better at the moment.
To be honest, I'm still not 100% sure that this way is correct, even though from my outputs I can tell it gives better results (even with one level more, i.e. 5 shades of gray).
You can try it yourself ;-) Sorry.
My outputs:
3 Thresholds
4 Thresholds
I found a useful piece of code in this thread. I was looking for a multi-level Otsu implementation for double/float images, so I tried to generalize the example to N levels with a double/float matrix as input. My code below uses the Armadillo library as a dependency, but it can easily be adapted to standard C++ arrays: just replace the vec and uvec objects with one-dimensional double and integer arrays, and mat and umat with two-dimensional ones. The two other Armadillo functions used here are vectorise and hist.
// Input parameters:
// map     - input image (double matrix)
// mask    - region of interest to be thresholded
// nBins   - number of bins
// nLevels - number of Otsu thresholds

#include <armadillo>
#include <algorithm>
#include <vector>
#include <iostream>

using namespace arma;
using namespace std;

int nCombinations(int n, int r); // forward declaration; defined below

mat OtsuFilterMulti(mat map, int nBins, int nLevels) {
    mat mapr; // output thresholded image
    mapr = zeros<mat>(map.n_rows, map.n_cols);

    unsigned int numElem = 0;
    vec threshold = zeros<vec>(nLevels);
    vec q = zeros<vec>(nLevels + 1);
    vec mu = zeros<vec>(nLevels + 1);
    vec muk = zeros<vec>(nLevels + 1);
    uvec binv = zeros<uvec>(nLevels);

    if (nLevels <= 1) return mapr;

    numElem = map.n_rows * map.n_cols;

    uvec histogram = hist(vectorise(map), nBins);

    double maxval = map.max();
    double minval = map.min();
    double odelta = (maxval - abs(minval)) / nBins; // distance between histogram bins
    vec oval = zeros<vec>(nBins);
    double mt = 0, variance = 0.0, bestVariance = 0.0;

    for (int ii = 0; ii < nBins; ii++) {
        oval(ii) = (double)odelta * ii + (double)odelta * 0.5; // centers of histogram bins
        mt += (double)ii * ((double)histogram(ii)) / (double)numElem;
    }
    for (int ii = 0; ii < nLevels; ii++) {
        binv(ii) = ii;
    }

    double sq, smuk;
    int nComb;
    nComb = nCombinations(nBins, nLevels);
    std::vector<bool> v(nBins);
    std::fill(v.begin(), v.begin() + nLevels, true);
    umat ibin = zeros<umat>(nComb, nLevels); // indices from combinations will be stored here
    int cc = 0;
    int ci = 0;
    do {
        for (int i = 0; i < nBins; ++i) {
            if (ci == nLevels) ci = 0;
            if (v[i]) {
                ibin(cc, ci) = i;
                ci++;
            }
        }
        cc++;
    } while (std::prev_permutation(v.begin(), v.end()));

    uvec lastIndex = zeros<uvec>(nLevels);

    // Perform operations on pre-calculated indices
    for (int ii = 0; ii < nComb; ii++) {
        for (int jj = 0; jj < nLevels; jj++) {
            smuk = 0;
            sq = 0;
            if (lastIndex(jj) != ibin(ii, jj) || ii == 0) {
                q(jj) += double(histogram(ibin(ii, jj))) / (double)numElem;
                muk(jj) += ibin(ii, jj) * (double(histogram(ibin(ii, jj)))) / (double)numElem;
                mu(jj) = muk(jj) / q(jj);
                q(jj + 1) = 0.0;
                muk(jj + 1) = 0.0;
                if (jj > 0) {
                    for (int kk = 0; kk <= jj; kk++) {
                        sq += q(kk);
                        smuk += muk(kk);
                    }
                    q(jj + 1) = 1 - sq;
                    muk(jj + 1) = mt - smuk;
                    mu(jj + 1) = muk(jj + 1) / q(jj + 1);
                }
                if (jj > 0 && jj < (nLevels - 1)) {
                    q(jj + 1) = 0.0;
                    muk(jj + 1) = 0.0;
                }
                lastIndex(jj) = ibin(ii, jj);
            }
        }

        variance = 0.0;
        for (int jj = 0; jj <= nLevels; jj++) {
            variance += q(jj) * (mu(jj) - mt) * (mu(jj) - mt);
        }

        if (variance > bestVariance) {
            bestVariance = variance;
            for (int jj = 0; jj < nLevels; jj++) {
                threshold(jj) = oval(ibin(ii, jj));
            }
        }
    }

    cout << "Optimized thresholds: ";
    for (int jj = 0; jj < nLevels; jj++) {
        cout << threshold(jj) << " ";
    }
    cout << endl;

    for (unsigned int jj = 0; jj < map.n_rows; jj++) {
        for (unsigned int kk = 0; kk < map.n_cols; kk++) {
            for (int ll = 0; ll < nLevels; ll++) {
                if (map(jj, kk) >= threshold(ll)) {
                    mapr(jj, kk) = ll + 1;
                }
            }
        }
    }
    return mapr;
}

int nCombinations(int n, int r) {
    if (r > n) return 0;
    if (r * 2 > n) r = n - r;
    if (r == 0) return 1;

    int ret = n;
    for (int i = 2; i <= r; ++i) {
        ret *= (n - i + 1);
        ret /= i;
    }
    return ret;
}

What's the best way to fit one or more good lines to a set of points in an image using RANSAC in OpenCV?

Is RANSAC the most efficient way to fit a line?
RANSAC is not the most efficient, but it works better when the data has a large number of outliers. Here is how to do it using OpenCV.
A useful structure:
struct SLine
{
    SLine() :
        numOfValidPoints(0),
        params(-1.f, -1.f, -1.f, -1.f)
    {}
    cv::Vec4f params; // (cos(t), sin(t), X0, Y0)
    int numOfValidPoints;
};
Total least squares is used to fit the model once a candidate pair has enough inliers:
cv::Vec4f TotalLeastSquares(
    std::vector<cv::Point>& nzPoints,
    std::vector<int> ptOnLine)
{
    // if there are enough inliers, calculate the model
    float x = 0, y = 0, x2 = 0, y2 = 0, xy = 0, w = 0;
    float dx2, dy2, dxy;
    float t;
    for (size_t i = 0; i < nzPoints.size(); ++i)
    {
        x  += ptOnLine[i] * nzPoints[i].x;
        y  += ptOnLine[i] * nzPoints[i].y;
        x2 += ptOnLine[i] * nzPoints[i].x * nzPoints[i].x;
        y2 += ptOnLine[i] * nzPoints[i].y * nzPoints[i].y;
        xy += ptOnLine[i] * nzPoints[i].x * nzPoints[i].y;
        w  += ptOnLine[i];
    }
    x /= w;
    y /= w;
    x2 /= w;
    y2 /= w;
    xy /= w;

    // Covariance matrix
    dx2 = x2 - x * x;
    dy2 = y2 - y * y;
    dxy = xy - x * y;

    t = (float) atan2(2 * dxy, dx2 - dy2) / 2;

    cv::Vec4f line;
    line[0] = (float) cos(t);
    line[1] = (float) sin(t);
    line[2] = (float) x;
    line[3] = (float) y;
    return line;
}
The actual RANSAC:
SLine LineFitRANSAC(
    float t, // distance from the main line
    float p, // chance of hitting a valid pair
    float e, // percentage of outliers
    int T,   // number of expected minimum inliers
    std::vector<cv::Point>& nzPoints)
{
    int s = 2; // number of points required by the model
    int N = (int)ceilf(log(1 - p) / log(1 - pow(1 - e, s))); // number of independent trials

    std::vector<SLine> lineCandidates;
    std::vector<int> ptOnLine(nzPoints.size()); // is inlier
    RNG rng((uint64)-1);
    SLine line;
    for (int i = 0; i < N; i++)
    {
        // pick two points
        int idx1 = (int)rng.uniform(0, (int)nzPoints.size());
        int idx2 = (int)rng.uniform(0, (int)nzPoints.size());
        cv::Point p1 = nzPoints[idx1];
        cv::Point p2 = nzPoints[idx2];

        // points too close - discard
        if (cv::norm(p1 - p2) < t)
        {
            continue;
        }

        // line equation -> (y1 - y2)X + (x2 - x1)Y + x1y2 - x2y1 = 0
        float a = static_cast<float>(p1.y - p2.y);
        float b = static_cast<float>(p2.x - p1.x);
        float c = static_cast<float>(p1.x * p2.y - p2.x * p1.y);
        // normalize them
        float scale = 1.f / sqrt(a * a + b * b);
        a *= scale;
        b *= scale;
        c *= scale;

        // count inliers
        int numOfInliers = 0;
        for (size_t i = 0; i < nzPoints.size(); ++i)
        {
            cv::Point& p0 = nzPoints[i];
            float rho = abs(a * p0.x + b * p0.y + c);
            bool isInlier = rho < t;
            if (isInlier) numOfInliers++;
            ptOnLine[i] = isInlier;
        }

        if (numOfInliers < T)
        {
            continue;
        }

        line.params = TotalLeastSquares(nzPoints, ptOnLine);
        line.numOfValidPoints = numOfInliers;
        lineCandidates.push_back(line);
    }

    int bestLineIdx = 0;
    int bestLineScore = 0;
    for (size_t i = 0; i < lineCandidates.size(); i++)
    {
        if (lineCandidates[i].numOfValidPoints > bestLineScore)
        {
            bestLineIdx = i;
            bestLineScore = lineCandidates[i].numOfValidPoints;
        }
    }

    if (lineCandidates.empty())
    {
        return SLine();
    }
    else
    {
        return lineCandidates[bestLineIdx];
    }
}
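A side note on the N computed at the top of LineFitRANSAC: it comes from requiring probability p of drawing at least one all-inlier sample of size s when a fraction e of the data are outliers. A small Go sketch of the same arithmetic, just to illustrate the formula:

package main

import (
    "fmt"
    "math"
)

// ransacTrials returns the number of independent trials needed so that,
// with probability p, at least one sample of s points is outlier-free
// when a fraction e of the data are outliers.
func ransacTrials(p, e float64, s int) int {
    return int(math.Ceil(math.Log(1-p) / math.Log(1-math.Pow(1-e, float64(s)))))
}

func main() {
    // e.g. 99% confidence, 50% outliers, 2-point line model -> 17 trials
    fmt.Println(ransacTrials(0.99, 0.5, 2))
}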
Take a look at the Least Mean Square method. It's faster and simpler than RANSAC.
Also take a look at OpenCV's fitLine method.
RANSAC performs better when you have a lot of outliers in your data, or a complex hypothesis.

Fast Pixel Count on Binary Image - ARM NEON intrinsics - iOS Dev

Can someone tell me a fast function to count the number of white pixels in a binary image? I need it for iOS app development. I am working directly on the memory of the image, defined as
bool *imageData = (bool *) malloc(noOfPixels * sizeof(bool));
I am implementing the function
int whiteCount = 0;
for (int q = i; q < i + windowHeight; q++)
{
    for (int w = j; w < j + windowWidth; w++)
    {
        if (imageData[q*W + w] == 1)
            whiteCount++;
    }
}
This is obviously the slowest possible implementation. I've heard that ARM NEON intrinsics on iOS can be used to do several operations in one cycle. Maybe that's the way to go?
The problem is that I am not very familiar with assembly language and don't have enough time to learn it at the moment. So it would be great if anyone could post NEON intrinsics code for the problem mentioned above, or any other fast implementation in C/C++.
The only NEON intrinsics code that I was able to find online is the code for RGB to gray:
http://computer-vision-talks.com/2011/02/a-very-fast-bgra-to-grayscale-conversion-on-iphone/
Firstly you can speed up the original code a little by factoring out the multiply and getting rid of the branch:
int whiteCount = 0;
for (int q = i; q < i + windowHeight; q++)
{
    const bool * const row = &imageData[q * W];
    for (int w = j; w < j + windowWidth; w++)
    {
        whiteCount += row[w];
    }
}
(This assumes that imageData[] is truly binary, i.e. each element can only ever be 0 or 1.)
Here is a simple NEON implementation:
#include <arm_neon.h>

// ...

int q, w;
int whiteCount = 0;
uint32x4_t v_count = { 0 };
for (q = i; q < i + windowHeight; q++)
{
    const bool * const row = &imageData[q * W];
    uint16x8_t vrow_count = { 0 };
    for (w = j; w <= j + windowWidth - 16; w += 16)        // SIMD loop
    {
        uint8x16_t v = vld1q_u8((const uint8_t *)&row[w]); // load 16 x 8 bit pixels
        vrow_count = vpadalq_u8(vrow_count, v);            // accumulate 16 bit row counts
    }
    for ( ; w < j + windowWidth; ++w)                      // scalar clean up loop
    {
        whiteCount += row[w];
    }
    v_count = vpadalq_u16(v_count, vrow_count);            // update 32 bit image counts
}                                                          // from 16 bit row counts
// add 4 x 32 bit partial counts from SIMD loop to scalar total
whiteCount += vgetq_lane_u32(v_count, 0);
whiteCount += vgetq_lane_u32(v_count, 1);
whiteCount += vgetq_lane_u32(v_count, 2);
whiteCount += vgetq_lane_u32(v_count, 3);
// total is now in whiteCount
(This assumes that imageData[] is truly binary, imageWidth <= 2^19, and sizeof(bool) == 1.)
Updated version for unsigned char and values of 255 for white, 0 for black:
#include <arm_neon.h>

// ...

int q, w;
int whiteCount = 0;
const uint8x16_t v_mask = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
uint32x4_t v_count = { 0 };
for (q = i; q < i + windowHeight; q++)
{
    const uint8_t * const row = &imageData[q * W];
    uint16x8_t vrow_count = { 0 };
    for (w = j; w <= j + windowWidth - 16; w += 16)  // SIMD loop
    {
        uint8x16_t v = vld1q_u8(&row[w]);            // load 16 x 8 bit pixels
        v = vandq_u8(v, v_mask);                     // mask out all but the LS bit
        vrow_count = vpadalq_u8(vrow_count, v);      // accumulate 16 bit row counts
    }
    for ( ; w < j + windowWidth; ++w)                // scalar clean up loop
    {
        whiteCount += (row[w] == 255);
    }
    v_count = vpadalq_u16(v_count, vrow_count);      // update 32 bit image counts
}                                                    // from 16 bit row counts
// add 4 x 32 bit partial counts from SIMD loop to scalar total
whiteCount += vgetq_lane_u32(v_count, 0);
whiteCount += vgetq_lane_u32(v_count, 1);
whiteCount += vgetq_lane_u32(v_count, 2);
whiteCount += vgetq_lane_u32(v_count, 3);
// total is now in whiteCount
(This assumes that imageData[] has values of 255 for white and 0 for black, and imageWidth <= 2^19.)
Note that all the above code is untested and may need some further work.
http://gcc.gnu.org/onlinedocs/gcc/ARM-NEON-Intrinsics.html
Section 6.55.3.6
The vectorized algorithm will do the comparisons and put them in a structure for you, but you'd still need to go through each element of the structure and determine whether it's zero or not.
How fast does that loop currently run, and how fast do you need it to run? Also remember that NEON shares registers with the floating-point unit, so using NEON here may force an FPU context switch.
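On the general idea of counting set pixels quickly: if the binary image is packed one bit per pixel rather than one byte, the whole problem reduces to population counts. A small Go sketch of that layout (not the NEON code above, just the same counting idea under a different data layout):

package main

import (
    "fmt"
    "math/bits"
)

// countWhite counts set bits in a bitmap packed 64 pixels per word.
func countWhite(bitmap []uint64) int {
    total := 0
    for _, word := range bitmap {
        total += bits.OnesCount64(word)
    }
    return total
}

func main() {
    bitmap := []uint64{0xFF, 0xF0F0F0F0F0F0F0F0, 1}
    fmt.Println(countWhite(bitmap)) // 8 + 32 + 1 = 41
}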

Search for lines with a small range of angles in OpenCV

I'm using the Hough transform in OpenCV to detect lines. However, I know in advance that I only need lines within a very limited range of angles (about 10 degrees or so). I'm doing this in a very performance-sensitive setting, so I'd like to avoid the extra work spent detecting lines at other angles, lines I know in advance I don't care about.
I could extract the Hough source from OpenCV and just hack it to take min_theta and max_theta parameters, but I'd like a less fragile approach (having to manually update my code with each OpenCV update, etc.).
What's the best approach here?
Well, I've modified the icvHoughLines function to scan only a certain range of angles. I'm sure there are cleaner ways that play with memory allocation as well, but I got a speed gain, going from 100 ms to 33 ms when narrowing the angle range from 180 deg to 60 deg, so I'm happy with that.
Note that this code also outputs the accumulator value. Also, I only output one line, because that fit my purposes, but there was no real gain there anyway.
static void
icvHoughLinesStandard2( const CvMat* img, float rho, float theta,
                        int threshold, CvSeq *lines, int linesMax )
{
    cv::AutoBuffer<int> _accum, _sort_buf;
    cv::AutoBuffer<float> _tabSin, _tabCos;

    const uchar* image;
    int step, width, height;
    int numangle, numrho;
    int total = 0;
    float ang;
    int r, n;
    int i, j;
    float irho = 1 / rho;
    double scale;

    CV_Assert( CV_IS_MAT(img) && CV_MAT_TYPE(img->type) == CV_8UC1 );

    image = img->data.ptr;
    step = img->step;
    width = img->cols;
    height = img->rows;

    numangle = cvRound(CV_PI / theta);
    numrho = cvRound(((width + height) * 2 + 1) / rho);

    _accum.allocate((numangle+2) * (numrho+2));
    _sort_buf.allocate(numangle * numrho);
    _tabSin.allocate(numangle);
    _tabCos.allocate(numangle);
    int *accum = _accum, *sort_buf = _sort_buf;
    float *tabSin = _tabSin, *tabCos = _tabCos;

    memset( accum, 0, sizeof(accum[0]) * (numangle+2) * (numrho+2) );

    // find n and ang limits (in our case we want 60 to 120)
    float limit_min = 60.0/180.0*CV_PI;
    float limit_max = 120.0/180.0*CV_PI;
    //num_steps = (limit_max - limit_min)/theta;
    int start_n = floor(limit_min/theta);
    int stop_n = floor(limit_max/theta);
    for( ang = limit_min, n = start_n; n < stop_n; ang += theta, n++ )
    {
        tabSin[n] = (float)(sin(ang) * irho);
        tabCos[n] = (float)(cos(ang) * irho);
    }

    // stage 1. fill accumulator
    for( i = 0; i < height; i++ )
        for( j = 0; j < width; j++ )
        {
            if( image[i * step + j] != 0 )
                for( n = start_n; n < stop_n; n++ )
                {
                    r = cvRound( j * tabCos[n] + i * tabSin[n] );
                    r += (numrho - 1) / 2;
                    accum[(n+1) * (numrho+2) + r+1]++;
                }
        }

    // find the single strongest accumulator cell within the angle range
    int max_accum = 0;
    int max_ind = 0;
    for( r = 0; r < numrho; r++ )
    {
        for( n = start_n; n < stop_n; n++ )
        {
            int base = (n+1) * (numrho+2) + r+1;
            if (accum[base] > max_accum)
            {
                max_accum = accum[base];
                max_ind = base;
            }
        }
    }

    CvLinePolar2 line;
    scale = 1./(numrho+2);
    int idx = max_ind;
    n = cvFloor(idx*scale) - 1;
    r = idx - (n+1)*(numrho+2) - 1;
    line.rho = (r - (numrho - 1)*0.5f) * rho;
    line.angle = n * theta;
    line.votes = accum[idx];
    cvSeqPush( lines, &line );
}
If you use the probabilistic Hough transform, then the output is in the form of a cvPoint for each of lines[0] and lines[1]. We can get the x and y coordinates for each of the two points as pt1.x, pt1.y and pt2.x, pt2.y.
Then use the simple formula for the slope of a line, (y2-y1)/(x2-x1). Taking the arctan (inverse tangent) of that yields the angle in radians. Then simply filter out the desired angles from the values obtained for each Hough line.
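A minimal sketch of that endpoint-based angle filter, written here in Go just to show the arithmetic (the endpoints are assumed to come from whatever Hough call you use):

package main

import (
    "fmt"
    "math"
)

// segmentAngle returns the angle of the segment (x1,y1)-(x2,y2) in degrees,
// normalized to [0, 180).
func segmentAngle(x1, y1, x2, y2 float64) float64 {
    deg := math.Atan2(y2-y1, x2-x1) * 180 / math.Pi
    if deg < 0 {
        deg += 180
    }
    return deg
}

// withinRange reports whether angle lies in [lo, hi] degrees.
func withinRange(angle, lo, hi float64) bool {
    return angle >= lo && angle <= hi
}

func main() {
    a := segmentAngle(0, 0, 10, 1) // a shallow line, about 5.7 degrees
    fmt.Println(a, withinRange(a, 0, 10))
}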
I think it's more natural to use the standard HoughLines(...) function, which gives a collection of lines directly in rho and theta terms, and to select the necessary angle range from it, rather than recalculating the angle from segment end points.
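In the same spirit, filtering (rho, theta) pairs by angle is only a few lines; a sketch (the PolarLine type below is my own stand-in, not an OpenCV type):

package main

import (
    "fmt"
    "math"
)

// PolarLine is a stand-in for a Hough line in (rho, theta) form.
type PolarLine struct {
    Rho   float64 // distance from the origin
    Theta float64 // angle in radians
}

// filterByTheta keeps only lines whose theta lies within [lo, hi] radians.
func filterByTheta(lines []PolarLine, lo, hi float64) []PolarLine {
    var out []PolarLine
    for _, l := range lines {
        if l.Theta >= lo && l.Theta <= hi {
            out = append(out, l)
        }
    }
    return out
}

func main() {
    lines := []PolarLine{{10, 0.1}, {42, math.Pi / 2}, {7, 1.2}}
    // keep lines whose theta lies between 85 and 95 degrees
    kept := filterByTheta(lines, 85*math.Pi/180, 95*math.Pi/180)
    fmt.Println(kept) // only the pi/2 line survives
}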
