I am trying to write a snake active contour program, and I have been looking at different websites that show how they programmed the snake, but none of them explain what CV_VALUE (the coefficient-usage flag) is or how they initialized it.
Here is some code that I was working on, but I do not know what the problem is.
void snake(Mat copy){
threshold(copy, copy, 170, 255, CV_THRESH_BINARY);
float alpha = 0.1f; //weight of snake continuity
float beta = 0.5f;  //weight of snake curvature
float gamma = 0.4f; //weight of image energy (movement)
//Search window size; width and height have to be odd
CvSize size;
size.width = 5;
size.height = 5;
CvTermCriteria criteria;
criteria.type = CV_TERMCRIT_ITER;
criteria.max_iter = 10000;
criteria.epsilon = 0.1;
int cpt = 5; //number of snake points; must match the size of pointsArray below
CvPoint pointsArray[5];
pointsArray[0].x = 0;
pointsArray[0].y = 95;
pointsArray[1].x = 5;
pointsArray[1].y = 95;
pointsArray[2].x = 10;
pointsArray[2].y = 95;
pointsArray[3].x = 15;
pointsArray[3].y = 95;
pointsArray[4].x = 20;
pointsArray[4].y = 95;
//The Code (image, points, length, alpha (continuity), beta (curvature), gamma (movement), coefficient usage, window size, criteria, calc_gradient)
//cvSnakeImage is a legacy C function: it takes an IplImage*, not a cv::Mat
IplImage ipl = copy;
cvSnakeImage(&ipl, pointsArray, cpt, &alpha, &beta, &gamma, CV_VALUE, size, criteria, 0);
}
CV_VALUE indicates that each of alpha, beta, gamma is a pointer to a single value to be used for all points;
CV_ARRAY indicates that each of alpha, beta, gamma is a pointer to an array of coefficients, one for each point of the snake. All the arrays must have a size equal to the contour size.
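For illustration, here is a minimal sketch of the CV_ARRAY variant in C++; the point positions, the coefficient values and the image variable are assumptions, not taken from the question:
#include <opencv2/legacy/legacy.hpp> // cvSnakeImage lives in the legacy module (OpenCV 2.4)

// With CV_ARRAY every snake point gets its own coefficient, so the
// arrays must be as long as the contour.
const int length = 5;
CvPoint points[length];
float alphas[length], betas[length], gammas[length];
for (int i = 0; i < length; i++) {
    points[i] = cvPoint(i * 5, 95);
    alphas[i] = 0.1f; // per-point continuity weight
    betas[i]  = 0.5f; // per-point curvature weight
    gammas[i] = 0.4f; // per-point image-energy weight
}
CvSize win = cvSize(5, 5); // width and height must be odd
CvTermCriteria crit = cvTermCriteria(CV_TERMCRIT_ITER, 10000, 0.1);
cvSnakeImage(image /* IplImage* */, points, length, alphas, betas, gammas, CV_ARRAY, win, crit, 0);
With CV_VALUE you would pass &alpha, &beta and &gamma instead, and each single value would apply to every point.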
Related
- (UIImage*) snake:(UIImage *)processingImage :(UIImageView *)contourImage{
CvMat cvMatImage = [processingImage CVGrayscaleMat];
cv::Mat cvMatImage2(&cvMatImage); //construct the Mat header from the CvMat*
IplImage copy = cvMatImage2;
IplImage* snakeImage = &copy;
cvThreshold(snakeImage, snakeImage, 170, 255, CV_THRESH_BINARY);
float alpha = 0.1;
float beta = 0.5;
float gamma = 0.4;
CvSize size;
size.width = 5;
size.height = 5;
CvTermCriteria criteria;
criteria.type = CV_TERMCRIT_ITER;
criteria.max_iter = 10000;
criteria.epsilon = 0.1;
//cvSnakeImage wants an array of points, pointers to the coefficient
//values, and a trailing calc_gradient flag; a lone CvPoint and float
//literals will not match the function's signature.
CvPoint pointsArray[5] = {{0,95}, {5,95}, {10,95}, {15,95}, {20,95}};
cvSnakeImage(snakeImage, pointsArray, 5, &alpha, &beta, &gamma, CV_VALUE, size, criteria, 0);
return nil; //TODO: convert the result back to a UIImage
}
I have already put the #include in as well. When I type cvSnakeImage, the code-assistance popup shows the function; however, when I finish the call, it reports "no matching function for call"...
Thanks in advance!
As I mentioned in the title, do you know how to flip an ID3D10Texture2D object horizontally or vertically?
I used this code to take a screenshot and save it to a file.
ID3D10Resource *backbufferRes;
renderTargetView->GetResource(&backbufferRes);
ID3D10Texture2D *mRenderedTexture;
// Create our texture
D3D10_TEXTURE2D_DESC texDesc;
texDesc.ArraySize = 1;
texDesc.BindFlags = 0;
texDesc.CPUAccessFlags = 0;
texDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
texDesc.Width = 640; // must be same as backbuffer
texDesc.Height = 480; // must be same as backbuffer
texDesc.MipLevels = 1;
texDesc.MiscFlags = 0;
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.Usage = D3D10_USAGE_DEFAULT;
d3d10Device->CreateTexture2D(&texDesc, 0, &mRenderedTexture);
d3d10Device->CopyResource(mRenderedTexture, backbufferRes);
D3DX10FilterTexture(mRenderedTexture, 0, D3DX10_FILTER_MIRROR_U);
D3DX10SaveTextureToFile(mRenderedTexture, D3DX10_IFF_PNG, L"test.png");
The D3DX10FilterTexture(mRenderedTexture, 0, D3DX10_FILTER_MIRROR_U); line doesn't mirror my texture. Any suggestions?
In your shader, sample with 1 - u to flip horizontally or 1 - v to flip vertically.
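If the texture is drawn on a full-screen quad, the same flip can equivalently be baked into the vertex data instead of the shader; a rough sketch, where the vertex layout is an assumption for illustration:
// Horizontal flip by feeding 1 - u as the texture coordinate
struct QuadVertex { float x, y, z, u, v; };
QuadVertex quad[4] =
{
    //   x      y     z     u     v    (u is mirrored: 1 - original u)
    { -1.0f, -1.0f, 0.0f, 1.0f, 1.0f },
    { -1.0f,  1.0f, 0.0f, 1.0f, 0.0f },
    {  1.0f, -1.0f, 0.0f, 0.0f, 1.0f },
    {  1.0f,  1.0f, 0.0f, 0.0f, 0.0f },
};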
Edit: If you aren't actually doing any rendering then there are far better ways to do image manipulation. However if you want to do it manually you will have to use map and flip the data round yourself.
You could do that as follows (The code is not tested so please excuse any compile errors):
ID3D10Resource *backbufferRes;
renderTargetView->GetResource(&backbufferRes);
ID3D10Texture2D *mRenderedTexture;
// Create our texture. To Map() it for CPU access below it must be a
// staging texture with CPU read/write access.
D3D10_TEXTURE2D_DESC texDesc;
texDesc.ArraySize = 1;
texDesc.BindFlags = 0;
texDesc.CPUAccessFlags = D3D10_CPU_ACCESS_READ | D3D10_CPU_ACCESS_WRITE;
texDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
texDesc.Width = 640; // must be same as backbuffer
texDesc.Height = 480; // must be same as backbuffer
texDesc.MipLevels = 1;
texDesc.MiscFlags = 0;
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.Usage = D3D10_USAGE_STAGING;
d3d10Device->CreateTexture2D(&texDesc, 0, &mRenderedTexture);
d3d10Device->CopyResource(mRenderedTexture, backbufferRes);
D3D10_MAPPED_TEXTURE2D d3d10MT = { 0 };
mRenderedTexture->Map(0, D3D10_MAP_READ_WRITE, 0, &d3d10MT);
// Reverse each row of 32-bit pixels in place (horizontal mirror).
// RowPitch is in bytes and may be larger than Width * 4.
unsigned int* pPix = (unsigned int*)d3d10MT.pData;
unsigned int rowPitch = d3d10MT.RowPitch / sizeof(unsigned int);
for (UINT row = 0; row < texDesc.Height; row++)
{
    unsigned int* pRowStart = pPix + row * rowPitch;
    unsigned int* pRowEnd = pRowStart + texDesc.Width;
    std::reverse(pRowStart, pRowEnd); // from <algorithm>
}
mRenderedTexture->Unmap(0);
D3DX10SaveTextureToFile(mRenderedTexture, D3DX10_IFF_PNG, L"test.png");
From the doc, by the way:
D3DX10_FILTER_MIRROR_U: Pixels off the edge of the texture on the u-axis should be mirrored, not wrapped.
So that flag only affects pixels sampled off the edge of the texture during filtering; it does not mirror the image itself.
I am new to this website, so please let me know if I have made any mistakes in my post.
I have some questions about calculating and drawing a histogram in JavaCV. Below is the code I have written based on information I found:
This is the error I get: OpenCV Error: One of arguments' values is out of range (index is out of range) in unknown function, file ......\src\opencv\modules\core\src\array.cpp, line 1691
private CvHistogram getHistogram(IplImage image) {//get histogram data, input has been converted to grayscale beforehand
IplImage[] hsvImage1 = {image};
//bins and value-range
int numberOfBins = 256;
float minRange = 0.0f;
float maxRange = 255.0f;
// Allocate histogram object
int dims = 1;
int[] sizes = new int[]{numberOfBins};
int histType = CV_HIST_ARRAY;
float[] minMax = new float[]{minRange, maxRange};
float[][] ranges = new float[][]{minMax};
CvHistogram hist = cvCreateHist(dims, sizes, histType, ranges, 1);
cvCalcHist(hsvImage1, hist, 0, null);
return hist;
}
private IplImage DrawHistogram(CvHistogram hist, IplImage image) {//draw histogram
int scaleX = 1;
int scaleY = 1;
int i;
float[] max_value = {0};
int[] int_value = {0};
cvGetMinMaxHistValue(hist, max_value, max_value, int_value, int_value);//get min and max value for histogram
IplImage imgHist = cvCreateImage(cvSize(256, image.height() ),IPL_DEPTH_8U,1);//create image to store histogram
cvZero(imgHist);
CvPoint pts = new CvPoint(5);
for (i = 0; i < 256; i++) {//draw the histogram
float value = opencv_legacy.cvQueryHistValue_1D(hist, i);
float nextValue = opencv_legacy.cvQueryHistValue_1D(hist, i + 1);
pts.position(0).x(i * scaleX).y(image.height() * scaleY);
pts.position(1).x(i * scaleX + scaleX).y(image.height() * scaleY);
pts.position(2).x(i * scaleX + scaleX).y((int)((image.height() - nextValue * image.height() /max_value[0]) * scaleY));
pts.position(3).x(i * scaleX).y((int)((image.height() - value * image.height() / max_value[0]) * scaleY));
pts.position(4).x(i * scaleX).y(image.height() * scaleY);
cvFillConvexPoly(imgHist, pts.position(0), 5, CvScalar.RED, CV_AA, 0);
}
return imgHist;
}
I have tried the few links that I provided at the bottom; however, each of them is in a different language, so I am not sure I have converted them to Java correctly. To be honest, there are a few things I doubt and would be glad for advice on, such as:
float[] max_value = {0}; // found online; it gets past a syntax error in cvGetMinMaxHistValue(), but I am not sure whether it causes a logic error
pts.position(3).x(i * scaleX).y((int)((image.height() - value * image.height() / max_value[0]) * scaleY)); // the (int) cast downcasts to the type pts expects; also, max_value[0] starts at 0, so I wonder whether the division is a logic error
Links used:
//use this
public CvHistogram getHistogram(IplImage image) {//get histogram data, input has been converted to grayscale beforehand
IplImageArray hsvImage1 = splitChannels(image);
//bins and value-range
int numberOfBins = 256;
float minRange = 0.0f;
float maxRange = 255.0f;
// Allocate histogram object
int dims = 1;
int[] sizes = new int[]{numberOfBins};
int histType = CV_HIST_ARRAY;
float[] minMax = new float[]{minRange, maxRange};
float[][] ranges = new float[][]{minMax};
CvHistogram hist = cvCreateHist(dims, sizes, histType, ranges, 1);
cvCalcHist(hsvImage1, hist, 0, null);
return hist;
}
private IplImageArray splitChannels(IplImage hsvImage) {
CvSize size = hsvImage.cvSize();
int depth = hsvImage.depth();
IplImage channel0 = cvCreateImage(size, depth, 1);
IplImage channel1 = cvCreateImage(size, depth, 1);
IplImage channel2 = cvCreateImage(size, depth, 1);
cvSplit(hsvImage, channel0, channel1, channel2, null);
return new IplImageArray(channel0, channel1, channel2);
}
Your error is in this part:
for (i = 0; i < 256; i++) {//draw the histogram
float value = opencv_legacy.cvQueryHistValue_1D(hist, i);
float nextValue = opencv_legacy.cvQueryHistValue_1D(hist, i + 1);
You use i + 1, so on the last iteration the index 256 falls outside the 256-bin histogram, which causes the out-of-range error; run the loop only up to 255 (i < 255) to correct it.
I hope I helped you. GL
I want to track a color within an image. I use the following code (javaCV):
//Load initial image.
iplRGB = cvLoadImage(imageFile, CV_LOAD_IMAGE_UNCHANGED);
//Prepare for HSV
iplHSV = cvCreateImage(iplRGB.cvSize(), iplRGB.depth(), iplRGB.nChannels());
//Transform RGB to HSV
cvCvtColor(iplRGB, iplHSV, CV_BGR2HSV);
//Define a region of interest.
//minRow = 0; maxRow = iplHSV.height();
//minCol = 0; maxCol = iplHSV.width();
minRow = 197; minCol = 0; maxRow = 210; maxCol = 70;
//Print each HSV for each pixel of the region.
for (int y = minRow; y < maxRow; y++){
for (int x = minCol; x < maxCol; x++) {
CvScalar pixelHsv = cvGet2D(iplHSV, y, x);
double h = pixelHsv.val(0);
double s = pixelHsv.val(1);
double v = pixelHsv.val(2);
String line = y + "," + x + "," + h + "," + s + "," + v;
System.out.println(line);
}
}
I can easily find the minimum and maximum for hue and saturation from the output. Let's call them minHue, minSat, maxHue and maxSat (not fancy, hey!). Then I execute this code:
iplMask = cvCreateImage(iplHSV.cvSize(), iplHSV.depth(), 1);
CvScalar min = cvScalar(minHue, minSat, 0, 0);
CvScalar max = cvScalar(maxHue, maxSat, 255 ,0);
cvInRangeS(iplHSV, min, max, iplMask);
When I show the iplMask, shouldn't I see the region of interest entirely white? I don't: I see the contour being white, but the inside of the rectangle is black. I must be messing something up, but I do not understand what.
I know that hue is in [0..179] with OpenCV and that saturation and value are in [0..255], but since I use the values displayed by OpenCV, I would think I do not have to rescale...
Anyway, I am lost. Can somebody explain? Thanks.
I'm using the Hough transform in OpenCV to detect lines. However, I know in advance that I only need lines within a very limited range of angles (about 10 degrees or so). I'm doing this in a very performance sensitive setting, so I'd like to avoid the extra work spent detecting lines at other angles, lines I know in advance I don't care about.
I could extract the Hough source from OpenCV and just hack it to take min_theta and max_theta parameters, but I'd like a less fragile approach (having to manually update my code with each OpenCV release, etc.).
What's the best approach here?
Well, I've modified the icvHoughLinesStandard function to scan only a certain range of angles. I'm sure there are cleaner ways that play with memory allocation as well, but I got a speed gain from 100 ms down to 33 ms when going from a 180-degree range to a 60-degree range, so I'm happy with that.
Note that this code also outputs the accumulator value. Also, I only output one line because that fit my purposes, but there was no real gain there.
// CvLinePolar extended with the accumulator (vote) count
struct CvLinePolar2
{
    float rho;
    float angle;
    int votes;
};

static void
icvHoughLinesStandard2( const CvMat* img, float rho, float theta,
                        int threshold, CvSeq *lines, int linesMax )
{
cv::AutoBuffer<int> _accum, _sort_buf;
cv::AutoBuffer<float> _tabSin, _tabCos;
const uchar* image;
int step, width, height;
int numangle, numrho;
int total = 0;
float ang;
int r, n;
int i, j;
float irho = 1 / rho;
double scale;
CV_Assert( CV_IS_MAT(img) && CV_MAT_TYPE(img->type) == CV_8UC1 );
image = img->data.ptr;
step = img->step;
width = img->cols;
height = img->rows;
numangle = cvRound(CV_PI / theta);
numrho = cvRound(((width + height) * 2 + 1) / rho);
_accum.allocate((numangle+2) * (numrho+2));
_sort_buf.allocate(numangle * numrho);
_tabSin.allocate(numangle);
_tabCos.allocate(numangle);
int *accum = _accum, *sort_buf = _sort_buf;
float *tabSin = _tabSin, *tabCos = _tabCos;
memset( accum, 0, sizeof(accum[0]) * (numangle+2) * (numrho+2) );
// find n and ang limits (in our case we want 60 to 120 degrees)
float limit_min = 60.0f / 180.0f * (float)CV_PI;
float limit_max = 120.0f / 180.0f * (float)CV_PI;
//num_steps = (limit_max - limit_min)/theta;
int start_n = floor(limit_min/theta);
int stop_n = floor(limit_max/theta);
for( ang = limit_min, n = start_n; n < stop_n; ang += theta, n++ )
{
tabSin[n] = (float)(sin(ang) * irho);
tabCos[n] = (float)(cos(ang) * irho);
}
// stage 1. fill accumulator
for( i = 0; i < height; i++ )
for( j = 0; j < width; j++ )
{
if( image[i * step + j] != 0 )
//
for( n = start_n; n < stop_n; n++ )
{
r = cvRound( j * tabCos[n] + i * tabSin[n] );
r += (numrho - 1) / 2;
accum[(n+1) * (numrho+2) + r+1]++;
}
}
int max_accum = 0;
int max_ind = 0;
for( r = 0; r < numrho; r++ )
{
for( n = start_n; n < stop_n; n++ )
{
int base = (n+1) * (numrho+2) + r+1;
if (accum[base] > max_accum)
{
max_accum = accum[base];
max_ind = base;
}
}
}
CvLinePolar2 line;
scale = 1./(numrho+2);
int idx = max_ind;
n = cvFloor(idx*scale) - 1;
r = idx - (n+1)*(numrho+2) - 1;
line.rho = (r - (numrho - 1)*0.5f) * rho;
line.angle = n * theta;
line.votes = accum[idx];
cvSeqPush( lines, &line );
}
If you use the probabilistic Hough transform, each detected line is returned as a pair of CvPoint endpoints (lines[0] and lines[1]). You can read the x and y coordinates of the two points from pt1.x, pt1.y and pt2.x, pt2.y.
Then use the simple formula for the slope of a line, (y2 - y1) / (x2 - x1); taking the arctangent of that yields the angle in radians. Then simply filter out the desired angles from the values obtained for each Hough line.
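A minimal sketch of that filtering with the C++ API, assuming the segments came from cv::HoughLinesP and using an illustrative 10-degree window around horizontal:
#include <opencv2/imgproc/imgproc.hpp>
#include <cmath>
#include <vector>

// Keep only the segments whose angle is within maxAngleDeg of horizontal.
std::vector<cv::Vec4i> filterByAngle(const std::vector<cv::Vec4i>& segments,
                                     double maxAngleDeg)
{
    std::vector<cv::Vec4i> kept;
    for (size_t k = 0; k < segments.size(); k++)
    {
        const cv::Vec4i& s = segments[k]; // (x1, y1, x2, y2)
        // atan2 also handles vertical segments, where the plain slope
        // formula would divide by zero
        double angle = std::atan2((double)(s[3] - s[1]),
                                  (double)(s[2] - s[0])) * 180.0 / CV_PI;
        if (std::fabs(angle) <= maxAngleDeg)
            kept.push_back(s);
    }
    return kept;
}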
I think it's more natural to use the standard HoughLines(...) function, which gives a collection of lines directly in rho and theta terms, and to select the necessary angle range from it, rather than recalculating the angle from segment endpoints.
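A rough sketch of that approach; the rho/theta resolution and the vote threshold below are illustrative values, not prescribed ones:
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>

// Run the standard Hough transform and keep only lines whose theta falls
// inside [minTheta, maxTheta] (radians).
std::vector<cv::Vec2f> linesInRange(const cv::Mat& edges,
                                    float minTheta, float maxTheta)
{
    std::vector<cv::Vec2f> all, kept;
    cv::HoughLines(edges, all, 1.0, CV_PI / 180.0, 100);
    for (size_t k = 0; k < all.size(); k++)
    {
        float theta = all[k][1]; // each line is (rho, theta)
        if (theta >= minTheta && theta <= maxTheta)
            kept.push_back(all[k]);
    }
    return kept;
}
Note that this does not speed up the transform itself; it only makes the selection simpler than recomputing angles from endpoints.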