I have the following problem: everything works fine, but when I reach the calibrateCamera part I get the following error:
OpenCV Error: Assertion failed (ni > 0 && ni == ni1) in unknown function, file ......\src\opencv\modules\calib3d\src\calibration.cpp, line 3197
I am new here, and maybe my code isn't as clear as it could be, but please be friendly.
Thank you very much in advance.
My code is below (I deleted the include and pragma parts because they were rendered in really large letters here):
int _tmain()
{
printf("Everything loaded. Press Enter to continue.\n\a");
getch();
system("cls"); //bildschirm clearen
int numBoards = 0;
int numCornersHor;
int numCornersVer;
char stCurPath[200];
int numFiles = 0;
char stRemFileNum[200];
int curNum;
vector<string> file_names;
string s;
bool pathok = false;
#pragma region ask user for path to load images and list them
// ask user for path to load images and list them
while(pathok == false)
{
fflush(stdin);
printf("Enter path to the folder where the pictures are:\n");
scanf("%199[^\n]s", stCurPath); //achtung wegen bufferoverflow - nicht mehr als 199 zeichen + EOF (0) einlesen.
//[^\n] wegen Leerzeichen. scanf liest bis Enter.
system("cls");
DIR *dir;
struct dirent *ent;
dir = opendir (stCurPath);
if (dir != NULL) {
/* print all the files and directories within directory */
printf("Your chosen path:\n%s\n\nFiletree of this path:\n", stCurPath);
while ((ent = readdir (dir)) != NULL)
{
numFiles++; // number of files
printf ("%d)\t%s\n",numFiles, ent->d_name);
s = ent->d_name;
file_names.push_back(s);
}
closedir (dir);
fflush( stdout );
printf ("\nNumber of found files: %d\n", numFiles);
pathok = true;
}
else
{
/* could not open directory */
printf ("Could not open directory. Make sure path is ok!\n\n");
pathok = false;
/*perror ("");
return EXIT_FAILURE*/;
}
}//while(pathok == false)
/*****************************end ask user for path to load images and list them****************************************/
#pragma endregion
#pragma region ask user to exclude some files
/*****************************************ask user to exclude some files*************************/
printf ("\nEnter the number of the files you dont want to load (number only!).\nSeperate single files with comma.\nEnter 0 if you don't want to exclude files.\nEnter x to exclude all non bmp files.\n\nNumbers:\n");
scanf("%199s", stRemFileNum); //achtung wegen bufferoverflow - nicht mehr als 199 zeichen + EOF (0) einlesen
vector<string>::iterator it;
if(strcmp(stRemFileNum,"x") == 0)
{
curNum=1;
numFiles = 0;
string extstr;
const char * extc;
file_names.erase(remove_if(file_names.begin(),
file_names.end(),
isBmpExtension), file_names.end());
system("cls"); //bildschirm clearen
printf("New Filetree of this path:\n\n");
it = file_names.begin();
for(it; it != file_names.end(); ++it)
{
numFiles++;
printf ("%d)\t%s\n",numFiles, (*it).c_str());
}
printf ("\nNumber of found files: %d\n", numFiles);
}//if(strcmp(stRemFileNum,"x") == 0)
//end user entered x
//start user entered 0
if(strcmp(stRemFileNum,"0") != 0 && strcmp(stRemFileNum,"x") != 0)
{
// error
numFiles = 0;
vector<string> numbersVector;
string strNumbers = stRemFileNum;
Tokenize(strNumbers, numbersVector, ",");
sort(numbersVector.begin(), numbersVector.end(), strCompDesc);
for(it = numbersVector.begin(); it != numbersVector.end(); ++it)
{
curNum = atoi((*it).c_str());
file_names.erase(file_names.begin() + (curNum - 1));
}
system("cls"); //bildschirm clearen
printf("New Filetree of this path:\n\n");
// end error
for(vector<string>::iterator it = file_names.begin(); it != file_names.end(); ++it)
{
numFiles++;
printf ("%d)\t%s\n",numFiles, (*it).c_str());
}
printf ("\nNumber of found files: %d\n", numFiles);
}
//user entered 0
else if(strcmp(stRemFileNum,"0") == 0)
{
printf ("\nNo files excluded.\n");
}
/*****************end ask user to exclude some files *******************************************/
#pragma endregion
fflush(stdin);
printf("\nEnter number of corners along width: ");
scanf("%d", &numCornersHor);
fflush(stdin);
printf("Enter number of corners along height: ");
scanf("%d", &numCornersVer);
int numSquares = numCornersHor * numCornersVer;
Size board_sz = Size(numCornersHor, numCornersVer);
vector<vector<Point3f>> object_points;
vector<vector<Point2f>> image_points;
vector<Point2f> corners;
vector<Point3f> obj;
int pictures_done=0;
Mat image;
Mat gray_image;
// display the images for manual evaluation +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//
for(vector<string>::iterator it = file_names.begin(); it != file_names.end(); ++it)
{
printf("\nLoading Picture..\n");
image = imread(string(stCurPath) + "\\" + (*it).c_str()); // read as color; double backslash or a normal one
//resize(image, image, Size(0,0), 0.5, 0.5, INTER_AREA);
cvtColor(image,gray_image,CV_RGB2GRAY);
for(int j=0;j<numSquares;j++)
{
obj.push_back(Point3f(j/numCornersHor, j%numCornersHor, 0.0f));
}//for(int j=0;j<numSquares;j++)
bool found = findChessboardCorners(image, board_sz, corners, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
if(found)
{
cornerSubPix(gray_image, corners, Size(11, 11), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1));
drawChessboardCorners(gray_image, board_sz, corners, found);
}
cvNamedWindow("win1", 1);
imshow("win1", gray_image);
waitKey(30);
cvMoveWindow("win1",0,0);
waitKey(30);
printf("\nPicture loaded.\nPress a to load original, s to store snap and show next picture,");
printf("\nd to drop snap and show next picture, and f to close the programm\n");
char key;
bool bOriginalDisplayed = false;
while(1)
{
if (cin.rdbuf()->in_avail())
{
key = _getch();
}
if('a' == key && found!=0)
{
if (!bOriginalDisplayed)
{
printf("\nLoading original..\n");
cvNamedWindow("win2", 1);
cvMoveWindow("win2",0,0);
imshow("win2", image); //oder imshow cvMoveWindow("Smile", 100, 100);
bOriginalDisplayed = true;
waitKey(50);
printf("\nOriginal loaded.\nPess a again to close original before you continue.\n");
}
else
{
cvDestroyWindow("win2");
printf("\nOriginal closed.\n");
bOriginalDisplayed = false;
}
}
if('s'==key)
{
image_points.push_back(corners);
object_points.push_back(obj);
printf("\nSnap stored!\n");
pictures_done++;
found = false;
break;
}
if('d' == key)
{
pictures_done++;
break;
found = false;
}
if('f' == key)
{
return 0;
}
Sleep(50);
} //while (1)
} //for(vector<string>::iterator it = file_names.begin(); it != file_names.end(); ++it)
cvDestroyWindow("win1");
waitKey(50);
Mat intrinsic = Mat(3, 3, CV_32FC1);
Mat distCoeffs;
vector<Mat> rvecs;
vector<Mat> tvecs;
intrinsic.ptr<float>(0)[0] = 1;
intrinsic.ptr<float>(1)[1] = 1;
calibrateCamera(object_points, image_points, image.size(), intrinsic, distCoeffs, rvecs, tvecs);
Mat imageUndistorted;
for(vector<string>::iterator it = file_names.begin(); it != file_names.end(); ++it)
{
printf("\nLoading undistorted Picture..\n");
image = imread(string(stCurPath) + "\\" + (*it).c_str()); // read as color; double backslash or a normal one
//resize(image, image, Size(0,0), 0.5, 0.5, INTER_AREA);
cvtColor(image,gray_image,CV_RGB2GRAY);
cvNamedWindow("win1", 1);
cvNamedWindow("win2", 1);
cvMoveWindow("win1",0,0);
cvMoveWindow("win2",0,0);
undistort(image, imageUndistorted, intrinsic, distCoeffs);
imshow("win1", image);
waitKey(0);
imshow("win2", imageUndistorted);
waitKey(30);
printf("\nPicture loaded. Press s for the next picture or f to exit.");
char key;
bool bOriginalDisplayed = false;
while(1)
{
if (cin.rdbuf()->in_avail())
{
key = _getch();
}
if('s'==key)
{
break;
}
if('f' == key)
{
return 0;
}
Sleep(50);
} //while (1)
} //for(vector<string>::iterator it = file_names.begin(); it != file_names.end(); ++it)
return 0;
}
I found the problem, and I'm posting the answer here for people who run into the same issue in the future.
Solution: add an obj.clear() at the start of the for loop so obj always stays the same size when you push it back. Like this:
// display the images for manual evaluation +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//
for(vector<string>::iterator it = file_names.begin(); it != file_names.end(); ++it)
{
printf("\nLoading Picture..\n");
image = imread(string(stCurPath) + "\\" + (*it).c_str()); // read as color; double backslash or a normal one
//resize(image, image, Size(0,0), 0.5, 0.5, INTER_AREA);
cvtColor(image,gray_image,CV_RGB2GRAY);
obj.clear();
for(int j=0;j<numSquares;j++)
{
obj.push_back(Point3f(j/numCornersHor, j%numCornersHor, 0.0f));
}//for(int j=0;j<numSquares;j++)
bool found = findChessboardCorners(image, board_sz, corners, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
if(found)
Have fun programming. Greets, Escore.
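For background: the assertion ni > 0 && ni == ni1 in calibration.cpp checks that every stored view has the same number of object points as image points (and at least one of each). If you want to catch the mismatch early, a quick sanity check before the calibrateCamera call could look like this (just a sketch, not required for the fix above):
// Sketch: verify that each view's object/image point counts match before calibrating.
for (size_t i = 0; i < object_points.size(); ++i)
{
    if (object_points[i].empty() || object_points[i].size() != image_points[i].size())
        printf("View %u: %u object points vs. %u image points\n",
               (unsigned)i, (unsigned)object_points[i].size(),
               (unsigned)image_points[i].size());
}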
Related
I'm trying to use string.find("中国", "中"). It succeeds on PC but fails on Android while developing my cocos-lua game.
On Android, string.find returns nil.
First, I thought their encodings might be different, so I tried to print out their bytes.
On Android: text1: "中国", text2: "中".
local text1 = self.__editBox2:getText()
local text2 = self.__editBox3:getText()
local code1 = ""
for i = 1, string.len(text1) do
code1 = code1 .. "-" .. tostring(string.byte(text1, i))
end
local code2 = ""
for i = 1, string.len(text2) do
code2 = code2 .. "-" .. tostring(string.byte(text2, i))
end
self.__editBox2:setText(code1)
self.__editBox3:setText(code2)
local a, b = string.find(text1, text2)
local data = tostring(a) .. ":" .. tostring(b)
self.__editBox1:setText(data)
text1:
228-184-173-229-155-189
text2:
228-184-173
The output is still:
nil:nil
PS: Lua's implementation of string.find:
static int str_find_aux (lua_State *L, int find) {
size_t l1, l2;
const char *s = luaL_checklstring(L, 1, &l1);
const char *p = luaL_checklstring(L, 2, &l2);
ptrdiff_t init = posrelat(luaL_optinteger(L, 3, 1), l1) - 1;
if (init < 0) init = 0;
else if ((size_t)(init) > l1) init = (ptrdiff_t)l1;
if (find && (lua_toboolean(L, 4) || /* explicit request? */
strpbrk(p, SPECIALS) == NULL)) { /* or no special characters? */
/* do a plain search */
const char *s2 = lmemfind(s+init, l1-init, p, l2);
if (s2) {
lua_pushinteger(L, s2-s+1);
lua_pushinteger(L, s2-s+l2);
return 2;
}
}
else {
MatchState ms;
int anchor = (*p == '^') ? (p++, 1) : 0;
const char *s1=s+init;
ms.L = L;
ms.src_init = s;
ms.src_end = s+l1;
do {
const char *res;
ms.level = 0;
if ((res=match(&ms, s1, p)) != NULL) {
if (find) {
lua_pushinteger(L, s1-s+1); /* start */
lua_pushinteger(L, res-s); /* end */
return push_captures(&ms, NULL, 0) + 2;
}
else
return push_captures(&ms, s1, res);
}
} while (s1++ < ms.src_end && !anchor);
}
lua_pushnil(L); /* not found */
return 1;
}
static int str_find (lua_State *L) {
return str_find_aux(L, 1);
}
Lua does not have proper support for Unicode characters out of the box, but there are good libraries that fix that. I have never used cocos2d and I'm not sure whether it has any add-ons to deal with this, but you could try this one: https://luarocks.org/modules/xavier-wang/luautf8. I have used it successfully once. Hope this helps!
The code below should load an image and then display a grayscale version of it in a window. Instead it just loads the image. I added printf("hello") in the loop starting with "for (int y = 0; y < image->h; y++)", but the console doesn't show "hello" unless I remove SDL_Delay(20000). With the delay removed the console does print it, but the image only flashes for a second and I can't tell whether it is a grayscale version or the same image.
#include <SDL2/SDL.h>
#include <SDL2/SDL_image.h>
#include <stdio.h>
#include "SDL2/SDL_ttf.h"
SDL_Window *window = NULL;
SDL_Surface *windowSurface = NULL;
SDL_Surface *image = NULL;
SDL_Event *event = NULL;
SDL_Texture *texture = NULL;
int main(int argc, char *argv[])
{
if(SDL_Init(SDL_INIT_VIDEO) < 0)
{
perror("Cannot initialise SDL");
SDL_Quit();
return 1;
}
else
{
window = SDL_CreateWindow("Loading_image", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, 640, 480, SDL_WINDOW_SHOWN | SDL_WINDOW_RESIZABLE);
if(window == NULL)
perror("Cannot load image");
else
{
windowSurface = SDL_GetWindowSurface(window);
image = IMG_Load("image.bmp");
if(image == NULL)
perror("Cannot load image");
else
{
SDL_BlitSurface(image, NULL, windowSurface, NULL);
}
SDL_UpdateWindowSurface(window);
SDL_Delay(20000);
}
}
SDL_UpdateTexture(texture, NULL, image->pixels, image->w * sizeof(Uint32));
image = SDL_ConvertSurfaceFormat(image,SDL_PIXELFORMAT_ARGB8888,0);
Uint32 * pixels = (Uint32 *)image->pixels;
int x = 0;
int y = 0;
for (int y = 0; y < image->h; y++)
{
for (int x = 0; x < image->w; x++)
{
Uint32 pixel = pixels[y * image->w + x];
Uint8 r=0,g=0,b=0;
SDL_GetRGB(pixel, image->format, &r,&g,&b);
Uint8 v = 0.212671f * r + 0.715160f * g + 0.072169f * b;
SDL_MapRGB(image->format,v,v,v);
}
}
int quit = 0;
while (!quit) //This loop will loop until the conditions are met e.g. You quit the renderer//
{
SDL_WaitEvent(event);// waits for the event (quitting the renderer)//
switch (event->type)
{
case SDL_QUIT:
quit = 1;
break;
}
}
SDL_FreeSurface(image);
image = NULL;
window = NULL;
windowSurface = NULL;
SDL_DestroyWindow(window);
IMG_Quit();
SDL_Quit();
return 0;
}
There are several issues with your code, mostly SDL specifics, but also some problems with the grayscale conversion.
I removed the unnecessary parts I could spot and annotated some of the changes with comments.
#include <SDL.h>
#include <SDL_image.h>
#include <stdio.h>
SDL_Window *window = NULL;
SDL_Surface *windowSurface = NULL;
SDL_Surface *image = NULL;
SDL_Event event; // You may want to use an object instead of a pointer
SDL_Texture *texture = NULL;
int main(int argc, char *argv[])
{
if (SDL_Init(SDL_INIT_VIDEO) < 0)
{
perror("Cannot initialise SDL");
SDL_Quit();
return 1;
}
else
{
window = SDL_CreateWindow("Loading_image", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, 640, 480, SDL_WINDOW_SHOWN | SDL_WINDOW_RESIZABLE);
if (window == NULL)
perror("Cannot load image"); // You may want to change this error message
else
{
windowSurface = SDL_GetWindowSurface(window);
image = IMG_Load("image.bmp");
if (image == NULL)
perror("Cannot load image");
// I removed the blitting code here, you basically don't need it here
// Rather do it in the render loop below
}
}
image = SDL_ConvertSurfaceFormat(image, SDL_PIXELFORMAT_ARGB8888, 0);
Uint32 * pixels = (Uint32 *)image->pixels;
int x = 0;
int y = 0;
for (int y = 0; y < image->h; y++)
{
for (int x = 0; x < image->w; x++)
{
Uint32 pixel = pixels[y * image->w + x];
Uint8 r = 0, g = 0, b = 0;
SDL_GetRGB(pixel, image->format, &r, &g, &b);
Uint8 v = 0.212671f * r + 0.715160f * g + 0.072169f * b;
pixel = SDL_MapRGB(image->format, v, v, v); // Get the return value which is the pixel value
pixels[y * image->w + x] = pixel; // ...and assign it back to the pixels
}
}
int quit = 0;
while (!quit)
{
while (SDL_PollEvent(&event)) // Continous checking for events
{
switch (event.type)
{
case SDL_QUIT:
quit = 1;
break;
}
}
// "Render loop"
SDL_BlitSurface(image, NULL, windowSurface, NULL);
SDL_UpdateWindowSurface(window);
}
SDL_FreeSurface(image);
image = NULL;
window = NULL;
windowSurface = NULL;
SDL_DestroyWindow(window);
IMG_Quit();
SDL_Quit();
return 0;
}
I have some code to draw a line between two points on an image, which are selected with the mouse, and then display a histogram.
However, when I press q as required by the code, I get an error saying "R6010 abort() has been called" together with a VC++ runtime error.
Please advise me on how I can track down this error.
#include <vector>
#include "opencv2/highgui/highgui.hpp"
#include <opencv\cv.h>
#include <iostream>
#include<conio.h>
using namespace cv;
using namespace std;
struct Data_point
{
int x;
unsigned short int y;
};
int PlotMeNow(unsigned short int *values, unsigned int nSamples)
{
std::vector<Data_point> graph(nSamples);
for (unsigned int i = 0; i < nSamples; i++)
{
graph[i].x = i;
graph[i].y = values[i];
}
cv::Size imageSize(5000, 500); // your window size
cv::Mat image(imageSize, CV_8UC1);
if (image.empty()) //check whether the image is valid or not
{
std::cout << "Error : Image cannot be created..!!" << std::endl;
system("pause"); //wait for a key press
return 0;
}
else
{
std::cout << "Good job : Image created successfully..!!" << std::endl;
}
// try to do some offsetting so the graph does not hide on the x or y axis
Data_point dataOffset;
dataOffset.x = 20;
// we have to mirror the y axis!
dataOffset.y = 5000;
for (unsigned int i = 0; i<nSamples; ++i)
{
graph[i].x = (graph[i].x + dataOffset.x) * 3;
graph[i].y = (graph[i].y + dataOffset.y) / 200;
}
// draw the samples
for (unsigned int i = 0; i<nSamples - 1; ++i)
{
cv::Point2f p1;
p1.x = graph[i].x;
p1.y = graph[i].y;
cv::Point2f p2;
p2.x = graph[i + 1].x;
p2.y = graph[i + 1].y;
cv::line(image, p1, p2, 'r', 1, 4, 0);
}
cv::namedWindow("MyWindow1", CV_WINDOW_AUTOSIZE); //create a window with the name "MyWindow"
cv::imshow("MyWindow1", image); //display the image which is stored in the 'img' in the "MyWindow" window
while (true)
{
char c = cv::waitKey(10);
if (c == 'q')
break;
}
destroyWindow("MyWindow1");
destroyWindow("MyWindow"); //destroy the window with the name, "MyWindow"
return 0;
}
void IterateLine(const Mat& image, vector<ushort>& linePixels, Point p2, Point p1, int* count1)
{
LineIterator it(image, p2, p1, 8);
for (int i = 0; i < it.count; i++, it++)
{
linePixels.push_back(image.at<ushort>(it.pos())); //doubt
}
*count1 = it.count;
}
//working line with mouse
void onMouse(int evt, int x, int y, int flags, void* param)
{
if (evt == CV_EVENT_LBUTTONDOWN)
{
std::vector<cv::Point>* ptPtr = (std::vector<cv::Point>*)param;
ptPtr->push_back(cv::Point(x, y));
}
}
void drawline(Mat image, std::vector<Point>& points)
{
cv::namedWindow("Output Window");
cv::setMouseCallback("Output Window", onMouse, (void*)&points);
int X1 = 0, Y1 = 0, X2 = 0, Y2 = 0;
while (1)
{
cv::imshow("Output Window", image);
if (points.size() > 1) //we have 2 points
{
for (auto it = points.begin(); it != points.end(); ++it)
{
}
break;
}
waitKey(10);
}
//just for testing that we are getting pixel values
X1 = points[0].x;
X2 = points[1].x;
Y1 = points[0].y;
Y2 = points[1].y;
// Draw a line
line(image, Point(X1, Y1), Point(X2, Y2), 'r', 2, 8);
cv::imshow("Output Window", image);
//exit image window
while (true)
{
char c = cv::waitKey(10);
if (c == 'q')
break;
}
destroyWindow("Output Window");
}
void show_histogram_image(Mat img1)
{
int sbins = 65536;
int histSize[] = { sbins };
float sranges[] = { 0, 65536 };
const float* ranges[] = { sranges };
cv::MatND hist;
int channels[] = { 0 };
cv::calcHist(&img1, 1, channels, cv::Mat(), // do not use mask
hist, 1, histSize, ranges,
true, // the histogram is uniform
false);
double maxVal = 0;
minMaxLoc(hist, 0, &maxVal, 0, 0);
int xscale = 10;
int yscale = 10;
cv::Mat hist_image;
hist_image = cv::Mat::zeros(65536, sbins*xscale, CV_16UC1);
for (int s = 0; s < sbins; s++)
{
float binVal = hist.at<float>(s, 0);
int intensity = cvRound(binVal * 65535 / maxVal);
rectangle(hist_image, cv::Point(s*xscale, hist_image.rows),
cv::Point((s + 1)*xscale - 1, hist_image.rows - intensity),
cv::Scalar::all(65535), 1);
}
imshow("Histogram", hist_image);
waitKey(0);
}
int main()
{
vector<Point> points1;
vector<ushort>linePixels;
Mat img = cvLoadImage("desert.jpg");
if (img.empty()) //check whether the image is valid or not
{
cout << "Error : Image cannot be read..!!" << endl;
system("pause"); //wait for a key press
return -1;
}
//Draw the line
drawline(img, points1);
//now check the collected points
Mat img1 = cvLoadImage("desert.jpg");
if (img1.empty()) //check whether the image is valid or not
{
cout << "Error : Image cannot be read..!!" << endl;
system("pause"); //wait for a key press
return -1;
}
int *t = new int;
IterateLine( img1, linePixels, points1[1], points1[0], t );
PlotMeNow(&linePixels[0], t[0]);
show_histogram_image(img);
delete t;
_getch();
return 0;
}
This is one of the bad smells in your code:
void IterateLine(const Mat& image, vector<ushort>& linePixels, Point p2, Point p1, int* count1)
{
...
linePixels.push_back(image.at<ushort>(it.pos())); //doubt
Now image is a CV_8UC3 image (from Mat img1 = cvLoadImage("desert.jpg");), but you are accessing it here as if it were CV_16UC1, so what gets put into linePixels is garbage. This will almost certainly cause PlotMeNow() to draw outside its image and corrupt something, which is probably why your code is crashing.
Since it is very unclear what your code is trying to do, I can't suggest what you should have here instead.
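That said, if all you actually need is an intensity profile along the drawn line, a minimal sketch, assuming plain 8-bit grayscale values are what you want, would be to convert the image first and read uchar:
// Sketch: sample 8-bit intensities along the line from a grayscale copy.
void IterateLineGray(const cv::Mat& imageBGR, std::vector<ushort>& linePixels,
                     cv::Point p2, cv::Point p1, int* count1)
{
    cv::Mat gray;
    cv::cvtColor(imageBGR, gray, CV_BGR2GRAY);          // gray is CV_8UC1
    cv::LineIterator it(gray, p2, p1, 8);
    for (int i = 0; i < it.count; i++, ++it)
        linePixels.push_back(gray.at<uchar>(it.pos())); // uchar matches CV_8UC1
    *count1 = it.count;
}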
I have just managed to fix this; you only have to add "-1" to your loop limit:
for (unsigned int i = 0; i < nSamples-1; i++)
{
graph[i].x = i;
graph[i].y = values[i];
}
I'm trying to load RAW formats (especially Nikon .NEF) with the LibRaw library and then convert them to the OpenCV format cv::Mat.
Has anybody solved this problem in the past? How do I do this?
I have a conversion method from LibRaw to QImage (Qt). It demonstrates the use of LibRaw and should be easy to adapt to cv::Mat.
My solution is loosely similar to this: https://github.com/mardy/qtraw/blob/master/src/raw-io-handler.cpp
Regarding the open questions in the comments on the original question, a further note: you can control white balance, chromatic aberration correction, distortion correction and much more with LibRaw: http://www.libraw.org/docs/API-datastruct-eng.html#libraw_output_params_t
Converting embedded RAW thumbnail from libraw to QImage:
LibRaw RawProcessor;
QImage thumbnail;
if( LIBRAW_SUCCESS == RawProcessor.open_file(filename)) {
if( LIBRAW_SUCCESS == RawProcessor.unpack_thumb() ) {
if (LIBRAW_THUMBNAIL_JPEG == RawProcessor.imgdata.thumbnail.tformat ) {
thumbnail.loadFromData((uchar*)RawProcessor.imgdata.thumbnail.thumb,
RawProcessor.imgdata.thumbnail.tlength,
"JPEG");
LibRawImagePerformFlip(RawProcessor.imgdata.sizes.flip, thumbnail);
} else if (LIBRAW_THUMBNAIL_BITMAP == RawProcessor.imgdata.thumbnail.tformat) {
thumbnail = LibRawImageToQImage(
(uchar*)RawProcessor.imgdata.thumbnail.thumb,
RawProcessor.imgdata.thumbnail.twidth,
RawProcessor.imgdata.thumbnail.theight,
RawProcessor.imgdata.thumbnail.tcolors);
} // else: could not read
}
RawProcessor.recycle();
}
Converting full RAW image from libraw to QImage:
LibRaw RawProcessor;
QImage image;
RawProcessor.imgdata.params.gamm[0] = 1.0;
RawProcessor.imgdata.params.gamm[1] = 0.0;
RawProcessor.imgdata.params.user_qual = 0; // fastest interpolation (linear)
RawProcessor.imgdata.params.use_camera_wb = 1;
if( LIBRAW_SUCCESS == RawProcessor.open_file(filename) ) {
if( LIBRAW_SUCCESS == RawProcessor.unpack() ) {
if (LIBRAW_SUCCESS == RawProcessor.dcraw_process()) {
libraw_processed_image_t* output = RawProcessor.dcraw_make_mem_image();
if (LIBRAW_IMAGE_JPEG == output->type ) {
image.loadFromData((uchar*)output->data,
output->data_size,
"JPEG");
LibRawImagePerformFlip(RawProcessor.imgdata.sizes.flip, image);
} else if (LIBRAW_IMAGE_BITMAP == output->type) {
image= LibRawImageToQImage(
(uchar*)output->data,
output->width,
output->height,
output->colors,
output->bits);
} // else: could not read
LibRaw::dcraw_clear_mem(output);
}
RawProcessor.recycle();
}
}
with the two helper functions:
QImage LibRawImageToQImage(const uchar *data,
const int width,
const int height,
const int nCols,
const int colorBits)
{
int colorSize = (colorBits % 8) == 0 ? colorBits / 8 : ceil(colorBits / 8.0);
int numPixels = width * height;
int pixelSize = nCols * colorSize;
uchar* pixels = new uchar[numPixels * 3];
for (int i = 0; i < numPixels; i++, data += pixelSize) {
if (nCols == 3) {
// this ordering produces correct RGB results - don't ask why
// tested with .CR2 (Canon)
pixels[i * 3] = data[3*colorSize];
pixels[i * 3 + 1] = data[colorSize];
pixels[i * 3 + 2] = data[2*colorSize];
} else {
pixels[i * 3] = data[0];
pixels[i * 3 + 1] = data[0];
pixels[i * 3 + 2] = data[0];
}
}
// immediately create a copy since otherwise we'd have to
// 'delete[] pixels' somewhere else, ourselves
// see http://doc.qt.io/qt-5.5/qimage.html#QImage-6
QImage out = QImage(pixels, width, height, width * 3,
QImage::Format_RGB888).copy();
delete[] pixels;
return out;
}
void LibRawImagePerformFlip(const int flip, QImage& image)
{
if (flip != 0) {
QTransform rotation;
int angle = 0;
if (flip == 3) angle = 180;
else if (flip == 5) angle = -90;
else if (flip == 6) angle = 90;
if (angle != 0) {
rotation.rotate(angle);
image = image.transformed(rotation);
}
}
}
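Since the original question asked for cv::Mat rather than QImage, here is a rough sketch of the same dcraw_make_mem_image flow adapted to OpenCV. It assumes an 8-bit, 3-channel bitmap output and is untested, so treat it as a starting point:
#include <libraw/libraw.h>
#include <opencv2/opencv.hpp>

cv::Mat loadRawAsMat(const char* filename)
{
    LibRaw RawProcessor;
    cv::Mat result;
    if (RawProcessor.open_file(filename) != LIBRAW_SUCCESS) return result;
    if (RawProcessor.unpack() != LIBRAW_SUCCESS) return result;
    if (RawProcessor.dcraw_process() != LIBRAW_SUCCESS) return result;
    libraw_processed_image_t* output = RawProcessor.dcraw_make_mem_image();
    if (output && LIBRAW_IMAGE_BITMAP == output->type
        && 3 == output->colors && 8 == output->bits)
    {
        // Wrap LibRaw's RGB buffer, then convert to OpenCV's BGR order (cvtColor copies the data).
        cv::Mat rgb(output->height, output->width, CV_8UC3, output->data);
        cv::cvtColor(rgb, result, cv::COLOR_RGB2BGR);
    }
    if (output) LibRaw::dcraw_clear_mem(output);
    RawProcessor.recycle();
    return result;
}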
I have been trying to develop a simple feature tracking program. The user outlines an area on the screen with their mouse, and a mask is created for this area and passed to goodFeaturesToTrack. The features found by the function are then drawn on the screen (represented by blue circles).
Next I pass the feature vector returned by the function to calcOpticalFlowPyrLK and draw the resulting vector of points on the screen (represented by green circles). Although the program tracks the direction of flow correctly, for some reason the features output by the calcOpticalFlow function do not line up with the object's location on the screen.
I feel as though it is a small mistake in the logic I have used, but I just can't seem to pin it down, and I would really appreciate some help from you guys.
I have posted my code below, and I would like to greatly apologize for the global variables and messy structure. I am just testing at the moment, and plan to clean up and convert to an OOP format as soon as I get it running.
As well, here is a link to a YouTube video I have uploaded that demonstrates the behavior I am combating.
bool drawingBox = false;
bool destroyBox = false;
bool targetAcquired = false;
bool featuresFound = false;
CvRect box;
int boxCounter = 0;
cv::Point objectLocation;
cv::Mat prevFrame, nextFrame, prevFrame_1C, nextFrame_1C;
std::vector<cv::Point2f> originalFeatures, newFeatures, baseFeatures;
std::vector<uchar> opticalFlowFeatures;
std::vector<float> opticalFlowFeaturesError;
cv::TermCriteria opticalFlowTermination = cv::TermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3);
cv::Mat mask;
cv::Mat clearMask;
long currentFrame = 0;
void draw(cv::Mat image, CvRect rectangle)
{
if (drawingBox)
{
cv::rectangle(image, cv::Point(box.x, box.y), cv::Point(box.x + box.width, box.y + box.height), cv::Scalar(225, 238 , 81), 2);
CvRect rectangle2 = cvRect(box.x, box.y, box.width, box.height);
}
if (featuresFound)
{
for (int i = 0; i < originalFeatures.size(); i++)
{
cv::circle(image, baseFeatures[i], 4, cv::Scalar(255, 0, 0), 1, 8, 0);
cv::circle(image, newFeatures[i], 4, cv::Scalar(0, 255, 0),1, 8, 0);
cv::line(image, baseFeatures[i], newFeatures[i], cv::Scalar(255, 0, 0), 2, CV_AA);
}
}
}
void findFeatures(cv::Mat mask)
{
if (!featuresFound && targetAcquired)
{
cv::goodFeaturesToTrack(prevFrame_1C, baseFeatures, 200, 0.1, 0.1, mask);
originalFeatures= baseFeatures;
featuresFound = true;
std::cout << "Number of Corners Detected: " << originalFeatures.size() << std::endl;
for(int i = 0; i < originalFeatures.size(); i++)
{
std::cout << "Corner Location " << i << ": " << originalFeatures[i].x << "," << originalFeatures[i].y << std::endl;
}
}
}
void trackFeatures()
{
cv::calcOpticalFlowPyrLK(prevFrame_1C, nextFrame_1C, originalFeatures, newFeatures, opticalFlowFeatures, opticalFlowFeaturesError, cv::Size(30,30), 5, opticalFlowTermination);
originalFeatures = newFeatures;
}
void mouseCallback(int event, int x, int y, int flags, void *param)
{
cv::Mat frame;
frame = *((cv::Mat*)param);
switch(event)
{
case CV_EVENT_MOUSEMOVE:
{
if(drawingBox)
{
box.width = x-box.x;
box.height = y-box.y;
}
}
break;
case CV_EVENT_LBUTTONDOWN:
{
drawingBox = true;
box = cvRect (x, y, 0, 0);
targetAcquired = false;
cv::destroyWindow("Selection");
}
break;
case CV_EVENT_LBUTTONUP:
{
drawingBox = false;
featuresFound = false;
boxCounter++;
std::cout << "Box " << boxCounter << std::endl;
std::cout << "Box Coordinates: " << box.x << "," << box.y << std::endl;
std::cout << "Box Height: " << box.height << std::endl;
std::cout << "Box Width: " << box.width << std:: endl << std::endl;
if(box.width < 0)
{
box.x += box.width;
box.width *= -1;
}
if(box.height < 0)
{
box.y +=box.height;
box.height *= -1;
}
objectLocation.x = box.x;
objectLocation.y = box.y;
targetAcquired = true;
}
break;
case CV_EVENT_RBUTTONUP:
{
destroyBox = true;
}
break;
}
}
int main ()
{
const char *name = "Boundary Box";
cv::namedWindow(name);
cv::VideoCapture camera;
cv::Mat cameraFrame;
int cameraNumber = 0;
camera.open(cameraNumber);
camera >> cameraFrame;
cv::Mat mask = cv::Mat::zeros(cameraFrame.size(), CV_8UC1);
cv::Mat clearMask = cv::Mat::zeros(cameraFrame.size(), CV_8UC1);
if (!camera.isOpened())
{
std::cerr << "ERROR: Could not access the camera or video!" << std::endl;
}
cv::setMouseCallback(name, mouseCallback, &cameraFrame);
while(true)
{
if (destroyBox)
{
cv::destroyAllWindows();
break;
}
camera >> cameraFrame;
if (cameraFrame.empty())
{
std::cerr << "ERROR: Could not grab a camera frame." << std::endl;
exit(1);
}
camera.set(CV_CAP_PROP_POS_FRAMES, currentFrame);
camera >> prevFrame;
cv::cvtColor(prevFrame, prevFrame_1C, cv::COLOR_BGR2GRAY);
camera.set(CV_CAP_PROP_POS_FRAMES, currentFrame ++);
camera >> nextFrame;
cv::cvtColor(nextFrame, nextFrame_1C, cv::COLOR_BGR2GRAY);
if (targetAcquired)
{
cv::Mat roi (mask, cv::Rect(box.x, box.y, box.width, box.height));
roi = cv::Scalar(255, 255, 255);
findFeatures(mask);
clearMask.copyTo(mask);
trackFeatures();
}
draw(cameraFrame, box);
cv::imshow(name, cameraFrame);
cv::waitKey(20);
}
cv::destroyWindow(name);
return 0;
}
In my opinion you can't use camera.set(CV_CAP_PROP_POS_FRAMES, currentFrame) on a webcam, but I'm not positive about that.
Instead, I suggest you save the previous frame in your prevFrame variable.
As an example, here is working code; I only changed the inside of the while loop and added a comment before each of my additions:
while(true)
{
if (destroyBox)
{
cv::destroyAllWindows();
break;
}
camera >> cameraFrame;
if (cameraFrame.empty())
{
std::cerr << "ERROR: Could not grab a camera frame." << std::endl;
exit(1);
}
// new lines
if(prevFrame.empty()){
prevFrame = cameraFrame;
continue;
}
// end new lines
//camera.set(CV_CAP_PROP_POS_FRAMES, currentFrame);
//camera >> prevFrame;
cv::cvtColor(prevFrame, prevFrame_1C, cv::COLOR_BGR2GRAY);
//camera.set(CV_CAP_PROP_POS_FRAMES, currentFrame ++);
//camera >> nextFrame;
// new line
nextFrame = cameraFrame;
cv::cvtColor(nextFrame, nextFrame_1C, cv::COLOR_BGR2GRAY);
if (targetAcquired)
{
cv::Mat roi (mask, cv::Rect(box.x, box.y, box.width, box.height));
roi = cv::Scalar(255, 255, 255);
findFeatures(mask);
clearMask.copyTo(mask);
trackFeatures();
}
draw(cameraFrame, box);
cv::imshow(name, cameraFrame);
cv::waitKey(20);
// old = new
// new line
prevFrame = cameraFrame.clone();
}