How to process JPEG binary data in OpenCV? - opencv

I am trying to process JPEG binary data in OpenCV, and when I do I get a segmentation fault (core dumped).
I read the JPEG file with fread and stored it in a buffer.
After reading, I copied the buffer data to a Mat variable.
When I try to do a grayscale conversion on the copied data using the cvtColor OpenCV function, I get the segmentation fault.
int main( int argc, char** argv )
{
Mat threshold_output;
Mat gray_image;
unsigned char *pre_image;
FILE *read_image;
FILE *write_image;
int filesize;
size_t data, write;
read_image = fopen(argv[1] , "rb"); //Read Jpeg as Binary
write_image = fopen("output11.jpg", "wb"); //Write JPEG
if(read_image == NULL)
{
printf("Image Not Found\r\n");
}
fseek(read_image, 0, SEEK_END);
int fileLen = ftell(read_image);
fseek(read_image, 0, SEEK_SET);
pre_image = (unsigned char *)malloc(fileLen);
data = fread(pre_image, 1, fileLen, read_image);
write = fwrite(pre_image, 1, fileLen, write_image);
// Printed and verify the values
printf("File Size %d\r\n", fileLen);
printf("Read bytes %zu\r\n", data);
printf("Write bytes %zu\r\n", data);
fclose(read_image);
fclose(write_image);
/* Copy the Jpeg Binary buffer to a MAt Variable*/
cv::Mat image(Size(640, 480), CV_8UC3, pre_image); //Seg Fault comes here
/* Convert Grayscale */
cvtColor( image, gray_image, CV_BGR2GRAY);
/* Threshold conversion */
threshold( gray_image, threshold_output, 80, 255, THRESH_BINARY );
namedWindow( "Thresholded", CV_WINDOW_AUTOSIZE );
imshow( "Thresholded", image );
waitKey(0);
return 0;
}
I have attached the code for reference. I have verified that both fread and fwrite work properly,
but as soon as I call cvtColor I get the error.

As @Micka already pointed out, you should use cv::imdecode.
You can use it with your FILE*. You may want to use fstreams if you're using C++, or you can rely directly on OpenCV's own capabilities to read files.
The code below shows these options for reading a file. Code for writing is similar (a short sketch follows the example below).
Remember that if you want to write the binary stream back out, you should use imencode.
#include <opencv2/opencv.hpp>
#include <fstream>
#include <stdio.h>
using namespace std;
using namespace cv;
int main()
{
////////////////////////////////
// Method 1: using FILE*
////////////////////////////////
FILE* read_image = fopen("path_to_image", "rb");
if (read_image == NULL)
{
printf("Image Not Found\n");
}
fseek(read_image, 0, SEEK_END);
int fileLen = ftell(read_image);
fseek(read_image, 0, SEEK_SET);
unsigned char* pre_image = (unsigned char *)malloc(fileLen);
size_t data = fread(pre_image, 1, fileLen, read_image);
// Printed and verify the values
printf("File Size %d\n", fileLen);
printf("Read bytes %d\n", data);
fclose(read_image);
vector<unsigned char> buffer(pre_image, pre_image + data);
Mat img = imdecode(buffer, IMREAD_ANYCOLOR);
////////////////////////////////
//// Method 2: using fstreams
////////////////////////////////
//ifstream ifs("path_to_image", iostream::binary);
//filebuf* pbuf = ifs.rdbuf();
//size_t size = pbuf->pubseekoff(0, ifs.end, ifs.in);
//pbuf->pubseekpos(0, ifs.in);
//vector<char> buffer(size);
//pbuf->sgetn(buffer.data(), size);
//ifs.close();
//Mat img = imdecode(buffer, IMREAD_ANYCOLOR);
////////////////////////////////
//// Method 3: using imread
////////////////////////////////
//Mat img = imread("path_to_image", IMREAD_ANYCOLOR);
// Work with img as you want
imshow("img", img);
waitKey();
return 0;
}
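For the writing side, a minimal sketch along these lines should work, assuming img is the decoded cv::Mat from Method 1 and "out.jpg" is just a placeholder path: imencode produces the encoded JPEG byte stream, which you can then write with whatever file API you prefer.
// Sketch: encode the Mat back into a JPEG byte stream and dump it to disk
vector<unsigned char> out_buf;
if (imencode(".jpg", img, out_buf))
{
    ofstream ofs("out.jpg", iostream::binary);
    ofs.write(reinterpret_cast<const char*>(out_buf.data()), out_buf.size());
}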

OpenCV works with channel layouts like BGR and can't perform computer vision operations on ENCODED images, since encoded images don't consist of pixel data but of compressed data that first has to be transformed into pixels. OpenCV assumes that images are already decoded so it can work on pixel data.
BUT: you can take a binary image buffer (like your pre_image) and let OpenCV DECODE it.
Use cv::imdecode to do that, and afterwards you'll have a valid cv::Mat image. http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#imdecode
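A minimal sketch of that idea, reusing the pre_image buffer and fileLen from your code (IMREAD_COLOR is one possible flag; this assumes the decode happens before you free the buffer):
// Sketch: decode the raw JPEG bytes held in pre_image into actual pixel data
std::vector<unsigned char> jpeg_bytes(pre_image, pre_image + fileLen);
cv::Mat image = cv::imdecode(jpeg_bytes, cv::IMREAD_COLOR); // decoded BGR pixels
if (!image.empty())
    cvtColor(image, gray_image, CV_BGR2GRAY); // now this operates on real pixel data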

Related

Unsupported format or combination of formats (Only 8-bit, 3-channel input images are supported) in cvWatershed

Hi, I am new to image segmentation. I am trying the code below to get the foreground objects, but I get an error like "Unsupported format or combination of formats (Only 8-bit, 3-channel input images are supported) in cvWatershed".
cv::Mat img0 = [img toMat];
cv::Mat img1;
cv::cvtColor(img0, img0, CV_RGB2GRAY);
cv::threshold(img0, img0, 100, 255, cv::THRESH_BINARY);
cv::Mat fg;
cv::erode(img0,fg,cv::Mat(),cv::Point(-1,-1),6);
cv::Mat bg;
cv::dilate(img0,bg,cv::Mat(),cv::Point(-1,-1),6);
cv::threshold(bg,bg,1,128,cv::THRESH_BINARY_INV);
cv::Mat markers(img0.size(),CV_8U,cv::Scalar(0));
markers= fg+bg;
// cv::namedWindow("Markers");
// cv::imshow("Markers", markers);
WatershedSegmenter segmenter;
segmenter.setMarkers(markers);
cv::Mat result1 = segmenter.process(img0);
// cv::Mat result1;
result1.convertTo(result1,CV_8U);
UIImage * result = [UIImage imageWithMat:result1 andImageOrientation:[img imageOrientation]];
return result;
Debugging shows the error occurs at the line
cv::Mat result1 = segmenter.process(img0);
Thanks in advance
I analyzed my code again and solved the problem: convert the image to an IplImage and then change it to an 8-bit, 3-channel image using this code:
WatershedSegmenter segmenter;
segmenter.setMarkers(markers);
markers=cvCreateImage(cvGetSize(my_iplImage), IPL_DEPTH_8U, 3);
cv::Mat result1 = segmenter.process(markers);
This reminds me of an example from the book "OpenCV 2 Computer Vision Application Programming Cookbook". All you should have to do is this:
// Get the binary map
cv::Mat binary;
//binary = cv::imread("binary.bmp", 0); // prevent loading of pre-converted image
cvtColor(image, binary, CV_BGR2GRAY); // instead convert original
binary = binary < 65; // apply threshold
Whole code (excluding water segmentation header) would be something like this:
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "watershedSegmentation.h"
int main()
{
// Read input image
cv::Mat image = cv::imread("group.jpg");
if (!image.data)
return 0;
// Display the image
cv::namedWindow("Original Image");
cv::imshow("Original Image", image);
// // Get the binary map
cv::Mat binary;
//binary = cv::imread("binary.bmp", 0); // prevent loading of pre-converted image
cvtColor(image, binary, CV_BGR2GRAY); // instead convert original
binary = binary < 60; // apply threshold
// Display the binary image
cv::namedWindow("Binary Image");
cv::imshow("Binary Image", binary);
// Eliminate noise and smaller objects
cv::Mat fg;
cv::erode(binary, fg, cv::Mat(), cv::Point(-1, -1), 6);
// Display the foreground image
cv::namedWindow("Foreground Image");
cv::imshow("Foreground Image", fg);
// Identify image pixels without objects
cv::Mat bg;
cv::dilate(binary, bg, cv::Mat(), cv::Point(-1, -1), 6);
cv::threshold(bg, bg, 1, 128, cv::THRESH_BINARY_INV);
// Display the background image
cv::namedWindow("Background Image");
cv::imshow("Background Image", bg);
// Show markers image
cv::Mat markers(binary.size(), CV_8U, cv::Scalar(0));
markers = fg + bg;
cv::namedWindow("Markers");
cv::imshow("Markers", markers);
// Create watershed segmentation object
WatershedSegmenter segmenter;
// Set markers and process
segmenter.setMarkers(markers);
segmenter.process(image);
// Display segmentation result
cv::namedWindow("Segmentation");
cv::imshow("Segmentation", segmenter.getSegmentation());
// Display watersheds
cv::namedWindow("Watersheds");
cv::imshow("Watersheds", segmenter.getWatersheds());
cv::waitKey();
return 0;
}

SiftFeatureDetector.detect function broken?

I've been trying out SIFT/SURF from online resources and wanted to test it out myself.
I first tried without the non-free libraries, using this code:
int _tmain(int argc, _TCHAR* argv[])
{
Mat img = imread("c:\\car.jpg", 0);
Ptr<FeatureDetector> feature_detector = FeatureDetector::create("SIFT");
vector<KeyPoint> keypoints;
feature_detector->detect(img, keypoints);
Mat output;
drawKeypoints(img, keypoints, output, Scalar(255, 0, 0));
namedWindow("meh", CV_WINDOW_AUTOSIZE);
imshow("meh", output);
waitKey(0);
return 0;
}
Stepping through with the debugger, it breaks at feature_detector->detect(img, keypoints);
Then I tried using the non-free library with this code:
int main(int argc, char** argv)
{
const Mat input = cv::imread("/tmp/image.jpg", 0); //Load as grayscale
SiftFeatureDetector detector;
vector<KeyPoint> keypoints;
detector.detect(input, keypoints);
// Add results to image and save.
Mat output;
drawKeypoints(input, keypoints, output);
imwrite("/tmp/SIFT_RESULT.jpg", output);
return 0;
}
This again compiles without errors, but when run it breaks at this step: detector.detect(input, keypoints);
I cannot find the reason why. Can someone please help me out here?
Thank you
edit: This is the error I get when it breaks:
Unhandled exception at 0x007f0900 in SIFT.exe: 0xC0000005: Access violation reading location 0x00000000.
My setup: Microsoft Visual C++ 2010, OpenCV 2.4.2, Windows XP. All libraries are added and linked.
Use a color image, not grayscale; it works for me that way.
You could also try dropping the "const" if the color image doesn't work either.
const Mat input = cv::imread("/tmp/image.jpg");
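A minimal sketch of that suggestion with a couple of defensive checks added (the path is a placeholder; the empty-image check is my addition, not part of the original answer):
// Sketch: load in color and make sure the image actually loaded before calling detect()
Mat input = imread("/tmp/image.jpg");   // color load, no grayscale flag
if (input.empty())
    return -1;                          // avoid calling detect() on an empty Mat
SiftFeatureDetector detector;
vector<KeyPoint> keypoints;
detector.detect(input, keypoints);
Mat output;
drawKeypoints(input, keypoints, output);
imwrite("/tmp/SIFT_RESULT.jpg", output);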

OpenCV Image To Matrix In txt files

I am doing this to get each pixel value of an image and print it to the console:
include "stdafx.h"
include "opencv2/imgproc/imgproc.hpp"
include "opencv2/highgui/highgui.hpp"
include <stdlib.h>
include <stdio.h>
using namespace cv;
int main( int argc, char** argv )
{
IplImage *img = cvLoadImage("lena.jpg");
CvMat *mat = cvCreateMat(img->height,img->width,CV_32FC3 );
cvConvert( img, mat );
for(int i=0;i<10;i++)
{
for(int j=0;j<10;j++)
{
CvScalar scal = cvGet2D( mat,j,i);
printf( "(%.f,%.f,%.f) ",scal.val[0], scal.val[1], scal.val[2] );
}
printf("\n");
}
waitKey(1);
return 0;
}
Is there a better way to get each pixel value, along with the header and channel values, into txt files?
I would recommend against using the C API if you can help it. The C++ API is much easier to use. As for storage of a matrix in a file, have a look at the FileStorage class available in OpenCV.
It's as easy as:
Write
FileStorage fs("test.yml", FileStorage::WRITE);
Mat cameraMatrix = (Mat_<double>(3,3) << 1000, 0, 320, 0, 1000, 240, 0, 0, 1);
fs << "cameraMatrix" << cameraMatrix;
...
fs.release();
Read
FileStorage fs("test.yml", FileStorage::READ);
Mat cameraMatrixFromFile;
fs["cameraMatrix"] >> cameraMatrixFromFile;
...
fs.release();
Hope that helps!
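A minimal sketch tying this to the question, assuming the lena.jpg input from the original code (the output file name is a placeholder): FileStorage can serialize a whole image matrix, header and pixel data included, to a readable YAML or XML file.
// Sketch: load the image with the C++ API and dump the full pixel matrix to a YAML file
cv::Mat img = cv::imread("lena.jpg");
cv::FileStorage fs("lena_pixels.yml", cv::FileStorage::WRITE);
fs << "image" << img;   // stores rows, cols, type and every pixel value
fs.release();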

Extracting DCT coefficients from encoded images and video

Is there a way to easily extract the DCT coefficients (and quantization parameters) from encoded images and video? Any decoder software must be using them to decode block-DCT-encoded images and video, so I'm pretty sure the decoder knows what they are. Is there a way to expose them to whoever is using the decoder?
I'm implementing some video quality assessment algorithms that work directly in the DCT domain. Currently, the majority of my code uses OpenCV, so it would be great if anyone knows of a solution using that framework. I don't mind using other libraries (perhaps libjpeg, but that seems to be for still images only), but my primary concern is to do as little format-specific work as possible (I don't want to reinvent the wheel and write my own decoders). I want to be able to open any video/image (H.264, MPEG, JPEG, etc) that OpenCV can open, and if it's block DCT-encoded, to get the DCT coefficients.
In the worst case, I know that I can write up my own block DCT code, run the decompressed frames/images through it and then I'd be back in the DCT domain. That's hardly an elegant solution, and I hope I can do better.
Presently, I use the fairly common OpenCV boilerplate to open images:
IplImage *image = cvLoadImage(filename);
// Run quality assessment metric
The code I'm using for video is equally trivial:
CvCapture *capture = cvCaptureFromAVI(filename);
while (cvGrabFrame(capture))
{
IplImage *frame = cvRetrieveFrame(capture);
// Run quality assessment metric on frame
}
cvReleaseCapture(&capture);
In both cases, I get a 3-channel IplImage in BGR format. Is there any way I can get the DCT coefficients as well?
Well, I did a bit of reading and my original question seems to be an instance of wishful thinking.
Basically, it's not possible to get the DCT coefficients from H.264 video frames, for the simple reason that H.264 doesn't use a DCT. It uses a different transform (an integer transform). Next, the coefficients for that transform don't necessarily change on a frame-by-frame basis -- H.264 is smarter because it splits frames up into slices. It should be possible to get those coefficients through a special decoder, but I doubt OpenCV exposes that to the user.
For JPEG, things are a bit more positive. As I suspected, libjpeg exposes the DCT coefficients for you. I wrote a small app to show that it works (source at the end). It makes a new image using the DC term from each block. Because the DC term is equal to the block average (after proper scaling), the DC images are downsampled versions of the input JPEG image.
EDIT: fixed scaling in source
Original image (512 x 512):
DC images (64 x 64): luma, Cr, Cb, RGB
Source (C++):
#include <stdio.h>
#include <assert.h>
#include <cv.h>
#include <highgui.h>
extern "C"
{
#include "jpeglib.h"
#include <setjmp.h>
}
#define DEBUG 0
#define OUTPUT_IMAGES 1
/*
* Extract the DC terms from the specified component.
*/
IplImage *
extract_dc(j_decompress_ptr cinfo, jvirt_barray_ptr *coeffs, int ci)
{
jpeg_component_info *ci_ptr = &cinfo->comp_info[ci];
CvSize size = cvSize(ci_ptr->width_in_blocks, ci_ptr->height_in_blocks);
IplImage *dc = cvCreateImage(size, IPL_DEPTH_8U, 1);
assert(dc != NULL);
JQUANT_TBL *tbl = ci_ptr->quant_table;
UINT16 dc_quant = tbl->quantval[0];
#if DEBUG
printf("DCT method: %x\n", cinfo->dct_method);
printf
(
"component: %d (%d x %d blocks) sampling: (%d x %d)\n",
ci,
ci_ptr->width_in_blocks,
ci_ptr->height_in_blocks,
ci_ptr->h_samp_factor,
ci_ptr->v_samp_factor
);
printf("quantization table: %d\n", ci);
for (int i = 0; i < DCTSIZE2; ++i)
{
printf("% 4d ", (int)(tbl->quantval[i]));
if ((i + 1) % 8 == 0)
printf("\n");
}
printf("raw DC coefficients:\n");
#endif
JBLOCKARRAY buf =
(cinfo->mem->access_virt_barray)
(
(j_common_ptr)cinfo,
coeffs[ci],
0,
ci_ptr->v_samp_factor,
FALSE
);
for (int sf = 0; (JDIMENSION)sf < ci_ptr->height_in_blocks; ++sf)
{
for (JDIMENSION b = 0; b < ci_ptr->width_in_blocks; ++b)
{
int intensity = 0;
intensity = buf[sf][b][0]*dc_quant/DCTSIZE + 128;
intensity = MAX(0, intensity);
intensity = MIN(255, intensity);
cvSet2D(dc, sf, (int)b, cvScalar(intensity));
#if DEBUG
printf("% 2d ", buf[sf][b][0]);
#endif
}
#if DEBUG
printf("\n");
#endif
}
return dc;
}
IplImage *upscale_chroma(IplImage *quarter, CvSize full_size)
{
IplImage *full = cvCreateImage(full_size, IPL_DEPTH_8U, 1);
cvResize(quarter, full, CV_INTER_NN);
return full;
}
GLOBAL(int)
read_JPEG_file (char * filename, IplImage **dc)
{
/* This struct contains the JPEG decompression parameters and pointers to
* working space (which is allocated as needed by the JPEG library).
*/
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
/* More stuff */
FILE * infile; /* source file */
/* In this example we want to open the input file before doing anything else,
* so that the setjmp() error recovery below can assume the file is open.
* VERY IMPORTANT: use "b" option to fopen() if you are on a machine that
* requires it in order to read binary files.
*/
if ((infile = fopen(filename, "rb")) == NULL) {
fprintf(stderr, "can't open %s\n", filename);
return 0;
}
/* Step 1: allocate and initialize JPEG decompression object */
cinfo.err = jpeg_std_error(&jerr);
/* Now we can initialize the JPEG decompression object. */
jpeg_create_decompress(&cinfo);
/* Step 2: specify data source (eg, a file) */
jpeg_stdio_src(&cinfo, infile);
/* Step 3: read file parameters with jpeg_read_header() */
(void) jpeg_read_header(&cinfo, TRUE);
/* We can ignore the return value from jpeg_read_header since
* (a) suspension is not possible with the stdio data source, and
* (b) we passed TRUE to reject a tables-only JPEG file as an error.
* See libjpeg.txt for more info.
*/
/* Step 4: set parameters for decompression */
/* In this example, we don't need to change any of the defaults set by
* jpeg_read_header(), so we do nothing here.
*/
jvirt_barray_ptr *coeffs = jpeg_read_coefficients(&cinfo);
IplImage *y = extract_dc(&cinfo, coeffs, 0);
IplImage *cb_q = extract_dc(&cinfo, coeffs, 1);
IplImage *cr_q = extract_dc(&cinfo, coeffs, 2);
IplImage *cb = upscale_chroma(cb_q, cvGetSize(y));
IplImage *cr = upscale_chroma(cr_q, cvGetSize(y));
cvReleaseImage(&cb_q);
cvReleaseImage(&cr_q);
#if OUTPUT_IMAGES
cvSaveImage("y.png", y);
cvSaveImage("cb.png", cb);
cvSaveImage("cr.png", cr);
#endif
*dc = cvCreateImage(cvGetSize(y), IPL_DEPTH_8U, 3);
assert(dc != NULL);
cvMerge(y, cr, cb, NULL, *dc);
cvReleaseImage(&y);
cvReleaseImage(&cb);
cvReleaseImage(&cr);
/* Step 7: Finish decompression */
(void) jpeg_finish_decompress(&cinfo);
/* We can ignore the return value since suspension is not possible
* with the stdio data source.
*/
/* Step 8: Release JPEG decompression object */
/* This is an important step since it will release a good deal of memory. */
jpeg_destroy_decompress(&cinfo);
fclose(infile);
return 1;
}
int
main(int argc, char **argv)
{
int ret = 0;
if (argc != 2)
{
fprintf(stderr, "usage: %s filename.jpg\n", argv[0]);
return 1;
}
IplImage *dc = NULL;
ret = read_JPEG_file(argv[1], &dc);
assert(dc != NULL);
IplImage *rgb = cvCreateImage(cvGetSize(dc), IPL_DEPTH_8U, 3);
cvCvtColor(dc, rgb, CV_YCrCb2RGB);
#if OUTPUT_IMAGES
cvSaveImage("rgb.png", rgb);
#else
cvNamedWindow("DC", CV_WINDOW_AUTOSIZE);
cvShowImage("DC", rgb);
cvWaitKey(0);
#endif
cvReleaseImage(&dc);
cvReleaseImage(&rgb);
return 0;
}
You can use libjpeg to extract the DCT data of your JPEG file, but for an H.264 video file I can't find any open-source code that gives you the DCT data (actually integer-transform data). You can, however, use H.264 open-source software like JM, JSVM or x264. In those sources you have to find the specific function that applies the transform and change it to your desired form to get your output DCT data.
For Image:
Use the following code; after read_jpeg_file( infilename, v, quant_tbl ) returns, v and quant_tbl will hold the DCT data and the quantization table of your JPEG image, respectively.
I used QVector to store my output data; change it to your preferred C++ container.
#include <iostream>
#include <stdio.h>
#include <jpeglib.h>
#include <stdlib.h>
#include <setjmp.h>
#include <fstream>
#include <QVector>
int read_jpeg_file( char *filename, QVector<QVector<int> > &dct_coeff, QVector<unsigned short> &quant_tbl)
{
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
FILE * infile;
if ((infile = fopen(filename, "rb")) == NULL) {
fprintf(stderr, "can't open %s\n", filename);
return 0;
}
cinfo.err = jpeg_std_error(&jerr);
jpeg_create_decompress(&cinfo);
jpeg_stdio_src(&cinfo, infile);
(void) jpeg_read_header(&cinfo, TRUE);
jvirt_barray_ptr *coeffs_array = jpeg_read_coefficients(&cinfo);
for (int ci = 0; ci < 1; ci++)
{
JBLOCKARRAY buffer_one;
JCOEFPTR blockptr_one;
jpeg_component_info* compptr_one;
compptr_one = cinfo.comp_info + ci;
for (int by = 0; by < compptr_one->height_in_blocks; by++)
{
buffer_one = (cinfo.mem->access_virt_barray)((j_common_ptr)&cinfo, coeffs_array[ci], by, (JDIMENSION)1, FALSE);
for (int bx = 0; bx < compptr_one->width_in_blocks; bx++)
{
blockptr_one = buffer_one[0][bx];
QVector<int> tmp;
for (int bi = 0; bi < 64; bi++)
{
tmp.append(blockptr_one[bi]);
}
dct_coeff.push_back(tmp);
}
}
}
// quantization table
j_decompress_ptr dec_cinfo = (j_decompress_ptr) &cinfo;
jpeg_component_info *ci_ptr = &dec_cinfo->comp_info[0];
JQUANT_TBL *tbl = ci_ptr->quant_table;
for(int ci =0 ; ci < 64; ci++){
quant_tbl.append(tbl->quantval[ci]);
}
return 1;
}
int main()
{
QVector<QVector<int> > v;
QVector<unsigned short> quant_tbl;
char *infilename = "your_image.jpg";
std::ofstream out;
out.open("out_dct.txt");
if( read_jpeg_file( infilename, v, quant_tbl ) > 0 ){
for(int j = 0; j < v.size(); j++ ){
for (int i = 0; i < v[0].size(); ++i){
out << v[j][i] << "\t";
}
out << "---------------" << std::endl;
}
out << "\n\n\n" << std::string(10,'-') << std::endl;
out << "\nQauntization Table:" << std::endl;
for(int i = 0; i < quant_tbl.size(); i++ ){
out << quant_tbl[i] << "\t";
}
}
else{
std::cout << "Can not read, Returned With Error";
return -1;
}
out.close();
return 0;
}

weird binary image values generated by cvThreshold

Hey guys, I am using OpenCV to do some vehicle recognition work. When I use cvThreshold to convert a gray image to a binary image, the returned image is really strange: a binary image is supposed to have only two values, 0 and 255, but it contains other values like 2, 3, 253, 254. Does anyone know how this happens? cvCmpS also has this problem.
cvThreshold has a variety of behaviours beyond normal binary thresholding. They are described in the OpenCV API reference.
For example, if you call it with the threshold_type flag set to CV_THRESH_TRUNC, it will only truncate intensities above the specified threshold; intensities below the threshold remain untouched. Perhaps this accounts for your strange result?
If you post the image and your code (the bit that calls cvThreshold is enough) I could probably be of more help.
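A minimal sketch of the difference (my own illustration with placeholder values, assuming grey is a valid single-channel 8-bit image): CV_THRESH_BINARY yields only 0 and 255, while CV_THRESH_TRUNC leaves values at or below the threshold untouched.
// Sketch: same input, two threshold types
IplImage *bin = cvCreateImage(cvGetSize(grey), IPL_DEPTH_8U, 1);
IplImage *trunc = cvCreateImage(cvGetSize(grey), IPL_DEPTH_8U, 1);
cvThreshold(grey, bin, 127, 255, CV_THRESH_BINARY);   // output is strictly 0 or 255
cvThreshold(grey, trunc, 127, 255, CV_THRESH_TRUNC);  // values <= 127 pass through unchanged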
Try this:
/*
* compile with:
*
* g++ -Wall -ggdb -I. -I/usr/include/opencv -L /usr/lib -lm -lcv -lhighgui -lcvaux threshold.cpp -o threshold.out
*/
#include <cv.h>
#include <highgui.h>
#include <stdio.h>
#include <assert.h>
IplImage *
threshold(IplImage const *in, int threshold)
{
assert(in->nChannels == 1);
CvSize size = cvSize(in->width, in->height);
IplImage *out = cvCreateImage(size, IPL_DEPTH_8U, 1);
cvThreshold(in, out, threshold, 255, CV_THRESH_BINARY);
return out;
}
void
show_image(char const *title, IplImage const *image)
{
cvNamedWindow(title, CV_WINDOW_AUTOSIZE);
cvShowImage(title, image);
cvWaitKey(0);
cvDestroyWindow(title);
}
int
main(int argc, char **argv)
{
if (argc < 2)
{
fprintf(stderr, "usage: %s in.png\n", argv[0]);
return 1;
}
IplImage *in = cvLoadImage(argv[1]);
IplImage *grey = in;
if (in->nChannels != 1)
{
/*
* For some reason, cvCreateImage returns an image with 3 channels even
* when a greyscale image is specified (e.g. PGM). Hack around this by
* just taking the first channel of the image. OpenCV uses BGR order,
* so it will be the B channel.
*/
CvSize size = cvSize(in->width, in->height);
grey = cvCreateImage(size, IPL_DEPTH_8U, 1);
cvSplit(in, grey, NULL, NULL, NULL);
cvReleaseImage(&in);
}
IplImage *thres = threshold(grey, 127);
show_image("thresholded", thres);
cvReleaseImage(&thres);
cvReleaseImage(&grey);
return 0;
}
Give it any image (even a colour one, see comment for clarification), e.g. [this one][1]. Do you get the expected result?
[1]: http://r0k.us/graphics/kodak/kodak/kodim20.png
