Undefined symbols for architecture x86_64: "_cvHaarDetectObjects" Eclipse - opencv

I'm trying to build the following sample code:
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <float.h>
#include <limits.h>
#include <time.h>
#include <ctype.h>
// Create a string that contains the exact cascade name
const char* cascade_name = "/usr/share/src/OpenCV-2.3.1/data/haarcascades/haarcascade_frontalface_alt.xml";
// "C:/Program Files/OpenCV/data/haarcascades/haarcascade_frontalface_alt.xml";
/* "haarcascade_profileface.xml";*/
// Function prototype for detecting and drawing an object from an image
void detect_and_draw( IplImage* image );
// Main function, defines the entry point for the program.
int main( int argc, char** argv )
{
// Create a sample image
IplImage *img = cvLoadImage("1.pgm");
// Call the function to detect and draw the face positions
detect_and_draw(img);
// Wait for user input before quitting the program
cvWaitKey();
// Release the image
cvReleaseImage(&img);
// Destroy the window previously created with filename: "result"
cvDestroyWindow("result");
// return 0 to indicate successful execution of the program
return 0;
}
// Function to detect and draw any faces that are present in an image
void detect_and_draw( IplImage* img )
{
// Create memory for calculations
static CvMemStorage* storage = 0;
// Create a new Haar classifier
static CvHaarClassifierCascade* cascade = 0;
int scale = 1;
// Create a new image based on the input image
IplImage* temp = cvCreateImage( cvSize(img->width/scale,img->height/scale), 8, 3 );
// Create two points to represent the face locations
CvPoint pt1, pt2;
int i;
// Load the HaarClassifierCascade
cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
// Check whether the cascade has loaded successfully. Else report an error and quit
if( !cascade )
{
fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
return;
}
// Allocate the memory storage
storage = cvCreateMemStorage(0);
// Create a new named window with title: result
cvNamedWindow( "result", 1 );
// Clear the memory storage which was used before
cvClearMemStorage( storage );
// Find whether the cascade is loaded, to find the faces. If yes, then:
if( cascade )
{
// There can be more than one face in an image. So create a growable sequence of faces.
// Detect the objects and store them in the sequence
CvSeq* faces = cvHaarDetectObjects( img, cascade, storage,
1.1, 2, CV_HAAR_DO_CANNY_PRUNING,
cvSize(40, 40) );
// Loop the number of faces found.
for( i = 0; i < (faces ? faces->total : 0); i++ )
{
// Create a new rectangle for drawing the face
CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
// Find the dimensions of the face, and scale it if necessary
pt1.x = r->x*scale;
pt2.x = (r->x+r->width)*scale;
pt1.y = r->y*scale;
pt2.y = (r->y+r->height)*scale;
// Draw the rectangle in the input image
cvRectangle( img, pt1, pt2, CV_RGB(255,0,0), 3, 8, 0 );
}
}
// Show the image in the window named "result"
cvShowImage( "result", img );
// Release the temp image created.
cvReleaseImage( &temp );
}
And the path /usr/share/src.... is the correct path to the XML file. I've linked the OpenCV libraries opencv_core, opencv_imgproc, opencv_highgui, and opencv_video in Eclipse (and I think they are linked correctly, since I build other OpenCV projects this way). But Eclipse keeps throwing the following errors:
Invoking: MacOS X C++ Linker
g++ -L/usr/local/include/opencv -L/usr/local/include/opencv2 -L/usr/local/lib -o "OpenCVFace" ./main.o -lopencv_core -lopencv_highgui -lopencv_video -lopencv_imgproc
Undefined symbols for architecture x86_64:
"_cvHaarDetectObjects", referenced from:
detect_and_draw(_IplImage*) in main.o
ld: symbol(s) not found for architecture x86_64
collect2: ld returned 1 exit status
make: *** [OpenCVFace] Error 1
**** Build Finished ****
I can see that the cvHaarDetectObjects function is highlighted in Eclipse (it turns purple). Any ideas on how to solve this problem? Thanks!

You need to link against opencv_objdetect, the module that provides cvHaarDetectObjects; add -lopencv_objdetect to your linker flags.
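For example, based on the link command shown in the build output above, the corrected invocation would be roughly this (everything unchanged except the added library):
g++ -L/usr/local/include/opencv -L/usr/local/include/opencv2 -L/usr/local/lib -o "OpenCVFace" ./main.o -lopencv_core -lopencv_highgui -lopencv_video -lopencv_imgproc -lopencv_objdetect
In Eclipse this usually means adding opencv_objdetect to the Libraries list under Project Properties > C/C++ Build > Settings, next to the libraries you already listed.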

Related

Error while compiling Gaussian filter code using OpenCV and CUDA

When I compile this code using CMake, this error is shown to me.
I tried removing CUDA and reinstalling it, but the result is the same.
#include "iostream"
#include "stdio.h"
#include "stdlib.h"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core/cuda.hpp"
#include "opencv2/cudaimgproc.hpp"
#include "opencv2/cudafilters.hpp" // cv::cuda::Filter
#include "opencv2/cudaarithm.hpp" // cv::cuda::abs orcv::cuda::addWeighted
#include "timer.h"
using namespace std;
using namespace cv;
void processUsingOpenCvCpu(std::string input_file, std::string output_file);
void processUsingOpenCvGpu(std::string input_file, std::string output_file);
void processUsingCuda(std::string input_file, std::string output_file);
int main(int argc, char **argv) {
const string input_file = argc >= 2 ? argv[1] : "evarest.jpg";
const string output_file_OpenCvCpu = argc >= 3 ? argv[2] : "output_OpenCvCpu.jpg";
const string output_file_OpenCvGpu = argc >= 4 ? argv[3] : "output_OpenCvGpu.jpg";
const string output_file_Cuda = argc >= 5 ? argv[4] : "output_Cuda.jpg";
for (int i=0; i<5; ++i) {
processUsingOpenCvCpu(input_file, output_file_OpenCvCpu);
processUsingOpenCvGpu(input_file, output_file_OpenCvGpu);
//processUsingCuda(input_file, output_file_Cuda);
}
return 0;
}
void processUsingOpenCvGpu(std::string input_file, std::string output_file) {
//Read input image from the disk
Mat input = imread(input_file, CV_LOAD_IMAGE_COLOR);
Mat output;
if(input.empty())
{
std::cout<<"Image Not Found: "<< input_file << std::endl;
return;
}
GpuTimer timer;
timer.Start();
// copy the input image from CPU to GPU memory
cuda::GpuMat gpuInput = cuda::GpuMat(input);
// blur the input image to remove the noise
Ptr<cv::cuda::Filter> filter = cv::cuda::createGaussianFilter(gpuInput.type(), gpuInput.type(), Size(3,3), 0);
filter->apply(gpuInput, gpuInput);
// convert it to grayscale (CV_8UC3 -> CV_8UC1)
cv::cuda::GpuMat gpuInput_gray;
cv::cuda::cvtColor( gpuInput, gpuInput_gray, COLOR_RGB2GRAY );
// compute the gradients on both directions x and y
cv::cuda::GpuMat gpuGrad_x, gpuGrad_y;
cv::cuda::GpuMat abs_gpuGrad_x, abs_gpuGrad_y;
int scale = 1;
int ddepth = CV_16S; // use 16-bit signed to avoid overflow
// gradient x direction
filter = cv::cuda::createSobelFilter(gpuInput_gray.type(), ddepth, 1, 0, 3, scale, BORDER_DEFAULT);
filter->apply(gpuInput_gray, gpuGrad_x);
cv::cuda::abs(gpuGrad_x, gpuGrad_x);
gpuGrad_x.convertTo(abs_gpuGrad_x, CV_8UC1); // CV_16S -> CV_8U
// gradient y direction
filter = cv::cuda::createSobelFilter(gpuInput_gray.type(), ddepth, 0, 1, 3, scale, BORDER_DEFAULT);
filter->apply(gpuInput_gray, gpuGrad_y);
cv::cuda::abs(gpuGrad_y, gpuGrad_y);
gpuGrad_y.convertTo(abs_gpuGrad_y, CV_8UC1); // CV_16S -> CV_8U
// create the output by adding the absolute gradient images of each x and y direction
cv::cuda::GpuMat gpuOutput;
cv::cuda::addWeighted( abs_gpuGrad_x, 0.5, abs_gpuGrad_y, 0.5, 0, gpuOutput );
// copy the result gradient from GPU to CPU and release GPU memory
gpuOutput.download(output);
gpuOutput.release();
gpuInput.release();
gpuInput_gray.release();
gpuGrad_x.release();
gpuGrad_y.release();
abs_gpuGrad_x.release();
abs_gpuGrad_y.release();
timer.Stop();
printf("OpenCV GPU code ran in: %f msecs.\n", timer.Elapsed());
//show image
imshow("Image", output);
// wait until user press a key
waitKey(0);
//imwrite(output_file, output);
}
And this is the error:
[ 50%] Linking CXX executable tut1
/usr/bin/ld: CMakeFiles/tut1.dir/1.cpp.o: undefined reference to symbol 'cudaEventSynchronize'
//usr/lib/x86_64-linux-gnu/libcudart.so.7.5: error adding symbols: DSO missing from command line
collect2: error: ld returned 1 exit status
CMakeFiles/tut1.dir/build.make:122: recipe for target 'tut1' failed
make[2]: *** [tut1] Error 1
CMakeFiles/Makefile2:67: recipe for target 'CMakeFiles/tut1.dir/all' failed
make[1]: *** [CMakeFiles/tut1.dir/all] Error 2
Makefile:83: recipe for target 'all' failed
make: *** [all] Error 2
When I removed CUDA, none of CUDA's functions were recognized; now the only error is on cudaEventSynchronize.
Recheck this line in your code:
cv::cuda::addWeighted(gpugrad_x, 0.5, gpugrad_y, 0.5, gpuoutput);
FIX:
cv::cuda::addWeighted(gpugrad_x, 0.5, gpugrad_y, 0.5, 0, gpuoutput);
You forgot to pass the double gamma argument to the addWeighted() function;
see: https://docs.opencv.org/3.4/d8/d34/group__cudaarithm__elem.html#ga2cd14a684ea70c6ab2a63ee90ffe6201
I tried part of your code and it works.
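For reference, the overload documented at that link takes src1, alpha, src2, beta, gamma, dst (plus an optional dtype and stream). Using the variable names from the question, the full call would look like this (a sketch of the single line, not the whole program):
// void cv::cuda::addWeighted(InputArray src1, double alpha, InputArray src2, double beta,
//                            double gamma, OutputArray dst, int dtype = -1,
//                            Stream& stream = Stream::Null());
cv::cuda::addWeighted(abs_gpuGrad_x, 0.5, abs_gpuGrad_y, 0.5, 0.0, gpuOutput);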

Unable to access Kinect Sensor cam Using Opencv and libfreenect driver On Ubuntu platform

I am using OpenCV and OpenKinect on the Ubuntu platform to access the Kinect sensor. The main error is: undefined reference to `freenect_sync_get_rgb_cv'. Help me debug this error. The source code is given below:
#include <iostream>
#include <cv.h>
#include <opencv/highgui.h>
// OpenKinect Header files
#include <libfreenect.h>
#include <libfreenect_sync.h>
#include <libfreenect/libfreenect_cv.h>
// --- C++ ---
#include <stdio.h>
#include <fstream>
#include <vector>
#include <math.h>
#include <iostream>
#include <vector>
#include <opencv2/highgui.hpp>
using namespace std;
using namespace cv;
char key;
// IplImage *freenect_sync_get_depth_cv(int index);
// IplImage *freenect_sync_get_rgb_cv(int index);
int main(int argc, char** argv)
{
IplImage* image = NULL;
/* create a window */
cvNamedWindow("Camera_Output", 1);
while(1) {
image = freenect_sync_get_rgb_cv(0);
// Mat image(freenect_sync_get_rgb_cv(0));
//CvCapture* capture = cvCaptureFromCAM(CV_CAP_ANY);
// cvCvtColor(image, image, CV_RGB2BGR); // cvLoadImage( freenect_sync_get_rgb_cv(0) )
// VideoCapture::grab
//cvCreateImage(cvSize(640, 480), 8, 4);
cvShowImage("Camera_Output", image);
if (!image) {
printf("Error: Kinect not connected?\n");
return -1;
}
key = cvWaitKey(100); //Capture Keyboard stroke
if (char(key) == 27){
break;
}
}
/* free memory */
cvDestroyWindow( "video" );
return 0;
}
The error looks like:
[100%] Building CXX object CMakeFiles/KinectRGB.dir/KinectRGB.cpp.o
Linking CXX executable KinectRGB
CMakeFiles/KinectRGB.dir/KinectRGB.cpp.o: In function `main':
KinectRGB.cpp:(.text+0x2c): undefined reference to `freenect_sync_get_rgb_cv'
collect2: error: ld returned 1 exit status
make[2]: *** [KinectRGB] Error 1
make[1]: *** [CMakeFiles/KinectRGB.dir/all] Error 2
make: *** [all] Error 2
sincos#sincos-300E4C-300E5C-300E7C:~/Desktop/OpenCV_test/KinectRead/build$
CMakeLists.txt file to Build the code is as given below:
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
project(KinectRGB)
set(DEPENDENCIES OpenCV GLUT OpenGL)
message("\n")
foreach( DEP ${DEPENDENCIES} )
find_package( ${DEP} )
string( TOUPPER ${DEP} UDEP ) # Capitalize
if( ${DEP}_FOUND OR ${UDEP}_FOUND )
message("\n${DEP}_Found = TRUE\n")
endif()
find_package(Threads REQUIRED)
find_package(libfreenect REQUIRED)
include_directories("/usr/include/libusb-1.0/")
endforeach()
include_directories(
${FREENECT_INCLUDE_DIR}
${GLUT_INCLUDE_DIR}
${OPENGL_INCLUDE_DIR}
${OpenCV_INCLUDE_DIRS}
)
add_executable(KinectRGB KinectRGB.cpp)
target_link_libraries(KinectRGB
${FREENECT_LIBRARIES}
${GLUT_LIBRARY}
${OPENGL_LIBRARIES}
${OpenCV_LIBS}
${CMAKE_THREAD_LIBS_INIT}
)
I would not recommend using IplImage; it is the legacy C image type. To test whether your Kinect is working properly, run the following Python script:
import freenect
import cv2
import numpy as np

def nothing(x):
    pass

kernel = np.ones((5, 5), np.uint8)

def pretty_depth(depth):
    np.clip(depth, 0, 2**10 - 1, depth)
    depth >>= 2
    depth = depth.astype(np.uint8)
    return depth

while 1:
    dst = pretty_depth(freenect.sync_get_depth()[0])  # input from Kinect
    cv2.imshow('Video', dst)
    if cv2.waitKey(1) & 0xFF == ord('b'):
        break
You should see the Kinect's disparity map.
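If you want to stay in C++ without handling IplImage yourself, here is a minimal sketch (assuming the library that actually provides freenect_sync_get_rgb_cv is linked, which is what the undefined-reference error is about) that wraps the returned frame in a cv::Mat:
#include <cstdio>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <libfreenect/libfreenect_cv.h>

int main()
{
    while (true) {
        IplImage* rgb = freenect_sync_get_rgb_cv(0);   // frame buffer owned by libfreenect
        if (!rgb) {
            printf("Error: Kinect not connected?\n");
            return -1;
        }
        cv::Mat frame = cv::cvarrToMat(rgb);           // wraps the IplImage data, no copy
        cv::Mat bgr;
        cv::cvtColor(frame, bgr, cv::COLOR_RGB2BGR);   // Kinect delivers RGB, imshow expects BGR
        cv::imshow("Camera_Output", bgr);
        if ((cv::waitKey(100) & 0xFF) == 27)           // ESC to quit
            break;
    }
    cv::destroyWindow("Camera_Output");
    return 0;
}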

cvSobel problems - opencv

I've got the code below:
// Image Processing.cpp : Defines the entry point for the console application.
//
//Save an available image.
#include "stdafx.h"
#include "cv.h"
#include "highgui.h"
#include "cxcore.h"
/*
The purpose of this program is to show an example of THRESHOLDING.
*/
int _tmain(int argc, _TCHAR* argv[])
{
IplImage* src = cvLoadImage("D:\\document\\Study\\university of technology\\semester_8\\Computer Vision\\Pics for test\\black-white 4.jpg");
IplImage* dst = cvCreateImage(cvGetSize(src),IPL_DEPTH_8U,3);
IplImage* temp1 = cvCreateImage(cvGetSize(src),IPL_DEPTH_8U,1);
IplImage* temp2 = cvCreateImage(cvGetSize(src),IPL_DEPTH_8U,1);
cvCvtColor(src,temp1,CV_RGB2GRAY);
cvSobel(temp1,temp2,0,1,3);
cvMerge(temp2,temp2,temp2,NULL,dst);
cvNamedWindow("src",1);
cvNamedWindow("dst",1);
cvShowImage("src",src);
cvShowImage("dst",temp2);
cvWaitKey(0);
cvReleaseImage(&src);
//cvReleaseImage(&dst);
cvDestroyAllWindows();
return 0;
}
When I run it, there's a warning as in the picture below:
but if I click the "Continue" button anyway, the result is displayed!
I hope someone can give me an explanation!
The result is correct; the description of the program is not. Your xorder=0 and yorder=1, which means you are taking the first derivative in the y-direction. The white pixels in the image correspond to boundaries that can be detected by a vertical derivative, namely boundaries that are as close to horizontal as possible. This is why the vertical lines are barely ever detected.
cvSobel by itself has NOTHING to do with thresholding. cvSobel is a function used for finding boundaries and contours. Thresholding is most commonly an operation that creates a black-and-white image from a greyscale image; it is also called image binarization.
If you want to threshold an image, start with cvThreshold and cvAdaptiveThreshold.
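For example, a minimal binary-threshold sketch in the same C API (the threshold value of 128 here is arbitrary):
IplImage* grey = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
IplImage* bin  = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
cvCvtColor(src, grey, CV_RGB2GRAY);                  // src is the colour image loaded with cvLoadImage
cvThreshold(grey, bin, 128, 255, CV_THRESH_BINARY);  // pixels above 128 become 255, the rest become 0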
I've fixed it; here is my code:
// Image Processing.cpp : Defines the entry point for the console application.
//
//Save an available image.
#include "stdafx.h"
#include "cv.h"
#include "highgui.h"
#include "cxcore.h"
/*
The purpose of this program is to show an example of Sobel method.
*/
int _tmain(int argc, _TCHAR* argv[])
{
IplImage* src = cvLoadImage("D:\\document\\Study\\university of technology\\semester_8\\Computer Vision\\Pics for test\\black-white 4.jpg");
IplImage* dst = cvCreateImage(cvGetSize(src),IPL_DEPTH_8U,1);
IplImage* dst_x = cvCreateImage(cvGetSize(src),IPL_DEPTH_8U,1);
IplImage* dst_y = cvCreateImage(cvGetSize(src),IPL_DEPTH_8U,1);
IplImage* temp1 = cvCreateImage(cvGetSize(src),IPL_DEPTH_8U,1);
IplImage* temp2 = cvCreateImage(cvGetSize(src),IPL_DEPTH_16S,1);
cvCvtColor(src,temp1,CV_RGB2GRAY);
cvSobel(temp1,temp2,0,1,3);
cvConvertScale(temp2,dst_y,1.0,0);
cvSobel(temp1,temp2,1,0,3);
cvConvertScale(temp2,dst_x,1.0,0);
// Don't take the derivative in both x and y at once; take each derivative separately and then combine them with cvAdd.
//cvSobel(temp1,temp2,1,1,3);
//cvConvertScale(temp2,dst,1.0,0);
cvAdd(dst_x,dst_y,dst,NULL);
cvNamedWindow("src",1);
cvNamedWindow("dst",1);
cvNamedWindow("dst_x",1);
cvNamedWindow("dst_y",1);
cvShowImage("src",src);
cvShowImage("dst",dst);
cvShowImage("dst_x",dst_x);
cvShowImage("dst_y",dst_y);
cvWaitKey(0);
cvReleaseImage(&src);
cvReleaseImage(&dst);
cvReleaseImage(&temp1);
cvReleaseImage(&temp2);
cvDestroyAllWindows();
return 0;
}

Unhandled exception at 0x52f9e470 in project1.exe : 0xC000001D : Illegal instruction

I am trying to detect an object using OpenCV in C++, but I am getting this error:
Unhandled exception at 0x52f9e470 in project1.exe : 0xC000001D : Illegal instruction.
I am using Windows 7 32-bit, OpenCV 2.4.3, and Visual Studio 2010 (C++). My code is:
#include <opencv\cv.h>
#include <opencv\highgui.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <float.h>
#include <limits.h>
#include <time.h>
#include <ctype.h>
// Create a string that contains the exact cascade name
// Contains the trained classifier for detecting a hand
const char *cascade_name="D:/31dec12/hand.xml";
//The function detects the hand from input frame and draws a rectangle around the detected portion of the frame
void detect_and_draw( IplImage* img )
{
// Create memory for calculations
static CvMemStorage* storage = 0;
// Create a new Haar classifier
static CvHaarClassifierCascade* cascade = 0;
// Sets the scale with which the rectangle is drawn with
int scale = 1;
// Create two points to represent the hand locations
CvPoint pt1, pt2;
// Looping variable
int i;
// Load the HaarClassifierCascade
cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
// Check whether the cascade has loaded successfully. Else report an error and quit
if( !cascade )
{
fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
return;
}
// Allocate the memory storage
storage = cvCreateMemStorage(0);
// Create a new named window with title: result
cvNamedWindow( "result", 1 );
// Clear the memory storage which was used before
cvClearMemStorage( storage );
// Find whether the cascade is loaded, to find the hands. If yes, then:
if( cascade )
{
// There can be more than one hand in an image. So create a growable sequence of hands.
// Detect the objects and store them in the sequence
CvSeq* hands = cvHaarDetectObjects( img, cascade, storage,
1.1, 2, CV_HAAR_DO_CANNY_PRUNING,
cvSize(40, 40) );
// Loop the number of hands found.
for( i = 0; i < (hands ? hands->total : 0); i++ )
{
// Create a new rectangle for drawing the hand
CvRect* r = (CvRect*)cvGetSeqElem( hands, i );
// Find the dimensions of the hand, and scale it if necessary
pt1.x = r->x*scale;
pt2.x = (r->x+r->width)*scale;
pt1.y = r->y*scale;
pt2.y = (r->y+r->height)*scale;
// Draw the rectangle in the input image
cvRectangle( img, pt1, pt2, CV_RGB(230,20,232), 3, 8, 0 );
}
}
// Show the image in the window named "result"
cvShowImage( "result", img );
}
// A Simple Camera Capture Framework
int main()
{
// Gets the input video stream from camera
CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
// Checks if the input stream is obtained
if( !capture )
{
fprintf( stderr, "ERROR: capture is NULL \n" );
getchar();
return -1;
}
// Show the image captured from the camera in the window and repeat
while( 1 )
{
// Get one frame
IplImage* frame = cvQueryFrame( capture );
// Checks if a frame is obtained
if( !frame )
{
fprintf( stderr, "ERROR: frame is null...\n" );
getchar();
break;
}
// Flips the frame into mirror image
cvFlip(frame,frame,1);
// Call the function to detect and draw the hand positions
detect_and_draw(frame);
//If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version),
//remove higher bits using AND operator
if( (cvWaitKey(10) & 255) == 27 )
break;
}
// Release the capture device housekeeping
cvReleaseCapture( &capture );
return 0;
}
What kind of CPU are you using? The last time I had the error 0xC000001D: Illegal instruction, it was related to SSE instructions used in the code. Some newer SSE instructions are not implemented on all processors (older AMD CPUs, for example), so you can fix this by rebuilding OpenCV without SSE support.
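If you go that route, rebuilding without SSE is roughly a matter of switching the SSE options off when configuring OpenCV with CMake, something like the following (option names as used by the 2.4.x build scripts; check cmake-gui for the exact set in your version):
cmake -D ENABLE_SSE=OFF -D ENABLE_SSE2=OFF -D ENABLE_SSE3=OFF -D ENABLE_SSSE3=OFF -D ENABLE_SSE41=OFF -D ENABLE_SSE42=OFF <path-to-opencv-source>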
I also had this problem when using cv::Mat(...). The same exception was thrown at:
size_t esz = CV_ELEM_SIZE(_type), esz1 = CV_ELEM_SIZE1(_type);
I'm not sure why, but after changing the Visual C++ project's floating-point model from Precise to Fast, the problem was solved.

weird binary image values generated by cvThreshold

Hey guys, I am using OpenCV for some vehicle recognition work. When I use cvThreshold to convert a gray image to a binary image, the returned image is really strange: a binary image is supposed to contain only two values, 0 and 255, yet it contains other values like 2, 3, 253, and 254. Does anyone know how this happens? cvCmpS has the same problem.
cvThreshold has a variety of behaviours beyond normal binary thresholding. They are described in the OpenCV API reference.
For example, if you call it with the threshold_type flag set to CV_THRESH_TRUNC, it will clamp all intensities above the specified threshold to the threshold value; the intensities below the threshold remain untouched. Perhaps this accounts for your strange result?
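As a quick illustration of the difference (a sketch assuming grey, bin and trunc are 8-bit single-channel images of the same size):
cvThreshold(grey, bin,   127, 255, CV_THRESH_BINARY); // output contains only 0 and 255
cvThreshold(grey, trunc, 127, 255, CV_THRESH_TRUNC);  // values above 127 are clamped to 127,
                                                      // values at or below 127 pass through unchanged
Note that the max_value argument (255) is ignored for CV_THRESH_TRUNC.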
If you post the image and your code (the bit that calls cvThreshold is enough) I could probably be of more help.
Try this:
/*
* compile with:
*
* g++ -Wall -ggdb -I. -I/usr/include/opencv -L /usr/lib -lm -lcv -lhighgui -lcvaux threshold.cpp -o threshold.out
*/
#include <cv.h>
#include <highgui.h>
#include <stdio.h>
#include <assert.h>
IplImage *
threshold(IplImage const *in, int threshold)
{
assert(in->nChannels == 1);
CvSize size = cvSize(in->width, in->height);
IplImage *out = cvCreateImage(size, IPL_DEPTH_8U, 1);
cvThreshold(in, out, threshold, 255, CV_THRESH_BINARY);
return out;
}
void
show_image(char const *title, IplImage const *image)
{
cvNamedWindow(title, CV_WINDOW_AUTOSIZE);
cvShowImage(title, image);
cvWaitKey(0);
cvDestroyWindow(title);
}
int
main(int argc, char **argv)
{
if (argc < 2)
{
fprintf(stderr, "usage: %s in.png\n", argv[0]);
return 1;
}
IplImage *in = cvLoadImage(argv[1]);
IplImage *grey = in;
if (in->nChannels != 1)
{
/*
* For some reason, cvLoadImage returns an image with 3 channels even
* when a greyscale image is specified (e.g. PGM). Hack around this by
* just taking the first channel of the image. OpenCV uses BGR order,
* so it will be the B channel.
*/
CvSize size = cvSize(in->width, in->height);
grey = cvCreateImage(size, IPL_DEPTH_8U, 1);
cvSplit(in, grey, NULL, NULL, NULL);
cvReleaseImage(&in);
}
IplImage *thres = threshold(grey, 127);
show_image("thresholded", thres);
cvReleaseImage(&thres);
cvReleaseImage(&grey);
return 0;
}
Give it any image (even a colour one, see comment for clarification), e.g. [this one][1]. Do you get the expected result?
[1]: http://r0k.us/graphics/kodak/kodak/kodim20.png
