I'm implementing an OMR system for test papers, but I'm facing problems when determining which circles are filled. I've succeeded in extracting these grayscale regions of interest.
The problems are:
- Binary thresholding (adaptive and fixed) followed by counting non-zero pixels gives a lot of errors, because of the letters printed inside the circles and the varying brightness of photos taken with mobile cameras.
- I also tried the technique described in this survey that uses the average grayscale value of a circle to mark it as filled or not, but image brightness is not uniform because of the different light sources when people take photos with their cameras, and I got a lot of wrong results.
- People also don't follow the rules, such as filling the whole circle, so the algorithm needs to be robust in those cases as well.
Sample images
I already have about 10 GB of samples, so maybe machine learning or other statistical methods would be useful.
Does anybody know other methods to classify a circle as filled?
Since it is not a straightforward problem, it needs a lot of tweaking to make it robust. But I would like to suggest a good starting point. You can play with it and try to make it work.
import numpy as np
import cv2

image_ori = cv2.imread("circle_opt.png")

# Keep only dark pixels (pencil marks and printed circle outlines)
lower_bound = np.array([0, 0, 0])
upper_bound = np.array([255, 255, 195])
image = image_ori
mask = cv2.inRange(image_ori, lower_bound, upper_bound)
masked_red = cv2.bitwise_and(image, image, mask=mask)

# Morphological opening to remove small specks of noise
kernel = np.ones((3, 3), np.uint8)
opened = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)

# Index [0] assumes the OpenCV 2.x/4.x return signature (use [1] on 3.x)
contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[0]
contours = sorted(contours, key=lambda x: cv2.boundingRect(x)[0])
print(len(contours))

for c in contours:
    (x, y), r = cv2.minEnclosingCircle(c)
    center = (int(x), int(y))
    r = int(r)
    # Keep only contours whose enclosing circle has a plausible bubble radius
    if 10 <= r <= 15:
        cv2.circle(image, center, r, (0, 255, 0), 2)

# cv2.imwrite('omr_processed.png', image_ori)
cv2.imshow("original", image_ori)
cv2.waitKey(0)
The result I got from my code on the image you shared was this
You can apply thresholds to these green-circled patches and then count non-zero pixels to decide whether each circle is marked. You can play with lower_bound and upper_bound to try to make the solution robust.
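If it helps, here is a minimal sketch of that counting step, reusing `mask` from the snippet above; the helper name and the 0.5 cutoff are my own assumptions, so treat them as starting values to tune:

def circle_fill_ratio(mask, center, radius):
    # Build a filled circular ROI and measure which fraction of it is "dark"
    # (non-zero in the inRange mask above).
    roi = np.zeros(mask.shape, dtype=np.uint8)
    cv2.circle(roi, center, radius, 255, -1)
    marked = cv2.countNonZero(cv2.bitwise_and(mask, roi))
    total = cv2.countNonZero(roi)
    return marked / float(total)

# Inside the loop above, after computing `center` and `r`:
# if circle_fill_ratio(mask, center, r) > 0.5:   # threshold to tune
#     print("filled")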
Hope this helps! Good luck on your problem solving :)
I am trying to write a computer vision program that detects litter and random trash against a noisy background such as a beach (noisy due to the sand).
Original Image:
Canny Edge detection without any image processing:
I realize that a certain combination of image processing techniques will help me accomplish my goal of ignoring the noisy sandy background and detecting all the trash and objects on the ground.
I tried to perform median blurring, playing around with and tuning the parameters, and it gave me this:
It performs well in terms of ignoring the sandy background, but it fails to detect many of the other objects on the ground, possibly because they are blurred out (not too sure).
Is there any way of improving my algorithm or image processing techniques so that they ignore the noisy sandy background while still allowing Canny edge detection to find all the objects, so the program can detect and draw contours around every object?
Code:
from pyimagesearch.transform import four_point_transform
from matplotlib import pyplot as plt
import numpy as np
import cv2
import imutils
im = cv2.imread('images/beach_trash_3.jpg')
#cv2.imshow('Original', im)
# Histogram equalization to improve contrast
###
#im = np.fliplr(im)
im = imutils.resize(im, height = 500)
imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
# Contour detection
#ret,thresh = cv2.threshold(imgray,127,255,0)
#imgray = cv2.GaussianBlur(imgray, (5, 5), 200)
imgray = cv2.medianBlur(imgray, 11)
cv2.imshow('Blurred', imgray)
'''
hist,bins = np.histogram(imgray.flatten(),256,[0,256])
plt_one = plt.figure(1)
cdf = hist.cumsum()
cdf_normalized = cdf * hist.max()/ cdf.max()
cdf_m = np.ma.masked_equal(cdf,0)
cdf_m = (cdf_m - cdf_m.min())*255/(cdf_m.max()-cdf_m.min())
cdf = np.ma.filled(cdf_m,0).astype('uint8')
imgray = cdf[imgray]
cv2.imshow('Histogram Normalization', imgray)
'''
'''
imgray = cv2.adaptiveThreshold(imgray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,11,2)
'''
thresh = imgray
#imgray = cv2.medianBlur(imgray,5)
#imgray = cv2.Canny(imgray,10,500)
thresh = cv2.Canny(imgray,75,200)
#thresh = imgray
cv2.imshow('Canny', thresh)
contours, hierarchy = cv2.findContours(thresh.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(contours, key = cv2.contourArea, reverse = True)[:5]
test = im.copy()
cv2.drawContours(test, cnts, -1,(0,255,0),2)
cv2.imshow('All contours', test)
print('---------------------------------------------')
##### Code to show each contour #####
main = np.array([[]])
for c in cnts:
    epsilon = 0.02 * cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, epsilon, True)
    test = im.copy()
    cv2.drawContours(test, [approx], -1, (0, 255, 0), 2)
    #print('Contours: ', contours)
    if len(approx) == 4:
        print('Found rectangle')
        print('Approx.shape: ', approx.shape)
        print('Test.shape: ', test.shape)
        # frame_f = frame_f[y: y+h, x: x+w]
        frame_f = test[approx[0,0,1]:approx[2,0,1], approx[0,0,0]:approx[2,0,0]]
        print('frame_f.shape: ', frame_f.shape)
        main = np.append(main, approx[None,:][None,:])
        print('main: ', main)
        # Uncomment in order to show all rectangles in image
        #cv2.imshow('Show Ya', test)
        #print('Approx: ', approx.shape)
        #cv2.imshow('Show Ya', frame_f)
    cv2.waitKey()
print('---------------------------------------------')
cv2.drawContours(im, cnts, -1,(0,255,0),2)
print(main.shape)
print(main)
cv2.imshow('contour-test', im)
cv2.waitKey()
What I understand from your problem is: you want to segment out the foreground objects from a background which is variable in nature (the sand gray level depends on many other conditions).
There are various ways to approach this kind of problem:
Approach 1:
From your image one thing is clear: background pixels will always greatly outnumber foreground pixels, so the simplest method for an initial segmentation is:
Convert the image into gray.
Create its histogram.
Find the peak index of the histogram, i.e. the index which has the maximum number of pixels.
The above three steps give you an idea of the background, but the game does not end here. Now you can put this index value in the center and take a range of values around it, say 25 above and below; for example, if your peak index is 207 (as in your case), choose a range of gray levels from 75 to 225 and threshold the image. Depending on the nature of your background, this method can be used for foreground object detection. After segmentation you have to perform some post-processing steps like morphological analysis to separate the different objects, and after extracting the objects you can apply some classification step for a finer level of segmentation to remove false positives.
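A rough sketch of Approach 1 in OpenCV; the ±25 band around the peak and the 5x5 kernel are example values to tune, not prescribed ones:

import cv2
import numpy as np

img = cv2.imread('beach_trash_3.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# 1-3. Histogram of gray levels and its peak (the dominant sand value)
hist = cv2.calcHist([gray], [0], None, [256], [0, 256]).ravel()
peak = int(np.argmax(hist))

# Pixels within a band around the peak are treated as background (sand),
# everything outside the band is a candidate foreground pixel
band = 25
background = cv2.inRange(gray, peak - band, peak + band)
foreground = cv2.bitwise_not(background)

# Post-processing: morphological opening before extracting object contours
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
foreground = cv2.morphologyEx(foreground, cv2.MORPH_OPEN, kernel)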
Approach 2:
Play with some statistics of the image pixels: make a small data set of gray values and
Label them class 1 and class 2, for example 1 for sand and 2 for foreground,
Find the mean and variance (standard deviation) of the pixels from both classes, and also calculate the prior probability for each class (num_pix_per_class / total_num_pix), then store these stats for later use,
Now come back to the image, take every pixel one by one and apply the Gaussian pdf: p(pix) = 1/(sqrt(2*pi)*sigma) * exp(-(pix - mean)^2 / (2*sigma^2)); in place of mean put the mean calculated earlier, and in place of sigma the standard deviation calculated earlier.
After applying step 3 you will get two probability values for each pixel, one per class; just choose the class which has the higher probability.
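A sketch of Approach 2 as a per-pixel two-class Gaussian classifier; the class statistics and priors below are placeholders you would estimate from hand-labelled sand and foreground patches:

import cv2
import numpy as np

def gaussian_pdf(x, mean, std):
    return np.exp(-((x - mean) ** 2) / (2.0 * std ** 2)) / (np.sqrt(2.0 * np.pi) * std)

gray = cv2.cvtColor(cv2.imread('beach_trash_3.jpg'), cv2.COLOR_BGR2GRAY).astype(np.float64)

# Placeholder statistics and priors estimated from labelled pixels
sand_mean, sand_std, p_sand = 205.0, 12.0, 0.9
obj_mean, obj_std, p_obj = 90.0, 40.0, 0.1

p_background = gaussian_pdf(gray, sand_mean, sand_std) * p_sand
p_foreground = gaussian_pdf(gray, obj_mean, obj_std) * p_obj

# Pick the class with the higher (prior-weighted) likelihood per pixel
foreground_mask = np.uint8(p_foreground > p_background) * 255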
Approach 3:
Approach 3 is more complex than the above two: you can use texture-based operations to segment out the sand-like texture, but for a texture-based method I would recommend supervised classification rather than unsupervised (like k-means).
Different texture feature which you can use are:
Basic:
Range of gray levels in a defined neighborhood.
local mean and variance or entropy.
Gray Level Co-occurrence Matrices (GLCM).
Advanced:
Local Binary Patterns.
Wavelet Transform.
Gabor Transform. etc.
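For the simplest of these (local mean and variance), a hedged sketch; the 15x15 window and the variance threshold are assumptions to tune:

import cv2
import numpy as np

gray = cv2.cvtColor(cv2.imread('beach_trash_3.jpg'), cv2.COLOR_BGR2GRAY).astype(np.float32)

win = (15, 15)
local_mean = cv2.blur(gray, win)
local_sq_mean = cv2.blur(gray * gray, win)
local_var = np.clip(local_sq_mean - local_mean ** 2, 0, None)

# Smooth sand tends to have low local variance, so thresholding the variance
# map is a cheap way to highlight textured (object) regions.
texture_mask = np.uint8(local_var > 200) * 255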
PS: In my opinion you should give approaches 1 and 2 a try; they can save you a lot of work. :)
For better results you should combine several algorithms. The OpenCV tutorials always focus on one feature of OpenCV at a time, but real CV applications should use as many techniques and algorithms as possible.
I once had to detect biological cells in noisy pictures, and I got very good results by applying some contextual information:
Expected size of cells
The fact that all cells have similar size
Expected number of cells
So I changed many parameters and tried to detect what I'm looking for.
If you use edge detection, the sand will give rather random shapes. Try to change the Canny parameters and detect lines, rectangles, circles, etc. - any shapes more probable for litter. Remember the positions of detected objects for each parameter set, and at the end give priority to those positions (areas) where shapes were detected most often.
Use color-separation. The peaks in color-histogram could be the hints to the litter, as the distribution of sand-colors should be more even.
For some frequently appearing small objects, like cigarette stubs, you can apply object matching.
P.S.:
Cool application! Just out of curiosity, are you going to scan the beach with a quadcopter?
If you want to detect objects on such a uniform background, you should start by detecting the main color in the image. That way you will detect all the sand, and the objects will be in the remaining parts. You can take a look at papers published by Arnaud LeTrotter and Ludovic Llucia, who both used this type of "main color detection".
I'm intending to write a program to detect and differentiate certain objects from a nearly solid background. The foreground and the background have a high contrast difference which I would further increase to aid in the object identification process. I'm planning to use Hough transform technique and OpenCV.
Sample image
As seen in the above image, I would want to separately identify the circular objects and the square objects (or any other shape out of a finite set of shapes). Since I'm quite new to image processing I do not have an idea whether such a situation needs a neural network to be implemented and each shape to be learned beforehand. Would a technique such as template matching let me do this without a neural network?
These posts will get you started:
How to detect circles
How to detect squares
How to detect a sheet of paper (advanced square detection)
You will probably have to adjust some parameters in these codes to match your circles/squares, but the core of the technique is shown on these examples.
If you intend to detect shapes other than just circles, (and from the image I assume you do), I would recommend the Chamfer matching for a quick start, especially as you have a good contrast.
The basic premise, explained in simple terms, is the following:
You do an edge detection (for example, cvCanny in opencv)
You create a distance image, where the value of each pixel means the distance from the nearest edge.
You take the shapes you would like to detect, define sample points along the edges of the shape, and try to match these points on the distance image. Basically you just add the values on the distance image which are "under" the coordinates of your sample points, given a specific position of your objects.
Find a good minimization algorithm, the effectiveness of this depends on your application.
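A bare-bones sketch of those four steps, assuming a modern OpenCV build; the file names and the single test offset are placeholders, and a real matcher would scan positions (and scales) and minimise the score:

import cv2
import numpy as np

scene = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE)
edges = cv2.Canny(scene, 50, 150)

# Distance image: each pixel holds the distance to the nearest edge pixel
dist = cv2.distanceTransform(cv2.bitwise_not(edges), cv2.DIST_L2, 3)

# Sample points along the edges of the shape we want to find
template = cv2.imread('template.png', cv2.IMREAD_GRAYSCALE)
pts = np.column_stack(np.nonzero(cv2.Canny(template, 50, 150)))   # (row, col)

def chamfer_score(dist, pts, offset):
    # Average distance value "under" the template points at this position;
    # lower means a better match.
    rows = pts[:, 0] + offset[0]
    cols = pts[:, 1] + offset[1]
    return float(dist[rows, cols].mean())

print(chamfer_score(dist, pts, (40, 60)))   # score at one candidate position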
This basic approach is a general solution, usually works well, but without further advancements, it is very slow.
Usually it's a good idea to first separate the objects of interest, so you don't have to always do the full search on the whole image. Find a good threshold, so you can separate objects. You still don't know which object it is, but you only have to do the matching itself in close proximity of this object.
Another good idea is, instead of doing the full search on the high resolution image, first do it on a very low resolution. The result will not be very accurate, but you can know the general areas where it's worth to do a search on a higher resolution, so you don't waste your time on areas where there is nothing of interest.
There are a number of more advanced techniques, but it's still worth to take a look at the basic chamfer matching, as it is the base of a large number of techniques.
With the assumption that the objects are simple shapes, here's an approach using thresholding + contour approximation. Contour approximation is based on the assumption that a curve can be approximated by a series of short line segments which can be used to determine the shape of a contour. For instance, a triangle has three vertices, a square/rectangle has four vertices, a pentagon has five vertices, and so on.
Obtain binary image. We load the image, convert to grayscale, Gaussian blur, then adaptive threshold to obtain a binary image.
Detect shapes. Find contours and identify the shape of each contour using contour approximation filtering. This can be done using arcLength to compute the perimeter of the contour and approxPolyDP to obtain the actual contour approximation.
Input image
Detected objects highlighted in green
Labeled contours
Code
import cv2

def detect_shape(c):
    # Compute perimeter of contour and perform contour approximation
    shape = ""
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.04 * peri, True)

    # Triangle
    if len(approx) == 3:
        shape = "triangle"

    # Square or rectangle
    elif len(approx) == 4:
        (x, y, w, h) = cv2.boundingRect(approx)
        ar = w / float(h)

        # A square will have an aspect ratio that is approximately
        # equal to one, otherwise, the shape is a rectangle
        shape = "square" if ar >= 0.95 and ar <= 1.05 else "rectangle"

    # Star
    elif len(approx) == 10:
        shape = "star"

    # Otherwise assume as circle or oval
    else:
        shape = "circle"

    return shape

# Load image, grayscale, Gaussian blur, and adaptive threshold
image = cv2.imread('1.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (7,7), 0)
thresh = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,31,3)

# Find contours and detect shape
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

for c in cnts:
    # Identify shape
    shape = detect_shape(c)

    # Find centroid and label shape name
    M = cv2.moments(c)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    cv2.putText(image, shape, (cX - 20, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (36,255,12), 2)

cv2.imshow('thresh', thresh)
cv2.imshow('image', image)
cv2.waitKey()
What is the best way to detect the corners of an invoice/receipt/sheet-of-paper in a photo? This is to be used for subsequent perspective correction, before OCR.
My current approach has been:
RGB > Gray > Canny Edge Detection with thresholding > Dilate(1) > Remove small objects(6) > clear border objects > pick largest blob based on Convex Area > [corner detection - not implemented]
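For reference, a rough OpenCV sketch of that pipeline; the thresholds and kernel size are placeholder guesses, not the values from my MATLAB prototype, and the 'clear border objects' step is omitted:

import cv2
import numpy as np

img = cv2.imread('invoice.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150)
edges = cv2.dilate(edges, np.ones((3, 3), np.uint8), iterations=1)

# "Remove small objects / pick largest blob": keep the contour whose convex
# hull has the largest area (assumes the OpenCV 4.x findContours signature)
contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
page = max(contours, key=lambda c: cv2.contourArea(cv2.convexHull(c)))
cv2.drawContours(img, [cv2.convexHull(page)], -1, (0, 255, 0), 2)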
I can't help but think there must be a more robust 'intelligent'/statistical approach to handle this type of segmentation. I don't have a lot of training examples, but I could probably get 100 images together.
Broader context:
I'm using matlab to prototype, and planning to implement the system in OpenCV and Tesserect-OCR. This is the first of a number of image processing problems I need to solve for this specific application. So I'm looking to roll my own solution and re-familiarize myself with image processing algorithms.
Here are some sample images that I'd like the algorithm to handle. If you'd like to take up the challenge, the large images are at http://madteckhead.com/tmp
(source: madteckhead.com)
(source: madteckhead.com)
(source: madteckhead.com)
(source: madteckhead.com)
In the best case this gives:
(source: madteckhead.com)
(source: madteckhead.com)
(source: madteckhead.com)
However it fails easily on other cases:
(source: madteckhead.com)
(source: madteckhead.com)
(source: madteckhead.com)
EDIT: Hough Transform Progress
Q: What algorithm would cluster the hough lines to find corners?
Following the advice from the answers, I was able to use the Hough Transform, pick lines, and filter them. My current approach is rather crude. I've made the assumption that the invoice will always be less than 15 degrees out of alignment with the image. I end up with reasonable results for lines if this is the case (see below). But I am not entirely sure of a suitable algorithm to cluster the lines (or vote) to extrapolate the corners. The Hough lines are not continuous, and in the noisy images there can be parallel lines, so some form of distance-from-line-origin metric is required. Any ideas?
(source: madteckhead.com)
I'm Martin's friend who was working on this earlier this year. This was my first ever coding project, and kinda ended in a bit of a rush, so the code needs some errr...decoding...
I'll give a few tips from what I've seen you doing already, and then sort my code on my day off tomorrow.
First tip, OpenCV and python are awesome, move to them as soon as possible. :D
Instead of removing small objects and/or noise, lower the Canny constraints so it accepts more edges, and then find the largest closed contour (in OpenCV use findContour() with some simple parameters; I think I used CV_RETR_LIST). It might still struggle when the paper is on a white surface, but it was definitely providing the best results.
For the HoughLines2() transform, try CV_HOUGH_STANDARD as opposed to CV_HOUGH_PROBABILISTIC: it gives rho and theta values, defining the line in polar coordinates, and you can then group the lines within a certain tolerance of each other.
My grouping worked as a lookup table: for each line output by the Hough transform it would give a rho and theta pair. If these values were within, say, 5% of a pair already in the table, the line was discarded; if they were outside that 5%, a new entry was added to the table.
You can then do analysis of parallel lines or distance between lines much more easily.
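A small sketch of that lookup-table grouping; the 5% figure is kept from the description above, everything else (including the zero-guards) is my own:

import numpy as np

def group_lines(hough_lines, tol=0.05):
    # hough_lines: iterable of (rho, theta) pairs, e.g. from cv2.HoughLines
    table = []
    for rho, theta in hough_lines:
        is_duplicate = any(
            abs(rho - r) <= tol * max(abs(r), 1.0) and
            abs(theta - t) <= tol * max(abs(t), 0.05)
            for r, t in table)
        if not is_duplicate:
            table.append((rho, theta))
    return table

# lines = cv2.HoughLines(edges, 1, np.pi / 180, 100)
# grouped = group_lines((l[0][0], l[0][1]) for l in lines)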
Hope this helps.
Here's what I came up with after a bit of experimentation:
import cv2, numpy as np
import sys

def get_new(old):
    new = np.ones(old.shape, np.uint8)
    cv2.bitwise_not(new, new)
    return new

if __name__ == '__main__':
    orig = cv2.imread(sys.argv[1])

    # these constants are carefully picked
    MORPH = 9
    CANNY = 84
    HOUGH = 25

    img = cv2.cvtColor(orig, cv2.COLOR_BGR2GRAY)
    cv2.GaussianBlur(img, (3,3), 0, img)

    # this is to recognize white on white
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (MORPH, MORPH))
    dilated = cv2.dilate(img, kernel)

    edges = cv2.Canny(dilated, 0, CANNY, apertureSize=3)

    lines = cv2.HoughLinesP(edges, 1, np.pi/180, HOUGH)
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(edges, (x1, y1), (x2, y2), (255, 0, 0), 2, 8)

    # finding contours (OpenCV 2.x/4.x return signature)
    contours, _ = cv2.findContours(edges.copy(), cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_TC89_KCOS)
    contours = [cont for cont in contours if cv2.arcLength(cont, False) > 100]
    contours = [cont for cont in contours if cv2.contourArea(cont) > 10000]

    # simplify contours down to polygons
    rects = []
    for cont in contours:
        rect = cv2.approxPolyDP(cont, 40, True).copy().reshape(-1, 2)
        rects.append(rect)

    # that's basically it
    cv2.drawContours(orig, rects, -1, (0, 255, 0), 1)

    # show only contours
    new = get_new(img)
    cv2.drawContours(new, rects, -1, (0, 255, 0), 1)
    cv2.GaussianBlur(new, (9, 9), 0, new)
    new = cv2.Canny(new, 0, CANNY, apertureSize=3)

    cv2.namedWindow('result', cv2.WINDOW_NORMAL)
    cv2.imshow('result', orig)
    cv2.waitKey(0)
    cv2.imshow('result', dilated)
    cv2.waitKey(0)
    cv2.imshow('result', edges)
    cv2.waitKey(0)
    cv2.imshow('result', new)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Not perfect, but at least works for all samples:
A student group at my university recently demonstrated an iPhone app (and python OpenCV app) that they'd written to do exactly this. As I remember, the steps were something like this:
Median filter to completely remove the text on the paper (this was handwritten text on white paper with fairly good lighting and may not work with printed text, it worked very well). The reason was that it makes the corner detection much easier.
Hough Transform for lines
Find the peaks in the Hough Transform accumulator space and draw each line across the entire image.
Analyse the lines and remove any that are very close to each other and are at a similar angle (cluster the lines into one). This is necessary because the Hough Transform isn't perfect as it's working in a discrete sample space.
Find pairs of lines that are roughly parallel and that intersect other pairs to see which lines form quads.
This seemed to work fairly well and they were able to take a photo of a piece of paper or book, perform the corner detection and then map the document in the image onto a flat plane in almost realtime (there was a single OpenCV function to perform the mapping). There was no OCR when I saw it working.
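For the "find pairs of lines that intersect" step, the corner candidate for two lines in (rho, theta) form comes from a 2x2 linear solve; here is a hedged helper (not from the original app):

import numpy as np

def intersection(rho1, theta1, rho2, theta2):
    # Each line satisfies x*cos(theta) + y*sin(theta) = rho
    a = np.array([[np.cos(theta1), np.sin(theta1)],
                  [np.cos(theta2), np.sin(theta2)]])
    b = np.array([rho1, rho2])
    if abs(np.linalg.det(a)) < 1e-6:   # (nearly) parallel lines: no corner
        return None
    x, y = np.linalg.solve(a, b)
    return int(round(x)), int(round(y))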
Instead of starting from edge detection you could use Corner detection.
Marvin Framework provides an implementation of Moravec algorithm for this purpose. You could find the corners of the papers as a starting point. Below the output of Moravec's algorithm:
You can also use MSER (Maximally Stable Extremal Regions) over the Sobel operator result to find the stable regions of the image. For each region returned by MSER you can apply convex hull and polygon approximation to obtain something like this:
But this kind of detection is more useful for live detection than for a single picture, where it does not always return the best result.
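A minimal sketch of the MSER-over-Sobel idea, assuming a modern OpenCV Python build (MSER parameters left at their defaults):

import cv2

img = cv2.imread('receipt.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Sobel gradient magnitude, scaled back to 8 bits for MSER
gx = cv2.Sobel(gray, cv2.CV_32F, 1, 0)
gy = cv2.Sobel(gray, cv2.CV_32F, 0, 1)
grad = cv2.convertScaleAbs(cv2.magnitude(gx, gy))

mser = cv2.MSER_create()
regions, _ = mser.detectRegions(grad)

# Convex hull + polygon approximation for each stable region
for pts in regions:
    hull = cv2.convexHull(pts.reshape(-1, 1, 2))
    approx = cv2.approxPolyDP(hull, 0.02 * cv2.arcLength(hull, True), True)
    cv2.polylines(img, [approx], True, (0, 255, 0), 2)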
After edge-detection, use Hough Transform.
Then, put those points into an SVM (support vector machine) with their labels. If the examples have smooth lines on them, the SVM will have no difficulty dividing the necessary parts of the example from the other parts. My advice on the SVM: use features like connectivity and length. That is, if points are connected and long, they are likely to be a line of the receipt. Then, you can eliminate all of the other points.
Here you have @Vanuan's code using C++:
cv::cvtColor(mat, mat, CV_BGR2GRAY);
cv::GaussianBlur(mat, mat, cv::Size(3,3), 0);
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Point(9,9));
cv::Mat dilated;
cv::dilate(mat, dilated, kernel);
cv::Mat edges;
cv::Canny(dilated, edges, 84, 3);
std::vector<cv::Vec4i> lines;
lines.clear();
cv::HoughLinesP(edges, lines, 1, CV_PI/180, 25);
std::vector<cv::Vec4i>::iterator it = lines.begin();
for(; it!=lines.end(); ++it) {
cv::Vec4i l = *it;
cv::line(edges, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]), cv::Scalar(255,0,0), 2, 8);
}
std::vector< std::vector<cv::Point> > contours;
cv::findContours(edges, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_TC89_KCOS);
std::vector< std::vector<cv::Point> > contoursCleaned;
for (int i=0; i < contours.size(); i++) {
if (cv::arcLength(contours[i], false) > 100)
contoursCleaned.push_back(contours[i]);
}
std::vector<std::vector<cv::Point> > contoursArea;
for (int i=0; i < contoursCleaned.size(); i++) {
if (cv::contourArea(contoursCleaned[i]) > 10000){
contoursArea.push_back(contoursCleaned[i]);
}
}
std::vector<std::vector<cv::Point> > contoursDraw (contoursArea.size());
for (size_t i = 0; i < contoursArea.size(); i++){
    cv::approxPolyDP(cv::Mat(contoursArea[i]), contoursDraw[i], 40, true);
}
cv::Mat drawing = cv::Mat::zeros( mat.size(), CV_8UC3 );
cv::drawContours(drawing, contoursDraw, -1, cv::Scalar(0,255,0),1);
Convert to Lab colour space.
Use k-means to segment into 2 clusters.
Then use contours or Hough on one of the clusters (internal).
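A compact sketch of that recipe (k = 2; the termination criteria and attempt count are typical values, not prescribed ones):

import cv2
import numpy as np

img = cv2.imread('receipt.jpg')
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)

# k-means on the Lab pixels, 2 clusters
samples = lab.reshape(-1, 3).astype(np.float32)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
_, labels, centers = cv2.kmeans(samples, 2, None, criteria, 5, cv2.KMEANS_RANDOM_CENTERS)

# Binary mask of one cluster; run findContours / HoughLines on this
mask = np.uint8(labels.reshape(lab.shape[:2]) == 0) * 255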
In my app, I will input a human image and I want to get only the face and neck of that person as output in a separate image. Example: the image below as input: (Source: http://www.fremantlepress.com.au)
And I want to get the image above as output:
I want to perform the following algorithm:
1. Detect face
2. Select (face region * 2) area
3. Detect skin and neck
4. Cut the skin region of the selected image
5. Save that cut region into a new image
Going through the EmguCV wiki and other online resources, I am confident I can perform steps 1 and 2. But I am not sure how I can accomplish steps 3 and 4.
There are some functions/methods I am looking at (Canny edge detection, contours, etc.) but I am not sure how and where I should apply those methods.
I am using EmguCV (C#) and Windows Form Application.
Please help me with how I can do steps 3 and 4. I would be glad if someone elaborated on these two steps and included some code as well.
Well, there are several ways you could approach this. Edge detection will only give you a binary image of edges, and you will have to perform some line tracing or Hough transforms to detect the location of these. Their accuracy will vary.
I will assume for now that you can detect the eyes and the relative location of the face. I would expect a statistical filter to provide a favourable outcome, with better performance than a neural network, which is the other main alternative. A good alternative is naturally colour segmentation, if colour images are used (this is far easier to implement). I will also assume that the head position can change slightly, with the neck being more or less visible within an image.
So for a Statistical Filter:
(Note that the background of the individual is similar to the face data when dealing with a greyscale image so a colour image would be better to work with).
Take a blank copy of the original image. We will form a binary map of the face on this; while not necessary, it will let us examine our success more easily.
Find the face, eyes and mouth in the original image.
We can assume that any data from the eyes and mouth forms part of the face, and mark these on the blank copy with "1"s.
Now we need a bit of maths. We know the face detection algorithm can only detect a face at a certain angle to the camera. We use this and select a statistical mask from certain parts of the image, let's say 2 or 3 patches of 10x10 pixels from the cheek area. This will be the most likely face area within the image. We use this data and compute values from the image such as the mean and standard deviation.
We now scan across the segmented part of the image where we have detected the face. We won't do the whole image, as this would take a long time. (Note: there is a border half the size of the mask that won't be looked at.) We examine each pixel and its surrounding neighbours up to the size of the 10x10 mask. If the average or standard deviation (whatever we are examining) is similar to that of our filter, say within 10%, then we mark this pixel in our blank copy as a "1" and consider that pixel to belong to the skin.
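A rough sketch of that statistical filter on a greyscale image (10x10 mask and 10% tolerance as described above; the cheek-patch coordinates are placeholders you would derive from the face/eye detection):

import cv2
import numpy as np

gray = cv2.cvtColor(cv2.imread('face.jpg'), cv2.COLOR_BGR2GRAY).astype(np.float32)

# Reference statistics from a small patch assumed to be skin (cheek area)
patch = gray[140:150, 90:100]               # placeholder coordinates
ref_mean = patch.mean()

# Local mean over a 10x10 neighbourhood for every pixel
local_mean = cv2.blur(gray, (10, 10))

# Mark pixels whose local mean is within ~10% of the reference as skin ("1")
skin_map = np.uint8(np.abs(local_mean - ref_mean) <= 0.10 * ref_mean) * 255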
As for Colour Segmentation:
(Note: You could also try this process for greyscale however it will be less successful due to the brickwork)
Repeat steps 1 to 2.
Again we will select certain areas of the image that we can expect to contain face data (i.e. 10 pixels below the eye). In this case, however, we examine the data that forms the colour of each pixel. Don't forget that HSV images can obtain better results from this process, and a combination even more so. We can then scan across the image, examining each pixel for a similar colour. If it matches, mark it on your binary map.
An alternative is subtracting or adding a calculated value from the R, G and B channels of the image, such that only the face data survives. You can convert this directly to a binary image by making any value > 1 equal to 1.
This will only work for skin; for the hair we will need other filters. A few notes:
A statistical filter working on a colour image has far greater ability, but takes longer.
Use data from the image itself to form your statistical filter, as this will allow other skin colours to be classified. A mathematically designed filter or colour segmentation would require a lot of work to achieve the same variability.
The size of the mask is important: the greater the mask size, the less likely errors will occur, but again processing time increases.
You can speed up the process by referencing the same area within the binary map copy: if the pixel you're examining is already a 1 (classified by the eye/nose/mouth detection), then why examine it again? Just skip it.
Multiple skin filters will provide better results, but they may also introduce more noise, and remember that each filter must then be compared with every pixel, increasing processing time.
Getting an algorithm to work accurately will require a bit of trial and error, but you should see comparable results fairly quickly using these methods.
I hope this helps you on your way. Sorry for not including any code, but hopefully others can help where you get stuck, and writing it yourself will help you understand what is going on and allow you to cut down on processing time. Let me know if you require any additional advice; I'm doing my PhD in image analysis, just so you know the advice is sound.
Take Care
Chris
[EDIT]
Some quick results:
Here is a 20x20 filter applied to detect the hair. The program I've written only works on greyscale images at the moment, so the skin detection suffers interference from the stone (see later).
Colour Image of Face Region
Binary Map of Average Hair Filter 20x20 Mask 40% Error allowed
As can be observed there is interference from the shirt in this case as it matches the colour of the hair. This can be eliminated by simply only examining the top third or half of the detected facial region.
Binary Map of Average Skin Filter 20x20 Mask 40% Error allowed
In this image I use only one filter, formed from the chin area, as the stubble obviously changes the filter's behaviour. There is still noise from the stone behind the individual, but using a colour image could eliminate this. The gaps in this case could be filled by an algorithm or another filter. Again there is noise from the edge of the shirt, but we could minimise this either by detecting the shirt and removing any data that forms it, or simply by only looking in certain areas.
Examples of the Regions to Inspect
To eliminate false classification you could take the top two thirds of the segmented image and look for the face and the width of the detected eyes to the bottom of the facial region for neck data.
Cheers Again
Chris
Hello Chris, can you share the code for this? I have used the GrabCut algorithm to crop the face up to the neck, but the accuracy of the results is not perfect. I am sharing the code where I use a webcam to capture images, blur the background, and then apply the GrabCut algorithm. Please check it and reply.
import numpy as np
import cv2
import pixellib
from pixellib.tune_bg import alter_bg
rect = (0,0,0,0)
startPoint = False
endPoint = False
img_counter = 0
# function for mouse callback
def on_mouse(event, x, y, flags, params):
    global rect, startPoint, endPoint
    # get mouse click
    if event == cv2.EVENT_LBUTTONDOWN:
        if startPoint == True and endPoint == True:
            startPoint = False
            endPoint = False
            rect = (0, 0, 0, 0)
        if startPoint == False:
            rect = (x, y, 0, 0)
            startPoint = True
        elif endPoint == False:
            rect = (rect[0], rect[1], x, y)
            endPoint = True
#cap = cv2.VideoCapture("YourVideoFile.mp4")
#cap = cv2.imread("/home/mongoose/Projects/background removal/bg_grabcut/GrabCut-from-video-master/IMG_6471.jpg")
#capturing the camera feed, '0' denotes the first camera connected to the computer
cap = cv2.VideoCapture(0)
waitTime = 50
change_bg = alter_bg(model_type = "pb")
change_bg.load_pascalvoc_model("/home/mongoose/Projects/background removal/bg_grabcut/test/xception_pascalvoc.pb")
change_bg.blur_camera(cap, extreme = True, frames_per_second= 10, output_video_name= "output_video.mp4", show_frames= True, frame_name= "frame", detect = "person")
#Reading the first frame
(grabbed, frame) = cap.read()
while(cap.isOpened()):
    (grabbed, frame) = cap.read()
    cv2.namedWindow('frame')
    cv2.setMouseCallback('frame', on_mouse)
    # drawing rectangle
    if startPoint == True and endPoint == True:
        cv2.rectangle(frame, (rect[0], rect[1]), (rect[2], rect[3]), (255, 0, 255), 2)
    if not grabbed:
        break
    cv2.imshow('frame', frame)
    key = cv2.waitKey(waitTime)
    if key == ord('q'):
        # 'q' pressed: quit
        break
    elif key % 256 == 32:
        # SPACE pressed: run GrabCut inside the selected rectangle and save the result
        alpha = 1  # Transparency factor.
        img_name = "opencv_frame_{}.png".format(img_counter)
        imgCopy = frame.copy()
        img = frame
        mask = np.zeros(img.shape[:2], np.uint8)
        bgdModel = np.zeros((1, 65), np.float64)
        fgdModel = np.zeros((1, 65), np.float64)
        w = abs(rect[0] - rect[2] + 10)
        h = abs(rect[1] - rect[3] + 10)
        rect2 = (rect[0] + 10, rect[1] + 10, w, h)
        cv2.grabCut(img, mask, rect2, bgdModel, fgdModel, 100, cv2.GC_INIT_WITH_RECT)
        mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
        img = img * mask2[:, :, np.newaxis]
        cv2.imwrite(img_name, img)
        print("{} written!".format(img_name))
        img_counter += 1
cap.release()
cv2.destroyAllWindows()