Selecting a Region OpenCV

I am new to OpenCV and I want to select a particular region in the video/image for detection. In my case I want to detect cars that are on the road only, not in the parking lot.

Well, selecting cars requires training data. But selecting an ROI (region of interest) is fairly simple:
Consider img = cv2.imread(image)
In that case, somewhere in your code, you can specify a region this way:
sub_image = img[y:y+h, x:x+w]
That slices out the ROI once you substitute actual values for x, y, w (the width), and h (the height). Remember that images are just 2D matrices.
Use CascadeClassifier() to select the car(s) from the image(s); see the OpenCV documentation for details. OpenCV comes packed with training data you can use to make classifications, in the form of XML files.
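A minimal Python sketch of both steps, assuming a pretrained car cascade is available (the cars.xml file name and the ROI coordinates are placeholders; the cascades bundled with OpenCV are mostly for faces, so a car cascade would need to be obtained or trained separately):

import cv2

# Load the image and slice out the road region; replace the coordinates
# with your own road ROI.
img = cv2.imread("frame.jpg")
x, y, w, h = 100, 200, 400, 150
sub_image = img[y:y+h, x:x+w]

# Run a cascade classifier on the ROI only; "cars.xml" is a hypothetical
# pretrained car cascade.
gray = cv2.cvtColor(sub_image, cv2.COLOR_BGR2GRAY)
cascade = cv2.CascadeClassifier("cars.xml")
cars = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3)
for (cx, cy, cw, ch) in cars:
    cv2.rectangle(sub_image, (cx, cy), (cx+cw, cy+ch), (0, 255, 0), 2)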

If you want to manually select a region of interest (ROI) to do some processing on it, then you may try using a mouse click event to select the start and stop points of your ROI.
Once you have the start and stop points, you can use them to retrieve the image from the selected region.
This can be done on an image or on a captured video frame.
#include <opencv2/opencv.hpp>   // OpenCV 2.x umbrella header
#include <iostream>
#include <unistd.h>             // sleep()

using namespace cv;

bool got_roi = false;
Point pt1, pt2;
Mat cap_img;

// Callback for mouse-click events: the x-y coordinates of button-down and
// button-up are stored in the two points pt1 and pt2.
void mouse_click(int event, int x, int y, int flags, void *param)
{
    switch (event)
    {
    case CV_EVENT_LBUTTONDOWN:
    {
        std::cout << "Mouse Pressed" << std::endl;
        if (!got_roi)
        {
            pt1.x = x;
            pt1.y = y;
        }
        else
        {
            std::cout << "ROI Already Acquired" << std::endl;
        }
        break;
    }
    case CV_EVENT_LBUTTONUP:
    {
        if (!got_roi)
        {
            std::cout << "Mouse LBUTTON Released" << std::endl;
            pt2.x = x;
            pt2.y = y;
            // Rect(pt1, pt2) assumes the drag went top-left to bottom-right
            Mat cl = cap_img.clone();
            Mat roi(cl, Rect(pt1, pt2));
            Mat prev_imgT = roi.clone();  // keep a copy of the first ROI
            std::cout << "PT1 " << pt1.x << ", " << pt1.y << std::endl;
            std::cout << "PT2 " << pt2.x << ", " << pt2.y << std::endl;
            imshow("Clone", cl);
            got_roi = true;
        }
        else
        {
            std::cout << "ROI Already Acquired" << std::endl;
        }
        break;
    }
    }
}

// In main, open the video and wait for the user to finish selecting the ROI.
// Once the ROI is captured in pt1 and pt2, the same coordinates can be used
// for every subsequent frame.
int main(int argc, char *argv[])
{
    std::cout << "Video File " << argv[1] << std::endl;
    cv::VideoCapture input_video(argv[1]);
    namedWindow("My_Win", 1);
    cvSetMouseCallback("My_Win", mouse_click, 0);
    sleep(1);
    while (input_video.grab())
    {
        cap_img.release();
        if (input_video.retrieve(cap_img))
        {
            imshow("My_Win", cap_img);
            if (!got_roi)
            {
                // Wait here until the user selects the desired ROI
                waitKey(0);
            }
            else
            {
                std::cout << "Got ROI, processing current frame" << std::endl;
                std::cout << "PT1 " << pt1.x << " " << pt1.y << std::endl;
                std::cout << "PT2 " << pt2.x << " " << pt2.y << std::endl;
                Mat roi2(cap_img, Rect(pt1, pt2));
                Mat curr_imgT = roi2.clone();
                Mat curr_img_t1;
                cvtColor(curr_imgT, curr_img_t1, CV_RGB2GRAY);
                imshow("curr_img", curr_img_t1);
                // Do the remaining processing on the captured ROI for every frame
                waitKey(1);
            }
        }
    }
    return 0;
}

You didn't tag which programming language you're writing in. Anyway, I'll answer in Python. (You can easily convert it to C++ if you want.)
import cv2

img = cv2.imread("scene.jpg")          # the image to pick cars from
carwidth, carheight = 100, 50          # size of the region to cut out

def mouse_drawing(event, x, y, flags, params):
    if event == cv2.EVENT_LBUTTONDOWN:
        car = img[y: y + carheight, x: x + carwidth]
        cv2.imwrite("car.png", car)    # imwrite needs a file extension

cv2.namedWindow("my_img")
cv2.setMouseCallback("my_img", mouse_drawing)
while True:
    cv2.imshow("my_img", img)
    key = cv2.waitKey(1)
    if key == 27:                      # Esc quits
        break
cv2.destroyAllWindows()
As other answers have noted, if you want to find cars automatically, that is a different problem and involves training data and other things.

Related

ID3D11DeviceContext::DrawIndexed() Failed

My program is a DirectX program that draws a container cube with smaller cubes inside it; the smaller cubes fall over time, I hope you understand what I mean.
The program isn't complete yet. It should draw the container only, but it draws nothing; only the background color is visible. I have only included what I think is needed.
These are the routines that initialize the program:
bool Game::init(HINSTANCE hinst, HWND _hw){
    Directx11::init(hinst, _hw);
    return LoadContent();
}
Directx11::init()
bool Directx11::init(HINSTANCE hinst, HWND hw){
    _hinst = hinst; _hwnd = hw;
    RECT rc;
    GetClientRect(_hwnd, &rc);
    height = rc.bottom - rc.top;
    width  = rc.right - rc.left;
    UINT flags = 0;
#ifdef _DEBUG
    flags |= D3D11_CREATE_DEVICE_DEBUG;
#endif
    HR(D3D11CreateDevice(0, _driverType, 0, flags, 0, 0, D3D11_SDK_VERSION,
                         &d3dDevice, &_featureLevel, &d3dDeviceContext));
    if (d3dDevice == 0 || d3dDeviceContext == 0)
        return false;
    DXGI_SWAP_CHAIN_DESC sdesc;
    ZeroMemory(&sdesc, sizeof(DXGI_SWAP_CHAIN_DESC));
    sdesc.Windowed = true;
    sdesc.BufferCount = 1;
    sdesc.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
    sdesc.BufferDesc.Height = height;
    sdesc.BufferDesc.Width = width;
    sdesc.BufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
    sdesc.BufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
    sdesc.OutputWindow = _hwnd;
    sdesc.BufferDesc.RefreshRate.Denominator = 1;
    sdesc.BufferDesc.RefreshRate.Numerator = 60;
    sdesc.Flags = 0;
    sdesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
    if (m4xMsaaEnable)
    {
        sdesc.SampleDesc.Count = 4;
        sdesc.SampleDesc.Quality = m4xMsaaQuality - 1;
    }
    else
    {
        sdesc.SampleDesc.Count = 1;
        sdesc.SampleDesc.Quality = 0;
    }
    IDXGIDevice *Device = 0;
    HR(d3dDevice->QueryInterface(__uuidof(IDXGIDevice), reinterpret_cast<void**>(&Device)));
    IDXGIAdapter *Ad = 0;
    HR(Device->GetParent(__uuidof(IDXGIAdapter), reinterpret_cast<void**>(&Ad)));
    IDXGIFactory *fac = 0;
    HR(Ad->GetParent(__uuidof(IDXGIFactory), reinterpret_cast<void**>(&fac)));
    fac->CreateSwapChain(d3dDevice, &sdesc, &swapchain);
    ReleaseCOM(Device);
    ReleaseCOM(Ad);
    ReleaseCOM(fac);
    ID3D11Texture2D *back = 0;
    HR(swapchain->GetBuffer(0, __uuidof(ID3D11Texture2D), reinterpret_cast<void**>(&back)));
    HR(d3dDevice->CreateRenderTargetView(back, 0, &RenderTarget));
    D3D11_TEXTURE2D_DESC Tdesc;
    ZeroMemory(&Tdesc, sizeof(D3D11_TEXTURE2D_DESC));
    Tdesc.BindFlags = D3D11_BIND_DEPTH_STENCIL;
    Tdesc.ArraySize = 1;
    Tdesc.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
    Tdesc.Height = height;
    Tdesc.Width = width;
    Tdesc.Usage = D3D11_USAGE_DEFAULT;
    Tdesc.MipLevels = 1;
    if (m4xMsaaEnable)
    {
        Tdesc.SampleDesc.Count = 4;
        Tdesc.SampleDesc.Quality = m4xMsaaQuality - 1;
    }
    else
    {
        Tdesc.SampleDesc.Count = 1;
        Tdesc.SampleDesc.Quality = 0;
    }
    HR(d3dDevice->CreateTexture2D(&Tdesc, 0, &depthview));
    HR(d3dDevice->CreateDepthStencilView(depthview, 0, &depth));
    d3dDeviceContext->OMSetRenderTargets(1, &RenderTarget, depth);
    D3D11_VIEWPORT vp;
    vp.TopLeftX = 0.0f;
    vp.TopLeftY = 0.0f;
    vp.Width  = static_cast<float>(width);
    vp.Height = static_cast<float>(height);
    vp.MinDepth = 0.0f;
    vp.MaxDepth = 1.0f;
    d3dDeviceContext->RSSetViewports(1, &vp);
    return true;
}
SetBuild() prepares the matrices inside the container for the smaller cubes. I didn't program it to draw the smaller cubes yet.
And this is the function that draws the scene:
void Game::Render(){
    d3dDeviceContext->ClearRenderTargetView(RenderTarget, reinterpret_cast<const float*>(&Colors::LightSteelBlue));
    d3dDeviceContext->ClearDepthStencilView(depth, D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.0f, 0);
    d3dDeviceContext->IASetInputLayout(_layout);
    d3dDeviceContext->IASetPrimitiveTopology(D3D10_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
    d3dDeviceContext->IASetIndexBuffer(indices, DXGI_FORMAT_R32_UINT, 0);
    UINT strides = sizeof(Vertex), off = 0;
    d3dDeviceContext->IASetVertexBuffers(0, 1, &vertices, &strides, &off);
    D3DX11_TECHNIQUE_DESC des;
    Tech->GetDesc(&des);
    Floor *Lookup; /* a variable to look up inside the matrices structure (Floor contains XMMATRIX Pieces[9]) */
    std::vector<XMFLOAT4X4> filled; // saves the matrices of the smaller cubes
    XMMATRIX V = XMLoadFloat4x4(&View), P = XMLoadFloat4x4(&Proj);
    XMMATRIX vp = V * P;
    XMMATRIX wvp;
    for (UINT i = 0; i < des.Passes; i++)
    {
        d3dDeviceContext->RSSetState(BuildRast);
        wvp = XMLoadFloat4x4(&(B.Memory[0].Pieces[0])) * vp; // loading the matrix at translation (0,0,0)
        HR(ShadeMat->SetMatrix(reinterpret_cast<float*>(&wvp)));
        HR(Tech->GetPassByIndex(i)->Apply(0, d3dDeviceContext));
        d3dDeviceContext->DrawIndexed(build_ind_count, build_ind_index, build_vers_index);
        d3dDeviceContext->RSSetState(PieseRast);
        UINT r1 = B.GetSize(), r2 = filled.size();
        for (UINT j = 0; j < r1; j++)
        {
            Lookup = &B.Memory[j];
            for (UINT r = 0; r < Lookup->filledindeces.size(); r++)
            {
                filled.push_back(Lookup->Pieces[Lookup->filledindeces[r]]);
            }
        }
        for (UINT j = 0; j < r2; j++)
        {
            ShadeMat->SetMatrix(reinterpret_cast<const float*>(&filled[i]));
            Tech->GetPassByIndex(i)->Apply(0, d3dDeviceContext);
            d3dDeviceContext->DrawIndexed(piese_ind_count, piese_ind_index, piese_vers_index);
        }
    }
    HR(swapchain->Present(0, 0));
}
Thanks in advance.
One bug in your program appears to be that you're using i, the index of the current pass, as an index into the filled vector, when you should apparently be using j.
Another apparent bug is that in the loop where you are supposed to be iterating over the elements of filled, you're not iterating over all of them. The value r2 is set to the size of filled before you append anything to it during that pass. During the first pass this means that nothing will be drawn by this loop. If your technique only has one pass then this means that the second DrawIndexed call in your code will never be executed.
It also appears you should only be adding matrices to filled once, regardless of the number of passes the technique has. You should consider whether your code is actually meant to work with multi-pass techniques.

matchTemplate with openCV in java

I have code like this:
Mat img = Highgui.imread(inFile);
Mat templ = Highgui.imread(templateFile);
int result_cols = img.cols() - templ.cols() + 1;
int result_rows = img.rows() - templ.rows() + 1;
Mat result = new Mat(result_rows, result_cols, CvType.CV_32FC1);
Imgproc.matchTemplate(img, templ, result, Imgproc.TM_CCOEFF);
/////Core.normalize(result, result, 0, 1, Core.NORM_MINMAX, -1, new Mat());
for (int i = 0; i < result_rows; i++)
    for (int j = 0; j < result_cols; j++)
        if (result.get(i, j)[0] > ?)
            //match!
I need to parse the input image to find multiple occurrences of the template image. I want a result like this:
result[0][0] = 15%
result[0][1] = 17%
result[x][y] = 47%
If I use TM_CCOEFF, all results are in [-xxxxxxxx.xxx, +xxxxxxxx.xxx].
If I use TM_SQDIFF, all results are xxxxxxxx.xxx.
If I use TM_CCORR, all results are xxxxxxxx.xxx.
How can I detect a match or a mismatch? What is the right condition in the if?
If I normalize the matrix, the maximum value is always set to 1, so I can't detect when the template isn't present in the image at all (all mismatches).
Thanks in advance
You can append "_NORMED" to the method names (for instance, CV_TM_CCOEFF_NORMED in C++; it could be slightly different in Java) to get a sensible value for your purpose.
By 'sensible', I mean that you will get values in the range of 0 to 1 which can be multiplied by 100 for your purpose.
Note: for CV_TM_SQDIFF_NORMED, the result will be in the range 0 to 1, and you will have to subtract the value from 1 to make sense of it, because the lowest value indicates the best match with this method.
Tip: you can use the Java equivalent of minMaxLoc() to get the minimum and maximum values. It's very useful when used in conjunction with matchTemplate.
I believe minMaxLoc is located inside the class Core.
Here's a C++ implementation:
matchTemplate(input_mat, template_mat, result_mat, method_NORMED);
double minVal, maxVal;
double percentage;
Point minLoc, maxLoc;
minMaxLoc(result_mat, &minVal, &maxVal, &minLoc, &maxLoc, Mat());
if (method_NORMED == CV_TM_SQDIFF_NORMED)
{
    percentage = 1 - minVal;   // lowest value is the best match for SQDIFF
}
else
{
    percentage = maxVal;
}
Useful C++ docs:
Match template description along with available methods: http://docs.opencv.org/modules/imgproc/doc/object_detection.html
MinMaxLoc documentation:
http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=minmaxloc#minmaxloc
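As a rough illustration of the normalized methods, here is a small Python sketch (the file names and the 0.8 threshold are illustrative) that reports every location whose TM_CCOEFF_NORMED score exceeds a chosen percentage, which is essentially the multiple-occurrence result asked about:

import cv2
import numpy as np

img = cv2.imread("scene.png", cv2.IMREAD_GRAYSCALE)       # hypothetical paths
templ = cv2.imread("template.png", cv2.IMREAD_GRAYSCALE)

result = cv2.matchTemplate(img, templ, cv2.TM_CCOEFF_NORMED)

# Every result cell is now roughly in [-1, 1]; treat it as a match score.
threshold = 0.8   # e.g. report matches above 80%
ys, xs = np.where(result >= threshold)
for x, y in zip(xs, ys):
    print("match at (%d, %d): %.0f%%" % (x, y, result[y, x] * 100))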
Another approach would be background differencing; you can observe the distortion between the template and the scene.
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.highgui.Highgui;
import org.opencv.imgproc.Imgproc;

public class BackgroundDifference {
    public static void main(String[] arg){
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat model = Highgui.imread("e:\\answers\\template.jpg", Highgui.CV_LOAD_IMAGE_GRAYSCALE);
        Mat scene = Highgui.imread("e:\\answers\\front7.jpg", Highgui.CV_LOAD_IMAGE_GRAYSCALE);
        Mat diff = new Mat();
        Core.absdiff(model, scene, diff);
        Imgproc.threshold(diff, diff, 15, 255, Imgproc.THRESH_BINARY);
        int distortion = Core.countNonZero(diff);
        System.out.println("distortion:" + distortion);
        Highgui.imwrite("e:\\answers\\diff.jpg", diff);
    }
}

Standard Hough Lines in EMGU CV

I need to use the standard Hough transform (instead of the HoughLinesBinary method, which implements the probabilistic Hough transform) and have attempted to do so by creating a custom version of the HoughLinesBinary method:
using (MemStorage stor = new MemStorage())
{
    IntPtr lines = CvInvoke.cvHoughLines2(canny.Ptr, stor.Ptr, Emgu.CV.CvEnum.HOUGH_TYPE.CV_HOUGH_STANDARD, rhoResolution, (thetaResolution * Math.PI) / 180, threshold, 0, 0);
    Seq<MCvMat> segments = new Seq<MCvMat>(lines, stor);
    List<MCvMat> lineslist = segments.ToList();
    foreach (MCvMat line in lineslist)
    {
        //Process lines: (rho, theta)
    }
}
My problem is that I am unsure what type the returned sequence holds. I believe it should be MCvMat, since the documentation says CvMat* is used in OpenCV, and that for STANDARD "the matrix must be (the created sequence will be) of CV_32FC2 type".
I am unclear as to what I would need to do to retrieve and process the correct output data from the STANDARD Hough lines (i.e., the 2x1 vector for each line giving the rho and theta information).
Any help would be greatly appreciated. Thank you
-Sal
I had the same problem myself a couple of days ago. This is how I solved it using marshalling. Please let me know if you find a simpler solution.
using (MemStorage stor = new MemStorage())
{
    IntPtr lines = CvInvoke.cvHoughLines2(canny.Ptr, stor.Ptr, Emgu.CV.CvEnum.HOUGH_TYPE.CV_HOUGH_STANDARD, rhoResolution, (thetaResolution * Math.PI) / 180, threshold, 0, 0);
    int maxLines = 100;
    for (int i = 0; i < maxLines; i++)
    {
        IntPtr line = CvInvoke.cvGetSeqElem(lines, i);
        if (line == IntPtr.Zero)
        {
            // No more lines
            break;
        }
        PolarCoordinates coords = (PolarCoordinates)System.Runtime.InteropServices.Marshal.PtrToStructure(line, typeof(PolarCoordinates));
        // Do something with your Hough lines
    }
}
with a struct defined as follows:
public struct PolarCoordinates
{
    public float Rho;
    public float Theta;
}
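For comparison, this is roughly what the same standard Hough call looks like with the OpenCV Python bindings, where each returned element is exactly the (rho, theta) pair the marshalled struct above recovers (the Canny thresholds and the vote threshold are illustrative):

import cv2
import numpy as np

edges = cv2.Canny(cv2.imread("input.png", cv2.IMREAD_GRAYSCALE), 50, 150)
lines = cv2.HoughLines(edges, 1, np.pi / 180, 100)  # rho res., theta res., threshold
if lines is not None:
    for line in lines:
        rho, theta = line[0]
        print("rho=%.1f theta=%.3f" % (rho, theta))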

Find rectangles without corners using opencv

I have an image where I want to find contours but the "contours" in my image don't have corners. Are there some tricks I can use to help find the rectangles that are implied by the lines in this image? I thought about extending all the lines to form the corners but I worry about lines intersecting from other contours and how to determine which intersections I'm interested in. I'm very new to opencv and I don't know much about image processing. Thank you for any help you can give.
Fit lines in your binary image with the Hough transform and fit rectangles to the orthogonally intersecting lines.
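A rough Python sketch of that suggestion, assuming a binary edge image as input: take standard Hough lines in normal form (x·cos θ + y·sin θ = ρ), split them into near-vertical and near-horizontal groups, and solve the 2x2 system for each orthogonal pair to get candidate corners. The input file name and angle tolerances are illustrative:

import cv2
import numpy as np

binary = cv2.imread("lines.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input
lines = cv2.HoughLines(binary, 1, np.pi / 180, 80)

horizontal, vertical = [], []
for line in (lines if lines is not None else []):
    rho, theta = line[0]
    # theta near 0 (or pi) -> vertical line; theta near pi/2 -> horizontal line
    if abs(theta) < 0.1 or abs(theta - np.pi) < 0.1:
        vertical.append((rho, theta))
    elif abs(theta - np.pi / 2) < 0.1:
        horizontal.append((rho, theta))

# Intersect each orthogonal pair: solve [cos t, sin t] . [x, y] = rho for both lines.
for rho_v, th_v in vertical:
    for rho_h, th_h in horizontal:
        a = np.array([[np.cos(th_v), np.sin(th_v)],
                      [np.cos(th_h), np.sin(th_h)]])
        b = np.array([rho_v, rho_h])
        x, y = np.linalg.solve(a, b)
        print("corner candidate at (%.0f, %.0f)" % (x, y))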
I ended up implementing my own solution. It isn't very graceful, but it gets the job done. I would be interested in hearing about improvements. HoughLines2 didn't always give me good results for finding line segments, and I had to tweak the threshold value a lot for different scenarios. Instead I opted for FindContours; by taking only contours with two elements, I should be guaranteed 1-pixel-wide lines. After finding the lines I iterated through them and traced them out to find the rectangles.
Here points is a CvSeq* of the line endpoints:
CvPoint p1 = cvPoint(-1, -1), p2;   // current line segment
CvPoint *p3, *p4;                   // candidate connecting segment
int pos, pos2;
int maxDist = 20;                   // max gap allowed when tracing; value is illustrative

while (points->total > 0) {
    if (p1.x == -1 && p1.y == -1) {
        cvSeqPopFront(points, &p1);
        cvSeqPopFront(points, &p2);
    }
    if ((pos = findClosestPoint(&p1, &p2, points, maxDist)) >= 0) {
        p3 = (CvPoint*)cvGetSeqElem(points, pos);
        pos2 = (pos % 2 == 0) ? pos + 1 : pos - 1; // lines are in pairs of points
        p4 = (CvPoint*)cvGetSeqElem(points, pos2);
        if (isVertical(&p1, &p2) && isHorizontal(p3, p4)) {
            printf("found corner %d %d\n", p2.x, p3->y);
        } else if (isHorizontal(&p1, &p2) && isVertical(p3, p4)) {
            printf("found corner %d %d\n", p3->x, p2.y);
        }
        memcpy(&p1, p3, sizeof(CvPoint));
        memcpy(&p2, p4, sizeof(CvPoint));
        cvSeqRemove(points, (pos > pos2) ? pos : pos2);
        cvSeqRemove(points, (pos > pos2) ? pos2 : pos);
    } else {
        p1.x = -1;
        p1.y = -1;
    }
}
int findClosestPoint(CvPoint *p1, CvPoint *p2, CvSeq *points, int maxDist) {
    int ret = -1, i;
    float dist, minDist = maxDist;
    CvPoint *test;
    int (*dirTest)(CvPoint *, CvPoint *) = NULL;
    if (isVertical(p1, p2)) {          // vertical line
        if (p2->y > p1->y) {           // going down
            dirTest = isBelow;
        } else {                       // going up
            dirTest = isAbove;
        }
    } else if (isHorizontal(p1, p2)) { // horizontal line
        if (p2->x > p1->x) {           // going right
            dirTest = isRight;
        } else {                       // going left
            dirTest = isLeft;
        }
    }
    if (dirTest == NULL)               // diagonal segment: no direction test applies
        return -1;
    for (i = 0; i < points->total; i++)
    {
        test = (CvPoint*)cvGetSeqElem(points, i);
        if (dirTest(p2, test)) {       // only test points in the region we care about
            dist = sqrt(pow(test->x - p2->x, 2) + pow(test->y - p2->y, 2));
            if (dist < minDist) {
                minDist = dist;
                ret = i;
            }
        }
    }
    return ret;
}
int isVertical(CvPoint *p1, CvPoint *p2){
    return p1->x == p2->x;
}
int isHorizontal(CvPoint *p1, CvPoint *p2){
    return p1->y == p2->y;
}
int isRight(CvPoint *pt1, CvPoint *pt2){
    return pt2->x > pt1->x;
}
int isLeft(CvPoint *pt1, CvPoint *pt2){
    return pt2->x < pt1->x;
}
int isBelow(CvPoint *pt1, CvPoint *pt2){
    return pt2->y > pt1->y;
}
int isAbove(CvPoint *pt1, CvPoint *pt2){
    return pt2->y < pt1->y;
}
You could also try posing it as an optimization problem. A rectangle is defined as a 4D state vector (x, y, width, height), or a 5D vector if you include rotation (x, y, width, height, rotation). From your current state you could do gradient descent towards the result of the Hough lines to converge on the optimal state. Another option is linear least squares: http://people.inf.ethz.ch/arbenz/MatlabKurs/node88.html
Using the Hough transform you will be able to extract lines. Then you can calculate the intersections of these lines to estimate the positions of the rectangles.

Checking if removing an edge in a graph will result in the graph splitting

I have a graph structure where I am removing edges one by one until some conditions are met. My brain has totally stopped and I can't find an efficient way to detect if removing an edge will result in my graph splitting into two or more graphs.
The brute-force solution would be to do a BFS until one can reach all the nodes from a random node, but that will take too much time with large graphs...
Any ideas?
Edit: After a bit of searching, it seems what I am trying to do is very similar to Fleury's algorithm, where I need to find whether an edge is a "bridge" or not.
Edges that make a graph disconnected when removed are called 'bridges'. You can find them in O(|V|+|E|) with a single depth-first search over the whole graph. A related algorithm that finds all 'articulation points' (nodes that, if removed, make the graph disconnected) follows. Any edge between two articulation points is a bridge (you can test that in a second pass over all edges).
//
// g: graph; v: current vertex id;
// r_p: parents (r/w); r_v: order of visit (r/w); r_a: ascents (r/w);
// r_ap: articulation points, bool array (r/w); n_v: dfs visit counter
//
void dfs_art_i(graph *g, int v, int *r_p, int *r_v, int *r_a, int *r_ap, int *n_v) {
    int i;
    r_v[v] = *n_v;
    r_a[v] = *n_v;
    (*n_v)++;
    for (i = 0; i < g->vertices[v].n_edges; i++) {
        int w = g->vertices[v].edges[i].target;
        if (r_v[w] == -1) {
            // This is the first time we find this vertex
            r_p[w] = v;
            dfs_art_i(g, w, r_p, r_v, r_a, r_ap, n_v);
            if (r_a[w] >= r_v[v]) {
                // Articulation point found
                r_ap[v] = 1;
            }
            if (r_a[w] < r_a[v]) {
                r_a[v] = r_a[w];
            }
        }
        else {
            // We have already found this vertex before
            if (r_v[w] < r_a[v]) {
                r_a[v] = r_v[w];
            }
        }
    }
}

int dfs_art(graph *g, int root, int *r_p, int *r_v, int *r_a, int *r_ap) {
    int i, n_visited = 0, n_root_children = 0;
    for (i = 0; i < g->n_vertices; i++) {
        r_p[i] = r_v[i] = r_a[i] = -1;
        r_ap[i] = 0;
    }
    dfs_art_i(g, root, r_p, r_v, r_a, r_ap, &n_visited);
    // the root can only be an AP if it has more than 1 child
    for (i = 0; i < g->n_vertices; i++) {
        if (r_p[i] == root) {
            n_root_children++;
        }
    }
    r_ap[root] = n_root_children > 1 ? 1 : 0;
    return 1;
}
If you remove the link between vertices A and B, can't you just check that you can still reach A from B after the edge removal? That's a little better than checking that you can reach all nodes from a random node.
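A minimal sketch of that check in Python, assuming the graph is stored as an adjacency dict of sets; the BFS starts at A and stops as soon as it sees B:

from collections import deque

def still_connected(adj, a, b):
    """After removing edge (a, b) from adj, is b still reachable from a?"""
    seen, queue = {a}, deque([a])
    while queue:
        v = queue.popleft()
        if v == b:
            return True
        for w in adj[v]:
            if w not in seen:
                seen.add(w)
                queue.append(w)
    return False

# Usage: remove the edge first, then test.
adj = {1: {2, 3}, 2: {1, 3}, 3: {1, 2}}
adj[1].discard(2); adj[2].discard(1)   # remove edge (1, 2)
print(still_connected(adj, 1, 2))      # True: the path 1-3-2 remains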
How do you choose the edges to be removed?
Can you tell more about your problem domain?
Just how large is your graph? Maybe BFS is just fine!
After you wrote that you are trying to find out whether an edge is a bridge or not, I suggest you remove edges in decreasing order of their betweenness measure.
Essentially, betweenness is a measure of an edge's (or vertex's) centrality in a graph.
Edges with a higher betweenness value have greater potential of being a bridge in the graph.
Look it up on the web; the algorithm is called the 'Girvan-Newman algorithm'.
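If a library is an option, NetworkX exposes both ideas directly; a small sketch with an illustrative graph of two triangles joined by a single edge:

import networkx as nx

g = nx.Graph([(1, 2), (2, 3), (3, 1), (3, 4), (4, 5), (5, 6), (6, 4)])

# Edges whose removal disconnects the graph:
print(list(nx.bridges(g)))                      # [(3, 4)]

# Edge betweenness: the bridge between the two dense regions scores highest.
bc = nx.edge_betweenness_centrality(g)
print(max(bc, key=bc.get))                      # (3, 4)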
