How to merge a lot of square images via OpenCV?

How can I merge images like below into a single image using OpenCV (there can be any number of them both horizontally and vertically)? Is there any built-in solution to do it?
Additional pieces:

Well, it seems that I finished the puzzle:
Main steps:
Compare each pair of images (puzzle pieces) to find their relative position (findRelativePositions and getPosition).
Build a map of the pieces from the relative positions (buildPuzzle and builfForPiece).
Create the final collage, putting each image at the correct position (final part of buildPuzzle).
The comparison between pieces A and B in step 1 is done by checking for similarity (sum of absolute differences) among:
B is NORTH of A: A's first row and B's last row;
B is SOUTH of A: A's last row and B's first row;
B is WEST of A: A's last column and B's first column;
B is EAST of A: A's first column and B's last column.
The images do not overlap, but we can assume that adjoining rows (columns) are quite similar, so the key aspect is to use an (ad hoc) threshold to decide whether two pieces are adjacent or not. This is handled in the function getPosition, with the hard-coded value threshold.
Here is the full code. Please let me know if something is not clear.
#include <opencv2/opencv.hpp>
#include <algorithm>
#include <set>
using namespace std;
using namespace cv;
enum Direction
{
NORTH = 0,
SOUTH,
WEST,
EAST
};
int getPosition(const Mat3b& A, const Mat3b& B, double& cost)
{
Mat hsvA, hsvB;
cvtColor(A, hsvA, COLOR_BGR2HSV);
cvtColor(B, hsvB, COLOR_BGR2HSV);
int threshold = 1000;
// Check NORTH
Mat3b AN = hsvA(Range(0, 1), Range::all());
Mat3b BS = hsvB(Range(B.rows - 1, B.rows), Range::all());
Mat3b AN_BS;
absdiff(AN, BS, AN_BS);
Scalar scoreN = sum(AN_BS);
// Check SOUTH
Mat3b AS = hsvA(Range(A.rows - 1, A.rows), Range::all());
Mat3b BN = hsvB(Range(0, 1), Range::all());
Mat3b AS_BN;
absdiff(AS, BN, AS_BN);
Scalar scoreS = sum(AS_BN);
// Check WEST
Mat3b AW = hsvA(Range::all(), Range(A.cols - 1, A.cols));
Mat3b BE = hsvB(Range::all(), Range(0, 1));
Mat3b AW_BE;
absdiff(AW, BE, AW_BE);
Scalar scoreW = sum(AW_BE);
// Check EAST
Mat3b AE = hsvA(Range::all(), Range(0, 1));
Mat3b BW = hsvB(Range::all(), Range(B.cols - 1, B.cols));
Mat3b AE_BW;
absdiff(AE, BW, AE_BW);
Scalar scoreE = sum(AE_BW);
vector<double> scores{ scoreN[0], scoreS[0], scoreW[0], scoreE[0] };
int idx_min = distance(scores.begin(), min_element(scores.begin(), scores.end()));
int direction = (scores[idx_min] < threshold) ? idx_min : -1;
cost = scores[idx_min];
return direction;
}
void resolveConflicts(Mat1i& positions, Mat1d& costs)
{
for (int c = 0; c < 4; ++c)
{
// Search for duplicate pieces in each column
set<int> pieces;
set<int> dups;
for (int r = 0; r < positions.rows; ++r)
{
int label = positions(r, c);
if (label >= 0)
{
if (pieces.count(label) == 1)
{
dups.insert(label);
}
else
{
pieces.insert(label);
}
}
}
if (dups.size() > 0)
{
for (int duplicate : dups)
{
// Find the minimum cost position among the rows holding this duplicate label
int min_idx = -1;
double min_cost = DBL_MAX;
for (int ir = 0; ir < positions.rows; ++ir)
{
if ((positions(ir, c) == duplicate) && (costs(ir, c) < min_cost))
{
min_cost = costs(ir, c);
min_idx = ir;
}
}
// Keep only the minimum cost position
for (int ir = 0; ir < positions.rows; ++ir)
{
if ((positions(ir, c) == duplicate) && (ir != min_idx))
{
positions(ir, c) = -1;
}
}
}
}
}
}
}
void findRelativePositions(const vector<Mat3b>& pieces, Mat1i& positions)
{
positions = Mat1i(pieces.size(), 4, -1);
Mat1d costs(pieces.size(), 4, DBL_MAX);
for (int i = 0; i < pieces.size(); ++i)
{
for (int j = i + 1; j < pieces.size(); ++j)
{
double cost;
int pos = getPosition(pieces[i], pieces[j], cost);
if (pos >= 0)
{
if (costs(i, pos) > cost)
{
positions(i, pos) = j;
costs(i, pos) = cost;
switch (pos)
{
case NORTH:
positions(j, SOUTH) = i;
costs(j, SOUTH) = cost;
break;
case SOUTH:
positions(j, NORTH) = i;
costs(j, NORTH) = cost;
break;
case WEST:
positions(j, EAST) = i;
costs(j, EAST) = cost;
break;
case EAST:
positions(j, WEST) = i;
costs(j, WEST) = cost;
break;
}
}
}
}
}
resolveConflicts(positions, costs);
}
void builfForPiece(int idx_piece, set<int>& posed, Mat1i& labels, const Mat1i& positions)
{
Point pos(-1, -1);
// Find idx_piece on grid;
for (int r = 0; r < labels.rows; ++r)
{
for (int c = 0; c < labels.cols; ++c)
{
if (labels(r, c) == idx_piece)
{
pos = Point(c, r);
break;
}
}
if (pos.x >= 0) break;
}
if (pos.x < 0) return;
// Put connected pieces
for (int c = 0; c < 4; ++c)
{
int next = positions(idx_piece, c);
if (next > 0)
{
switch (c)
{
case NORTH:
labels(Point(pos.x, pos.y - 1)) = next;
posed.insert(next);
break;
case SOUTH:
labels(Point(pos.x, pos.y + 1)) = next;
posed.insert(next);
break;
case WEST:
labels(Point(pos.x + 1, pos.y)) = next;
posed.insert(next);
break;
case EAST:
labels(Point(pos.x - 1, pos.y)) = next;
posed.insert(next);
break;
}
}
}
}
Mat3b buildPuzzle(const vector<Mat3b>& pieces, const Mat1i& positions, Size sz)
{
int n_pieces = pieces.size();
set<int> posed;
set<int> todo;
for (int i = 0; i < n_pieces; ++i) todo.insert(i);
Mat1i labels(n_pieces * 2 + 1, n_pieces * 2 + 1, -1);
// Place first element in the center
todo.erase(0);
labels(Point(n_pieces, n_pieces)) = 0;
posed.insert(0);
builfForPiece(0, posed, labels, positions);
// Build puzzle starting from the already placed elements
while (todo.size() > 0)
{
auto it = todo.begin();
int next = -1;
do
{
next = *it;
++it;
} while (posed.count(next) == 0 && it != todo.end());
todo.erase(next);
builfForPiece(next, posed, labels, positions);
}
// Posed all pieces, now collage!
vector<Point> pieces_position;
Mat1b mask = labels >= 0;
findNonZero(mask, pieces_position);
Rect roi = boundingRect(pieces_position);
Mat1i lbls = labels(roi);
Mat3b collage(roi.height * sz.height, roi.width * sz.width, Vec3b(0, 0, 0));
for (int r = 0; r < lbls.rows; ++r)
{
for (int c = 0; c < lbls.cols; ++c)
{
if (lbls(r, c) >= 0)
{
Rect rect(c*sz.width, r*sz.height, sz.width, sz.height);
pieces[lbls(r, c)].copyTo(collage(rect));
}
}
}
return collage;
}
int main()
{
// Load images
vector<String> filenames;
glob("D:\\SO\\img\\puzzle*", filenames);
vector<Mat3b> pieces(filenames.size());
for (int i = 0; i < filenames.size(); ++i)
{
pieces[i] = imread(filenames[i], IMREAD_COLOR);
}
// Find Relative positions
Mat1i positions;
findRelativePositions(pieces, positions);
// Build the puzzle
Mat3b puzzle = buildPuzzle(pieces, positions, pieces[0].size());
imshow("Puzzle", puzzle);
waitKey();
return 0;
}
NOTE
No, there is no built-in solution to perform this. Image stitching won't work since the images do not overlap.
I cannot guarantee that this works for every puzzle, but it should work for most of them.
I probably shouldn't have spent a couple of hours on this, but it was fun :D
EDIT
Adding more puzzle pieces generated wrong results with the previous code version. This was due to the (wrong) assumption that at most one piece would be a good enough match to be connected to a given piece.
Now I added a cost matrix, and only the minimum-cost piece is saved as the neighbor of a given piece.
I also added a resolveConflicts function that prevents one piece from being kept as the neighbor, in the same direction, of more than one piece.
This is the result after adding more pieces:
UPDATE
Considerations after increasing the number of puzzle pieces:
This solution is dependent on the input order of the pieces, since it uses a greedy approach to find neighbors.
While searching for neighbors, it's better to compare only the H channel in HSV space. I updated the code above with this improvement.
The final solution probably needs some kind of global minimization of a global cost matrix. This would make the method independent of the input order. I'll be back on this as soon as possible.

Once you have loaded these images as OpenCV Mats, you can concatenate them either vertically or horizontally using:
Mat A, B; // Images that will be concatenated
Mat H; // Here we will concatenate A and B horizontally
Mat V; // Here we will concatenate A and B vertically
hconcat(A, B, H);
vconcat(A, B, V);
If you need to concatenate more than two images, you can use these methods recursively.
By the way, these functions (hconcat and vconcat) are part of the OpenCV core module; I have used them in the past.
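If all the pieces have the same size, the vector overloads of hconcat and vconcat make it easy to build a whole grid with one call per row. Here is a minimal sketch along those lines; the helper name makeGrid is made up, and it assumes the number of tiles is an exact multiple of gridCols so every row ends up with the same width:
#include <opencv2/opencv.hpp>
#include <algorithm>
#include <vector>

// Build a grid collage from equally sized tiles, gridCols tiles per row.
cv::Mat makeGrid(const std::vector<cv::Mat>& tiles, int gridCols)
{
    std::vector<cv::Mat> rows;
    for (size_t i = 0; i < tiles.size(); i += gridCols)
    {
        size_t last = std::min(i + gridCols, tiles.size());
        std::vector<cv::Mat> rowTiles(tiles.begin() + i, tiles.begin() + last);
        cv::Mat row;
        cv::hconcat(rowTiles, row); // concatenate one row of tiles horizontally
        rows.push_back(row);
    }
    cv::Mat grid;
    cv::vconcat(rows, grid);        // stack the rows vertically
    return grid;
}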

Related

Using opencv's ptr pointer to manipulate pixels

I used OpenCV to read in a picture and split the image into four sub-matrices.
The split works as follows (1-based row/column numbering):
Odd rows and odd columns form matrix A;
Odd rows and even columns form matrix B;
Even rows and odd columns form matrix C;
Even rows and even columns form matrix D.
My code:
void SplitMat(Mat& src, Mat& objA, Mat& objB, Mat& objC, Mat& objD) {
// src: Input image, CV_16UC1
// objA: Output image, CV_16UC1
// objB: Output image, CV_16UC1
// objC: Output image, CV_16UC1
// objD: Output image, CV_16UC1
Mat dst;
src.copyTo(dst);
int row, col, i, j;
for (row = 0, i = 0; row < dst.rows - 1; row = row + 2, ++i) {
ushort* temp0 = dst.ptr<ushort>(row);
ushort* temp1 = dst.ptr<ushort>(row + 1);
ushort* obja = objA.ptr<ushort>(i);
ushort* objb = objB.ptr<ushort>(i);
ushort* objc = objC.ptr<ushort>(i);
ushort* objd = objD.ptr<ushort>(i);
for (col = 0, j = 0; col < dst.cols - 1; col = col + 2, ++j) {
obja[j] = temp0[col];
objb[j] = temp0[col + 1];
objc[j] = temp1[col];
objd[j] = temp1[col + 1];
}
}
}
Test result:
I don't know why the image appears duplicated in the x direction.
Please advise: is this a logic error or something else? Thanks.
I found the reason for this problem: the picture is read as 8-bit, while the program treats it as 16-bit.
The uchar -> ushort mismatch is what corrupts the layout.
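A minimal way to guard against that mismatch (a sketch only; the path and the scaling factor are placeholders) is to read the image with its original depth and convert it, or branch on it, before using ushort pointers:
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    // "input.png" is a placeholder path
    Mat src = imread("input.png", IMREAD_UNCHANGED); // keep the file's original bit depth
    if (src.depth() == CV_8U)
    {
        // The file is 8-bit: either use uchar pointers (src.ptr<uchar>(row)),
        // or convert to 16-bit before using ushort pointers as SplitMat does.
        src.convertTo(src, CV_16U, 257.0); // 257 maps 255 exactly to 65535
    }
    // From here on, src.ptr<ushort>(row) is valid.
    return 0;
}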

Converting a pointcloud to a depth/multi channel image

I have a pointcloud generated by scanning a planar surface using stereo cameras. I have generated features such as normals, fpfh etc and using this information I want to classify areas in the pointcloud. To enable the use of more traditional CNN approaches I want to convert this pointcloud to a multi-channel image in opencv. I have the pointcloud collapsed to the XY plane, and aligned to the X and Y axes so that I can create a bounding box for the image.
I am looking for ideas on how to proceed further with the mapping from points to pixels. Specifically, I am confused about the image size, and how to go about filling in each pixel with the appropriate data. (Overlapping points would be averaged out, empty ones will be labelled accordingly.) Since this is an unorganized pointcloud, I do not have camera parameters to use, and I guess PCL's RangeImage class would not work in my case.
Any help is appreciated!
Try creating an empty cv::Mat of predetermined size first. Then iterate through every pixel of that Mat to determine what value it should take.
Here is some code which does something similar to what you were describing:
cv::Mat makeImageFromPointCloud(pcl::PointCloud<pcl::PointXYZI>::Ptr cloud, std::string dimensionToRemove, float stepSize1, float stepSize2)
{
pcl::PointXYZI cloudMin, cloudMax;
pcl::getMinMax3D(*cloud, cloudMin, cloudMax);
std::string dimen1, dimen2;
float dimen1Max, dimen1Min, dimen2Min, dimen2Max;
if (dimensionToRemove == "x")
{
dimen1 = "y";
dimen2 = "z";
dimen1Min = cloudMin.y;
dimen1Max = cloudMax.y;
dimen2Min = cloudMin.z;
dimen2Max = cloudMax.z;
}
else if (dimensionToRemove == "y")
{
dimen1 = "x";
dimen2 = "z";
dimen1Min = cloudMin.x;
dimen1Max = cloudMax.x;
dimen2Min = cloudMin.z;
dimen2Max = cloudMax.z;
}
else if (dimensionToRemove == "z")
{
dimen1 = "x";
dimen2 = "y";
dimen1Min = cloudMin.x;
dimen1Max = cloudMax.x;
dimen2Min = cloudMin.y;
dimen2Max = cloudMax.y;
}
std::vector<std::vector<int>> pointCountGrid;
int maxPoints = 0;
std::vector<pcl::PointCloud<pcl::PointXYZI>::Ptr> grid;
for (float i = dimen1Min; i < dimen1Max; i += stepSize1)
{
pcl::PointCloud<pcl::PointXYZI>::Ptr slice = passThroughFilter1D(cloud, dimen1, i, i + stepSize1);
grid.push_back(slice);
std::vector<int> slicePointCount;
for (float j = dimen2Min; j < dimen2Max; j += stepSize2)
{
pcl::PointCloud<pcl::PointXYZI>::Ptr grid_cell = passThroughFilter1D(slice, dimen2, j, j + stepSize2);
int gridSize = grid_cell->size();
slicePointCount.push_back(gridSize);
if (gridSize > maxPoints)
{
maxPoints = gridSize;
}
}
pointCountGrid.push_back(slicePointCount);
}
cv::Mat mat(static_cast<int>(pointCountGrid.size()), static_cast<int>(pointCountGrid.at(0).size()), CV_8UC1);
mat = cv::Scalar(0);
for (int i = 0; i < mat.rows; ++i)
{
for (int j = 0; j < mat.cols; ++j)
{
int pointCount = pointCountGrid.at(i).at(j);
float percentOfMax = (pointCount + 0.0) / (maxPoints + 0.0);
int intensity = percentOfMax * 255;
mat.at<uchar>(i, j) = intensity;
}
}
return mat;
}
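The code above calls a helper passThroughFilter1D that is not shown. A minimal sketch of what it might look like, built on PCL's PassThrough filter; the exact signature is an assumption inferred from the calls above, not the original author's code:
#include <string>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/filters/passthrough.h>

// Keep only the points whose coordinate along `field` ("x", "y" or "z")
// lies in [low, high]. Assumed implementation of the helper used above.
pcl::PointCloud<pcl::PointXYZI>::Ptr passThroughFilter1D(
    pcl::PointCloud<pcl::PointXYZI>::Ptr cloud,
    const std::string& field, float low, float high)
{
    pcl::PointCloud<pcl::PointXYZI>::Ptr filtered(new pcl::PointCloud<pcl::PointXYZI>);
    pcl::PassThrough<pcl::PointXYZI> pass;
    pass.setInputCloud(cloud);
    pass.setFilterFieldName(field);
    pass.setFilterLimits(low, high);
    pass.filter(*filtered);
    return filtered;
}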

How to apply K means in a mask of an image instead the whole one

I want to apply K-means in OpenCV to a region of an image that is not a square or a rectangle. For example, the source image is:
now I select a custom mask:
and apply K Means with K = 3:
Obviously without considering the area outside the mask (white).
Instead, what I can do with OpenCV is K-means over the whole image, including the area outside the mask:
And that messes up my final image because black is considered as one colour.
Do you have any clue?
Thank you in advance.
Quick and dirty solution.
vector<Vec3b> points;
vector<Point> locations;
for( int y = 0; y < src.rows; y++) {
for( int x = 0; x < src.cols; x++) {
if ( (int)mask.at<unsigned char>(y,x) != 0 ) {
points.push_back(src.at<Vec3b>(y,x));
locations.push_back(Point(x,y));
}
}
}
Mat kmeanPoints(points.size(), 3, CV_32F);
for( int y = 0; y < points.size(); y++ ) {
for( int z = 0; z < 3; z++) {
kmeanPoints.at<float>(y, z) = points[y][z];
}
}
Mat labels;
Mat centers;
kmeans(kmeanPoints, 4, labels, TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10, 0.1), 10, cv::KMEANS_PP_CENTERS, centers);
Mat final = Mat::zeros( src.size(), src.type() );
Vec3b tempColor;
for(int i = 0; i<locations.size(); i++) {
int cluster_idx = labels.at<int>(i,0);
tempColor[0] = centers.at<float>(cluster_idx, 0);
tempColor[1] = centers.at<float>(cluster_idx, 1);
tempColor[2] = centers.at<float>(cluster_idx, 2);
final.at<Vec3b>(locations[i]) = tempColor;
}
Assuming that you have an input RGB image called img and a one-channel mask called mask, here is the snippet to prepare your k-means computation:
int nbClasses = 3; // or whatever you want
cv::TermCriteria myCriteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 10, 1.0);
cv::Mat data, labels, centers, result;
img.convertTo(data, CV_32F);
// reshape into 3 columns (one per channel, in BGR order) and as many rows as the total number of pixels in img
data = data.reshape(1, data.total());
If you want to apply a normal k-means (without mask) :
// apply k-means
cv::kmeans(data, nbClasses, labels, myCriteria, 3, cv::KMEANS_PP_CENTERS, centers);
// reshape both to a single column of Vec3f pixels
centers = centers.reshape(3, centers.rows);
data = data.reshape(3, data.rows);
// replace pixel values with their center value
cv::Vec3f *p = data.ptr<cv::Vec3f>();
for (size_t i = 0; i < data.rows; i++)
{
int center_id = labels.at<int>(i);
p[i] = centers.at<cv::Vec3f>(center_id);
}
// back to 2D image
data = data.reshape(3, img.rows);
// optional conversion to uchar
data.convertTo(result, CV_8U);
The result is here.
But, if you want instead to apply a masked k-means :
int nbWhitePixels = cv::countNonZero(mask);
cv::Mat dataMasked = cv::Mat(nbWhitePixels, 3, CV_32F, cv::Scalar(0));
cv::Mat maskFlatten = mask.reshape(1, mask.total());
// filter data by the mask
int idx = 0;
for (int k = 0; k < mask.total(); k++)
{
int val = maskFlatten.at<uchar>(k, 0);
if (val != 0)
{
float val0 = data.at<float>(k, 0);
float val1 = data.at<float>(k, 1);
float val2 = data.at<float>(k, 2);
dataMasked.at<float>(idx,0) = val0;
dataMasked.at<float>(idx,1) = val1;
dataMasked.at<float>(idx,2) = val2;
idx++;
}
}
// apply k-means
cv::kmeans(dataMasked, nbClasses, labels, myCriteria, 3, cv::KMEANS_PP_CENTERS, centers);
// reshape to a single column of Vec3f pixels
centers = centers.reshape(3, centers.rows);
dataMasked = dataMasked.reshape(3, dataMasked.rows);
data = data.reshape(3, data.rows);
// replace pixel values with their center value, only for pixels in mask
cv::Vec3f *p = data.ptr<cv::Vec3f>();
idx = 0;
for (size_t i = 0; i < data.rows; i++)
{
if (maskFlatten.at<uchar>(i, 0) != 0)
{
int center_id = labels.at<int>(idx);
p[i] = centers.at<cv::Vec3f>(center_id);
idx++;
}
//else
// p[i] = cv::Vec3f(0, 0, 0);
}
// back to 2d, and uchar
data = data.reshape(3, img.rows);
data.convertTo(result, CV_8U);
You will now have this result.
If you leave the else part commented out, you will keep the original pixels outside the mask, whereas if you uncomment it, they will be converted into black pixels, like here.

efficient cropping calculation in processing

I am loading a PNG in Processing. This PNG has a lot of unused pixels around the actual image. Luckily all those pixels are completely transparent. My goal is to crop the PNG to only show the image and get rid of the unused pixels. The first step would be to calculate the bounds of the image. Initially I wanted to check every pixel's alpha value and see if that pixel is the highest or lowest coordinate for the bounds, like this:
------
------
--->oo
oooooo
oooooo
Then I realized I only needed to do this until the first non-transparent pixel, and repeat it backwards for the highest coordinate bound, like this:
------
-->ooo
oooooo
ooo<--
------
This would mean less calculation for the same result. However, the code I got out of it still seems very complex. Here it is:
class Rect { //class for storing the boundaries
int xMin, xMax, yMin, yMax;
Rect() {
}
}
PImage gfx;
void setup() {
size(800, 600);
gfx = loadImage("resources/test.png");
Rect _bounds = calcBounds(); //first calculate the boundaries
cropImage(_bounds); //then crop the image using those boundaries
}
void draw() {
}
Rect calcBounds() {
Rect _bounds = new Rect();
boolean _coordFound = false;
gfx.loadPixels();
//x min bounds
for (int i = 0; i < gfx.width; i++) { //rows
for (int i2 = 0; i2 < gfx.height; i2++) { //columns
if (alpha(gfx.pixels[(gfx.width * i2) + i]) != 0) {
_bounds.xMin = i;
_coordFound = true;
break;
}
}
if (_coordFound) {
break;
}
}
//x max bounds
_coordFound = false;
for (int i = gfx.width - 1; i >= 0; i--) { //rows
for (int i2 = gfx.height - 1; i2 >= 0; i2--) { //columns
if (alpha(gfx.pixels[(gfx.width * i2) + i]) != 0) {
_bounds.xMax = i;
_coordFound = true;
break;
}
}
if (_coordFound) {
break;
}
}
//y min bounds
_coordFound = false;
for (int i = 0; i < gfx.height; i++) { //columns
for (int i2 = 0; i2 < gfx.width; i2++) { //rows
if (alpha(gfx.pixels[(gfx.width * i) + i2]) != 0) {
_bounds.yMin = i;
_coordFound = true;
break;
}
}
if (_coordFound) {
break;
}
}
//y max bounds
_coordFound = false;
for (int i = gfx.height - 1; i >= 0; i--) { //columns
for (int i2 = gfx.width -1; i2 >= 0; i2--) { //rows
if (alpha(gfx.pixels[(gfx.width * i) + i2]) != 0) {
_bounds.yMax = i;
_coordFound = true;
break;
}
}
if (_coordFound) {
break;
}
}
return _bounds;
}
void cropImage(Rect _bounds) {
PImage _temp = createImage((_bounds.xMax - _bounds.xMin) + 1, (_bounds.yMax - _bounds.yMin) + 1, ARGB);
_temp.copy(gfx, _bounds.xMin, _bounds.yMin, (_bounds.xMax - _bounds.xMin) + 1, (_bounds.yMax - _bounds.yMin)+ 1, 0, 0, _temp.width, _temp.height);
gfx = _temp; //now the image is cropped
}
Isn't there a more efficient/faster way to calculate the bounds of the image?
And I do still want the boundary coordinates afterwards, instead of just cutting away at the image during the calculation.
If you store the last completely empty line found during, for example, the horizontal minimum and maximum scans in a variable, you can use it to constrain your vertical scanning to only the area that has not yet been checked for being empty, instead of having to scan full columns. Depending on the amount and shape of the croppable area, that can save you quite a bit. See the schematic for a visual explanation of the modified algorithm:
By the way, in your //x min bounds scan you seem to be iterating over the width in both for loops; shouldn't one of them be over the height? (Unless your images are all square, of course :))
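To make the constrained scan concrete, here is a sketch expressed in C++/OpenCV terms rather than Processing (just to illustrate the idea; a Processing version would follow the same loop structure). It assumes a 4-channel image with alpha in channel 3 and at least one non-transparent pixel:
#include <opencv2/opencv.hpp>

// Returns the bounding box of all pixels with non-zero alpha.
// The row scans find yMin/yMax first; the column scans then only visit rows
// inside [yMin, yMax], which is the constraint described in the answer above.
cv::Rect alphaBounds(const cv::Mat4b& img)
{
    auto rowEmpty = [&](int r) {
        for (int c = 0; c < img.cols; ++c)
            if (img(r, c)[3] != 0) return false;
        return true;
    };

    int yMin = 0, yMax = img.rows - 1;
    while (yMin < yMax && rowEmpty(yMin)) ++yMin;
    while (yMax > yMin && rowEmpty(yMax)) --yMax;

    auto colEmpty = [&](int c) {
        for (int r = yMin; r <= yMax; ++r) // constrained to the known non-empty rows
            if (img(r, c)[3] != 0) return false;
        return true;
    };

    int xMin = 0, xMax = img.cols - 1;
    while (xMin < xMax && colEmpty(xMin)) ++xMin;
    while (xMax > xMin && colEmpty(xMax)) --xMax;

    return cv::Rect(xMin, yMin, xMax - xMin + 1, yMax - yMin + 1);
}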

Accessing value at row,col in a Matrix

I'm trying to access a specific row in a matrix but am having a hard time doing so.
I want to get the value at row j, column i but I don't think my algorithm is correct. I'm using OpenCV's Mat for my matrix and accessing it through the data member.
Here is how I am attempting to access values:
plane.data[i + j*plane.rows]
Where i = the column, j = the row. Is this correct? The Matrix is 1 plane from a YUV matrix.
Any help would be appreciated! Thanks.
No, you are wrong.
plane.data[i + j*plane.rows] is not a good way to access a pixel. Your indexing must depend on the type of the matrix and its depth.
You should use the at() method of the matrix.
To make it simple, here is a code sample which accesses each pixel of a matrix and prints it. It works for almost every matrix type and for any number of channels:
// Forward declaration: printMatTemplate is defined below but used here
template <typename T> void printMatTemplate(const Mat& M, bool isInt = true);
void printMat(const Mat& M){
switch ( (M.dataend-M.datastart) / (M.cols*M.rows*M.channels())){
case sizeof(char):
printMatTemplate<unsigned char>(M,true);
break;
case sizeof(float):
printMatTemplate<float>(M,false);
break;
case sizeof(double):
printMatTemplate<double>(M,false);
break;
}
}
template <typename T>
void printMatTemplate(const Mat& M, bool isInt){
if (M.empty()){
printf("Empty Matrix\n");
return;
}
if ((M.elemSize()/M.channels()) != sizeof(T)){
printf("Wrong matrix type. Cannot print\n");
return;
}
int cols = M.cols;
int rows = M.rows;
int chan = M.channels();
char printf_fmt[20];
if (isInt)
sprintf_s(printf_fmt,"%%d,");
else
sprintf_s(printf_fmt,"%%0.5g,");
if (chan > 1){
// Print multi channel array
for (int i = 0; i < rows; i++){
for (int j = 0; j < cols; j++){
printf("(");
const T* Pix = &M.at<T>(i,j);
for (int c = 0; c < chan; c++){
printf(printf_fmt,Pix[c]);
}
printf(")");
}
printf("\n");
}
printf("-----------------\n");
}
else {
// Single channel
for (int i = 0; i < rows; i++){
const T* Mi = M.ptr<T>(i);
for (int j = 0; j < cols; j++){
printf(printf_fmt,Mi[j]);
}
printf("\n");
}
printf("\n");
}
}
I do not think there is anything different between accessing an RGB Mat and a YUV Mat. It's just a different colorspace.
Please refer to http://opencv.willowgarage.com/wiki/faq#Howtoaccessmatrixelements.3F on how to access each pixel.
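Tying this back to the original question (plane is a single-channel plane from the YUV image, j the row and i the column), here is a minimal sketch of correct ways to read one pixel, assuming the Mat is CV_8UC1:
#include <opencv2/opencv.hpp>
using namespace cv;

// plane: a single-channel 8-bit Mat (CV_8UC1), j = row, i = column
uchar readPixel(const Mat& plane, int j, int i)
{
    uchar v1 = plane.at<uchar>(j, i);          // row first, then column
    uchar v2 = plane.ptr<uchar>(j)[i];         // row pointer, fastest inside loops
    uchar v3 = plane.data[j * plane.step + i]; // raw access: the stride is plane.step (bytes per row), not plane.rows
    (void)v1; (void)v2;
    return v3;
}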
