Dynamic 2D rendering in a 3D world - XNA

Ok, so I'm working on my first serious project in XNA. Here's my current task: download a file; from that file, parse a list of "shapes." Each "shape" contains a list of points. These points are the vertices of lines to be drawn on a map. In fact, it is a county map of the entire United States, so the number of "shapes" is not trivial. We need to be able to zoom in and out of this map, so it needs to be a 2D rendering in 3D space. I want to know what the best strategy for this would be.
I've tried simply using DrawUserIndexedPrimitives, but the draw function takes way too long to process this.
So then I thought I'd try drawing to a series of RenderTarget2D's in LoadContent, and just saving those textures for drawing in the Draw function. But so far all I can seem to get is a series of purple boxes.
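For the DrawUserIndexedPrimitives route, the usual cure is to upload all the line vertices into a static VertexBuffer once in LoadContent and then issue a single DrawPrimitives call per frame, putting zoom and pan into the view matrix so the geometry never has to be resubmitted. A minimal sketch of that idea, assuming XNA 4.0 (the field and method names shapeBuffer, BuildBuffer, and DrawMap are illustrative):
// Fields on the component; all county outlines go into one static buffer.
// LineList needs two vertices per segment, so strips must be expanded to pairs.
VertexBuffer shapeBuffer;
BasicEffect mapEffect;
int lineCount;

void BuildBuffer(VertexPositionColor[] lineVertices)
{
    lineCount = lineVertices.Length / 2;
    shapeBuffer = new VertexBuffer(GraphicsDevice,
        VertexPositionColor.VertexDeclaration, lineVertices.Length, BufferUsage.WriteOnly);
    shapeBuffer.SetData(lineVertices);   // upload once, not per frame

    mapEffect = new BasicEffect(GraphicsDevice) { VertexColorEnabled = true };
    mapEffect.Projection = Matrix.CreateOrthographicOffCenter(
        0, GraphicsDevice.Viewport.Width, GraphicsDevice.Viewport.Height, 0, 0, 1);
}

void DrawMap(float zoom, Vector2 pan)
{
    // Zoom and pan live in the view matrix, so the geometry never changes.
    mapEffect.View = Matrix.CreateTranslation(-pan.X, -pan.Y, 0)
                   * Matrix.CreateScale(zoom, zoom, 1);
    GraphicsDevice.SetVertexBuffer(shapeBuffer);
    foreach (EffectPass pass in mapEffect.CurrentTechnique.Passes)
    {
        pass.Apply();
        GraphicsDevice.DrawPrimitives(PrimitiveType.LineList, 0, lineCount);
    }
}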
EDIT:
Here's my current code:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.IO;
using Microsoft.Xna.Framework;
using Microsoft.Xna.Framework.Graphics;
using System.Runtime.InteropServices;

namespace First
{
    class FirstShape : DrawableGameComponent
    {
        const int kSizeMultiplier = 10;

        public FirstShape(string inFilePath, Game inGame) : base(inGame)
        {
            DrawOrder = 1000;
            mFile = inFilePath;
        }

        protected override void LoadContent()
        {
            mSpriteBatch = new SpriteBatch(GraphicsDevice);
            FirstMemoryStream stream = FirstMemoryStream.MakeSeekableStream(File.OpenRead(mFile));
            // skip headers
            stream.Seek(100, 0);
            for (int i = 0; stream.Position != stream.Length; i++)
            {
                // Read one record: bounding box, part count, point count
                stream.Seek(stream.Position + 12, 0);
                double minX = stream.ReadDouble();
                double minY = stream.ReadDouble();
                double maxX = stream.ReadDouble();
                double maxY = stream.ReadDouble();
                int numParts = stream.ReadInt();
                int numPoints = stream.ReadInt();
                VertexPositionColor[] points = new VertexPositionColor[numPoints];
                stream.Seek(stream.Position + (4 * numParts), 0);
                int top, bottom, left, right;
                float x2, y2;
                x2 = (float)stream.ReadDouble();
                y2 = (float)stream.ReadDouble();
                Vector2 projectedPoint = Vertex(x2, y2);
                left = right = (int)Math.Round(projectedPoint.X * kSizeMultiplier);
                top = bottom = (int)Math.Round(maxY - (projectedPoint.Y * kSizeMultiplier));
                points[0].Position.X = left;
                points[0].Position.Y = top;
                // Project the remaining points and track the shape's pixel bounds
                for (int j = 1; j < points.Length; j++)
                {
                    float x1 = x2;
                    float y1 = y2;
                    x2 = (float)stream.ReadDouble();
                    y2 = (float)stream.ReadDouble();
                    Vector2 p1 = Vertex(x1, y1);
                    Vector2 p2 = Vertex(x2, y2);
                    p1.X *= kSizeMultiplier;
                    p1.Y *= kSizeMultiplier;
                    p2.X *= kSizeMultiplier;
                    p2.Y *= kSizeMultiplier;
                    points[j].Position.X = (int)Math.Round(p2.X);
                    points[j].Position.Y = (int)Math.Round(maxY - p2.Y);
                    if (points[j].Position.X < left)
                        left = (int)points[j].Position.X;
                    if (points[j].Position.X > right)
                        right = (int)points[j].Position.X;
                    if (points[j].Position.Y < top)
                        top = (int)points[j].Position.Y;
                    if (points[j].Position.Y > bottom)
                        bottom = (int)points[j].Position.Y;
                }
                if (mTopLeft.X == 0 || mTopLeft.X > left)
                    mTopLeft.X = left;
                if (mTopLeft.Y == 0 || mTopLeft.Y > top)
                    mTopLeft.Y = top;
                for (int j = 0; j < points.Length; j++)
                {
                    points[j].Color = Color;
                }
                // Render this shape's outline into its own texture
                int width = (right - left) + 1;
                int height = (bottom - top) + 1;
                mTextures.Add(new FirstImage(GraphicsDevice, width, height, new Vector2(left, top)));
                GraphicsDevice.SetRenderTarget(mTextures.Last());
                GraphicsDevice.Indices = new IndexBuffer(GraphicsDevice, IndexElementSize.SixteenBits, points.Length, BufferUsage.None);
                GraphicsDevice.SetVertexBuffer(new VertexBuffer(GraphicsDevice, VertexPositionColor.VertexDeclaration, points.Length, BufferUsage.None));
                BasicEffect basicEffect = new BasicEffect(GraphicsDevice);
                basicEffect.LightingEnabled = false;
                basicEffect.VertexColorEnabled = true;
                foreach (EffectPass pass in basicEffect.CurrentTechnique.Passes)
                {
                    pass.Apply();
                    GraphicsDevice.DrawIndexedPrimitives(PrimitiveType.LineStrip, 0, 0, points.Length, 0, points.Length - 1);
                }
            }
            GraphicsDevice.SetRenderTarget(null);
            stream.Close();
            for (int i = 0; i < mTextures.Count; i++)
            {
                mTextures[i].Position -= mTopLeft;
            }
        }

        public override void Draw(GameTime inTime)
        {
            mSpriteBatch.Begin();
            for (int i = 0; i < mTextures.Count; i++)
            {
                mSpriteBatch.Draw(mTextures[i], mTextures[i].Position, Color.White);
            }
            mSpriteBatch.End();
        }

        private Vector2 Vertex(float inX, float inY)
        {
            return FirstProjector.Project(new Vector2(inX, inY));
        }

        public Color Color { get; set; }

        private string mFile;
        private List<FirstImage> mTextures = new List<FirstImage>();
        private SpriteBatch mSpriteBatch;
        private Vector2 mTopLeft = new Vector2(0.0f, 0.0f);
        private Vector2 mBottomRight = new Vector2(0.0f, 0.0f);
    }

    class FirstImage : RenderTarget2D
    {
        public FirstImage(GraphicsDevice inDevice, int inWidth, int inHeight, Vector2 inPosition)
            : base(inDevice, inWidth, inHeight, false, SurfaceFormat.Color, DepthFormat.None)
        {
            Position = inPosition;
        }

        public Vector2 Position { get; set; }
    }
}
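On the purple boxes: in XNA 4.0 a freshly set render target whose contents are discarded comes back filled with purple, and the code above never calls Clear and never fills its vertex or index buffers with SetData, so nothing is ever drawn over that fill. A minimal sketch of a render-target pass that avoids both problems, assuming XNA 4.0 and reusing the points, left, top, and mTextures names from the code above:
// Sketch of the per-shape pass; points is the array built above.
RenderTarget2D target = mTextures.Last();
GraphicsDevice.SetRenderTarget(target);
GraphicsDevice.Clear(Color.Transparent);          // replaces the purple "uninitialized" fill

VertexBuffer vb = new VertexBuffer(GraphicsDevice,
    VertexPositionColor.VertexDeclaration, points.Length, BufferUsage.WriteOnly);
vb.SetData(points);                               // without SetData the buffer stays empty
GraphicsDevice.SetVertexBuffer(vb);

BasicEffect effect = new BasicEffect(GraphicsDevice) { VertexColorEnabled = true };
// Shift the shape's map coordinates into the target's local space.
effect.World = Matrix.CreateTranslation(-left, -top, 0);
// Map the target's pixel coordinates onto clip space.
effect.Projection = Matrix.CreateOrthographicOffCenter(
    0, target.Width, target.Height, 0, 0, 1);

foreach (EffectPass pass in effect.CurrentTechnique.Passes)
{
    pass.Apply();
    // A line strip needs no index buffer at all.
    GraphicsDevice.DrawPrimitives(PrimitiveType.LineStrip, 0, points.Length - 1);
}
GraphicsDevice.SetRenderTarget(null);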

Related

Calculate distance between parameters and target image

How can I calculate the distance between a fixed parameter and a target image/pixel?
The following code does color recognition, finds the average position of the matching pixels, and draws a circle on it. It can tell whether the target (averageX and averageY) is close to leftPd, centerPd, or rightPd. I want to adapt this code for lane tracking, so that it can at least find the distance between the leftP parameter and the left lane, or between the rightP parameter and the right lane, as sketched after the code below.
import processing.video.*;

Capture video;
float threshold = 210;
color trackColor;
PVector leftP, centerP, rightP, target;

void setup() {
  leftP = new PVector(80, 420);
  centerP = new PVector(width/2, 380);
  rightP = new PVector(560, 420);
  size(640, 480);
  video = new Capture(this, width, height);
  video.start();
  trackColor = color(160, 0, 0); // Start off tracking for red
}

void captureEvent(Capture video) {
  // Read image from the camera
  video.read();
}

void draw() {
  loadPixels();
  video.loadPixels();
  image(video, 0, 0);
  float avgX = 0;
  float avgY = 0;
  int count = 0;
  for (int x = 0; x < video.width; x++) {
    for (int y = 0; y < video.height; y++) {
      int loc = x + y * video.width;
      color currentColor = video.pixels[loc];
      float r1 = red(currentColor);
      float g1 = green(currentColor);
      float b1 = blue(currentColor);
      float r2 = red(trackColor);
      float g2 = green(trackColor);
      float b2 = blue(trackColor);
      // Using squared euclidean distance to compare colors
      float d = distSq(r1, g1, b1, r2, g2, b2);
      if (d < threshold) {
        stroke(255);
        strokeWeight(1);
        point(x, y);
        avgX += x;
        avgY += y;
        count++;
      }
    }
  }
  if (count > 0) {
    avgX = avgX / count;
    avgY = avgY / count;
    // Draw a circle at the tracked pixel
    fill(trackColor);
    strokeWeight(4.0);
    stroke(0);
    ellipse(avgX, avgY, 20, 20);
    text("brightnesslevel: " + trackColor, 20, 60);
    text("FPS: " + frameRate, 20, 80);
  }
  target = new PVector(avgX, avgY);
  color c = color(255, 204, 0);
  fill(c);
  noStroke();
  ellipse(leftP.x, leftP.y, 16, 16);     // left param
  ellipse(centerP.x, centerP.y, 16, 16); // center param
  ellipse(rightP.x, rightP.y, 16, 16);   // right param
  float leftPd = leftP.dist(target);
  float centerPd = centerP.dist(target);
  float rightPd = rightP.dist(target);
  if (leftPd <= 85) {
    text("To Close left ", 20, 250);
  }
  if (centerPd <= 85) {
    text("To Close turn center ", 20, 275);
  }
  if (rightPd <= 85) {
    text("To Close turn right ", 20, 300);
  }
}

float distSq(float x1, float y1, float z1, float x2, float y2, float z2) {
  float d = (x2-x1)*(x2-x1) + (y2-y1)*(y2-y1) + (z2-z1)*(z2-z1);
  return d;
}

void mousePressed() {
  // Save color where the mouse is clicked in trackColor variable
  int loc = mouseX + mouseY * video.width;
  trackColor = video.pixels[loc];
}
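A minimal sketch of one way to get an actual lane distance, assuming the lane pixels match trackColor and video.pixels is already loaded: scan the row at the parameter's height and take the horizontal gap to the nearest matching pixel (distToLaneInRow is a hypothetical helper, not part of the original sketch):
// Horizontal distance from a parameter point to the nearest pixel of the
// tracked color in that point's row; assumes video.loadPixels() was called.
float distToLaneInRow(PVector p) {
  int y = constrain((int)p.y, 0, video.height - 1);
  float best = -1; // -1 means no lane pixel was found in this row
  for (int x = 0; x < video.width; x++) {
    color c = video.pixels[x + y * video.width];
    float d = distSq(red(c), green(c), blue(c),
                     red(trackColor), green(trackColor), blue(trackColor));
    if (d < threshold) {
      float dx = abs(x - p.x);
      if (best < 0 || dx < best) best = dx;
    }
  }
  return best;
}
It would be called once per frame, for example: float leftLaneDist = distToLaneInRow(leftP); and the same for rightP.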

Image printing problem with an Epson-compatible thermal printer

I am using C# to write code that prints content on a thermal ticket printer.
There is code that people use for image printing, and it does print images, but something goes wrong. This is my image-printing class; it is widely used open-source code (I googled and found it, and others have integrated it into their projects without problems).
public static class ImagePrint
{
    /// <summary>
    /// Converts an image to a byte array of ESC/POS commands.
    /// </summary>
    /// <param name="LogoPath">Image path</param>
    /// <param name="printWidth">Horizontal print width of the image</param>
    /// <returns></returns>
    public static byte[] GetLogo(string LogoPath, int printWidth)
    {
        List<byte> byteList = new List<byte>();
        if (!File.Exists(LogoPath))
            return null;
        BitmapData data = GetBitmapData(LogoPath, printWidth);
        BitArray dots = data.Dots;
        byte[] width = BitConverter.GetBytes(data.Width);
        int offset = 0;
        // Initialize printer (ESC @)
        byteList.Add(Convert.ToByte(Convert.ToChar(0x1B)));
        byteList.Add(Convert.ToByte('@'));
        // Adjust line spacing to 24/180 inch (ESC 3 n)
        byteList.Add(Convert.ToByte(Convert.ToChar(0x1B)));
        byteList.Add(Convert.ToByte('3'));
        byteList.Add((byte)24);
        while (offset < data.Height)
        {
            // Select 24-dot double-density bit image mode (ESC * 33)
            byteList.Add(Convert.ToByte(Convert.ToChar(0x1B)));
            byteList.Add(Convert.ToByte('*'));
            byteList.Add((byte)33);
            byteList.Add(width[0]);
            byteList.Add(width[1]);
            for (int x = 0; x < data.Width; ++x)
            {
                // Pack three vertical bytes (24 dots) per column
                for (int k = 0; k < 3; ++k)
                {
                    byte slice = 0;
                    for (int b = 0; b < 8; ++b)
                    {
                        int y = (((offset / 8) + k) * 8) + b;
                        int i = (y * data.Width) + x;
                        bool v = false;
                        if (i < dots.Length)
                            v = dots[i];
                        slice |= (byte)((v ? 1 : 0) << (7 - b));
                    }
                    byteList.Add(slice);
                }
            }
            offset += 24;
            byteList.Add(Convert.ToByte(0x0A));
        }
        // Return to normal line spacing (30/180 inch)
        byteList.Add(Convert.ToByte(0x1B));
        byteList.Add(Convert.ToByte('3'));
        byteList.Add((byte)30);
        return byteList.ToArray();
    }

    private static BitmapData GetBitmapData(string bmpFileName, int width)
    {
        using (var bitmap = (Bitmap)Bitmap.FromFile(bmpFileName))
        {
            var threshold = 127;
            var index = 0;
            double multiplier = width; // adjust the image width
            double scale = (double)(multiplier / (double)bitmap.Width);
            int xheight = (int)(bitmap.Height * scale);
            int xwidth = (int)(bitmap.Width * scale);
            var dimensions = xwidth * xheight;
            var dots = new BitArray(dimensions);
            // Threshold each scaled pixel on luminance to get a 1-bit image
            for (var y = 0; y < xheight; y++)
            {
                for (var x = 0; x < xwidth; x++)
                {
                    var _x = (int)(x / scale);
                    var _y = (int)(y / scale);
                    var color = bitmap.GetPixel(_x, _y);
                    var luminance = (int)(color.R * 0.3 + color.G * 0.59 + color.B * 0.11);
                    dots[index] = (luminance < threshold);
                    index++;
                }
            }
            return new BitmapData()
            {
                Dots = dots,
                Height = (int)(bitmap.Height * scale),
                Width = (int)(bitmap.Width * scale)
            };
        }
    }

    private class BitmapData
    {
        public BitArray Dots { get; set; }
        public int Height { get; set; }
        public int Width { get; set; }
    }
}
And I use it like this in my code for image printing:
string Image_File_Path = @"D:\TEST\TESTImage.bmp";
int Image_Size_I_Want = 100;
byte[] img = ImagePrint.GetLogo(Image_File_Path, Image_Size_I_Want);
if (img != null)   // GetLogo returns null when the file is missing
    port.Write(img, 0, img.Length);
You can see the result in the attached picture: there are white space lines across the image.
The class automatically adds a line spacing command, but it seems it does not take effect.
Please suggest any solution.
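One workaround, assuming the printer supports the GS v 0 raster bit-image command: send the whole bitmap as a single raster block, so no line feeds (and therefore no line-spacing gaps) occur between 24-dot bands. This is only a sketch; GetRasterLogo is a hypothetical addition to the ImagePrint class above and reuses its BitmapData helper:
// Sketch: print the bitmap with GS v 0 instead of ESC * bands.
public static byte[] GetRasterLogo(string logoPath, int printWidth)
{
    if (!File.Exists(logoPath))
        return null;
    BitmapData data = GetBitmapData(logoPath, printWidth);
    int widthBytes = (data.Width + 7) / 8;          // 8 dots per byte, MSB leftmost
    List<byte> bytes = new List<byte>
    {
        0x1D, (byte)'v', (byte)'0', 0x00,           // GS v 0, normal scaling
        (byte)(widthBytes & 0xFF), (byte)(widthBytes >> 8),
        (byte)(data.Height & 0xFF), (byte)(data.Height >> 8)
    };
    for (int y = 0; y < data.Height; y++)
    {
        for (int xb = 0; xb < widthBytes; xb++)
        {
            byte slice = 0;
            for (int b = 0; b < 8; b++)
            {
                int x = xb * 8 + b;
                if (x < data.Width && data.Dots[y * data.Width + x])
                    slice |= (byte)(1 << (7 - b));
            }
            bytes.Add(slice);
        }
    }
    return bytes.ToArray();
}
Usage would be the same as GetLogo above, writing the returned bytes to the serial port.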
Using the 'mike42/escpos-php' package in Laravel:
use Mike42\Escpos\Printer;
use Mike42\Escpos\EscposImage;

$tux = EscposImage::load(public_path() . "\assets\img\path-to-file.jpg");
$printer->setJustification(Printer::JUSTIFY_CENTER);
$printer->bitImage($tux, 0);
$printer->setJustification();

Processing save() Function Saves Only Final Result

Here's my code...
PImage dot;

void setup() {
  size(500, 500);
  surface.setResizable(true);
  smooth();
  dot = loadImage("1-DOT.png");
}

void draw() {
  background(255);
  grid(dot, 5, .2);
}

void grid(PImage img, int dim, float scale) {
  int imgsize = floor(img.width * scale);
  int canvassize;
  for (int i = 1; i <= dim; i++) {
    canvassize = dim * imgsize;
    surface.setSize(canvassize, canvassize);
    for (int x = 0; x < canvassize; x += imgsize) {
      for (int y = 0; y < canvassize; y += imgsize) {
        image(img, x, y, imgsize, imgsize);
      }
    }
    save("grid_" + str(i) + ".png");
  }
}
The grid function takes an image file, a dimension parameter, and a scale. It creates square grids of sizes 1×1 up to dim×dim from the image.
It should save each iteration of this grid as a file, but it doesn't. What I am left with once I run the code is (in this case) 5 identical 5x5 grids. I should have a 1x1 grid, a 2x2 grid, and so on. I have also attempted to use saveFrame(), but to no avail.
Thanks in advance!
Majlik is correct that you aren't calculating your canvassize correctly. If you want it to be different each iteration of the loop, then you need to use i instead of dim.
But on top of that, it seems like a really bad idea to change the size of your surface in the middle of a call to draw(). That throws an IndexOutOfBoundsException for me.
Instead, you'll probably have better luck if you create a PGraphics of whatever size you want and draw to that. Here's an example:
void setup() {
  PImage dot = loadImage("dot.png");
  grid(dot, 5, .2);
  exit();
}

void grid(PImage img, int dim, float scale) {
  int imgsize = floor(img.width * scale);
  for (int i = 1; i <= dim; i++) {
    int canvassize = i * imgsize;
    PGraphics pg = createGraphics(canvassize, canvassize);
    pg.beginDraw();
    for (int x = 0; x < canvassize; x += imgsize) {
      for (int y = 0; y < canvassize; y += imgsize) {
        pg.image(img, x, y, imgsize, imgsize);
      }
    }
    pg.endDraw();
    pg.save("grid_" + str(i) + ".png");
  }
}
That creates these images:
Also, notice that I'm not calling this from the draw() function: your program would continuously create images, which is not necessary. Just create them once and then exit.
I think you have a mistake in calculating canvassize. If I understand your goal correctly, you should use i instead of dim:
canvassize = i * imgsize; // corrected
Also, it is easier to use saveFrame() instead of save():
saveFrame("grid_###.png");
But I tested it only in Java Mode (without the surface methods).

Improve JavaFX processing performance

I'm working on image processing with JavaFX. I think my code is not efficient enough (with HD images, refreshing is very slow), because I loop over every pixel of the image every time I have to refresh it, but I don't know how to do it differently.
So I need help improving the performance of my processing.
This is my code :
import javafx.application.Application;
import javafx.beans.InvalidationListener;
import javafx.beans.Observable;
import javafx.beans.property.DoubleProperty;
import javafx.scene.Scene;
import javafx.scene.control.ScrollPane;
import javafx.scene.control.Slider;
import javafx.scene.image.Image;
import javafx.scene.image.ImageView;
import javafx.scene.image.PixelReader;
import javafx.scene.image.PixelWriter;
import javafx.scene.image.WritableImage;
import javafx.scene.layout.AnchorPane;
import javafx.scene.paint.Color;
import javafx.stage.Stage;

public class Example extends Application {
    private Image src;
    private WritableImage dest;
    private int width;
    private int height;
    int value = 0;

    @Override
    public void start(Stage stage) {
        AnchorPane root = new AnchorPane();
        initImage(root);
        Scene scene = new Scene(root);
        stage.setTitle("Demo processing");
        stage.setResizable(false);
        stage.setScene(scene);
        stage.show();
    }

    private void initImage(AnchorPane root) {
        src = new Image(
                "http://mikecann.co.uk/wp-content/uploads/2009/12/ScreenHunter_02-Dec.-10-19.41-1024x484.jpg");
        width = (int) src.getWidth();
        height = (int) src.getHeight();
        root.setPrefSize(800, 800 + 50);
        ScrollPane scrollPane = new ScrollPane();
        scrollPane.setPrefHeight(600);
        scrollPane.setPrefWidth(1000);
        dest = new WritableImage(width, height);
        ImageView destView = new ImageView(dest);
        scrollPane.setContent(destView);
        root.getChildren().add(scrollPane);
        AnchorPane.setTopAnchor(scrollPane, 0.0);
        Slider slider = new Slider(0, 255, 1);
        slider.setPrefSize(800, 50);
        slider.setShowTickLabels(true);
        slider.setShowTickMarks(true);
        slider.setSnapToTicks(true);
        slider.setMajorTickUnit(1.0);
        slider.setMinorTickCount(0);
        slider.setLayoutY(700);
        slider.valueProperty().addListener(new InvalidationListener() {
            @Override
            public void invalidated(Observable o) {
                value = (int) ((DoubleProperty) o).get();
                color();
            }
        });
        root.getChildren().add(slider);
        color();
    }

    private void color() {
        PixelReader reader = src.getPixelReader();
        PixelWriter writer = dest.getPixelWriter();
        for (int x = 0; x < width; x++) {
            for (int y = 0; y < height; y++) {
                Color color = reader.getColor(x, y);
                double red = (double) value * x * y / (width * height) / 255;
                double green = color.getGreen();
                double blue = (double) value * ((width * height) - x * y)
                        / (width * height) / 255;
                writer.setColor(x, y, Color.color(red, green, blue));
            }
        }
    }

    public static void main(String[] args) {
        launch(args);
    }
}
And this is with a full HD image :
src = new Image(
"http://www.freedomwallpaper.com//nature-wallpaper-hd/hd_sunshine_hd.jpg");
Getting the color of each pixel in a loop is too slow. So get all of the pixels first, change the colors, and finally write the changed colors back with a PixelWriter, like this:
// Requires: import java.nio.IntBuffer; and javafx.scene.image.WritablePixelFormat
private void color() {
    PixelReader reader = src.getPixelReader();
    WritablePixelFormat<IntBuffer> format = WritablePixelFormat.getIntArgbInstance();
    int[] pixels = new int[width * height]; // buffer for all pixels
    reader.getPixels(0, 0, width, height, format, pixels, 0, width); // read all pixels in ARGB format
    int alpha = 0xFF << 24;
    for (int x = 0; x < width; x++) {
        for (int y = 0; y < height; y++) {
            int index = x + y * width;
            int argb = pixels[index];
            int red = value * x * y / (width * height);
            int green = (argb >> 8) & 0xFF;
            int blue = value * ((width * height) - x * y)
                    / (width * height);
            int newArgb = alpha | (red << 16) | (green << 8) | blue;
            pixels[index] = newArgb;
        }
    }
    PixelWriter writer = dest.getPixelWriter();
    writer.setPixels(0, 0, width, height, format, pixels, 0, width); // write the entire image at once
}

Cropping panorama image in OpenCV

I'm trying to find a simple algorithm to crop (remove the black areas of) a panorama image created with the OpenCV Stitcher module.
My idea is to calculate the innermost black points in the image, which will define the cropping area, as shown in the next image:
Expected cropped result:
I've tried the following two approaches, but they don't crop the image as expected:
First Approach:
void testCropA(cv::Mat& image)
{
    cv::Mat gray;
    cvtColor(image, gray, CV_BGR2GRAY);
    Size size = gray.size();
    int type = gray.type();
    int left = 0, top = 0, right = size.width, bottom = size.height;
    cv::Mat row_zeros = Mat::zeros(1, right, type);
    cv::Mat col_zeros = Mat::zeros(bottom, 1, type);
    while (countNonZero(gray.row(top) != row_zeros) == 0) { top++; }
    while (countNonZero(gray.col(left) != col_zeros) == 0) { left++; }
    while (countNonZero(gray.row(bottom - 1) != row_zeros) == 0) { bottom--; }
    while (countNonZero(gray.col(right - 1) != col_zeros) == 0) { right--; }
    cv::Rect cropRect(left, top, right - left, bottom - top);
    image = image(cropRect);
}
Second Approach:
void testCropB(cv::Mat& image)
{
    cv::Mat gray;
    cvtColor(image, gray, CV_BGR2GRAY);
    int minCol = gray.cols;
    int minRow = gray.rows;
    int maxCol = 0;
    int maxRow = 0;
    for (int i = 0; i < gray.rows - 3; i++)
    {
        for (int j = 0; j < gray.cols; j++)
        {
            if (gray.at<char>(i, j) != 0)
            {
                if (i < minRow) { minRow = i; }
                if (j < minCol) { minCol = j; }
                if (i > maxRow) { maxRow = i; }
                if (j > maxCol) { maxCol = j; }
            }
        }
    }
    cv::Rect cropRect = Rect(minCol, minRow, maxCol - minCol, maxRow - minRow);
    image = image(cropRect);
}
This is my current solution. Hope it helps others:
bool checkInteriorExterior(const cv::Mat &mask, const cv::Rect &croppingMask,
                           int &top, int &bottom, int &left, int &right)
{
    // Return true if the rectangle is fine as it is
    bool result = true;
    cv::Mat sub = mask(croppingMask);
    int x = 0;
    int y = 0;
    // Count how many exterior pixels there are, and choose the side for
    // reduction where the most exterior pixels occurred (that's the heuristic)
    int top_row = 0;
    int bottom_row = 0;
    int left_column = 0;
    int right_column = 0;
    for (y = 0, x = 0; x < sub.cols; ++x)
    {
        // If there is an exterior part in the interior we have
        // to move the top side of the rect a bit to the bottom
        if (sub.at<char>(y, x) == 0)
        {
            result = false;
            ++top_row;
        }
    }
    for (y = (sub.rows - 1), x = 0; x < sub.cols; ++x)
    {
        // If there is an exterior part in the interior we have
        // to move the bottom side of the rect a bit to the top
        if (sub.at<char>(y, x) == 0)
        {
            result = false;
            ++bottom_row;
        }
    }
    for (y = 0, x = 0; y < sub.rows; ++y)
    {
        // If there is an exterior part in the interior
        if (sub.at<char>(y, x) == 0)
        {
            result = false;
            ++left_column;
        }
    }
    for (x = (sub.cols - 1), y = 0; y < sub.rows; ++y)
    {
        // If there is an exterior part in the interior
        if (sub.at<char>(y, x) == 0)
        {
            result = false;
            ++right_column;
        }
    }
    // The idea is to set `top = 1` if it's better to reduce
    // the rect at the top than anywhere else.
    if (top_row > bottom_row)
    {
        if (top_row > left_column)
        {
            if (top_row > right_column)
            {
                top = 1;
            }
        }
    }
    else if (bottom_row > left_column)
    {
        if (bottom_row > right_column)
        {
            bottom = 1;
        }
    }
    if (left_column >= right_column)
    {
        if (left_column >= bottom_row)
        {
            if (left_column >= top_row)
            {
                left = 1;
            }
        }
    }
    else if (right_column >= top_row)
    {
        if (right_column >= bottom_row)
        {
            right = 1;
        }
    }
    return result;
}

bool compareX(cv::Point a, cv::Point b)
{
    return a.x < b.x;
}

bool compareY(cv::Point a, cv::Point b)
{
    return a.y < b.y;
}

void crop(cv::Mat &source)
{
    cv::Mat gray;
    source.convertTo(source, CV_8U);
    cvtColor(source, gray, cv::COLOR_RGB2GRAY);
    // Extract all the black background (and maybe some interior parts)
    cv::Mat mask = gray > 0;
    // Now extract the outer contour
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::findContours(mask, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE, cv::Point(0, 0));
    cv::Mat contourImage = cv::Mat::zeros(source.size(), CV_8UC3);
    // Find the contour with the most elements
    int maxSize = 0;
    int id = 0;
    for (int i = 0; i < contours.size(); ++i)
    {
        if (contours.at((unsigned long)i).size() > maxSize)
        {
            maxSize = (int)contours.at((unsigned long)i).size();
            id = i;
        }
    }
    // Draw the filled contour to obtain a mask with interior parts
    cv::Mat contourMask = cv::Mat::zeros(source.size(), CV_8UC1);
    drawContours(contourMask, contours, id, cv::Scalar(255), -1, 8, hierarchy, 0, cv::Point());
    // Sort the contour in x/y directions to easily find min/max and the next candidates
    std::vector<cv::Point> cSortedX = contours.at((unsigned long)id);
    std::sort(cSortedX.begin(), cSortedX.end(), compareX);
    std::vector<cv::Point> cSortedY = contours.at((unsigned long)id);
    std::sort(cSortedY.begin(), cSortedY.end(), compareY);
    int minXId = 0;
    int maxXId = (int)(cSortedX.size() - 1);
    int minYId = 0;
    int maxYId = (int)(cSortedY.size() - 1);
    cv::Rect croppingMask;
    while ((minXId < maxXId) && (minYId < maxYId))
    {
        cv::Point min(cSortedX[minXId].x, cSortedY[minYId].y);
        cv::Point max(cSortedX[maxXId].x, cSortedY[maxYId].y);
        croppingMask = cv::Rect(min.x, min.y, max.x - min.x, max.y - min.y);
        // Out-codes: if one of them is set, the rectangle size has to be reduced at that border
        int ocTop = 0;
        int ocBottom = 0;
        int ocLeft = 0;
        int ocRight = 0;
        bool finished = checkInteriorExterior(contourMask, croppingMask, ocTop, ocBottom, ocLeft, ocRight);
        if (finished == true)
        {
            break;
        }
        // Reduce the rectangle at a border if necessary
        if (ocLeft) { ++minXId; }
        if (ocRight) { --maxXId; }
        if (ocTop) { ++minYId; }
        if (ocBottom) { --maxYId; }
    }
    // Crop the image with the created mask
    source = source(croppingMask);
}
I never used the Stitcher class, but I think you may be able to get the estimated homography matrix for each pair of images. If you can obtain it easily, multiply it with the corners of the first original image, and likewise for the corners of the last original one, to get their stitched coordinates. Then take the min of the left and right x-coordinates and the min of the top and bottom y-coordinates for each image. That gives you the coordinates of each stitched image, which is what you need in some cropping cases.
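A minimal sketch of that corner idea, assuming a 3x3 homography H (CV_64F) that maps an original image into panorama coordinates:
#include <opencv2/opencv.hpp>
#include <algorithm>
#include <vector>

// Map the four corners of an original image through its estimated
// homography H into panorama coordinates and return their bounding box.
cv::Rect stitchedBounds(const cv::Mat &H, const cv::Size &imageSize)
{
    std::vector<cv::Point2f> corners = {
        cv::Point2f(0.f, 0.f),
        cv::Point2f((float)imageSize.width, 0.f),
        cv::Point2f((float)imageSize.width, (float)imageSize.height),
        cv::Point2f(0.f, (float)imageSize.height)
    };
    std::vector<cv::Point2f> warped;
    cv::perspectiveTransform(corners, warped, H);  // corner positions in the panorama
    float minX = warped[0].x, maxX = warped[0].x;
    float minY = warped[0].y, maxY = warped[0].y;
    for (size_t i = 1; i < warped.size(); ++i)
    {
        minX = std::min(minX, warped[i].x);
        maxX = std::max(maxX, warped[i].x);
        minY = std::min(minY, warped[i].y);
        maxY = std::max(maxY, warped[i].y);
    }
    // Comparing these rectangles across all input images gives the crop limits.
    return cv::Rect((int)minX, (int)minY,
                    (int)(maxX - minX), (int)(maxY - minY));
}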
