How can I set the stride of an Image properly?

While converting from double[,] to Bitmap,
Bitmap image = ImageDataConverter.ToBitmap(new double[,]
{
    { .11, .11, .11, },
    { .11, .11, .11, },
    { .11, .11, .11, },
});
the routine gives
data.Stride == 4
Where does this value come from?
Since the double[,] is 3x3, stride should be 5. Right?
How can I fix this, not only for this case but for arrays of any dimensions?
Relevant Source Code
public class ImageDataConverter
{
    public static Bitmap ToBitmap(double[,] input)
    {
        int width = input.GetLength(0);
        int height = input.GetLength(1);
        Bitmap output = Grayscale.CreateGrayscaleImage(width, height);
        BitmapData data = output.LockBits(new Rectangle(0, 0, width, height),
                                          ImageLockMode.WriteOnly,
                                          output.PixelFormat);
        int pixelSize = System.Drawing.Image.GetPixelFormatSize(output.PixelFormat) / 8;
        int offset = data.Stride - width * pixelSize;
        double Min = 0.0;
        double Max = 255.0;
        unsafe
        {
            byte* address = (byte*)data.Scan0.ToPointer();
            for (int y = 0; y < height; y++)
            {
                for (int x = 0; x < width; x++)
                {
                    double v = 255 * (input[x, y] - Min) / (Max - Min);
                    byte value = unchecked((byte)v);
                    for (int c = 0; c < pixelSize; c++, address++)
                    {
                        *address = value;
                    }
                }
                address += offset; // skip the padding bytes at the end of each scan line
            }
        }
        output.UnlockBits(data);
        return output;
    }
}

I don't know how you arrived at 5.
The stride is the width of a single row of pixels (a scan line), rounded up to a four-byte boundary.
Since the image is 3 pixels wide at 1 byte per pixel (8 bpp grayscale), a 3-byte row rounded up to a four-byte boundary gives a stride of 4.
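If you ever need to compute the padded stride yourself (for example, to allocate your own buffer), the rounding is plain integer arithmetic. A minimal sketch; the helper name StrideFor is mine:
// Round a row of pixels up to the next multiple of 4 bytes,
// which is how GDI+ pads each scan line.
static int StrideFor(int widthPixels, int bitsPerPixel)
{
    int bytesPerRow = (widthPixels * bitsPerPixel + 7) / 8; // bits -> whole bytes
    return (bytesPerRow + 3) / 4 * 4;                       // round up to a 4-byte boundary
}
// StrideFor(3, 8)  == 4   -> the 3x3 grayscale case above
// StrideFor(3, 24) == 12  -> 9 bytes of 24bpp RGB data padded to 12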

Related

Image printing with Epson compatible Thermal printer problem

I am using C# and writing code to print content on a thermal ticket printer.
There is code that people commonly use for image printing, and it does print images, but something goes wrong. This is my image print class; it is widely used open-source code (I googled and found it, and other people have implemented it in their projects without problems).
public static class ImagePrint
{
    /// <summary>
    /// Image convert to Byte Array
    /// </summary>
    /// <param name="LogoPath">Image Path</param>
    /// <param name="printWidth">Image print Horizontal Length</param>
    /// <returns></returns>
    public static byte[] GetLogo(string LogoPath, int printWidth)
    {
        List<byte> byteList = new List<byte>();
        if (!File.Exists(LogoPath))
            return null;
        BitmapData data = GetBitmapData(LogoPath, printWidth);
        BitArray dots = data.Dots;
        byte[] width = BitConverter.GetBytes(data.Width);
        int offset = 0;
        // Initialize Printer (ESC @)
        byteList.Add(Convert.ToByte(Convert.ToChar(0x1B)));
        byteList.Add(Convert.ToByte('@'));
        // Line Spacing Adjust (24/180 inch)
        byteList.Add(Convert.ToByte(Convert.ToChar(0x1B)));
        byteList.Add(Convert.ToByte('3'));
        byteList.Add((byte)24);
        while (offset < data.Height)
        {
            byteList.Add(Convert.ToByte(Convert.ToChar(0x1B)));
            byteList.Add(Convert.ToByte('*'));
            byteList.Add((byte)33);
            byteList.Add(width[0]);
            byteList.Add(width[1]);
            for (int x = 0; x < data.Width; ++x)
            {
                for (int k = 0; k < 3; ++k)
                {
                    byte slice = 0;
                    for (int b = 0; b < 8; ++b)
                    {
                        int y = (((offset / 8) + k) * 8) + b;
                        int i = (y * data.Width) + x;
                        bool v = false;
                        if (i < dots.Length)
                            v = dots[i];
                        slice |= (byte)((v ? 1 : 0) << (7 - b));
                    }
                    byteList.Add(slice);
                }
            }
            offset += 24;
            byteList.Add(Convert.ToByte(0x0A));
        }
        // Return to normal line spacing (30/160 inch)
        byteList.Add(Convert.ToByte(0x1B));
        byteList.Add(Convert.ToByte('3'));
        byteList.Add((byte)30);
        return byteList.ToArray();
    }

    private static BitmapData GetBitmapData(string bmpFileName, int width)
    {
        using (var bitmap = (Bitmap)Bitmap.FromFile(bmpFileName))
        {
            var threshold = 127;
            var index = 0;
            double multiplier = width; // adjust image width
            double scale = (double)(multiplier / (double)bitmap.Width);
            int xheight = (int)(bitmap.Height * scale);
            int xwidth = (int)(bitmap.Width * scale);
            var dimensions = xwidth * xheight;
            var dots = new BitArray(dimensions);
            for (var y = 0; y < xheight; y++)
            {
                for (var x = 0; x < xwidth; x++)
                {
                    var _x = (int)(x / scale);
                    var _y = (int)(y / scale);
                    var color = bitmap.GetPixel(_x, _y);
                    var luminance = (int)(color.R * 0.3 + color.G * 0.59 + color.B * 0.11);
                    dots[index] = (luminance < threshold);
                    index++;
                }
            }
            return new BitmapData()
            {
                Dots = dots,
                Height = (int)(bitmap.Height * scale),
                Width = (int)(bitmap.Width * scale)
            };
        }
    }

    private class BitmapData
    {
        public BitArray Dots { get; set; }
        public int Height { get; set; }
        public int Width { get; set; }
    }
}
And I use this class like this in my printing code:
string Image_File_Path = @"D:\TEST\TESTImage.bmp";
int Image_Size_I_Want = 100;
byte[] img = ImagePrint.GetLogo(Image_File_Path, Image_Size_I_Want);
port.Write(img, 0, img.Length);
You can see the result in the attached picture: there are white space lines across the image.
This class adds a line spacing command, but it does not seem to work.
Please suggest a solution.
Use the 'mike42/escpos-php' package in Laravel:
use Mike42\Escpos\Printer;
use Mike42\Escpos\EscposImage;

$tux = EscposImage::load(public_path() . "\assets\img\path-to-file.jpg");
$printer->setJustification(Printer::JUSTIFY_CENTER);
$printer->bitImage($tux, 0);
$printer->setJustification();
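If you have to stay with raw ESC/POS bytes in C#, one way to avoid the gaps between the 24-dot ESC * bands (and the dependence on the ESC 3 line-spacing setting) is to send the whole image as a single raster block with GS v 0, which most thermal printers support. A minimal sketch, assuming the image is already packed 1 bit per pixel, MSB first, with each row padded to whole bytes (the helper and variable names are mine):
// GS v 0: print a raster bit image in one block, so there are
// no per-band line feeds that could leave white gaps.
static byte[] RasterImage(byte[] packedBits, int widthPixels, int heightPixels)
{
    int rowBytes = (widthPixels + 7) / 8;                // 1bpp rows padded to whole bytes
    var cmd = new List<byte> { 0x1D, 0x76, 0x30, 0x00 }; // GS 'v' '0', m = 0 (normal scale)
    cmd.Add((byte)(rowBytes & 0xFF));                    // xL
    cmd.Add((byte)((rowBytes >> 8) & 0xFF));             // xH
    cmd.Add((byte)(heightPixels & 0xFF));                // yL
    cmd.Add((byte)((heightPixels >> 8) & 0xFF));         // yH
    cmd.AddRange(packedBits);                            // rowBytes * heightPixels data bytes
    return cmd.ToArray();
}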

My Code Doesn't Work to Equalize Histogram

I tried to code the histogram equalization operation in order to enhance the contrast of images, but my code didn't work. When I displayed the image's original histogram and the histogram after processing with my code, I saw that the output histogram only has a value at 0 and there are no values for the other pixel intensities. I don't know why. Here is my code:
private: System::Void histogramEqualizationToolStripMenuItem_Click(System::Object^ sender, System::EventArgs^ e) {
    Raw_Intensity = ConvertBMPToIntensity(Buffer, Width, Height);
    int histogram[256] = { 0 };
    int equalizedHistogram[256] = { 0 };
    int runningSum = 0;
    int numberOfPixels = Width * Height;
    for (int row = 0; row < Height; row++)
    {
        for (int column = 0; column < Width; column++)
        {
            histogram[Raw_Intensity[row * Width + column]]++;
        }
    }
    for (int i = 0; i < 256; i++)
    {
        runningSum += histogram[i];
        int index = round(((runningSum / numberOfPixels) * 255));
        equalizedHistogram[index] += histogram[i];
    }
}
I think an integer division issue is occurring: runningSum / numberOfPixels is evaluated in integer arithmetic, so it truncates to 0 for almost every i, and every count lands in bin 0.
Change this line:
int index = round(((runningSum / numberOfPixels) * 255));
to
int index = round(((runningSum * 1.0 / numberOfPixels) * 255));
Multiplying by 1.0 forces the division to be done in floating point.
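Note that even with the division fixed, the code above only redistributes the histogram counts; to actually enhance the image you also have to remap every pixel through the scaled cumulative histogram (CDF). A minimal sketch of the whole pass, written here in C# (the array and method names are mine):
// Histogram equalization: build the histogram, turn its running sum
// into a 0..255 lookup table, then remap every pixel through it.
static void Equalize(byte[] intensity)
{
    int[] histogram = new int[256];
    foreach (byte v in intensity)
        histogram[v]++;

    int[] map = new int[256];
    long runningSum = 0;
    for (int i = 0; i < 256; i++)
    {
        runningSum += histogram[i];
        map[i] = (int)Math.Round(255.0 * runningSum / intensity.Length); // scaled CDF
    }

    for (int p = 0; p < intensity.Length; p++)
        intensity[p] = (byte)map[intensity[p]];
}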

How do I convert ByteArray from ImageMetaData() to Bitmap?

I have this code:
Frame frame = mSession.update();
Camera camera = frame.getCamera();
...
bytes = frame.getImageMetadata().getByteArray(0);
System.out.println("Byte Array " + frame.getImageMetadata().getByteArray(0));
Bitmap bmp = BitmapFactory.decodeByteArray(bytes, 0, bytes.length);
System.out.println(bmp);
When I print the Bitmap, I get a null object. I'm trying to get the image from the camera; that's why I'm converting the byte array to a Bitmap. If there's an alternative way, that would also be helpful.
Thank you.
The ImageMetaData describes the background image, but does not actually contain the image itself.
If you want to capture the background image as a Bitmap, you should look at the computervision sample which uses a FrameBufferObject to copy the image to a byte array.
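In other words, BitmapFactory.decodeByteArray returns null because the metadata byte array is not compressed image data (JPEG, PNG, etc.), which is the only kind of input that method can decode.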
I've tried something similar. It works, but I don't recommend this approach: it is slow because of the nested loops.
// CameraImageBuffer is the class provided in the computervision sample app.
CameraImageBuffer inputImage;
final Bitmap bmp = Bitmap.createBitmap(inputImage.width, inputImage.height, Bitmap.Config.ARGB_8888);
int width = inputImage.width;
int height = inputImage.height;
int frameSize = width * height;
// Copy the ByteBuffer into a byte[]
byte[] imageBuffer = new byte[inputImage.buffer.remaining()];
inputImage.buffer.get(imageBuffer);
int[] rgba = new int[frameSize];
for (int i = 0; i < height; i++) {
    for (int j = 0; j < width; j++) {
        // Mask with 0xFF so the signed bytes don't sign-extend.
        int r = imageBuffer[(i * width + j) * 4 + 0] & 0xFF;
        int g = imageBuffer[(i * width + j) * 4 + 1] & 0xFF;
        int b = imageBuffer[(i * width + j) * 4 + 2] & 0xFF;
        rgba[i * width + j] = 0xff000000 | (b << 16) | (g << 8) | r;
    }
}
bmp.setPixels(rgba, 0, width, 0, 0, width, height);
The ByteBuffer is converted to an RGBA int buffer, which is then written to the Bitmap. CameraImageBuffer is the class provided in the computervision sample app.
You may not be able to get a bitmap from the image metadata. Use the approach below instead: override the onDrawFrame method of the surface view renderer and read the rendered frame back from OpenGL.
@Override
public void onDrawFrame(GL10 gl) {
    int w = 1080;
    int h = 1080;
    int[] b = new int[w * h];
    int[] bt = new int[w * h];
    IntBuffer ib = IntBuffer.wrap(b);
    ib.position(0);
    // Read the rendered frame back from OpenGL (RGBA, bottom-up).
    GLES20.glReadPixels(0, 0, w, h, GLES20.GL_RGBA, GLES20.GL_UNSIGNED_BYTE, ib);
    for (int i = 0, k = 0; i < h; i++, k++) {
        for (int j = 0; j < w; j++) {
            int pix = b[i * w + j];
            // Swap the red and blue channels (GL gives RGBA, Bitmap wants ARGB)...
            int pb = (pix >> 16) & 0xff;
            int pr = (pix << 16) & 0x00ff0000;
            int pix1 = (pix & 0xff00ff00) | pr | pb;
            // ...and flip vertically, since glReadPixels starts at the bottom row.
            bt[(h - k - 1) * w + j] = pix1;
        }
    }
    final Bitmap mBitmap = Bitmap.createBitmap(bt, w, h, Bitmap.Config.ARGB_8888);
    runOnUiThread(new Runnable() {
        @Override
        public void run() {
            image_test.setImageBitmap(mBitmap);
        }
    });
}
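One caveat with this approach: glReadPixels stalls the GL pipeline while the framebuffer is copied back, so doing it on every frame will hurt the frame rate; it is fine for occasional captures.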

How to swap bit U with bit V in YUV format

I want to swap the U and V bits in YUV format, from NV12
YYYYYYYY UVUV // each letter represents a bit
to NV21
YYYYYYYY VUVU
I leave the Y plane alone and handle the U and V planes with the function below.
uchar swap(uchar in) {
    uchar out = ((in >> 1) & 0x55) | ((in << 1) & 0xaa);
    return out;
}
But I cannot get the desired result; the colour of the output image is still not correct.
How can I swap the U and V planes correctly?
Found the problem. U and V should be swapped as whole bytes, not bit by bit: each chroma sample is a full byte, and NV12 and NV21 differ only in the order of the interleaved U and V bytes.
byte[] yuv = // ...
final int length = yuv.length;
// The interleaved chroma plane starts after the width * height luma bytes;
// swap each U/V byte pair in place.
for (int i1 = 0; i1 < length; i1 += 2) {
    if (i1 >= width * height) {
        byte tmp = yuv[i1];
        yuv[i1] = yuv[i1 + 1];
        yuv[i1 + 1] = tmp;
    }
}
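(Equivalently, you can start the loop at width * height instead of testing the condition on every iteration; the chroma half of the buffer is the only part that changes.)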
Try this method (-_-):
IFrameCallback iFrameCallback = new IFrameCallback() {
    @Override
    public void onFrame(ByteBuffer frame) {
        // get NV12 data
        byte[] b = new byte[frame.remaining()];
        frame.get(b);
        // convert the NV12 data to NV21
        NV12ToNV21(b, 1280, 720);
        // send the NV21 data
        BVPU.InputVideoData(nv21, nv21.length,
                System.currentTimeMillis() * 1000, 1280, 720);
    }
};
byte[] nv21;

private void NV12ToNV21(byte[] data, int width, int height) {
    nv21 = new byte[data.length];
    int framesize = width * height;
    // The Y plane is identical in NV12 and NV21.
    System.arraycopy(data, 0, nv21, 0, framesize);
    // In the chroma plane, swap each byte pair: NV12 stores UVUV..., NV21 stores VUVU...
    for (int j = 0; j < framesize / 2; j += 2) {
        nv21[framesize + j] = data[framesize + j + 1];     // V
        nv21[framesize + j + 1] = data[framesize + j];     // U
    }
}

Improve javaFx processing performance

I'm working on image processing with JavaFX. I think my code is not efficient enough (with HD images, refreshing is very slow), because I loop over every pixel of the image each time I have to refresh it, but I don't know how to do it differently.
So I need help improving the performance of my processing.
This is my code:
import javafx.application.Application;
import javafx.beans.InvalidationListener;
import javafx.beans.Observable;
import javafx.beans.property.DoubleProperty;
import javafx.scene.Scene;
import javafx.scene.control.ScrollPane;
import javafx.scene.control.Slider;
import javafx.scene.image.Image;
import javafx.scene.image.ImageView;
import javafx.scene.image.PixelReader;
import javafx.scene.image.PixelWriter;
import javafx.scene.image.WritableImage;
import javafx.scene.layout.AnchorPane;
import javafx.scene.paint.Color;
import javafx.stage.Stage;

public class Example extends Application {
    private Image src;
    private WritableImage dest;
    private int width;
    private int height;
    int value = 0;

    @Override
    public void start(Stage stage) {
        AnchorPane root = new AnchorPane();
        initImage(root);
        Scene scene = new Scene(root);
        stage.setTitle("Demo processing");
        stage.setResizable(false);
        stage.setScene(scene);
        stage.show();
    }

    private void initImage(AnchorPane root) {
        src = new Image(
                "http://mikecann.co.uk/wp-content/uploads/2009/12/ScreenHunter_02-Dec.-10-19.41-1024x484.jpg");
        width = (int) src.getWidth();
        height = (int) src.getHeight();
        root.setPrefSize(800, 800 + 50);
        ScrollPane scrollPane = new ScrollPane();
        scrollPane.setPrefHeight(600);
        scrollPane.setPrefWidth(1000);
        dest = new WritableImage(width, height);
        ImageView destView = new ImageView(dest);
        scrollPane.setContent(destView);
        root.getChildren().add(scrollPane);
        AnchorPane.setTopAnchor(scrollPane, 0.0);
        Slider slider = new Slider(0, 255, 1);
        slider.setPrefSize(800, 50);
        slider.setShowTickLabels(true);
        slider.setShowTickMarks(true);
        slider.setSnapToTicks(true);
        slider.setMajorTickUnit(1.0);
        slider.setMinorTickCount(0);
        slider.setLayoutY(700);
        slider.valueProperty().addListener(new InvalidationListener() {
            @Override
            public void invalidated(Observable o) {
                value = (int) ((DoubleProperty) o).get();
                color();
            }
        });
        root.getChildren().add(slider);
        color();
    }

    private void color() {
        PixelReader reader = src.getPixelReader();
        PixelWriter writer = dest.getPixelWriter();
        for (int x = 0; x < width; x++) {
            for (int y = 0; y < height; y++) {
                Color color = reader.getColor(x, y);
                double red = (double) value * x * y / (width * height) / 255;
                double green = color.getGreen();
                double blue = (double) value * ((width * height) - x * y)
                        / (width * height) / 255;
                writer.setColor(x, y, Color.color(red, green, blue));
            }
        }
    }

    public static void main(String[] args) {
        launch(args);
    }
}
And this is with a full HD image:
src = new Image(
"http://www.freedomwallpaper.com//nature-wallpaper-hd/hd_sunshine_hd.jpg");
Getting the color of each pixel in a loop is too slow. So, read all the pixels at once, change the colors in the int array, and finally write the changed colors back with a single PixelWriter call.
Like this:
private void color() {
    PixelReader reader = src.getPixelReader();
    WritablePixelFormat<IntBuffer> format = WritablePixelFormat.getIntArgbInstance();
    int[] pixels = new int[width * height]; // buffer for all pixels
    reader.getPixels(0, 0, width, height, format, pixels, 0, width); // read all pixels in ARGB format
    int alpha = 0xFF << 24;
    for (int x = 0; x < width; x++) {
        for (int y = 0; y < height; y++) {
            int index = x + y * width;
            int argb = pixels[index];
            int red = value * x * y / (width * height);
            int green = (argb >> 8) & 0xFF;
            int blue = value * ((width * height) - x * y)
                    / (width * height);
            int newArgb = alpha | (red << 16) | (green << 8) | blue;
            pixels[index] = newArgb;
        }
    }
    PixelWriter writer = dest.getPixelWriter();
    writer.setPixels(0, 0, width, height, format, pixels, 0, width); // write the entire image at once
}
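The speedup comes from replacing millions of per-pixel getColor/setColor calls, each of which allocates a Color object, with two bulk array transfers; the per-pixel work inside the loop is then plain integer arithmetic on the array.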
