SDL code should greyscale the image but just loads it normally - image-processing

The code below should load an image and then display it greyscaled in a window. Instead it just shows the image unchanged. I've put printf("hello") in the loop starting with "for (int y = 0; y < image->h; y++)", but the console doesn't show "hello" unless I remove SDL_Delay(20000); with the delay removed it prints, but the image only flashes for a second and I can't tell whether it's greyscale or the same image.
#include <SDL2/SDL.h>
#include <SDL2/SDL_image.h>
#include <stdio.h>
#include "SDL2/SDL_ttf.h"

SDL_Window *window = NULL;
SDL_Surface *windowSurface = NULL;
SDL_Surface *image = NULL;
SDL_Event *event = NULL;
SDL_Texture *texture = NULL;

int main(int argc, char *argv[])
{
    if (SDL_Init(SDL_INIT_VIDEO) < 0)
    {
        perror("Cannot initialise SDL");
        SDL_Quit();
        return 1;
    }
    else
    {
        window = SDL_CreateWindow("Loading_image", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, 640, 480, SDL_WINDOW_SHOWN | SDL_WINDOW_RESIZABLE);
        if (window == NULL)
            perror("Cannot load image");
        else
        {
            windowSurface = SDL_GetWindowSurface(window);
            image = IMG_Load("image.bmp");
            if (image == NULL)
                perror("Cannot load image");
            else
            {
                SDL_BlitSurface(image, NULL, windowSurface, NULL);
            }
            SDL_UpdateWindowSurface(window);
            SDL_Delay(20000);
        }
    }
    SDL_UpdateTexture(texture, NULL, image->pixels, image->w * sizeof(Uint32));
    image = SDL_ConvertSurfaceFormat(image, SDL_PIXELFORMAT_ARGB8888, 0);
    Uint32 *pixels = (Uint32 *)image->pixels;
    int x = 0;
    int y = 0;
    for (int y = 0; y < image->h; y++)
    {
        for (int x = 0; x < image->w; x++)
        {
            Uint32 pixel = pixels[y * image->w + x];
            Uint8 r = 0, g = 0, b = 0;
            SDL_GetRGB(pixel, image->format, &r, &g, &b);
            Uint8 v = 0.212671f * r + 0.715160f * g + 0.072169f * b;
            SDL_MapRGB(image->format, v, v, v);
        }
    }
    int quit = 0;
    while (!quit) // This loop will loop until the conditions are met e.g. you quit the renderer
    {
        SDL_WaitEvent(event); // waits for the event (quitting the renderer)
        switch (event->type)
        {
        case SDL_QUIT:
            quit = 1;
            break;
        }
    }
    SDL_FreeSurface(image);
    image = NULL;
    window = NULL;
    windowSurface = NULL;
    SDL_DestroyWindow(window);
    IMG_Quit();
    SDL_Quit();
    return 0;
}

There are several issues with your code, mostly SDL specifics but also some problems with the grayscale conversion. (As for the missing "hello": the 20-second SDL_Delay runs before your conversion loop even starts, and printf output without a newline is typically buffered, so it may not appear until the program flushes or exits.)
I removed any unnecessary stuff I could spot and annotated some changes with comments.
#include <SDL.h>
#include <SDL_image.h>
#include <stdio.h>

SDL_Window *window = NULL;
SDL_Surface *windowSurface = NULL;
SDL_Surface *image = NULL;
SDL_Event event; // You may want to use an object instead of a pointer

int main(int argc, char *argv[])
{
    if (SDL_Init(SDL_INIT_VIDEO) < 0)
    {
        perror("Cannot initialise SDL");
        SDL_Quit();
        return 1;
    }
    else
    {
        window = SDL_CreateWindow("Loading_image", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, 640, 480, SDL_WINDOW_SHOWN | SDL_WINDOW_RESIZABLE);
        if (window == NULL)
            perror("Cannot create window"); // Error message fixed: this branch is window creation, not image loading
        else
        {
            windowSurface = SDL_GetWindowSurface(window);
            image = IMG_Load("image.bmp");
            if (image == NULL)
                perror("Cannot load image");
            // I removed the blitting code here, you basically don't need it here
            // Rather do it in the render loop below
        }
    }
    // Note: this replaces the pointer, so the surface returned by IMG_Load leaks unless you free it first
    image = SDL_ConvertSurfaceFormat(image, SDL_PIXELFORMAT_ARGB8888, 0);
    Uint32 *pixels = (Uint32 *)image->pixels;
    for (int y = 0; y < image->h; y++)
    {
        for (int x = 0; x < image->w; x++)
        {
            Uint32 pixel = pixels[y * image->w + x];
            Uint8 r = 0, g = 0, b = 0;
            SDL_GetRGB(pixel, image->format, &r, &g, &b);
            Uint8 v = 0.212671f * r + 0.715160f * g + 0.072169f * b;
            pixel = SDL_MapRGB(image->format, v, v, v); // Get the return value, which is the new pixel value
            pixels[y * image->w + x] = pixel;           // ...and assign it back to the pixels
        }
    }
    int quit = 0;
    while (!quit)
    {
        while (SDL_PollEvent(&event)) // Continuous checking for events
        {
            switch (event.type)
            {
            case SDL_QUIT:
                quit = 1;
                break;
            }
        }
        // "Render loop"
        SDL_BlitSurface(image, NULL, windowSurface, NULL);
        SDL_UpdateWindowSurface(window);
    }
    SDL_FreeSurface(image);
    image = NULL;
    SDL_DestroyWindow(window); // Destroy the window before clearing the pointer, not after
    window = NULL;
    windowSurface = NULL;
    IMG_Quit();
    SDL_Quit();
    return 0;
}
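
One detail neither listing handles, in case it bites someone: SDL surfaces may need locking before direct pixel access, and a row can be padded beyond w * 4 bytes. Here is a hedged sketch of the same conversion loop guarded with SDL_MUSTLOCK/SDL_LockSurface and stepping by image->pitch (these are real SDL2 calls; the variables follow the code above):

    if (SDL_MUSTLOCK(image))
        SDL_LockSurface(image); /* required before touching image->pixels on some surfaces */
    for (int y = 0; y < image->h; y++)
    {
        /* step by pitch (bytes per row), which may be larger than w * 4 */
        Uint32 *row = (Uint32 *)((Uint8 *)image->pixels + y * image->pitch);
        for (int x = 0; x < image->w; x++)
        {
            Uint8 r, g, b;
            SDL_GetRGB(row[x], image->format, &r, &g, &b);
            Uint8 v = (Uint8)(0.212671f * r + 0.715160f * g + 0.072169f * b);
            row[x] = SDL_MapRGB(image->format, v, v, v);
        }
    }
    if (SDL_MUSTLOCK(image))
        SDL_UnlockSurface(image);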

Related

How to calculate perimeter of a binary image using OpenCV 4.2 in C++

I want to calculate the perimeter of a white blob in a 512*512 binary image. The image will have only one blob. I used the following code earlier in OpenCV 3, but somehow it doesn't work in OpenCV 4.2: IplImage
is deprecated in the latest version, and I cannot pass a Mat object directly to the cvFindContours function. I am new to OpenCV and don't know how it works; other related questions regarding perimeter are still unanswered.
To summarise, the following works in OpenCV 3 but does not work in the current OpenCV version (4.2).
int getPerimeter(unsigned char* inImagePtr, int inW, int inH)
{
    int sumEven = 0; int sumOdd = 0;
    int sumCorner = 0; int prevCode = 0;
    //create a mat input Image
    cv::Mat inImage(inH, inW, CV_8UC1, inImagePtr);
    //create four connected structuring element
    cv::Mat element = cv::Mat::zeros(3, 3, CV_8UC1);
    element.data[1] = 1; element.data[3] = 1;
    element.data[4] = 1; element.data[5] = 1;
    element.data[7] = 1;
    //erode input image
    cv::Mat erodeImage;
    erode(inImage, erodeImage, element);
    //Invert eroded Image
    cv::threshold(erodeImage, erodeImage, 0, 255, THRESH_BINARY_INV);
    //multiply with original binary Image to get the edge Image
    cv::Mat edge = erodeImage.mul(inImage);
    //Get chain code of the blob
    CvChain* chain = 0;
    CvMemStorage* storage = 0;
    storage = cvCreateMemStorage(0);
    auto temp = new IplImage(edge);
    cvFindContours(temp, storage, (CvSeq**)(&chain), sizeof(*chain), CV_RETR_EXTERNAL, CV_CHAIN_CODE);
    delete temp;
    for (; chain != NULL; chain = (CvChain*)chain->h_next)
    {
        CvSeqReader reader;
        int i, total = chain->total;
        cvStartReadSeq((CvSeq*)chain, &reader, 0);
        for (i = 0; i < total; i++)
        {
            char code;
            CV_READ_SEQ_ELEM(code, reader);
            if (code % 2 == 0)
                sumEven++;
            else
                sumOdd++;
            if (i > 0) {
                if (code != prevCode)
                    sumCorner++;
            }
            prevCode = code;
        }
    }
    float perimeter = (float)sumEven*0.980 + (float)sumOdd*1.406 - (float)sumCorner*0.091;
    return (roundf(perimeter));
}
This worked just fine for me!
int getPerimeter(unsigned char* inImagePtr, int inW, int inH) {
    // create a mat input Image
    cv::Mat inImage(inH, inW, CV_8UC1, inImagePtr);
    // create four connected structuring element
    cv::Mat element = cv::Mat::zeros(3, 3, CV_8UC1);
    element.data[1] = 1;
    element.data[3] = 1;
    element.data[4] = 1;
    element.data[5] = 1;
    element.data[7] = 1;
    // erode input image
    cv::Mat erodeImage;
    erode(inImage, erodeImage, element);
    // Invert eroded Image
    cv::threshold(erodeImage, erodeImage, 0, 255, THRESH_BINARY_INV);
    // multiply with original binary Image to get the edge Image
    cv::Mat edge = erodeImage.mul(inImage);
    vector<vector<Point>> contours;
    findContours(edge, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE); // Retrieve only external contour
    int preValue[2];
    int nextValue[2];
    int sumEven = 0;
    int sumOdd = 0;
    for (int ii = 0; ii < contours[0].size(); ii++) {
        Point pt = contours[0].at(ii);
        preValue[0] = pt.x;
        preValue[1] = pt.y;
        if (ii != contours[0].size() - 1) {
            Point pt_next = contours[0].at(ii + 1);
            nextValue[0] = pt_next.x;
            nextValue[1] = pt_next.y;
        } else {
            Point pt_next = contours[0].at(0);
            nextValue[0] = pt_next.x;
            nextValue[1] = pt_next.y;
        }
        if ((preValue[0] == nextValue[0]) or (preValue[1] == nextValue[1])) {
            sumEven = sumEven + abs(nextValue[0] - preValue[0]) + abs(nextValue[1] - preValue[1]);
        } else {
            sumOdd = sumOdd + abs(nextValue[0] - preValue[0]);
        }
    }
    int sumCorner = contours[0].size() - 1;
    float perimeter = round(sumEven * 0.980 + sumOdd * 1.406 - sumCorner * 0.091);
    return (roundf(perimeter));
}
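
As an aside that is not from either answer: if the exact chain-code weighting above isn't required, OpenCV 4 can measure a contour's perimeter directly with cv::arcLength. A minimal sketch, assuming inImage is the CV_8UC1 binary input:

    // Hedged alternative: plain Euclidean perimeter instead of weighted chain codes.
    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(inImage, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);
    double perimeter = contours.empty() ? 0.0
                                        : cv::arcLength(contours[0], /*closed=*/true);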

error when running face detection source in opencv 2.4.9 and vs 2012

I use pre-built OpenCV 2.4.9. I tested image display in OpenCV 2.4.9 and it works, but this source gives an error. Is the error because I use pre-built OpenCV? What should I do, and what should I avoid?
I copied the XML file into the current folder. My hardware is a Core i5 with an ATI Radeon graphics card.
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <opencv/ml.h>

void doMosaic(IplImage* in, int x, int y,
              int width, int height, int size);

int main (int argc, char **argv)
{
    int i, c;
    IplImage *src_img = 0, *src_gray = 0;
    const char *cascade_name = "haarcascade_frontalface_alt.xml";
    CvHaarClassifierCascade *cascade = 0;
    CvMemStorage *storage = 0;
    CvSeq *faces;

    cascade = (CvHaarClassifierCascade *) cvLoad (cascade_name, 0, 0, 0);
    cvNamedWindow ("Capture", CV_WINDOW_AUTOSIZE);
    CvCapture *capture = cvCreateCameraCapture(0);
    assert(capture != NULL);

    while (1) {
        src_img = cvQueryFrame (capture);
        src_gray = cvCreateImage (cvGetSize(src_img), IPL_DEPTH_8U, 1);
        storage = cvCreateMemStorage (0);
        cvClearMemStorage (storage);
        cvCvtColor (src_img, src_gray, CV_BGR2GRAY);
        cvEqualizeHist (src_gray, src_gray);
        faces = cvHaarDetectObjects (src_gray, cascade, storage,
                                     1.11, 4, 0, cvSize (40, 40));
        for (i = 0; i < (faces ? faces->total : 0); i++) {
            CvRect *r = (CvRect *) cvGetSeqElem (faces, i);
            doMosaic(src_img, r->x, r->y, r->width, r->height, 20);
        }
        cvShowImage("Capture", src_img);
        cvReleaseImage(&src_gray);
        c = cvWaitKey (2);
        if (c == '\x1b')
            break;
    }
    cvReleaseCapture (&capture);
    cvDestroyWindow ("Capture");
    return 0;
}

void doMosaic(IplImage* in, int x0, int y0,
              int width, int height, int size)
{
    int b, g, r, col, row;
    int xMin = size*(int)floor((double)x0/size);
    int yMin = size*(int)floor((double)y0/size);
    int xMax = size*(int)ceil((double)(x0+width)/size);
    int yMax = size*(int)ceil((double)(y0+height)/size);
    for(int y=yMin; y<yMax; y+=size){
        for(int x=xMin; x<xMax; x+=size){
            b = g = r = 0;
            for(int i=0; i<size; i++){
                if( y+i > in->height ){
                    break;
                }
                row = i;
                for(int j=0; j<size; j++){
                    if( x+j > in->width ){
                        break;
                    }
                    b += (unsigned char)in->imageData[in->widthStep*(y+i)+(x+j)*3];
                    g += (unsigned char)in->imageData[in->widthStep*(y+i)+(x+j)*3+1];
                    r += (unsigned char)in->imageData[in->widthStep*(y+i)+(x+j)*3+2];
                    col = j;
                }
            }
            row++;
            col++;
            for(int i=0;i<row;i++){
                for(int j=0;j<col;j++){
                    in->imageData[in->widthStep*(y+i)+(x+j)*3] = cvRound((double)b/(row*col));
                    in->imageData[in->widthStep*(y+i)+(x+j)*3+1] = cvRound((double)g/(row*col));
                    in->imageData[in->widthStep*(y+i)+(x+j)*3+2] = cvRound((double)r/(row*col));
                }
            }
        }
    }
}
The error is a break in Microsoft Visual Studio; please help me, thanks very much.
First-chance exception at 0x75C4B727 in opencv.exe: Microsoft C++ exception: cv::Exception at memory location 0x003CF678.
If there is a handler for this exception, the program may be safely continued.
Finally I succeeded. First, for the PDB errors, I set Tools > Options > Debugging > Output Window > Module Load Messages to Off, then unchecked Tools > Options > Debugging > Symbols > Microsoft Symbol Servers, and then changed capture.open( -1 ) to capture.open( 0 ). With those settings it now works in OpenCV, thanks very much.
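
For reference, here is a minimal hedged sketch of two start-up checks that commonly catch this kind of cv::Exception (the identifiers follow the question's code; the NULL-check logic and the fprintf calls, which assume <stdio.h>, are additions rather than part of the original fix):

    cascade = (CvHaarClassifierCascade *) cvLoad (cascade_name, 0, 0, 0);
    if (cascade == NULL) {                          /* xml not found next to the exe */
        fprintf (stderr, "Cannot load %s\n", cascade_name);
        return 1;
    }
    CvCapture *capture = cvCreateCameraCapture (0); /* 0 = default camera, not -1 */
    if (capture == NULL) {
        fprintf (stderr, "Cannot open camera 0\n");
        return 1;
    }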

opencv r6010 abort() has been called error in visual studio 2013

I have some code to draw a line between two points on an image which are selected by mouse, and then to display a histogram.
However, when I press q as required by the code, I get an error saying "R6010 abort() has been called" along with a VC++ runtime error.
Please advise me on how I can find this error.
#include <vector>
#include "opencv2/highgui/highgui.hpp"
#include <opencv\cv.h>
#include <iostream>
#include <conio.h>

using namespace cv;
using namespace std;

struct Data_point
{
    int x;
    unsigned short int y;
};

int PlotMeNow(unsigned short int *values, unsigned int nSamples)
{
    std::vector<Data_point> graph(nSamples);
    for (unsigned int i = 0; i < nSamples; i++)
    {
        graph[i].x = i;
        graph[i].y = values[i];
    }
    cv::Size imageSize(5000, 500); // your window size
    cv::Mat image(imageSize, CV_8UC1);
    if (image.empty()) //check whether the image is valid or not
    {
        std::cout << "Error : Image cannot be created..!!" << std::endl;
        system("pause"); //wait for a key press
        return 0;
    }
    else
    {
        std::cout << "Good job : Image created successfully..!!" << std::endl;
    }
    // try to do some offsetting so the graph does not hide on the x or y axis
    Data_point dataOffset;
    dataOffset.x = 20;
    // we have to mirror the y axis!
    dataOffset.y = 5000;
    for (unsigned int i = 0; i < nSamples; ++i)
    {
        graph[i].x = (graph[i].x + dataOffset.x) * 3;
        graph[i].y = (graph[i].y + dataOffset.y) / 200;
    }
    // draw the samples
    for (unsigned int i = 0; i < nSamples - 1; ++i)
    {
        cv::Point2f p1;
        p1.x = graph[i].x;
        p1.y = graph[i].y;
        cv::Point2f p2;
        p2.x = graph[i + 1].x;
        p2.y = graph[i + 1].y;
        cv::line(image, p1, p2, 'r', 1, 4, 0);
    }
    cv::namedWindow("MyWindow1", CV_WINDOW_AUTOSIZE); //create a window with the name "MyWindow1"
    cv::imshow("MyWindow1", image); //display the image which is stored in 'image' in that window
    while (true)
    {
        char c = cv::waitKey(10);
        if (c == 'q')
            break;
    }
    destroyWindow("MyWindow1");
    destroyWindow("MyWindow"); //destroy the window with the name, "MyWindow"
    return 0;
}

void IterateLine(const Mat& image, vector<ushort>& linePixels, Point p2, Point p1, int* count1)
{
    LineIterator it(image, p2, p1, 8);
    for (int i = 0; i < it.count; i++, it++)
    {
        linePixels.push_back(image.at<ushort>(it.pos())); //doubt
    }
    *count1 = it.count;
}

//working line with mouse
void onMouse(int evt, int x, int y, int flags, void* param)
{
    if (evt == CV_EVENT_LBUTTONDOWN)
    {
        std::vector<cv::Point>* ptPtr = (std::vector<cv::Point>*)param;
        ptPtr->push_back(cv::Point(x, y));
    }
}

void drawline(Mat image, std::vector<Point>& points)
{
    cv::namedWindow("Output Window");
    cv::setMouseCallback("Output Window", onMouse, (void*)&points);
    int X1 = 0, Y1 = 0, X2 = 0, Y2 = 0;
    while (1)
    {
        cv::imshow("Output Window", image);
        if (points.size() > 1) //we have 2 points
        {
            for (auto it = points.begin(); it != points.end(); ++it)
            {
            }
            break;
        }
        waitKey(10);
    }
    //just for testing that we are getting pixel values
    X1 = points[0].x;
    X2 = points[1].x;
    Y1 = points[0].y;
    Y2 = points[1].y;
    // Draw a line
    line(image, Point(X1, Y1), Point(X2, Y2), 'r', 2, 8);
    cv::imshow("Output Window", image);
    //exit image window
    while (true)
    {
        char c = cv::waitKey(10);
        if (c == 'q')
            break;
    }
    destroyWindow("Output Window");
}

void show_histogram_image(Mat img1)
{
    int sbins = 65536;
    int histSize[] = { sbins };
    float sranges[] = { 0, 65536 };
    const float* ranges[] = { sranges };
    cv::MatND hist;
    int channels[] = { 0 };
    cv::calcHist(&img1, 1, channels, cv::Mat(), // do not use mask
                 hist, 1, histSize, ranges,
                 true, // the histogram is uniform
                 false);
    double maxVal = 0;
    minMaxLoc(hist, 0, &maxVal, 0, 0);
    int xscale = 10;
    int yscale = 10;
    cv::Mat hist_image;
    hist_image = cv::Mat::zeros(65536, sbins*xscale, CV_16UC1);
    for (int s = 0; s < sbins; s++)
    {
        float binVal = hist.at<float>(s, 0);
        int intensity = cvRound(binVal * 65535 / maxVal);
        rectangle(hist_image, cv::Point(s*xscale, hist_image.rows),
                  cv::Point((s + 1)*xscale - 1, hist_image.rows - intensity),
                  cv::Scalar::all(65535), 1);
    }
    imshow("Histogram", hist_image);
    waitKey(0);
}

int main()
{
    vector<Point> points1;
    vector<ushort> linePixels;
    Mat img = cvLoadImage("desert.jpg");
    if (img.empty()) //check whether the image is valid or not
    {
        cout << "Error : Image cannot be read..!!" << endl;
        system("pause"); //wait for a key press
        return -1;
    }
    //Draw the line
    drawline(img, points1);
    //now check the collected points
    Mat img1 = cvLoadImage("desert.jpg");
    if (img1.empty()) //check whether the image is valid or not
    {
        cout << "Error : Image cannot be read..!!" << endl;
        system("pause"); //wait for a key press
        return -1;
    }
    int *t = new int;
    IterateLine( img1, linePixels, points1[1], points1[0], t );
    PlotMeNow(&linePixels[0], t[0]);
    show_histogram_image(img);
    delete t;
    _getch();
    return 0;
}
This is one of the bad smells in your code:
void IterateLine(const Mat& image, vector<ushort>& linePixels, Point p2, Point p1, int* count1)
{
    ...
    linePixels.push_back(image.at<ushort>(it.pos())); //doubt
Now image is a CV_8UC3 image (from Mat img1 = cvLoadImage("desert.jpg");), but you are accessing it here as if it were CV_16UC1, so what gets put in linePixels is garbage. This will almost certainly cause PlotMeNow() to draw outside its image and corrupt something, which is probably why your code is crashing.
Since it is very unclear what your code is trying to do, I can't suggest what you should have here instead.
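If grayscale intensities along the line are what the code is after (an assumption about the intent, not something the question states), one hedged rewrite of IterateLine makes the Mat type match the access:

    // Hedged sketch: convert to single channel so at<uchar> matches the data.
    void IterateLine(const Mat& image, vector<ushort>& linePixels, Point p2, Point p1, int* count1)
    {
        Mat gray;
        cvtColor(image, gray, CV_BGR2GRAY);   // gray is CV_8UC1 now
        LineIterator it(gray, p2, p1, 8);
        for (int i = 0; i < it.count; i++, ++it)
            linePixels.push_back((ushort)gray.at<uchar>(it.pos()));
        *count1 = it.count;
    }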
I have just managed to do this; you only have to put "-1" in your loop limit:

    for (unsigned int i = 0; i < nSamples-1; i++)
    {
        graph[i].x = i;
        graph[i].y = values[i];
    }

D3DX9 Custom mesh overlaps with itself during render

I have a custom model file format that I am reading from to create a model in DX. I use

    DWORD dwFVF = ( D3DFVF_XYZ | D3DFVF_NORMAL | D3DFVF_TEX1 );
    D3DXCreateMeshFVF(numIndices/3, numVertices, D3DXMESH_MANAGED, dwFVF, *d3ddev, mesh);

to create the mesh, then lock, fill, and unlock the index buffer, vertex buffer, and attribute buffer in turn.
void createMeshFromSkn(ifstream* fHandle, LPDIRECT3DDEVICE9 * d3ddev, LPD3DXMESH * mesh)
{
    // Start reading the file
    int magic = readInt(fHandle);
    short version = readShort(fHandle);
    short numObjects = readShort(fHandle);
    SKNMaterial *materialHeaders;
    if (version > 0)
    {
        // Read in the material headers
        int numMaterialHeaders = readInt(fHandle);
        fHandle->seekg((16 + MATERIAL_NAME_SIZE) * numMaterialHeaders, ios::cur);
        // Read in model data.
        int numIndices = readInt(fHandle);
        int numVertices = readInt(fHandle);
        // Create the mesh
        DWORD dwFVF = ( D3DFVF_XYZ | D3DFVF_NORMAL | D3DFVF_TEX1 );
        D3DXCreateMeshFVF(numIndices/3, numVertices, D3DXMESH_MANAGED, dwFVF, *d3ddev, mesh);
        // Read in the index buffer
        WORD* indexBuffer = 0;
        (*mesh)->LockIndexBuffer(0, (void**)&indexBuffer);
        for (int i = 0; i < numIndices; i++)
        {
            indexBuffer[i] = readShort(fHandle);
        }
        (*mesh)->UnlockIndexBuffer();
        // Read in the vertexBuffer
        D3DVERTEX* vertexBuffer;
        (*mesh)->LockVertexBuffer( 0, (void**)&vertexBuffer);
        for (int i = 0; i < numVertices; ++i)
        {
            ((D3DVERTEX*)vertexBuffer)[i].position.x = readFloat(fHandle);
            ((D3DVERTEX*)vertexBuffer)[i].position.y = readFloat(fHandle);
            ((D3DVERTEX*)vertexBuffer)[i].position.z = readFloat(fHandle);
            for (unsigned int j = 0; j < BONE_INDEX_SIZE; ++j)
            {
                int bone = (int) readByte(fHandle);
                //data->vertices[i].boneIndex[j] = bone;
            }
            //////////////////////////////////////////////////////////////////////////
            //
            // Need to fix this to work with bones
            //
            //////////////////////////////////////////////////////////////////////////
            D3DXVECTOR4 weight;
            weight.x = readFloat(fHandle);
            weight.y = readFloat(fHandle);
            weight.z = readFloat(fHandle);
            weight.w = readFloat(fHandle);
            ((D3DVERTEX*)vertexBuffer)[i].normal.x = readFloat(fHandle);
            ((D3DVERTEX*)vertexBuffer)[i].normal.y = readFloat(fHandle);
            ((D3DVERTEX*)vertexBuffer)[i].normal.z = readFloat(fHandle);
            ((D3DVERTEX*)vertexBuffer)[i].tu = readFloat(fHandle);
            ((D3DVERTEX*)vertexBuffer)[i].tv = readFloat(fHandle);
        }
        (*mesh)->UnlockVertexBuffer();
        DWORD *pAttribBuf;
        HRESULT hRslt = (*mesh)->LockAttributeBuffer(0, &pAttribBuf);
        if (hRslt != D3D_OK)
            return; // Add error handling
        unsigned int numFaces = (*mesh)->GetNumFaces();
        for (unsigned int i = 0; i < numFaces; i++)
            pAttribBuf[i] = 0;
        hRslt = (*mesh)->UnlockAttributeBuffer();
        if (hRslt != D3D_OK)
            return; // Add error handling
        DWORD *m_pAdjacencyBuffer;
        m_pAdjacencyBuffer = new DWORD[3 * (*mesh)->GetNumFaces()];
        (*mesh)->GenerateAdjacency(0.0f, m_pAdjacencyBuffer);
        (*mesh)->OptimizeInplace(D3DXMESHOPT_ATTRSORT | D3DXMESHOPT_VERTEXCACHE, m_pAdjacencyBuffer, NULL, NULL, NULL);
    }
    return;
}
My problem is that the model is overlapping with itself:
http://imageshack.us/a/img210/2732/20121018181019896.png
I have CCW backface culling enabled:
    d3ddev->SetRenderState(D3DRS_CULLMODE, D3DCULL_CCW);
I also have z-buffer enabled, but I'm pretty sure that's only between two meshes, not between a mesh and itself.
I've spent the last day and a half trying to Google for a solution, but I couldn't find anything. Any help or links to help would be greatly appreciated.
It turns out I hadn't actually turned on Z-buffering, because I needed to enable it in the D3D presentation parameters:

    d3dpp.EnableAutoDepthStencil = TRUE;
    d3dpp.AutoDepthStencilFormat = D3DFMT_D16;

Once I did that and added a

    d3ddev->Clear(0, NULL, D3DCLEAR_ZBUFFER, D3DCOLOR_XRGB(0, 0, 0), 1.0f, 0);

to the render loop, it renders correctly.
Wow, so glad I figured this out. I hope this helps others in their explorations of DX
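
For anyone wiring this up from scratch, here is a minimal hedged sketch of how those two fixes sit in typical D3D9 initialisation (d3dpp and d3ddev follow the names above; every other presentation parameter shown is an assumption, not taken from this thread):

    D3DPRESENT_PARAMETERS d3dpp = {0};            // assumed windowed-mode setup
    d3dpp.Windowed = TRUE;
    d3dpp.SwapEffect = D3DSWAPEFFECT_DISCARD;
    d3dpp.EnableAutoDepthStencil = TRUE;          // the fix: ask D3D to create a depth buffer
    d3dpp.AutoDepthStencilFormat = D3DFMT_D16;
    // ... pass &d3dpp to IDirect3D9::CreateDevice to obtain d3ddev ...

    d3ddev->SetRenderState(D3DRS_ZENABLE, D3DZB_TRUE);   // depth testing on
    d3ddev->SetRenderState(D3DRS_CULLMODE, D3DCULL_CCW);

    // each frame, clear colour and depth together before drawing:
    d3ddev->Clear(0, NULL, D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER,
                  D3DCOLOR_XRGB(0, 0, 0), 1.0f, 0);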

SlimDX (DirectX10) - How to change a texel in Texture?

I am trying to change the texels of a texture which is already loaded.
My assumption was to use the Texture2D::Map and Unmap functions, but there is no change when I change the data of the given DataRectangle.
I need a simple example, like creating a 128x128 texture with a gradient from black to white from each side.
Thanks
PS: A Direct3D 10 C++ example may also help; SlimDX is only a wrapper and has nearly the same functions.
This is my D3D10 2D texture loader
bool D3D10Texture::Init( GFXHandler* pHandler, unsigned int usage, unsigned int width, unsigned int height, unsigned int textureType, bool bMipmapped, void* pTextureData )
{
    mMipmapped = bMipmapped;
    //SetData( pHandler, 0 );
    D3D10Handler* pD3DHandler = (D3D10Handler*)pHandler;
    ID3D10Device* pDevice = pD3DHandler->GetDevice();
    DXGI_SAMPLE_DESC dxgiSampleDesc;
    dxgiSampleDesc.Count = 1;
    dxgiSampleDesc.Quality = 0;
    D3D10_USAGE d3d10Usage;
    if ( usage & RU_All_Dynamic ) d3d10Usage = D3D10_USAGE_DYNAMIC;
    else d3d10Usage = D3D10_USAGE_DEFAULT;
    //unsigned int cpuAccess = D3D10_CPU_ACCESS_WRITE;
    //if ( (usage & RU_Buffer_WriteOnly) == 0 ) cpuAccess |= D3D10_CPU_ACCESS_READ;
    unsigned int cpuAccess = 0;
    if ( !pTextureData )
    {
        cpuAccess = D3D10_CPU_ACCESS_WRITE;
        //if ( (usage & RU_Buffer_WriteOnly) == 0 ) cpuAccess |= D3D10_CPU_ACCESS_READ;
    }
    unsigned int bindFlags = D3D10_BIND_SHADER_RESOURCE;
    if ( usage & RU_Texture_RenderTarget ) bindFlags |= D3D10_BIND_RENDER_TARGET;
    unsigned int miscFlags = 0;
    if ( usage & RU_Texture_AutoGenMipmap ) miscFlags |= D3D10_RESOURCE_MISC_GENERATE_MIPS;
    D3D10_TEXTURE2D_DESC d3d10Texture2DDesc;
    d3d10Texture2DDesc.Width = width;
    d3d10Texture2DDesc.Height = height;
    d3d10Texture2DDesc.MipLevels = GetNumMipMaps( width, height, bMipmapped );
    d3d10Texture2DDesc.ArraySize = 1;
    d3d10Texture2DDesc.Format = GetD3DFormat( (TextureTypes)textureType );
    d3d10Texture2DDesc.SampleDesc = dxgiSampleDesc;
    d3d10Texture2DDesc.Usage = d3d10Usage;
    d3d10Texture2DDesc.BindFlags = D3D10_BIND_SHADER_RESOURCE;
    d3d10Texture2DDesc.CPUAccessFlags = cpuAccess;
    d3d10Texture2DDesc.MiscFlags = miscFlags;
    //D3D10_SUBRESOURCE_DATA d3d10SubResourceData;
    //d3d10SubResourceData.pSysMem = pTextureData;
    //d3d10SubResourceData.SysMemPitch = GetPitch( width, (TextureTypes)textureType );
    //d3d10SubResourceData.SysMemSlicePitch = 0;
    D3D10_SUBRESOURCE_DATA* pSubResourceData = NULL;
    if ( pTextureData )
    {
        pSubResourceData = new D3D10_SUBRESOURCE_DATA[d3d10Texture2DDesc.MipLevels];
        char* pTexPos = (char*)pTextureData;
        unsigned int pitch = GetPitch( width, (TextureTypes)textureType );
        unsigned int count = 0;
        unsigned int max = d3d10Texture2DDesc.MipLevels;
        while( count < max )
        {
            pSubResourceData[count].pSysMem = pTexPos;
            pSubResourceData[count].SysMemPitch = pitch;
            pSubResourceData[count].SysMemSlicePitch = 0;
            pTexPos += pitch * height;
            pitch >>= 1;
            count++;
        }
    }
    if ( FAILED( pDevice->CreateTexture2D( &d3d10Texture2DDesc, pSubResourceData, &mpTexture ) ) )
    {
        return false;
    }
    if ( pSubResourceData )
    {
        delete[] pSubResourceData;
        pSubResourceData = NULL;
    }
    mWidth = width;
    mHeight = height;
    mFormat = (TextureTypes)textureType;
    mpTexture->AddRef();
    mpTexture->Release();
    D3D10_SHADER_RESOURCE_VIEW_DESC d3d10ShaderResourceViewDesc;
    d3d10ShaderResourceViewDesc.Format = d3d10Texture2DDesc.Format;
    d3d10ShaderResourceViewDesc.ViewDimension = D3D10_SRV_DIMENSION_TEXTURE2D;
    d3d10ShaderResourceViewDesc.Texture2D.MostDetailedMip = 0;
    d3d10ShaderResourceViewDesc.Texture2D.MipLevels = GetNumMipMaps( width, height, bMipmapped );
    if ( FAILED( pDevice->CreateShaderResourceView( mpTexture, &d3d10ShaderResourceViewDesc, &mpView ) ) )
    {
        return false;
    }
    ResourceRecorder::Instance()->AddResource( this );
    return true;
}
With that function, all you need to do is pass in the white-to-black texture data. For example, to write a 256x256 texture with each horizontal line one step brighter than the previous line, the following code will work:
int* pTexture = new int[256 * 256];
int count = 0;
while( count < 256 )
{
    int count2 = 0;
    while( count2 < 256 )
    {
        pTexture[(count * 256) + count2] = 0xff000000 | (count << 16) | (count << 8) | count;
        count2++;
    }
    count++;
}
Make sure you follow the rules in the "Resource Usage Restrictions" section:
MSDN: D3D10_USAGE
public void NewData(byte[] newData)
{
    DataRectangle mappedTex = null;
    // assign and lock the resource
    mappedTex = pTexture.Map(0, D3D10.MapMode.WriteDiscard, D3D10.MapFlags.None);
    // if unable to hold texture
    if (!mappedTex.Data.CanWrite)
    {
        throw new ApplicationException("Cannot Write to the Texture");
    }
    // write new data to the texture
    mappedTex.Data.WriteRange<byte>(newData);
    // unlock the resource
    pTexture.Unmap(0);
    if (samplerflag)
        temptex = newData;
}
This overwrites the buffer on every new frame; you may want to use D3D10.MapMode.ReadWrite or similar if you are only trying to write one texel.
You will also need to write to the DataRectangle at a specific position using one of the other write functions.
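
And since the question invites a plain Direct3D 10 C++ example: Map with WriteDiscard hands back an uninitialised texture, so for a single texel a hedged alternative (assuming the texture was created with D3D10_USAGE_DEFAULT; pDevice, pTexture, x and y are placeholders) is UpdateSubresource with a one-texel box:

    // Hedged sketch: overwrite one texel without discarding the rest of the texture.
    // UpdateSubresource requires D3D10_USAGE_DEFAULT (it does not work on DYNAMIC resources).
    UINT32 texel = 0xFFFF0000;                      // opaque red, assuming a BGRA/ARGB layout
    D3D10_BOX box = { x, y, 0, x + 1, y + 1, 1 };   // left, top, front, right, bottom, back
    pDevice->UpdateSubresource(pTexture, 0,         // subresource 0 = top mip of array slice 0
                               &box, &texel, sizeof(texel), 0);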
