OK.
I can load a mesh into my project, but I can't change the mesh's color.
On the screen, the mesh's color is white.
I want to change it to red, but I can't.
Please help.
Here is my code:
// --- Fixed-function state setup for drawing the loaded mesh ---
// Lighting is turned OFF here: with D3DRS_LIGHTING = FALSE the material
// set below via SetMaterial() is ignored and the rasterizer uses the
// per-vertex diffuse color instead — NOTE(review): this is most likely
// why changing the material does not change the mesh's color.
g_pApp->m_pd3dDevice->SetRenderState(D3DRS_LIGHTING, FALSE);
g_pApp->m_pd3dDevice->SetTextureStageState(0, D3DTSS_COLORARG1, D3DTA_TEXTURE);
g_pApp->m_pd3dDevice->SetTextureStageState(0, D3DTSS_COLORARG2, D3DTA_DIFFUSE);
// NOTE(review): D3DTOP_DISABLE on stage 0 disables color output for the
// entire texture cascade, so the two COLORARG settings above have no effect.
g_pApp->m_pd3dDevice->SetTextureStageState(0, D3DTSS_COLOROP, D3DTOP_DISABLE);
g_pApp->m_pd3dDevice->SetRenderState(D3DRS_ALPHABLENDENABLE, FALSE);
g_pApp->m_pd3dDevice->SetTextureStageState(0, D3DTSS_ALPHAOP, D3DTOP_DISABLE);
g_pApp->m_pd3dDevice->SetTextureStageState(1, D3DTSS_COLOROP, D3DTOP_DISABLE);
g_pApp->m_pd3dDevice->SetTextureStageState(2, D3DTSS_COLOROP, D3DTOP_DISABLE);
// Material initialized to (1,1,1) — presumably white; depends on
// D3DUtil_InitMaterial's argument order — TODO confirm.
D3DMATERIAL9 mtrl;
D3DUtil_InitMaterial(mtrl,1,1,1);
// Draw each subset with its own texture (if any) and the shared material.
for(int j = 0 ; j < m_meshTarget[i].dwNumMaterial ; j++){
if( m_meshTarget[i].pnTextureIndex[j] != -1){
g_pApp->m_pd3dDevice->SetTexture(0, m_meshTarget[i].ppTexture[m_meshTarget[i].pnTextureIndex[j]]);}
else
g_pApp->m_pd3dDevice->SetTexture(0,0);
g_pApp->m_pd3dDevice->SetMaterial(&mtrl);
m_meshTarget[i].pMesh->DrawSubset(j);
}
I'm not exactly sure what D3DUtil_InitMaterial() does, but you need to set proper values for the material's (mtrl) lighting properties.
// Target color for the mesh: red.
float red = 1.0f, green = 0.0f, blue = 0.0f;
// The diffuse term usually dominates under directional/point lights, so it
// must carry the target color too.  The original left Diffuse at (1,1,1),
// which keeps the mesh white under diffuse lighting.
mtrl.Diffuse.r = red;
mtrl.Diffuse.g = green;
mtrl.Diffuse.b = blue;
mtrl.Ambient.r = red;
mtrl.Ambient.g = green;
mtrl.Ambient.b = blue;
// Specular highlight color and sharpness (example values).
mtrl.Specular.r = 0.5f;
mtrl.Specular.g = 0.5f;
mtrl.Specular.b = 0.5f;
mtrl.Power = 8.0f;
// NOTE(review): the material only takes effect when D3DRS_LIGHTING is TRUE;
// the snippet above sets it to FALSE, which must be reverted for this to work.
g_pApp->m_pd3dDevice->SetMaterial(&mtrl);
The diffuse, specular, and power values are just for example.
Related
How to remove the interference line from a captcha picture when the interference line and the text are the same color
I try to use the following picture as a demo
demo picture
Use the following code processing
// Removes a thin, mostly horizontal interference line from a captcha image.
//
// Strategy: binarize the image, locate the strongest line with findLines(),
// then for every pixel on that line measure the vertical run of black
// pixels through it — a short run is just the line's thickness, a long run
// means the line crosses a character stroke and must be kept.
//
// image: source captcha (assumed RGB — cvCvtColor uses CV_RGB2GRAY).
// Returns a cleaned, binarized UIImage.
- (UIImage *)cleanLine:(UIImage *)image {
IplImage *src = [self convertToIplImage:image];
IplImage *gray = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
IplImage *dst = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
IplImage *binary = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
cvCvtColor(src, gray, CV_RGB2GRAY);
// With CV_THRESH_OTSU the fixed threshold (120) is ignored; spelling out
// CV_THRESH_BINARY makes the thresholding mode explicit (same behavior).
cvThreshold(gray, binary, 120, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
findLines(gray, dst);
for (int row = 0; row < binary->height; row++)
{
for (int col = 0; col < binary->width; col++)
{
// Only process pixels lying on the detected interference line.
if (cvGet2D(dst, row, col).val[0] != 255) continue;
int up = 0, down = 0;
int white = 0;
// Count black pixels scanning upward, tolerating gaps of <= 2 whites.
for (int i = row; i >= 0; i--)
{
if (cvGet2D(binary, i, col).val[0] == 0)
{
up++;
white = 0;
}
else white++;
if (white > 2) break;
}
white = 0;
// Same scan, downward.
for (int i = row; i < binary->height; i++)
{
if (cvGet2D(binary, i, col).val[0] == 0)
{
down++;
white = 0;
}
else white++;
if (white > 2) break;
}
// Thin run => interference line only: whiten it out.
if (up + down < 8)
{
for (int i = -up; i <= down; i++)
{
// Bounds guard: `up`/`down` count black pixels (gaps allowed), so
// row - up / row + down can step past the image edges; the original
// indexed out of bounds near the top and bottom borders.
int r = row + i;
if (r >= 0 && r < binary->height) cvSet2D(binary, r, col, cvScalar(255));
}
}
}
}
erase(binary);
cvErode(binary, binary, NULL, 1);
cvDilate(binary, binary, NULL, 1);
// copyData = true: the original shared the pixel buffer with `binary` and
// then released it below, so MatToUIImage read freed memory (use-after-free).
Mat Img = cvarrToMat(binary, true);
cvReleaseImage(&src);
cvReleaseImage(&gray);
cvReleaseImage(&dst);
cvReleaseImage(&binary);
return MatToUIImage(Img);
}
Find the line of interference code
// Detects the single longest (widest horizontally) line segment in `raw`
// and draws it in white onto `dst`.
//
// raw: 8-bit single-channel input image (the grayscale captcha).
// dst: 8-bit single-channel output; zeroed, then the detected interference
//      line is drawn with value 255 (left black if no line is found).
void findLines(IplImage *raw, IplImage *dst) {
IplImage *src = cvCloneImage(raw);
IplImage *canny = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
cvCanny(src, canny, 20, 200, 3);
CvMemStorage *stor = cvCreateMemStorage(0);
CvSeq *lines = cvHoughLines2(canny, stor, CV_HOUGH_PROBABILISTIC, 1, CV_PI / 180, 80, 200, 30);
cvZero(dst);
// Initialized so we never feed garbage to cvLine: the original left these
// uninitialized and drew them even when Hough returned no segments (UB).
CvPoint maxStart = cvPoint(0, 0), maxEnd = cvPoint(0, 0);
int maxDistance = 0;
for (int i = 0; i < lines->total; i++) {
CvPoint* line = (CvPoint*)cvGetSeqElem(lines, i);
// Rank segments by horizontal extent: the interference line runs
// mostly left-to-right across the captcha.
int distance = abs(line[0].x - line[1].x);
if (distance > maxDistance) {
maxDistance = distance;
maxStart = line[0];
maxEnd = line[1];
}
}
// Draw only when a candidate segment was actually found.
if (maxDistance > 0) cvLine(dst, maxStart, maxEnd, cvScalar(255), 1);
cvReleaseImage(&src);
cvReleaseImage(&canny); // was leaked in the original
cvReleaseMemStorage(&stor);
}
Get the result picture result picture
I have a part of the captcha image here
captcha image
I used my code to process the captcha image, but it did not work
I do not know how to modify my code to make it clear the interference line
Can anyone guide me? Thank you very much.
// Runs the legacy active-contour ("snake") algorithm on a thresholded copy
// of `processingImage` and returns the image with the converged contour
// drawn on it.
//
// NOTE(review): rewritten so it compiles. The original (a) assigned
// `&CvMat` to a cv::Mat, (b) took the address of a garbled identifier
// (`©` — a mangled `&copy`), (c) passed scalar floats where cvSnakeImage
// expects `float *` coefficients plus a CvPoint array and length, and
// (d) fell off the end of a UIImage*-returning method.
//
// cvSnakeImage (legacy API):
//   void cvSnakeImage(const IplImage *image, CvPoint *points, int length,
//                     float *alpha, float *beta, float *gamma,
//                     int coeff_usage, CvSize win,
//                     CvTermCriteria criteria, int calc_gradient);
- (UIImage*) snake:(UIImage *)processingImage :(UIImageView *)contourImage{
CvMat cvMatImage = [processingImage CVGrayscaleMat];
// Wrap the CvMat header as a cv::Mat (no copy), then as an IplImage header
// so the legacy C API can operate on the same pixels.
cv::Mat matImage = cv::cvarrToMat(&cvMatImage);
IplImage header = matImage;
IplImage *snakeImage = &header;
cvThreshold(snakeImage, snakeImage, 170, 255, CV_THRESH_BINARY);
// Energy weights: continuity (alpha), curvature (beta), image term (gamma).
float alpha = 0.1f;
float beta = 0.5f;
float gamma = 0.4f;
// Neighborhood searched around every point at each iteration.
CvSize searchWindow = cvSize(5, 5);
CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_ITER, 10000, 0.1);
// Initial contour: 40 points on an ellipse just inside the image border.
// TODO(review): seed this near the object of interest instead.
const int length = 40;
CvPoint snakePoints[length];
int w = snakeImage->width, h = snakeImage->height;
for (int i = 0; i < length; i++) {
double t = 2.0 * CV_PI * i / length;
snakePoints[i].x = (int)(w / 2.0 + (w / 2.0 - 2.0) * cos(t));
snakePoints[i].y = (int)(h / 2.0 + (h / 2.0 - 2.0) * sin(t));
}
// CV_VALUE: one shared coefficient per energy term (hence the pointers).
cvSnakeImage(snakeImage, snakePoints, length, &alpha, &beta, &gamma,
             CV_VALUE, searchWindow, criteria, 1);
// Draw the converged, closed contour so the caller can display it.
for (int i = 0; i < length; i++) {
cvLine(snakeImage, snakePoints[i], snakePoints[(i + 1) % length], cvScalar(255), 1);
}
// Copy the pixels out before the local cv::Mat header goes away.
return MatToUIImage(cv::cvarrToMat(snakeImage, true));
}
I have already added the #include as well. When I type cvSnakeImage, the autocomplete assistant shows the function. However, when I finish typing the call, the compiler reports that no matching function exists for the call.
Thanks in advance!
As I mentioned in the title: do you know how to flip an ID3D10Texture2D object horizontally or vertically?
I used this code to take screenshot and save it to a file.
// Fetch the resource behind the current render-target view (the back buffer).
ID3D10Resource *backbufferRes;
renderTargetView->GetResource(&backbufferRes);
ID3D10Texture2D *mRenderedTexture;
// Create our texture
D3D10_TEXTURE2D_DESC texDesc;
texDesc.ArraySize = 1;
texDesc.BindFlags = 0;
texDesc.CPUAccessFlags = 0;
texDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
texDesc.Width = 640; // must be same as backbuffer
texDesc.Height = 480; // must be same as backbuffer
texDesc.MipLevels = 1;
texDesc.MiscFlags = 0;
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.Usage = D3D10_USAGE_DEFAULT;
d3d10Device->CreateTexture2D(&texDesc, 0, &mRenderedTexture);
// GPU-side copy of the back buffer into our texture.
d3d10Device->CopyResource(mRenderedTexture, backbufferRes);
// NOTE(review): per the D3DX10 docs, D3DX10_FILTER_MIRROR_U only controls
// how texels *off the edge* are addressed during filtering — it does not
// flip the image, which is why no mirroring is observed in the saved file.
D3DX10FilterTexture(mRenderedTexture, 0, D3DX10_FILTER_MIRROR_U);
D3DX10SaveTextureToFile(mRenderedTexture, D3DX10_IFF_PNG, L"test.png");
The line D3DX10FilterTexture(mRenderedTexture, 0, D3DX10_FILTER_MIRROR_U); doesn't mirror my texture. Any suggestions?
In your shader do 1-u to flip horizontally or 1-v to flip vertically.
Edit: If you aren't actually doing any rendering then there are far better ways to do image manipulation. However if you want to do it manually you will have to use map and flip the data round yourself.
You could do that as follows (The code is not tested so please excuse any compile errors):
D3D10Resource *backbufferRes;
renderTargetView->GetResource(&backbufferRes);
ID3D10Texture2D *mRenderedTexture;
// Create our texture
D3D10_TEXTURE2D_DESC texDesc;
texDesc.ArraySize = 1;
texDesc.BindFlags = 0;
texDesc.CPUAccessFlags = 0;
texDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
texDesc.Width = 640; // must be same as backbuffer
texDesc.Height = 480; // must be same as backbuffer
texDesc.MipLevels = 1;
texDesc.MiscFlags = 0;
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.Usage = D3D10_USAGE_DEFAULT;
d3d10Device->CreateTexture2D(&texDesc, 0, &mRenderedTexture);
d3d10Device->CopyResource(mRenderedTexture, backbufferRes);
D3D10_MAPPED_TEXTURE2D d3d10MT = { 0 };
mRenderedTexture->Map( 0, D3D10_MAP_READ_WRITE, 0, &d3d10MT );
unsigned int* pPix = (unsigned int)d3d10MT.pData;
int rows = 0;
int rowsMax = height;
while( rows < rowsMax )
{
unsigned int* pRowStart = pPix + (rows * width);
unsigned int* pRowEnd = pRowStart + width;
std::reverse( pRowStart, pRowEnd );
rows++;
}
mRenderedTexture->Unmap();
D3DX10SaveTextureToFile(mRenderedTexture, D3DX10_IFF_PNG, L"test.png");
From the doc btw:
D3DX10_FILTER_MIRROR_U Pixels off the edge of the texture on the
u-axis should be mirrored, not wrapped.
So that only counts for the pixels round the edge when you are filtering the image.
How can we draw a background pattern (to be set to UIImageView) programmatically like the following?
It has alternating colored squares of about 20 x 20 pixels. I can do it with REAL Studio and MS Visual Basic, but I have never done it on iOS. If I run a search, one clue that I get is colorWithPatternImage. I used the following REAL Studio code a few years ago. It works regardless of the dimensions of the canvas (equivalent to UIImageView).
' Fills the canvas with a checkerboard of 20x20-pixel squares whose colors
' depend on the mode typed into CField1 ("1", "2", or anything else).
Dim i,j As Integer
' Visit every 20-pixel cell covering the canvas (Ceil so partial cells at
' the right/bottom edges are painted too).
For j=0 To Ceil(CanvasX.Height/20)
For i=0 To Ceil(CanvasX.Width/20)
' Row and column both even: a "dark" square of the pattern.
If i Mod 2=0 And j Mod 2=0 Then
If CField1.text="1" Then
g.ForeColor=&cCC9900
Elseif CField1.text="2" Then
g.ForeColor=&c000000
Else
g.ForeColor=&cCCCCCC
End if
' Row and column both odd: the diagonal neighbor, same dark family
' (note mode "2" falls through to the light gray here).
Elseif i Mod 2>0 And j Mod 2>0 Then
If CField1.text="1" Then
g.ForeColor=&cCC9900
Else
g.ForeColor=&cCCCCCC
End if
' Mixed parity: a "light" square of the pattern.
Else
If CField1.text="1" Then
g.ForeColor=&cE6E6E6
Else
g.ForeColor=&cFFFFFF
End if
End if
' Paint the current 20x20 cell.
g.FillRect i*20,j*20,20,20
Next i
Next j
Thank you for your help.
Approach #1: take this image:
Then set a background color by specifying a pattern image:
// "#" was a Markdown-mangled "@" — Objective-C string literals need @"...".
UIImage *bgImage = [UIImage imageNamed:@"squares"];
UIColor *bgColor = [UIColor colorWithPatternImage:bgImage];
someView.backgroundColor = bgColor;
Approach #2: use Quartz. Subclass UIView, then implement the following method:
// Draws a checkerboard of `side`-point squares alternating white and light
// gray, with the colors offset by one square on odd rows.
- (void)drawRect:(CGRect)rect
{
[super drawRect:rect];
CGContextRef ctx = UIGraphicsGetCurrentContext();
NSLog(@"%@", ctx); // was NSLog(#"%#", ...) — Markdown-mangled "@" characters
CGFloat ws = self.frame.size.width;
CGFloat hs = self.frame.size.height;
const int side = 10; // square edge length, in points
int nx = ws / side;  // whole squares per row
int ny = hs / side;  // whole squares per column
// (nx + 1) / 2 slots are needed: with an odd nx the even-column pass
// writes index nx / 2, one past the original nx / 2-sized array (overflow).
CGRect rects[(nx + 1) / 2];
for (int i = 0; i < ny; i++) {
// Pass 1: even columns.
int count = 0;
for (int j = 0; j < nx; j += 2) {
rects[count++] = CGRectMake(j * side, i * side, side, side);
}
// CGContextSetFillColor requires a fill color space to have been set;
// the gray convenience setter is self-contained and equivalent here
// (white = 1.0, light gray = 0.75, alpha 1.0).
if (i % 2) {
CGContextSetGrayFillColor(ctx, 0.75, 1.0);
} else {
CGContextSetGrayFillColor(ctx, 1.0, 1.0);
}
CGContextFillRects(ctx, rects, count);
// Pass 2: odd columns get the opposite color.
count = 0;
for (int j = 1; j < nx; j += 2) {
rects[count++] = CGRectMake(j * side, i * side, side, side);
}
if (i % 2) {
CGContextSetGrayFillColor(ctx, 1.0, 1.0);
} else {
CGContextSetGrayFillColor(ctx, 0.75, 1.0);
}
CGContextFillRects(ctx, rects, count);
}
}
I am in the process of implementing the Lucas-Kanade algorithm using OpenCV. Even though my intention is to track facial features, as a first cut I am getting all the good features using the cvGoodFeaturesToTrack API and, using these points as the input, I am trying to track them with the Lucas-Kanade algorithm.
Now the scenario is this: if I start moving the objects captured by the camera near the edges of the frame (but not out of the frame), I observe that the LK algorithm starts giving me points that are either outside the frame or have negative values.
Please let me know whether my implementation is wrong or whether this behavior is expected from the LK tracking method. I am also attaching my code at the end of this post for reference.
Regards,
Sujil C
// --- Module-level state shared across processLKFrame() calls ---
IplImage *image = 0,
*grey = 0,           // current frame, grayscale
*prev_grey = 0,      // previous frame, grayscale
*pyramid = 0,        // LK pyramid buffer for the current frame
*prev_pyramid = 0,   // LK pyramid buffer for the previous frame
*swap_temp = 0,      // scratch pointer used by CV_SWAP
*velx = 0,           // allocated but unused by processLKFrame — TODO confirm other users
*vely = 0;
const int MAX_COUNT = 20;  // maximum number of tracked points
// points[0] = positions in the previous frame, points[1] = current frame;
// the two buffers are CV_SWAPped every call.
CvPoint2D32f* points[2] = {0,0}, *swap_points;
char* status = 0;          // per-point status filled by cvCalcOpticalFlowPyrLK
int lkcount = 0;           // number of points currently tracked
int detectGoodFeature = 0; // when nonzero, (re)detect features on the next frame
int flags = 0;             // flags passed to cvCalcOpticalFlowPyrLK
CvPoint pt;
CvSize currentFrameSize;   // size the buffers above were allocated for
// Runs one step of pyramidal Lucas-Kanade point tracking on `frame`.
//
// frame:      the new camera frame; it is cvCopy'd straight into `grey`,
//             so it is assumed to already be 8-bit single-channel —
//             TODO(review): confirm at the call site.
// pointCount: out-parameter; receives the number of valid tracked points.
//
// Returns a pointer to the module-level points[0] buffer holding the
// tracked point positions for this frame.
//
// Fix vs. original: points reported by cvCalcOpticalFlowPyrLK are now
// filtered by their status flag and clamped to the frame bounds, so lost
// points and out-of-frame/negative coordinates (the behavior observed
// near the frame edges) are no longer returned to the caller.
CvPoint2D32f* processLKFrame(IplImage* frame, int &pointCount)
{
int win_size = 15;
int level = 5;
int i, k;
// If frame size has changed, release all resources (they will be reallocated further on)
if ( (grey && ((currentFrameSize.width != cvGetSize(frame).width) || (currentFrameSize.height != cvGetSize(frame).height))))
{
// Release images
cvReleaseImage(&grey);
cvReleaseImage(&prev_grey);
cvReleaseImage(&pyramid);
cvReleaseImage(&prev_pyramid);
cvReleaseImage(&velx);
cvReleaseImage(&vely);
// Release buffers
cvFree(&(points[0]));
cvFree(&(points[1]));
cvFree(&status);
// Zerofiy grey so initialization will occur
grey = NULL;
}
// Initialize (first call, or after a size change)
if( !grey )
{
/* allocate all the buffers */
currentFrameSize = cvGetSize(frame);
grey = cvCreateImage( currentFrameSize, 8, 1 );
prev_grey = cvCreateImage( currentFrameSize, 8, 1 );
pyramid = cvCreateImage( currentFrameSize, 8, 1 );
prev_pyramid = cvCreateImage( currentFrameSize, 8, 1 );
velx = cvCreateImage(currentFrameSize, 32, 1);
vely = cvCreateImage(currentFrameSize, 32, 1);
points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
status = (char*)cvAlloc(MAX_COUNT);
flags = 0;
}
printf("Current Frame Size : Width:%d Height:%d\n", currentFrameSize.width, currentFrameSize.height );
cvCopy( frame, grey, 0 );
if (detectGoodFeature) {
/* automatic initialization: pick strong corners to track */
IplImage* eig = cvCreateImage( cvGetSize(grey), 32, 1 );
IplImage* temp = cvCreateImage( cvGetSize(grey), 32, 1 );
double quality = 0.01;
double min_distance = 10;
lkcount = MAX_COUNT;
cvGoodFeaturesToTrack( grey, eig, temp, points[1], &lkcount,
quality, min_distance, 0, 3, 0, 0.04 );
cvFindCornerSubPix( grey, points[1], lkcount,
cvSize(win_size,win_size), cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
cvReleaseImage( &eig );
cvReleaseImage( &temp );
}
else if( lkcount > 0 )
{
//For debugging points
printf("==============================================================================================================\n");
printf("Input Points:");
for (i = 0; i < lkcount; i++) {
printf("(%f, %f)", points[0][i].x, points[0][i].y);
}
printf("\n");
// Calc movement of tracked points
cvCalcOpticalFlowPyrLK( prev_grey, grey, prev_pyramid, pyramid,
points[0], points[1], lkcount, cvSize(win_size,win_size), level, status, 0,
cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), 0 );
// Drop points that were lost (status == 0) or that the tracker pushed
// outside the frame — LK legitimately reports such coordinates near
// the image border; compact the survivors in place.
k = 0;
for (i = 0; i < lkcount; i++)
{
if (!status[i])
continue;
if (points[1][i].x < 0 || points[1][i].y < 0 ||
points[1][i].x >= currentFrameSize.width ||
points[1][i].y >= currentFrameSize.height)
continue;
points[1][k] = points[1][i];
status[k] = status[i];
k++;
}
lkcount = k;
//For debugging points
printf("Tracked Points:");
for (i = 0; i < lkcount; i++) {
printf("(%f, %f),", points[1][i].x, points[1][i].y);
}
printf("\n");
printf("==============================================================================================================\n");
}
// Current frame becomes the previous frame for the next call.
CV_SWAP( prev_grey, grey, swap_temp );
CV_SWAP( prev_pyramid, pyramid, swap_temp );
CV_SWAP( points[0], points[1], swap_points );
detectGoodFeature = 0;
pointCount = lkcount;
return points[0];
}