H.265 sequence parameter set syntax table required for parsing

I have been searching for the sequence parameter set (SPS) syntax table for H.265 for a long time, but I am unable to find it. I have the H.264 syntax table, which I used for parsing H.264 files, and it works fine. I am trying to do the same with an H.265 video file to get the resolution. The parser I derived my version from, for reference: https://github.com/virinext/hevcesbrowser
Note: with the hevcesbrowser source I can see and understand each value and its type, but I am not getting the same results, so a syntax table would be helpful.
I need to know the descriptor for each H.265 syntax element, like ue(v), u(n) or se(v), and its value.
Please refer to this image for details:
(Image: table of the H.264 SPS parameter syntax.)
int32_t CtbLog2SizeY = m_spsMap[spsId] -> log2_min_luma_coding_block_size_minus3 + 3 + m_spsMap[spsId] -> log2_diff_max_min_luma_coding_block_size;
uint32_t CtbSizeY = 1 << CtbLog2SizeY;
uint32_t PicWidthInCtbsY = m_spsMap[spsId] -> pic_width_in_luma_samples / CtbSizeY;
I am expecting 640 for PicWidthInCtbsY.
int h265_parser_sps(unsigned char * buffer, unsigned int bufferlen, int *width, int *height)
{
    unsigned int StartBit = 0;
    de_emulation_prevention(buffer, &bufferlen);

    uint32_t sps_video_parameter_set_id = 0;
    uint32_t sps_max_sub_layers_minus1 = 0;
    bool sps_temporal_id_nesting_flag;
    uint32_t sps_seq_parameter_set_id = 0;
    uint32_t chroma_format_idc;
    bool separate_colour_plane_flag = false;
    uint32_t pic_width_in_luma_samples = 0;
    uint32_t pic_height_in_luma_samples = 0;
    bool conformance_window_flag;
    uint32_t conf_win_left_offset;
    uint32_t conf_win_right_offset;
    uint32_t conf_win_top_offset;
    uint32_t conf_win_bottom_offset;
    uint32_t bit_depth_luma_minus8;
    uint32_t bit_depth_chroma_minus8;
    uint32_t log2_max_pic_order_cnt_lsb_minus4;
    bool sps_sub_layer_ordering_info_present_flag;
    bool rbsp_stop_one_bit;

    u(16, buffer, StartBit); // nal_unit_header
    sps_video_parameter_set_id = u(4, buffer, StartBit);
    sps_max_sub_layers_minus1 = u(3, buffer, StartBit);
    sps_temporal_id_nesting_flag = u(1, buffer, StartBit);
    cout << "sps_video_parameter_set_id:" << sps_video_parameter_set_id << endl;
    cout << "sps_max_sub_layers_minus1:" << sps_max_sub_layers_minus1 << endl;
    cout << "sps_temporal_id_nesting_flag:" << sps_temporal_id_nesting_flag << endl;
    h265_parse_ptl(sps_max_sub_layers_minus1, buffer, StartBit, bufferlen);
    sps_seq_parameter_set_id = Ue(buffer, bufferlen, StartBit);
    cout << "sps_seq_parameter_set_id:" << sps_seq_parameter_set_id << endl;
    //p_sps = &sps[sps_seq_parameter_set_id];
    chroma_format_idc = Ue(buffer, bufferlen, StartBit);
    cout << "chroma_format_idc:" << chroma_format_idc << endl;
    if (3 == chroma_format_idc)
    {
        separate_colour_plane_flag = u(1, buffer, StartBit);
    }
    else
        separate_colour_plane_flag = 0;
    cout << "separate_colour_plane_flag:" << separate_colour_plane_flag << endl;
    pic_width_in_luma_samples = Ue(buffer, bufferlen, StartBit);
    pic_height_in_luma_samples = Ue(buffer, bufferlen, StartBit);
    cout << "pic_width_in_luma_samples:" << pic_width_in_luma_samples << endl;
    cout << "pic_height_in_luma_samples:" << pic_height_in_luma_samples << endl;
    *width = pic_width_in_luma_samples;
    *height = pic_height_in_luma_samples;
    conformance_window_flag = u(1, buffer, StartBit);
    cout << "conformance_window_flag:" << conformance_window_flag << endl;
    if (conformance_window_flag)
    {
        conf_win_left_offset = Ue(buffer, bufferlen, StartBit);
        conf_win_right_offset = Ue(buffer, bufferlen, StartBit);
        conf_win_top_offset = Ue(buffer, bufferlen, StartBit);
        conf_win_bottom_offset = Ue(buffer, bufferlen, StartBit);
    }
    bit_depth_luma_minus8 = Ue(buffer, bufferlen, StartBit);
    bit_depth_chroma_minus8 = Ue(buffer, bufferlen, StartBit);
    log2_max_pic_order_cnt_lsb_minus4 = Ue(buffer, bufferlen, StartBit);
    cout << "bit_depth_luma_minus8:" << bit_depth_luma_minus8 << endl;
    cout << "bit_depth_chroma_minus8:" << bit_depth_chroma_minus8 << endl;
    cout << "log2_max_pic_order_cnt_lsb_minus4:" << log2_max_pic_order_cnt_lsb_minus4 << endl;
    sps_sub_layer_ordering_info_present_flag = u(1, buffer, StartBit);
    cout << "sps_sub_layer_ordering_info_present_flag:" << sps_sub_layer_ordering_info_present_flag << endl;
    int i;
    uint32_t *sps_max_dec_pic_buffering_minus1 = new uint32_t[sps_max_sub_layers_minus1 + 1];
    uint32_t *sps_max_num_reorder_pics = new uint32_t[sps_max_sub_layers_minus1 + 1];
    uint32_t *sps_max_latency_increase_plus1 = new uint32_t[sps_max_sub_layers_minus1 + 1];
    for (i = (sps_sub_layer_ordering_info_present_flag ? 0 : sps_max_sub_layers_minus1); i <= sps_max_sub_layers_minus1; i++)
    {
        sps_max_dec_pic_buffering_minus1[i] = Ue(buffer, bufferlen, StartBit);
        sps_max_num_reorder_pics[i] = Ue(buffer, bufferlen, StartBit);
        sps_max_latency_increase_plus1[i] = Ue(buffer, bufferlen, StartBit);
    }
    uint32_t log2_min_luma_coding_block_size_minus3;
    uint32_t log2_diff_max_min_luma_coding_block_size;
    uint32_t log2_min_transform_block_size_minus2;
    uint32_t log2_diff_max_min_transform_block_size;
    uint32_t max_transform_hierarchy_depth_inter;
    uint32_t max_transform_hierarchy_depth_intra;
    bool scaling_list_enabled_flag;
    log2_min_luma_coding_block_size_minus3 = Ue(buffer, bufferlen, StartBit);
    log2_diff_max_min_luma_coding_block_size = Ue(buffer, bufferlen, StartBit);
    log2_min_transform_block_size_minus2 = Ue(buffer, bufferlen, StartBit);
    log2_diff_max_min_transform_block_size = Ue(buffer, bufferlen, StartBit);
    max_transform_hierarchy_depth_inter = Ue(buffer, bufferlen, StartBit);
    max_transform_hierarchy_depth_intra = Ue(buffer, bufferlen, StartBit);
    scaling_list_enabled_flag = u(1, buffer, StartBit);

You simply need the specification document of H.265/HEVC (ITU-T Rec. H.265), which you can find here. It's quite a long document, obviously, so look for the right table: the SPS is seq_parameter_set_rbsp( ) in clause 7.3.2.2, and the rightmost column of that table gives the descriptor (u(n), ue(v), se(v), ...) for every syntax element.
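About those descriptors: u(n) is a plain read of n bits, MSB first; ue(v) is an unsigned 0-th order Exp-Golomb code and se(v) is its signed mapping, both defined in clause 9.2 and identical to H.264, so your existing readers should carry over. A minimal sketch of the two Exp-Golomb reads, with read_bit standing in for whatever your u() helper does internally:

#include <cstdint>

// Read one bit (MSB first) at bit position pos and advance pos.
static uint32_t read_bit(const uint8_t* buf, uint32_t& pos)
{
    uint32_t bit = (buf[pos >> 3] >> (7 - (pos & 7))) & 1;
    ++pos;
    return bit;
}

// ue(v): count leading zero bits, read the same number of bits after the
// marker 1 bit; codeNum = 2^leadingZeros - 1 + suffix.
static uint32_t read_ue(const uint8_t* buf, uint32_t& pos)
{
    uint32_t leadingZeros = 0;
    while (read_bit(buf, pos) == 0)
        ++leadingZeros;
    uint32_t suffix = 0;
    for (uint32_t i = 0; i < leadingZeros; ++i)
        suffix = (suffix << 1) | read_bit(buf, pos);
    return (1u << leadingZeros) - 1 + suffix;
}

// se(v): ue(v) remapped to the sequence 0, 1, -1, 2, -2, ...
static int32_t read_se(const uint8_t* buf, uint32_t& pos)
{
    uint32_t k = read_ue(buf, pos);
    return (k & 1) ? (int32_t)((k + 1) >> 1) : -(int32_t)(k >> 1);
}

Also note, for the resolution: pic_width_in_luma_samples and pic_height_in_luma_samples are the coded size. If conformance_window_flag is 1, the display size is smaller: subtract SubWidthC * (conf_win_left_offset + conf_win_right_offset) from the width and SubHeightC * (conf_win_top_offset + conf_win_bottom_offset) from the height, where SubWidthC = SubHeightC = 2 for 4:2:0.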

Related

distorted cv::Mat converted from CMSampleBuffer of video frame

I use AVAssetReader/AVAssetReaderTrackOutput to get CMSampleBuffers from a video. But when I convert a CMSampleBuffer to cv::Mat, the Mat is a distorted image.
Video decode code:
@objc open func startReading() -> Void {
    if let reader = try? AVAssetReader.init(asset: _asset) {
        let videoTrack = _asset.tracks(withMediaType: .video).compactMap { $0 }.first;
        let options = [kCVPixelBufferPixelFormatTypeKey : Int(kCVPixelFormatType_32BGRA)]
        let readerOutput = AVAssetReaderTrackOutput.init(track: videoTrack!, outputSettings: options as [String : Any])
        reader.add(readerOutput)
        reader.startReading()
        var count = 0
        // reading
        while (reader.status == .reading && videoTrack?.nominalFrameRate != 0) {
            let sampleBuffer = readerOutput.copyNextSampleBuffer()
            _delegate?.reader(self, newFrameReady: sampleBuffer, count)
            count = count + 1;
        }
        _delegate?.readerDidFinished(self, totalFrameCount: count)
    }
}
Image convert code:
//convert sampleBuffer in callback of video reader
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
CVPixelBufferLockBaseAddress(imageBuffer, kCVPixelBufferLock_ReadOnly);
char *baseBuffer = (char*)CVPixelBufferGetBaseAddress(imageBuffer);
cv::Mat cvImage = cv::Mat((int)height, (int)width, CV_8UC3);
cv::MatIterator_<cv::Vec3b> it_start = cvImage.begin<cv::Vec3b>();
cv::MatIterator_<cv::Vec3b> it_end = cvImage.end<cv::Vec3b>();
long cur = 0;
while (it_start != it_end) {
    //opt pixel
    long p_idx = cur * 4;
    char b = baseBuffer[p_idx];
    char g = baseBuffer[p_idx + 1];
    char r = baseBuffer[p_idx + 2];
    cv::Vec3b newpixel(b, g, r);
    *it_start = newpixel;
    cur++;
    it_start++;
}
UIImage *tmpImg = MatToUIImage(cvImage);
Preview of tmpImg:
(Image: the distorted output frame.)
I find some videos work fine but some do not. Any help is appreciated!
Finally I figured out that this bug is caused by the padding bytes of the sampleBuffer.
Many APIs pad extra bytes at the end of each image row to optimize the memory layout for SIMD instructions, which process several pixels in parallel.
The code below works.
cv::Mat cvImage = cv::Mat((int)height, (int)width, CV_8UC3);
cv::MatIterator_<cv::Vec3b> it_start = cvImage.begin<cv::Vec3b>();
cv::MatIterator_<cv::Vec3b> it_end = cvImage.end<cv::Vec3b>();
long cur = 0;
// Padding bytes are added behind the image row bytes
size_t padding = CVPixelBufferGetBytesPerRow(imageBuffer) - width * 4;
size_t offset = 0;
while (it_start != it_end) {
    //opt pixel
    long p_idx = cur * 4 + offset;
    char b = baseBuffer[p_idx];
    char g = baseBuffer[p_idx + 1];
    char r = baseBuffer[p_idx + 2];
    cv::Vec3b newpixel(b, g, r);
    *it_start = newpixel;
    cur++;
    it_start++;
    if (cur % width == 0) {
        offset = offset + padding;
    }
}
UIImage *tmpImg = MatToUIImage(cvImage);
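A shorter alternative (my own sketch, not part of the original fix) is to let cv::Mat wrap the locked pixel buffer together with its row stride, then have cvtColor drop the alpha channel:

// The optional last Mat parameter is the row stride in bytes, which
// absorbs the padding; this constructor does not copy any pixels.
cv::Mat bgra((int)height, (int)width, CV_8UC4,
             CVPixelBufferGetBaseAddress(imageBuffer),
             CVPixelBufferGetBytesPerRow(imageBuffer));
cv::Mat bgr;
cv::cvtColor(bgra, bgr, cv::COLOR_BGRA2BGR); // tightly packed 3-channel copy

The wrapping Mat is only valid while the pixel buffer stays locked, so do the cvtColor (which copies) before unlocking.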

How to do real-time pitch shifting from mic with Superpowered? [duplicate]

This question already has answers here:
Superpowered: real time pitch shift with timestretcher not working
(2 answers)
Closed 5 years ago.
I'm trying to pitch shift in real time from a microphone using Superpowered. I looked at the example that works on a file and tried to adapt it. I managed to change the sound, but it came out very distorted, with interference. What am I doing wrong? Where can I find more information on Superpowered and time stretching?
static bool audioProcessing(void *clientdata,
                            float **buffers,
                            unsigned int inputChannels,
                            unsigned int outputChannels,
                            unsigned int numberOfSamples,
                            unsigned int samplerate,
                            uint64_t hostTime) {
    __unsafe_unretained Superpowered *self = (__bridge Superpowered *)clientdata;
    float tempBuffer[numberOfSamples * 2 + 16];
    SuperpoweredInterleave(buffers[0], buffers[1], tempBuffer, numberOfSamples);
    float *outputBuffer = tempBuffer;
    SuperpoweredAudiobufferlistElement inputBuffer;
    inputBuffer.samplePosition = 0;
    inputBuffer.startSample = 0;
    inputBuffer.samplesUsed = 0;
    inputBuffer.endSample = self->timeStretcher->numberOfInputSamplesNeeded;
    inputBuffer.buffers[0] = SuperpoweredAudiobufferPool::getBuffer(self->timeStretcher->numberOfInputSamplesNeeded * 8 + 64);
    inputBuffer.buffers[1] = inputBuffer.buffers[2] = inputBuffer.buffers[3] = NULL;
    memcpy((float *)inputBuffer.buffers[0], outputBuffer, numberOfSamples * 2 + 16);
    self->timeStretcher->process(&inputBuffer, self->outputBuffers);
    // Do we have some output?
    if (self->outputBuffers->makeSlice(0, self->outputBuffers->sampleLength)) {
        while (true) { // Iterate on every output slice.
            // Get pointer to the output samples.
            int sampleCount = 0;
            float *timeStretchedAudio = (float *)self->outputBuffers->nextSliceItem(&sampleCount);
            if (!timeStretchedAudio) break;
            SuperpoweredDeInterleave(timeStretchedAudio, buffers[0], buffers[1], numberOfSamples);
        }
        // Clear the output buffer list.
        self->outputBuffers->clear();
    }
    return true;
}
I did the following:
static bool audioProcessing(void *clientdata,
                            float **buffers,
                            unsigned int inputChannels,
                            unsigned int outputChannels,
                            unsigned int numberOfSamples,
                            unsigned int samplerate,
                            uint64_t hostTime) {
    __unsafe_unretained Superpowered *self = (__bridge Superpowered *)clientdata;
    SuperpoweredAudiobufferlistElement inputBuffer;
    inputBuffer.startSample = 0;
    inputBuffer.samplesUsed = 0;
    inputBuffer.endSample = numberOfSamples;
    inputBuffer.buffers[0] = SuperpoweredAudiobufferPool::getBuffer((unsigned int)(numberOfSamples * 8 + 64));
    inputBuffer.buffers[1] = inputBuffer.buffers[2] = inputBuffer.buffers[3] = NULL;
    SuperpoweredInterleave(buffers[0], buffers[1], (float *)inputBuffer.buffers[0], numberOfSamples);
    self->timeStretcher->process(&inputBuffer, self->outputBuffers);
    // Do we have some output?
    if (self->outputBuffers->makeSlice(0, self->outputBuffers->sampleLength)) {
        while (true) { // Iterate on every output slice.
            // Get pointer to the output samples.
            int numSamples = 0;
            float *timeStretchedAudio = (float *)self->outputBuffers->nextSliceItem(&numSamples);
            if (!timeStretchedAudio || *timeStretchedAudio == 0) {
                break;
            }
            SuperpoweredDeInterleave(timeStretchedAudio, buffers[0], buffers[1], numSamples);
        }
        // Clear the output buffer list.
        self->outputBuffers->clear();
    }
    return true;
}
Compared with my first attempt, this version interleaves directly into the pool buffer, sets endSample to the actual numberOfSamples instead of the time stretcher's numberOfInputSamplesNeeded, and de-interleaves each slice using its own sample count rather than the input count. It might not work correctly when changing the rate as well, but I wanted live pitch shifting only; people can simply speak slower or faster themselves.
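For anyone puzzled by the buffer shapes above: SuperpoweredInterleave/SuperpoweredDeInterleave convert between planar stereo (separate left/right arrays, as the audio callback delivers them in buffers[0]/buffers[1]) and interleaved LRLR... frames (which is why the temp buffers hold numberOfSamples * 2 floats). A plain C++ sketch of that layout conversion, my own illustration rather than Superpowered code:

// Planar (L[], R[]) -> interleaved (LRLRLR...).
static void interleaveStereo(const float* left, const float* right,
                             float* out, unsigned int numFrames)
{
    for (unsigned int i = 0; i < numFrames; ++i) {
        out[2 * i]     = left[i];
        out[2 * i + 1] = right[i];
    }
}

// Interleaved (LRLRLR...) -> planar (L[], R[]).
static void deinterleaveStereo(const float* in, float* left, float* right,
                               unsigned int numFrames)
{
    for (unsigned int i = 0; i < numFrames; ++i) {
        left[i]  = in[2 * i];
        right[i] = in[2 * i + 1];
    }
}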

How to convert cv::Mat to pcl::pointcloud

How do I get from an OpenCV Mat point cloud to a pcl::PointCloud? The color is not important for me, only the points themselves.
You can do it like this:
pcl::PointCloud<pcl::PointXYZ>::Ptr SimpleOpenNIViewer::MatToPoinXYZ(cv::Mat OpencVPointCloud)
{
    /*
     * Function: Get from a Mat to pcl pointcloud datatype
     * In: cv::Mat
     * Out: pcl::PointCloud
     */
    //char pr=100, pg=100, pb=100;
    pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr(new pcl::PointCloud<pcl::PointXYZ>);
    for (int i = 0; i < OpencVPointCloud.cols; i++)
    {
        //std::cout<<i<<endl;
        pcl::PointXYZ point;
        point.x = OpencVPointCloud.at<float>(0, i);
        point.y = OpencVPointCloud.at<float>(1, i);
        point.z = OpencVPointCloud.at<float>(2, i);
        // when color needs to be added:
        //uint32_t rgb = (static_cast<uint32_t>(pr) << 16 | static_cast<uint32_t>(pg) << 8 | static_cast<uint32_t>(pb));
        //point.rgb = *reinterpret_cast<float*>(&rgb);
        point_cloud_ptr->points.push_back(point);
    }
    point_cloud_ptr->width = (int)point_cloud_ptr->points.size();
    point_cloud_ptr->height = 1;
    return point_cloud_ptr;
}
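For example (a sketch with made-up data, not from the answer), the function above expects a 3xN CV_32FC1 Mat with one point per column:

cv::Mat pts(3, 5, CV_32FC1);
cv::randu(pts, cv::Scalar(-1.0f), cv::Scalar(1.0f)); // 5 random points, one per column
// viewer is assumed to be your SimpleOpenNIViewer instance
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud = viewer.MatToPoinXYZ(pts);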
And also the other way:
cv::Mat MVW_ICP::PoinXYZToMat(pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr)
{
    cv::Mat OpenCVPointCloud(3, point_cloud_ptr->points.size(), CV_64FC1);
    for (int i = 0; i < point_cloud_ptr->points.size(); i++)
    {
        OpenCVPointCloud.at<double>(0, i) = point_cloud_ptr->points.at(i).x;
        OpenCVPointCloud.at<double>(1, i) = point_cloud_ptr->points.at(i).y;
        OpenCVPointCloud.at<double>(2, i) = point_cloud_ptr->points.at(i).z;
    }
    return OpenCVPointCloud;
}
To convert a range image captured by a Kinect sensor and represented by depthMat to a pcl::PointCloud, you can try this function. The calibration parameters are those used here.
pcl::PointCloud<pcl::PointXYZ>::Ptr MatToPoinXYZ(cv::Mat depthMat)
{
    pcl::PointCloud<pcl::PointXYZ>::Ptr ptCloud(new pcl::PointCloud<pcl::PointXYZ>);
    // calibration parameters
    float const fx_d = 5.9421434211923247e+02;
    float const fy_d = 5.9104053696870778e+02;
    float const cx_d = 3.3930780975300314e+02;
    float const cy_d = 2.4273913761751615e+02;
    unsigned char* p = depthMat.data;
    for (int i = 0; i < depthMat.rows; i++)
    {
        for (int j = 0; j < depthMat.cols; j++)
        {
            float z = static_cast<float>(*p);
            pcl::PointXYZ point;
            point.z = 0.001 * z;
            point.x = point.z * (j - cx_d) / fx_d;
            point.y = point.z * (cy_d - i) / fy_d;
            ptCloud->points.push_back(point);
            ++p;
        }
    }
    ptCloud->width = (int)depthMat.cols;
    ptCloud->height = (int)depthMat.rows;
    return ptCloud;
}
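For reference, the function implements the standard pinhole back-projection. With the raw depth value in millimetres at pixel (j, i), principal point (cx_d, cy_d) and focal lengths (fx_d, fy_d) in pixels:

z = depth / 1000,  x = z * (j - cx_d) / fx_d,  y = z * (cy_d - i) / fy_d

Note that this code flips the vertical axis by using (cy_d - i) rather than (i - cy_d).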

Mean image with two functions difference

I want to process an image so that each pixel value will be the mean of its value and its 4 neighbours.
I created two different functions:
Mat meanImage(cv::Mat& inputImage)
{
    Mat output;
    Mat kernel(3, 3, CV_32F, 0.0);
    kernel.at<float>(0,1) = 0.2;
    kernel.at<float>(1,0) = 0.2;
    kernel.at<float>(1,1) = 0.2;
    kernel.at<float>(1,2) = 0.2;
    kernel.at<float>(2,1) = 0.2;
    filter2D(inputImage, output, -1, kernel);
    return output;
}
and:
Mat meanImage2(Mat& inputImage)
{
    Mat temp;
    Mat output(inputImage.rows, inputImage.cols, inputImage.type());
    copyMakeBorder(inputImage, temp, 1, 1, 1, 1, BORDER_REPLICATE);
    CV_Assert(output.isContinuous());
    CV_Assert(temp.isContinuous());
    const int len = output.rows * output.cols * output.channels();
    const int rowLenTemp = temp.cols * temp.channels();
    const int twoRowLenTemp = 2 * rowLenTemp;
    const int rowLen = output.cols * output.channels();
    uchar* outPtr = output.ptr<uchar>(0);
    uchar* tempPtr = temp.ptr<uchar>(0);
    for (int i = 0; i < len; ++i)
    {
        const int a = 6 * (i / rowLen) + 3;
        outPtr[i] = (tempPtr[i+rowLenTemp+a] + tempPtr[i+a] +
                     tempPtr[i+rowLenTemp+a+3] + tempPtr[i+rowLenTemp+a-3] +
                     tempPtr[i+twoRowLenTemp+a]) / 5;
    }
    return output;
}
I've assumed that the result will be the same. So I've compared images:
Mat diff;
compare(meanImg1,meanImg2,diff,CMP_NE);
printf("Difference: %d\n",countNonZero(diff));
imshow("diff",diff);
And I get a lot of differences. What is the difference between these functions?
Edit:
Difference for the Lena image (taken from Lena):
(Image: the non-zero difference mask.)
Beware that when you do the sum of pixels, you add unsigned chars and you may overflow.
Test your code by casting these pixel values to int:
outPtr[i] = ((int)tempPtr[i+rowLenTemp+a] + (int)tempPtr[i+a] +
             (int)tempPtr[i+rowLenTemp+a+3] + (int)tempPtr[i+rowLenTemp+a-3] +
             (int)tempPtr[i+twoRowLenTemp+a]) / 5;
Edit: I'd rather code it like this (assuming the image type is uchar with 3 channels):
for (int r = 0; r < output.rows; r++)
{
    uchar* previousRow = temp.ptr<uchar>(r) + 3;
    uchar* currentRow = temp.ptr<uchar>(r+1) + 3;
    uchar* nextRow = temp.ptr<uchar>(r+2) + 3;
    uchar* outRow = output.ptr<uchar>(r);
    for (int c = 0; c < 3*output.cols; c++)
    {
        int value = (int)previousRow[c] +
                    (int)currentRow[c-3] + (int)currentRow[c] + (int)currentRow[c+3] +
                    (int)nextRow[c];
        outRow[c] = value / 5;
    }
}
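One more source of differences worth checking, beyond the summation itself (this note is mine, not part of the original answer): filter2D pads the image with BORDER_REFLECT_101 by default, while meanImage2 uses copyMakeBorder(..., BORDER_REPLICATE), so edge pixels can differ even when the interior matches; filter2D also rounds the filtered value to the nearest integer, whereas the integer division by 5 truncates. The border handling can be aligned like this:

// Pass BORDER_REPLICATE explicitly so filter2D pads the same way as meanImage2.
cv::filter2D(inputImage, output, -1, kernel, cv::Point(-1, -1), 0, cv::BORDER_REPLICATE);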

SlimDX (DirectX10) - How to change a texel in Texture?

I am trying to change the texels of a texture which is already loaded.
My assumption was to use the Texture2D::Map and Unmap functions, but there is no change when I change the data of the given DataRectangle.
I need a simple example, like creating a 128x128 texture with a gradient from black to white from each side.
Thanks
PS: a Direct3D 10 C++ example may also help; SlimDX is only a wrapper and has almost exactly the same functions.
This is my D3D10 2D texture loader
bool D3D10Texture::Init( GFXHandler* pHandler, unsigned int usage, unsigned int width, unsigned int height, unsigned int textureType, bool bMipmapped, void* pTextureData )
{
    mMipmapped = bMipmapped;
    //SetData( pHandler, 0 );
    D3D10Handler* pD3DHandler = (D3D10Handler*)pHandler;
    ID3D10Device* pDevice = pD3DHandler->GetDevice();
    DXGI_SAMPLE_DESC dxgiSampleDesc;
    dxgiSampleDesc.Count = 1;
    dxgiSampleDesc.Quality = 0;
    D3D10_USAGE d3d10Usage;
    if ( usage & RU_All_Dynamic ) d3d10Usage = D3D10_USAGE_DYNAMIC;
    else d3d10Usage = D3D10_USAGE_DEFAULT;
    //unsigned int cpuAccess = D3D10_CPU_ACCESS_WRITE;
    //if ( (usage & RU_Buffer_WriteOnly) == 0 ) cpuAccess |= D3D10_CPU_ACCESS_READ;
    unsigned int cpuAccess = 0;
    if ( !pTextureData )
    {
        cpuAccess = D3D10_CPU_ACCESS_WRITE;
        //if ( (usage & RU_Buffer_WriteOnly) == 0 ) cpuAccess |= D3D10_CPU_ACCESS_READ;
    }
    unsigned int bindFlags = D3D10_BIND_SHADER_RESOURCE;
    if ( usage & RU_Texture_RenderTarget ) bindFlags |= D3D10_BIND_RENDER_TARGET;
    unsigned int miscFlags = 0;
    if ( usage & RU_Texture_AutoGenMipmap ) miscFlags |= D3D10_RESOURCE_MISC_GENERATE_MIPS;
    D3D10_TEXTURE2D_DESC d3d10Texture2DDesc;
    d3d10Texture2DDesc.Width = width;
    d3d10Texture2DDesc.Height = height;
    d3d10Texture2DDesc.MipLevels = GetNumMipMaps( width, height, bMipmapped );
    d3d10Texture2DDesc.ArraySize = 1;
    d3d10Texture2DDesc.Format = GetD3DFormat( (TextureTypes)textureType );
    d3d10Texture2DDesc.SampleDesc = dxgiSampleDesc;
    d3d10Texture2DDesc.Usage = d3d10Usage;
    d3d10Texture2DDesc.BindFlags = D3D10_BIND_SHADER_RESOURCE;
    d3d10Texture2DDesc.CPUAccessFlags = cpuAccess;
    d3d10Texture2DDesc.MiscFlags = miscFlags;
    //D3D10_SUBRESOURCE_DATA d3d10SubResourceData;
    //d3d10SubResourceData.pSysMem = pTextureData;
    //d3d10SubResourceData.SysMemPitch = GetPitch( width, (TextureTypes)textureType );
    //d3d10SubResourceData.SysMemSlicePitch = 0;
    D3D10_SUBRESOURCE_DATA* pSubResourceData = NULL;
    if ( pTextureData )
    {
        pSubResourceData = new D3D10_SUBRESOURCE_DATA[d3d10Texture2DDesc.MipLevels];
        char* pTexPos = (char*)pTextureData;
        unsigned int pitch = GetPitch( width, (TextureTypes)textureType );
        unsigned int count = 0;
        unsigned int max = d3d10Texture2DDesc.MipLevels;
        while( count < max )
        {
            pSubResourceData[count].pSysMem = pTexPos;
            pSubResourceData[count].SysMemPitch = pitch;
            pSubResourceData[count].SysMemSlicePitch = 0;
            pTexPos += pitch * height;
            pitch >>= 1;
            count++;
        }
    }
    if ( FAILED( pDevice->CreateTexture2D( &d3d10Texture2DDesc, pSubResourceData, &mpTexture ) ) )
    {
        return false;
    }
    if ( pSubResourceData )
    {
        delete[] pSubResourceData;
        pSubResourceData = NULL;
    }
    mWidth = width;
    mHeight = height;
    mFormat = (TextureTypes)textureType;
    mpTexture->AddRef();
    mpTexture->Release();
    D3D10_SHADER_RESOURCE_VIEW_DESC d3d10ShaderResourceViewDesc;
    d3d10ShaderResourceViewDesc.Format = d3d10Texture2DDesc.Format;
    d3d10ShaderResourceViewDesc.ViewDimension = D3D10_SRV_DIMENSION_TEXTURE2D;
    d3d10ShaderResourceViewDesc.Texture2D.MostDetailedMip = 0;
    d3d10ShaderResourceViewDesc.Texture2D.MipLevels = GetNumMipMaps( width, height, bMipmapped );
    if ( FAILED( pDevice->CreateShaderResourceView( mpTexture, &d3d10ShaderResourceViewDesc, &mpView ) ) )
    {
        return false;
    }
    ResourceRecorder::Instance()->AddResource( this );
    return true;
}
With that function, all you need to do is pass in the white-to-black texture data. For example, to write a 256x256 texture where each horizontal line is one step brighter than the previous line, the following code will work:
int* pTexture = new int[256 * 256];
int count = 0;
while( count < 256 )
{
    int count2 = 0;
    while( count2 < 256 )
    {
        pTexture[(count * 256) + count2] = 0xff000000 | (count << 16) | (count << 8) | count;
        count2++;
    }
    count++;
}
Make sure you follow the rules in the "Resource Usage Restrictions" section:
MSDN: D3D10_USAGE
public void NewData(byte[] newData)
{
    DataRectangle mappedTex = null;
    // assign and lock the resource
    mappedTex = pTexture.Map(0, D3D10.MapMode.WriteDiscard, D3D10.MapFlags.None);
    // if unable to hold texture
    if (!mappedTex.Data.CanWrite)
    {
        throw new ApplicationException("Cannot Write to the Texture");
    }
    // write new data to the texture
    mappedTex.Data.WriteRange<byte>(newData);
    // unlock the resource
    pTexture.Unmap(0);
    if (samplerflag)
        temptex = newData;
}
This overwrites the whole buffer on every new frame; you may want to use D3D10.MapMode.ReadWrite or similar if you are only trying to write one texel. You will also need to write to the DataRectangle at a specific position, using one of the other write functions.
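Since the question asks for a plain Direct3D 10 C++ example as well: a minimal sketch of updating an existing texture through Map/Unmap, honoring the row pitch. It assumes pTexture is an ID3D10Texture2D* created 128x128 with DXGI_FORMAT_R8G8B8A8_UNORM, D3D10_USAGE_DYNAMIC and D3D10_CPU_ACCESS_WRITE:

D3D10_MAPPED_TEXTURE2D mapped;
UINT subresource = D3D10CalcSubresource(0, 0, 1); // mip 0, array slice 0
if (SUCCEEDED(pTexture->Map(subresource, D3D10_MAP_WRITE_DISCARD, 0, &mapped)))
{
    unsigned char* pDest = (unsigned char*)mapped.pData;
    for (unsigned int y = 0; y < 128; ++y)
    {
        // RowPitch may be larger than 128 * 4, so advance row by row.
        unsigned int* pRow = (unsigned int*)(pDest + y * mapped.RowPitch);
        for (unsigned int x = 0; x < 128; ++x)
        {
            unsigned int shade = x * 255 / 127; // black -> white, left to right
            pRow[x] = 0xff000000 | (shade << 16) | (shade << 8) | shade;
        }
    }
    pTexture->Unmap(subresource);
}

Note that WRITE_DISCARD hands you an uninitialized buffer, so write every texel; for touching a single texel of a non-dynamic resource, ID3D10Device::UpdateSubresource is the usual alternative.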
