I am trying to modify my square-wave generator so that it produces eight pulses per period, each with a different pulse width and separated by a 0.3 ms gap. I can see that sampleBuffer holds the generated pulse signal, but I am not sure how to write a function that produces such a specific pattern. Is there a library function in AudioTrack.h for generating pulses like this?
Below is my code for generating the square wave:
void generateSquare(SInt16 *sampleBuffer, int numFrames, float sampleRate, float frequency, float amp) {
    if (amp > 1) amp = 1;
    if (amp < 0) amp = 0;
    amp = amp * SHRT_MAX;

    float samplesPerCycle = sampleRate / frequency;
    for (int i = 0; i < numFrames; i++) {
        if (fmodf(squareIndex, samplesPerCycle) / samplesPerCycle > 0.5) {
            sampleBuffer[i] = amp;
        } else {
            sampleBuffer[i] = -1 * amp;
        }
        squareIndex = squareIndex + 1;
        if (squareIndex >= samplesPerCycle) squareIndex -= samplesPerCycle;
    }
}
Here is my solution to a nearly identical problem.
In my case I generate a pulse with a nominal width of 1 ms, which I adjust by ±0.5 ms through the fill value.
So, depending on fillValue, I generate a square wave with a pulse width between 0.5 ms and 1.5 ms.
int squareIndex = 0;

void generateSquare(SInt16 *sampleBuffer, int numFrames, float sampleRate, float fillValue, float amp) {
    // Fill value = pulse width value in frames
    // fillValue = [-20, 20];
    if (amp > 1) amp = 1;
    if (amp < 0) amp = 0;
    if (fillValue > 20) fillValue = 20;
    if (fillValue < -20) fillValue = -20;
    amp = amp * SHRT_MAX;

    float samplesPerCycle = sampleRate / 50;
    // Samples / cycle = 882
    // 1 ms = 41 frames -> 0.5 ms = 20(.5) frames
    // In your case 0.3 ms = 12(.3) frames

#pragma mark - PWM
    for (int i = 0; i < numFrames; i++) {
        //if (fmodf(squareIndex, samplesPerCycle) / samplesPerCycle < 0.05) {
        if (squareIndex < 41 + fillValue) {
            sampleBuffer[i] = 1 * SHRT_MAX;
        } else {
            sampleBuffer[i] = 0;
        }
        squareIndex = squareIndex + 1;
        if (squareIndex >= samplesPerCycle) squareIndex -= samplesPerCycle;
    }
}
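If you need the specific pattern from the original question — eight pulses per period, each with its own width and a 0.3 ms gap after it — you can extend the same per-frame approach. As far as I know there is no ready-made pulse generator for this in AudioTrack.h / Core Audio; you fill the sample buffer yourself. Below is a minimal sketch, assuming a 44.1 kHz sample rate (so 0.3 ms ≈ 13 frames); the pulse widths and the function name are placeholders, not values from the code above.

static int patternIndex = 0;   // position inside the current period, in frames

void generatePulsePattern(SInt16 *sampleBuffer, int numFrames, float amp) {
    // Eight pulse widths in frames (example values only) and a ~0.3 ms gap after each pulse.
    const int pulseWidths[8] = { 10, 20, 30, 40, 50, 60, 70, 80 };
    const int gapFrames = 13;
    int periodFrames = 0;
    for (int p = 0; p < 8; p++) periodFrames += pulseWidths[p] + gapFrames;

    if (amp > 1) amp = 1;
    if (amp < 0) amp = 0;
    SInt16 high = (SInt16)(amp * SHRT_MAX);

    for (int i = 0; i < numFrames; i++) {
        // Decide whether patternIndex falls inside pulse p or inside the gap that follows it.
        int pos = patternIndex;
        SInt16 value = 0;
        for (int p = 0; p < 8; p++) {
            if (pos < pulseWidths[p]) { value = high; break; }
            pos -= pulseWidths[p];
            if (pos < gapFrames) { value = 0; break; }
            pos -= gapFrames;
        }
        sampleBuffer[i] = value;
        if (++patternIndex >= periodFrames) patternIndex = 0;
    }
}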
I tried to implement histogram equalization to enhance the contrast of images, but my code doesn't work. When I display the image's original histogram and the histogram after processing by my code, I see that the output histogram only has a value at 0 and no values at the other pixel intensities. I don't know why. Here is my code:
private: System::Void histogramEqualizationToolStripMenuItem_Click(System::Object^ sender, System::EventArgs^ e) {
    Raw_Intensity = ConvertBMPToIntensity(Buffer, Width, Height);

    int histogram[256] = { 0 };
    int equalizedHistogram[256] = { 0 };
    int runningSum = 0;
    int numberOfPixels = Width * Height;

    for (int row = 0; row < Height; row++)
    {
        for (int column = 0; column < Width; column++)
        {
            histogram[Raw_Intensity[row * Width + column]]++;
        }
    }

    for (int i = 0; i < 256; i++)
    {
        runningSum += histogram[i];
        int index = round(((runningSum / numberOfPixels) * 255));
        equalizedHistogram[index] += histogram[i];
    }
}
I think an integer-division issue is occurring: runningSum / numberOfPixels is evaluated in integer arithmetic, so it is 0 for every bin until runningSum reaches numberOfPixels, which is why everything lands in bin 0.
Change this line:
int index = round(((runningSum / numberOfPixels) * 255));
to
int index = round(((runningSum * 1.0 / numberOfPixels) * 255));
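For context, here is a minimal sketch of the full corrected mapping (plain C++, assuming the same histogram, numberOfPixels and Raw_Intensity variables as in the question's code); the * 1.0 promotes the division to floating point before it is scaled and rounded.

int lut[256] = { 0 };
int runningSum = 0;
for (int i = 0; i < 256; i++) {
    runningSum += histogram[i];
    // Cumulative distribution scaled to [0, 255].
    lut[i] = (int)round((runningSum * 1.0 / numberOfPixels) * 255);
}
// Apply the lookup table to every pixel to obtain the equalized image.
for (int p = 0; p < numberOfPixels; p++) {
    Raw_Intensity[p] = lut[Raw_Intensity[p]];
}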
I want to swap the U and V bits in a YUV buffer, converting NV12
YYYYYYYY UVUV // each letter represents a bit
to NV21
YYYYYYYY VUVU
I leave the Y plane alone and handle the U and V plane with the function below:
uchar swap(uchar in) {
    uchar out = ((in >> 1) & 0x55) | ((in << 1) & 0xaa);
    return out;
}
But I cannot get the desired result; the colour of the output image is still not correct.
How can I swap the U and V planes correctly?
Found the problem: UV should be manipulated at the byte level, not the bit level.
byte[] yuv = // ...
final int length = yuv.length;
for (int i1 = 0; i1 < length; i1 += 2) {
    if (i1 >= width * height) {
        byte tmp = yuv[i1];
        yuv[i1] = yuv[i1 + 1];
        yuv[i1 + 1] = tmp;
    }
}
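For reference, the same byte-level swap written in C++ (a minimal sketch, assuming a contiguous NV12 buffer of width * height * 3 / 2 bytes):

#include <cstdint>
#include <cstddef>
#include <utility>

// Convert NV12 to NV21 in place by swapping each U/V byte pair in the
// interleaved chroma plane; the Y plane is left untouched.
void nv12ToNv21InPlace(uint8_t *yuv, int width, int height) {
    const size_t ySize = static_cast<size_t>(width) * height;  // size of the Y plane
    const size_t total = ySize * 3 / 2;                        // full NV12 buffer size
    for (size_t i = ySize; i + 1 < total; i += 2) {
        std::swap(yuv[i], yuv[i + 1]);                         // UV -> VU
    }
}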
try this method (-_-)
IFrameCallback iFrameCallback = new IFrameCallback() {
    @Override
    public void onFrame(ByteBuffer frame) {
        // get NV12 data
        byte[] b = new byte[frame.remaining()];
        frame.get(b);
        // convert NV12 data to NV21
        NV12ToNV21(b, 1280, 720);
        // send NV21 data
        BVPU.InputVideoData(nv21, nv21.length,
                System.currentTimeMillis() * 1000, 1280, 720);
    }
};
byte[] nv21;

private void NV12ToNV21(byte[] data, int width, int height) {
    nv21 = new byte[data.length];
    int framesize = width * height;

    // Copy the Y plane unchanged.
    System.arraycopy(data, 0, nv21, 0, framesize);

    // Swap each interleaved chroma byte pair: NV12 stores U then V, NV21 stores V then U.
    for (int j = 0; j < framesize / 2; j += 2) {
        nv21[framesize + j]     = data[framesize + j + 1]; // V
        nv21[framesize + j + 1] = data[framesize + j];     // U
    }
}
How do I get from an OpenCV cv::Mat point cloud to a pcl::PointCloud? The colour is not important for me, only the points themselves.
You can do it like this:
pcl::PointCloud<pcl::PointXYZ>::Ptr SimpleOpenNIViewer::MatToPoinXYZ(cv::Mat OpencVPointCloud)
{
    /*
     * Function: Get from a Mat to pcl pointcloud datatype
     * In: cv::Mat
     * Out: pcl::PointCloud
     */

    //char pr=100, pg=100, pb=100;
    pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr(new pcl::PointCloud<pcl::PointXYZ>);

    for (int i = 0; i < OpencVPointCloud.cols; i++)
    {
        //std::cout << i << endl;
        pcl::PointXYZ point;
        point.x = OpencVPointCloud.at<float>(0, i);
        point.y = OpencVPointCloud.at<float>(1, i);
        point.z = OpencVPointCloud.at<float>(2, i);

        // when color needs to be added:
        //uint32_t rgb = (static_cast<uint32_t>(pr) << 16 | static_cast<uint32_t>(pg) << 8 | static_cast<uint32_t>(pb));
        //point.rgb = *reinterpret_cast<float*>(&rgb);

        point_cloud_ptr->points.push_back(point);
    }
    point_cloud_ptr->width = (int)point_cloud_ptr->points.size();
    point_cloud_ptr->height = 1;

    return point_cloud_ptr;
}
And also the other way around:
cv::Mat MVW_ICP::PoinXYZToMat(pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr) {
    cv::Mat OpenCVPointCloud(3, point_cloud_ptr->points.size(), CV_64FC1);
    for (int i = 0; i < point_cloud_ptr->points.size(); i++) {
        OpenCVPointCloud.at<double>(0, i) = point_cloud_ptr->points.at(i).x;
        OpenCVPointCloud.at<double>(1, i) = point_cloud_ptr->points.at(i).y;
        OpenCVPointCloud.at<double>(2, i) = point_cloud_ptr->points.at(i).z;
    }
    return OpenCVPointCloud;
}
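A quick usage sketch (treating the two converters as free functions for brevity, and assuming the layouts they expect: a CV_32FC1 Mat with one point per column going in, a CV_64FC1 Mat coming back):

cv::Mat pts(3, 4, CV_32FC1);
cv::randu(pts, cv::Scalar(-1.0f), cv::Scalar(1.0f));              // fill with random test points

pcl::PointCloud<pcl::PointXYZ>::Ptr cloud = MatToPoinXYZ(pts);    // cv::Mat -> PCL cloud
cv::Mat back = PoinXYZToMat(cloud);                               // PCL cloud -> cv::Mat
std::cout << "cloud size: " << cloud->points.size()
          << ", matrix: " << back.rows << "x" << back.cols << std::endl;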
To convert a range image captured by a Kinect sensor and stored in depthMat into a pcl::PointCloud, you can try this function. The calibration parameters are the ones used here.
pcl::PointCloud<pcl::PointXYZ>::Ptr MatToPoinXYZ(cv::Mat depthMat)
{
    pcl::PointCloud<pcl::PointXYZ>::Ptr ptCloud(new pcl::PointCloud<pcl::PointXYZ>);

    // calibration parameters
    float const fx_d = 5.9421434211923247e+02;
    float const fy_d = 5.9104053696870778e+02;
    float const cx_d = 3.3930780975300314e+02;
    float const cy_d = 2.4273913761751615e+02;

    unsigned char* p = depthMat.data;
    for (int i = 0; i < depthMat.rows; i++)
    {
        for (int j = 0; j < depthMat.cols; j++)
        {
            float z = static_cast<float>(*p);
            pcl::PointXYZ point;
            point.z = 0.001 * z;
            point.x = point.z * (j - cx_d) / fx_d;
            point.y = point.z * (cy_d - i) / fy_d;
            ptCloud->points.push_back(point);
            ++p;
        }
    }
    ptCloud->width = (int)depthMat.cols;
    ptCloud->height = (int)depthMat.rows;

    return ptCloud;
}
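One caveat: the function above walks depthMat.data as unsigned char, so it assumes an 8-bit, single-channel depth image. Kinect depth is often delivered as 16-bit values in millimetres (CV_16UC1); under that assumption a variant might read the depth with Mat::at<unsigned short> instead (a sketch reusing the same calibration constants):

pcl::PointCloud<pcl::PointXYZ>::Ptr depth16ToCloud(const cv::Mat &depthMat)
{
    pcl::PointCloud<pcl::PointXYZ>::Ptr ptCloud(new pcl::PointCloud<pcl::PointXYZ>);
    const float fx_d = 5.9421434211923247e+02f, fy_d = 5.9104053696870778e+02f;
    const float cx_d = 3.3930780975300314e+02f, cy_d = 2.4273913761751615e+02f;

    for (int i = 0; i < depthMat.rows; i++) {
        for (int j = 0; j < depthMat.cols; j++) {
            float z = depthMat.at<unsigned short>(i, j) * 0.001f;  // millimetres -> metres
            pcl::PointXYZ point;
            point.z = z;
            point.x = z * (j - cx_d) / fx_d;
            point.y = z * (cy_d - i) / fy_d;
            ptCloud->points.push_back(point);
        }
    }
    ptCloud->width = depthMat.cols;
    ptCloud->height = depthMat.rows;
    return ptCloud;
}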
I'm trying to group similar hues together using a given threshold. It works quite well except for red values: since red sits near both 0 and 180 in OpenCV, I'm having trouble grouping, say, 3-degree and 179-degree hues into the same cluster.
The hues are stored in a Vector.
I have created a function with the following signature:
Vector<uchar> getGroupedHues(Vector<uchar> hues, int threshold);
The final goal is to build a Smarties counter. I have isolated the individual Smarties and now I want to find the hue of each one to classify them.
I based my code on this page. The algorithm that clusters the hues is at the end, but as I said, I'm struggling with values near 0/180 degrees.
Thanks for helping!
UPDATE
This is the code I have written.
// Creates a cluster of hues that are within a threshold
Vector<uchar> getClusteredHues(Vector<uchar> values, int threshold) {
    int nbBin = 180;
    Vector<uchar> groups(nbBin, 0);

    // Sorting the hues
    sort(values.begin(), values.end());

    Point2f previous = getPointFromAngle(values[0]);
    Point2f currentCluster = previous;
    Point2f currentValue;
    Point2f delta;
    Point2f thresholdXY = getPointFromAngle(threshold);

    groups[values[0]]++;
    for (int i = 1; i < values.size(); i++) {
        currentValue = getPointFromAngle(values[i]);
        delta = currentValue - previous;
        if (delta.x < thresholdXY.x && delta.y < thresholdXY.y) {
            groups[(int)(atan2(currentCluster.y, currentCluster.x) * 180 / CV_PI)]++;
        }
        else {
            currentCluster = currentValue;
            groups[(int)(atan2(currentCluster.y, currentCluster.x) * 180 / CV_PI)]++;
        }
        previous = currentValue;
    }
    return groups;
}
OK, I have found a workaround. I always check whether the current value is near the upper limit; if so, the current group becomes the first group.
Here is the code:
Vector<uchar> getClusteredHues(Vector<uchar> values, int threshold) {
    int nbBin = 180;
    Vector<uchar> clusters(nbBin, 0);

    // Sort the hues
    sort(values.begin(), values.end());

    int previous = values[0];
    int currentCluster = previous;
    int currentValue;
    int delta;
    int halfThreshold = threshold / 2;
    int firstCluster = values[0];

    clusters[values[0]]++;
    for (int i = 1; i < values.size(); i++) {
        currentValue = values[i];
        delta = currentValue - previous;
        // If the value wraps past 180, compare it against the first cluster instead.
        if (currentValue + threshold > nbBin) {
            if (abs(firstCluster - (currentValue + threshold - nbBin)) < threshold) {
                delta = 0;
                currentCluster = firstCluster;
            }
        }
        if (delta < threshold) {
            clusters[currentCluster]++;
        }
        else {
            currentCluster = currentValue;
            clusters[currentCluster]++;
        }
        previous = currentValue;
    }
    return clusters;
}
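An alternative to special-casing the wrap-around is to compare hues with a circular distance, so that 3 and 179 are only 4 apart. A minimal sketch of such a helper (plain C++, not part of the code above):

#include <algorithm>
#include <cstdlib>

// Circular distance between two OpenCV hues in [0, 180).
int hueDistance(int a, int b) {
    int d = std::abs(a - b) % 180;   // raw difference on the 180-degree circle
    return std::min(d, 180 - d);     // take the shorter way around
}
// Example: hueDistance(3, 179) == 4, so 3 and 179 land in the same cluster
// for any threshold of 4 or more.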
I'm writing a Gaussian filter, and my goal is to match Photoshop's Gaussian Blur filter as closely as possible. This is my first image-processing endeavor. Some problems/questions I have:
Further blurring an image with my filter darkens it, while Photoshop's seems to lighten it.
The deviation ("sigma" in my code) I'm using is r/3, which makes the Gaussian curve fall to about 0.0001 at the edge of the matrix. Is there a better way to determine this value?
How does Photoshop (or most people) handle image borders for this type of blur?
int matrixDimension = (radius*2)+1;
float sigma = radius/3;
float twoSigmaSquared = 2*pow(sigma, 2);
float oneOverSquareRootOfTwoPiSigmaSquared = 1/(sqrt(M_PI*twoSigmaSquared));

float kernel[matrixDimension];
int index = 0;
for (int offset = -radius; offset <= radius; offset++) {
    float xSquared = pow(offset, 2);
    float exponent = -(xSquared/twoSigmaSquared);
    float eToThePower = pow(M_E, exponent);
    float multFactor = oneOverSquareRootOfTwoPiSigmaSquared*eToThePower;
    kernel[index] = multFactor;
    index++;
}

//Normalize the kernel such that all its values will add to 1
float sum = 0;
for (int i = 0; i < matrixDimension; i++) {
    sum += kernel[i];
}
for (int i = 0; i < matrixDimension; i++) {
    kernel[i] = kernel[i]/sum;
}

//Blur horizontally
for (int row = 0; row < imageHeight; row++) {
    for (int column = 0; column < imageWidth; column++) {
        int currentPixel = (row*imageWidth)+column;

        int sum1 = 0;
        int sum2 = 0;
        int sum3 = 0;
        int sum4 = 0;

        int index = 0;
        for (int offset = -radius; offset <= radius; offset++) {
            if (!(column+offset < 0) && !(column+offset > imageWidth-1)) {
                int firstByteOfPixelWereLookingAtInSrcData = (currentPixel+offset)*4;

                int in1 = srcData[firstByteOfPixelWereLookingAtInSrcData];
                int in2 = srcData[firstByteOfPixelWereLookingAtInSrcData+1];
                int in3 = srcData[firstByteOfPixelWereLookingAtInSrcData+2];
                int in4 = srcData[firstByteOfPixelWereLookingAtInSrcData+3];

                sum1 += (int)(in1 * kernel[index]);
                sum2 += (int)(in2 * kernel[index]);
                sum3 += (int)(in3 * kernel[index]);
                sum4 += (int)(in4 * kernel[index]);
            }
            index++;
        }

        int currentPixelInData = currentPixel*4;
        destData[currentPixelInData] = sum1;
        destData[currentPixelInData+1] = sum2;
        destData[currentPixelInData+2] = sum3;
        destData[currentPixelInData+3] = sum4;
    }
}

//Blur vertically
for (int row = 0; row < imageHeight; row++) {
    for (int column = 0; column < imageWidth; column++) {
        int currentPixel = (row*imageWidth)+column;

        int sum1 = 0;
        int sum2 = 0;
        int sum3 = 0;
        int sum4 = 0;

        int index = 0;
        for (int offset = -radius; offset <= radius; offset++) {
            if (!(row+offset < 0) && !(row+offset > imageHeight-1)) {
                int firstByteOfPixelWereLookingAtInSrcData = (currentPixel+(offset*imageWidth))*4;

                int in1 = destData[firstByteOfPixelWereLookingAtInSrcData];
                int in2 = destData[firstByteOfPixelWereLookingAtInSrcData+1];
                int in3 = destData[firstByteOfPixelWereLookingAtInSrcData+2];
                int in4 = destData[firstByteOfPixelWereLookingAtInSrcData+3];

                sum1 += (int)(in1 * kernel[index]);
                sum2 += (int)(in2 * kernel[index]);
                sum3 += (int)(in3 * kernel[index]);
                sum4 += (int)(in4 * kernel[index]);
            }
            index++;
        }

        int currentPixelInData = currentPixel*4;
        finalData[currentPixelInData] = sum1;
        finalData[currentPixelInData+1] = sum2;
        finalData[currentPixelInData+2] = sum3;
        finalData[currentPixelInData+3] = sum4;
    }
}
To reverse engineer a filter, you need to find its impulse response. On a background of a very dark value, say 32, place a nearly white pixel, say 223. You don't want to use 0 and 255 because some filters will try to create values beyond the starting values. Run the filter on this image, and take the output values and stretch them from 0.0 to 1.0: (value-32)/(223-32). Now you have the exact weights needed to emulate the filter.
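As a sketch of that measurement (assuming an 8-bit grayscale buffer and a one-dimensional cut through the impulse; RADIUS, blurredRow and impulseColumn are placeholder names, not part of the code above):

// Recover the filter weights from the row passing through the impulse pixel.
// Background = 32, impulse = 223, as described above.
float weights[2 * RADIUS + 1];
for (int k = -RADIUS; k <= RADIUS; k++) {
    int value = blurredRow[impulseColumn + k];               // output of the filter under test
    weights[k + RADIUS] = (value - 32) / (float)(223 - 32);  // stretch to 0.0 .. 1.0
}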
There are lots of ways to treat the image edges. I would suggest summing only the filter weights that actually land inside the image and dividing the filtered result by that sum; for any tap that falls beyond the edge, use 0.0 for both the pixel value and the filter weight on that pixel.
Boundary conditions sometimes depend on exactly what you're doing and what kind of data you're working with, but I think for general-purpose image manipulation the best thing to do is to extend the values at the borders beyond the edges of the image. Not literally, of course, but if the filter tries to read a pixel outside the image borders, you substitute the value of the nearest pixel on the edge of the image. Which is really the same as clamping the row to [0, height-1] and the column to [0, width-1].
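A minimal sketch of that clamp-to-edge sampling (plain C, single-channel row-major image; the names are illustrative):

static inline int clampInt(int v, int lo, int hi) {
    return v < lo ? lo : (v > hi ? hi : v);
}

// Read a pixel with replicate-border semantics: out-of-range coordinates are
// clamped to the nearest valid row/column.
unsigned char sampleClamped(const unsigned char *image, int width, int height, int row, int column) {
    row = clampInt(row, 0, height - 1);
    column = clampInt(column, 0, width - 1);
    return image[row * width + column];
}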