After comparing more than 20 apps, I found that the decibel values they report for the same sound differ from app to app.
// Method 1:
float power = [self.audioRecorder averagePowerForChannel:0];
float powerMax = [self.audioRecorder peakPowerForChannel:0];
power = power + 160 - 50;
int dB = 0;
if (power < 0.f) {
dB = 0;
} else if (power < 40.f) {
dB = (int)(power * 0.875);
} else if (power < 100.f) {
dB = (int)(power - 15);
} else if (power < 110.f) {
dB = (int)(power * 2.5 - 165);
} else {
dB = 110;
}
NSLog(#"power = %f, powerMax = %f , dB = %d",power, powerMax,dB);
// Method 2:
float level; // The linear 0.0 .. 1.0 value we need.
float minDecibels = -80.0f; // Or use -60dB, which I measured in a silent room.
float decibels = [self.audioRecorder averagePowerForChannel:0];
if (decibels < minDecibels)
{
level = 0.0f;
}
else if (decibels >= 0.0f)
{
level = 1.0f;
}
else
{
float root = 2.0f;
float minAmp = powf(10.0f, 0.05f * minDecibels);
float inverseAmpRange = 1.0f / (1.0f - minAmp);
float amp = powf(10.0f, 0.05f * decibels);
float adjAmp = (amp - minAmp) * inverseAmpRange;
level = powf(adjAmp, 1.0f / root);
}
NSLog(#"leval %f", level * 120);
There is a big gap between the values from Method 1 and Method 2.
So, are these methods accurate? Is there a more correct way?
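For reference, here is a minimal plain C++ sketch of the math underneath both methods (this is just arithmetic, not an Apple API): averagePowerForChannel: returns dBFS, where 0 dB means full scale, so the amplitude relative to full scale is 10^(dB/20) (Method 2 then normalizes and root-compresses that value for display). Turning dBFS into a real SPL decibel figure additionally needs a per-device calibration offset measured against a reference sound level meter, which is why apps that each pick their own ad-hoc offset (like the + 160 - 50 above) disagree. The calibrationOffset parameter below is an assumption, not something the SDK provides.

#include <algorithm>
#include <cmath>

// Linear 0..1 level from a dBFS reading (<= 0), clamped at a chosen noise floor.
float dbfsToLinear(float dBFS, float minDecibels = -80.0f) {
    if (dBFS < minDecibels) return 0.0f;
    if (dBFS >= 0.0f) return 1.0f;
    return std::pow(10.0f, 0.05f * dBFS); // 10^(dB/20)
}

// Very rough SPL estimate: dBFS plus a per-device offset that would have to be
// measured once against a real sound level meter (hypothetical parameter).
float estimateSPL(float dBFS, float calibrationOffset) {
    return std::max(0.0f, dBFS + calibrationOffset);
}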
I am using Xamarin.Forms.
On iOS, when using the pinch gesture below to zoom in on a Label (for example), the Label turns pixelated instead of updating and staying sharp.
On Android, this doesn't happen and the Label stays sharp and visible, no matter how much I zoom in on it.
Is there a way to make it behave on iOS as it does on Android?
Edit: this behaviour can also be observed simply by raising the Scale of a visual element: on Android it looks good, but on iOS it turns pixelated. I assume my problem lies here, but I'm not sure how to tackle it yet.
The pinch gesture implementation I use to zoom in with:
public class ZoomWithPinch : ContentView
{
public ZoomWithPinch()
{
var pinchGesture = new PinchGestureRecognizer();
pinchGesture.PinchUpdated += OnPinchUpdated;
GestureRecognizers.Add(pinchGesture);
}
double currentScale = 1;
double startScale = 1;
double xOffset = 0;
double yOffset = 0;
private void OnPinchUpdated(object sender, PinchGestureUpdatedEventArgs e)
{
if (e.Status == GestureStatus.Started)
{
startScale = Content.Scale;
Content.AnchorX = 0;
Content.AnchorY = 0;
}
if (e.Status == GestureStatus.Running)
{
currentScale += (e.Scale - 1) * startScale;
currentScale = Math.Max(1, currentScale);
double renderedX = Content.X + xOffset;
double deltaX = renderedX / Width;
double deltaWidth = Width / (Content.Width * startScale);
double originX = (e.ScaleOrigin.X - deltaX) * deltaWidth;
double renderedY = Content.Y + yOffset;
double deltaY = renderedY / Height;
double deltaHeight = Height / (Content.Height * startScale);
double originY = (e.ScaleOrigin.Y - deltaY) * deltaHeight;
double targetX = xOffset - (originX * Content.Width) * (currentScale - startScale);
double targetY = yOffset - (originY * Content.Height) * (currentScale - startScale);
Content.TranslationX = Math.Min(0, Math.Max(targetX, -Content.Width * (currentScale - 1)));
Content.TranslationY = Math.Min(0, Math.Max(targetY, -Content.Height * (currentScale - 1)));
Content.Scale = currentScale;
}
if (e.Status == GestureStatus.Completed)
{
xOffset = Content.TranslationX;
yOffset = Content.TranslationY;
}
}
public void OnPanUpdated(object sender, PanUpdatedEventArgs e)
{
if (Content.Scale == 1)
{
return;
}
switch (e.StatusType)
{
case GestureStatus.Running:
double newX = (e.TotalX * Scale) + xOffset;
double newY = (e.TotalY * Scale) + yOffset;
double width = (Content.Width * Content.Scale);
double height = (Content.Height * Content.Scale);
bool canMoveX = width > Application.Current.MainPage.Width;
bool canMoveY = height > Application.Current.MainPage.Height;
if (canMoveX)
{
double minX = (width - (Application.Current.MainPage.Width / 2)) * -1;
double maxX = Math.Min(Application.Current.MainPage.Width / 2, width / 2);
if (newX < minX)
{
newX = minX;
}
if (newX > maxX)
{
newX = maxX;
}
}
else
{
newX = 0;
}
if (canMoveY)
{
double minY = (height - (Application.Current.MainPage.Height / 2)) * -1;
double maxY = Math.Min(Application.Current.MainPage.Height / 2, height / 2);
if (newY < minY)
{
newY = minY;
}
if (newY > maxY)
{
newY = maxY;
}
}
else
{
newY = 0;
}
Content.TranslationX = newX;
Content.TranslationY = newY;
break;
case GestureStatus.Completed:
xOffset = Content.TranslationX;
yOffset = Content.TranslationY;
break;
}
}
}
<ZoomWithPinch>
<Grid>
<Label Text="Test" HorizontalOptions="Center" VerticalOptions="Center"/>
</Grid>
</ZoomWithPinch>
How do I get from an OpenCV cv::Mat point cloud to a pcl::PointCloud? The color is not important to me, only the points themselves.
You can do it like this:
pcl::PointCloud<pcl::PointXYZ>::Ptr SimpleOpenNIViewer::MatToPoinXYZ(cv::Mat OpencVPointCloud)
{
/*
* Function: Get from a Mat to pcl pointcloud datatype
* In: cv::Mat
* Out: pcl::PointCloud
*/
//char pr=100, pg=100, pb=100;
pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr(new pcl::PointCloud<pcl::PointXYZ>);//(new pcl::pointcloud<pcl::pointXYZ>);
for(int i=0;i<OpencVPointCloud.cols;i++)
{
//std::cout<<i<<endl;
pcl::PointXYZ point;
point.x = OpencVPointCloud.at<float>(0,i);
point.y = OpencVPointCloud.at<float>(1,i);
point.z = OpencVPointCloud.at<float>(2,i);
// when color needs to be added:
//uint32_t rgb = (static_cast<uint32_t>(pr) << 16 | static_cast<uint32_t>(pg) << 8 | static_cast<uint32_t>(pb));
//point.rgb = *reinterpret_cast<float*>(&rgb);
point_cloud_ptr -> points.push_back(point);
}
point_cloud_ptr->width = (int)point_cloud_ptr->points.size();
point_cloud_ptr->height = 1;
return point_cloud_ptr;
}
And the other way around:
cv::Mat MVW_ICP::PoinXYZToMat(pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr){
cv::Mat OpenCVPointCloud(3, point_cloud_ptr->points.size(), CV_64FC1);
for(int i=0; i < point_cloud_ptr->points.size();i++){
OpenCVPointCloud.at<double>(0,i) = point_cloud_ptr->points.at(i).x;
OpenCVPointCloud.at<double>(1,i) = point_cloud_ptr->points.at(i).y;
OpenCVPointCloud.at<double>(2,i) = point_cloud_ptr->points.at(i).z;
}
return OpenCVPointCloud;
}
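A quick usage sketch tying the two converters together (treating them as free functions here for illustration, even though above they are members of SimpleOpenNIViewer and MVW_ICP). Note the asymmetry: the first expects a 3xN CV_32FC1 matrix of floats, while the reverse produces a 3xN CV_64FC1 matrix of doubles.

#include <opencv2/core.hpp>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

int main() {
    cv::Mat cvCloud(3, 100, CV_32FC1);
    cv::randu(cvCloud, cv::Scalar(-1.0f), cv::Scalar(1.0f)); // dummy 3xN points

    pcl::PointCloud<pcl::PointXYZ>::Ptr pclCloud = MatToPoinXYZ(cvCloud);

    cv::Mat back = PoinXYZToMat(pclCloud); // 3xN again, but CV_64FC1
    return 0;
}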
To convert a range image captured by a Kinect sensor and represented by depthMat into a pcl::PointCloud, you can try this function. The calibration parameters are those used here.
pcl::PointCloud<pcl::PointXYZ>::Ptr MatToPoinXYZ(cv::Mat depthMat)
{
pcl::PointCloud<pcl::PointXYZ>::Ptr ptCloud (new pcl::PointCloud<pcl::PointXYZ>);
// calibration parameters
float const fx_d = 5.9421434211923247e+02;
float const fy_d = 5.9104053696870778e+02;
float const cx_d = 3.3930780975300314e+02;
float const cy_d = 2.4273913761751615e+02;
// Kinect depth maps store one 16-bit value per pixel, in millimetres
for (int i = 0; i < depthMat.rows; i++)
{
for (int j = 0; j < depthMat.cols; j++)
{
float z = static_cast<float>(depthMat.at<unsigned short>(i, j));
pcl::PointXYZ point;
point.z = 0.001f * z; // millimetres to metres
point.x = point.z * (j - cx_d) / fx_d;
point.y = point.z * (cy_d - i) / fy_d;
ptCloud->points.push_back(point);
}
}
ptCloud->width = (int)depthMat.cols;
ptCloud->height = (int)depthMat.rows;
return ptCloud;
}
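A hypothetical way to feed the function above, assuming the Kinect depth map was saved as a 16-bit PNG with values in millimetres (the file format and helper name are assumptions):

#include <opencv2/imgcodecs.hpp>
#include <string>

pcl::PointCloud<pcl::PointXYZ>::Ptr loadCloudFromPng(const std::string &path) {
    // IMREAD_ANYDEPTH keeps the 16-bit depth values instead of converting to 8-bit
    cv::Mat depthMat = cv::imread(path, cv::IMREAD_ANYDEPTH);
    return MatToPoinXYZ(depthMat);
}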
I am trying to create custom wave signals on iOS.
When I add the second for loop, it works and creates the impulse in the interval between 0.8T and 0.9T, where T is the period.
When I try to add the third for loop to create another one, it produces a weird signal with a convex hyperbola after 0.7T instead of generating a new impulse.
Could you please tell me what I should modify, or how I should reset the buffer variable, so that I can generate multiple impulses?
Below is my code:
// Fixed amplitude is good enough for our purposes
const double amplitude = 1.0;
// Get the tone parameters out of the view controller
ToneGeneratorViewController *viewController =
(ToneGeneratorViewController *)inRefCon;
double theta = viewController->theta;
//double theta_increment = 2.0 * M_PI * viewController->frequency / viewController->sampleRate;
double theta_increment = viewController->sampleRate / viewController->frequency;
// (sampleRate = 44100, frequency = 44.44)
const int channel = 0;
Float32 *buffer = (Float32 *)ioData->mBuffers[channel].mData;
float squareIndex = 0.0;
//Generate the samples//
for (UInt32 frame = 0; frame < inNumberFrames; frame++)
{
float k =0.0;
float y = 34.0/75.0;
if( fmodf(squareIndex, theta_increment)/theta_increment < y) {
k = 1.0;
} else {
k = 0.0;
}
buffer[frame] = k * amplitude;
squareIndex += 1.0;
if(squareIndex >= theta_increment) squareIndex-=theta_increment;
viewController->theta = theta;
}
for (UInt32 frame = 0; frame < inNumberFrames; frame++)
{
float k =0.0;
float z = 0.8;
float y = 0.9;
if( z < fmodf(squareIndex, theta_increment)/theta_increment < y) {
k = 0.0;
} else {
k = 1.0;
}
buffer[frame] += k * amplitude;
squareIndex += 1.0;
if(squareIndex >= theta_increment) squareIndex-=theta_increment;
viewController->theta = theta;
}
for (UInt32 frame = 0; frame < inNumberFrames; frame++)
{
float k =0.0;
float z = 0.6;
float y = 0.7;
if( z < fmodf(squareIndex, theta_increment)/theta_increment < y) {
k = 0.0;
} else {
k = 1.0;
}
buffer[frame] += k * amplitude;
squareIndex += 1.0;
if(squareIndex >= theta_increment) squareIndex-=theta_increment;
viewController->theta = theta;
}
Since we don't see the buffer allocation in the provided code, I will assume that the buffer has sufficient size to hold all the samples (otherwise a buffer overflow could cause all sorts of undefined behavior).
With the code provided, you should realize that in the loops, the expressions
if( z < fmodf(squareIndex, theta_increment)/theta_increment < y) {
are evaluated from left to right as:
if( (z < fmodf(squareIndex, theta_increment)/theta_increment) < y) {
Let's look at the second loop to illustrate the effect:
As long as squareIndex is less than 0.8*theta_increment, the subexpression
(z < fmodf(squareIndex, theta_increment)/theta_increment)
evaluates to false, which, after numeric promotion to 0, is less than y=0.9, so the overall expression is true (and k=0). Once squareIndex exceeds 0.8*theta_increment, the subexpression
(z < fmodf(squareIndex, theta_increment)/theta_increment)
becomes true, which, after promotion to 1, is greater than y=0.9, so the overall expression becomes false (and k=1).
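To see the effect concretely, here is a tiny standalone check (plain C++, independent of the audio code) comparing what the chained comparison computes with what was intended:

#include <cstdio>

int main() {
    const float z = 0.8f, y = 0.9f;
    const float samples[] = { 0.5f, 0.85f, 0.95f }; // below, inside, above the interval

    for (int i = 0; i < 3; i++) {
        float t = samples[i];
        int chained  = (z < t) < y;         // what  z < t < y  actually computes
        int intended = (z < t) && (t < y);  // what was meant
        std::printf("t = %.2f  chained = %d  intended = %d\n", t, chained, intended);
    }
    return 0;
}

The chained form is 1 whenever t is at or below 0.8 and 0 above it, so the condition effectively tests t > 0.8 rather than 0.8 < t < 0.9.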
The loops then generate curves where, from top to bottom, you have the first, second and third loop, followed by the combined waveform.
To fix this you can change the conditions to:
float t = fmodf(squareIndex, theta_increment)/theta_increment;
if (z < t && t < y) {
k = 1.0;
} else {
k = 0.0;
}
This should then generate the intended waveform, with pulses in the 0.8T-0.9T and 0.6T-0.7T intervals added on top of the first loop's output.
I am trying to modify the square wave generation method so that it can generate eight pulses with different pulse widths, with a 0.3 ms delay between each, per period. I have seen that sampleBuffer is responsible for generating the pulse signals, but I am not sure how to write a function that produces such a specific pattern. Could you please tell me whether there is a library function in AudioTrack.h for generating the pulses?
Below is my code for generating the square wave:
void generateSquare(SInt16 *sampleBuffer, int numFrames, float sampleRate, float frequency, float amp) {
if(amp>1) amp=1;
if(amp<0) amp=0;
amp = amp*SHRT_MAX;
float samplesPerCycle = sampleRate/frequency;
for(int i = 0; i < numFrames; i++) {
if(fmodf(squareIndex, samplesPerCycle)/samplesPerCycle > 0.5) {
sampleBuffer[i] = amp;
} else {
sampleBuffer[i] = -1*amp;
}
squareIndex = squareIndex+1;
if(squareIndex >= samplesPerCycle) squareIndex-=samplesPerCycle;
}
}
Here is my solution for nearly the same problem.
In my case I create a pulse with a 1 ms width, which I then adjust by +/-0.5 ms using the fill value.
So, depending on fillValue, I generate a square wave with a 0.5-1.5 ms pulse width.
int squareIndex = 0;
void generateSquare(SInt16 *sampleBuffer, int numFrames, float sampleRate, float fillValue, float amp) {
// Fill value = pulse width value in frames
// fillValue = [-20, 20];
if(amp>1) amp=1;
if(amp<0) amp=0;
if(fillValue > 20) fillValue = 20;
if(fillValue < -20) fillValue = -20;
amp = amp*SHRT_MAX;
float samplesPerCycle = sampleRate/50;
//Sample / Cycle = 882
//1ms = 41 frame -> 0.5ms = 20(.5)frame
//In your case 0.3ms = 12(.3) frame
#pragma mark - PWM
for(int i = 0; i < numFrames; i++) {
//if(fmodf(squareIndex, samplesPerCycle)/samplesPerCycle < 0.05) {
if(squareIndex < 41 + fillValue) {
sampleBuffer[i] = 1*SHRT_MAX;
} else {
sampleBuffer[i] = 0;
}
squareIndex = squareIndex+1;
if(squareIndex >= samplesPerCycle) squareIndex-=samplesPerCycle;
}
}
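Extending this idea to the eight-pulse pattern asked about (different widths, 0.3 ms apart, repeated every period): whether or not a helper exists in AudioTrack.h, one way to generate the pattern by hand is sketched below in plain C++. The widthsMs array, periodMs parameter, and int16_t sample type are assumptions, and it presumes the eight pulses plus gaps fit inside one period.

#include <limits.h>
#include <stdint.h>

static int patternIndex = 0; // persistent phase within the period, in frames

// Sketch: one period contains 8 pulses; pulse k lasts widthsMs[k] milliseconds
// and the next pulse starts 0.3 ms after it ends.
void generatePulsePattern(int16_t *sampleBuffer, int numFrames, float sampleRate,
                          float periodMs, const float widthsMs[8], float amp) {
    if (amp > 1) amp = 1;
    if (amp < 0) amp = 0;

    const float framesPerMs    = sampleRate / 1000.0f; // 44.1 at 44100 Hz
    const int   framesPerCycle = (int)(periodMs * framesPerMs);
    const int   gapFrames      = (int)(0.3f * framesPerMs);

    // Precompute the [start, end) frame boundaries of the 8 pulses.
    int start[8], end[8], cursor = 0;
    for (int k = 0; k < 8; k++) {
        start[k] = cursor;
        end[k]   = cursor + (int)(widthsMs[k] * framesPerMs);
        cursor   = end[k] + gapFrames;
    }

    for (int i = 0; i < numFrames; i++) {
        int on = 0;
        for (int k = 0; k < 8; k++) {
            if (patternIndex >= start[k] && patternIndex < end[k]) { on = 1; break; }
        }
        sampleBuffer[i] = on ? (int16_t)(amp * SHRT_MAX) : 0;
        if (++patternIndex >= framesPerCycle) patternIndex = 0;
    }
}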
I want to create a visible object with a trajectory, using standard actions (CCMoveBy etc.), which is similar to:
x = sin(y)
My code:
CCMoveBy *moveAction1 = [CCMoveBy actionWithDuration:1.5 position:ccp(300, 0)];
CCEaseInOut *easeInOutAction1 = [CCEaseInOut actionWithAction:moveAction1 rate:2];
CCMoveBy *moveAction2 = [CCMoveBy actionWithDuration:1.5 position:ccp(-300, 0)];
CCEaseInOut *easeInOutAction2 = [CCEaseInOut actionWithAction:moveAction2 rate:2];
CCMoveBy *moveAction3 = [CCMoveBy actionWithDuration:1.5 position:ccp(0, -32)];
CCSpawn *moveActionRight = [CCSpawn actionOne:easeInOutAction1 two:moveAction3];
CCSpawn *moveActionLeft = [CCSpawn actionOne:easeInOutAction2 two:moveAction3];
CCSequence *sequenceOfActions = [CCSequence actionOne:moveActionRight two:moveActionLeft];
CCRepeatForever *finalMoveAction = [CCRepeatForever actionWithAction:sequenceOfActions];
[enemy runAction:finalMoveAction];
This code only moves the object down. The problem is that the object has different x and y accelerations, and I don't know how to combine them.
UPDATED
- (void)tick:(ccTime)dt
{
CGPoint pos = self.position;
pos.y -= 50 * dt;
if (pos.y < activationDistance) {
pos.x = 240 + sin(angle) * 140;
angle += dt * 360 * 0.007;
if (angle >= 360) {
angle = ((int)angle) % 360;
}
}
self.position = pos;
}
This is my current solution. I can increase activationDistance to adjust the object's trajectory, but I want to set an initial value for the angle variable.
I use literal numbers instead of variables because they are only used inside this function.
SOLVED
To change the initial angle (since x = 240 + sin(angle) * 140, inverting gives angle = asin((x - 240) / 140)):
angle = point.x < 240 ? -asin((240 - point.x) / 140) : asin((point.x - 240) / 140);
The main problem was that my tiled map has its own coordinate system and only covers a 320x320 part of the screen.
I think it will be easier for you to just do it in your frame update method (the one I assume you schedule for updating your objects). So why not just do:
- (void)tick:(ccTime)dt {
CGPoint pos = myObject.position;
pos.x = <desired x> + sin(angle);
pos.y = pos.y - y_acceleration * dt;
angle += dt * 360 * x_acceleration;
if (angle >= 360)
angle = ((int)angle) % 360;
myObject.position = pos;
}
And you can apply the same approach to the y axis of the object.
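For completeness, the whole trajectory boils down to a small parametric update; here is a plain C++ sketch (the 240 / 140 / 50 constants mirror the hard-coded values above, and angularSpeed is a tuning assumption in radians per second):

#include <cmath>

// Minimal parametric sine path: the object falls at constant speed while
// its x oscillates around a centre line.
struct SinePath {
    float centerX = 240.0f;     // centre of the oscillation
    float amplitude = 140.0f;   // horizontal half-width
    float fallSpeed = 50.0f;    // points per second, downwards
    float angularSpeed = 2.5f;  // radians per second (tuning assumption)
    float angle = 0.0f;

    // Invert x = centerX + sin(angle) * amplitude to start from an arbitrary x,
    // as in the SOLVED note above (requires |x - centerX| <= amplitude).
    void startAt(float x) { angle = std::asin((x - centerX) / amplitude); }

    void step(float dt, float &x, float &y) {
        y -= fallSpeed * dt;
        angle += angularSpeed * dt;
        x = centerX + std::sin(angle) * amplitude;
    }
};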