First I want to show my method for converting source .wav files to .mp3 with the LAME library:
- (void)convertFromWav:(NSString *)sourceFilePath ToMp3:(NSString *)resultName {
    NSString *mp3FileName = [resultName stringByAppendingString:@".mp3"];
    NSString *mp3FilePath = [NSTemporaryDirectory() stringByAppendingPathComponent:mp3FileName];
    @try {
        int read, write;
        FILE *pcm = fopen([sourceFilePath UTF8String], "rb"); // source
        if (pcm == NULL) {
            perror("fopen");
            return;
        }
        fseek(pcm, 4 * 1024, SEEK_CUR); // skip file header
        FILE *mp3 = fopen([mp3FilePath UTF8String], "wb"); // output
        const int sampleRate = 44100;
        const int bitsPerSample = 16;
        const int numberOfChannels = 2;
        const int PCM_SIZE = 8192 * 2;
        const int MP3_SIZE = 8192 * 2;
        short int pcm_buffer[PCM_SIZE * 2];
        unsigned char mp3_buffer[MP3_SIZE];
        lame_t lame = lame_init();
        lame_set_in_samplerate(lame, sampleRate);
        lame_set_VBR(lame, vbr_default);
        lame_init_params(lame);
        long long fileSize = [[[[NSFileManager defaultManager] attributesOfItemAtPath:sourceFilePath error:nil] objectForKey:NSFileSize] longLongValue];
        long duration = fileSize / (sampleRate * numberOfChannels * bitsPerSample / 8);
        lame_set_num_samples(lame, (duration * sampleRate));
        float percent = 0.0;
        int totalframes = lame_get_totalframes(lame);
        do {
            read = fread(pcm_buffer, 2 * sizeof(short int), PCM_SIZE, pcm);
            if (read == 0)
                write = lame_encode_flush(lame, mp3_buffer, MP3_SIZE);
            else
                write = lame_encode_buffer_interleaved(lame, pcm_buffer, read, mp3_buffer, MP3_SIZE);
            fwrite(mp3_buffer, write, 1, mp3);
            int frameNum = lame_get_frameNum(lame);
            if (frameNum < totalframes)
                percent = (100. * frameNum / totalframes + 0.5);
            else
                percent = 100;
            if ([_delegate respondsToSelector:@selector(convertingProgressChangedWithPercent:)]) {
                [_delegate convertingProgressChangedWithPercent:percent];
            }
        } while (read != 0);
        lame_close(lame);
        fclose(mp3);
        fclose(pcm);
    }
    @catch (NSException *exception) {
        NSLog(@"%@", [exception description]);
    }
    @finally {
        if ([_delegate respondsToSelector:@selector(convertingDidFinish:)]) {
            [_delegate convertingDidFinish:mp3FilePath];
        }
    }
}
It works fine. As a result I get an .mp3 at 152000 bits per second, but I want 320000 bits per second. How can I change that? I'm not good with the theory behind this, so I don't know which values to change to what. Thanks.
You want to use lame_set_VBR(lame, vbr_off); and then lame_set_brate(lame, ...); to set the required bitrate. Using vbr_off gives you CBR mode, as confirmed in the header (see lame.h):
/********************************************************************
 *  VBR control
 ********************************************************************/

/* Types of VBR.  default = vbr_off = CBR */
int CDECL lame_set_VBR(lame_global_flags *, vbr_mode);
vbr_mode CDECL lame_get_VBR(const lame_global_flags *);
Try this:

// constants for the settings
const int sampleRate = 44100;
const int bitsPerSample = 16;
const int numberOfChannels = 2;
const int myBitRate = 320;

// LAME settings
lame_t lame = lame_init();
lame_set_in_samplerate(lame, sampleRate); // 44100
lame_set_VBR(lame, vbr_off);              // force CBR mode
lame_set_brate(lame, myBitRate);          // 320 kbps
lame_init_params(lame);
Note that lame_t is the handle type, not a variable: you declare your own handle with lame_t lame = lame_init(); and then pass that lame variable to every subsequent call, e.g. lame_init_params(lame);.
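If you would rather keep variable-bitrate encoding but still target a high rate, ABR is a possible middle ground. A sketch, not part of the original answer; vbr_abr and lame_set_VBR_mean_bitrate_kbps are from lame.h:

// Sketch only: ABR targets an average bitrate rather than a constant one.
lame_t lame = lame_init();
lame_set_in_samplerate(lame, 44100);
lame_set_VBR(lame, vbr_abr);               // average-bitrate (ABR) mode
lame_set_VBR_mean_bitrate_kbps(lame, 320); // target average, in kbps
lame_init_params(lame);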
------------------------------------------------------------
------------------------------------------------------------
I have C# code for AES decryption and I want to produce the matching encryption result in Objective-C, but I've failed so far. I can change the Objective-C code; what do I need to fix?

C# decryption code:
private static readonly string AES_KEY = "asdfasdfasdfasdf";
private static readonly int BUFFER_SIZE = 1024 * 4;
private static readonly int KEY_SIZE = 128;
private static readonly int BLOCK_SIZE = 128;

static public string Composite(string value)
{
    using (AesManaged aes = new AesManaged())
    using (MemoryStream ims = new MemoryStream(Convert.FromBase64String(value), false))
    {
        aes.KeySize = KEY_SIZE;
        aes.BlockSize = BLOCK_SIZE;
        aes.Mode = CipherMode.CBC;
        aes.Key = Encoding.UTF8.GetBytes(AES_KEY);
        byte[] iv = new byte[aes.IV.Length];
        ims.Read(iv, 0, iv.Length);
        aes.IV = iv;
        using (ICryptoTransform ce = aes.CreateDecryptor(aes.Key, aes.IV))
        using (CryptoStream cs = new CryptoStream(ims, ce, CryptoStreamMode.Read))
        using (DeflateStream ds = new DeflateStream(cs, CompressionMode.Decompress))
        using (MemoryStream oms = new MemoryStream())
        {
            byte[] buf = new byte[BUFFER_SIZE];
            for (int size = ds.Read(buf, 0, buf.Length); size > 0; size = ds.Read(buf, 0, buf.Length))
            {
                oms.Write(buf, 0, size);
            }
            return Encoding.UTF8.GetString(oms.ToArray());
        }
    }
}
Objective-C encryption code:
- (NSString *)AES128EncryptWithKey:(NSString *)key
{
    NSData *plainData = [self dataUsingEncoding:NSUTF8StringEncoding];
    NSData *encryptedData = [plainData AES128EncryptWithKey:key];
    NSString *encryptedString = [encryptedData stringUsingEncodingBase64];
    return encryptedString;
}
#import "NSData+AESCrypt.h"
#import <CommonCrypto/CommonCryptor.h>
static char encodingTable[64] =
{
'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P',
'Q','R','S','T','U','V','W','X','Y','Z','a','b','c','d','e','f',
'g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v',
'w','x','y','z','0','1','2','3','4','5','6','7','8','9','+','/'
};
#implementation NSData (AESCrypt)
- (NSData *)AES128EncryptWithKey:(NSString *)key
{
// 'key' should be 16 bytes for AES128
char keyPtr[kCCKeySizeAES128 + 1]; // room for terminator (unused)
bzero( keyPtr, sizeof( keyPtr ) ); // fill with zeroes (for padding)
// fetch key data
[key getCString:keyPtr maxLength:sizeof( keyPtr ) encoding:NSUTF8StringEncoding];
NSUInteger dataLength = [self length];
//See the doc: For block ciphers, the output size will always be less than or
//equal to the input size plus the size of one block.
//That's why we need to add the size of one block here
size_t bufferSize = dataLength + kCCBlockSizeAES128;
void *buffer = malloc( bufferSize );
size_t numBytesEncrypted = 0;
CCCryptorStatus cryptStatus = CCCrypt( kCCEncrypt, kCCAlgorithmAES128, kCCModeCBC | kCCOptionPKCS7Padding,
keyPtr, kCCKeySizeAES128,
NULL /* initialization vector (optional) */,
[self bytes], dataLength, /* input */
buffer, bufferSize, /* output */
&numBytesEncrypted );
if( cryptStatus == kCCSuccess )
{
//the returned NSData takes ownership of the buffer and will free it on deallocation
return [NSData dataWithBytesNoCopy:buffer length:numBytesEncrypted];
}
free( buffer ); //free the buffer
return nil;
}
- (NSString *)base64Encoding
{
    const unsigned char *bytes = [self bytes];
    NSMutableString *result = [NSMutableString stringWithCapacity:self.length];
    unsigned long ixtext = 0;
    unsigned long lentext = self.length;
    long ctremaining = 0;
    unsigned char inbuf[3], outbuf[4];
    unsigned short i = 0;
    unsigned short charsonline = 0, ctcopy = 0;
    unsigned long ix = 0;

    while (YES)
    {
        ctremaining = lentext - ixtext;
        if (ctremaining <= 0) break;
        for (i = 0; i < 3; i++)
        {
            ix = ixtext + i;
            if (ix < lentext) inbuf[i] = bytes[ix];
            else inbuf[i] = 0;
        }
        outbuf[0] = (inbuf[0] & 0xFC) >> 2;
        outbuf[1] = ((inbuf[0] & 0x03) << 4) | ((inbuf[1] & 0xF0) >> 4);
        outbuf[2] = ((inbuf[1] & 0x0F) << 2) | ((inbuf[2] & 0xC0) >> 6);
        outbuf[3] = inbuf[2] & 0x3F;
        ctcopy = 4;
        switch (ctremaining)
        {
            case 1:
                ctcopy = 2;
                break;
            case 2:
                ctcopy = 3;
                break;
        }
        for (i = 0; i < ctcopy; i++)
            [result appendFormat:@"%c", encodingTable[outbuf[i]]];
        for (i = ctcopy; i < 4; i++)
            [result appendString:@"="];
        ixtext += 3;
        charsonline += 4;
    }
    return [NSString stringWithString:result];
}
------------------------------------------------------------
------------------------------------------------------------
I'm trying to do a real-time pitch shift on microphone input using Superpowered. I looked at the example, which works on a file, and tried to adapt it. I managed to change the sound, but it came out very distorted, with interference. What am I doing wrong? And where can I find more information on Superpowered and time stretching?
static bool audioProcessing(void *clientdata,
                            float **buffers,
                            unsigned int inputChannels,
                            unsigned int outputChannels,
                            unsigned int numberOfSamples,
                            unsigned int samplerate,
                            uint64_t hostTime) {
    __unsafe_unretained Superpowered *self = (__bridge Superpowered *)clientdata;
    float tempBuffer[numberOfSamples * 2 + 16];
    SuperpoweredInterleave(buffers[0], buffers[1], tempBuffer, numberOfSamples);
    float *outputBuffer = tempBuffer;
    SuperpoweredAudiobufferlistElement inputBuffer;
    inputBuffer.samplePosition = 0;
    inputBuffer.startSample = 0;
    inputBuffer.samplesUsed = 0;
    inputBuffer.endSample = self->timeStretcher->numberOfInputSamplesNeeded;
    inputBuffer.buffers[0] = SuperpoweredAudiobufferPool::getBuffer(self->timeStretcher->numberOfInputSamplesNeeded * 8 + 64);
    inputBuffer.buffers[1] = inputBuffer.buffers[2] = inputBuffer.buffers[3] = NULL;
    memcpy((float *)inputBuffer.buffers[0], outputBuffer, numberOfSamples * 2 + 16);
    self->timeStretcher->process(&inputBuffer, self->outputBuffers);

    // Do we have some output?
    if (self->outputBuffers->makeSlice(0, self->outputBuffers->sampleLength)) {
        while (true) { // Iterate on every output slice.
            // Get pointer to the output samples.
            int sampleCount = 0;
            float *timeStretchedAudio = (float *)self->outputBuffers->nextSliceItem(&sampleCount);
            if (!timeStretchedAudio) break;
            SuperpoweredDeInterleave(timeStretchedAudio, buffers[0], buffers[1], numberOfSamples);
        }
        // Clear the output buffer list.
        self->outputBuffers->clear();
    }
    return true;
}
I did the following:
static bool audioProcessing(void *clientdata,
                            float **buffers,
                            unsigned int inputChannels,
                            unsigned int outputChannels,
                            unsigned int numberOfSamples,
                            unsigned int samplerate,
                            uint64_t hostTime) {
    __unsafe_unretained Superpowered *self = (__bridge Superpowered *)clientdata;
    SuperpoweredAudiobufferlistElement inputBuffer;
    inputBuffer.startSample = 0;
    inputBuffer.samplesUsed = 0;
    inputBuffer.endSample = numberOfSamples;
    inputBuffer.buffers[0] = SuperpoweredAudiobufferPool::getBuffer((unsigned int) (numberOfSamples * 8 + 64));
    inputBuffer.buffers[1] = inputBuffer.buffers[2] = inputBuffer.buffers[3] = NULL;
    SuperpoweredInterleave(buffers[0], buffers[1], (float *)inputBuffer.buffers[0], numberOfSamples);
    self->timeStretcher->process(&inputBuffer, self->outputBuffers);

    // Do we have some output?
    if (self->outputBuffers->makeSlice(0, self->outputBuffers->sampleLength)) {
        while (true) { // Iterate on every output slice.
            // Get pointer to the output samples.
            int numSamples = 0;
            float *timeStretchedAudio = (float *)self->outputBuffers->nextSliceItem(&numSamples);
            if (!timeStretchedAudio || *timeStretchedAudio == 0) {
                break;
            }
            SuperpoweredDeInterleave(timeStretchedAudio, buffers[0], buffers[1], numSamples);
        }
        // Clear the output buffer list.
        self->outputBuffers->clear();
    }
    return true;
}
This might not work correctly when the speed is changed as well, but I only wanted live pitch shifting; people can simply speak slower or faster themselves.
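For what it's worth, a likely source of the distortion in both attempts is that the time stretcher rarely outputs exactly numberOfSamples frames per callback, yet each slice is deinterleaved straight into the output buffers. Below is a sketch of the usual remedy: buffer the stretcher's output in a FIFO and emit fixed-size chunks. fifoBuffer and fifoFrames are hypothetical members on the wrapper class, not part of the original code:

// Sketch: drain the time stretcher into a FIFO, then emit exactly
// numberOfSamples frames per callback. fifoBuffer/fifoFrames are assumed
// members (a real implementation would also bound-check fifoBuffer).
if (self->outputBuffers->makeSlice(0, self->outputBuffers->sampleLength)) {
    while (true) {
        int numSamples = 0;
        float *timeStretchedAudio = (float *)self->outputBuffers->nextSliceItem(&numSamples);
        if (!timeStretchedAudio) break;
        // Append the interleaved slice to the FIFO: 2 floats per frame.
        memcpy(self->fifoBuffer + self->fifoFrames * 2, timeStretchedAudio,
               numSamples * 2 * sizeof(float));
        self->fifoFrames += numSamples;
    }
    self->outputBuffers->clear();
}
if (self->fifoFrames >= (int)numberOfSamples) {
    // Enough buffered audio: emit exactly one hardware buffer's worth.
    SuperpoweredDeInterleave(self->fifoBuffer, buffers[0], buffers[1], numberOfSamples);
    self->fifoFrames -= numberOfSamples;
    memmove(self->fifoBuffer, self->fifoBuffer + numberOfSamples * 2,
            self->fifoFrames * 2 * sizeof(float));
    return true;
}
return false; // not enough stretched audio yet; output silence this cycle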
------------------------------------------------------------
------------------------------------------------------------
I've managed to decode and play H264 video, but I'm having a difficult time with MPEG4 video.
What CMVideoFormatDescription extensions does it need? I'm getting error -8971 (codecExtensionNotFoundErr) when trying to create a VTDecompressionSession.
This is how I create the CMVideoFormatDescription:
OSStatus success = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
                                                  self.mediaCodec,
                                                  message.frameSize.width,
                                                  message.frameSize.height,
                                                  NULL,
                                                  &mediaDescriptor);
Instead of that NULL, I assume I need to pass a CFDictionaryRef, but I don't know what it should contain. Any ideas?
After much pain and agony, I finally managed to make it work.
You need to provide a CFDictionaryRef containing at least a value for the kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms key. The value for that key must also be a CFDictionaryRef. For H264 this dictionary is created inside CMVideoFormatDescriptionCreateFromH264ParameterSets and looks like this:
avcC = <014d401e ffe10016 674d401e 9a660a0f ff350101 01400000 fa000013 88010100 0468ee3c 80>
However for the MPEG4 type, you need to create this on your own. The end result should look like this:
esds = <00000000 038081e6 00000003 8081e611 00000000 00000000 058081e5 060102>
How to create this is still somewhat fuzzy to me, but it works. I was inspired by this link. This is the code:
- (CMFormatDescriptionRef)createFormatDescriptorFromMPEG4Message:(MessageContainer *)message {
    CMVideoFormatDescriptionRef mediaDescriptor = NULL;
    NSData *esdsData = [self newESDSFromData:message.frameData];
    CFMutableDictionaryRef esdsDictionary = CFDictionaryCreateMutable(kCFAllocatorDefault, 1,
                                                                      &kCFTypeDictionaryKeyCallBacks,
                                                                      &kCFTypeDictionaryValueCallBacks);
    CFDictionarySetValue(esdsDictionary, CFSTR("esds"), (__bridge const void *)(esdsData));
    NSDictionary *dictionary = @{(__bridge NSString *)kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms : (__bridge NSDictionary *)esdsDictionary};
    OSStatus status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
                                                     self.mediaCodec,
                                                     message.frameSize.width,
                                                     message.frameSize.height,
                                                     (__bridge CFDictionaryRef)dictionary,
                                                     &mediaDescriptor);
    if (status) {
        NSLog(@"CMVideoFormatDescriptionCreate failed with %zd", status);
    }
    return mediaDescriptor;
}
- (NSData *)newESDSFromData:(NSData *)data {
    NSInteger dataLength = data.length;
    int full_size = 3 + 5 + 13 + 5 + dataLength + 3;
    // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
    int config_size = 13 + 5 + dataLength;
    int padding = 12;
    int8_t *esdsInfo = calloc(full_size + padding, sizeof(int8_t));
    // Version
    esdsInfo[0] = 0;
    // Flags
    esdsInfo[1] = 0;
    esdsInfo[2] = 0;
    esdsInfo[3] = 0;
    // ES_DescrTag
    esdsInfo[4] |= 0x03;
    [self addMPEG4DescriptionLength:full_size
                          toPointer:esdsInfo + 5];
    // esid
    esdsInfo[8] = 0;
    esdsInfo[9] = 0;
    // Stream priority
    esdsInfo[10] = 0;
    // DecoderConfigDescrTag
    esdsInfo[11] = 0x03;
    [self addMPEG4DescriptionLength:config_size
                          toPointer:esdsInfo + 12];
    // Stream Type
    esdsInfo[15] = 0x11;
    // Buffer Size
    esdsInfo[16] = 0;
    esdsInfo[17] = 0;
    // Max bitrate
    esdsInfo[18] = 0;
    esdsInfo[19] = 0;
    esdsInfo[20] = 0;
    // Avg bitrate
    esdsInfo[21] = 0;
    esdsInfo[22] = 0;
    esdsInfo[23] = 0;
    // DecSpecificInfoTag
    esdsInfo[24] |= 0x05;
    [self addMPEG4DescriptionLength:dataLength
                          toPointer:esdsInfo + 25];
    // SLConfigDescrTag
    esdsInfo[28] = 0x06;
    // Length
    esdsInfo[29] = 0x01;
    esdsInfo[30] = 0x02;
    NSData *esdsData = [NSData dataWithBytes:esdsInfo length:31 * sizeof(int8_t)];
    free(esdsInfo);
    return esdsData;
}
- (void)addMPEG4DescriptionLength:(NSInteger)length
                        toPointer:(int8_t *)ptr {
    for (int i = 3; i >= 0; i--) {
        uint8_t b = (length >> (i * 7)) & 0x7F;
        if (i != 0) {
            b |= 0x80;
        }
        ptr[3 - i] = b;
    }
}
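To make the helper's encoding concrete, here is a worked example (added for illustration): it always emits four bytes, each carrying 7 bits of the length, with the high bit set on every byte except the last:

int8_t lengthBytes[4];
[self addMPEG4DescriptionLength:0x12345 toPointer:lengthBytes];
// 0x12345 split into 7-bit groups: 0000000 0000100 1000110 1000101
// -> lengthBytes == { 0x80, 0x84, 0xC6, 0x45 }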
The message container is a simple wrapper around the data received from the server:
@interface MessageContainer : NSObject
@property (nonatomic) CGSize frameSize;
@property (nonatomic) NSData *frameData;
@end
Where frameSize is the size of the frame (received separately from the server) and frameData is the data itself.
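For completeness (this part is my addition, not from the original answer): with the returned format description, the VTDecompressionSession that previously failed with -8971 can be created along these lines. The output callback name and pixel format are placeholders:

#import <VideoToolbox/VideoToolbox.h>

// Sketch: MyDecompressionOutputCallback and the pixel format are placeholders.
VTDecompressionOutputCallbackRecord callbackRecord;
callbackRecord.decompressionOutputCallback = MyDecompressionOutputCallback; // hypothetical
callbackRecord.decompressionOutputRefCon = (__bridge void *)self;
NSDictionary *destinationAttributes = @{
    (__bridge NSString *)kCVPixelBufferPixelFormatTypeKey :
        @(kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange)
};
VTDecompressionSessionRef session = NULL;
OSStatus status = VTDecompressionSessionCreate(kCFAllocatorDefault,
                                               mediaDescriptor, // from the method above
                                               NULL,            // decoder specification
                                               (__bridge CFDictionaryRef)destinationAttributes,
                                               &callbackRecord,
                                               &session);
if (status != noErr) {
    NSLog(@"VTDecompressionSessionCreate failed with %d", (int)status);
}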
------------------------------------------------------------
------------------------------------------------------------
I have a function in my application that stores data from a buffer. It works fine in debug mode, both on device and simulator, but when I create an .ipa and run it on the device, I get an EXC_ARM_DA_ALIGN error in libstdc++.6.dylib, in std::string::_M_replace_safe(unsigned long, unsigned long, char const*, unsigned long):
struct stMemoryBlock
{
    stMemoryBlock(void* InData, int InSize)
    {
        data = InData;
        size = InSize;
        offset = 0;
    }
    void* data;
    unsigned int size;
    unsigned int offset;
};

//-----------------------------------------------------
char* cDataCollector::TestMemoryThink(char* Buffer, int BufferSize, int TestOffset, int TestSize)
{
    char* result = NULL;
    if (TestOffset + TestSize <= BufferSize)
    {
        result = &Buffer[TestOffset];
    }
    return result;
}
//-----------------------------------------------------
bool cDataCollector::StoreBinaryData(void* DataBuffer, int DataSize)
{
    bool result = false;
    char* InBuffer = (char *)DataBuffer;
    if (!mPreparedData && !mPreparedDataSize && !mMemoryMap.size())
    {
        unsigned int CountElements = 0;
        int offset = sizeof(unsigned int);
        if (DataSize >= sizeof(unsigned int))
        {
            // CountElements = *(unsigned int*)(&InBuffer[0]);
            memcpy(&CountElements, InBuffer, sizeof(CountElements));
        }
        result = true;
        for (unsigned int i = 0; (i < CountElements) && result; ++i)
        {
            std::string ThinkName;
            stMemoryBlock * MemoryBlock = NULL;
            result = result && TestMemoryThink(InBuffer, DataSize, offset, 0) != NULL;
            if (result)
            {
                size_t name_think_size = strlen(&InBuffer[offset]);
                char* think_name = TestMemoryThink(InBuffer, DataSize, offset, 0);
                result = result && (think_name != NULL);
                if (result)
                {
                    ThinkName = think_name;
                    offset += (name_think_size + 1);
                }
            }
This line causes the error:
ThinkName = think_name;
Maybe I need another way to read a string from a memory location that isn't word (32-bit) aligned? Please help!
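No answer is recorded for this question, but the usual remedy for EXC_ARM_DA_ALIGN is the one the code already uses for CountElements: never dereference a potentially misaligned pointer directly; memcpy the bytes into properly aligned storage first. A sketch with a hypothetical helper along those lines:

#include <cstring>
#include <string>

// Sketch: read a NUL-terminated string out of an arbitrarily aligned buffer.
// strnlen() never reads past maxBytes; memcpy() has no alignment requirements.
static std::string ReadStringUnaligned(const char *buffer, size_t maxBytes)
{
    size_t len = strnlen(buffer, maxBytes);
    std::string result(len, '\0');
    memcpy(&result[0], buffer, len); // byte-wise copy, alignment-safe
    return result;
}

// Usage in StoreBinaryData, replacing the raw assignment:
//   ThinkName = ReadStringUnaligned(&InBuffer[offset], DataSize - offset);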
------------------------------------------------------------
------------------------------------------------------------
I have an app that selects a song from the iPod library, then copies that song into the app's directory as a '.caf' file. I now need to play that file and, at the same time, feed it into Apple's FFT from the Accelerate framework so I can visualize the data as a spectrogram. Here is the code for the FFT:
void FFTAccelerate::doFFTReal(float samples[], float amp[], int numSamples)
{
    int i;
    vDSP_Length log2n = log2f(numSamples);
    // Convert float array of real samples to COMPLEX_SPLIT array A
    vDSP_ctoz((COMPLEX*)samples, 2, &A, 1, numSamples/2);
    // Perform FFT using fftSetup and A; results are returned in A
    vDSP_fft_zrip(fftSetup, &A, 1, log2n, FFT_FORWARD);
    // Convert COMPLEX_SPLIT A result to float array to be returned.
    // Note: A.realp/A.imagp hold numSamples/2 elements, so the loop must
    // stop at numSamples/2 (the original i < numSamples overran the arrays).
    amp[0] = A.realp[0]/(numSamples*2);
    for (i = 1; i < numSamples/2; i++)
        amp[i] = sqrt(A.realp[i]*A.realp[i] + A.imagp[i]*A.imagp[i])/numSamples;
}

// Constructor
FFTAccelerate::FFTAccelerate(int numSamples)
{
    vDSP_Length log2n = log2f(numSamples);
    fftSetup = vDSP_create_fftsetup(log2n, FFT_RADIX2);
    int nOver2 = numSamples/2;
    A.realp = (float *) malloc(nOver2*sizeof(float));
    A.imagp = (float *) malloc(nOver2*sizeof(float));
}
My question is: how do I loop through the '.caf' audio file to feed the FFT while playing the song at the same time? I only need one channel. I'm guessing I need to grab 1024 samples of the song, process them in the FFT, then move further down the file and grab another 1024 samples, but I don't understand how to read an audio file to do this. The file has a sample rate of 44100 Hz, is in linear PCM format, 16-bit, and I believe it is also interleaved, if that helps...
Try the ExtendedAudioFile API (requires AudioToolbox.framework).
#include <AudioToolbox/ExtendedAudioFile.h>

NSURL *urlToCAF = ...;
ExtAudioFileRef caf;
OSStatus status;

status = ExtAudioFileOpenURL((__bridge CFURLRef)urlToCAF, &caf);
if (noErr == status) {
    const UInt32 NumFrames = 1024;
    const int ChannelsPerFrame = 1; // mono, 2 for stereo

    // request deinterleaved float format
    AudioStreamBasicDescription clientFormat;
    clientFormat.mChannelsPerFrame = ChannelsPerFrame;
    clientFormat.mSampleRate = 44100;
    clientFormat.mFormatID = kAudioFormatLinearPCM;
    clientFormat.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved;
    int cmpSize = sizeof(float);
    int frameSize = cmpSize * ChannelsPerFrame;
    clientFormat.mBitsPerChannel = cmpSize * 8;
    clientFormat.mBytesPerPacket = frameSize;
    clientFormat.mFramesPerPacket = 1;
    clientFormat.mBytesPerFrame = frameSize;

    status = ExtAudioFileSetProperty(caf, kExtAudioFileProperty_ClientDataFormat, sizeof(clientFormat), &clientFormat);
    if (noErr != status) { /* handle it */ }

    while (1) {
        float buf[ChannelsPerFrame * NumFrames];
        AudioBuffer ab = { ChannelsPerFrame, sizeof(buf), buf };
        AudioBufferList abl;
        abl.mNumberBuffers = 1;
        abl.mBuffers[0] = ab;

        UInt32 ioNumFrames = NumFrames;
        status = ExtAudioFileRead(caf, &ioNumFrames, &abl);
        if (noErr == status) {
            // process ioNumFrames here in buf
            if (0 == ioNumFrames) {
                // EOF!
                break;
            } else if (ioNumFrames < NumFrames) {
                // TODO: pad buf with zeroes out to NumFrames
            } else {
                float amp[NumFrames]; // scratch space
                doFFTReal(buf, amp, NumFrames);
            }
        } else {
            break; // read error; bail out instead of looping forever
        }
    }

    // later
    status = ExtAudioFileDispose(caf);
    if (noErr != status) { /* hmm */ }
}