FFmpeg API on iOS: "Resource temporarily unavailable"

I've spent hours trying to fix this:
I'm trying to use the FFmpeg API on iOS. My Xcode project builds and I can call FFmpeg API functions. I am writing code that decodes a video (without outputting anything for now), and I keep getting error -35: "Resource temporarily unavailable".
The input file is from the camera roll (.mov) and I'm using the MPEG-4 decoder. All I'm currently doing is reading data from the file, parsing it, and sending the parsed packets to the decoder. When I try to receive frames, all I get is this error. Does anyone know what I'm doing wrong?
+(void)test: (NSString*)filename outfile:(NSString*)outfilename {
/* register all the codecs */
avcodec_register_all();
AVCodec *codec;
AVCodecParserContext *parser;
AVCodecContext *c= NULL;
int frame_count;
FILE* f;
AVFrame* frame;
AVPacket* avpkt;
avpkt = av_packet_alloc();
//av_init_packet(avpkt);
char buf[1024];
uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
uint8_t *data;
size_t data_size;
/* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);
printf("Decode video file %s to %s\n", [filename cStringUsingEncoding:NSUTF8StringEncoding], [outfilename cStringUsingEncoding:NSUTF8StringEncoding]);
/* find the MPEG-4 video decoder */
codec = avcodec_find_decoder(AV_CODEC_ID_MPEG4);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
if (codec->capabilities & AV_CODEC_CAP_TRUNCATED)
c->flags |= AV_CODEC_FLAG_TRUNCATED; // we do not send complete frames
/* For some codecs, such as msmpeg4 and mpeg4, width and height
MUST be initialized there because this information is not
available in the bitstream. */
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
f = fopen([filename cStringUsingEncoding:NSUTF8StringEncoding], "rb");
if (!f) {
fprintf(stderr, "Could not open %s\n", [filename cStringUsingEncoding:NSUTF8StringEncoding]);
exit(1);
}
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame_count = 0;
parser = av_parser_init(codec->id);
if (!parser) {
fprintf(stderr, "parser not found\n");
exit(1);
}
while (!feof(f)) {
/* read raw data from the input file */
data_size = fread(inbuf, 1, INBUF_SIZE, f);
if (!data_size)
break;
/* use the parser to split the data into frames */
data = inbuf;
while (data_size > 0) {
int ret = av_parser_parse2(parser, c, &avpkt->data, &avpkt->size, data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
if (ret < 0) {
fprintf(stderr, "Error while parsing\n");
exit(1);
}
data += ret;
data_size -= ret;
if (avpkt->size){
char buf[1024];
ret = avcodec_send_packet(c, avpkt);
if (ret < 0) {
fprintf(stderr, "Error sending a packet for decoding\n");
continue;
}
while (ret >= 0) {
ret = avcodec_receive_frame(c, frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){
char e [1024];
av_strerror(ret, e, 1024);
fprintf(stderr, "Fail: %s !\n", e);
// ~~~~~~~~ This is where my program exits ~~~~~~~~~~~~~~~~~~~~~~~~~~~
return;
}
else if (ret < 0) {
fprintf(stderr, "Error during decoding\n");
exit(1);
}
}
}
}
}
/* some codecs, such as MPEG, transmit the I- and P-frames with a latency
of one frame; to get the last frames you would flush the decoder here by
sending a NULL packet and draining avcodec_receive_frame() */
fclose(f);
avcodec_close(c);
av_free(c);
av_frame_free(&frame);
printf("\n");
}

AVERROR(EAGAIN) is not an error; it just means output is not yet available, and you need to call avcodec_send_packet() a few more times before the first output becomes available from avcodec_receive_frame().
The output is buffered (delayed) to allow for B-frames and frame threading.
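For reference, a minimal sketch of the intended pattern (assuming the same parser/context setup as the code above): EAGAIN from avcodec_receive_frame() should send you back to the parser for more input, not out of the function.
/* drain loop: EAGAIN/EOF are normal conditions, not failures */
static int decode(AVCodecContext *c, AVFrame *frame, AVPacket *avpkt)
{
    int ret = avcodec_send_packet(c, avpkt);
    if (ret < 0)
        return ret; /* a real send error */
    for (;;) {
        ret = avcodec_receive_frame(c, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0; /* not an error: parse and send more data */
        if (ret < 0)
            return ret; /* a real decode error */
        /* a complete frame is now in `frame`; process it here */
    }
}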

Related

fread() from binary file then printf streamed string - not working

C newbie here. My goal: read a character stream with getchar() into an array, write the array to a binary file, then read the binary file back into the array and print the string to the console. I was successful with all parts except the output to the console.
Call to function:
if (c == '6')
loadDoc();
Write file function:
int saveDoc(char document[MAXSIZE], int size){
FILE *f;
f = fopen("document.bin", "wb");
if (!f) {
printf("Error during writing to file !\n");
}
else{
fwrite(&document, sizeof(document), size, f);
fclose(f);
}
return 0;
}
Read file function:
char loadDoc(){
char buffer[1000];
FILE *f;
f = fopen("document.bin", "rb");
if (!f) {
printf("Error during reading file !\n");
}
else {
printf("Size of buffer: %d\n", sizeof(buffer));
fread(&buffer, sizeof(buffer), 1, f);
printf("File cpntents: \n %s\n", buffer);
fclose(f);
}
return 0;
}
Output:
********************
1) Start New Document:
2) View Document:
3) Edit Document:
4) Delete Document:
5) Save Document:
6) Load Document:
7) Exit
********************
6
Size of buffer: 1000
File cpntents:
#p#
When I open file in notebook, actual content of file is:
#p# & P ÿÿÿÿ& ðýb µ# ú ~ù 3 ÿÿÿÿÿÿÿÿ1 þb B# 3   è# 3  =A
I expected the text I entered and saved to be printed back to the console when the file is read, but this code isn't working as expected.
Here is the corrected working solution (compiled with GCC):
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
void save_to_binary_file(const char* file_name, const char* text_data)
{
FILE* file = fopen(file_name, "wb");
if(file == NULL)
{
printf("Failed to open file %s\n", file_name);
return;
}
if(fwrite(text_data, sizeof(char), strlen(text_data) + 1, file) != (strlen(text_data) + 1))
puts("Failed to write all text data\n");
fclose(file);
}
char* load_binary_file_as_text(const char* file_name)
{
FILE* file = fopen(file_name, "rb");
if(file == NULL)
{
printf("Failed to open file %s\n", file_name);
return NULL;
}
fseek(file, 0, SEEK_END);
size_t file_size_in_bytes = ftell(file);
fseek(file, 0, SEEK_SET);
char* read_buffer = (char*)malloc(file_size_in_bytes);
if(fread(read_buffer, 1, file_size_in_bytes, file) != file_size_in_bytes)
puts("Failed to read all the bytes\n");
fclose(file);
return read_buffer;
}
int main()
{
save_to_binary_file("Data.data", "ABCD");
char* buffer = load_binary_file_as_text("Data.data");
printf("%s\n", buffer);
free(buffer);
return 0;
}
The problem was the '&' in the fwrite() call: document is an array parameter (i.e. a pointer), so &document is the address of that pointer, and the call wrote the pointer's bytes rather than the text. Removing it made the code work as expected. Note also that sizeof(document) on an array parameter is just sizeof(char *), so pass the real length instead.
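Applied to the original functions, a minimal corrected sketch (hedged; it assumes the caller passes the actual data length, since sizeof on an array parameter only yields the size of a pointer):
int saveDoc(char document[], int size) {
    FILE *f = fopen("document.bin", "wb");
    if (!f) {
        printf("Error during writing to file!\n");
        return -1;
    }
    /* write the data itself, not the address of the parameter */
    fwrite(document, 1, size, f);
    fclose(f);
    return 0;
}
int loadDoc(char buffer[], int size) {
    FILE *f = fopen("document.bin", "rb");
    if (!f) {
        printf("Error during reading file!\n");
        return -1;
    }
    size_t n = fread(buffer, 1, size - 1, f);
    buffer[n] = '\0'; /* terminate before printing as a string */
    printf("File contents:\n%s\n", buffer);
    fclose(f);
    return 0;
}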

v4l2src simple pipeline to c-application

My pipeline is like this
gst-launch-1.0 v4l2src ! videoconvert ! xvimagesink
and my code is like this
#include <gst/gst.h>
// easier to pass them as callbacks
typedef struct _CustomData{
GstElement *pipeline;
GstElement *source;
GstElement *convert;
GstElement *sink;
}CustomData;
// callback function
// here src is the v4l2src; new_pad is the GstPad that has just been added to the src element. This is usually the pad to which we want to link
// data is the pointer we provided when attaching to the signal.
static void pad_added_handler(GstElement *src, GstPad *new_pad,CustomData *data)
{
GstPad *sink_pad = gst_element_get_static_pad(data->convert, "sink");
GstPadLinkReturn ret;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
if(gst_pad_is_linked(sink_pad))
{
g_print("we are already linked; ignoring\n");
gst_object_unref(sink_pad);
return;
}
// check the new pad's type
// the videoconvert -> xvimagesink branch we built earlier only handles raw video, so we will not be able to link it to a pad producing anything else
//gst-pad_get_current_caps()- retrieves current capabilities of pad
new_pad_caps = gst_pad_get_current_caps(new_pad);
new_pad_struct = gst_caps_get_structure(new_pad_caps, 0);
new_pad_type = gst_structure_get_name(new_pad_struct);
if(!g_str_has_prefix(new_pad_type, "video/x-raw"))
{
g_print("new pad type %s is not raw video; ignoring\n", new_pad_type);
if(new_pad_caps != NULL)
gst_caps_unref(new_pad_caps);
gst_object_unref(sink_pad);
return;
}
// gst_pad_link tries to link two pads . the link must be specified from source to sink and both pads must be owned by elements residing in same pipeline
ret = gst_pad_link(new_pad, sink_pad);
if(GST_PAD_LINK_FAILED(ret))
{
g_print("linking failed for pad type %s\n", new_pad_type);
}
if(new_pad_caps !=NULL)
{
gst_caps_unref(new_pad_caps);
}
gst_object_unref(sink_pad);
}
int main(int argc, char *argv[])
{
GMainLoop *loop;
CustomData data;
GstBus *bus;
GstMessage *msg;
gboolean terminate = FALSE;
gst_init(&argc, &argv);
// loop = g_main_loop_new(NULL, FALSE);
// create the elements
data.source = gst_element_factory_make("v4l2src", "source");
data.convert = gst_element_factory_make("videoconvert", "convert");
data.sink = gst_element_factory_make("xvimagesink", "sink");
data.pipeline = gst_pipeline_new("new-pipeline");
if(!data.pipeline || !data.source || !data.convert || !data.sink)
{
g_printerr("Not all elements could be created\n");
return -1;
}
//we did not link source at this point of time, we will do it later
gst_bin_add_many(GST_BIN(data.pipeline), data.source, data.convert, data.sink, NULL);
// we link the convert element to the sink, but not to the source: we don't have source pads here yet, so for now videoconvert->sink is linked and the source is not
// gst_element_link(data.source, data.convert);
if(!gst_element_link(data.convert,data.sink))
{
g_printerr("elements could not be linked\n");
gst_object_unref(data.pipeline);
return -1;
}
// we set the device source
//g_object_set(source, "device", "/dev/video0", NULL);
// connect to the pad-added signal.
// we want to attach the pad-added signal of the source element; we use g_signal_connect and provide a callback function and a data pointer.
// when the source element has enough information to start producing data, it will create source pads and trigger the pad-added signal; at that point our callback is called
g_signal_connect(G_OBJECT(data.source), "pad-added", G_CALLBACK(pad_added_handler), &data );
//g_signal_connect(G_OBJECT(data.source), "pad-added", G_CALLBACK(handler), &data);
GstStateChangeReturn ret;
ret =gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (data.pipeline);
return -1;
}
// g_main_loop_run(loop);
/* Listen to the bus */
bus = gst_element_get_bus (data.pipeline);
do {
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
terminate = TRUE;
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
terminate = TRUE;
break;
case GST_MESSAGE_STATE_CHANGED:
/* We are only interested in state-changed messages from the pipeline */
if (GST_MESSAGE_SRC (msg) == GST_OBJECT (data.pipeline)) {
GstState old_state, new_state, pending_state;
gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
g_print ("Pipeline state changed from %s to %s:\n",
gst_element_state_get_name (old_state), gst_element_state_get_name (new_state));
}
break;
default:
/* We should not reach here */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
} while (!terminate);
/* Free resources */
gst_object_unref (bus);
gst_element_set_state(data.pipeline, GST_STATE_NULL);
gst_object_unref(data.pipeline);
return 0;
}
and I am getting an error like this:
Pipeline state changed from NULL to READY:
Pipeline state changed from READY to PAUSED:
Error received from element source: Internal data stream error.
Debugging information: gstbasesrc.c(3055): gst_base_src_loop (): /GstPipeline:new-pipeline/GstV4l2Src:source:
streaming stopped, reason not-linked (-1)
Please let me know what changes I should make to get my pipeline working. Thanks! The above code is based on the dynamic-pipeline example from the GStreamer tutorials; I don't understand where I am going wrong.
The following works, though:
#include <gst/gst.h>
int main(int argc, char *argv[])
{
GstElement *pipeline, *source,*filter, *convert, *sink;
GstBus *bus;
GstMessage *msg;
GstCaps *caps;
gst_init(&argc, &argv);
source = gst_element_factory_make("v4l2src", "source");
filter = gst_element_factory_make("capsfilter","filter");
convert = gst_element_factory_make("videoconvert", "convert");
sink = gst_element_factory_make("xvimagesink", "sink");
pipeline = gst_pipeline_new("pipe");
gst_bin_add_many(GST_BIN(pipeline), source, convert,sink, NULL);
gst_element_link_many(source,convert,sink,NULL);
caps = gst_caps_new_simple("video/x-raw", "format", G_TYPE_STRING, "YUY2", NULL);
g_object_set(G_OBJECT(filter), "caps", caps, NULL);
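// note: "filter" is never added to the pipeline or linked here, so these caps have no effect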
gst_element_set_state(pipeline,GST_STATE_PLAYING);
bus = gst_element_get_bus (pipeline);
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
break;
default:
/* We should not reach here because we only asked for ERRORs and EOS */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
/* Free resources */
gst_object_unref(bus);
gst_element_set_state(pipeline,GST_STATE_NULL);
gst_object_unref(pipeline);
}
Any ideas why it does not work when I link the pads dynamically?
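For what it's worth, a likely explanation: the pad-added signal is emitted by elements that create source pads dynamically (demuxers, decodebin, and so on). v4l2src has a single "always" source pad that exists from element creation, so pad-added never fires, the handler never runs, and the source is never linked to videoconvert, which is exactly what "streaming stopped, reason not-linked (-1)" reports. The second program works because it links the source statically. A minimal sketch of the same fix applied to the first program (replacing the g_signal_connect call and the convert-to-sink link):
/* v4l2src's src pad is an "always" pad, so link it statically;
   no pad-added callback is needed for this pipeline */
if (!gst_element_link_many(data.source, data.convert, data.sink, NULL))
{
    g_printerr("elements could not be linked\n");
    gst_object_unref(data.pipeline);
    return -1;
}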

How to edit a frame's content using ffmpeg and opencv?

I am trying to edit the content of one frame of an MP4 file using OpenCV and FFmpeg 3.3. However, I have run into problems: the width and the height of the video are zero, and some of the functions I started from are deprecated. I have replaced the old functions with their updated counterparts, but I still cannot extract a correct frame. Please help.
Can anyone show an example of extracting a frame from an MP4 file using FFmpeg 3.3?
#include "stdafx.h"
#include <iostream>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
// FFmpeg
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/pixdesc.h>
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
}
#define CODEC_TYPE_VIDEO AVMEDIA_TYPE_VIDEO
int main(int argc, char* argv[])
{
// initialize FFmpeg library
av_register_all();
// av_log_set_level(AV_LOG_DEBUG);
int ret;
// open input file context
AVFormatContext* inctx = nullptr;
//ret = avformat_open_input(&inctx, infile, nullptr, nullptr);
ret = avformat_open_input(&inctx, "C:\\car.mp4", nullptr, nullptr);
// retrieve input stream information
ret = avformat_find_stream_info(inctx, nullptr);
if (ret < 0) {
std::cerr << "fail to avformat_find_stream_info: ret=" << ret;
return 2;
}
// find primary video stream
AVCodec* vcodec = nullptr;
vcodec = avcodec_find_decoder(AV_CODEC_ID_MPEG4);
if (!vcodec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
const int vstrm_idx = ret;
AVStream* vstrm = inctx->streams[vstrm_idx];
// open video decoder context
AVCodecContext *c = NULL;
c = avcodec_alloc_context3(vcodec);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
if (avcodec_open2(c, vcodec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
// print input video stream information
// initialize sample scaler
c->pix_fmt = AV_PIX_FMT_YUV420P;
c->width = 1280;
c->height = 720;
if (vcodec->capabilities & CODEC_CAP_TRUNCATED)
c->flags |= CODEC_FLAG_TRUNCATED;
c->flags2 |= CODEC_FLAG2_FAST;
int width = 1280;
int height = 720;
SwsContext* swsctx = sws_getCachedContext(nullptr, width,
height, AV_PIX_FMT_YUV420P, width, height, AV_PIX_FMT_RGB32,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
}
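A likely reason the width and height come out as zero in the code above: the decoder context is allocated from the codec alone and never receives the stream's parameters; also, vstrm_idx is taken from the return value of avformat_find_stream_info(), which is not a stream index. A minimal sketch of the usual FFmpeg 3.x setup (hedged; variable names follow the question):
// find the video stream and its decoder, then copy the stream parameters
// (width, height, pix_fmt, ...) into the context before opening it
int vstrm_idx = av_find_best_stream(inctx, AVMEDIA_TYPE_VIDEO, -1, -1, &vcodec, 0);
if (vstrm_idx < 0) { /* no video stream found */ }
AVCodecContext* c = avcodec_alloc_context3(vcodec);
avcodec_parameters_to_context(c, inctx->streams[vstrm_idx]->codecpar);
if (avcodec_open2(c, vcodec, NULL) < 0) { /* handle error */ }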
I am not sure about writing the processed frames (I don't remember the details), but this worked for me:
extern "C" {
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include "libavcodec/avcodec.h"
#include <libavutil/opt.h>
#include <libavdevice/avdevice.h>
#include <libswscale/swscale.h>
#include <libavutil/mathematics.h>
}
#include "opencv2/opencv.hpp"
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
#define av_frame_alloc avcodec_alloc_frame
#endif
using namespace std;
using namespace cv;
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
char buf1[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_string(buf1, pkt->pts);
char buf2[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_string(buf2, pkt->dts);
char buf3[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_string(buf3, pkt->duration);
char buf4[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_time_string(buf4, pkt->pts, time_base);
char buf5[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_time_string(buf5, pkt->dts, time_base);
char buf6[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_time_string(buf6, pkt->duration, time_base);
printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
buf1, buf4,
buf2, buf5,
buf3, buf6,
pkt->stream_index);
}
int main(int argc, char **argv)
{
AVOutputFormat *ofmt = NULL;
AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
AVPacket pkt;
AVFrame *pFrame = NULL;
AVFrame *pFrameRGB = NULL;
int frameFinished = 0;
pFrame = av_frame_alloc();
pFrameRGB = av_frame_alloc();
const char *in_filename, *out_filename;
int ret, i;
in_filename = "../../TestClips/Audio Video Sync Test.mp4";
out_filename = "out.avi";
// Initialize FFMPEG
av_register_all();
// Get input file format context
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0)
{
fprintf(stderr, "Could not open input file '%s'", in_filename);
goto end;
}
// Extract streams description
if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0)
{
fprintf(stderr, "Failed to retrieve input stream information");
goto end;
}
// Print detailed information about the input or output format,
// such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
av_dump_format(ifmt_ctx, 0, in_filename, 0);
// Allocate an AVFormatContext for an output format.
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
if (!ofmt_ctx)
{
fprintf(stderr, "Could not create output context\n");
ret = AVERROR_UNKNOWN;
goto end;
}
// The output container format.
ofmt = ofmt_ctx->oformat;
// Allocating output streams
for (i = 0; i < ifmt_ctx->nb_streams; i++)
{
AVStream *in_stream = ifmt_ctx->streams[i];
AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
if (!out_stream)
{
fprintf(stderr, "Failed allocating output stream\n");
ret = AVERROR_UNKNOWN;
goto end;
}
ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
if (ret < 0)
{
fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
goto end;
}
out_stream->codec->codec_tag = 0;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
{
out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
}
// Open output file
if (!(ofmt->flags & AVFMT_NOFILE))
{
ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
if (ret < 0)
{
fprintf(stderr, "Could not open output file '%s'", out_filename);
goto end;
}
}
// Write output file header
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret < 0)
{
fprintf(stderr, "Error occurred when opening output file\n");
goto end;
}
// Search for input video codec info
AVCodec *in_codec = NULL;
AVCodecContext* avctx = NULL;
int video_stream_index = -1;
for (int i = 0; i < ifmt_ctx->nb_streams; i++)
{
if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
video_stream_index = i;
avctx = ifmt_ctx->streams[i]->codec;
in_codec = avcodec_find_decoder(avctx->codec_id);
if (!in_codec)
{
fprintf(stderr, "in codec not found\n");
exit(1);
}
break;
}
}
// Search for output video codec info
AVCodec *out_codec = NULL;
AVCodecContext* o_avctx = NULL;
int o_video_stream_index = -1;
for (int i = 0; i < ofmt_ctx->nb_streams; i++)
{
if (ofmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
o_video_stream_index = i;
out_codec = avcodec_find_encoder(ofmt_ctx->streams[i]->codec->codec_id);
if (!out_codec)
{
fprintf(stderr, "out codec not found\n");
exit(1);
}
o_avctx = avcodec_alloc_context3(out_codec);
o_avctx->height = avctx->height;
o_avctx->width = avctx->width;
o_avctx->sample_aspect_ratio = avctx->sample_aspect_ratio;
o_avctx->gop_size = 2;
o_avctx->max_b_frames = 2;
if (out_codec->pix_fmts)
{
o_avctx->pix_fmt = out_codec->pix_fmts[0];
}
else
{
o_avctx->pix_fmt = avctx->pix_fmt;
}
o_avctx->time_base = avctx->time_base;
if (avcodec_open2(o_avctx, out_codec, NULL) < 0)
{
fprintf(stderr, "cannot open encoder\n");
exit(1);
}
break;
}
}
// Show output format info
av_dump_format(ofmt_ctx, 0, out_filename, 1);
// openCV pixel format
enum AVPixelFormat pFormat = AV_PIX_FMT_RGB24;
// Data size
int numBytes = avpicture_get_size(pFormat, avctx->width, avctx->height);
// allocate buffer
uint8_t *buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
// fill frame structure
avpicture_fill((AVPicture *)pFrameRGB, buffer, pFormat, avctx->width, avctx->height);
// frame area
int y_size = avctx->width * avctx->height;
// Open input codec
avcodec_open2(avctx, in_codec, NULL);
// Main loop
while (1)
{
AVStream *in_stream, *out_stream;
ret = av_read_frame(ifmt_ctx, &pkt);
if (ret < 0)
{
break;
}
in_stream = ifmt_ctx->streams[pkt.stream_index];
out_stream = ofmt_ctx->streams[pkt.stream_index];
//log_packet(ifmt_ctx, &pkt, "in");
// copy packet
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
//log_packet(ofmt_ctx, &pkt, "out");
if (pkt.stream_index == video_stream_index)
{
avcodec_decode_video2(avctx, pFrame, &frameFinished, &pkt);
if (frameFinished)
{
struct SwsContext *img_convert_ctx;
img_convert_ctx = sws_getCachedContext(NULL,
avctx->width,
avctx->height,
avctx->pix_fmt,
avctx->width,
avctx->height,
AV_PIX_FMT_BGR24,
SWS_BICUBIC,
NULL,
NULL,
NULL);
sws_scale(img_convert_ctx,
((AVPicture*)pFrame)->data,
((AVPicture*)pFrame)->linesize,
0,
avctx->height,
((AVPicture *)pFrameRGB)->data,
((AVPicture *)pFrameRGB)->linesize);
sws_freeContext(img_convert_ctx);
// Do some image processing
cv::Mat img(pFrame->height, pFrame->width, CV_8UC3, pFrameRGB->data[0], pFrameRGB->linesize[0]);
cv::GaussianBlur(img, img, Size(5, 5), 3);
cv::imshow("Display", img);
cv::waitKey(5);
// --------------------------------
// Transform back to initial format
// --------------------------------
img_convert_ctx = sws_getCachedContext(NULL,
avctx->width,
avctx->height,
AV_PIX_FMT_BGR24,
avctx->width,
avctx->height,
avctx->pix_fmt,
SWS_BICUBIC,
NULL,
NULL,
NULL);
sws_scale(img_convert_ctx,
((AVPicture*)pFrameRGB)->data,
((AVPicture*)pFrameRGB)->linesize,
0,
avctx->height,
((AVPicture *)pFrame)->data,
((AVPicture *)pFrame)->linesize);
int got_packet = 0;
AVPacket enc_pkt = { 0 };
av_init_packet(&enc_pkt);
avcodec_encode_video2(o_avctx, &enc_pkt, pFrame, &got_packet);
if (o_avctx->coded_frame->pts != AV_NOPTS_VALUE)
{
enc_pkt.pts = av_rescale_q(o_avctx->coded_frame->pts, o_avctx->time_base, ofmt_ctx->streams[video_stream_index]->time_base);
}
if (o_avctx->coded_frame->key_frame)
{
enc_pkt.flags |= AV_PKT_FLAG_KEY;
}
av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
av_packet_unref(&enc_pkt);
sws_freeContext(img_convert_ctx);
}
}
else // write sound frame
{
ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
}
if (ret < 0)
{
fprintf(stderr, "Error muxing packet\n");
break;
}
// Decrease packet ref counter
av_packet_unref(&pkt);
}
av_write_trailer(ofmt_ctx);
end:
avcodec_close(avctx);
avcodec_close(o_avctx);
av_free(pFrame);
av_free(pFrameRGB);
avformat_close_input(&ifmt_ctx);
// close output
if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
{
avio_closep(&ofmt_ctx->pb);
}
avformat_free_context(ofmt_ctx);
if (ret < 0 && ret != AVERROR_EOF)
{
char buf_err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
av_make_error_string(buf_err, AV_ERROR_MAX_STRING_SIZE, ret);
fprintf(stderr, "Error occurred: %s\n", buf_err);
return 1;
}
return 0;
}
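Since the question mentions deprecated functions: avcodec_decode_video2() and avcodec_encode_video2() were superseded by the send/receive API (FFmpeg 3.1+). A hedged sketch of how the encoding step above could look with the newer API:
// send the processed frame, then drain any packets the encoder produces
int eret = avcodec_send_frame(o_avctx, pFrame);
while (eret >= 0)
{
    AVPacket enc_pkt = { 0 };
    eret = avcodec_receive_packet(o_avctx, &enc_pkt);
    if (eret == AVERROR(EAGAIN) || eret == AVERROR_EOF)
        break; // encoder needs more frames, or is fully drained
    av_packet_rescale_ts(&enc_pkt, o_avctx->time_base,
                         ofmt_ctx->streams[video_stream_index]->time_base);
    enc_pkt.stream_index = video_stream_index;
    av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
    av_packet_unref(&enc_pkt);
}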

Ice cast source client for ios

I have been trying to create a source client for Icecast on iOS. I am able to connect to the socket using AsyncSocket, and I can write data to the server. The Icecast configuration is set up for MP3 format, but the MP3 file written to the server is corrupt. I am providing some code snippets.
Header:
NSString *string = #"SOURCE /sync HTTP/1.0\r\n"
"Authorization: Basic c291cmNlOmhhY2ttZQ==\r\n"
"User-Agent: butt-0.1.12\r\n"
"User-Agent: butt-0.1.12\r\n"
"content-type: audio/mpeg\r\n"
"ice-name: sync's Stream\r\n"
"ice-public: 0\r\n"
"ice-genre: Rock\r\n"
"ice-description: This is my server description\r\n"
"Connection: keep-alive\r\n"
"ice-audio-info: ice-samplerate=44100;ice-bitrate=48;ice-channels=2\r\n\r\n";
NSData *data = [string dataUsingEncoding:NSUTF8StringEncoding];
//sending http request to write the header
NSLog(#"Sending HTTP Request.");
[socket writeData:data withTimeout:-1 tag:1];
//write buffer data to server
[socket writeData:self.dataBuffer withTimeout:-1 tag:1];
For recording I am using AQRecorder, with the following code:
void AQRecorder::MyInputBufferHandler( void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumPackets,
const AudioStreamPacketDescription* inPacketDesc)
{
AQRecorder *aqr = (AQRecorder *)inUserData;
try {
if (inNumPackets > 0) {
// write packets to file
XThrowIfError(AudioFileWritePackets(aqr->mRecordFile, FALSE, inBuffer->mAudioDataByteSize,
inPacketDesc, aqr->mRecordPacket, &inNumPackets, inBuffer->mAudioData),
"AudioFileWritePackets failed");
aqr->mRecordPacket += inNumPackets;
NSLog(#"size = %u",(unsigned int)inBuffer->mAudioDataByteSize);
data = [[[NSData alloc]initWithBytes:inBuffer->mAudioData length:inBuffer->mAudioDataByteSize]retain];
server *srv = [[server alloc]init];
srv.dataBuffer=data;
[srv connecting];
}
// if we're not stopping, re-enqueue the buffer so that it gets filled again
if (aqr->IsRunning())
XThrowIfError(AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
} catch (CAXException e) {
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
}
void AQRecorder::StartRecord(CFStringRef inRecordFile)
{
// server *srv=[[server alloc]init];
// [srv connecting];
int i, bufferByteSize;
UInt32 size;
CFURLRef url = nil;
try {
mFileName = CFStringCreateCopy(kCFAllocatorDefault, inRecordFile);
// // specify the recording format
// SetupAudioFormat(kAudioFormatMPEG4AAC);
// specify the recording format, use hardware AAC if available
// otherwise use IMA4
if(IsAACHardwareEncoderAvailable())
SetupAudioFormat(kAudioFormatMPEG4AAC);
else
SetupAudioFormat(kAudioFormatAppleIMA4);
// create the queue
XThrowIfError(AudioQueueNewInput(
&mRecordFormat,
MyInputBufferHandler,
this /* userData */,
NULL /* run loop */, NULL /* run loop mode */,
0 /* flags */, &mQueue), "AudioQueueNewInput failed");
// get the record format back from the queue's audio converter --
// the file may require a more specific stream description than was necessary to create the encoder.
mRecordPacket = 0;
size = sizeof(mRecordFormat);
XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_StreamDescription,
&mRecordFormat, &size), "couldn't get queue's format");
NSString *recordFile = [NSTemporaryDirectory() stringByAppendingPathComponent: (NSString*)inRecordFile];
//url = CFURLCreateWithString(kCFAllocatorDefault, (CFStringRef)recordFile, NULL);
url = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, (CFStringRef)recordFile, kCFURLPOSIXPathStyle, false);
// create the audio file
OSStatus status = AudioFileCreateWithURL(url, kAudioFileCAFType, &mRecordFormat, kAudioFileFlags_EraseFile, &mRecordFile);
CFRelease(url);
XThrowIfError(status, "AudioFileCreateWithURL failed");
// copy the cookie first to give the file object as much info as we can about the data going in
// not necessary for pcm, but required for some compressed audio
CopyEncoderCookieToFile();
// allocate and enqueue buffers
bufferByteSize = ComputeRecordBufferSize(&mRecordFormat, kBufferDurationSeconds); // enough bytes for half a second
for (i = 0; i < kNumberRecordBuffers; ++i) {
XThrowIfError(AudioQueueAllocateBuffer(mQueue, bufferByteSize, &mBuffers[i]),
"AudioQueueAllocateBuffer failed");
XThrowIfError(AudioQueueEnqueueBuffer(mQueue, mBuffers[i], 0, NULL),
"AudioQueueEnqueueBuffer failed");
}
// start the queue
mIsRunning = true;
XThrowIfError(AudioQueueStart(mQueue, NULL), "AudioQueueStart failed");
}
catch (CAXException e) {
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
catch (...) {
fprintf(stderr, "An unknown error occurred\n");;
}
}
Do I need to change the format I write to the server?
You're not sending MP3 data, you're sending AAC or M4A data. I don't believe Icecast supports M4A. Are you actually using Icecast or some other server?
For AAC, your Content-Type header is wrong. Try audio/aac, audio/aacp, audio/mp4 or audio/mpeg4-generic.
Also, you only need one User-Agent header, and you should pick a value that identifies the software you are writing rather than copying someone else's. If the protocol ever needs to be adjusted for your client in the future, that is only possible if your code sends its own user-agent string.
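As a hedged sketch, the request header for the AAC case might look like this (the mount point and credentials are the question's placeholders; the user-agent string is a made-up example):
// one User-Agent, and a Content-Type matching the actual codec (AAC here)
NSString *string = @"SOURCE /sync HTTP/1.0\r\n"
    "Authorization: Basic c291cmNlOmhhY2ttZQ==\r\n"
    "User-Agent: MyOwnSourceClient/1.0\r\n"
    "Content-Type: audio/aac\r\n"
    "ice-name: sync's Stream\r\n"
    "ice-public: 0\r\n\r\n";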

clicking/tapping between buffers for AudioQueue

As you can see from the code, within my callback I extract the audio data, place it into NSData, and send it off to another class that uploads it to the server. This all works, meaning the server receives and plays the audio data. However, there is a clicking or tapping noise between the buffers. I am hoping someone can show me what is causing it and how to fix it.
I have read other related postings, but they all seemed to involve only one buffer, with adding more buffers as the fix. I am already using three buffers and have tried adjusting that number, which did not fix it.
AQRecorder.mm
#include "AQRecorder.h"
RestClient * restClient;
NSData* data;
// ____________________________________________________________________________________
// Determine the size, in bytes, of a buffer necessary to represent the supplied number
// of seconds of audio data.
int AQRecorder::ComputeRecordBufferSize(const AudioStreamBasicDescription *format, float seconds)
{
int packets, frames, bytes = 0;
try {
frames = (int)ceil(seconds * format->mSampleRate);
if (format->mBytesPerFrame > 0)
bytes = frames * format->mBytesPerFrame;
else {
UInt32 maxPacketSize;
if (format->mBytesPerPacket > 0)
maxPacketSize = format->mBytesPerPacket; // constant packet size
else {
UInt32 propertySize = sizeof(maxPacketSize);
XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_MaximumOutputPacketSize, &maxPacketSize,
&propertySize), "couldn't get queue's maximum output packet size");
}
if (format->mFramesPerPacket > 0)
packets = frames / format->mFramesPerPacket;
else
packets = frames; // worst-case scenario: 1 frame in a packet
if (packets == 0) // sanity check
packets = 1;
bytes = packets * maxPacketSize;
}
} catch (CAXException e) {
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
return 0;
}
return bytes;
}
// ____________________________________________________________________________________
// AudioQueue callback function, called when an input buffer has been filled.
void AQRecorder::MyInputBufferHandler( void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumPackets,
const AudioStreamPacketDescription* inPacketDesc)
{
AQRecorder *aqr = (AQRecorder *)inUserData;
try {
if (inNumPackets > 0) {
// write packets to file
// XThrowIfError(AudioFileWritePackets(aqr->mRecordFile, FALSE, inBuffer->mAudioDataByteSize,
// inPacketDesc, aqr->mRecordPacket, &inNumPackets, inBuffer->mAudioData),
// "AudioFileWritePackets failed");
aqr->mRecordPacket += inNumPackets;
// int numBytes = inBuffer->mAudioDataByteSize;
// SInt8 *testBuffer = (SInt8*)inBuffer->mAudioData;
//
// for (int i=0; i < numBytes; i++)
// {
// SInt8 currentData = testBuffer[i];
// printf("Current data in testbuffer is %d", currentData);
//
// NSData * temp = [NSData dataWithBytes:currentData length:sizeof(currentData)];
// }
data=[[NSData dataWithBytes:inBuffer->mAudioData length:inBuffer->mAudioDataByteSize]retain];
[restClient uploadAudioData:data url:nil];
}
// if we're not stopping, re-enqueue the buffer so that it gets filled again
if (aqr->IsRunning())
XThrowIfError(AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
} catch (CAXException e) {
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
}
AQRecorder::AQRecorder()
{
mIsRunning = false;
mRecordPacket = 0;
data = [[NSData alloc]init];
restClient = [[RestClient sharedManager]retain];
}
AQRecorder::~AQRecorder()
{
AudioQueueDispose(mQueue, TRUE);
AudioFileClose(mRecordFile);
if (mFileName){
CFRelease(mFileName);
}
[restClient release];
[data release];
}
// ____________________________________________________________________________________
// Copy a queue's encoder's magic cookie to an audio file.
void AQRecorder::CopyEncoderCookieToFile()
{
UInt32 propertySize;
// get the magic cookie, if any, from the converter
OSStatus err = AudioQueueGetPropertySize(mQueue, kAudioQueueProperty_MagicCookie, &propertySize);
// we can get a noErr result and also a propertySize == 0
// -- if the file format does support magic cookies, but this file doesn't have one.
if (err == noErr && propertySize > 0) {
Byte *magicCookie = new Byte[propertySize];
UInt32 magicCookieSize;
XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_MagicCookie, magicCookie, &propertySize), "get audio converter's magic cookie");
magicCookieSize = propertySize; // the converter lies and tells us the wrong size
// now set the magic cookie on the output file
UInt32 willEatTheCookie = false;
// the converter wants to give us one; will the file take it?
err = AudioFileGetPropertyInfo(mRecordFile, kAudioFilePropertyMagicCookieData, NULL, &willEatTheCookie);
if (err == noErr && willEatTheCookie) {
err = AudioFileSetProperty(mRecordFile, kAudioFilePropertyMagicCookieData, magicCookieSize, magicCookie);
XThrowIfError(err, "set audio file's magic cookie");
}
delete[] magicCookie;
}
}
void AQRecorder::SetupAudioFormat(UInt32 inFormatID)
{
memset(&mRecordFormat, 0, sizeof(mRecordFormat));
UInt32 size = sizeof(mRecordFormat.mSampleRate);
XThrowIfError(AudioSessionGetProperty( kAudioSessionProperty_CurrentHardwareSampleRate,
&size,
&mRecordFormat.mSampleRate), "couldn't get hardware sample rate");
// override the sample rate to 8 kHz instead of the device sample rate
mRecordFormat.mSampleRate = 8000.0;
size = sizeof(mRecordFormat.mChannelsPerFrame);
XThrowIfError(AudioSessionGetProperty( kAudioSessionProperty_CurrentHardwareInputNumberChannels,
&size,
&mRecordFormat.mChannelsPerFrame), "couldn't get input channel count");
// mRecordFormat.mChannelsPerFrame = 1;
mRecordFormat.mFormatID = inFormatID;
if (inFormatID == kAudioFormatLinearPCM)
{
// if we want pcm, default to signed 16-bit little-endian
mRecordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
mRecordFormat.mBitsPerChannel = 16;
mRecordFormat.mBytesPerPacket = mRecordFormat.mBytesPerFrame = (mRecordFormat.mBitsPerChannel / 8) * mRecordFormat.mChannelsPerFrame;
mRecordFormat.mFramesPerPacket = 1;
}
if (inFormatID == kAudioFormatULaw) {
NSLog(#"is ulaw");
mRecordFormat.mSampleRate = 8000.0;
mRecordFormat.mFormatFlags = 0;
mRecordFormat.mFramesPerPacket = 1;
mRecordFormat.mChannelsPerFrame = 1;
mRecordFormat.mBitsPerChannel = 8;
mRecordFormat.mBytesPerPacket = 1;
mRecordFormat.mBytesPerFrame = 1;
}
}
NSString * GetDocumentDirectory(void)
{
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *basePath = ([paths count] > 0) ? [paths objectAtIndex:0] : nil;
return basePath;
}
void AQRecorder::StartRecord(CFStringRef inRecordFile)
{
int i, bufferByteSize;
UInt32 size;
CFURLRef url;
try {
mFileName = CFStringCreateCopy(kCFAllocatorDefault, inRecordFile);
// specify the recording format
SetupAudioFormat(kAudioFormatULaw /*kAudioFormatLinearPCM*/);
// create the queue
XThrowIfError(AudioQueueNewInput(
&mRecordFormat,
MyInputBufferHandler,
this /* userData */,
NULL /* run loop */, NULL /* run loop mode */,
0 /* flags */, &mQueue), "AudioQueueNewInput failed");
// get the record format back from the queue's audio converter --
// the file may require a more specific stream description than was necessary to create the encoder.
mRecordPacket = 0;
size = sizeof(mRecordFormat);
XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_StreamDescription,
&mRecordFormat, &size), "couldn't get queue's format");
NSString *basePath = GetDocumentDirectory();
NSString *recordFile = [basePath /*NSTemporaryDirectory()*/ stringByAppendingPathComponent: (NSString*)inRecordFile];
url = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, (CFStringRef)recordFile, kCFURLPOSIXPathStyle, false);
// create the audio file
XThrowIfError(AudioFileCreateWithURL(url, kAudioFileCAFType, &mRecordFormat, kAudioFileFlags_EraseFile,
&mRecordFile), "AudioFileCreateWithURL failed");
CFRelease(url);
// copy the cookie first to give the file object as much info as we can about the data going in
// not necessary for pcm, but required for some compressed audio
CopyEncoderCookieToFile();
// allocate and enqueue buffers
bufferByteSize = ComputeRecordBufferSize(&mRecordFormat, kBufferDurationSeconds); // enough bytes for half a second
for (i = 0; i < kNumberRecordBuffers; ++i) {
XThrowIfError(AudioQueueAllocateBuffer(mQueue, bufferByteSize, &mBuffers[i]),
"AudioQueueAllocateBuffer failed");
XThrowIfError(AudioQueueEnqueueBuffer(mQueue, mBuffers[i], 0, NULL),
"AudioQueueEnqueueBuffer failed");
}
// start the queue
mIsRunning = true;
XThrowIfError(AudioQueueStart(mQueue, NULL), "AudioQueueStart failed");
}
catch (CAXException &e) {
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
catch (...) {
fprintf(stderr, "An unknown error occurred\n");
}
}
void AQRecorder::StopRecord()
{
// end recording
mIsRunning = false;
// XThrowIfError(AudioQueueReset(mQueue), "AudioQueueStop failed");
XThrowIfError(AudioQueueStop(mQueue, true), "AudioQueueStop failed");
// a codec may update its cookie at the end of an encoding session, so reapply it to the file now
CopyEncoderCookieToFile();
if (mFileName)
{
CFRelease(mFileName);
mFileName = NULL;
}
AudioQueueDispose(mQueue, true);
AudioFileClose(mRecordFile);
}
I changed my #define kBufferDurationSeconds from .5 to 5.0, and although the clicking is still there, it is a lot less noticeable.
Please still post if you have suggestions or an answer, as this is not a fix, merely a workaround that is somewhat better than before.
I also tried appending several buffers' worth of data together before sending it to the server. This also seems to have helped.
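A hedged sketch of that batching workaround, accumulating several callbacks' worth of audio before each upload (kUploadThresholdBytes is a made-up name; tune it to taste):
// inside MyInputBufferHandler, instead of uploading every buffer immediately
static NSMutableData *pending = nil;
static const NSUInteger kUploadThresholdBytes = 32 * 1024; // hypothetical threshold
if (!pending) pending = [[NSMutableData alloc] init];
[pending appendBytes:inBuffer->mAudioData length:inBuffer->mAudioDataByteSize];
if ([pending length] >= kUploadThresholdBytes) {
    NSData *chunk = [[pending copy] autorelease]; // snapshot for the upload
    [restClient uploadAudioData:chunk url:nil];
    [pending setLength:0]; // start a fresh batch
}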
