I'm developing a C application (under Linux) that receives a raw H.264 stream and should visualize it using the GStreamer API.
I am a newbie with GStreamer, so I may be making obvious mistakes or ignoring well-known things; sorry about that.
I got hold of a raw H.264 file (which I knew was in exactly the format I need) and developed an application that plays it. It works correctly with appsrc in pull mode (when need-data is called, I get new data from the file and perform push-buffer).
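That pull-mode version boiled down to a need-data handler along these lines (a rough sketch of the gstreamer-0.10 pattern rather than my exact code; read_file_chunk() just stands in for the file-reading part):
static void on_need_data (GstAppSrc *src, guint length, gpointer user_data)
{
    guint8 *chunk = NULL;
    /* read_file_chunk() stands in for however the file data is fetched */
    gsize len = read_file_chunk (&chunk, length);
    if (len == 0) {
        gst_app_src_end_of_stream (src);
        return;
    }
    GstBuffer *buffer = gst_buffer_new ();
    GST_BUFFER_DATA (buffer) = chunk;
    GST_BUFFER_SIZE (buffer) = len;
    gst_app_src_push_buffer (src, buffer);
}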
Now I'm trying to do the exact same thing in push mode, basically because I don't have a file but a live stream. So I have a method in my code that gets called every time new data (in the form of a uint8_t buffer) arrives, and that is my video source.
I googled my problem and had a look at the documentation, but I found no simple code snippets for my use case, even though it seems a very simple one. I understood that I have to initialize the pipeline and appsrc and then only call push-buffer when I have new data.
Well, I developed two methods: init_stream() for pipeline/appsrc initialization and populateApp(void *inBuf, size_t len) to send data when it is available.
It compiles and runs correctly, but no video is shown:
struct _App
{
GstAppSrc *appsrc;
GstPipeline *pipeline;
GstElement *h264parse;
GstElement *mfw_vpudecoder;
GstElement *mfw_v4lsin;
GMainLoop *loop;
};
typedef struct _App App;
App s_app;
App *app = &s_app;
static gboolean bus_message (GstBus * bus, GstMessage * message, App * app)
{
GST_DEBUG ("got message %s", gst_message_type_get_name (GST_MESSAGE_TYPE (message)));
switch (GST_MESSAGE_TYPE (message)) {
case GST_MESSAGE_ERROR:
g_error ("received error");
g_main_loop_quit (app->loop);
break;
case GST_MESSAGE_EOS:
g_main_loop_quit (app->loop);
break;
default:
break;
}
return TRUE;
}
int init_stream()
{
GstBus *bus;
gst_init (NULL, NULL);
fprintf(stderr, "gst_init done\n");
/* create a mainloop to get messages */
app->loop = g_main_loop_new (NULL, TRUE);
fprintf(stderr, "app loop initialized\n");
app->pipeline = gst_parse_launch("appsrc name=mysource ! h264parse ! mfw_vpudecoder ! mfw_v4lsin", NULL);
app->appsrc = gst_bin_get_by_name (GST_BIN(app->pipeline), "mysource");
gst_app_src_set_stream_type(app->appsrc, GST_APP_STREAM_TYPE_STREAM);
gst_app_src_set_emit_signals(app->appsrc, TRUE);
fprintf(stderr, "Pipeline and appsrc initialized\n");
/* Create Bus from pipeline */
bus = gst_pipeline_get_bus(app->pipeline);
fprintf(stderr, "bus created\n");
/* add watch for messages */
gst_bus_add_watch (bus, (GstBusFunc) bus_message, app);
gst_object_unref(bus);
fprintf(stderr, "bus_add_watch done\n");
GstCaps* video_caps = gst_caps_new_simple ("video/x-h264",
"width", G_TYPE_INT, 800,
"height", G_TYPE_INT, 480,
"framerate", GST_TYPE_FRACTION, 25,
1, NULL);
gst_app_src_set_caps(GST_APP_SRC(app->appsrc), video_caps);
/* go to playing and wait in a mainloop. */
gst_element_set_state ((GstElement*) app->pipeline, GST_STATE_PLAYING);
fprintf(stderr, "gst_element_set_state play\n");
/* this mainloop is stopped when we receive an error or EOS */
g_main_loop_run (app->loop);
fprintf(stderr, "g_main_loop_run called\n");
gst_element_set_state ((GstElement*) app->pipeline, GST_STATE_NULL);
fprintf(stderr, "gst_element_set_state GST_STATE_NULL\n");
/* free the file */
// g_mapped_file_unref (app->file);
gst_object_unref (bus);
g_main_loop_unref (app->loop);
return 0;
}
void populateApp(void *inBuf , size_t len) {
guint8 *_buffer = (guint8*) inBuf;
GstFlowReturn ret;
GstBuffer *buffer = gst_buffer_new();
GstCaps* video_caps = gst_caps_new_simple ("video/x-h264",
"width", G_TYPE_INT, 800,
"height", G_TYPE_INT, 480,
"framerate", GST_TYPE_FRACTION, 25,
1, NULL);
gst_buffer_set_caps(buffer, video_caps);
GST_BUFFER_DATA (buffer) = _buffer;
GST_BUFFER_SIZE (buffer) = len;
// g_signal_emit_by_name (app->appsrc, "push-buffer", buffer, &ret);
ret = gst_app_src_push_buffer(GST_APP_SRC(app->appsrc), buffer);
gst_buffer_unref (buffer);
}
As I said, I am a total newbie at GStreamer, so there's a lot of cut-and-paste code from the internet here, but IMHO it should work.
Do you see any issues?
It's not clear how you are calling populateApp, but you need to call it repeatedly as you have data to push to your pipeline. This can be done in a separate thread from the one blocked by g_main_loop_run, or you can restructure your program to avoid using GMainLoop.
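For example, a rough sketch of the threaded variant (recv_stream_chunk() is a placeholder for however your stream actually delivers data) could look like this:
#include <pthread.h>
static void *feeder_thread (void *arg)
{
    for (;;) {
        guint8 *chunk = g_malloc (4096);
        /* recv_stream_chunk() stands in for your real data source */
        size_t len = recv_stream_chunk (chunk, 4096);
        if (len == 0) {
            g_free (chunk);
            break;
        }
        populateApp (chunk, len);  /* wraps the bytes and push-buffers them into appsrc */
    }
    gst_app_src_end_of_stream (app->appsrc);
    return NULL;
}
/* started from init_stream() just before g_main_loop_run(): */
pthread_t feeder;
pthread_create (&feeder, NULL, feeder_thread, NULL);
Note that populateApp as written wraps inBuf without copying it, so each chunk has to stay valid until the pipeline is done with it (or be copied into the GstBuffer).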
My pipeline is like this
gst-launch-1.0 v4l2src ! videoconvert ! xvimagesink
and my code is like this
#include <gst/gst.h>
// group the elements in a structure so it is easier to pass them to callbacks
typedef struct _CustomData{
GstElement *pipeline;
GstElement *source;
GstElement *convert;
GstElement *sink;
}CustomData;
// callback function
// here src is the v4l2src, new_pad is the GstPad that has just been added to the src element. This is usually the pad to which we want to link
// data is the pointer we provided when attaching to the signal.
static void pad_added_handler(GstElement *src, GstPad *new_pad,CustomData *data)
{
GstPad *sink_pad = gst_element_get_static_pad(data->convert, "sink");
GstPadLinkReturn ret;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
if(gst_pad_is_linked(sink_pad))
{
g_print("we are already linked, ignoring\n");
}
// check the new pad types
// we have previously created a piece of pipeline which deals with videoconvert linked to xvimagesink, and we will not be able to link it to a pad producing anything other than raw video.
// gst_pad_get_current_caps() retrieves the current capabilities of the pad
new_pad_caps = gst_pad_get_current_caps(new_pad);
new_pad_struct = gst_caps_get_structure(new_pad_caps, 0);
new_pad_type = gst_structure_get_name(new_pad_struct);
if(!g_str_has_prefix(new_pad_type, "video/x-raw"))
{
g_print("Pad type %s is not raw video\n", new_pad_type);
}
// gst_pad_link() tries to link two pads. The link must be specified from source to sink, and both pads must be owned by elements residing in the same pipeline
ret = gst_pad_link(new_pad, sink_pad);
if(GST_PAD_LINK_FAILED(ret))
{
g_print("Link failed for pad type %s\n", new_pad_type);
}
if(new_pad_caps !=NULL)
{
gst_caps_unref(new_pad_caps);
}
gst_object_unref(sink_pad);
}
int main(int argc, char *argv[])
{
GMainLoop *loop;
CustomData data;
GstBus *bus;
GstMessage *msg;
gboolean terminate = FALSE;
gst_init(&argc, &argv);
// loop = g_main_loop_new(NULL, FALSE);
// create the elements
data.source = gst_element_factory_make("v4l2src", "source");
data.convert = gst_element_factory_make("videoconvert", "convert");
data.sink = gst_element_factory_make("xvimagesink", "sink");
data.pipeline = gst_pipeline_new("new-pipeline");
if(!data.pipeline || !data.source || !data.convert || !data.sink)
{
g_printerr("Not all elements could be created\n");
return -1;
}
// we do not link the source at this point; we will do it later
gst_bin_add_many(GST_BIN(data.pipeline), data.source, data.convert, data.sink, NULL);
// we link the convert element to the sink, but not to the source; the source has no pads yet, so for now only videoconvert -> sink is linked and the source is left unlinked
// gst_element_link(data.source, data.convert);
if(!gst_element_link(data.convert,data.sink))
{
g_printerr("elements could not be linked\n");
gst_object_unref(data.pipeline);
return -1;
}
// we set the device source
//g_object_set(source, "device", "/dev/video0", NULL);
//connect to pad added signal.
// we want to attach pad added signal to source element. to do so, we are using g_signal_connect and provide callback function and datapointer.
// when source element has enough information to start producing data, it will create source pads and trigger the pad added signal. at this point, our callback is called
g_signal_connect(G_OBJECT(data.source), "pad-added", G_CALLBACK(pad_added_handler), &data );
//g_signal_connect(G_OBJECT(data.source), "pad-added", G_CALLBACK(handler), &data);
GstStateChangeReturn ret;
ret =gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline to the playing state.\n");
gst_object_unref (data.pipeline);
return -1;
}
// g_main_loop_run(loop);
/* Listen to the bus */
bus = gst_element_get_bus (data.pipeline);
do {
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
terminate = TRUE;
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
terminate = TRUE;
break;
case GST_MESSAGE_STATE_CHANGED:
/* We are only interested in state-changed messages from the pipeline */
if (GST_MESSAGE_SRC (msg) == GST_OBJECT (data.pipeline)) {
GstState old_state, new_state, pending_state;
gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
g_print ("Pipeline state changed from %s to %s:\n",
gst_element_state_get_name (old_state), gst_element_state_get_name (new_state));
}
break;
default:
/* We should not reach here */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
} while (!terminate);
/* Free resources */
gst_object_unref (bus);
gst_element_set_state(data.pipeline, GST_STATE_NULL);
gst_object_unref(data.pipeline);
return 0;
}
and I am getting an error like this:
Pipeline state changed from NULL to READY:
Pipeline state changed from READY to PAUSED:
Error received from element source: Internal data stream error.
Debugging information: gstbasesrc.c(3055): gst_base_src_loop (): /GstPipeline:new-pipeline/GstV4l2Src:source:
streaming stopped, reason not-linked (-1)
Please let me know what changes I should make to get my pipeline working. Thanks! The above code is based on the dynamic pipeline example from the GStreamer tutorials; I don't understand where I am going wrong.
The following works, though:
#include <gst/gst.h>
int main(int argc, char *argv[])
{
GstElement *pipeline, *source,*filter, *convert, *sink;
GstBus *bus;
GstMessage *msg;
GstCaps *caps;
gst_init(&argc, &argv);
source = gst_element_factory_make("v4l2src", "source");
filter = gst_element_factory_make("capsfilter","filter");
convert = gst_element_factory_make("videoconvert", "convert");
sink = gst_element_factory_make("xvimagesink", "sink");
pipeline = gst_pipeline_new("pipe");
gst_bin_add_many(GST_BIN(pipeline), source, convert,sink, NULL);
gst_element_link_many(source,convert,sink,NULL);
caps = gst_caps_new_simple("video/x-raw", "format", G_TYPE_STRING, "YUY2", NULL);
g_object_set(G_OBJECT(filter), "caps", caps, NULL);
gst_element_set_state(pipeline,GST_STATE_PLAYING);
bus = gst_element_get_bus (pipeline);
msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_ERROR | GST_MESSAGE_EOS);
/* Parse message */
if (msg != NULL) {
GError *err;
gchar *debug_info;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_ERROR:
gst_message_parse_error (msg, &err, &debug_info);
g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
g_clear_error (&err);
g_free (debug_info);
break;
case GST_MESSAGE_EOS:
g_print ("End-Of-Stream reached.\n");
break;
default:
/* We should not reach here because we only asked for ERRORs and EOS */
g_printerr ("Unexpected message received.\n");
break;
}
gst_message_unref (msg);
}
/* Free resources */
gst_object_unref(bus);
gst_element_set_state(pipeline,GST_STATE_NULL);
gst_object_unref(pipeline);
}
Any idea why it does not work when I link the pads dynamically?
I'm fairly new to iOS programming and Objective-C. I have an embedded system that runs a program written in C and sends UDP packets to the iPhone app I am working on.
I am able to read the packet data (NSData) if it only contains a string, but I cannot when the data is a structured binary message like the one below.
Here is the C code that sends the packet.
typedef struct s_msg_temp_report {
uint8_t id0;
uint8_t id1;
uint8_t name[9];
uint8_t led;
uint32_t temp;
} t_msg_temp_report;
static t_msg_temp_report msg_temp_report =
{
.id0 = 0,
.id1 = 2,
.name = DEMO_PRODUCT_NAME,
.led = 0,
.temp = 0,
};
/* Send client report. */
msg_temp_report.temp = (uint32_t)(at30tse_read_temperature() * 100);
msg_temp_report.led = !port_pin_get_output_level(LED_0_PIN);
ret = sendto(tx_socket, &msg_temp_report, sizeof(t_msg_temp_report),
0,(struct sockaddr *)&addr, sizeof(addr));
if (ret == M2M_SUCCESS) {
puts("Assignment 3.3: sensor report sent");
} else {
puts("Assignment 3.3: failed to send status report !");
}
What is the best way to process the (NSData) object data into a usable object for string conversion?
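For what it's worth, one direction (just a sketch; it assumes the struct packing and byte order come out the same on both ends, and parse_temp_report() is a made-up name) is to copy the bytes from the NSData into the same C struct and read the fields from that:
#include <stdint.h>
#include <stdio.h>
#include <string.h>
typedef struct s_msg_temp_report {
    uint8_t id0;
    uint8_t id1;
    uint8_t name[9];
    uint8_t led;
    uint32_t temp;
} t_msg_temp_report;
/* bytes and len would come from -[NSData bytes] and -[NSData length] */
static void parse_temp_report(const void *bytes, size_t len)
{
    t_msg_temp_report report;
    if (len < sizeof(report))
        return; /* not a complete report */
    memcpy(&report, bytes, sizeof(report));
    /* name is a fixed 9-byte field and is not guaranteed to be NUL-terminated */
    printf("id=%u.%u name=%.9s led=%u temp=%u\n",
           report.id0, report.id1, (const char *)report.name,
           report.led, (unsigned)report.temp);
}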
I am trying to capture the stream of two IP cameras directly connected to a mini PCIe dual gigabit expansion card in a nVidia Jetson TK1.
I managed to capture the stream of both cameras using GStreamer with the following command:
gst-launch-0.10 rtspsrc location=rtsp://admin:123456@192.168.0.123:554/mpeg4cif latency=0 ! decodebin ! ffmpegcolorspace ! autovideosink rtspsrc location=rtsp://admin:123456@192.168.2.254:554/mpeg4cif latency=0 ! decodebin ! ffmpegcolorspace ! autovideosink
It displays one window per camera, but gives this output just when the capture starts:
WARNING: from element /GstPipeline:pipeline0/GstAutoVideoSink:autovideosink1/GstXvImageSink:autovideosink1-actual-sink-xvimage: A lot of buffers are being dropped.
Additional debug info:
gstbasesink.c(2875): gst_base_sink_is_too_late (): /GstPipeline:pipeline0/GstAutoVideoSink:autovideosink1/GstXvImageSink:autovideosink1-actual-sink-xvimage:
There may be a timestamping problem, or this computer is too slow.
---> TVMR: Video-conferencing detected !!!!!!!!!
The stream plays well, with "good" synchronization between the cameras too, but after a while one of the cameras suddenly stops, and usually a few seconds later the other one stops as well. Using an interface sniffer like Wireshark I can check that the RTSP packets are still being sent by the cameras.
My goal is to use these two cameras as a stereo camera with OpenCV. I am able to capture the streams with OpenCV with the following calls:
camera[0].open("rtsp://admin:123456@192.168.2.254:554/mpeg4cif");//right
camera[1].open("rtsp://admin:123456@192.168.0.123:554/mpeg4cif");//left
The capture randomly starts well or badly, synchronized or not, with delay or not, but after a while it becomes impossible to use the captured images, as you can observe in the image:
And the output while running the OpenCV program is usually this (I have copied the most complete one):
[h264 @ 0x1b9580] slice type too large (2) at 0 23
[h264 @ 0x1b9580] decode_slice_header error
[h264 @ 0x1b1160] left block unavailable for requested intra mode at 0 6
[h264 @ 0x1b1160] error while decoding MB 0 6, bytestream (-1)
[h264 @ 0x1b1160] mmco: unref short failure
[h264 @ 0x1b9580] too many reference frames
[h264 @ 0x1b1160] pps_id (-1) out of range
The cameras used are two SIP-1080J modules.
Does anyone know how to achieve a good capture using OpenCV? First of all, getting rid of those h264 messages and having stable images while the program runs.
If not, how can I improve the pipelines and buffers in GStreamer to get a good capture without the stream suddenly stopping? Although I have never captured through OpenCV using GStreamer, perhaps some day I will learn how to do it and solve this problem.
Thanks a lot.
After some days of deep searching and a few attempts, I turned directly to the gstreamer-0.10 API. First I learned how to use it with the tutorials from http://docs.gstreamer.com/pages/viewpage.action?pageId=327735
For most of the tutorials, you just need to install libgstreamer0.10-dev and some other packages. I installed everything with:
sudo apt-get install libgstreamer0*
Then copy the code of the example you want to try into a .c file and type the following from the terminal in the folder where the .c file is located (in some examples you have to add more libs to pkg-config):
gcc basic-tutorial-1.c $(pkg-config --cflags --libs gstreamer-0.10) -o basic-tutorial-1
After that I no longer felt lost, and I started mixing some C and C++ code. You can compile it using a proper g++ command, with a CMakeLists.txt, or however you want. As I am developing on an nVidia Jetson TK1, I use Nsight Eclipse Edition, and I had to configure the project properties properly to be able to use both the gstreamer-0.10 libs and the OpenCV libs.
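A compile command along these lines pulls in both sets of libraries (the file name is just an example, and the exact pkg-config package names may vary on your system; gstreamer-app-0.10 is what provides the appsink calls):
g++ vc_capture.cpp $(pkg-config --cflags --libs gstreamer-0.10 gstreamer-app-0.10 opencv) -o vc_capture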
Mixing some code, I am finally able to capture the streams of my two IP cameras in real time, with no appreciable delay, no bad decoding of any frame, and both streams synchronized. The only thing I have not solved yet is obtaining the frames in color rather than grayscale in this call (I have tried other CV_ values, with "segmentation fault" as the result):
v = Mat(Size(640, 360),CV_8U, (char*)GST_BUFFER_DATA(gstImageBuffer));
The complete code is below; it captures with GStreamer, transforms the capture into an OpenCV Mat object and then shows it. The code captures just one IP camera; you can replicate the objects and methods to capture multiple cameras at the same time.
#include <opencv2/core/core.hpp>
#include <opencv2/contrib/contrib.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video/video.hpp>
#include <gst/gst.h>
#include <gst/app/gstappsink.h>
#include <gst/app/gstappbuffer.h>
#include <glib.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define DEFAULT_LATENCY_MS 1
using namespace cv;
typedef struct _vc_cfg_data {
char server_ip_addr[100];
} vc_cfg_data;
typedef struct _vc_gst_data {
GMainLoop *loop;
GMainContext *context;
GstElement *pipeline;
GstElement *rtspsrc,*depayloader, *decoder, *converter, *sink;
GstPad *recv_rtp_src_pad;
} vc_gst_data;
typedef struct _vc_data {
vc_gst_data gst_data;
vc_cfg_data cfg;
} vc_data;
/* Global data */
vc_data app_data;
static void vc_pad_added_handler (GstElement *src, GstPad *new_pad, vc_data *data);
#define VC_CHECK_ELEMENT_ERROR(e, name) \
if (!e) { \
g_printerr ("Element %s could not be created. Exiting.\n", name); \
return -1; \
}
/*******************************************************************************
Gstreamer pipeline creation and init
*******************************************************************************/
int vc_gst_pipeline_init(vc_data *data)
{
GstStateChangeReturn ret;
// Template
GstPadTemplate* rtspsrc_pad_template;
// Create a new GMainLoop
data->gst_data.loop = g_main_loop_new (NULL, FALSE);
data->gst_data.context = g_main_loop_get_context(data->gst_data.loop);
// Create gstreamer elements
data->gst_data.pipeline = gst_pipeline_new ("videoclient");
VC_CHECK_ELEMENT_ERROR(data->gst_data.pipeline, "pipeline");
//RTP UDP Source - for received RTP messages
data->gst_data.rtspsrc = gst_element_factory_make ("rtspsrc", "rtspsrc");
VC_CHECK_ELEMENT_ERROR(data->gst_data.rtspsrc,"rtspsrc");
printf("URL: %s\n",data->cfg.server_ip_addr);
g_print ("Setting RTSP source properties: \n");
g_object_set (G_OBJECT (data->gst_data.rtspsrc), "location", data->cfg.server_ip_addr, "latency", DEFAULT_LATENCY_MS, NULL);
//RTP H.264 Depayloader
data->gst_data.depayloader = gst_element_factory_make ("rtph264depay","depayloader");
VC_CHECK_ELEMENT_ERROR(data->gst_data.depayloader,"rtph264depay");
//ffmpeg decoder
data->gst_data.decoder = gst_element_factory_make ("ffdec_h264", "decoder");
VC_CHECK_ELEMENT_ERROR(data->gst_data.decoder,"ffdec_h264");
data->gst_data.converter = gst_element_factory_make ("ffmpegcolorspace", "converter");
VC_CHECK_ELEMENT_ERROR(data->gst_data.converter,"ffmpegcolorspace");
// i.MX Video sink
data->gst_data.sink = gst_element_factory_make ("appsink", "sink");
VC_CHECK_ELEMENT_ERROR(data->gst_data.sink,"appsink");
gst_app_sink_set_max_buffers((GstAppSink*)data->gst_data.sink, 1);
gst_app_sink_set_drop ((GstAppSink*)data->gst_data.sink, TRUE);
g_object_set (G_OBJECT (data->gst_data.sink),"sync", FALSE, NULL);
//Request pads from rtpbin, starting with the RTP receive sink pad,
//This pad receives RTP data from the network (rtp-udpsrc).
rtspsrc_pad_template = gst_element_class_get_pad_template (GST_ELEMENT_GET_CLASS (data->gst_data.rtspsrc),"recv_rtp_src_0");
// Use the template to request the pad
data->gst_data.recv_rtp_src_pad = gst_element_request_pad (data->gst_data.rtspsrc, rtspsrc_pad_template,
"recv_rtp_src_0", NULL);
// Print the name for confirmation
g_print ("A new pad %s was created\n",
gst_pad_get_name (data->gst_data.recv_rtp_src_pad));
// Add elements into the pipeline
g_print(" Adding elements to pipeline...\n");
gst_bin_add_many (GST_BIN (data->gst_data.pipeline),
data->gst_data.rtspsrc,
data->gst_data.depayloader,
data->gst_data.decoder,
data->gst_data.converter,
data->gst_data.sink,
NULL);
// Link some of the elements together
g_print(" Linking some elements ...\n");
if(!gst_element_link_many (data->gst_data.depayloader, data->gst_data.decoder, data->gst_data.converter, data->gst_data.sink, NULL))
g_print("Error: could not link all elements\n");
// Connect to the pad-added signal for the rtpbin. This allows us to link
//the dynamic RTP source pad to the depayloader when it is created.
if(!g_signal_connect (data->gst_data.rtspsrc, "pad-added",
G_CALLBACK (vc_pad_added_handler), data))
g_print("Error: could not add signal handler\n");
// Set the pipeline to "playing" state
g_print ("Now playing A\n");
ret = gst_element_set_state (data->gst_data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline A to the playing state.\n");
gst_object_unref (data->gst_data.pipeline);
return -1;
}
return 0;
}
static void vc_pad_added_handler (GstElement *src, GstPad *new_pad, vc_data *data) {
GstPad *sink_pad = gst_element_get_static_pad (data->gst_data.depayloader, "sink");
GstPadLinkReturn ret;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
g_print ("Received new pad '%s' from '%s':\n", GST_PAD_NAME (new_pad), GST_ELEMENT_NAME (src));
/* Check the new pad's name */
if (!g_str_has_prefix (GST_PAD_NAME (new_pad), "recv_rtp_src_")) {
g_print (" It is not the right pad. Need recv_rtp_src_. Ignoring.\n");
goto exit;
}
/* If our converter is already linked, we have nothing to do here */
if (gst_pad_is_linked (sink_pad)) {
g_print (" Sink pad from %s already linked. Ignoring.\n", GST_ELEMENT_NAME (src));
goto exit;
}
/* Check the new pad's type */
new_pad_caps = gst_pad_get_caps (new_pad);
new_pad_struct = gst_caps_get_structure (new_pad_caps, 0);
new_pad_type = gst_structure_get_name (new_pad_struct);
/* Attempt the link */
ret = gst_pad_link (new_pad, sink_pad);
if (GST_PAD_LINK_FAILED (ret)) {
g_print (" Type is '%s' but link failed.\n", new_pad_type);
} else {
g_print (" Link succeeded (type '%s').\n", new_pad_type);
}
exit:
/* Unreference the new pad's caps, if we got them */
if (new_pad_caps != NULL)
gst_caps_unref (new_pad_caps);
/* Unreference the sink pad */
gst_object_unref (sink_pad);
}
int vc_gst_pipeline_clean(vc_data *data) {
GstStateChangeReturn ret;
GstStateChangeReturn ret2;
/* Cleanup Gstreamer */
if(!data->gst_data.pipeline)
return 0;
/* Send the main loop a quit signal */
g_main_loop_quit(data->gst_data.loop);
g_main_loop_unref(data->gst_data.loop);
ret = gst_element_set_state (data->gst_data.pipeline, GST_STATE_NULL);
if (ret == GST_STATE_CHANGE_FAILURE) {
g_printerr ("Unable to set the pipeline A to the NULL state.\n");
gst_object_unref (data->gst_data.pipeline);
return -1;
}
g_print ("Deleting pipeline\n");
gst_object_unref (GST_OBJECT (data->gst_data.pipeline));
/* Zero out the structure */
memset(&data->gst_data, 0, sizeof(vc_gst_data));
return 0;
}
void handleKey(char key)
{
switch (key)
{
case 27:
break;
}
}
int vc_mainloop(vc_data* data)
{
GstBuffer *gstImageBuffer;
Mat v;
namedWindow("view",WINDOW_NORMAL);
while (1) {
gstImageBuffer = gst_app_sink_pull_buffer((GstAppSink*)data->gst_data.sink);
if (gstImageBuffer != NULL )
{
v = Mat(Size(640, 360),CV_8U, (char*)GST_BUFFER_DATA(gstImageBuffer));
imshow("view", v);
handleKey((char)waitKey(3));
gst_buffer_unref(gstImageBuffer);
}else{
g_print("gsink buffer didn't return buffer.");
}
}
return 0;
}
int main (int argc, char *argv[])
{
setenv("DISPLAY", ":0", 0);
strcpy(app_data.cfg.server_ip_addr, "rtsp://admin:123456@192.168.0.123:554/mpeg4cif");
gst_init (&argc, &argv);
if(vc_gst_pipeline_init(&app_data) == -1) {
printf("Gstreamer pipeline creation and init failed\n");
goto cleanup;
}
vc_mainloop(&app_data);
printf ("Returned, stopping playback\n");
cleanup:
return vc_gst_pipeline_clean(&app_data);
return 0;
}
I hope this helps!! ;)
import cv2
uri = 'rtsp://admin:123456@192.168.0.123:554/mpeg4cif'
gst_str = ("rtspsrc location={} latency={} ! rtph264depay ! h264parse ! omxh264dec ! nvvidconv ! video/x-raw, width=(int){}, height=(int){}, format=(string)BGRx ! videoconvert ! appsink sync=false").format(uri, 200, 3072, 2048)
cap = cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
while True:
    _, frame = cap.read()
    if frame is None:
        break
    cv2.imshow("", frame)
    cv2.waitKey(1)
cap.release()
cv2.destroyAllWindows()
So basically I want to play some audio files (mp3 and caf mostly), but the callback never gets called, except when I call it myself to prime the queue.
Here's my data struct:
struct AQPlayerState
{
CAStreamBasicDescription mDataFormat;
AudioQueueRef mQueue;
AudioQueueBufferRef mBuffers[kBufferNum];
AudioFileID mAudioFile;
UInt32 bufferByteSize;
SInt64 mCurrentPacket;
UInt32 mNumPacketsToRead;
AudioStreamPacketDescription *mPacketDescs;
bool mIsRunning;
};
Here's my callback function:
static void HandleOutputBuffer (void *aqData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer)
{
NSLog(@"HandleOutput");
AQPlayerState *pAqData = (AQPlayerState *) aqData;
if (pAqData->mIsRunning == false) return;
UInt32 numBytesReadFromFile;
UInt32 numPackets = pAqData->mNumPacketsToRead;
AudioFileReadPackets (pAqData->mAudioFile,
false,
&numBytesReadFromFile,
pAqData->mPacketDescs,
pAqData->mCurrentPacket,
&numPackets,
inBuffer->mAudioData);
if (numPackets > 0) {
inBuffer->mAudioDataByteSize = numBytesReadFromFile;
AudioQueueEnqueueBuffer (pAqData->mQueue,
inBuffer,
(pAqData->mPacketDescs ? numPackets : 0),
pAqData->mPacketDescs);
pAqData->mCurrentPacket += numPackets;
} else {
// AudioQueueStop(pAqData->mQueue, false);
// AudioQueueDispose(pAqData->mQueue, true);
// AudioFileClose (pAqData->mAudioFile);
// free(pAqData->mPacketDescs);
// free(pAqData->mFloatBuffer);
pAqData->mIsRunning = false;
}
}
And here's my method:
- (void)playFile
{
AQPlayerState aqData;
// get the source file
NSString *p = [[NSBundle mainBundle] pathForResource:@"1_Female" ofType:@"mp3"];
NSURL *url2 = [NSURL fileURLWithPath:p];
CFURLRef srcFile = (__bridge CFURLRef)url2;
OSStatus result = AudioFileOpenURL(srcFile, 0x1/*fsRdPerm*/, 0/*inFileTypeHint*/, &aqData.mAudioFile);
CFRelease (srcFile);
CheckError(result, "Error opening sound file");
UInt32 size = sizeof(aqData.mDataFormat);
CheckError(AudioFileGetProperty(aqData.mAudioFile, kAudioFilePropertyDataFormat, &size, &aqData.mDataFormat),
"Error getting file's data format");
CheckError(AudioQueueNewOutput(&aqData.mDataFormat, HandleOutputBuffer, &aqData, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &aqData.mQueue),
"Error AudioQueueNewOutPut");
// we need to calculate how many packets we read at a time and how big a buffer we need
// we base this on the size of the packets in the file and an approximate duration for each buffer
{
bool isFormatVBR = (aqData.mDataFormat.mBytesPerPacket == 0 || aqData.mDataFormat.mFramesPerPacket == 0);
// first check to see what the max size of a packet is - if it is bigger
// than our allocation default size, that needs to become larger
UInt32 maxPacketSize;
size = sizeof(maxPacketSize);
CheckError(AudioFileGetProperty(aqData.mAudioFile, kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize),
"Error getting max packet size");
// adjust buffer size to represent about a second of audio based on this format
CalculateBytesForTime(aqData.mDataFormat, maxPacketSize, 1.0/*seconds*/, &aqData.bufferByteSize, &aqData.mNumPacketsToRead);
if (isFormatVBR) {
aqData.mPacketDescs = new AudioStreamPacketDescription [aqData.mNumPacketsToRead];
} else {
aqData.mPacketDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
}
printf ("Buffer Byte Size: %d, Num Packets to Read: %d\n", (int)aqData.bufferByteSize, (int)aqData.mNumPacketsToRead);
}
// if the file has a magic cookie, we should get it and set it on the AQ
size = sizeof(UInt32);
result = AudioFileGetPropertyInfo(aqData.mAudioFile, kAudioFilePropertyMagicCookieData, &size, NULL);
if (!result && size) {
char* cookie = new char [size];
CheckError(AudioFileGetProperty(aqData.mAudioFile, kAudioFilePropertyMagicCookieData, &size, cookie),
"Error getting cookie from file");
CheckError(AudioQueueSetProperty(aqData.mQueue, kAudioQueueProperty_MagicCookie, cookie, size),
"Error setting cookie to file");
delete[] cookie;
}
aqData.mCurrentPacket = 0;
for (int i = 0; i < kBufferNum; ++i) {
CheckError(AudioQueueAllocateBuffer (aqData.mQueue,
aqData.bufferByteSize,
&aqData.mBuffers[i]),
"Error AudioQueueAllocateBuffer");
HandleOutputBuffer (&aqData,
aqData.mQueue,
aqData.mBuffers[i]);
}
// set queue's gain
Float32 gain = 1.0;
CheckError(AudioQueueSetParameter (aqData.mQueue,
kAudioQueueParam_Volume,
gain),
"Error AudioQueueSetParameter");
aqData.mIsRunning = true;
CheckError(AudioQueueStart(aqData.mQueue,
NULL),
"Error AudioQueueStart");
}
And the output when I press play:
Buffer Byte Size: 40310, Num Packets to Read: 38
HandleOutput start
HandleOutput start
HandleOutput start
I tried replacing CFRunLoopGetCurrent() with CFRunLoopGetMain(), and kCFRunLoopCommonModes with kCFRunLoopDefaultMode, but nothing changed.
Shouldn't the primed buffers start playing right away when I start the queue?
When I start the queue, no callbacks are fired at all.
What am I doing wrong? Thanks for any ideas.
What you are basically trying to do here is a standard example of audio playback using Audio Queues. Without looking at your code in detail to see what's missing (that could take a while), I'd rather recommend that you follow the steps in this basic sample code, which does exactly what you're doing, without the extras that aren't really relevant (for example, why are you setting the audio gain?).
Somewhere else you were trying to play audio using audio units. Audio units are more complex than basic audio queue playback, and I wouldn't attempt them before being very comfortable with audio queues. But you can look at this example project for a basic example of audio queues.
In general, when it comes to Core Audio programming on iOS, it's best to take your time with the basic examples and build your way up. The problem with a lot of tutorials online is that they add extra stuff and often mix it with Objective-C code, whereas Core Audio is purely C code (i.e. the extra stuff won't add anything to the learning process). I strongly recommend you go over the book Learning Core Audio if you haven't already. All the sample code is available online, but you can also clone it from this repo for convenience. That's how I learned Core Audio. It takes time :)
I have been trying to create an Icecast source client for iOS. I am able to connect to the socket using AsyncSocket, and I am also able to write data to the server. The Icecast configuration is set up for the mp3 format, but the mp3 file written to the server is corrupt. I am providing some code snippets.
Header:
NSString *string = @"SOURCE /sync HTTP/1.0\r\n"
"Authorization: Basic c291cmNlOmhhY2ttZQ==\r\n"
"User-Agent: butt-0.1.12\r\n"
"User-Agent: butt-0.1.12\r\n"
"content-type: audio/mpeg\r\n"
"ice-name: sync's Stream\r\n"
"ice-public: 0\r\n"
"ice-genre: Rock\r\n"
"ice-description: This is my server description\r\n"
"Connection: keep-alive\r\n"
"ice-audio-info: ice-samplerate=44100;ice-bitrate=48;ice-channels=2\r\n\r\n";
NSData *data = [string dataUsingEncoding:NSUTF8StringEncoding];
//sending http request to write the header
NSLog(@"Sending HTTP Request.");
[socket writeData:data withTimeout:-1 tag:1];
//write buffer data to server
[socket writeData:self.dataBuffer withTimeout:-1 tag:1];
For recording I am using AQRecorder, with the following code:
void AQRecorder::MyInputBufferHandler( void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumPackets,
const AudioStreamPacketDescription* inPacketDesc)
{
AQRecorder *aqr = (AQRecorder *)inUserData;
try {
if (inNumPackets > 0) {
// write packets to file
XThrowIfError(AudioFileWritePackets(aqr->mRecordFile, FALSE, inBuffer->mAudioDataByteSize,
inPacketDesc, aqr->mRecordPacket, &inNumPackets, inBuffer->mAudioData),
"AudioFileWritePackets failed");
aqr->mRecordPacket += inNumPackets;
NSLog(@"size = %u",(unsigned int)inBuffer->mAudioDataByteSize);
data = [[[NSData alloc]initWithBytes:inBuffer->mAudioData length:inBuffer->mAudioDataByteSize]retain];
server *srv = [[server alloc]init];
srv.dataBuffer=data;
[srv connecting];
}
// if we're not stopping, re-enqueue the buffer so that it gets filled again
if (aqr->IsRunning())
XThrowIfError(AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
} catch (CAXException e) {
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
}
void AQRecorder::StartRecord(CFStringRef inRecordFile)
{
// server *srv=[[server alloc]init];
// [srv connecting];
int i, bufferByteSize;
UInt32 size;
CFURLRef url = nil;
try {
mFileName = CFStringCreateCopy(kCFAllocatorDefault, inRecordFile);
// // specify the recording format
// SetupAudioFormat(kAudioFormatMPEG4AAC);
// specify the recording format, use hardware AAC if available
// otherwise use IMA4
if(IsAACHardwareEncoderAvailable())
SetupAudioFormat(kAudioFormatMPEG4AAC);
else
SetupAudioFormat(kAudioFormatAppleIMA4);
// create the queue
XThrowIfError(AudioQueueNewInput(
&mRecordFormat,
MyInputBufferHandler,
this /* userData */,
NULL /* run loop */, NULL /* run loop mode */,
0 /* flags */, &mQueue), "AudioQueueNewInput failed");
// get the record format back from the queue's audio converter --
// the file may require a more specific stream description than was necessary to create the encoder.
mRecordPacket = 0;
size = sizeof(mRecordFormat);
XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_StreamDescription,
&mRecordFormat, &size), "couldn't get queue's format");
NSString *recordFile = [NSTemporaryDirectory() stringByAppendingPathComponent: (NSString*)inRecordFile];
//url = CFURLCreateWithString(kCFAllocatorDefault, (CFStringRef)recordFile, NULL);
url = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, (CFStringRef)recordFile, kCFURLPOSIXPathStyle, false);
// create the audio file
OSStatus status = AudioFileCreateWithURL(url, kAudioFileCAFType, &mRecordFormat, kAudioFileFlags_EraseFile, &mRecordFile);
CFRelease(url);
XThrowIfError(status, "AudioFileCreateWithURL failed");
// copy the cookie first to give the file object as much info as we can about the data going in
// not necessary for pcm, but required for some compressed audio
CopyEncoderCookieToFile();
// allocate and enqueue buffers
bufferByteSize = ComputeRecordBufferSize(&mRecordFormat, kBufferDurationSeconds); // enough bytes for half a second
for (i = 0; i < kNumberRecordBuffers; ++i) {
XThrowIfError(AudioQueueAllocateBuffer(mQueue, bufferByteSize, &mBuffers[i]),
"AudioQueueAllocateBuffer failed");
XThrowIfError(AudioQueueEnqueueBuffer(mQueue, mBuffers[i], 0, NULL),
"AudioQueueEnqueueBuffer failed");
}
// start the queue
mIsRunning = true;
XThrowIfError(AudioQueueStart(mQueue, NULL), "AudioQueueStart failed");
}
catch (CAXException e) {
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
catch (...) {
fprintf(stderr, "An unknown error occurred\n");
}
}
Do I need to change the format I write to the server?
You're not sending MP3 data, you're sending AAC or M4A data. I don't believe Icecast supports M4A. Are you actually using Icecast or some other server?
For AAC, your Content-Type header is wrong. Try audio/aac, audio/aacp, audio/mp4 or audio/mpeg4-generic.
Also, you only need one User-Agent header, and you should pick something that identifies the software you are writing rather than copying someone else's. If the protocol ever needs to be adjusted for your client in the future, that is only possible if you use your own user-agent string.
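Put together, the handshake headers would then look something more like this (a sketch; MyApp/1.0 is a placeholder for your own user-agent string, and the Content-Type should match whatever you actually end up sending):
SOURCE /sync HTTP/1.0
Authorization: Basic c291cmNlOmhhY2ttZQ==
User-Agent: MyApp/1.0
Content-Type: audio/aac
ice-name: sync's Stream
ice-public: 0
ice-description: This is my server description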