C language newbie here. My goal: read a character stream with getchar() into an array, write the array to a binary file, then read the binary file back into an array and print the string on the console. I was successful with every part except the output to the console.
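For context, a minimal sketch of what reading getchar() into the array can look like (illustrative only, not my exact code; readDoc is a made-up name, document and MAXSIZE are as used below):
int readDoc(char document[MAXSIZE]) {
    int c, size = 0;
    /* collect characters until EOF or the array is full */
    while (size < MAXSIZE - 1 && (c = getchar()) != EOF)
        document[size++] = (char)c;
    document[size] = '\0'; /* terminate so it can be printed with %s later */
    return size;           /* number of characters stored */
}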
Call to function:
if (c == '6')
loadDoc();
Write file function:
int saveDoc(char document[MAXSIZE], int size){
FILE *f;
f = fopen("document.bin", "wb");
if (!f) {
printf("Error during writing to file !\n");
}
else{
fwrite(&document, sizeof(document), size, f);
fclose(f);
}
return 0;
}
Read file function:
char loadDoc(){
char buffer[1000];
FILE *f;
f = fopen("document.bin", "rb");
if (!f) {
printf("Error during reading file !\n");
}
else {
printf("Size of buffer: %d\n", sizeof(buffer));
fread(&buffer, sizeof(buffer), 1, f);
printf("File cpntents: \n %s\n", buffer);
fclose(f);
}
return 0;
}
Output:
********************
1) Start New Document:
2) View Document:
3) Edit Document:
4) Delete Document:
5) Save Document:
6) Load Document:
7) Exit
********************
6
Size of buffer: 1000
File contents:
#p#
When I open the file in a text editor, the actual content of the file is:
#p# & P ÿÿÿÿ& ðýb µ# ú ~ù 3 ÿÿÿÿÿÿÿÿ1 þb B# 3 è# 3 =A
The input I entered is saved to the file, and I expected it to be printed to the console when the file is read back, but this code isn't working as expected.
Here is a correct, working solution (compiled with GCC):
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
void save_to_binary_file(const char* file_name, const char* text_data)
{
    FILE* file = fopen(file_name, "wb");
    if (file == NULL) {
        printf("Failed to open file %s\n", file_name);
        return;
    }
    /* write the string including its terminating '\0' */
    if (fwrite(text_data, sizeof(char), strlen(text_data) + 1, file) != strlen(text_data) + 1)
        puts("Failed to write all text data");
    fclose(file);
}
char* load_binary_file_as_text(const char* file_name)
{
    FILE* file = fopen(file_name, "rb");
    if (file == NULL) {
        printf("Failed to open file %s\n", file_name);
        return NULL;
    }
    /* determine the file size, then rewind to the start */
    fseek(file, 0, SEEK_END);
    size_t file_size_in_bytes = ftell(file);
    fseek(file, 0, SEEK_SET);
    char* read_buffer = (char*)malloc(file_size_in_bytes);
    if (read_buffer != NULL && fread(read_buffer, 1, file_size_in_bytes, file) != file_size_in_bytes)
        puts("Failed to read all the bytes");
    fclose(file);
    return read_buffer;
}
int main()
{
    save_to_binary_file("Data.data", "ABCD");
    char* buffer = load_binary_file_as_text("Data.data");
    if (buffer != NULL)
        printf("%s\n", buffer);
    free(buffer);
    return 0;
}
The problem was the '&' in the fwrite() call. Removing it made the code work as expected.
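For completeness, a sketch of how the corrected saveDoc() can look (a sketch, not the exact final code; note that sizeof(document) on an array parameter is really sizeof(char *), so the element size is spelled out explicitly):
int saveDoc(char document[MAXSIZE], int size){
    FILE *f = fopen("document.bin", "wb");
    if (!f) {
        printf("Error during writing to file!\n");
        return 1;
    }
    /* write `size` characters from the array itself, not from &document */
    fwrite(document, sizeof(char), size, f);
    fclose(f);
    return 0;
}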
I want to edit the content of one frame of an mp4 file using OpenCV and FFmpeg 3.3. However, I have run into some problems: the width and height of the video are zero, and some of the functions are deprecated. I have replaced the old functions with their updated counterparts, but I still cannot extract a correct frame. Please help.
Can anyone show an example of extracting a frame from an mp4 file using FFmpeg 3.3?
#include "stdafx.h"
#include <iostream>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
// FFmpeg
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/pixdesc.h>
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
}
#define CODEC_TYPE_VIDEO AVMEDIA_TYPE_VIDEO
int main(int argc, char* argv[])
{
// initialize FFmpeg library
av_register_all();
// av_log_set_level(AV_LOG_DEBUG);
int ret;
// open input file context
AVFormatContext* inctx = nullptr;
//ret = avformat_open_input(&inctx, infile, nullptr, nullptr);
ret = avformat_open_input(&inctx, "C:\\car.mp4", nullptr, nullptr);
// retrieve input stream information
ret = avformat_find_stream_info(inctx, nullptr);
if (ret < 0) {
std::cerr << "fail to avformat_find_stream_info: ret=" << ret;
return 2;
}
// find primary video stream
AVCodec* vcodec = nullptr;
vcodec = avcodec_find_decoder(AV_CODEC_ID_MPEG4);
if (!vcodec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
const int vstrm_idx = ret;
AVStream* vstrm = inctx->streams[vstrm_idx];
// open video decoder context
AVCodecContext *c = NULL;
c = avcodec_alloc_context3(vcodec);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
if (avcodec_open2(c, vcodec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
// print input video stream information
// initialize sample scaler
c->pix_fmt = AV_PIX_FMT_YUV420P;
c->width = 1280;
c->height = 720;
if (vcodec->capabilities & CODEC_CAP_TRUNCATED)
c->flags |= CODEC_FLAG_TRUNCATED;
c->flags2 |= CODEC_FLAG2_FAST;
int width = 1280;
int height = 720;
SwsContext* swsctx = sws_getCachedContext(nullptr, width,
height, AV_PIX_FMT_YUV420P, width, height, AV_PIX_FMT_RGB32,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
}
I am not sure about writing the processed frames back (I don't remember exactly), but this seemed to work for me:
extern "C" {
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include "libavcodec/avcodec.h"
#include <libavutil/opt.h>
#include <libavdevice/avdevice.h>
#include <libswscale/swscale.h>
#include <libavutil/mathematics.h>
}
#include "opencv2/opencv.hpp"
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
#define av_frame_alloc avcodec_alloc_frame
#endif
using namespace std;
using namespace cv;
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
char buf1[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_string(buf1, pkt->pts);
char buf2[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_string(buf2, pkt->dts);
char buf3[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_string(buf3, pkt->duration);
char buf4[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_time_string(buf4, pkt->pts, time_base);
char buf5[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_time_string(buf5, pkt->dts, time_base);
char buf6[AV_TS_MAX_STRING_SIZE] = { 0 };
av_ts_make_time_string(buf6, pkt->duration, time_base);
printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
buf1, buf4,
buf2, buf5,
buf3, buf6,
pkt->stream_index);
}
int main(int argc, char **argv)
{
AVOutputFormat *ofmt = NULL;
AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
AVPacket pkt;
AVFrame *pFrame = NULL;
AVFrame *pFrameRGB = NULL;
int frameFinished = 0;
pFrame = av_frame_alloc();
pFrameRGB = av_frame_alloc();
const char *in_filename, *out_filename;
int ret, i;
in_filename = "../../TestClips/Audio Video Sync Test.mp4";
out_filename = "out.avi";
// Initialize FFMPEG
av_register_all();
// Get input file format context
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0)
{
fprintf(stderr, "Could not open input file '%s'", in_filename);
goto end;
}
// Extract streams description
if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0)
{
fprintf(stderr, "Failed to retrieve input stream information");
goto end;
}
// Print detailed information about the input or output format,
// such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
av_dump_format(ifmt_ctx, 0, in_filename, 0);
// Allocate an AVFormatContext for an output format.
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
if (!ofmt_ctx)
{
fprintf(stderr, "Could not create output context\n");
ret = AVERROR_UNKNOWN;
goto end;
}
// The output container format.
ofmt = ofmt_ctx->oformat;
// Allocating output streams
for (i = 0; i < ifmt_ctx->nb_streams; i++)
{
AVStream *in_stream = ifmt_ctx->streams[i];
AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
if (!out_stream)
{
fprintf(stderr, "Failed allocating output stream\n");
ret = AVERROR_UNKNOWN;
goto end;
}
ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
if (ret < 0)
{
fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
goto end;
}
out_stream->codec->codec_tag = 0;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
{
out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
}
// Open output file
if (!(ofmt->flags & AVFMT_NOFILE))
{
ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
if (ret < 0)
{
fprintf(stderr, "Could not open output file '%s'", out_filename);
goto end;
}
}
// Write output file header
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret < 0)
{
fprintf(stderr, "Error occurred when opening output file\n");
goto end;
}
// Search for input video codec info
AVCodec *in_codec = NULL;
AVCodecContext* avctx = NULL;
int video_stream_index = -1;
for (int i = 0; i < ifmt_ctx->nb_streams; i++)
{
if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
video_stream_index = i;
avctx = ifmt_ctx->streams[i]->codec;
in_codec = avcodec_find_decoder(avctx->codec_id);
if (!in_codec)
{
fprintf(stderr, "in codec not found\n");
exit(1);
}
break;
}
}
// Search for output video codec info
AVCodec *out_codec = NULL;
AVCodecContext* o_avctx = NULL;
int o_video_stream_index = -1;
for (int i = 0; i < ofmt_ctx->nb_streams; i++)
{
if (ofmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
o_video_stream_index = i;
out_codec = avcodec_find_encoder(ofmt_ctx->streams[i]->codec->codec_id);
if (!out_codec)
{
fprintf(stderr, "out codec not found\n");
exit(1);
}
o_avctx = avcodec_alloc_context3(out_codec);
o_avctx->height = avctx->height;
o_avctx->width = avctx->width;
o_avctx->sample_aspect_ratio = avctx->sample_aspect_ratio;
o_avctx->gop_size = 2;
o_avctx->max_b_frames = 2;
if (out_codec->pix_fmts)
{
o_avctx->pix_fmt = out_codec->pix_fmts[0];
}
else
{
o_avctx->pix_fmt = avctx->pix_fmt;
}
o_avctx->time_base = avctx->time_base;
if (avcodec_open2(o_avctx, out_codec, NULL) < 0)
{
fprintf(stderr, "cannot open encoder\n");
exit(1);
}
break;
}
}
// Show output format info
av_dump_format(ofmt_ctx, 0, out_filename, 1);
// openCV pixel format
enum AVPixelFormat pFormat = AV_PIX_FMT_RGB24;
// Data size
int numBytes = avpicture_get_size(pFormat, avctx->width, avctx->height);
// allocate buffer
uint8_t *buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
// fill frame structure
avpicture_fill((AVPicture *)pFrameRGB, buffer, pFormat, avctx->width, avctx->height);
// frame area
int y_size = avctx->width * avctx->height;
// Open input codec
avcodec_open2(avctx, in_codec, NULL);
// Main loop
while (1)
{
AVStream *in_stream, *out_stream;
ret = av_read_frame(ifmt_ctx, &pkt);
if (ret < 0)
{
break;
}
in_stream = ifmt_ctx->streams[pkt.stream_index];
out_stream = ofmt_ctx->streams[pkt.stream_index];
//log_packet(ifmt_ctx, &pkt, "in");
// copy packet
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
//log_packet(ofmt_ctx, &pkt, "out");
if (pkt.stream_index == video_stream_index)
{
avcodec_decode_video2(avctx, pFrame, &frameFinished, &pkt);
if (frameFinished)
{
struct SwsContext *img_convert_ctx;
img_convert_ctx = sws_getCachedContext(NULL,
avctx->width,
avctx->height,
avctx->pix_fmt,
avctx->width,
avctx->height,
AV_PIX_FMT_BGR24,
SWS_BICUBIC,
NULL,
NULL,
NULL);
sws_scale(img_convert_ctx,
((AVPicture*)pFrame)->data,
((AVPicture*)pFrame)->linesize,
0,
avctx->height,
((AVPicture *)pFrameRGB)->data,
((AVPicture *)pFrameRGB)->linesize);
sws_freeContext(img_convert_ctx);
// Do some image processing
cv::Mat img(pFrame->height, pFrame->width, CV_8UC3, pFrameRGB->data[0], false);
cv::GaussianBlur(img, img, Size(5, 5), 3);
cv::imshow("Display", img);
cv::waitKey(5);
// --------------------------------
// Transform back to initial format
// --------------------------------
img_convert_ctx = sws_getCachedContext(NULL,
avctx->width,
avctx->height,
AV_PIX_FMT_BGR24,
avctx->width,
avctx->height,
avctx->pix_fmt,
SWS_BICUBIC,
NULL,
NULL,
NULL);
sws_scale(img_convert_ctx,
((AVPicture*)pFrameRGB)->data,
((AVPicture*)pFrameRGB)->linesize,
0,
avctx->height,
((AVPicture *)pFrame)->data,
((AVPicture *)pFrame)->linesize);
int got_packet = 0;
AVPacket enc_pkt = { 0 };
av_init_packet(&enc_pkt);
avcodec_encode_video2(o_avctx, &enc_pkt, pFrame, &got_packet);
if (o_avctx->coded_frame->pts != AV_NOPTS_VALUE)
{
enc_pkt.pts = av_rescale_q(o_avctx->coded_frame->pts, o_avctx->time_base, ofmt_ctx->streams[video_stream_index]->time_base);
}
if (o_avctx->coded_frame->key_frame)
{
enc_pkt.flags |= AV_PKT_FLAG_KEY;
}
av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
av_packet_unref(&enc_pkt);
sws_freeContext(img_convert_ctx);
}
}
else // write sound frame
{
ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
}
if (ret < 0)
{
fprintf(stderr, "Error muxing packet\n");
break;
}
// Decrease packet ref counter
av_packet_unref(&pkt);
}
av_write_trailer(ofmt_ctx);
end:
avcodec_close(avctx);
avcodec_close(o_avctx);
av_free(pFrame);
av_free(pFrameRGB);
avformat_close_input(&ifmt_ctx);
// close output
if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
{
avio_closep(&ofmt_ctx->pb);
}
avformat_free_context(ofmt_ctx);
if (ret < 0 && ret != AVERROR_EOF)
{
char buf_err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
av_make_error_string(buf_err, AV_ERROR_MAX_STRING_SIZE, ret);
fprintf(stderr, "Error occurred: %s\n", buf_err);
return 1;
}
return 0;
}
All the examples and books I've seen so far recommend using waitKey(1) to force a repaint of the OpenCV window. That looks weird and too hacky. Why wait even 1 ms when you don't have to?
Are there any alternatives? I tried cv::updateWindow but it seems to require OpenGL and therefore crashes. I'm using VC++ on Windows.
I looked into the source and, as @Dan Masek said, there don't seem to be any other functions that process Windows messages. So I ended up writing my own little DoEvents() function for VC++. Below is the full source code, which uses OpenCV to display a video frame by frame while skipping a desired number of frames.
#include <windows.h>
#include <iostream>
#include "opencv2/opencv.hpp"
using namespace cv;
using namespace std;
bool DoEvents();
int main(int argc, char *argv[])
{
VideoCapture cap(argv[1]);
if (!cap.isOpened())
return -1;
namedWindow("tree", CV_GUI_EXPANDED | CV_WINDOW_AUTOSIZE);
double frnb(cap.get(CV_CAP_PROP_FRAME_COUNT));
std::cout << "frame count = " << frnb << endl;
for (double fIdx = 0; fIdx < frnb; fIdx += 50) {
Mat frame;
cap.set(CV_CAP_PROP_POS_FRAMES, fIdx);
bool success = cap.read(frame);
if (!success) {
cout << "Cannot read frame " << endl;
break;
}
imshow("tree", frame);
if (!DoEvents())
return 0;
}
return 0;
}
bool DoEvents()
{
MSG msg;
BOOL result;
while (::PeekMessage(&msg, NULL, 0, 0, PM_NOREMOVE))
{
result = ::GetMessage(&msg, NULL, 0, 0);
if (result == 0) // WM_QUIT
{
::PostQuitMessage(msg.wParam);
return false;
}
else if (result == -1)
return true; // error occurred
else
{
::TranslateMessage(&msg);
::DispatchMessage(&msg);
}
}
return true;
}
How can I capture 802.11 beacon frames on my Linux machine using a C program? Also, how can I send a frame in response using a C program?
Try using libpcap. See this answer on SO for a basic example.
The program below only works if:
1. your device supports the link-layer type DLT_IEEE802_11_RADIO;
2. you have libpcap installed on your machine;
3. you compile it against the pcap library (e.g. cc prog.c -lpcap);
4. you pass the interface on the command line when running it (e.g. ./a.out wlan0).
#include <pcap.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
void packet_view(unsigned char *args,const struct pcap_pkthdr *h,const unsigned char *p);
#define SNAP_LEN 3000
void packet_view(
unsigned char *args,
const struct pcap_pkthdr *h,
const unsigned char *p
){
int len;
len = 0;
printf("PACKET\n");
while(len < h->len) {
printf("%02x ", *(p++));
if(!(++len % 16))
printf("\n");
}
printf("\n");
return ;
}
int main(int argc, char **argv)
{
char *dev = NULL; /* capture device name */
char errbuf[PCAP_ERRBUF_SIZE]; /* error buffer */
pcap_t *handle; /* packet capture handle */
char filter_exp[] = "wlan type mgt subtype beacon"; /* filter expression [3] */
// char filter_exp[] = "ip"; /* filter expression [3] */
struct bpf_program fp; /* compiled filter program (expression) */
bpf_u_int32 mask; /* subnet mask */
bpf_u_int32 net; /* ip */
int num_packets = 10; /* number of packets to capture */
/* check for capture device name on command-line */
if (argc == 2) {
dev = argv[1];
}
else if (argc > 2) {
fprintf(stderr, "error: unrecognized command-line options\n\n");
exit(EXIT_FAILURE);
}
else {
/* find a capture device if not specified on command-line */
dev = pcap_lookupdev(errbuf);
if (dev == NULL) {
fprintf(stderr, "Couldn't find default device: %s\n",
errbuf);
exit(EXIT_FAILURE);
}
}
/* get network number and mask associated with capture device */
if (pcap_lookupnet(dev, &net, &mask, errbuf) == -1) {
fprintf(stderr, "Couldn't get netmask for device %s: %s\n",
dev, errbuf);
net = 0;
mask = 0;
}
/* print capture info */
printf("Device: %s\n", dev);
printf("Number of packets: %d\n", num_packets);
printf("Filter expression: %s\n", filter_exp);
/* open capture device */
handle = pcap_open_live(dev, SNAP_LEN, 1, -1, errbuf);
if (handle == NULL) {
fprintf(stderr, "Couldn't open device %s: %s\n", dev, errbuf);
exit(EXIT_FAILURE);
}
/* for Ethernet device change the type to DLT_EN10MB */
/* your wlan device needs to support this link-layer type, otherwise this check fails */
if (pcap_datalink(handle) != DLT_IEEE802_11_RADIO) {
fprintf(stderr, "%s is not an Wlan packet\n", dev);
exit(EXIT_FAILURE);
}
/* compile the filter expression */
if (pcap_compile(handle, &fp, filter_exp, 1,PCAP_NETMASK_UNKNOWN) == -1) {
fprintf(stderr, "Couldn't parse filter %s: %s\n",
filter_exp, pcap_geterr(handle));
exit(EXIT_FAILURE);
}
/* apply the compiled filter */
if (pcap_setfilter(handle, &fp) == -1) {
fprintf(stderr, "Couldn't install filter %s: %s\n %s: %s\n",
filter_exp, pcap_geterr(handle));
exit(EXIT_FAILURE);
}
/* now we can set our callback function */
pcap_loop(handle, num_packets, packet_view, NULL);
/* cleanup */
pcap_freecode(&fp);
pcap_close(handle);
printf("\nCapture complete.\n");
return 0;
}
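The question also asked about sending a frame in response; the capture code above does not cover that. libpcap can also transmit raw frames with pcap_inject() (or pcap_sendpacket()), provided the driver and the monitor-mode interface allow injection. A minimal sketch, assuming handle is the pcap_t opened above and that frame/frame_len hold a fully built 802.11 frame including its radiotap header (building that frame is up to you; the buffer here is only a placeholder):
unsigned char frame[256]; /* placeholder: fill with radiotap header + 802.11 frame */
size_t frame_len = 0;     /* placeholder: set to the real frame length */
/* transmit the prebuilt raw frame on the same capture handle */
if (pcap_inject(handle, frame, frame_len) == -1) {
    fprintf(stderr, "pcap_inject failed: %s\n", pcap_geterr(handle));
}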
I am still new to OpenCV. I made a simple program, based on a sample, to access the webcam, but it always fails. I changed the variable id to 0, 1, 2, ..., 100, but I got the same result. This is my program:
#include "cv.h"
#include "highgui.h"
#include "stdio.h"
#include "iostream"
// A Simple Camera Capture Framework
int main()
{
IplImage* img = NULL;
CvCapture* cap = NULL;
int id=0;
cap = cvCaptureFromCAM(id);
cvNamedWindow("Images",CV_WINDOW_AUTOSIZE);
if ( !cap )
printf("ERROR\n\n");
else
for(;;)
{
img = cvQueryFrame(cap);
cvShowImage("Imagenes", img);
cvWaitKey(10);
}
cvReleaseImage(&img);
cvReleaseCapture(&cap);
return 0;
}
Thank you for your help.
Do yourself a favor and check the return values of the functions. Some of them may be failing and you'll never know why.
Another tip: try with id = -1.
#include <iostream>
#include <sstream>
#include <string>
#include <cv.h>
#include <highgui.h>
int main()
{
CvCapture* capture = NULL;
if ((capture = cvCaptureFromCAM(-1)) == NULL)
{
fprintf(stderr, "ERROR: capture is NULL \n");
return -1;
}
cvNamedWindow("mywindow", CV_WINDOW_AUTOSIZE);
cvQueryFrame(capture); // Sometimes needed to get correct data
IplImage* frame = NULL;
while (1)
{
if ((frame = cvQueryFrame(capture)) == NULL)
{
fprintf( stderr, "ERROR: cvQueryFrame failed\n");
break;
}
cvShowImage("mywindow", frame); // Do not release the frame!
int key = cvWaitKey(10);
if (key == 27) // ESC was pressed
break;
}
cvReleaseCapture(&capture);
cvDestroyWindow("mywindow");
return 0;
}