I'm trying to encode an OpenGL scene into an MP4 video.
On the left (red) is the video encoded the fastest way possible, but the result looks jerky. If I add a pause (NSThread sleep) each time I encode a frame, it looks better (right, blue), but it takes much longer.
I suspect I'm doing something wrong somewhere, so here's the code of my encoder (I can provide a full project on GitHub if needed):
Encoding loop:
while (!finished) {
    if ([self.encoder isReadyToEncodeNewFrame]) {
        if ([self.encoder encodeFrame]) {
            self.frameCpt++;
        }
    }
}
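For comparison, AVAssetWriterInput can also pull frames itself via requestMediaDataWhenReadyOnQueue:usingBlock:, which avoids both the busy-wait and the sleep. A rough sketch, under the assumption that the encoder exposes its writer input (here as writerInput):
// Sketch only: assumes GLEncoder exposes its AVAssetWriterInput as `writerInput`,
// and that the EAGL context is made current on this queue before drawing.
dispatch_queue_t encodeQueue = dispatch_queue_create("encode.queue", DISPATCH_QUEUE_SERIAL);
[writerInput requestMediaDataWhenReadyOnQueue:encodeQueue usingBlock:^{
    // AVFoundation invokes this block whenever the input can accept more data,
    // so there is no polling loop and no sleep.
    while (writerInput.readyForMoreMediaData && !finished) {
        [self.encoder encodeFrame];
    }
}];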
Encoder:
#import "GLEncoder.h"
#include <OpenGLES/ES2/glext.h>
#import <UIKit/UIKit.h>
#import <AVFoundation/AVFoundation.h>
@interface GLEncoder () {
CVOpenGLESTextureCacheRef _coreVideoTextureCache;
CVPixelBufferRef _renderTarget;
}
@property (nonatomic, assign) GLuint fboHook;
@property (nonatomic, assign) GLuint fboTexture;
@property (nonatomic, assign) int videoWidth;
@property (nonatomic, assign) int videoHeight;
@property (nonatomic, assign) int FPS;
@property (nonatomic, assign) BOOL isEncodingFrame;
@property (nonatomic, assign) BOOL hasFinishedEncoding;
@property (nonatomic, assign) int frameCpt;
@property (nonatomic, strong) NSString * videoFilePath;
@property (nonatomic, strong) EAGLContext * eaglContext;
@property (nonatomic, strong) NSURL * videoFileURL;
@property (nonatomic, strong) AVAssetWriter *assetWriter;
@property (nonatomic, strong) AVAssetWriterInput *assetWriterInput;
@property (nonatomic, strong) AVAssetWriterInputPixelBufferAdaptor *inputPixelBufferAdapter;
@property (nonatomic, strong) id<GLEncoderDelegate> delegate;
@end
@implementation GLEncoder
- (id)initWithWidth:(int)videoWidth
andHeight:(int)videoHeight
andFPS:(int)FPS
andEAGLContext:(EAGLContext *)context {
self.videoWidth = videoWidth;
self.videoHeight = videoHeight;
self.FPS = FPS;
self.eaglContext = context;
self.frameCpt = 0;
self.isEncodingFrame = NO;
self.hasFinishedEncoding = NO;
return self;
}
- (void)setDelegate:(id<GLEncoderDelegate>)newDelegate {
// assign the ivar directly; self.delegate = newDelegate would recurse into this setter
_delegate = newDelegate;
}
- (void)setupEncoding:(nonnull NSString *)oVideoFilePath fboHook:(GLuint)fboHook {
self.fboHook = fboHook;
self.videoFilePath = oVideoFilePath;
self.videoFileURL = [NSURL fileURLWithPath:oVideoFilePath];
if ([[NSFileManager defaultManager] fileExistsAtPath:self.videoFilePath ])
[[NSFileManager defaultManager] removeItemAtPath:self.videoFilePath error:nil];
NSError *error = nil;
self.assetWriter = [[AVAssetWriter alloc] initWithURL:self.videoFileURL
fileType:AVFileTypeMPEG4
error:&error];
NSDictionary *outputSettingsDictionary = @{AVVideoCodecKey:
AVVideoCodecH264,
AVVideoWidthKey:
@(self.videoWidth),
AVVideoHeightKey:
@(self.videoHeight)};
self.assetWriterInput = [AVAssetWriterInput
assetWriterInputWithMediaType:AVMediaTypeVideo
outputSettings:outputSettingsDictionary];
NSDictionary *sourcePixelBufferAttributesDictionary = [NSDictionary dictionaryWithObjectsAndKeys:
@(kCVPixelFormatType_32BGRA),
kCVPixelBufferPixelFormatTypeKey,
@(self.videoWidth),
kCVPixelBufferWidthKey,
@(self.videoHeight),
kCVPixelBufferHeightKey,
nil];
self.inputPixelBufferAdapter = [AVAssetWriterInputPixelBufferAdaptor
assetWriterInputPixelBufferAdaptorWithAssetWriterInput:self.assetWriterInput
sourcePixelBufferAttributes:sourcePixelBufferAttributesDictionary];
[self.assetWriter addInput:self.assetWriterInput];
[self.assetWriter startWriting];
[self.assetWriter startSessionAtSourceTime:CMTimeMake(0, self.FPS)];
_coreVideoTextureCache = NULL;
_renderTarget = NULL;
CVOpenGLESTextureCacheCreate(kCFAllocatorDefault,
NULL,
self.eaglContext,
NULL,
&_coreVideoTextureCache);
CVPixelBufferPoolCreatePixelBuffer(NULL,
[self.inputPixelBufferAdapter pixelBufferPool],
&_renderTarget);
CVOpenGLESTextureRef renderTexture;
CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault,
_coreVideoTextureCache,
_renderTarget,
NULL,
GL_TEXTURE_2D,
GL_RGBA,
self.videoWidth,
self.videoHeight,
GL_BGRA,
GL_UNSIGNED_BYTE,
0,
&renderTexture);
self.fboTexture = CVOpenGLESTextureGetName(renderTexture);
glBindTexture(CVOpenGLESTextureGetTarget(renderTexture), self.fboTexture);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glBindFramebuffer(GL_FRAMEBUFFER, self.fboHook);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, self.fboTexture, 0);
}
- (BOOL)isReadyToEncodeNewFrame {
return (self.assetWriterInput.readyForMoreMediaData && !self.isEncodingFrame);
}
- (BOOL)encodeFrame {
__block BOOL success = NO;
if (!self.hasFinishedEncoding) {
self.isEncodingFrame = YES;
if (self.assetWriterInput.readyForMoreMediaData) {
if (self.delegate)
[self.delegate drawOpenGLScene];
CVPixelBufferLockBaseAddress(_renderTarget, 0);
CMTime frameTime = CMTimeMake(self.frameCpt, self.FPS);
if ([_inputPixelBufferAdapter appendPixelBuffer:_renderTarget withPresentationTime:frameTime]) {
self.frameCpt++;
success = YES;
}
CVPixelBufferUnlockBaseAddress(_renderTarget, 0);
// This is where I pause after each frame has been encoded
[NSThread sleepForTimeInterval:0.05];
}
self.isEncodingFrame = NO;
}
return success;
}
- (void)finishEncoding:(BlockRunnable)completionHandler {
self.hasFinishedEncoding = YES;
[self.assetWriterInput markAsFinished];
[self.assetWriter finishWritingWithCompletionHandler:^{
self.assetWriter = nil;
completionHandler();
}];
}
@end
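Once the loop exits, the writer still has to be finalized through the completion-handler method above; a minimal usage sketch:
[self.encoder finishEncoding:^{
    // finishWritingWithCompletionHandler is asynchronous; the file is only complete
    // once this block has run.
    NSLog(@"Encoded %d frames", self.frameCpt);
}];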
Related
I want to record a window's snapshot to an MP4 file in a macOS app.
Start the creator with [aMp4CreaterEntity startRecordWithSize:CGSizeMake(2320, 1080) pixelType:kCVPixelFormatType_32ARGB].
Run a timer 15 times per second to snapshot the window, using CGWindowListCreateImage to get a CGImageRef whose width = 2320 and height = 1080.
Call [aMp4CreaterEntity recordImage:theCGImageRef timeStamp:[[NSDate date] timeIntervalSince1970]] to send the CGImageRef to my aMp4CreaterEntity.
Call [aMp4CreaterEntity stopRecord], and get the MP4 file at the output path.
Everything runs all right, except that the MP4 file contains only the first half of what was sent; the later content is lost. Yet when I debug, every CVPixelBufferRef is appended by the AVAssetWriterInputPixelBufferAdaptor.
At first I thought the CMTime setting was wrong, but after changing it to half or double, the error remained.
I'm new to audio and video; can someone help me solve this problem or explain it in detail?
BTW: I record the audio to another file at the same time, and it has the same problem - only the earlier half of the content. (Swift answers are fine too; I can read Swift directly.)
this is my recorder sample code with Objective-C.
#import "Mp4Creater.h"
#import <AVFoundation/AVFoundation.h>
@interface Mp4Creater()
@property (nonatomic, strong) AVAssetWriter *videoWriter;
@property (nonatomic, strong) AVAssetWriterInput *videoInput;
@property (nonatomic, strong) AVAssetWriterInputPixelBufferAdaptor *videoAdaptor;
@property (nonatomic, strong) NSString *videoOutputPath;
@property (nonatomic, strong) NSDictionary *videoSettings;
@property (nonatomic, assign) NSTimeInterval startTs;
@property (nonatomic, assign) NSTimeInterval latestTs;
@property (nonatomic, strong) NSOperationQueue *opQueue;
@property (nonatomic, assign) BOOL isRecording;
@property (nonatomic, assign) NSUInteger frameRate; // 15
@property (nonatomic, assign) NSUInteger iFrameInterval; // 3s
@end
@implementation Mp4Creater
- (instancetype)init
{
self = [super init];
if (self) {
_videoWriter = nil;
_videoInput = nil;
_videoAdaptor = nil;
_videoOutputPath = nil;
_videoSettings = nil;
_startTs = -1;
_latestTs = -1;
_isRecording = NO;
_frameRate = 15;
_iFrameInterval = 3;
}
return self;
}
- (void)dealloc
{
[_opQueue cancelAllOperations];
}
- (BOOL)addVideoInputWithSize:(CGSize)size pixelType:(UInt32)pixelType {
NSString *codecKey = AVVideoCodecTypeH264;
_videoSettings = [NSDictionary dictionaryWithObjectsAndKeys:codecKey, AVVideoCodecKey,
[NSNumber numberWithInt: size.width], AVVideoWidthKey,
[NSNumber numberWithInt: size.height], AVVideoHeightKey,
[NSDictionary dictionaryWithObjectsAndKeys:AVVideoYCbCrMatrix_ITU_R_709_2, AVVideoYCbCrMatrixKey, AVVideoTransferFunction_ITU_R_709_2, AVVideoTransferFunctionKey, AVVideoColorPrimaries_ITU_R_709_2, AVVideoColorPrimariesKey, nil], AVVideoColorPropertiesKey,
[NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithInt: size.width * size.height * 2], AVVideoAverageBitRateKey,
[NSNumber numberWithInt: (int)(_frameRate*_iFrameInterval)], AVVideoMaxKeyFrameIntervalKey,
[NSNumber numberWithInt: (int)(_iFrameInterval)], AVVideoMaxKeyFrameIntervalDurationKey,
AVVideoProfileLevelH264BaselineAutoLevel, AVVideoProfileLevelKey,
nil], AVVideoCompressionPropertiesKey,
nil];
AVAssetWriterInput *videoInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:_videoSettings];
videoInput.expectsMediaDataInRealTime = YES;
if ([_videoWriter canAddInput:videoInput]) {
[_videoWriter addInput:videoInput];
_videoInput = videoInput;
}
else {
return NO;
}
NSDictionary *sourcePixelBufferAttributes = @{(NSString *)kCVPixelBufferPixelFormatTypeKey : @(pixelType)};
_videoAdaptor = [[AVAssetWriterInputPixelBufferAdaptor alloc] initWithAssetWriterInput:_videoInput sourcePixelBufferAttributes:sourcePixelBufferAttributes];
return YES;
}
- (BOOL)startRecordWithSize:(CGSize)size pixelType:(UInt32)pixelType {
if (self.isRecording) {
return YES;
}
self.startTs = -1;
NSString *outputFile;
NSString *guid = [[NSUUID new] UUIDString];
NSString *fileName = [NSString stringWithFormat:@"video_%@.mp4", guid];
outputFile = [NSTemporaryDirectory() stringByAppendingPathComponent:fileName];
self.videoOutputPath = outputFile;
NSError *error = nil;
//----initialize compression engine
self.videoWriter = [[AVAssetWriter alloc]initWithURL:[NSURL fileURLWithPath:self.videoOutputPath]
fileType:AVFileTypeMPEG4
error:&error];
self.videoWriter.shouldOptimizeForNetworkUse = YES;
if(error) {
return NO;
}
if (self.videoWriter == nil) {
return NO;
}
if (![self addVideoInputWithSize:size pixelType:pixelType ]) {
[self stopRecord];
return NO;
}
self->_isRecording = YES;
[self.videoWriter startWriting];
[self.videoWriter startSessionAtSourceTime:kCMTimeZero];
_opQueue = [[NSOperationQueue alloc] init];
_opQueue.maxConcurrentOperationCount = 1;
return YES;
}
- (void)stopRecord {
if (!self.isRecording) {
return;
}
[_opQueue cancelAllOperations];
NSOperationQueue *oldQueue = _opQueue;
_opQueue = nil;
[oldQueue waitUntilAllOperationsAreFinished];
if (self.videoInput != nil) {
[self.videoInput markAsFinished];
}
self.videoInput = nil;
self.videoAdaptor = nil;
if (self.videoWriter != nil) {
__block BOOL success = NO;
if (self.videoWriter.status == AVAssetWriterStatusWriting) {
success = YES;
}
[self.videoWriter finishWritingWithCompletionHandler:^{
if (self.videoWriter.status == AVAssetWriterStatusCompleted) {
if (success) {
return;
}
}
}];
}
self->_isRecording = NO;
}
- (void)recordImage:(CGImageRef)image timeStamp:(NSTimeInterval)ts {
CGImageRef retainImage = CGImageRetain(image);
__weak __typeof__(self) weak_self = self;
[_opQueue addOperationWithBlock:^{
__typeof__(self) self = weak_self;
if (!self.isRecording) {
return;
}
if (self.startTs < 0) {
self.startTs = ts;
}
self.latestTs = ts;
CMTime cmTime = CMTimeMake((ts - self.startTs) * 1000, 1000);
if (self.videoWriter != nil) {
if (self.videoWriter.status == AVAssetWriterStatusWriting) {
if (self.videoInput != nil && self.videoInput.isReadyForMoreMediaData) {
CVPixelBufferRef buffer = [self CVPixelBufferRefFromCGImage:retainImage];
if (buffer != NULL) {
[self.videoAdaptor appendPixelBuffer:buffer withPresentationTime:cmTime];
CVPixelBufferRelease(buffer);
}
}
}
}
CGImageRelease(retainImage);
}];
}
- (CVPixelBufferRef)CVPixelBufferRefFromCGImage:(CGImageRef)image {
size_t pixelsWide = CGImageGetWidth(image);
size_t pixelsHigh = CGImageGetHeight(image);
NSInteger bitmapBytesPerRow = (pixelsWide * 4);
NSDictionary *options = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithBool:YES], kCVPixelBufferCGImageCompatibilityKey,
[NSNumber numberWithBool:YES], kCVPixelBufferCGBitmapContextCompatibilityKey, nil];
CVPixelBufferRef pxbuffer = NULL;
CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, pixelsWide, pixelsHigh, kCVPixelFormatType_32ARGB, (__bridge CFDictionaryRef)options, &pxbuffer);
NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);
if (status == kCVReturnSuccess) {
CVPixelBufferLockBaseAddress(pxbuffer, 0);
void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
NSParameterAssert(pxdata != NULL);
CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(pxdata, pixelsWide, pixelsHigh, 8, bitmapBytesPerRow, rgbColorSpace, (CGBitmapInfo)kCGImageAlphaPremultipliedFirst);
NSParameterAssert(context);
CGContextConcatCTM(context, CGAffineTransformIdentity);
CGContextDrawImage(context, CGRectMake(0, 0, pixelsWide, pixelsHigh), image);
CVPixelBufferUnlockBaseAddress(pxbuffer, 0);
CGColorSpaceRelease(rgbColorSpace);
CGContextRelease(context);
return pxbuffer;
}
else {
return nil;
}
}
@end
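Not an answer, but one way to narrow a problem like this down is to check the append result and the writer's error instead of discarding them: appendPixelBuffer:withPresentationTime: returns a BOOL, and a failed writer exposes its error. A hedged diagnostic sketch for the append call in recordImage:timeStamp::
// Diagnostic sketch: log failed appends and the writer's state when they happen.
BOOL ok = [self.videoAdaptor appendPixelBuffer:buffer withPresentationTime:cmTime];
if (!ok) {
    NSLog(@"append failed at %.3fs (writer status %ld, error %@)",
          CMTimeGetSeconds(cmTime), (long)self.videoWriter.status, self.videoWriter.error);
}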
I have an AVPlayer that plays audio from a mono AVAsset, doing some processing via an audio processing tap along the way. How can I convert this asset to stereo before reaching the tap? The second channel can be empty or a copy of the first channel (it’ll get filled up manually in the tap).
I've tried converting the mono to stereo within the tap, but apparently we have no control over the ASBD or AudioBufferList structure once we're inside the tap. I've also done offline conversion, but this presents big obstacles (can be quite slow, not suitable for web streaming).
Here is the barebones (but complete) code which you can use with any mono audio file. You'll see that by the time it hits the processing tap, there’s just the one channel available instead of the desired two channels. To use the code, you just need to add the MediaPlayer and TapProcessor classes below to a blank Single View Application, use the following ViewController code in place of the default code, and add in your own mono audio file to your project. Thanks for reading.
MediaPlayer.h
#import <Foundation/Foundation.h>
@interface MediaPlayer : NSObject
@end
MediaPlayer.m
#import "MediaPlayer.h"
#import "TapProcessor.h"
#import <AVFoundation/AVFoundation.h>
@interface MediaPlayer()
@property (nonatomic, strong) AVAsset *asset;
@property (nonatomic, strong) AVPlayer *player;
@property (nonatomic, strong) TapProcessor *audioTapProcessor;
@end
@implementation MediaPlayer
- (id)init {
if (self = [super init]){
NSString *path = [[NSBundle mainBundle] pathForResource:@"MonoSource"
ofType:@"mp3"];
[self loadFileWithPath:path];
}
return self;
}
-(void)loadFileWithPath:(NSString*)path{
NSURL *fileURL = [NSURL fileURLWithPath:path];
NSDictionary *options = [NSDictionary dictionaryWithObject:[NSNumber numberWithBool:YES]
forKey:AVURLAssetPreferPreciseDurationAndTimingKey];
self.asset = [AVURLAsset URLAssetWithURL:fileURL options:options];
[self.asset loadValuesAsynchronouslyForKeys:@[@"tracks"] completionHandler:^{
dispatch_async(dispatch_get_main_queue(), ^{
AVKeyValueStatus status = [self.asset statusOfValueForKey:@"tracks" error:nil];
switch (status) {
case AVKeyValueStatusLoaded:
[self setupPlayer];
break;
default:
break;
}
});
}];
}
- (void) setupPlayer{
AVPlayerItem *item = [AVPlayerItem playerItemWithAsset:self.asset];
AVAssetTrack *audioTrack = [[self.asset tracksWithMediaType:AVMediaTypeAudio] objectAtIndex:0];
[self printInfoForTrack:audioTrack];
TapProcessor *newProcessor = [[TapProcessor alloc] initWithTrack:audioTrack];
AVAudioMix *audioMix = [newProcessor audioMix];
item.audioMix = audioMix;
self.player = [AVPlayer playerWithPlayerItem:item];
[self.player play];
}
-(void) printInfoForTrack:(AVAssetTrack*)track{
CMAudioFormatDescriptionRef item = (__bridge CMAudioFormatDescriptionRef)[track.formatDescriptions objectAtIndex:0];
const AudioStreamBasicDescription* desc = CMAudioFormatDescriptionGetStreamBasicDescription(item);
NSLog(#"Number of track channels: %d", desc->mChannelsPerFrame);
}
@end
TapProcessor.h
#import <Foundation/Foundation.h>
#import <AVFoundation/AVFoundation.h>
@interface TapProcessor : NSObject
- (id)initWithTrack:(AVAssetTrack *)track;
@property (readonly, nonatomic) AVAssetTrack *track;
@property (readonly, nonatomic) AVAudioMix *audioMix;
@end
TapProcessor.m
#import "TapProcessor.h"
// TAP CALLBACKS
static void tap_InitCallback(MTAudioProcessingTapRef tap,
void *clientInfo,
void **tapStorageOut){
}
static void tap_FinalizeCallback(MTAudioProcessingTapRef tap){
}
static void tap_PrepareCallback(MTAudioProcessingTapRef tap,
CMItemCount maxFrames,
const AudioStreamBasicDescription *processingFormat){
NSLog(#"Number of tap channels: %d", processingFormat->mChannelsPerFrame);
}
static void tap_UnprepareCallback(MTAudioProcessingTapRef tap){
}
static void tap_ProcessCallback(MTAudioProcessingTapRef tap,
CMItemCount numberFrames,
MTAudioProcessingTapFlags flags,
AudioBufferList *bufferListInOut,
CMItemCount *numberFramesOut,
MTAudioProcessingTapFlags *flagsOut){
MTAudioProcessingTapGetSourceAudio(tap, numberFrames, bufferListInOut, NULL, NULL, NULL);
*numberFramesOut = numberFrames;
}
@implementation TapProcessor
- (id)initWithTrack:(AVAssetTrack *)track{
self = [super init];
if (self){
_track = track;
}
return self;
}
@synthesize audioMix = _audioMix;
- (AVAudioMix *)audioMix {
if (!_audioMix){
AVMutableAudioMix *audioMix = [AVMutableAudioMix audioMix];
if (audioMix){
AVMutableAudioMixInputParameters *audioMixInputParameters = [AVMutableAudioMixInputParameters audioMixInputParametersWithTrack:self.track];
if (audioMixInputParameters) {
MTAudioProcessingTapCallbacks callbacks;
callbacks.version = kMTAudioProcessingTapCallbacksVersion_0;
callbacks.clientInfo = (__bridge void *)self;
callbacks.init = tap_InitCallback;
callbacks.finalize = tap_FinalizeCallback;
callbacks.prepare = tap_PrepareCallback;
callbacks.unprepare = tap_UnprepareCallback;
callbacks.process = tap_ProcessCallback;
MTAudioProcessingTapRef audioProcessingTap;
if (noErr == MTAudioProcessingTapCreate(kCFAllocatorDefault,
&callbacks,
kMTAudioProcessingTapCreationFlag_PreEffects,
&audioProcessingTap)){
audioMixInputParameters.audioTapProcessor = audioProcessingTap;
CFRelease(audioProcessingTap);
audioMix.inputParameters = @[audioMixInputParameters];
_audioMix = audioMix;
}
}
}
}
return _audioMix;
}
@end
ViewController.h
#import <UIKit/UIKit.h>
@interface ViewController : UIViewController
@end
ViewController.m
#import "ViewController.h"
#import "MediaPlayer.h"
@interface ViewController ()
@property (nonatomic,strong) MediaPlayer *mediaPlayer;
@end
@implementation ViewController
- (void)viewDidLoad {
[super viewDidLoad];
self.mediaPlayer = [[MediaPlayer alloc] init];
}
@end
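For reference: once the tap does receive two non-interleaved channels, duplicating the first channel into the second inside tap_ProcessCallback is a single memcpy. A sketch under that assumption (it does nothing while the source is still mono, which is the part the question is about):
static void tap_ProcessCallback(MTAudioProcessingTapRef tap,
                                CMItemCount numberFrames,
                                MTAudioProcessingTapFlags flags,
                                AudioBufferList *bufferListInOut,
                                CMItemCount *numberFramesOut,
                                MTAudioProcessingTapFlags *flagsOut){
    MTAudioProcessingTapGetSourceAudio(tap, numberFrames, bufferListInOut, NULL, NULL, NULL);
    // With a non-interleaved stereo processing format, mBuffers[0] and mBuffers[1]
    // each hold one channel; copy channel 0 into channel 1.
    if (bufferListInOut->mNumberBuffers >= 2) {
        memcpy(bufferListInOut->mBuffers[1].mData,
               bufferListInOut->mBuffers[0].mData,
               bufferListInOut->mBuffers[0].mDataByteSize);
    }
    *numberFramesOut = numberFrames;
}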
In my project I have to add a book page-flip animation, and in this book a video plays on the right-hand page. Once the first video completes, the page turns like a book page and the second video plays on the next right-hand page, and so on. Now I have to save all of this as a video that can be downloaded, so that when the downloaded video is played from the gallery it looks the same as it does in my app. Right now I am recording the device's screen and saving it on a server for download. Everything is fine except the video player: in the recording, the area where the videos play (the right-hand page of the book) is not captured.
I am using the code below to record the screen. If any of you have another idea for doing the same thing, please share it with me, or if my code needs changing please suggest how. Thanks in advance.
// ASScreenRecorder.h
#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>
typedef void (^VideoCompletionBlock)(void);
@protocol ASScreenRecorderDelegate;
@interface ASScreenRecorder : NSObject
@property (nonatomic, readonly) BOOL isRecording;
@property (nonatomic, weak) id <ASScreenRecorderDelegate> delegate;
// if saveURL is nil, video will be saved into camera roll
// this property can not be changed whilst recording is in progress
@property (strong, nonatomic) NSURL *videoURL;
+ (instancetype)sharedInstance;
- (BOOL)startRecording;
- (void)stopRecordingWithCompletion:(VideoCompletionBlock)completionBlock;
@end
// If your view contains an AVCaptureVideoPreviewLayer or an openGL view
// you'll need to write that data into the CGContextRef yourself.
// In the viewcontroller responsible for the AVCaptureVideoPreviewLayer / openGL view
// set yourself as the delegate for ASScreenRecorder.
// [ASScreenRecorder sharedInstance].delegate = self
// Then implement 'writeBackgroundFrameInContext:(CGContextRef*)contextRef'
// use 'CGContextDrawImage' to draw your view into the provided CGContextRef
@protocol ASScreenRecorderDelegate <NSObject>
- (void)writeBackgroundFrameInContext:(CGContextRef*)contextRef;
@end
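AVPlayerLayer content is typically not captured by drawViewHierarchyInRect:afterScreenUpdates:, which is consistent with the blank right-hand page. The delegate hook above is the place to draw those frames yourself; a hedged sketch using AVPlayerItemVideoOutput, where videoOutput and playerFrameInWindow are assumed properties you set up alongside your player (CoreImage and QuartzCore imports assumed):
// Sketch: attach an AVPlayerItemVideoOutput to the player item when you create it, e.g.
//   self.videoOutput = [[AVPlayerItemVideoOutput alloc] initWithPixelBufferAttributes:nil];
//   [playerItem addOutput:self.videoOutput];
// ...then pull the current frame in the recorder delegate and draw it yourself.
- (void)writeBackgroundFrameInContext:(CGContextRef *)contextRef
{
    CMTime itemTime = [self.videoOutput itemTimeForHostTime:CACurrentMediaTime()];
    if (![self.videoOutput hasNewPixelBufferForItemTime:itemTime]) return;
    CVPixelBufferRef pixelBuffer = [self.videoOutput copyPixelBufferForItemTime:itemTime itemTimeForDisplay:NULL];
    if (pixelBuffer == NULL) return;
    CIImage *ciImage = [CIImage imageWithCVPixelBuffer:pixelBuffer];
    CIContext *ciContext = [CIContext contextWithOptions:nil];
    CGImageRef frame = [ciContext createCGImage:ciImage fromRect:ciImage.extent];
    // playerFrameInWindow: the player layer's rect in window coordinates (assumed property).
    CGContextDrawImage(*contextRef, self.playerFrameInWindow, frame);
    CGImageRelease(frame);
    CVPixelBufferRelease(pixelBuffer);
}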
// ASScreenRecorder.m
// ScreenRecorder
//
// Created by Alan Skipp on 23/04/2014.
// Copyright (c) 2014 Alan Skipp. All rights reserved.
//
#import "ASScreenRecorder.h"
#import <AVFoundation/AVFoundation.h>
#import <QuartzCore/QuartzCore.h>
#import <AssetsLibrary/AssetsLibrary.h>
@interface ASScreenRecorder()
@property (strong, nonatomic) AVAssetWriter *videoWriter;
@property (strong, nonatomic) AVAssetWriterInput *videoWriterInput;
@property (strong, nonatomic) AVAssetWriterInputPixelBufferAdaptor *avAdaptor;
@property (strong, nonatomic) CADisplayLink *displayLink;
@property (strong, nonatomic) NSDictionary *outputBufferPoolAuxAttributes;
@property (nonatomic) CFTimeInterval firstTimeStamp;
@property (nonatomic) BOOL isRecording;
@end
@implementation ASScreenRecorder
{
dispatch_queue_t _render_queue;
dispatch_queue_t _append_pixelBuffer_queue;
dispatch_semaphore_t _frameRenderingSemaphore;
dispatch_semaphore_t _pixelAppendSemaphore;
CGSize _viewSize;
CGFloat _scale;
CGColorSpaceRef _rgbColorSpace;
CVPixelBufferPoolRef _outputBufferPool;
}
#pragma mark - initializers
+ (instancetype)sharedInstance {
static dispatch_once_t once;
static ASScreenRecorder *sharedInstance;
dispatch_once(&once, ^{
sharedInstance = [[self alloc] init];
});
return sharedInstance;
}
- (instancetype)init
{
self = [super init];
if (self) {
_viewSize = [UIApplication sharedApplication].delegate.window.bounds.size;
_scale = [UIScreen mainScreen].scale;
// record half size resolution for retina iPads
if ((UI_USER_INTERFACE_IDIOM() == UIUserInterfaceIdiomPad) && _scale > 1) {
_scale = 1.0;
}
_isRecording = NO;
_append_pixelBuffer_queue = dispatch_queue_create("ASScreenRecorder.append_queue", DISPATCH_QUEUE_SERIAL);
_render_queue = dispatch_queue_create("ASScreenRecorder.render_queue", DISPATCH_QUEUE_SERIAL);
dispatch_set_target_queue(_render_queue, dispatch_get_global_queue( DISPATCH_QUEUE_PRIORITY_HIGH, 0));
_frameRenderingSemaphore = dispatch_semaphore_create(1);
_pixelAppendSemaphore = dispatch_semaphore_create(1);
}
return self;
}
#pragma mark - public
- (void)setVideoURL:(NSURL *)videoURL
{
NSAssert(!_isRecording, @"videoURL can not be changed whilst recording is in progress");
_videoURL = videoURL;
}
- (BOOL)startRecording
{
if (!_isRecording) {
[self setUpWriter];
_isRecording = (_videoWriter.status == AVAssetWriterStatusWriting);
_displayLink = [CADisplayLink displayLinkWithTarget:self selector:@selector(writeVideoFrame)];
[_displayLink addToRunLoop:[NSRunLoop mainRunLoop] forMode:NSRunLoopCommonModes];
}
return _isRecording;
}
- (void)stopRecordingWithCompletion:(VideoCompletionBlock)completionBlock;
{
if (_isRecording) {
_isRecording = NO;
[_displayLink removeFromRunLoop:[NSRunLoop mainRunLoop] forMode:NSRunLoopCommonModes];
[self completeRecordingSession:completionBlock];
}
}
#pragma mark - private
-(void)setUpWriter
{
_rgbColorSpace = CGColorSpaceCreateDeviceRGB();
NSDictionary *bufferAttributes = @{(id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA),
(id)kCVPixelBufferCGBitmapContextCompatibilityKey : @YES,
(id)kCVPixelBufferWidthKey : @(_viewSize.width * _scale),
(id)kCVPixelBufferHeightKey : @(_viewSize.height * _scale),
(id)kCVPixelBufferBytesPerRowAlignmentKey : @(_viewSize.width * _scale * 4)
};
_outputBufferPool = NULL;
CVPixelBufferPoolCreate(NULL, NULL, (__bridge CFDictionaryRef)(bufferAttributes), &_outputBufferPool);
NSError* error = nil;
_videoWriter = [[AVAssetWriter alloc] initWithURL:self.videoURL ?: [self tempFileURL]
fileType:AVFileTypeQuickTimeMovie
error:&error];
NSParameterAssert(_videoWriter);
NSInteger pixelNumber = _viewSize.width * _viewSize.height * _scale;
NSDictionary* videoCompression = @{AVVideoAverageBitRateKey: @(pixelNumber * 11.4)};
NSDictionary* videoSettings = @{AVVideoCodecKey: AVVideoCodecH264,
AVVideoWidthKey: [NSNumber numberWithInt:_viewSize.width*_scale],
AVVideoHeightKey: [NSNumber numberWithInt:_viewSize.height*_scale],
AVVideoCompressionPropertiesKey: videoCompression};
_videoWriterInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:videoSettings];
NSParameterAssert(_videoWriterInput);
_videoWriterInput.expectsMediaDataInRealTime = YES;
_videoWriterInput.transform = [self videoTransformForDeviceOrientation];
_avAdaptor = [AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput:_videoWriterInput sourcePixelBufferAttributes:nil];
[_videoWriter addInput:_videoWriterInput];
[_videoWriter startWriting];
[_videoWriter startSessionAtSourceTime:CMTimeMake(0, 1000)];
}
- (CGAffineTransform)videoTransformForDeviceOrientation
{
CGAffineTransform videoTransform;
switch ([UIDevice currentDevice].orientation) {
case UIDeviceOrientationLandscapeLeft:
videoTransform = CGAffineTransformMakeRotation(-M_PI_2);
break;
case UIDeviceOrientationLandscapeRight:
videoTransform = CGAffineTransformMakeRotation(M_PI_2);
break;
case UIDeviceOrientationPortraitUpsideDown:
videoTransform = CGAffineTransformMakeRotation(M_PI);
break;
default:
videoTransform = CGAffineTransformIdentity;
}
return videoTransform;
}
- (NSURL*)tempFileURL
{
NSString *outputPath = [NSHomeDirectory() stringByAppendingPathComponent:@"tmp/screenCapture.mp4"];
[self removeTempFilePath:outputPath];
return [NSURL fileURLWithPath:outputPath];
}
- (void)removeTempFilePath:(NSString*)filePath
{
NSFileManager* fileManager = [NSFileManager defaultManager];
if ([fileManager fileExistsAtPath:filePath]) {
NSError* error;
if ([fileManager removeItemAtPath:filePath error:&error] == NO) {
NSLog(#"Could not delete old recording:%#", [error localizedDescription]);
}
}
}
- (void)completeRecordingSession:(VideoCompletionBlock)completionBlock;
{
dispatch_async(_render_queue, ^{
dispatch_sync(_append_pixelBuffer_queue, ^{
[_videoWriterInput markAsFinished];
[_videoWriter finishWritingWithCompletionHandler:^{
void (^completion)(void) = ^() {
[self cleanup];
dispatch_async(dispatch_get_main_queue(), ^{
if (completionBlock) completionBlock();
});
};
if (self.videoURL) {
completion();
} else {
ALAssetsLibrary *library = [[ALAssetsLibrary alloc] init];
[library writeVideoAtPathToSavedPhotosAlbum:_videoWriter.outputURL completionBlock:^(NSURL *assetURL, NSError *error) {
if (error) {
NSLog(#"Error copying video to camera roll:%#", [error localizedDescription]);
} else {
[self removeTempFilePath:_videoWriter.outputURL.path];
completion();
}
}];
}
}];
});
});
}
- (void)cleanup
{
self.avAdaptor = nil;
self.videoWriterInput = nil;
self.videoWriter = nil;
self.firstTimeStamp = 0;
self.outputBufferPoolAuxAttributes = nil;
CGColorSpaceRelease(_rgbColorSpace);
CVPixelBufferPoolRelease(_outputBufferPool);
}
- (void)writeVideoFrame
{
// throttle the number of frames to prevent meltdown
// technique gleaned from Brad Larson's answer here: http://stackoverflow.com/a/5956119
if (dispatch_semaphore_wait(_frameRenderingSemaphore, DISPATCH_TIME_NOW) != 0) {
return;
}
dispatch_async(_render_queue, ^{
if (![_videoWriterInput isReadyForMoreMediaData]) return;
if (!self.firstTimeStamp) {
self.firstTimeStamp = _displayLink.timestamp;
}
CFTimeInterval elapsed = (_displayLink.timestamp - self.firstTimeStamp);
CMTime time = CMTimeMakeWithSeconds(elapsed, 1000);
CVPixelBufferRef pixelBuffer = NULL;
CGContextRef bitmapContext = [self createPixelBufferAndBitmapContext:&pixelBuffer];
if (self.delegate) {
[self.delegate writeBackgroundFrameInContext:&bitmapContext];
}
// draw each window into the context (other windows include UIKeyboard, UIAlert)
// FIX: UIKeyboard is currently only rendered correctly in portrait orientation
dispatch_sync(dispatch_get_main_queue(), ^{
UIGraphicsPushContext(bitmapContext); {
for (UIWindow *window in [[UIApplication sharedApplication] windows]) {
[window drawViewHierarchyInRect:CGRectMake(0, 0, _viewSize.width, _viewSize.height) afterScreenUpdates:NO];
}
} UIGraphicsPopContext();
});
// append pixelBuffer on a async dispatch_queue, the next frame is rendered whilst this one appends
// must not overwhelm the queue with pixelBuffers, therefore:
// check if _append_pixelBuffer_queue is ready
// if it’s not ready, release pixelBuffer and bitmapContext
if (dispatch_semaphore_wait(_pixelAppendSemaphore, DISPATCH_TIME_NOW) == 0) {
dispatch_async(_append_pixelBuffer_queue, ^{
BOOL success = [_avAdaptor appendPixelBuffer:pixelBuffer withPresentationTime:time];
if (!success) {
NSLog(#"Warning: Unable to write buffer to video");
}
CGContextRelease(bitmapContext);
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
CVPixelBufferRelease(pixelBuffer);
dispatch_semaphore_signal(_pixelAppendSemaphore);
});
} else {
CGContextRelease(bitmapContext);
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
CVPixelBufferRelease(pixelBuffer);
}
dispatch_semaphore_signal(_frameRenderingSemaphore);
});
}
- (CGContextRef)createPixelBufferAndBitmapContext:(CVPixelBufferRef *)pixelBuffer
{
CVPixelBufferPoolCreatePixelBuffer(NULL, _outputBufferPool, pixelBuffer);
CVPixelBufferLockBaseAddress(*pixelBuffer, 0);
CGContextRef bitmapContext = NULL;
bitmapContext = CGBitmapContextCreate(CVPixelBufferGetBaseAddress(*pixelBuffer),
CVPixelBufferGetWidth(*pixelBuffer),
CVPixelBufferGetHeight(*pixelBuffer),
8, CVPixelBufferGetBytesPerRow(*pixelBuffer), _rgbColorSpace,
kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst
);
CGContextScaleCTM(bitmapContext, _scale, _scale);
CGAffineTransform flipVertical = CGAffineTransformMake(1, 0, 0, -1, 0, _viewSize.height);
CGContextConcatCTM(bitmapContext, flipVertical);
return bitmapContext;
}
@end
@Gobinda this code works in my case. You need to specify the frame if you want to record only a portion of the window. In the init method, viewSize is defined as the window size, so you need to change viewSize to match your video frame.
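For example, a hedged tweak to -init if you only wanted a fixed region rather than the whole window:
// In ASScreenRecorder's -init, replace the window-size assignment with the region
// you want to capture, e.g. an assumed 600x400 area:
_viewSize = CGSizeMake(600, 400);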
I am developing an iOS app which records video using the rear camera.
I have managed to get the preview layer working fine.
However, when I tap the Record button, the preview freezes.
The following is my code. Please help me solve this problem.
Pg5VideoViewController.h
@interface Pg5VideoViewController : UIViewController <AVCaptureVideoDataOutputSampleBufferDelegate,AVCaptureFileOutputRecordingDelegate> {
BOOL WeAreRecording;
IBOutlet UIView *videoViewBg;
AVCaptureSession *_captureSession;
UIImageView *_imageView;
CALayer *_customLayer;
AVCaptureVideoPreviewLayer *_prevLayer;
UIColor *pickedColor;
AVCaptureMovieFileOutput *movieFileOutput;
IBOutlet UIView *theColor;
}
@property (nonatomic,retain) IBOutlet UIView *theColor;
@property (nonatomic,retain) UIColor *pickedColor;
@property (nonatomic,retain) IBOutlet UIView *videoViewBg;
@property (nonatomic, retain) AVCaptureSession *captureSession;
@property (nonatomic, retain) UIImageView *imageView;
@property (nonatomic, retain) CALayer *customLayer;
@property (nonatomic, retain) AVCaptureVideoPreviewLayer *prevLayer;
@property (nonatomic, retain) AVCaptureMovieFileOutput *movieFileOutput;
-(void)initCapture;
-(UIColor *) colorOfPoint:(CGPoint)point;
-(IBAction)takeVideo:(id)sender;
@end
the Pg5VideoViewController.m file:
@implementation Pg5VideoViewController
@synthesize videoViewBg;
@synthesize captureSession = _captureSession;
@synthesize imageView = _imageView;
@synthesize customLayer = _customLayer;
@synthesize prevLayer = _prevLayer;
@synthesize pickedColor = _pickedColor;
@synthesize theColor = _theColor;
@synthesize movieFileOutput = _movieFileOutput;
#pragma mark -
#pragma mark Initialization
- (id)init {
self = [super init];
if (self) {
self.imageView = nil;
self.prevLayer = nil;
self.customLayer = nil;
}
return self;
}
- (void)initCapture {
AVCaptureDeviceInput *captureInput = [AVCaptureDeviceInput
deviceInputWithDevice:[AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo]
error:nil];
movieFileOutput = [[AVCaptureVideoDataOutput alloc] init];
dispatch_queue_t queue;
queue = dispatch_queue_create("cameraQueue", NULL);
[movieFileOutput setSampleBufferDelegate:self queue:queue];
dispatch_release(queue);
NSString* key = (NSString*)kCVPixelBufferPixelFormatTypeKey;
NSNumber* value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA];
NSDictionary* videoSettings = [NSDictionary dictionaryWithObject:value forKey:key];
[movieFileOutput setVideoSettings:videoSettings];
self.captureSession = [[AVCaptureSession alloc] init];
[self.captureSession addInput:captureInput];
[self.captureSession addOutput:movieFileOutput];
[self.captureSession setSessionPreset:AVCaptureSessionPresetMedium];
self.customLayer = [CALayer layer];
self.customLayer.frame = CGRectMake(42, 40, 940, 558);
//self.customLayer.transform = CATransform3DRotate(CATransform3DIdentity, M_PI/2.0f, 0, 0, 1);
//self.customLayer.contentsGravity = kCAGravityResizeAspectFill;
[self.view.layer addSublayer:self.customLayer];
[self.captureSession startRunning];
}
#pragma mark -
#pragma mark AVCaptureSession delegate
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
fromConnection:(AVCaptureConnection *)connection
{
NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init];
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CVPixelBufferLockBaseAddress(imageBuffer,0);
uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
CGImageRef newImage = CGBitmapContextCreateImage(newContext);
CGContextRelease(newContext);
CGColorSpaceRelease(colorSpace);
[self.customLayer performSelectorOnMainThread:@selector(setContents:) withObject: (id) newImage waitUntilDone:YES];
UIImage *image= [UIImage imageWithCGImage:newImage scale:1.0 orientation:UIImageOrientationRight];
CGImageRelease(newImage);
[self.imageView performSelectorOnMainThread:@selector(setImage:) withObject:image waitUntilDone:YES];
CVPixelBufferUnlockBaseAddress(imageBuffer,0);
[pool drain];
}
- (void)captureOutput:(AVCaptureFileOutput *)captureOutput
didFinishRecordingToOutputFileAtURL:(NSURL *)outputFileURL
fromConnections:(NSArray *)connections
error:(NSError *)error
{
NSLog(#"didFinishRecordingToOutputFileAtURL - enter");
BOOL RecordedSuccessfully = YES;
if ([error code] != noErr)
{
id value = [[error userInfo] objectForKey:AVErrorRecordingSuccessfullyFinishedKey];
if (value)
{
RecordedSuccessfully = [value boolValue];
}
}
if (RecordedSuccessfully)
{
NSLog(#"didFinishRecordingToOutputFileAtURL - success");
ALAssetsLibrary *library = [[ALAssetsLibrary alloc] init];
if ([library videoAtPathIsCompatibleWithSavedPhotosAlbum:outputFileURL])
{
[library writeVideoAtPathToSavedPhotosAlbum:outputFileURL
completionBlock:^(NSURL *assetURL, NSError *error)
{
if (error)
{
}
}];
}
[library release];
}
}
- (void)viewDidAppear:(BOOL)animated {
}
- (IBAction)takeVideo:(id)sender {
AVCaptureMovieFileOutput *movieFileOutput1 = [[AVCaptureMovieFileOutput alloc] init];
if(!WeAreRecording) {
NSLog(#"START RECORDING");
WeAreRecording = YES;
self.videoViewBg.backgroundColor = [UIColor redColor];
NSDateFormatter *formatter;
NSString *dateString;
formatter = [[NSDateFormatter alloc]init];
[formatter setDateFormat:@"dd-MM-yyyy HH:mm:ss"];
dateString = [formatter stringFromDate:[NSDate date]];
[formatter release];
NSLog(#"The dateString is : %#",dateString);
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *documentsDirectoryPath = [paths objectAtIndex:0];
NSString *movieFileName = [NSString stringWithFormat: @"%@.mp4",dateString];
NSString *filePath = [documentsDirectoryPath stringByAppendingPathComponent:movieFileName];
NSURL *outputURL = [[NSURL alloc] initFileURLWithPath:filePath];
[self.captureSession stopRunning];
[self.captureSession beginConfiguration];
// [self.captureSession removeOutput:movieFileOutput];
if([self.captureSession canAddOutput:movieFileOutput1])
{
[self.captureSession addOutput:movieFileOutput1];
}
else
{
NSLog(#"Couldn't add still output");
}
[movieFileOutput1 startRecordingToOutputFileURL:outputURL recordingDelegate:self];
[self.captureSession commitConfiguration];
[self.captureSession startRunning];
[outputURL release];
} else {
NSLog(#"STOP RECORDING");
WeAreRecording = NO;
self.videoViewBg.backgroundColor = [UIColor whiteColor];
[movieFileOutput1 stopRecording];
[self.captureSession removeOutput:movieFileOutput1];
}
}
-(void)touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event {
UITouch *touch = [[event allTouches] anyObject];
CGPoint loc = [touch locationInView:self.view];
self.pickedColor = [self colorOfPoint:loc];
self.theColor.backgroundColor = self.pickedColor;
}
-(UIColor *) colorOfPoint:(CGPoint)point {
unsigned char pixel[4] = {0};
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(pixel, 1, 1, 8, 4, colorSpace, kCGImageAlphaPremultipliedLast);
CGContextTranslateCTM(context, -point.x, -point.y);
[self.view.layer renderInContext:context];
CGContextRelease(context);
CGColorSpaceRelease(colorSpace);
UIColor *color = [UIColor colorWithRed:pixel[0]/255.0 green:pixel[1]/255.0 blue:pixel[2]/255.0 alpha:pixel[3]/255.0];
return color;
}
// Implement viewDidLoad to do additional setup after loading the view, typically from a nib.
- (void)viewDidLoad {
[super viewDidLoad];
[self initCapture];
WeAreRecording = NO;
self.videoViewBg.layer.cornerRadius = 55;
}
// Override to allow orientations other than the default portrait orientation.
- (BOOL)shouldAutorotateToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation {
if(interfaceOrientation == UIInterfaceOrientationLandscapeRight) {
return YES;
}
return NO;
}
- (void)didReceiveMemoryWarning {
// Releases the view if it doesn't have a superview.
[super didReceiveMemoryWarning];
// Release any cached data, images, etc. that aren't in use.
}
- (void)viewDidUnload {
[super viewDidUnload];
self.imageView = nil;
self.customLayer = nil;
self.prevLayer = nil;
[self.captureSession stopRunning];
// Release any retained subviews of the main view.
// e.g. self.myOutlet = nil;
}
- (void)dealloc {
[movieFileOutput release];
[self.captureSession release];
[super dealloc];
}
@end
Please help
The problem here is not trivial. AVFoundation simply can't handle both AVCaptureMovieFileOutput and AVCaptureVideoDataOutput simultaneously. That means you can't display the preview (which requires AVCaptureVideoDataOutput) while recording (which requires AVCaptureMovieFileOutput). This is very stupid, but that's life.
The only way I know how to do this is to use only AVCaptureVideoDataOutput, and inside captureOutput:didOutputSampleBuffer:fromConnection:, write the frames manually to the video file. The following code snippets should help.
Properties
@property (strong, nonatomic) AVAssetWriter* recordingAssetWriter;
@property (strong, nonatomic) AVAssetWriterInput* recordingAssetWriterInput;
@property (strong, nonatomic) AVAssetWriterInputPixelBufferAdaptor* recordingPixelBufferAdaptor;
To initialize the video file (when you start recording or something)
// Init AVAssetWriter
NSError* error = nil;
self.recordingAssetWriter = [[AVAssetWriter alloc] initWithURL:<the video file URL> fileType:AVFileTypeMPEG4 error:&error];
// Init AVAssetWriterInput & AVAssetWriterInputPixelBufferAdaptor
NSDictionary* settings = @{AVVideoWidthKey: @(480), AVVideoHeightKey: @(640), AVVideoCodecKey: AVVideoCodecH264};
self.recordingAssetWriterInput = [[AVAssetWriterInput alloc] initWithMediaType:AVMediaTypeVideo outputSettings:settings];
self.recordingAssetWriterInput.expectsMediaDataInRealTime = YES;
self.recordingPixelBufferAdaptor = [[AVAssetWriterInputPixelBufferAdaptor alloc] initWithAssetWriterInput:self.recordingAssetWriterInput sourcePixelBufferAttributes:@{(NSString*)kCVPixelBufferPixelFormatTypeKey: @(kCVPixelFormatType_32BGRA)}];
// Add Input
[self.recordingAssetWriter addInput:self.recordingAssetWriterInput];
// Start ...
_recording = YES;
To write frames to the video file
// Inside the captureOutput:didOutputSampleBuffer:fromConnection: delegate method
// _recording is the flag to see if we're recording
if (_recording) {
CMTime sampleTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
if (self.recordingAssetWriter.status != AVAssetWriterStatusWriting) {
[self.recordingAssetWriter startWriting];
[self.recordingAssetWriter startSessionAtSourceTime:sampleTime];
}
if (self.recordingAssetWriterInput.readyForMoreMediaData) {
// Pull the pixel buffer out of the sample buffer delivered by the capture output
CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
[self.recordingPixelBufferAdaptor appendPixelBuffer:pixelBuffer withPresentationTime:sampleTime];
}
}
To finalize the video file when finish recording:
[self.recordingAssetWriterInput markAsFinished];
[self.recordingAssetWriter finishWritingWithCompletionHandler:^{
// Do not do this immediately after calling finishWritingWithCompletionHandler, since it is an async method
self.recordingAssetWriter = nil;
self.recordingAssetWriterInput = nil;
self.recordingPixelBufferAdaptor = nil;
}];
Note that I omitted error checking for clarity.
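A minimal error-checking sketch, in case the writer silently stops: appendPixelBuffer: returns a BOOL, and a failed writer exposes its error.
if (![self.recordingPixelBufferAdaptor appendPixelBuffer:pixelBuffer withPresentationTime:sampleTime]) {
    NSLog(@"Append failed: %@", self.recordingAssetWriter.error);
}
if (self.recordingAssetWriter.status == AVAssetWriterStatusFailed) {
    NSLog(@"Asset writer failed: %@", self.recordingAssetWriter.error);
}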
I'm trying to build a non-realtime face detection application.
Following this article: http://maniacdev.com/2011/11/tutorial-easy-face-detection-with-core-image-in-ios-5/ I can load in a jpg and detect faces.
I would like to automatically take a picture every 20 seconds, then display the image in a UIImageView* and then run the existing face detection function on it.
My question is twofold.
Is there an easy way to take a sample picture from the camera and
load it into a UIImageView* without saving it?
How can I automate this to happen every 30 seconds with no user interaction?
Thanks!
Look at the AVFoundation Programming Guide.
This guide shows you how to use AVFoundation to capture media.
You will need to take device rotation into account, as the camera will display only its raw output until you rotate the output via a CATransform3D. But that is a bit more in depth than you want.
You may be able to get away with just knowing that you rotate in 90° steps from the original orientation to the final rotation location.
Here is my code for my little camera testing utility.
Build a UIView and connect the IBOutlets and IBActions
ViewController.h
#import <UIKit/UIKit.h>
@interface ViewController : UIViewController
@property (weak, nonatomic) IBOutlet UIView *previewViewContainer;
@property (weak, nonatomic) IBOutlet UIView *playerViewContainer;
- (IBAction)button1Pressed:(id)sender;
- (IBAction)button2Pressed:(id)sender;
- (IBAction)button3Pressed:(id)sender;
- (IBAction)button4Pressed:(id)sender;
- (IBAction)startPressed:(id)sender;
- (IBAction)stopPressed:(id)sender;
- (IBAction)swapInputsPressed:(id)sender;
- (IBAction)recordPressed:(id)sender;
@end
ViewController.m
#import "ViewController.h"
#import <AVFoundation/AVFoundation.h>
@interface ViewController ()
@property (nonatomic, strong) AVCaptureSession *captureSession;
@property (nonatomic, strong) AVCaptureVideoPreviewLayer *capturePreviewLayer;
@property (nonatomic, strong) AVCaptureDeviceInput *frontCam;
@property (nonatomic, readonly) BOOL frontCamIsSet;
@property (nonatomic, readonly) BOOL hasFrontCam;
@property (nonatomic, readonly) BOOL isUsingFrontCam;
@property (nonatomic, strong) AVCaptureDeviceInput *backCam;
@property (nonatomic, readonly) BOOL backCamIsSet;
@property (nonatomic, readonly) BOOL hasBackCam;
@property (nonatomic, readonly) BOOL isUsingBackCam;
@property (nonatomic, strong) AVCaptureDeviceInput *mic;
@property (nonatomic, readonly) BOOL micIsSet;
@property (nonatomic, readonly) BOOL hasMic;
@end
CGFloat DegreesToRadians(CGFloat degrees)
{
return degrees * M_PI / 180;
};
CGFloat RadiansToDegrees(CGFloat radians)
{
return radians * 180 / M_PI;
};
@implementation ViewController
#pragma mark - Helper Methods
- (NSArray *) inputDevices{
return [AVCaptureDevice devices];
}
- (NSArray *) videoInputDevices{
return [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
}
- (NSArray *) audioInputDevices{
return [AVCaptureDevice devicesWithMediaType:AVMediaTypeAudio];
}
#pragma mark - Properties
@synthesize captureSession = _captureSession;
- (AVCaptureSession *)captureSession{
if (_captureSession == nil){
_captureSession = [[AVCaptureSession alloc] init];
}
return _captureSession;
}
@synthesize capturePreviewLayer = _capturePreviewLayer;
- (AVCaptureVideoPreviewLayer *)capturePreviewLayer{
if (_capturePreviewLayer == nil){
_capturePreviewLayer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:self.captureSession];
}
return _capturePreviewLayer;
}
@synthesize frontCam = _frontCam;
- (AVCaptureDeviceInput *)frontCam{
if (_frontCam == nil && !self.frontCamIsSet){
_frontCamIsSet = YES;
NSArray *videoDevices = [self videoInputDevices];
for (AVCaptureDevice *inputDevice in videoDevices) {
if ([inputDevice position] == AVCaptureDevicePositionFront){
NSError *error = nil;
_frontCam = [AVCaptureDeviceInput deviceInputWithDevice:inputDevice error:&error];
if (!_frontCam){
NSLog(#"Error Attaching Front Cam %#",error);
}
}
}
}
return _frontCam;
}
- (BOOL)hasFrontCam{
return self.frontCam != nil;
}
@synthesize isUsingFrontCam = _isUsingFrontCam;
@synthesize backCam = _backCam;
- (AVCaptureDeviceInput *)backCam{
if (_backCam == nil && !self.backCamIsSet){
_backCamIsSet = YES;
NSArray *videoDevices = [self videoInputDevices];
for (AVCaptureDevice *inputDevice in videoDevices) {
if ([inputDevice position] == AVCaptureDevicePositionBack){
NSError *error = nil;
_backCam = [AVCaptureDeviceInput deviceInputWithDevice:inputDevice error:&error];
if (!_backCam){
NSLog(#"Error Attaching Back Cam %#",error);
}
}
}
}
return _backCam;
}
- (BOOL)hasBackCam{
return self.backCam != nil;
}
@synthesize mic = _mic;
- (AVCaptureDeviceInput *)mic{
if (_mic == nil && !self.micIsSet){
_micIsSet = YES;
NSArray *audioDevices = [self audioInputDevices];
for (AVCaptureDevice *inputDevice in audioDevices) {
NSError *error = nil;
_mic = [AVCaptureDeviceInput deviceInputWithDevice:inputDevice error:&error];
if (!_mic){
NSLog(#"Error Attaching Mic %#",error);
}
}
}
return _mic;
}
- (BOOL)hasMic{
return self.mic != nil;
}
- (BOOL)isUsingBackCam{
return !self.isUsingFrontCam;
}
- (IBAction)button1Pressed:(id)sender {
if (NO && self.hasFrontCam && [self.captureSession canAddInput:self.frontCam]){
_isUsingFrontCam = YES;
[self.captureSession addInput:self.frontCam];
}
else if(self.hasBackCam && [self.captureSession canAddInput:self.backCam]){
_isUsingFrontCam = NO;
[self.captureSession addInput:self.backCam];
}
if (self.hasMic && [self.captureSession canAddInput:self.mic]) {
[self.captureSession addInput:self.mic];
}
}
- (IBAction)button2Pressed:(id)sender {
self.capturePreviewLayer.frame = self.previewViewContainer.layer.bounds;
[self.previewViewContainer.layer addSublayer:self.capturePreviewLayer];
}
- (void) orientationChanged:(NSNotification*) notification{
NSLog(#"Notification Of Orientation Change\n\n%#",notification.userInfo);
if (_capturePreviewLayer != nil){
CGFloat rotate90 = DegreesToRadians(90);
CGFloat rotateFinish = 0;
UIDeviceOrientation orientation = [UIDevice currentDevice].orientation;
switch (orientation) {
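// Intentional fall-through: each matching case below adds another 90° of rotation.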
case UIDeviceOrientationLandscapeLeft:
rotateFinish += rotate90;
case UIDeviceOrientationPortraitUpsideDown:
rotateFinish += rotate90;
case UIDeviceOrientationLandscapeRight:
rotateFinish += rotate90;
case UIDeviceOrientationPortrait:
default:
break;
}
_capturePreviewLayer.transform = CATransform3DMakeRotation(rotateFinish, 0.0, 0.0, 1.0);
}
}
- (IBAction)button3Pressed:(id)sender {
}
- (IBAction)button4Pressed:(id)sender {
}
- (IBAction)startPressed:(id)sender {
[self.captureSession startRunning];
}
- (IBAction)stopPressed:(id)sender {
[self.captureSession stopRunning];
}
- (IBAction)swapInputsPressed:(id)sender {
if (!self.isUsingFrontCam){
_isUsingFrontCam = YES;
[self.captureSession removeInput:self.backCam];
[self.captureSession addInput:self.frontCam];
}
else {
_isUsingFrontCam = NO;
[self.captureSession removeInput:self.frontCam];
[self.captureSession addInput:self.backCam];
}
}
- (IBAction)recordPressed:(id)sender {
}
- (NSString *) applicationDocumentsDirectory{
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *basePath = ([paths count] > 0) ? [paths objectAtIndex:0] : nil;
return basePath;
}
- (void)viewDidLoad{
[super viewDidLoad];
// Do any additional setup after loading the view, typically from a nib.
[[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
[[NSNotificationCenter defaultCenter] addObserver:self
selector:@selector(orientationChanged:)
name:UIDeviceOrientationDidChangeNotification
object:nil];
}
- (void) dealloc{
[[UIDevice currentDevice] endGeneratingDeviceOrientationNotifications];
[[NSNotificationCenter defaultCenter] removeObserver:self
name:UIDeviceOrientationDidChangeNotification
object:nil];
}
- (void)didReceiveMemoryWarning{
[super didReceiveMemoryWarning];
// Dispose of any resources that can be recreated.
}
@end
Fortunately for you I just built this test app for grabbing photos.
Oh, before I forget: rendering a CALayer into a graphic is as simple as
+ (UIImage *) captureImageOfView:(UIView *)srcView{
UIGraphicsBeginImageContext(srcView.bounds.size);
[srcView.layer renderInContext:UIGraphicsGetCurrentContext()];
UIImage *anImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return anImage;
}
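Usage is then a one-liner; assuming the helper is added to this view controller:
UIImage *snapshot = [ViewController captureImageOfView:self.previewViewContainer];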
However, I recommend you look into the AVFoundation Programming Guide to see how they actually capture it. This was just my own demo app and, as I said, it's not complete.
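To round out the two parts of the question under stated assumptions: AVCaptureStillImageOutput can grab a frame into memory without saving it, and an NSTimer can drive it every 30 seconds. A hedged sketch, assuming stillImageOutput and imageView properties on the view controller and a running capture session:
// Setup (e.g. in viewDidLoad, after the session inputs are configured):
self.stillImageOutput = [[AVCaptureStillImageOutput alloc] init];
[self.captureSession addOutput:self.stillImageOutput];
[NSTimer scheduledTimerWithTimeInterval:30.0 target:self selector:@selector(grabFrame) userInfo:nil repeats:YES];

// Called by the timer:
- (void)grabFrame {
    AVCaptureConnection *connection = [self.stillImageOutput connectionWithMediaType:AVMediaTypeVideo];
    [self.stillImageOutput captureStillImageAsynchronouslyFromConnection:connection
                                                        completionHandler:^(CMSampleBufferRef imageDataSampleBuffer, NSError *error) {
        if (imageDataSampleBuffer == NULL) return;
        NSData *jpegData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageDataSampleBuffer];
        UIImage *picture = [UIImage imageWithData:jpegData];
        dispatch_async(dispatch_get_main_queue(), ^{
            self.imageView.image = picture;   // hand the UIImage to the face-detection code from here
        });
    }];
}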