I'm using the GPUImage library. My front camera session preset is AVCaptureSessionPresetPhoto and my back camera preset is AVCaptureSessionPresetHigh:
if (self.isFrontFacingCameraPresent) {
[self setCaptureSessionPreset: AVCaptureSessionPresetHigh];
} else {
[self setCaptureSessionPreset:AVCaptureSessionPresetPhoto];
}
[self rotateCamera];
The initial state uses the front camera, and the resolution is 1280x960.
After switching to the back camera, the resolution is 1920x1080.
After switching back to the front camera, the resolution is 1280x720, which is very strange.
I checked this delegate method:
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
and fetched the width and height:
CVImageBufferRef cameraFrame = CMSampleBufferGetImageBuffer(sampleBuffer);
int bufferWidth = (int) CVPixelBufferGetWidth(cameraFrame);
int bufferHeight = (int) CVPixelBufferGetHeight(cameraFrame);
The bufferHeight is 720. I don't know why the height changed from 960 to 720 after switching back to the front camera. Maybe it's an Apple bug?
I solved the issue by changing the rotateCamera function: I wrote a new function to switch between the front and back cameras:
- (void)switchCameraFrontAndBack {
NSError *error;
AVCaptureDeviceInput *newVideoInput;
AVCaptureDevicePosition currentCameraPosition = self.cameraPosition;
if (currentCameraPosition == AVCaptureDevicePositionBack)
{
currentCameraPosition = AVCaptureDevicePositionFront;
}
else
{
currentCameraPosition = AVCaptureDevicePositionBack;
}
AVCaptureDevice *backFacingCamera = nil;
NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
for (AVCaptureDevice *device in devices)
{
if ([device position] == currentCameraPosition)
{
backFacingCamera = device;
}
}
newVideoInput = [[AVCaptureDeviceInput alloc] initWithDevice:backFacingCamera error:&error];
if (newVideoInput != nil)
{
[_captureSession beginConfiguration];
[_captureSession removeInput:videoInput];
[self configSessionPreset:currentCameraPosition];
if ([_captureSession canAddInput:newVideoInput])
{
[_captureSession addInput:newVideoInput];
videoInput = newVideoInput;
}
else
{
[_captureSession addInput:videoInput];
}
[_captureSession commitConfiguration];
}
_inputCamera = backFacingCamera;
[self setOutputImageOrientation:self.outputImageOrientation];
}
- (void)configSessionPreset:(AVCaptureDevicePosition)currentPosition {
if (currentPosition == AVCaptureDevicePositionBack) {
if (WIDTH <= Iphone4SWidth) {
if ([self.captureSession canSetSessionPreset:AVCaptureSessionPreset1280x720]) {
[self setCaptureSessionPreset:AVCaptureSessionPreset1280x720];
} else if ([self.captureSession canSetSessionPreset:AVCaptureSessionPreset1920x1080]) {
[self setCaptureSessionPreset:AVCaptureSessionPreset1920x1080];
}
} else {
if ([self.captureSession canSetSessionPreset:AVCaptureSessionPreset1920x1080]) {
[self setCaptureSessionPreset:AVCaptureSessionPreset1920x1080];
} else {
[self setCaptureSessionPreset: AVCaptureSessionPresetHigh];
}
}
} else {
[self setCaptureSessionPreset:AVCaptureSessionPresetPhoto];
}
}
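With this in place, the call site from the first snippet calls the new method instead of rotateCamera, and the preset branching now lives in configSessionPreset:. A minimal sketch of the call site (the action name here is hypothetical):
// One call does the switch; configSessionPreset: picks the preset for the new position.
- (IBAction)onToggleCameraTapped:(id)sender {
[self switchCameraFrontAndBack];
}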
When you use AVCaptureSessionPresetHigh, the actual resolution differs between cameras: the front and the back camera each resolve the preset to the highest resolution that camera supports. I guess you used an iPhone 5.
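A quick way to confirm this on your device is to log the dimensions the camera actually settled on after each switch. A minimal sketch, assuming videoCamera is your GPUImageVideoCamera instance (inputCamera and captureSession are the AVFoundation objects GPUImage exposes):
// Call this right after switching cameras to see which resolution the current
// preset resolved to for the active camera.
static void LogActiveCaptureDimensions(GPUImageVideoCamera *videoCamera) {
AVCaptureDevice *device = videoCamera.inputCamera;
CMVideoDimensions dims = CMVideoFormatDescriptionGetDimensions(device.activeFormat.formatDescription);
NSLog(@"%@ camera, preset %@, active format %dx%d",
device.position == AVCaptureDevicePositionFront ? @"Front" : @"Back",
videoCamera.captureSession.sessionPreset,
dims.width, dims.height);
}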
I am creating a camera app which captures video using SCRecorder. I am trying to apply multiple filters to a video. I am changing the recorder's video configuration as below:
func swipeableFilterView(_ swipeableFilterView: SCSwipeableFilterView, didScrollTo filter: SCFilter?) {
selectedFilter = filter!
recorder.videoConfiguration.filter = filter!
}
I am capturing video with filters applied. When I switch the camera from back to front, I get a black area on the right side of the frame, as in the image below.
With the back camera it works perfectly.
Here is the code of appendVideoSampleBuffer:
- (void)appendVideoSampleBuffer:(CMSampleBufferRef)sampleBuffer toRecordSession:(SCRecordSession *)recordSession duration:(CMTime)duration connection:(AVCaptureConnection *)connection completion:(void(^)(BOOL success))completion {
@autoreleasepool {
CVPixelBufferRef sampleBufferImage = CMSampleBufferGetImageBuffer(sampleBuffer);
size_t bufferWidth = CVPixelBufferGetWidth(sampleBufferImage);
size_t bufferHeight = CVPixelBufferGetHeight(sampleBufferImage);
CMTime time = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
SCFilter *filterGroup = _videoConfiguration.filter;
SCFilter *transformFilter = [self _transformFilterUsingBufferWidth:bufferWidth bufferHeight:bufferHeight mirrored:
_device == AVCaptureDevicePositionFront
];
if (filterGroup == nil && transformFilter == nil) {
[recordSession appendVideoPixelBuffer:sampleBufferImage atTime:time duration:duration completion:completion];
return;
}
CVPixelBufferRef pixelBuffer = [recordSession createPixelBuffer];
if (pixelBuffer == nil) {
completion(NO);
return;
}
CIImage *image = [CIImage imageWithCVPixelBuffer:sampleBufferImage];
CFTimeInterval seconds = CMTimeGetSeconds(time);
if (transformFilter != nil) {
image = [transformFilter imageByProcessingImage:image atTime:seconds];
}
if (filterGroup != nil) {
image = [filterGroup imageByProcessingImage:image atTime:seconds];
}
CVPixelBufferLockBaseAddress(pixelBuffer, 0);
[_context render:image toCVPixelBuffer:pixelBuffer];
[recordSession appendVideoPixelBuffer:pixelBuffer atTime:time duration:duration completion:^(BOOL success) {
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
CVPixelBufferRelease(pixelBuffer);
completion(success);
}];
}
}
I debugged the code, and I think the issue is with
CVPixelBufferLockBaseAddress(pixelBuffer, 0);
Can anyone please help me?
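One way to narrow this down is to compare the dimensions of the incoming frame with the pixel buffer the record session hands back, right before the render call; a mismatch there would explain a partially black output, since the front camera usually delivers a different resolution than the back camera. A minimal diagnostic sketch using the names from appendVideoSampleBuffer above (this is an illustration, not a confirmed fix):
// Front and back cameras often deliver different resolutions, so check that the
// destination buffer returned by createPixelBuffer matches the source frame.
size_t srcWidth = CVPixelBufferGetWidth(sampleBufferImage);
size_t srcHeight = CVPixelBufferGetHeight(sampleBufferImage);
size_t dstWidth = CVPixelBufferGetWidth(pixelBuffer);
size_t dstHeight = CVPixelBufferGetHeight(pixelBuffer);
if (srcWidth != dstWidth || srcHeight != dstHeight) {
NSLog(@"Rendering a %zux%zu frame into a %zux%zu buffer - sizes do not match",
srcWidth, srcHeight, dstWidth, dstHeight);
}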
I'm trying to modify the on-device text recognition example provided by Google here to make it work with a live camera feed.
When holding the camera over text (text that works with the image-based example), my console produces the following in a stream before the app ultimately runs out of memory:
2018-05-16 10:48:22.129901+1200 TextRecognition[32138:5593533] An empty result returned from from GMVDetector for VisionTextDetector.
This is my video capture method:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
if let textDetector = self.textDetector {
let visionImage = VisionImage(buffer: sampleBuffer)
let metadata = VisionImageMetadata()
metadata.orientation = .rightTop
visionImage.metadata = metadata
textDetector.detect(in: visionImage) { (features, error) in
guard error == nil, let features = features, !features.isEmpty else {
// Error. You should also check the console for error messages.
// ...
return
}
// Recognized and extracted text
print("Detected text has: \(features.count) blocks")
// ...
}
}
}
Is this the right way to do it?
ML Kit has long since migrated out of Firebase and become a standalone SDK (migration guide).
The Quick Start sample app in Swift showing how to do text recognition from a live video stream using ML Kit (with CMSampleBuffer) is now available here:
https://github.com/googlesamples/mlkit/tree/master/ios/quickstarts/textrecognition/TextRecognitionExample
The live feed is implemented in CameraViewController.swift:
https://github.com/googlesamples/mlkit/blob/master/ios/quickstarts/textrecognition/TextRecognitionExample/CameraViewController.swift
ML Kit is still in the process of adding sample code for CMSampleBuffer usage to the Firebase Quick Start.
In the meantime, the code below works for CMSampleBuffer.
Set up AV Capture (use kCVPixelFormatType_32BGRA for kCVPixelBufferPixelFormatTypeKey):
@property(nonatomic, strong) AVCaptureSession *session;
@property(nonatomic, strong) AVCaptureVideoDataOutput *videoDataOutput;
- (void)setupVideoProcessing {
self.videoDataOutput = [[AVCaptureVideoDataOutput alloc] init];
NSDictionary *rgbOutputSettings = @{
(__bridge NSString*)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA)
};
[self.videoDataOutput setVideoSettings:rgbOutputSettings];
if (![self.session canAddOutput:self.videoDataOutput]) {
[self cleanupVideoProcessing];
NSLog(#"Failed to setup video output");
return;
}
[self.videoDataOutput setAlwaysDiscardsLateVideoFrames:YES];
[self.videoDataOutput setSampleBufferDelegate:self queue:self.videoDataOutputQueue];
[self.session addOutput:self.videoDataOutput];
}
Consume the CMSampleBuffer and run detection:
- (void)runDetection:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
fromConnection:(AVCaptureConnection *)connection {
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
size_t imageWidth = CVPixelBufferGetWidth(imageBuffer);
size_t imageHeight = CVPixelBufferGetHeight(imageBuffer);
AVCaptureDevicePosition devicePosition = self.isUsingFrontCamera ? AVCaptureDevicePositionFront : AVCaptureDevicePositionBack;
// Calculate the image orientation.
UIDeviceOrientation deviceOrientation = [[UIDevice currentDevice] orientation];
FIRVisionDetectorImageOrientation orientation =
[ImageUtility imageOrientationFromOrientation:deviceOrientation
withCaptureDevicePosition:devicePosition
defaultDeviceOrientation:[self deviceOrientationFromInterfaceOrientation]];
// Invoke text detection.
FIRVisionImage *image = [[FIRVisionImage alloc] initWithBuffer:sampleBuffer];
FIRVisionImageMetadata *metadata = [[FIRVisionImageMetadata alloc] init];
metadata.orientation = orientation;
image.metadata = metadata;
FIRVisionTextDetectionCallback callback =
^(NSArray<id<FIRVisionText>> *_Nullable features, NSError *_Nullable error) {
...
};
[self.textDetector detectInImage:image completion:callback];
}
The helper function of ImageUtility used above to determine the orientation:
+ (FIRVisionDetectorImageOrientation)imageOrientationFromOrientation:(UIDeviceOrientation)deviceOrientation
withCaptureDevicePosition:(AVCaptureDevicePosition)position
defaultDeviceOrientation:(UIDeviceOrientation)defaultOrientation {
if (deviceOrientation == UIDeviceOrientationFaceDown ||
deviceOrientation == UIDeviceOrientationFaceUp ||
deviceOrientation == UIDeviceOrientationUnknown) {
deviceOrientation = defaultOrientation;
}
FIRVisionDetectorImageOrientation orientation = FIRVisionDetectorImageOrientationTopLeft;
switch (deviceOrientation) {
case UIDeviceOrientationPortrait:
if (position == AVCaptureDevicePositionFront) {
orientation = FIRVisionDetectorImageOrientationLeftTop;
} else {
orientation = FIRVisionDetectorImageOrientationRightTop;
}
break;
case UIDeviceOrientationLandscapeLeft:
if (position == AVCaptureDevicePositionFront) {
orientation = FIRVisionDetectorImageOrientationBottomLeft;
} else {
orientation = FIRVisionDetectorImageOrientationTopLeft;
}
break;
case UIDeviceOrientationPortraitUpsideDown:
if (position == AVCaptureDevicePositionFront) {
orientation = FIRVisionDetectorImageOrientationRightBottom;
} else {
orientation = FIRVisionDetectorImageOrientationLeftBottom;
}
break;
case UIDeviceOrientationLandscapeRight:
if (position == AVCaptureDevicePositionFront) {
orientation = FIRVisionDetectorImageOrientationTopRight;
} else {
orientation = FIRVisionDetectorImageOrientationBottomRight;
}
break;
default:
orientation = FIRVisionDetectorImageOrientationTopLeft;
break;
}
return orientation;
}
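For completeness, runDetection:didOutputSampleBuffer:fromConnection: above has the same shape as the AVCaptureVideoDataOutputSampleBufferDelegate callback; a minimal sketch of forwarding frames to it from the delegate method registered in setupVideoProcessing (this glue is an assumption, not part of the original answer):
// Forward each captured frame to the detection routine shown above.
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
fromConnection:(AVCaptureConnection *)connection {
[self runDetection:captureOutput didOutputSampleBuffer:sampleBuffer fromConnection:connection];
}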
I have problems with running iPhone X flash in torch mode.
Back AVCaptureDeviceTypeBuiltInTelephotoCamera selected as capture device:
'com.apple.avfoundation.avcapturedevice.built-in_video:2' -
AVCaptureDeviceTypeBuiltInTelephotoCamera
After checking torch mode availability with:
[self.captureDevice isTorchModeSupported:AVCaptureTorchModeOn]
I'm trying to switch the flash light into torch mode with:
[self.captureDevice lockForConfiguration:nil];
BOOL result = [self.captureDevice setTorchModeOnWithLevel:1 error:&error];
[self.captureDevice unlockForConfiguration];
This call succeeds: result == YES and error == nil. But the flash light blinks once and then turns off.
I saw this behaviour on an iPhone X myself, and there are reports of the same behaviour from iPhone 8 and iPhone 8 Plus owners. Some users say the problem appeared after updating to iOS 11.1, but I couldn't reproduce it on an iPhone 8 myself.
Are there any ideas on how to fix or debug this problem?
Full code snippet from my app listed below:
// Retrieve the back camera
if ([AVCaptureDeviceDiscoverySession class]) {
DDLogDebug(#"Search camera with AVCaptureDeviceDiscoverySession");
AVCaptureDevice* camera =
[AVCaptureDeviceDiscoverySession
discoverySessionWithDeviceTypes: @[AVCaptureDeviceTypeBuiltInTelephotoCamera]
mediaType:AVMediaTypeVideo
position:AVCaptureDevicePositionBack].devices.firstObject;
if (!camera) {
camera = [AVCaptureDeviceDiscoverySession
discoverySessionWithDeviceTypes: @[AVCaptureDeviceTypeBuiltInTelephotoCamera]
mediaType:AVMediaTypeVideo
position:AVCaptureDevicePositionBack].devices.firstObject;
}
DDLogDebug(#"Did find %# camera", camera);
self.captureDevice = camera;
} else {
DDLogDebug(#"Haven't found camera device with AVCaptureDeviceDiscoverySession");
}
if (!self.captureDevice) {
DDLogDebug(#"Searching at [AVCaptureDevice devices], where %lu devices available", (unsigned long)AVCaptureDevice.devices.count);
for (AVCaptureDevice *device in [AVCaptureDevice devices]) {
if ([device hasMediaType:AVMediaTypeVideo] && [device hasTorch]) {
self.captureDevice = device;
break;
}
}
}
if (!self.captureDevice) {
NSError* error = [NSError buildError:^(MRErrorBuilder *builder) {
builder.localizedDescription = NSLocalizedString(#"There is no camera devices able to measure heart rate", nil);
builder.domain = kWTCameraHeartRateMonitorError;
builder.code = 27172;
}];
DDLogError(#"%#", error);
self.session = nil;
self.handler(0, 0, error);
return NO;
}
NSError *error;
AVCaptureDeviceInput *input = [[AVCaptureDeviceInput alloc] initWithDevice:self.captureDevice
error:&error];
if (error) {
DDLogError(#"%#", error);
self.session = nil;
self.handler(0, 0, error);
return NO;
}
NSString* deviceType = [self.captureDevice respondsToSelector:@selector(deviceType)] ? self.captureDevice.deviceType : @"Unknown";
DDLogDebug(@"Configuring camera '%@'/'%@' - %@ id %@ at %ld connected: %@", self.captureDevice.localizedName, self.captureDevice.modelID, deviceType, self.captureDevice.uniqueID, (long)self.captureDevice.position, self.captureDevice.connected?@"YES":@"NO");
self.session = [[AVCaptureSession alloc] init];
NSString* preset = [self.session canSetSessionPreset:AVCaptureSessionPresetLow] ? AVCaptureSessionPresetLow : nil;
if (preset) {
self.session.sessionPreset = preset;
}
[self.session beginConfiguration];
[self.session addInput:input];
// Find the max frame rate we can get from the given device
AVCaptureDeviceFormat *currentFormat;
for (AVCaptureDeviceFormat *format in self.captureDevice.formats)
{
NSArray *ranges = format.videoSupportedFrameRateRanges;
AVFrameRateRange *frameRates = ranges[0];
// Find the lowest resolution format at the frame rate we want.
if (frameRates.maxFrameRate == FRAMES_PER_SECOND && (!currentFormat || (CMVideoFormatDescriptionGetDimensions(format.formatDescription).width < CMVideoFormatDescriptionGetDimensions(currentFormat.formatDescription).width && CMVideoFormatDescriptionGetDimensions(format.formatDescription).height < CMVideoFormatDescriptionGetDimensions(currentFormat.formatDescription).height)))
{
currentFormat = format;
}
}
if (![self.captureDevice isTorchModeSupported:AVCaptureTorchModeOn]) {
NSError* error = [NSError buildError:^(MRErrorBuilder *builder) {
builder.localizedDescription = NSLocalizedString(#"Torch mode is not supported for your camera", nil);
builder.domain = kWTCameraHeartRateMonitorError;
builder.code = 28633;
}];
self.session = nil;
DDLogError(#"%#", error);
self.session = nil;
self.handler(0, 0, error);
return NO;
}
// Tell the device to use the max frame rate.
[self.captureDevice lockForConfiguration:nil];
DDLogVerbose(#"Turn on tourch mode with level 0.5");
self.captureDevice.flashMode = AVCaptureFlashModeOff;
BOOL result = [self.captureDevice setTorchModeOnWithLevel:0.5 error:&error];
if (!result) {
DDLogError(#"%#", error);
self.session = nil;
self.handler(0, 0, error);
return NO;
}
[self.captureDevice setFocusMode:AVCaptureFocusModeLocked];
[self.captureDevice setFocusModeLockedWithLensPosition:1.0
completionHandler:nil];
self.captureDevice.activeFormat = currentFormat;
self.captureDevice.activeVideoMinFrameDuration = CMTimeMake(1, FRAMES_PER_SECOND);
self.captureDevice.activeVideoMaxFrameDuration = CMTimeMake(1, FRAMES_PER_SECOND);
[self.captureDevice unlockForConfiguration];
// Set the output
AVCaptureVideoDataOutput* videoOutput = [AVCaptureVideoDataOutput new];
// create a queue to run the capture on
dispatch_queue_t captureQueue=dispatch_queue_create("catpureQueue", DISPATCH_QUEUE_SERIAL);
// setup our delegate
[videoOutput setSampleBufferDelegate:self queue:captureQueue];
// configure the pixel format
videoOutput.videoSettings = @{(id)kCVPixelBufferPixelFormatTypeKey: @(kCVPixelFormatType_32BGRA)};
videoOutput.alwaysDiscardsLateVideoFrames = NO;
[self.session addOutput:videoOutput];
if (debugPath) {
NSError* error;
[[NSFileManager defaultManager] removeItemAtPath:debugPath
error:nil];
BOOL result =
[[NSFileManager defaultManager] createDirectoryAtPath:debugPath
withIntermediateDirectories:YES
attributes:nil
error:&error];
if (result) {
[self setupDebugRecordAt:debugPath withFormat:currentFormat];
} else {
DDLogError(#"%#", error);
}
const char* path = [debugPath cStringUsingEncoding:NSUTF8StringEncoding];
self.filter->setDebugPath(path);
}
// Start the video session
[self.session commitConfiguration];
self.frameNumber = 0;
[self.assetWriter startWriting];
[self.assetWriter startSessionAtSourceTime:kCMTimeZero];
[self.session startRunning];
Finally, the problem is fixed. I'm not sure about the exact reason; any information related to this issue is appreciated.
This problem exists on the iPhone 8, 8 Plus and iPhone X running iOS 11.1. I reproduced this behaviour on an iPhone 8 after updating from iOS 11.0 to 11.1.
What I noticed is that the torch turns on after calling
BOOL result = [self.captureDevice setTorchModeOnWithLevel:0.5 error:&error];
and turns off after
[self.captureDevice setFocusMode:AVCaptureFocusModeLocked];
or
[self.session commitConfiguration];
So the solution was to perform the torch configuration after ALL other session and device configurations are finished and the session has started.
My current implementation is:
// Session configuration ...
[self.session startRunning];
if (![self.captureDevice isTorchModeSupported:AVCaptureTorchModeOn]) {
NSError* error = [NSError buildError:^(MRErrorBuilder *builder) {
builder.localizedDescription = NSLocalizedString(#"Torch mode is not supported for your camera", nil);
builder.domain = kWTCameraHeartRateMonitorError;
builder.code = 28633;
}];
DDLogError(#"%#", error);
if (self.session) {
[self.session stopRunning];
}
self.session = nil;
self.handler(0, 0, error);
return NO;
}
[self.captureDevice lockForConfiguration:nil];
self.captureDevice.flashMode = AVCaptureFlashModeOff;
[self.captureDevice setFocusMode:AVCaptureFocusModeLocked];
[self.captureDevice setFocusModeLockedWithLensPosition:1.0
completionHandler:nil];
self.captureDevice.activeFormat = currentFormat;
self.captureDevice.activeVideoMinFrameDuration = CMTimeMake(1, FRAMES_PER_SECOND);
self.captureDevice.activeVideoMaxFrameDuration = CMTimeMake(1, FRAMES_PER_SECOND);
// This call should be placed AFTER all other configurations
BOOL result = [self.captureDevice setTorchModeOnWithLevel:0.5 error:&error];
if (!result) {
DDLogError(#"%#", error);
self.session = nil;
self.handler(0, 0, error);
return NO;
}
[self.captureDevice unlockForConfiguration];
I'm using AVFoundation. I want to record video using both the front and back cameras. I can record video with one camera, but when I switch from the back camera to the front, the camera freezes. Is it possible to record video continuously while switching between both cameras?
Sample Code:
- (void) startup
{
if (_session == nil)
{
NSLog(#"Starting up server");
self.isCapturing = NO;
self.isPaused = NO;
_currentFile = 0;
_discont = NO;
// create capture device with video input
_session = [[AVCaptureSession alloc] init];
AVCaptureDevice *backCamera = [self frontCamera];
AVCaptureDeviceInput* input = [AVCaptureDeviceInput deviceInputWithDevice:backCamera error:nil];
[_session addInput:input];
// audio input from default mic
AVCaptureDevice* mic = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
AVCaptureDeviceInput* micinput = [AVCaptureDeviceInput deviceInputWithDevice:mic error:nil];
[_session addInput:micinput];
// create an output for YUV output with self as delegate
_captureQueue = dispatch_queue_create("com.softcraftsystems.comss", DISPATCH_QUEUE_SERIAL);
AVCaptureVideoDataOutput* videoout = [[AVCaptureVideoDataOutput alloc] init];
[videoout setSampleBufferDelegate:self queue:_captureQueue];
NSDictionary* setcapSettings = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithInt:kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange], kCVPixelBufferPixelFormatTypeKey,
nil];
videoout.videoSettings = setcapSettings;
[_session addOutput:videoout];
_videoConnection = [videoout connectionWithMediaType:AVMediaTypeVideo];
// find the actual dimensions used so we can set up the encoder to the same.
NSDictionary* actual = videoout.videoSettings;
_cy = [[actual objectForKey:#"Height"] integerValue];
_cx = [[actual objectForKey:#"Width"] integerValue];
AVCaptureAudioDataOutput* audioout = [[AVCaptureAudioDataOutput alloc] init];
[audioout setSampleBufferDelegate:self queue:_captureQueue];
[_session addOutput:audioout];
_audioConnection = [audioout connectionWithMediaType:AVMediaTypeAudio];
// for audio, we want the channels and sample rate, but we can't get those from audioout.audiosettings on ios, so
// we need to wait for the first sample
// start capture and a preview layer
[_session startRunning];
_preview = [AVCaptureVideoPreviewLayer layerWithSession:_session];
_preview.videoGravity = AVLayerVideoGravityResizeAspectFill;
}
}
- (AVCaptureDevice *)frontCamera
{
NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
for (AVCaptureDevice *device in devices) {
if ([device position] == AVCaptureDevicePositionFront) {
return device;
}
}
return nil;
}
- (AVCaptureDevice *)backCamera
{
NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
for (AVCaptureDevice *device in devices) {
if ([device position] == AVCaptureDevicePositionBack) {
return device;
}
}
return nil;
}
- (void) startupFront
{
[_session stopRunning];
_session = nil;
if (_session == nil)
{
NSLog(#"Starting up server");
self.isCapturing = NO;
self.isPaused = NO;
_currentFile = 0;
_discont = NO;
// create capture device with video input
_session = [[AVCaptureSession alloc] init];
AVCaptureDevice *backCamera = [self backCamera];
AVCaptureDeviceInput* input = [AVCaptureDeviceInput deviceInputWithDevice:backCamera error:nil];
[_session addInput:input];
// audio input from default mic
AVCaptureDevice* mic = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
AVCaptureDeviceInput* micinput = [AVCaptureDeviceInput deviceInputWithDevice:mic error:nil];
[_session addInput:micinput];
// create an output for YUV output with self as delegate
_captureQueue = dispatch_queue_create("com.softcraftsystems.comss", DISPATCH_QUEUE_SERIAL);
AVCaptureVideoDataOutput* videoout = [[AVCaptureVideoDataOutput alloc] init];
[videoout setSampleBufferDelegate:self queue:_captureQueue];
NSDictionary* setcapSettings = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithInt:kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange], kCVPixelBufferPixelFormatTypeKey,
nil];
videoout.videoSettings = setcapSettings;
[_session addOutput:videoout];
_videoConnection = [videoout connectionWithMediaType:AVMediaTypeVideo];
// find the actual dimensions used so we can set up the encoder to the same.
NSDictionary* actual = videoout.videoSettings;
_cy = [[actual objectForKey:#"Height"] integerValue];
_cx = [[actual objectForKey:#"Width"] integerValue];
AVCaptureAudioDataOutput* audioout = [[AVCaptureAudioDataOutput alloc] init];
[audioout setSampleBufferDelegate:self queue:_captureQueue];
[_session addOutput:audioout];
_audioConnection = [audioout connectionWithMediaType:AVMediaTypeAudio];
// for audio, we want the channels and sample rate, but we can't get those from audioout.audiosettings on ios, so
// we need to wait for the first sample
// start capture and a preview layer
[_session startRunning];
_preview = [AVCaptureVideoPreviewLayer layerWithSession:_session];
_preview.videoGravity = AVLayerVideoGravityResizeAspectFill;
}
}
- (void) startCapture
{
@synchronized(self)
{
if (!self.isCapturing)
{
NSLog(#"starting capture");
// create the encoder once we have the audio params
_encoder = nil;
self.isPaused = NO;
_discont = NO;
_timeOffset = CMTimeMake(0, 0);
self.isCapturing = YES;
}
}
}
- (void) stopCapture
{
@synchronized(self)
{
if (self.isCapturing)
{
NSString* filename = [NSString stringWithFormat:#"capture%d.mp4", _currentFile];
NSString* path = [NSTemporaryDirectory() stringByAppendingPathComponent:filename];
NSURL* url = [NSURL fileURLWithPath:path];
_currentFile++;
// serialize with audio and video capture
self.isCapturing = NO;
dispatch_async(_captureQueue, ^{
[_encoder finishWithCompletionHandler:^{
self.isCapturing = NO;
_encoder = nil;
ALAssetsLibrary *library = [[ALAssetsLibrary alloc] init];
[library writeVideoAtPathToSavedPhotosAlbum:url completionBlock:^(NSURL *assetURL, NSError *error){
NSLog(#"save completed");
[[NSFileManager defaultManager] removeItemAtPath:path error:nil];
}];
}];
});
}
}
}
- (void) pauseCapture
{
@synchronized(self)
{
if (self.isCapturing)
{
NSLog(#"Pausing capture");
self.isPaused = YES;
_discont = YES;
}
}
}
- (void) resumeCapture
{
@synchronized(self)
{
if (self.isPaused)
{
NSLog(#"Resuming capture");
self.isPaused = NO;
}
}
}
- (CMSampleBufferRef) adjustTime:(CMSampleBufferRef) sample by:(CMTime) offset
{
CMItemCount count;
CMSampleBufferGetSampleTimingInfoArray(sample, 0, nil, &count);
CMSampleTimingInfo* pInfo = malloc(sizeof(CMSampleTimingInfo) * count);
CMSampleBufferGetSampleTimingInfoArray(sample, count, pInfo, &count);
for (CMItemCount i = 0; i < count; i++)
{
pInfo[i].decodeTimeStamp = CMTimeSubtract(pInfo[i].decodeTimeStamp, offset);
pInfo[i].presentationTimeStamp = CMTimeSubtract(pInfo[i].presentationTimeStamp, offset);
}
CMSampleBufferRef sout;
CMSampleBufferCreateCopyWithNewTiming(nil, sample, count, pInfo, &sout);
free(pInfo);
return sout;
}
- (void) setAudioFormat:(CMFormatDescriptionRef) fmt
{
const AudioStreamBasicDescription *asbd = CMAudioFormatDescriptionGetStreamBasicDescription(fmt);
_samplerate = asbd->mSampleRate;
_channels = asbd->mChannelsPerFrame;
}
- (void) captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
BOOL bVideo = YES;
@synchronized(self)
{
if (!self.isCapturing || self.isPaused)
{
return;
}
if (connection != _videoConnection)
{
bVideo = NO;
}
if ((_encoder == nil) && !bVideo)
{
CMFormatDescriptionRef fmt = CMSampleBufferGetFormatDescription(sampleBuffer);
[self setAudioFormat:fmt];
NSString* filename = [NSString stringWithFormat:#"capture%d.mp4", _currentFile];
NSString* path = [NSTemporaryDirectory() stringByAppendingPathComponent:filename];
_encoder = [VideoEncoder encoderForPath:path Height:_cy width:_cx channels:_channels samples:_samplerate];
}
if (_discont)
{
if (bVideo)
{
return;
}
_discont = NO;
// calc adjustment
CMTime pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
CMTime last = bVideo ? _lastVideo : _lastAudio;
if (last.flags & kCMTimeFlags_Valid)
{
if (_timeOffset.flags & kCMTimeFlags_Valid)
{
pts = CMTimeSubtract(pts, _timeOffset);
}
CMTime offset = CMTimeSubtract(pts, last);
NSLog(#"Setting offset from %s", bVideo?"video": "audio");
NSLog(#"Adding %f to %f (pts %f)", ((double)offset.value)/offset.timescale, ((double)_timeOffset.value)/_timeOffset.timescale, ((double)pts.value/pts.timescale));
// this stops us having to set a scale for _timeOffset before we see the first video time
if (_timeOffset.value == 0)
{
_timeOffset = offset;
}
else
{
_timeOffset = CMTimeAdd(_timeOffset, offset);
}
}
_lastVideo.flags = 0;
_lastAudio.flags = 0;
}
// retain so that we can release either this or modified one
CFRetain(sampleBuffer);
if (_timeOffset.value > 0)
{
CFRelease(sampleBuffer);
sampleBuffer = [self adjustTime:sampleBuffer by:_timeOffset];
}
// record most recent time so we know the length of the pause
CMTime pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
CMTime dur = CMSampleBufferGetDuration(sampleBuffer);
if (dur.value > 0)
{
pts = CMTimeAdd(pts, dur);
}
if (bVideo)
{
_lastVideo = pts;
}
else
{
_lastAudio = pts;
}
}
// pass frame to encoder
[_encoder encodeFrame:sampleBuffer isVideo:bVideo];
CFRelease(sampleBuffer);
}
- (void) shutdown
{
NSLog(#"shutting down server");
if (_session)
{
[_session stopRunning];
_session = nil;
}
[_encoder finishWithCompletionHandler:^{
NSLog(#"Capture completed");
}];
}
In my opinion, it is not possible to continue recording when switching cameras,
because there is a resolution and quality difference between them, and a video can have only one resolution and quality throughout.
Secondly, every time you switch cameras, the capture device is allocated and initialized again.
So, unfortunately, I don't think it is possible.
But if you do find a solution, please tell me.
I am trying to write an app that involves both the front and rear cameras and switching between them. As far as I understand, in the addVideoInput method I have to change the IDs in
AVCaptureDevice *videoDevice = [AVCaptureDevice deviceWithUniqueID:(NSString *)deviceUniqueID];
But which NSStrings are those IDs?
Or, if it should be done another way, please give a suggestion.
Thank you for your help!
OK, I have managed to find a solution. I don't know if it's right or wrong; it was taken from http://www.bunnyhero.org/2010/08/15/turn-your-iphone-into-a-vampire-with-avfoundation-and-ios-4/
Just use
AVCaptureDevice *captureDevice = [self frontFacingCameraIfAvailable];
where frontFacingCameraIfAvailable is:
-(AVCaptureDevice *)frontFacingCameraIfAvailable
{
NSArray *videoDevices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
AVCaptureDevice *captureDevice = nil;
for (AVCaptureDevice *device in videoDevices)
{
if (device.position == AVCaptureDevicePositionFront)
{
captureDevice = device;
break;
}
}
// couldn't find one on the front, so just get the default video device.
if ( ! captureDevice)
{
captureDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
}
return captureDevice;
}
You can usually get the front camera using
AVCaptureDevice *frontalCamera = [AVCaptureDevice deviceWithUniqueID:#"com.apple.avfoundation.avcapturedevice.built-in_video:1"];
But I would by all means rather use your accepted method; this one is not safe at all.
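On newer SDKs, a position-based lookup avoids hard-coding the unique ID entirely. A minimal sketch, assuming iOS 10+ where AVCaptureDeviceDiscoverySession is available (the QR-scanner answer further down uses the same idea via defaultDeviceWithDeviceType:mediaType:position:):
// Ask for the built-in wide-angle camera at a given position instead of a fixed unique ID.
AVCaptureDeviceDiscoverySession *discovery =
[AVCaptureDeviceDiscoverySession discoverySessionWithDeviceTypes:@[AVCaptureDeviceTypeBuiltInWideAngleCamera]
mediaType:AVMediaTypeVideo
position:AVCaptureDevicePositionFront];
AVCaptureDevice *frontCamera = discovery.devices.firstObject; // nil if there is no front camera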
I was facing an issue scanning QR codes with the front camera. I looked at many resources and libraries, but none fulfilled my requirement because I needed a customised UI for the scanner, and the code samples on the internet for scanning QR codes were deprecated. By debugging and checking the device type, I set the camera position explicitly and it worked. I am posting this as an answer so that it helps other peers looking for the same thing.
- (void)viewDidLoad {
[super viewDidLoad];
// Do any additional setup after loading the view.
[self setupScanner];
[self openScanner:nil];
}
#pragma mark- Actions
- (IBAction)openScanner:(id)sender {
if([UIImagePickerController isCameraDeviceAvailable:UIImagePickerControllerCameraDeviceFront]){
[self.session startRunning];
}
}
- (IBAction)stopScanner:(id)sender {
[self.session stopRunning];
}
- (void)setupScanner {
#if !(TARGET_OS_SIMULATOR)
//self.device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
self.device = [self frontFacingCameraIfAvailable];
self.input = [AVCaptureDeviceInput deviceInputWithDevice:self.device error:nil];
self.session = [[AVCaptureSession alloc] init];
self.output = [[AVCaptureMetadataOutput alloc] init];
if([self.session canAddOutput:self.output]) {
[self.session addOutput:self.output];
}
if ([self.session canAddInput:self.input]){
[self.session addInput:self.input];
}
[self.output setMetadataObjectsDelegate:self queue:dispatch_get_main_queue()];
[self.output setMetadataObjectTypes:@[AVMetadataObjectTypeQRCode]];
self.preview = [AVCaptureVideoPreviewLayer layerWithSession:self.session];
self.preview.videoGravity = AVLayerVideoGravityResizeAspectFill;
self.preview.frame = CGRectMake(0, 0, CGRectGetWidth(self.pLayer.frame), CGRectGetHeight(self.pLayer.frame));
AVCaptureConnection *con = self.preview.connection;
con.videoOrientation = AVCaptureVideoOrientationLandscapeRight;
//pLayer is a UIView outlet on which the scanner fits or occupies its area to scan QR Code
[self.pLayer.layer insertSublayer:self.preview atIndex:0];
#endif
}
#pragma mark - AVCaptureMetadataOutputObjectsDelegate
- (void)captureOutput:(AVCaptureOutput *)output didOutputMetadataObjects:(NSArray<__kindof AVMetadataObject *> *)metadataObjects fromConnection:(AVCaptureConnection *)connection {
CGRect highlightViewRect = CGRectZero;
AVMetadataMachineReadableCodeObject *barCodeObject;
NSString *detectionString = nil;
NSArray *barCodeTypes = @[AVMetadataObjectTypeUPCECode, AVMetadataObjectTypeCode39Code, AVMetadataObjectTypeCode39Mod43Code,
AVMetadataObjectTypeEAN13Code, AVMetadataObjectTypeEAN8Code, AVMetadataObjectTypeCode93Code, AVMetadataObjectTypeCode128Code,
AVMetadataObjectTypePDF417Code, AVMetadataObjectTypeQRCode, AVMetadataObjectTypeAztecCode];
for (AVMetadataObject *metadata in metadataObjects) {
for (NSString *type in barCodeTypes) {
if ([metadata.type isEqualToString:type])
{
barCodeObject = (AVMetadataMachineReadableCodeObject *)[self.preview transformedMetadataObjectForMetadataObject:(AVMetadataMachineReadableCodeObject *)metadata];
highlightViewRect = barCodeObject.bounds;
detectionString = [(AVMetadataMachineReadableCodeObject *)metadata stringValue];
break;
}
}
if (detectionString != nil) {
self.codeLabel.text = detectionString;
[self stopScanner:nil];
//Do your work with QR Code String ---
break;
}
else
self.codeLabel.text = #"CODE";
}
}
#pragma mark- Capture Device
-(AVCaptureDevice *)frontFacingCameraIfAvailable {
AVCaptureDevice *captureDevice = [AVCaptureDevice defaultDeviceWithDeviceType:AVCaptureDeviceTypeBuiltInWideAngleCamera mediaType:AVMediaTypeVideo position:AVCaptureDevicePositionFront];
NSLog(#"capture device %#",captureDevice.description);
NSLog(#"device type %#",captureDevice.deviceType);
NSLog(#"unique Id: %#",captureDevice.uniqueID);
//com.apple.avfoundation.avcapturedevice.built-in_video:1
//Device Position: 2
NSLog(#"frontFacingCameraIfAvailable-> Device Position: %ld",(long)captureDevice.position);
return captureDevice;
}
-(AVCaptureDevice *)backFacingCameraIfAvailable {
AVCaptureDevice *captureDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
NSLog(#"capture device %#",captureDevice.description);
NSLog(#"device type %#",captureDevice.deviceType);
NSLog(#"unique Id: %#",captureDevice.uniqueID);
NSLog(#"backFacingCameraIfAvailable-> Device Position: %ld",(long)captureDevice.position);
return captureDevice;
}