I am currently working on an iOS app which merges a desired number of videos. Once the user taps the button to merge the videos, they are joined and then played using AVPlayer as follows:
CMTime nextClipStartTime = kCMTimeZero;
NSInteger i;
CMTime transitionDuration = CMTimeMake(1, 1); // Default transition duration is one second.
// Add two video tracks and two audio tracks.
AVMutableCompositionTrack *compositionVideoTracks[2];
AVMutableCompositionTrack *compositionAudioTracks[2];
compositionVideoTracks[0] = [self.mixComposition addMutableTrackWithMediaType:AVMediaTypeVideo preferredTrackID:kCMPersistentTrackID_Invalid];
compositionVideoTracks[1] = [self.mixComposition addMutableTrackWithMediaType:AVMediaTypeVideo preferredTrackID:kCMPersistentTrackID_Invalid];
compositionAudioTracks[0] = [self.mixComposition addMutableTrackWithMediaType:AVMediaTypeAudio preferredTrackID:kCMPersistentTrackID_Invalid];
compositionAudioTracks[1] = [self.mixComposition addMutableTrackWithMediaType:AVMediaTypeAudio preferredTrackID:kCMPersistentTrackID_Invalid];
CMTimeRange *passThroughTimeRanges = alloca(sizeof(CMTimeRange) * [self.selectedAssets count]);
CMTimeRange *transitionTimeRanges = alloca(sizeof(CMTimeRange) * [self.selectedAssets count]);
// Place clips into alternating video & audio tracks in composition, overlapped by transitionDuration.
for (i = 0; i < [self.selectedAssets count]; i++ )
{
NSInteger alternatingIndex = i % 2; // alternating targets: 0, 1, 0, 1, ...
AVURLAsset *asset = [self.selectedAssets objectAtIndex:i];
NSLog(#"number of tracks %d",asset.tracks.count);
CMTimeRange assetTimeRange;
assetTimeRange.start = kCMTimeZero;
assetTimeRange.duration = asset.duration;
NSValue *clipTimeRange = [NSValue valueWithCMTimeRange:assetTimeRange];
CMTimeRange timeRangeInAsset;
if (clipTimeRange)
timeRangeInAsset = [clipTimeRange CMTimeRangeValue];
else
timeRangeInAsset = CMTimeRangeMake(kCMTimeZero, [asset duration]);
AVAssetTrack *clipVideoTrack = [[asset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0];
[compositionVideoTracks[alternatingIndex] insertTimeRange:timeRangeInAsset ofTrack:clipVideoTrack atTime:nextClipStartTime error:nil];
AVAssetTrack *clipAudioTrack = [[asset tracksWithMediaType:AVMediaTypeAudio] objectAtIndex:0];
[compositionAudioTracks[alternatingIndex] insertTimeRange:timeRangeInAsset ofTrack:clipAudioTrack atTime:nextClipStartTime error:nil];
// Remember the time range in which this clip should pass through.
// Every clip after the first begins with a transition.
// Every clip before the last ends with a transition.
// Exclude those transitions from the pass through time ranges.
passThroughTimeRanges[i] = CMTimeRangeMake(nextClipStartTime, timeRangeInAsset.duration);
if (i > 0) {
passThroughTimeRanges[i].start = CMTimeAdd(passThroughTimeRanges[i].start, transitionDuration);
passThroughTimeRanges[i].duration = CMTimeSubtract(passThroughTimeRanges[i].duration, transitionDuration);
}
if (i+1 < [self.selectedAssets count]) {
passThroughTimeRanges[i].duration = CMTimeSubtract(passThroughTimeRanges[i].duration, transitionDuration);
}
// The end of this clip will overlap the start of the next by transitionDuration.
// (Note: this arithmetic falls apart if timeRangeInAsset.duration < 2 * transitionDuration.)
nextClipStartTime = CMTimeAdd(nextClipStartTime, timeRangeInAsset.duration);
nextClipStartTime = CMTimeSubtract(nextClipStartTime, transitionDuration);
// Remember the time range for the transition to the next item.
transitionTimeRanges[i] = CMTimeRangeMake(nextClipStartTime, transitionDuration);
}
// Set up the video composition if we are to perform crossfade or push transitions between clips.
NSMutableArray *instructions = [NSMutableArray array];
// Cycle between "pass through A", "transition from A to B", "pass through B", "transition from B to A".
for (i = 0; i < [self.selectedAssets count]; i++ )
{
NSInteger alternatingIndex = i % 2; // alternating targets
// Pass through clip i.
AVMutableVideoCompositionInstruction *passThroughInstruction = [AVMutableVideoCompositionInstruction videoCompositionInstruction];
passThroughInstruction.timeRange = passThroughTimeRanges[i];
AVMutableVideoCompositionLayerInstruction *passThroughLayer = [AVMutableVideoCompositionLayerInstruction videoCompositionLayerInstructionWithAssetTrack:compositionVideoTracks[alternatingIndex]];
passThroughInstruction.layerInstructions = [NSArray arrayWithObject:passThroughLayer];
[instructions addObject:passThroughInstruction];
AVMutableVideoCompositionLayerInstruction *fromLayer;
AVMutableVideoCompositionLayerInstruction *toLayer;
if (i+1 < [self.selectedAssets count])
{
// Add transition from clip i to clip i+1.
AVMutableVideoCompositionInstruction *transitionInstruction = [AVMutableVideoCompositionInstruction videoCompositionInstruction];
transitionInstruction.timeRange = transitionTimeRanges[i];
fromLayer = [AVMutableVideoCompositionLayerInstruction videoCompositionLayerInstructionWithAssetTrack:compositionVideoTracks[alternatingIndex]];
toLayer = [AVMutableVideoCompositionLayerInstruction videoCompositionLayerInstructionWithAssetTrack:compositionVideoTracks[1-alternatingIndex]];
// Fade out the fromLayer by setting a ramp from 1.0 to 0.0.
[fromLayer setOpacityRampFromStartOpacity:1.0 toEndOpacity:0.0 timeRange:transitionTimeRanges[i]];
transitionInstruction.layerInstructions = [NSArray arrayWithObjects:fromLayer, toLayer, nil];
[instructions addObject:transitionInstruction];
}
AVURLAsset *sourceAsset = [AVURLAsset URLAssetWithURL:[self.selectedItemsURL objectAtIndex:i] options:[NSDictionary dictionaryWithObject:[NSNumber numberWithBool:YES] forKey:AVURLAssetPreferPreciseDurationAndTimingKey]];
AVAssetTrack *sourceVideoTrack = [[sourceAsset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0];
CGSize temp = CGSizeApplyAffineTransform(sourceVideoTrack.naturalSize, sourceVideoTrack.preferredTransform);
CGSize size = CGSizeMake(fabsf(temp.width), fabsf(temp.height));
CGAffineTransform transform = sourceVideoTrack.preferredTransform;
self.videoComposition.renderSize = sourceVideoTrack.naturalSize;
if (size.width > size.height) {
[fromLayer setTransform:transform atTime:sourceAsset.duration];
} else {
float s = size.width/size.height;
CGAffineTransform new = CGAffineTransformConcat(transform, CGAffineTransformMakeScale(s,s));
float x = (size.height - size.width*s)/2;
CGAffineTransform newer = CGAffineTransformConcat(new, CGAffineTransformMakeTranslation(x, 0));
[fromLayer setTransform:newer atTime:sourceAsset.duration];
}
}
self.videoComposition.instructions = instructions;
self.videoComposition.frameDuration = CMTimeMake(1, 30);
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *documentsDirectory = [paths objectAtIndex:0];
NSString *myPathDocs = [documentsDirectory stringByAppendingPathComponent:[NSString stringWithFormat:@"mergeVideo-%d.mov", arc4random() % 1000]];
NSURL *url = [NSURL fileURLWithPath:myPathDocs];
self.exporter = [[AVAssetExportSession alloc] initWithAsset:self.mixComposition presetName:AVAssetExportPresetMediumQuality];
self.exporter.outputURL=url;
self.exporter.outputFileType = AVFileTypeQuickTimeMovie;
self.exporter.videoComposition = self.videoComposition;
self.exporter.shouldOptimizeForNetworkUse = YES;
self.playerItem = [AVPlayerItem playerItemWithAsset:self.mixComposition];
self.playerItem.videoComposition = self.videoComposition;
AVPlayer *player = [AVPlayer playerWithPlayerItem:self.playerItem];
AVPlayerLayer *playerLayer = [AVPlayerLayer playerLayerWithPlayer:player];
[playerLayer setFrame:CGRectMake(0, 0, self.imageView.frame.size.width, self.imageView.frame.size.height)];
[[[self imageView] layer] addSublayer:playerLayer];
playerLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
[player play];
[[NSNotificationCenter defaultCenter]
addObserver:self selector:@selector(checkPlayEnded) name:AVPlayerItemDidPlayToEndTimeNotification object:self.playerItem];
I am currently facing the following issues:
If one video is in portrait and the other is in landscape, how can I rotate the portrait video to landscape? My view is in landscape orientation, but the portrait video keeps its original orientation.
(I am loading videos stored in the camera roll, not recording them inside my app.)
Setting the above issue aside: if I merge any number of videos, they work fine. But once I save the new video to my library, load it into my app again, and try to join it with another new video, the resolution gets distorted, even though both videos play fine separately in the app. How can I solve that?
(I have tried to follow the WWDC 2010 video editing tutorial, so this code is extracted from there.)
You can check the orientation of the video at run time in the above code, at the point where you create the AVMutableVideoCompositionInstruction object.
The code to append in order to fix the issue is:
AVMutableVideoCompositionInstruction *instruction = [AVMutableVideoCompositionInstruction videoCompositionInstruction];
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, [mutableComposition duration]);
AVAssetTrack *videoTrack = [[mutableComposition tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0];
AVMutableVideoCompositionLayerInstruction * layerInstruction = [AVMutableVideoCompositionLayerInstruction videoCompositionLayerInstructionWithAssetTrack:videoTrack];
UIImageOrientation videoAssetOrientation_ = UIImageOrientationUp;
BOOL isVideoAssetPortrait_ = NO;
CGAffineTransform videoTransform = assetVideoTrack.preferredTransform;
if(videoTransform.a == 0 && videoTransform.b == 1.0 && videoTransform.c == -1.0 && videoTransform.d == 0)
{
videoAssetOrientation_= UIImageOrientationRight;
isVideoAssetPortrait_ = YES;
}
if(videoTransform.a == 0 && videoTransform.b == -1.0 && videoTransform.c == 1.0 && videoTransform.d == 0)
{
videoAssetOrientation_ = UIImageOrientationLeft;
isVideoAssetPortrait_ = YES;
}
CGFloat FirstAssetScaleToFitRatio = 320.0 / assetVideoTrack.naturalSize.width;
if(isVideoAssetPortrait_)
{
videoSize=CGSizeMake(350,400);
FirstAssetScaleToFitRatio = 320.0/assetVideoTrack.naturalSize.height;
CGAffineTransform FirstAssetScaleFactor = CGAffineTransformMakeScale(FirstAssetScaleToFitRatio,FirstAssetScaleToFitRatio);
[layerInstruction setTransform:CGAffineTransformConcat(assetVideoTrack.preferredTransform, FirstAssetScaleFactor) atTime:kCMTimeZero];
}
else
{
videoSize=CGSizeMake(assetVideoTrack.naturalSize.width,assetVideoTrack.naturalSize.height);
}
The above code keeps a landscape video in landscape and prevents a portrait video from being converted to landscape.
I hope this helps. Instead of first converting to the proper orientation and then applying the editing, appending this code removes one step: you can do both things (editing and orientation) in one pass, and more quickly.
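If you need this check in more than one place, you can wrap the preferredTransform comparison into a small helper. This is only a sketch built from the checks above; the method name isTrackPortrait: is mine, not part of AVFoundation.
// Hypothetical helper: YES if the track was recorded in portrait,
// based on the same preferredTransform checks used above.
- (BOOL)isTrackPortrait:(AVAssetTrack *)track
{
    CGAffineTransform t = track.preferredTransform;
    if (t.a == 0 && t.b == 1.0 && t.c == -1.0 && t.d == 0) return YES;  // rotated 90  (UIImageOrientationRight)
    if (t.a == 0 && t.b == -1.0 && t.c == 1.0 && t.d == 0) return YES;  // rotated -90 (UIImageOrientationLeft)
    return NO;
}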
Related
In my app, I need to edit video recorded with AVFoundation.
Users can add multiple images to the video, and they may drag the images anywhere on the full video screen.
(Screenshots: editing view and exported view)
Here is the code:
AVMutableVideoCompositionInstruction *instruction = nil;
AVMutableVideoCompositionLayerInstruction *layerInstruction = nil;
AVAssetTrack *assetVideoTrack = nil;
AVAssetTrack *assetAudioTrack = nil;
// Check if the asset contains video and audio tracks
if ([[self.inputAsset tracksWithMediaType:AVMediaTypeVideo] count] != 0) {
assetVideoTrack = [self.inputAsset tracksWithMediaType:AVMediaTypeVideo][0];
}
if ([[self.inputAsset tracksWithMediaType:AVMediaTypeAudio] count] != 0) {
assetAudioTrack = [self.inputAsset tracksWithMediaType:AVMediaTypeAudio][0];
}
CMTime insertionPoint = kCMTimeZero;
NSError *error = nil;
// Check if a composition already exists, else create a composition using the input asset
self.composition = [AVMutableComposition composition];
// Insert the video and audio tracks from AVAsset
if (assetVideoTrack != nil) {
AVMutableCompositionTrack *compositionVideoTrack = [self.composition addMutableTrackWithMediaType:AVMediaTypeVideo preferredTrackID:kCMPersistentTrackID_Invalid];
[compositionVideoTrack insertTimeRange:CMTimeRangeMake(kCMTimeZero, [self.inputAsset duration]) ofTrack:assetVideoTrack atTime:insertionPoint error:&error];
}
if (assetAudioTrack != nil) {
AVMutableCompositionTrack *compositionAudioTrack = [self.composition addMutableTrackWithMediaType:AVMediaTypeAudio preferredTrackID:kCMPersistentTrackID_Invalid];
[compositionAudioTrack insertTimeRange:CMTimeRangeMake(kCMTimeZero, [self.inputAsset duration]) ofTrack:assetAudioTrack atTime:insertionPoint error:&error];
}
CGAffineTransform t1 = CGAffineTransformMakeTranslation(assetVideoTrack.naturalSize.height,0);
// Rotate transformation
CGAffineTransform t2 = CGAffineTransformRotate(t1, ( ( 90 ) / 180.0 * M_PI ));
// Create a new video composition
self.videoComposition = [AVMutableVideoComposition videoComposition];
//self.videoComposition.renderSize = CGSizeMake([UIScreen mainScreen].bounds.size.width, [UIScreen mainScreen].bounds.size.height);
self.videoComposition.renderSize = CGSizeMake(assetVideoTrack.naturalSize.height, assetVideoTrack.naturalSize.width);
self.videoComposition.frameDuration = CMTimeMake(1, 30);
// The rotate transform is set on a layer instruction
instruction = [AVMutableVideoCompositionInstruction videoCompositionInstruction];
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, [self.composition duration]);
layerInstruction = [AVMutableVideoCompositionLayerInstruction videoCompositionLayerInstructionWithAssetTrack:(self.composition.tracks)[0]];
[layerInstruction setTransform:t2 atTime:kCMTimeZero];
instruction.layerInstructions = @[layerInstruction];
self.videoComposition.instructions = @[instruction];
if I change
self.videoComposition.renderSize = CGSizeMake(assetVideoTrack.naturalSize.height, assetVideoTrack.naturalSize.width);
to
self.videoComposition.renderSize = CGSizeMake([UIScreen mainScreen].bounds.size.width, [UIScreen mainScreen].bounds.size.height);
In addition, I use this code to add image layer's to video:
- (void)exportWillBegin
{
//CALayer *exportWatermarkLayer = [self copyWatermarkLayer:self.watermarkLayer];
CALayer *parentLayer = [CALayer layer];
CALayer *videoLayer = [CALayer layer];
parentLayer.frame = CGRectMake(0, 0, self.videoComposition.renderSize.width, self.videoComposition.renderSize.height);
videoLayer.frame = CGRectMake(0, 0, self.videoComposition.renderSize.width, self.videoComposition.renderSize.height);
[parentLayer addSublayer:videoLayer];
//exportWatermarkLayer.position = CGPointMake(self.videoComposition.renderSize.width/2, self.videoComposition.renderSize.height/4);
//[parentLayer addSublayer:exportWatermarkLayer];
for (NSInteger i = 0; i < self.stickerViews.count; i++) {
WTStickerView *sticker = [self.stickerViews objectAtIndex:i];
CALayer *layer = sticker.contentView.layer;
// CGAffineTransform transform = CGAffineTransformIdentity;
layer.affineTransform = sticker.transform;
layer.position = sticker.center;
[parentLayer addSublayer:layer];
}
self.videoComposition.animationTool = [AVVideoCompositionCoreAnimationTool videoCompositionCoreAnimationToolWithPostProcessingAsVideoLayer:videoLayer inLayer:parentLayer];
}
Can anyone help me fix this? The image layer is not in the correct position where I dragged it.
I'm trying to loop some fragments of a recorded video and merge them into one video.
I've successfully merged and exported a composition with up to 16 tracks. But when I try to play the composition using AVPlayer before merging, I can only export a maximum of 8 tracks.
First, I create AVComposition and AVVideoComposition
+(void)previewUserClipDanceWithAudio:(NSURL*)videoURL audioURL:(NSURL*)audioFile loop:(NSArray*)loopTime slowMotion:(NSArray*)slowFactor showInViewController:(UIViewController*)viewController completion:(void(^)(BOOL success, AVVideoComposition* videoComposition, AVComposition* composition))completion{
AVMutableComposition* mixComposition = [[AVMutableComposition alloc] init];
NSMutableArray *arrayInstruction = [[NSMutableArray alloc] init];
AVMutableVideoCompositionInstruction *videoCompositionInstruction = [AVMutableVideoCompositionInstruction videoCompositionInstruction];
AVURLAsset *audioAsset = [[AVURLAsset alloc]initWithURL:audioFile options:nil];
//NSLog(#"audio File %#",audioFile);
CMTime duration = kCMTimeZero;
AVAsset *currentAsset = [AVAsset assetWithURL:videoURL];
BOOL isCurrentAssetPortrait = YES;
for(NSInteger i=0;i< [loopTime count]; i++) {
//handle looptime array
NSInteger loopDur = [[loopTime objectAtIndex:i] intValue];
NSInteger value = labs(loopDur);
//NSLog(#"loopInfo %d value %d",loopInfo,value);
//handle slowmotion array
double slowInfo = [[slowFactor objectAtIndex:i] doubleValue];
double videoScaleFactor = fabs(slowInfo);
AVMutableCompositionTrack *currentTrack = [mixComposition addMutableTrackWithMediaType:AVMediaTypeVideo preferredTrackID:kCMPersistentTrackID_Invalid];
AVMutableCompositionTrack *audioTrack;
audioTrack = [mixComposition addMutableTrackWithMediaType:AVMediaTypeAudio
preferredTrackID:kCMPersistentTrackID_Invalid];
if (i==0) {
[currentTrack insertTimeRange:CMTimeRangeMake(kCMTimeZero, currentAsset.duration) ofTrack:[[currentAsset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0] atTime:duration error:nil];
[audioTrack insertTimeRange:CMTimeRangeMake(kCMTimeZero, currentAsset.duration) ofTrack:[[currentAsset tracksWithMediaType:AVMediaTypeAudio] objectAtIndex:0] atTime:duration error:nil];
} else {
[currentTrack insertTimeRange:CMTimeRangeMake(CMTimeSubtract(currentAsset.duration, CMTimeMake(value, 10)), CMTimeMake(value, 10)) ofTrack:[[currentAsset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0] atTime:duration error:nil];
if (videoScaleFactor==1) {
[audioTrack insertTimeRange:CMTimeRangeMake(CMTimeSubtract(currentAsset.duration, CMTimeMake(value, 10)), CMTimeMake(value, 10)) ofTrack:[[currentAsset tracksWithMediaType:AVMediaTypeAudio] objectAtIndex:0] atTime:duration error:nil];
}
//slow motion here
if (videoScaleFactor!=1) {
[currentTrack scaleTimeRange:CMTimeRangeMake(CMTimeSubtract(currentAsset.duration, CMTimeMake(value, 10)), CMTimeMake(value, 10))
toDuration:CMTimeMake(value*videoScaleFactor, 10)];
NSLog(#"slowmo %f",value*videoScaleFactor);
}
}
AVMutableVideoCompositionLayerInstruction *currentAssetLayerInstruction = [AVMutableVideoCompositionLayerInstruction videoCompositionLayerInstructionWithAssetTrack:currentTrack];
AVAssetTrack *currentAssetTrack = [[currentAsset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0];
BOOL isCurrentAssetPortrait = YES;
//CGFloat assetScaleToFitRatio;
//assetScaleToFitRatio = [self getScaleToFitRatioCurrentTrack:currentTrack];
if(isCurrentAssetPortrait){
//NSLog(#"portrait");
if (slowInfo<0) {
CGRect screenRect = [[UIScreen mainScreen] bounds];
CGFloat ratio = screenRect.size.height / screenRect.size.width;
// we have to adjust the ratio for 16:9 screens
if (ratio == 1.775) ratio = 1.77777777777778;
CGFloat complimentSize = (currentAssetTrack.naturalSize.height*ratio);
CGFloat tx = (currentAssetTrack.naturalSize.width-complimentSize)/2;
// invert translation because of portrait
tx *= -1;
// t1: rotate and position video since it may have been cropped to screen ratio
CGAffineTransform t1 = CGAffineTransformTranslate(currentAssetTrack.preferredTransform, tx, 0);
// t2/t3: mirror video vertically
CGAffineTransform t2 = CGAffineTransformTranslate(t1, currentAssetTrack.naturalSize.width, 0);
CGAffineTransform t3 = CGAffineTransformScale(t2, -1, 1);
[currentAssetLayerInstruction setTransform:t3 atTime:duration];
} else if (loopDur<0) {
CGRect screenRect = [[UIScreen mainScreen] bounds];
CGFloat ratio = screenRect.size.height / screenRect.size.width;
// we have to adjust the ratio for 16:9 screens
if (ratio == 1.775) ratio = 1.77777777777778;
CGFloat complimentSize = (currentAssetTrack.naturalSize.height*ratio);
CGFloat tx = (currentAssetTrack.naturalSize.width-complimentSize)/2;
// invert translation because of portrait
tx *= -1;
// t1: rotate and position video since it may have been cropped to screen ratio
CGAffineTransform t1 = CGAffineTransformTranslate(currentAssetTrack.preferredTransform, tx, 0);
// t2/t3: mirror video horizontally
CGAffineTransform t2 = CGAffineTransformTranslate(t1, 0, currentAssetTrack.naturalSize.height);
CGAffineTransform t3 = CGAffineTransformScale(t2, 1, -1);
[currentAssetLayerInstruction setTransform:t3 atTime:duration];
} else {
[currentAssetLayerInstruction setTransform:currentAssetTrack.preferredTransform atTime:duration];
}
}else{
// CGFloat translateAxisX = (currentTrack.naturalSize.width > MAX_WIDTH )?(0.0):0.0;// if use <, 640 video will be moved left by 10px. (float)(MAX_WIDTH - currentTrack.naturalSize.width)/(float)4.0
// CGAffineTransform FirstAssetScaleFactor = CGAffineTransformMakeScale(assetScaleToFitRatio,assetScaleToFitRatio);
// [currentAssetLayerInstruction setTransform:
// CGAffineTransformConcat(CGAffineTransformConcat(currentAssetTrack.preferredTransform, FirstAssetScaleFactor),CGAffineTransformMakeTranslation(translateAxisX, 0)) atTime:duration];
}
if (i==0) {
duration=CMTimeAdd(duration, currentAsset.duration);
} else {
if (videoScaleFactor!=1) {
duration=CMTimeAdd(duration, CMTimeMake(value*videoScaleFactor, 10));
} else {
duration=CMTimeAdd(duration, CMTimeMake(value, 10));
}
}
[currentAssetLayerInstruction setOpacity:0.0 atTime:duration];
[arrayInstruction addObject:currentAssetLayerInstruction];
}
AVMutableCompositionTrack *AudioBGTrack = [mixComposition addMutableTrackWithMediaType:AVMediaTypeAudio preferredTrackID:kCMPersistentTrackID_Invalid];
[AudioBGTrack insertTimeRange:CMTimeRangeMake(kCMTimeZero, audioAsset.duration) ofTrack:[[audioAsset tracksWithMediaType:AVMediaTypeAudio] objectAtIndex:0] atTime:CMTimeSubtract(duration, audioAsset.duration) error:nil];
videoCompositionInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, duration);
videoCompositionInstruction.layerInstructions = arrayInstruction;
CGSize naturalSize;
if(isCurrentAssetPortrait){
naturalSize = CGSizeMake(MAX_HEIGHT,MAX_WIDTH);//currentAssetTrack.naturalSize.height,currentAssetTrack.naturalSize.width);
} else {
naturalSize = CGSizeMake(MAX_WIDTH,MAX_HEIGHT);//currentAssetTrack.naturalSize;
}
AVMutableVideoComposition *videoComposition = [AVMutableVideoComposition videoComposition];
videoComposition.instructions = [NSArray arrayWithObject:videoCompositionInstruction];
videoComposition.frameDuration = CMTimeMake(1, 30);
videoComposition.renderSize = CGSizeMake(naturalSize.width,naturalSize.height);
NSLog(#"prepared");
AVVideoComposition *composition = [videoComposition copy];
AVComposition *mixedComposition = [mixComposition copy];
completion(YES, composition, mixedComposition);
}
Then I set up the AVPlayer:
-(void)playVideoWithComposition:(AVVideoComposition*)videoComposition inMutableComposition:(AVComposition*)composition{
MBProgressHUD *hud = [MBProgressHUD showHUDAddedTo:self.view animated:YES];
hud.label.text = myLanguage(@"kMergeClip");
savedComposition = [composition copy];
savedVideoComposition = [videoComposition copy];
playerItem = [AVPlayerItem playerItemWithAsset:composition];
playerItem.videoComposition = videoComposition;
[[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(repeatVideo:) name:AVPlayerItemDidPlayToEndTimeNotification object:playerItem];
if (!player) {
player = [AVPlayer playerWithPlayerItem:playerItem];
layer = [AVPlayerLayer playerLayerWithPlayer:player];
layer.frame = [UIScreen mainScreen].bounds;
[self.ibPlayerView.layer insertSublayer:layer atIndex:0];
NSLog(#"create new player");
}
if (player.currentItem != playerItem ) {
[player replaceCurrentItemWithPlayerItem:playerItem];
}
player.actionAtItemEnd = AVPlayerActionAtItemEndNone;
//[player seekToTime:kCMTimeZero];
[playerItem addObserver:self
forKeyPath:#"status"
options:NSKeyValueObservingOptionInitial | NSKeyValueObservingOptionNew
context:#"AVPlayerStatus"];
}
When the user has previewed all the videos they want and hits Save, I use this method to export:
+(void)mergeUserCLip:(AVVideoComposition*)videoComposition withAsset:(AVComposition*)mixComposition showInViewController:(UIViewController*)viewController completion:(void(^)(BOOL success, NSURL *fileURL))completion{
MBProgressHUD *hud = [MBProgressHUD showHUDAddedTo:viewController.view animated:YES];
hud.mode = MBProgressHUDModeDeterminateHorizontalBar;
hud.label.text = myLanguage(@"kMergeClip");
//Name merge clip using beat name
//NSString* beatName = [[[NSString stringWithFormat:@"%@",audioFile] lastPathComponent] stringByDeletingPathExtension];
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *documentsDirectory = [paths objectAtIndex:0];
NSString *tmpDir = [[documentsDirectory stringByDeletingLastPathComponent] stringByAppendingPathComponent:@"tmp"];
NSString *myPathDocs = [tmpDir stringByAppendingPathComponent:[NSString stringWithFormat:@"merge-beat.mp4"]];
//Not remove here, will remove when call previewPlayVC
[[NSFileManager defaultManager] removeItemAtPath:myPathDocs error:nil];
// 1 - set up the overlay
CALayer *overlayLayer = [CALayer layer];
UIImage *overlayImage = [UIImage imageNamed:@"watermark.png"];
[overlayLayer setContents:(id)[overlayImage CGImage]];
overlayLayer.frame = CGRectMake(720-221, 1280-109, 181, 69);
[overlayLayer setMasksToBounds:YES];
// aLayer = [CALayer layer];
// [aLayer addSublayer:labelLogo.layer];
// aLayer.frame = CGRectMake(MAX_WIDTH- labelLogo.width - 10.0, MAX_HEIGHT-50.0, 20.0, 20.0);
// aLayer.opacity = 1;
// 2 - set up the parent layer
CALayer *parentLayer = [CALayer layer];
CALayer *videoLayer = [CALayer layer];
parentLayer.frame = CGRectMake(0, 0, MAX_HEIGHT,MAX_WIDTH);
videoLayer.frame = CGRectMake(0, 0, MAX_HEIGHT,MAX_WIDTH);
[parentLayer addSublayer:videoLayer];
[parentLayer addSublayer:overlayLayer];
// 3 - apply magic
AVMutableVideoComposition *mutableVideoComposition = [videoComposition mutableCopy];
mutableVideoComposition.animationTool = [AVVideoCompositionCoreAnimationTool
videoCompositionCoreAnimationToolWithPostProcessingAsVideoLayer:videoLayer inLayer:parentLayer];
NSURL *url = [NSURL fileURLWithPath:myPathDocs];
myLog(#"Path: %#", myPathDocs);
AVAssetExportSession *exporter = [[AVAssetExportSession alloc] initWithAsset:mixComposition presetName:AVAssetExportPreset1280x720];
exporter.outputURL = url;
exporter.outputFileType = AVFileTypeMPEG4;
exporter.videoComposition = mutableVideoComposition;
exporter.shouldOptimizeForNetworkUse = NO;
[exporter exportAsynchronouslyWithCompletionHandler:^ {
//NSLog(#"exporting");
switch (exporter.status) {
case AVAssetExportSessionStatusCompleted: {
NSURL *url = [NSURL fileURLWithPath:myPathDocs];
hud.progress = 1.0f;
dispatch_async(dispatch_get_main_queue(), ^{
[MBProgressHUD hideHUDForView:viewController.view animated:YES];
});
[self checkTmpSize];
if (completion) {
completion(YES, url);
}
}
break;
case AVAssetExportSessionStatusExporting:
myLog(#"Exporting!");
break;
case AVAssetExportSessionStatusWaiting:
myLog(#"Waiting");
break;
default:
break;
}
}];
}
If I select options to loop fewer than 8 times, the above code works fine.
If I select options to loop more than 8 times, the export session freezes, with exporter.progress stuck at 0.000000.
If I remove this line
playerItem.videoComposition = videoComposition;
Then I cannot preview the mixed video, but I am able to export normally (up to 16 tracks).
Or, if I remove this line in the export code:
exporter.videoComposition = mutableVideoComposition;
Then it is possible to preview the mixed video and export normally, but WITHOUT the video composition.
So I guess there's something wrong with AVVideoComposition and/or the way I implement it.
I would appreciate any suggestion.
Many thanks.
I highly doubt that the reason for this is that using AVPlayer to preview the video somehow hinders AVAssetExportSession, as described in the posts below:
iOS 5: Error merging 3 videos with AVAssetExportSession
AVPlayerItem fails with AVStatusFailed and error code “Cannot Decode”
I ran into this issue while attempting to concatenate N videos while also playing up to 3 videos in AVPlayer instances in a UICollectionView. As discussed in the Stack Overflow question you linked, iOS can only handle so many instances of AVPlayer; each instance uses up a "render pipeline". I discovered that each instance of AVMutableCompositionTrack also uses up one of these render pipelines.
Therefore, if you use too many AVPlayer instances or try to create an AVMutableComposition with too many AVMutableCompositionTrack tracks, you can run out of resources to decode H.264 and you will receive the "Cannot Decode" error. I was able to get around the issue by using only two instances of AVMutableCompositionTrack. This way I could "overlap" segments of video while also applying transitions (which requires two video tracks to play concurrently).
In short: minimize your usage of AVMutableCompositionTrack as well as AVPlayer. You can check out Apple's AVCustomEdit sample code for an example of this. Specifically, look at the buildTransitionComposition method inside the APLSimpleEditor class.
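To make the two-track idea concrete, here is a minimal sketch (not your code): clips are laid into just two alternating video tracks, the same pattern used in the first code block on this page and in AVCustomEdit. The `clips` array is assumed to hold AVAsset objects.
AVMutableComposition *composition = [AVMutableComposition composition];
AVMutableCompositionTrack *videoTracks[2];
videoTracks[0] = [composition addMutableTrackWithMediaType:AVMediaTypeVideo preferredTrackID:kCMPersistentTrackID_Invalid];
videoTracks[1] = [composition addMutableTrackWithMediaType:AVMediaTypeVideo preferredTrackID:kCMPersistentTrackID_Invalid];
CMTime cursor = kCMTimeZero;
for (NSUInteger i = 0; i < clips.count; i++) {
    AVAsset *clip = clips[i];
    AVAssetTrack *srcTrack = [[clip tracksWithMediaType:AVMediaTypeVideo] firstObject];
    NSError *error = nil;
    // Alternate between track 0 and track 1 instead of adding a new track per clip,
    // so the composition never needs more than two render pipelines.
    [videoTracks[i % 2] insertTimeRange:CMTimeRangeMake(kCMTimeZero, clip.duration)
                                ofTrack:srcTrack
                                 atTime:cursor
                                  error:&error];
    cursor = CMTimeAdd(cursor, clip.duration); // subtract a transition duration here if clips should overlap
}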
Try this: clear the player item before exporting.
[self.player replaceCurrentItemWithPlayerItem:nil];
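For example (a sketch only; `exporter` stands for whatever AVAssetExportSession you are about to start), clear the item right before the export and reattach it when the export finishes:
AVPlayerItem *itemToRestore = self.player.currentItem;
// Free the player's decode pipeline before the export starts.
[self.player replaceCurrentItemWithPlayerItem:nil];
[exporter exportAsynchronouslyWithCompletionHandler:^{
    dispatch_async(dispatch_get_main_queue(), ^{
        // Reattach the item if you still need playback afterwards.
        [self.player replaceCurrentItemWithPlayerItem:itemToRestore];
    });
}];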
I need to accomplish the following: there is a GIF overlay on a video, and I want to composite the video and the GIF into a new video. I'm using the following code, but the result is only the video, without the GIF:
- (void)mixVideoAsset:(AVAsset *)videoAsset {
LLog(#"Begining");
NSDate * begin = [NSDate date];
// 2 - Create AVMutableComposition object. This object will hold your AVMutableCompositionTrack instances.
AVMutableComposition *mixComposition = [[AVMutableComposition alloc] init];
// 3 - Video track
AVMutableCompositionTrack *videoTrack = [mixComposition addMutableTrackWithMediaType:AVMediaTypeVideo
preferredTrackID:kCMPersistentTrackID_Invalid];
[videoTrack insertTimeRange:CMTimeRangeMake(kCMTimeZero, videoAsset.duration)
ofTrack:[[videoAsset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0]
atTime:kCMTimeZero error:nil];
// - Audio
AVMutableCompositionTrack *audioCompositionTrack = [mixComposition addMutableTrackWithMediaType:AVMediaTypeAudio preferredTrackID:kCMPersistentTrackID_Invalid];
AVAssetTrack *audioTrack = [[videoAsset tracksWithMediaType:AVMediaTypeAudio] objectAtIndex:0];
[audioCompositionTrack insertTimeRange:CMTimeRangeMake(kCMTimeZero, audioTrack.timeRange.duration) ofTrack:audioTrack atTime:kCMTimeZero error:nil];
// 3.1 - Create AVMutableVideoCompositionInstruction
AVMutableVideoCompositionInstruction *mainInstruction = [AVMutableVideoCompositionInstruction videoCompositionInstruction];
mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, videoAsset.duration);
// 3.2 - Create an AVMutableVideoCompositionLayerInstruction for the video track and fix the orientation.
AVMutableVideoCompositionLayerInstruction *videolayerInstruction = [AVMutableVideoCompositionLayerInstruction videoCompositionLayerInstructionWithAssetTrack:videoTrack];
AVAssetTrack *videoAssetTrack = [[videoAsset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0];
UIImageOrientation videoAssetOrientation_ = UIImageOrientationUp;
BOOL isVideoAssetPortrait_ = NO;
CGAffineTransform videoTransform = videoAssetTrack.preferredTransform;
if (videoTransform.a == 0 && videoTransform.b == 1.0 && videoTransform.c == -1.0 && videoTransform.d == 0) {
videoAssetOrientation_ = UIImageOrientationRight;
isVideoAssetPortrait_ = YES;
}
if (videoTransform.a == 0 && videoTransform.b == -1.0 && videoTransform.c == 1.0 && videoTransform.d == 0) {
videoAssetOrientation_ = UIImageOrientationLeft;
isVideoAssetPortrait_ = YES;
}
if (videoTransform.a == 1.0 && videoTransform.b == 0 && videoTransform.c == 0 && videoTransform.d == 1.0) {
videoAssetOrientation_ = UIImageOrientationUp;
}
if (videoTransform.a == -1.0 && videoTransform.b == 0 && videoTransform.c == 0 && videoTransform.d == -1.0) {
videoAssetOrientation_ = UIImageOrientationDown;
}
[videolayerInstruction setTransform:videoAssetTrack.preferredTransform atTime:kCMTimeZero];
[videolayerInstruction setOpacity:0.0 atTime:videoAsset.duration];
// 3.3 - Add instructions
mainInstruction.layerInstructions = [NSArray arrayWithObjects:videolayerInstruction,nil];
AVMutableVideoComposition *mainCompositionInst = [AVMutableVideoComposition videoComposition];
CGSize naturalSize;
if(isVideoAssetPortrait_){
naturalSize = CGSizeMake(videoAssetTrack.naturalSize.height, videoAssetTrack.naturalSize.width);
} else {
naturalSize = videoAssetTrack.naturalSize;
}
float renderWidth, renderHeight;
renderWidth = naturalSize.width;
renderHeight = naturalSize.height;
mainCompositionInst.renderSize = CGSizeMake(renderWidth, renderHeight);
mainCompositionInst.instructions = [NSArray arrayWithObject:mainInstruction];
mainCompositionInst.frameDuration = CMTimeMake(1, 30);
// Watermark Layers
[self applyVideoEffectsToComposition:mainCompositionInst size:naturalSize];
// 4 - Get path
// NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
// NSString *documentsDirectory = [paths objectAtIndex:0];
// NSString *myPathDocs = [documentsDirectory stringByAppendingPathComponent:
// [NSString stringWithFormat:@"FinalVideo-%d.mov",arc4random() % 1000]];
// NSURL *url = [NSURL fileURLWithPath:myPathDocs];
NSURL * url = TempVideoURL();
// 5 - Create exporter
AVAssetExportSession *exporter = [[AVAssetExportSession alloc] initWithAsset:mixComposition
presetName:AVAssetExportPresetHighestQuality];
exporter.outputURL=url;
exporter.outputFileType = AVFileTypeMPEG4;
exporter.shouldOptimizeForNetworkUse = YES;
exporter.videoComposition = mainCompositionInst;
[exporter exportAsynchronouslyWithCompletionHandler:^{
dispatch_async(dispatch_get_main_queue(), ^{
NSDate * endDate = [NSDate date];
NSTimeInterval interval = [endDate timeIntervalSinceDate:begin];
LLog(#"completed %f senconds",interval);
ALAssetsLibrary *assetsLibrary = [[ALAssetsLibrary alloc] init];
if ([assetsLibrary videoAtPathIsCompatibleWithSavedPhotosAlbum:exporter.outputURL]) {
[assetsLibrary writeVideoAtPathToSavedPhotosAlbum:exporter.outputURL completionBlock:NULL];
}
});
}];
}
Add the GIF watermark:
- (void)applyVideoEffectsToComposition:(AVMutableVideoComposition *)composition size:(CGSize)size
{
// - set up the parent layer
CALayer *parentLayer = [CALayer layer];
CALayer *videoLayer = [CALayer layer];
parentLayer.frame = CGRectMake(0, 0, size.width, size.height);
videoLayer.frame = CGRectMake(0, 0, size.width, size.height);
[parentLayer addSublayer:videoLayer];
size.width = 100;
size.height = 100;
// - set up the overlay
CALayer *overlayLayer = [CALayer layer];
overlayLayer.frame = CGRectMake(0, 100, size.width, size.height);
NSURL *fileUrl = [[NSBundle mainBundle] URLForResource:@"jiafei" withExtension:@"gif"];
[BBGifManager startGifAnimationWithURL:fileUrl inLayer:overlayLayer];
// UIImage * image = [UIImage imageNamed:@"gifImage.gif"];
// [overlayLayer setContents:(id)[image CGImage]];
// [overlayLayer setMasksToBounds:YES];
[parentLayer addSublayer:overlayLayer];
// - apply magic
composition.animationTool = [AVVideoCompositionCoreAnimationTool
videoCompositionCoreAnimationToolWithPostProcessingAsVideoLayer:videoLayer inLayer:parentLayer];
}
Add CALayer Animations
+ (void)startGifAnimationWithURL:(NSURL *)url inLayer:(CALayer *)layer {
CAKeyframeAnimation * animation = [self animationForGifWithURL:url];
[layer addAnimation:animation forKey:@"contents"];
}
Create the CAKeyframeAnimation:
+ (CAKeyframeAnimation *)animationForGifWithURL:(NSURL *)url {
CAKeyframeAnimation *animation = [CAKeyframeAnimation animationWithKeyPath:@"contents"];
NSMutableArray * frames = [NSMutableArray new];
NSMutableArray *delayTimes = [NSMutableArray new];
CGFloat totalTime = 0.0;
CGFloat gifWidth;
CGFloat gifHeight;
CGImageSourceRef gifSource = CGImageSourceCreateWithURL((CFURLRef)url, NULL);
// get frame count
size_t frameCount = CGImageSourceGetCount(gifSource);
for (size_t i = 0; i < frameCount; ++i) {
// get each frame
CGImageRef frame = CGImageSourceCreateImageAtIndex(gifSource, i, NULL);
[frames addObject:(__bridge id)frame];
CGImageRelease(frame);
// get gif info with each frame
NSDictionary *dict = (NSDictionary*)CFBridgingRelease(CGImageSourceCopyPropertiesAtIndex(gifSource, i, NULL));
NSLog(#"kCGImagePropertyGIFDictionary %#", [dict valueForKey:(NSString*)kCGImagePropertyGIFDictionary]);
// get gif size
gifWidth = [[dict valueForKey:(NSString*)kCGImagePropertyPixelWidth] floatValue];
gifHeight = [[dict valueForKey:(NSString*)kCGImagePropertyPixelHeight] floatValue];
// In kCGImagePropertyGIFDictionary, kCGImagePropertyGIFDelayTime and kCGImagePropertyGIFUnclampedDelayTime hold the same value
NSDictionary *gifDict = [dict valueForKey:(NSString*)kCGImagePropertyGIFDictionary];
[delayTimes addObject:[gifDict valueForKey:(NSString*)kCGImagePropertyGIFDelayTime]];
totalTime = totalTime + [[gifDict valueForKey:(NSString*)kCGImagePropertyGIFDelayTime] floatValue];
CFRelease((__bridge CFTypeRef)(dict));
}
if (gifSource) {
CFRelease(gifSource);
}
NSMutableArray *times = [NSMutableArray arrayWithCapacity:3];
CGFloat currentTime = 0;
NSInteger count = delayTimes.count;
for (int i = 0; i < count; ++i) {
[times addObject:[NSNumber numberWithFloat:(currentTime / totalTime)]];
currentTime += [[delayTimes objectAtIndex:i] floatValue];
}
NSMutableArray *images = [NSMutableArray arrayWithCapacity:3];
for (int i = 0; i < count; ++i) {
[images addObject:[frames objectAtIndex:i]];
}
animation.keyTimes = times;
animation.values = images;
animation.timingFunction = [CAMediaTimingFunction functionWithName:kCAMediaTimingFunctionLinear];
animation.duration = totalTime;
animation.repeatCount = HUGE_VALF;
return animation;
}
You should adjust your animation settings for Core Animation:
animation.beginTime = AVCoreAnimationBeginTimeAtZero;
animation.removedOnCompletion = NO;
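For instance, applied to the animationForGifWithURL: method above, the two properties would be set on the animation just before returning it (only the last two assignments are new; everything else is unchanged from the method above):
animation.duration = totalTime;
animation.repeatCount = HUGE_VALF;
// Needed for AVVideoCompositionCoreAnimationTool: start at the composition's time zero
// and keep the animation after it completes, otherwise the overlay may not appear in the export.
animation.beginTime = AVCoreAnimationBeginTimeAtZero;
animation.removedOnCompletion = NO;
return animation;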
Just in case, here's an example in Swift 3 of how to do the same thing: insert animated frames/images into the video (not exactly a GIF, but an array of images). It uses AVAssetExportSession and AVMutableVideoComposition together with AVMutableVideoCompositionInstruction, and CAKeyframeAnimation to animate the frames.
I am using ELCImagePicker to select multiple videos from the library, and I get a "Connection to assetsd was interrupted or assetsd died" error when I try to export multiple recorded videos selected from the library. But it works fine if I select only downloaded videos with ELCImagePicker, or if I use UIImagePicker to select the recorded videos from the library. Is there any solution for this type of problem?
My Code:
-(void)elcImagePickerController:(ELCImagePickerController *)picker didFinishPickingMediaWithInfo:(NSArray *)info
{
[self dismissViewControllerAnimated:YES completion:nil];
for (NSDictionary *dict in info) {
if ([dict objectForKey:UIImagePickerControllerMediaType] == ALAssetTypeVideo){
if ([dict objectForKey:UIImagePickerControllerOriginalImage]){
videoUrl=[dict objectForKey:UIImagePickerControllerReferenceURL];
[self InsertVideoAsset];
}
}
}
[self GetMargedVideo];
}
Sometimes my merged composition plays only the audio, not the video, but sometimes both audio and video work fine. Is there any problem with the code below? Please help me.
-(void)GetMargedVideo{
LastTime=kCMTimeZero;
TotalTime=kCMTimeZero;
mixComposition=nil; // AVMutableComposition
mainCompositionInst=nil; // AVMutableVideoComposition
mixComposition=[AVMutableComposition composition];
mainInstruction = [AVMutableVideoCompositionInstruction videoCompositionInstruction];
renderWidth=1280;
renderHeight=1280;
[Objects removeAllObjects];
//LayerInstruction used to get video layer Instructions
AVMutableVideoCompositionLayerInstruction *firstlayerInstruction;
self.stokeimage.hidden=YES;
for(int i=0; i<[VideoInfo count];i++)
{
self.stokeimage.hidden=NO;
TargetVideo=i;
VideoTrack = [mixComposition addMutableTrackWithMediaType:AVMediaTypeVideo
preferredTrackID:kCMPersistentTrackID_Invalid];
AudioTrack = [mixComposition addMutableTrackWithMediaType:AVMediaTypeAudio
preferredTrackID:kCMPersistentTrackID_Invalid];
VideoProperty *vp =[VideoInfo objectAtIndex:i];
STime=vp.startTime;
ETime=vp.endTime;
TimeDiff=CMTimeSubtract(ETime, STime);
LastTime=TotalTime;
TotalTime=CMTimeAdd(TotalTime, TimeDiff);
vp.appearTime=LastTime;
TargetTime=LastTime;
avasset=[AVAsset assetWithURL:vp.Url];
//Insert Video and Audio to the Composition "mixComposition"
[VideoTrack insertTimeRange:CMTimeRangeMake(STime, TimeDiff)
ofTrack:[[avasset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0] atTime:LastTime error:nil];
if([[avasset tracksWithMediaType:AVMediaTypeAudio] count])
{
if(!GetMusic)
{
[AudioTrack insertTimeRange:CMTimeRangeMake(STime, TimeDiff)
ofTrack:[[avasset tracksWithMediaType:AVMediaTypeAudio] objectAtIndex:0] atTime:LastTime error:nil];
}
}
// Add instructions
if(vp.GetInstuction)
{
// GET INSTRUCTION: if Video already have instructions
firstlayerInstruction = [AVMutableVideoCompositionLayerInstruction videoCompositionLayerInstructionWithAssetTrack:VideoTrack];
[firstlayerInstruction setTransform:vp.LayerInstruction atTime:LastTime];
[firstlayerInstruction setOpacity:0 atTime:TotalTime];
[Objects addObject:firstlayerInstruction];
}
else
{
// GET INSTRUCTION: When a Video add first time to the composition
AVAssetTrack *assetTrack = [[avasset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0];
AVMutableVideoCompositionLayerInstruction *layerInstruction = [AVMutableVideoCompositionLayerInstruction videoCompositionLayerInstructionWithAssetTrack:VideoTrack];
CGAffineTransform videoTransform = assetTrack.preferredTransform;
CGSize naturalSize = assetTrack.naturalSize;
BOOL bLandscape = NO;
CGSize renderSize = CGSizeMake(self.videoplayer.frame.size.width * [[UIScreen mainScreen] scale], self.videoplayer.frame.size.width * [[UIScreen mainScreen] scale]);
renderSize =CGSizeMake(renderWidth, renderHeight);
if(self.videoplayer.frame.size.width > self.videoplayer.frame.size.height && bIsVideoPortrait)
{
bLandscape = YES;
renderSize = CGSizeMake(renderSize.height, renderSize.width);
naturalSize = CGSizeMake(naturalSize.height, naturalSize.width);
}
else if(self.videoplayer.frame.size.height > self.videoplayer.frame.size.width && !bIsVideoPortrait)
{
bLandscape = YES;
renderSize = CGSizeMake(renderSize.height, renderSize.width);
naturalSize = CGSizeMake(naturalSize.height, naturalSize.width);
}
//Orientation Check
CGAffineTransform firstTransform = assetTrack.preferredTransform;
BOOL PotraitVideo=NO;
if (firstTransform.a == 0 && firstTransform.b == 1.0 && firstTransform.c == -1.0 && firstTransform.d == 0) {
PotraitVideo=YES;
// NSLog(#"Potratit Video");
}
if (firstTransform.a == 0 && firstTransform.b == -1.0 && firstTransform.c == 1.0 && firstTransform.d == 0) {
PotraitVideo=YES;
// NSLog(#"Potratit Video");
}
//Orientation Check Finish
if(bIsVideoPortrait)
naturalSize = CGSizeMake(naturalSize.height, naturalSize.width);
scaleValue = 1;
translationPoint = CGPointMake(self.videoplayer.frame.origin.x, self.videoplayer.frame.origin.y);
CGFloat pointX = translationPoint.x * naturalSize.width / self.videoplayer.frame.size.width;
CGFloat pointY = translationPoint.y * naturalSize.height / self.videoplayer.frame.size.height;
pointY=0;
pointX=0;
CGAffineTransform new = CGAffineTransformConcat(videoTransform, CGAffineTransformMakeScale(scaleValue, scaleValue));
CGAffineTransform newer = CGAffineTransformConcat(new, CGAffineTransformMakeTranslation(pointX, pointY));
CGFloat rotateTranslateX = 0;
CGFloat rotateTranslateY = 0;
if(rotationValue - 0.0f > 0.01f && rotationValue - 180.f < 0.01)
rotateTranslateX = MIN((naturalSize.width * rotationValue) / 90.0f, naturalSize.width);
if(rotationValue - 90.0f > 0.01f && rotationValue < 360.0f)
rotateTranslateY = MIN((naturalSize.height * rotationValue) / 180.0f, naturalSize.height);
CGAffineTransform rotationT = CGAffineTransformConcat(newer, CGAffineTransformMakeRotation(DEGREES_TO_RADIANS(rotationValue)));
CGAffineTransform rotateTranslate = CGAffineTransformConcat(rotationT, CGAffineTransformMakeTranslation(rotateTranslateX, rotateTranslateY));
CGSize temp = CGSizeApplyAffineTransform(assetTrack.naturalSize, videoTransform);
CGSize size = CGSizeMake(fabsf(temp.width), fabsf(temp.height));
if(bLandscape)
{
size = CGSizeMake(size.height, size.width);
}
float s1 = renderSize.width/size.width;
float s2 = renderSize.height/size.height;
float s = MIN(s1, s2);
CGAffineTransform new2 = CGAffineTransformConcat(rotateTranslate, CGAffineTransformMakeScale(s,s));
float x = (renderSize.width - size.width*s)/2;
float y = (renderSize.height - size.height*s)/2;
newer2 = CGAffineTransformIdentity;
if(bLandscape)
newer2 = CGAffineTransformConcat(new2, CGAffineTransformMakeTranslation(x, y));
else
newer2 = CGAffineTransformConcat(new2, CGAffineTransformMakeTranslation(x, y));
//Store layerInstruction to an array "Objects"
[layerInstruction setTransform:newer2 atTime:LastTime];
[layerInstruction setOpacity:0.0 atTime: TotalTime];
[Objects addObject:layerInstruction];
vp.GetInstuction=YES;
vp.LayerInstruction=newer2;
vp.Portrait=PotraitVideo;
[VideoInfo replaceObjectAtIndex:i withObject:vp];
}
}
if(GetMusic)
{
OriginalAsset=mixComposition;
AudioTrack=nil;
AudioTrack = [mixComposition addMutableTrackWithMediaType:AVMediaTypeAudio
preferredTrackID:kCMPersistentTrackID_Invalid];
[AudioTrack insertTimeRange:CMTimeRangeMake(kCMTimeZero, TotalTime)
ofTrack:[[MusicAsset tracksWithMediaType:AVMediaTypeAudio] objectAtIndex:0] atTime:kCMTimeZero error:nil];
}
//Apply all the instruction to the the Videocomposition "mainCompositionInst"
mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, TotalTime);
mainInstruction.layerInstructions = Objects;
mainCompositionInst = [AVMutableVideoComposition videoComposition];
mainCompositionInst.instructions = [NSArray arrayWithObject:mainInstruction];
mainCompositionInst.frameDuration = CMTimeMake(1, 30);
mainCompositionInst.renderSize = CGSizeMake(renderWidth, renderHeight);
[self PlayVideo];
}
I am using AVCaptureMovieFileOutput to record some video. I have the preview layer displayed using AVLayerVideoGravityResizeAspectFill, which zooms in slightly. The problem I have is that the final video is larger, containing extra image content that didn't fit on the screen during the preview.
This is the preview and resulting video
Is there a way I can specify a CGRect that I want to cut from the video using AVAssetExportSession?
EDIT ----
When I apply a CGAffineTransformScale to the AVAssetTrack, it zooms into the video, and with the AVMutableVideoComposition renderSize set to view.bounds it crops off the ends. Great, there's just one problem left: the width of the video does not stretch to the correct width; it just gets filled with black.
EDIT 2 ----
The suggested question/answer is incomplete.
Some of my code:
In my - (void)captureOutput:(AVCaptureFileOutput *)captureOutput didFinishRecordingToOutputFileAtURL:(NSURL *)outputFileURL fromConnections:(NSArray *)connections error:(NSError *)error method, I have this to crop and resize the video:
- (void)flipAndSave:(NSURL *)videoURL withCompletionBlock:(void(^)(NSURL *returnURL))completionBlock
{
AVURLAsset *firstAsset = [AVURLAsset assetWithURL:videoURL];
// 1 - Create AVMutableComposition object. This object will hold your AVMutableCompositionTrack instances.
AVMutableComposition *mixComposition = [[AVMutableComposition alloc] init];
// 2 - Video track
AVMutableCompositionTrack *firstTrack = [mixComposition addMutableTrackWithMediaType:AVMediaTypeVideo
preferredTrackID:kCMPersistentTrackID_Invalid];
[firstTrack insertTimeRange:CMTimeRangeMake(kCMTimeZero, firstAsset.duration)
ofTrack:[[firstAsset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0] atTime:kCMTimeZero error:nil];
// 2.1 - Create AVMutableVideoCompositionInstruction
AVMutableVideoCompositionInstruction *mainInstruction = [AVMutableVideoCompositionInstruction videoCompositionInstruction];
mainInstruction.timeRange = CMTimeRangeMake(CMTimeMakeWithSeconds(0, 600), firstAsset.duration);
// 2.2 - Create an AVMutableVideoCompositionLayerInstruction for the first track
AVMutableVideoCompositionLayerInstruction *firstlayerInstruction = [AVMutableVideoCompositionLayerInstruction videoCompositionLayerInstructionWithAssetTrack:firstTrack];
AVAssetTrack *firstAssetTrack = [[firstAsset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0];
UIImageOrientation firstAssetOrientation_ = UIImageOrientationUp;
BOOL isFirstAssetPortrait_ = NO;
CGAffineTransform firstTransform = firstAssetTrack.preferredTransform;
if (firstTransform.a == 0 && firstTransform.b == 1.0 && firstTransform.c == -1.0 && firstTransform.d == 0) {
firstAssetOrientation_ = UIImageOrientationRight;
isFirstAssetPortrait_ = YES;
}
if (firstTransform.a == 0 && firstTransform.b == -1.0 && firstTransform.c == 1.0 && firstTransform.d == 0) {
firstAssetOrientation_ = UIImageOrientationLeft;
isFirstAssetPortrait_ = YES;
}
if (firstTransform.a == 1.0 && firstTransform.b == 0 && firstTransform.c == 0 && firstTransform.d == 1.0) {
firstAssetOrientation_ = UIImageOrientationUp;
}
if (firstTransform.a == -1.0 && firstTransform.b == 0 && firstTransform.c == 0 && firstTransform.d == -1.0) {
firstAssetOrientation_ = UIImageOrientationDown;
}
// [firstlayerInstruction setTransform:firstAssetTrack.preferredTransform atTime:kCMTimeZero];
// [firstlayerInstruction setCropRectangle:self.view.bounds atTime:kCMTimeZero];
CGFloat scale = [self getScaleFromAsset:firstAssetTrack];
firstTransform = CGAffineTransformScale(firstTransform, scale, scale);
[firstlayerInstruction setTransform:firstTransform atTime:kCMTimeZero];
// 2.4 - Add instructions
mainInstruction.layerInstructions = [NSArray arrayWithObjects:firstlayerInstruction,nil];
AVMutableVideoComposition *mainCompositionInst = [AVMutableVideoComposition videoComposition];
mainCompositionInst.instructions = [NSArray arrayWithObject:mainInstruction];
mainCompositionInst.frameDuration = CMTimeMake(1, 30);
// CGSize videoSize = firstAssetTrack.naturalSize;
CGSize videoSize = self.view.bounds.size;
BOOL isPortrait_ = [self isVideoPortrait:firstAsset];
if(isPortrait_) {
videoSize = CGSizeMake(videoSize.height, videoSize.width);
}
NSLog(#"%#", NSStringFromCGSize(videoSize));
mainCompositionInst.renderSize = videoSize;
// 3 - Audio track
AVMutableCompositionTrack *AudioTrack = [mixComposition addMutableTrackWithMediaType:AVMediaTypeAudio
preferredTrackID:kCMPersistentTrackID_Invalid];
[AudioTrack insertTimeRange:CMTimeRangeMake(kCMTimeZero, firstAsset.duration)
ofTrack:[[firstAsset tracksWithMediaType:AVMediaTypeAudio] objectAtIndex:0] atTime:kCMTimeZero error:nil];
// 4 - Get path
NSString *outputPath = [[NSString alloc] initWithFormat:@"%@%@", NSTemporaryDirectory(), @"cutoutput.mov"];
NSURL *outputURL = [[NSURL alloc] initFileURLWithPath:outputPath];
NSFileManager *manager = [[NSFileManager alloc] init];
if ([manager fileExistsAtPath:outputPath])
{
[manager removeItemAtPath:outputPath error:nil];
}
// 5 - Create exporter
AVAssetExportSession *exporter = [[AVAssetExportSession alloc] initWithAsset:mixComposition
presetName:AVAssetExportPresetHighestQuality];
exporter.outputURL=outputURL;
exporter.outputFileType = AVFileTypeQuickTimeMovie;
exporter.shouldOptimizeForNetworkUse = YES;
exporter.videoComposition = mainCompositionInst;
[exporter exportAsynchronouslyWithCompletionHandler:^{
switch ([exporter status])
{
case AVAssetExportSessionStatusFailed:
NSLog(#"Export failed: %# : %#", [[exporter error] localizedDescription], [exporter error]);
completionBlock(nil);
break;
case AVAssetExportSessionStatusCancelled:
NSLog(#"Export canceled");
completionBlock(nil);
break;
default: {
NSURL *outputURL = exporter.outputURL;
dispatch_async(dispatch_get_main_queue(), ^{
completionBlock(outputURL);
});
break;
}
}
}];
}
Here is my interpretation of your question: you are capturing video on a device with a screen ratio of 4:3, so your AVCaptureVideoPreviewLayer is 4:3, but the video input device captures video in 16:9, so the resulting video is 'larger' than what you see in the preview.
If you are simply looking to crop out the extra pixels not caught by the preview, then check out http://www.netwalk.be/article/record-square-video-ios. That article shows how to crop the video into a square; however, you only need a few modifications to crop to 4:3 instead. I've gone and tested this; here are the changes I made:
Once you have the AVAssetTrack for the video you will need to calculate a new height.
// we convert the captured height i.e. 1080 to a 4:3 screen ratio and get the new height
CGFloat newHeight = clipVideoTrack.naturalSize.height/3*4;
Then modify these two lines, using newHeight.
videoComposition.renderSize = CGSizeMake(clipVideoTrack.naturalSize.height, newHeight);
CGAffineTransform t1 = CGAffineTransformMakeTranslation(clipVideoTrack.naturalSize.height, -(clipVideoTrack.naturalSize.width - newHeight)/2 );
So what we've done here is set the renderSize to a 4:3 ratio; the exact dimensions are based on the input device. We then use a CGAffineTransform to translate the video position so that what we saw in the AVCaptureVideoPreviewLayer is what is rendered to our file.
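As a quick sanity check of that arithmetic, assuming a capture with a naturalSize of 1920 x 1080 (the exact numbers depend on your input device):
// newHeight  = 1080 / 3 * 4                     = 1440   (4:3 companion for the 1080-pixel side)
// renderSize = (1080, 1440)
// t1         = translate(1080, -(1920 - 1440) / 2)
//            = translate(1080, -240)                     (recenters the cropped region)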
Edit: If you want to put it all together and crop a video based on the device's screen ratio (3:2, 4:3, 16:9), taking the video orientation into account, we need to add a few things.
First here is the modified sample code with a few critical alterations:
// output file
NSString* docFolder = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject];
NSString* outputPath = [docFolder stringByAppendingPathComponent:@"output2.mov"];
if ([[NSFileManager defaultManager] fileExistsAtPath:outputPath])
[[NSFileManager defaultManager] removeItemAtPath:outputPath error:nil];
// input file
AVAsset* asset = [AVAsset assetWithURL:outputFileURL];
AVMutableComposition *composition = [AVMutableComposition composition];
[composition addMutableTrackWithMediaType:AVMediaTypeVideo preferredTrackID:kCMPersistentTrackID_Invalid];
// input clip
AVAssetTrack *videoTrack = [[asset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0];
// crop clip to screen ratio
UIInterfaceOrientation orientation = [self orientationForTrack:asset];
BOOL isPortrait = (orientation == UIInterfaceOrientationPortrait || orientation == UIInterfaceOrientationPortraitUpsideDown) ? YES: NO;
CGFloat complimentSize = [self getComplimentSize:videoTrack.naturalSize.height];
CGSize videoSize;
if(isPortrait) {
videoSize = CGSizeMake(videoTrack.naturalSize.height, complimentSize);
} else {
videoSize = CGSizeMake(complimentSize, videoTrack.naturalSize.height);
}
AVMutableVideoComposition* videoComposition = [AVMutableVideoComposition videoComposition];
videoComposition.renderSize = videoSize;
videoComposition.frameDuration = CMTimeMake(1, 30);
AVMutableVideoCompositionInstruction *instruction = [AVMutableVideoCompositionInstruction videoCompositionInstruction];
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, CMTimeMakeWithSeconds(60, 30) );
// rotate and position video
AVMutableVideoCompositionLayerInstruction* transformer = [AVMutableVideoCompositionLayerInstruction videoCompositionLayerInstructionWithAssetTrack:videoTrack];
CGFloat tx = (videoTrack.naturalSize.width-complimentSize)/2;
if (orientation == UIInterfaceOrientationPortrait || orientation == UIInterfaceOrientationLandscapeRight) {
// invert translation
tx *= -1;
}
// t1: rotate and position video since it may have been cropped to screen ratio
CGAffineTransform t1 = CGAffineTransformTranslate(videoTrack.preferredTransform, tx, 0);
// t2/t3: mirror video horizontally
CGAffineTransform t2 = CGAffineTransformTranslate(t1, isPortrait?0:videoTrack.naturalSize.width, isPortrait?videoTrack.naturalSize.height:0);
CGAffineTransform t3 = CGAffineTransformScale(t2, isPortrait?1:-1, isPortrait?-1:1);
[transformer setTransform:t3 atTime:kCMTimeZero];
instruction.layerInstructions = [NSArray arrayWithObject: transformer];
videoComposition.instructions = [NSArray arrayWithObject: instruction];
// export
exporter = [[AVAssetExportSession alloc] initWithAsset:asset presetName:AVAssetExportPresetHighestQuality] ;
exporter.videoComposition = videoComposition;
exporter.outputURL=[NSURL fileURLWithPath:outputPath];
exporter.outputFileType=AVFileTypeQuickTimeMovie;
[exporter exportAsynchronouslyWithCompletionHandler:^(void){
NSLog(#"Exporting done!");
// added export to library for testing
ALAssetsLibrary *library = [[ALAssetsLibrary alloc] init];
if ([library videoAtPathIsCompatibleWithSavedPhotosAlbum:[NSURL fileURLWithPath:outputPath]]) {
[library writeVideoAtPathToSavedPhotosAlbum:[NSURL fileURLWithPath:outputPath]
completionBlock:^(NSURL *assetURL, NSError *error) {
NSLog(#"Saved to album");
if (error) {
}
}];
}
}];
What we added here is a call to get the new render size of the video based on cropping its dimensions to the screen ratio. Once we crop the size down, we need to translate the position to recenter the video. So we grab its orientation to move it in the proper direction. This will fix the off-center issue we saw with UIInterfaceOrientationLandscapeLeft. Finally CGAffineTransform t2, t3 mirror the video horizontally.
And here are the two new methods that make this happen:
- (CGFloat)getComplimentSize:(CGFloat)size {
CGRect screenRect = [[UIScreen mainScreen] bounds];
CGFloat ratio = screenRect.size.height / screenRect.size.width;
// we have to adjust the ratio for 16:9 screens
if (ratio == 1.775) ratio = 1.77777777777778;
return size * ratio;
}
- (UIInterfaceOrientation)orientationForTrack:(AVAsset *)asset {
UIInterfaceOrientation orientation = UIInterfaceOrientationPortrait;
NSArray *tracks = [asset tracksWithMediaType:AVMediaTypeVideo];
if([tracks count] > 0) {
AVAssetTrack *videoTrack = [tracks objectAtIndex:0];
CGAffineTransform t = videoTrack.preferredTransform;
// Portrait
if(t.a == 0 && t.b == 1.0 && t.c == -1.0 && t.d == 0) {
orientation = UIInterfaceOrientationPortrait;
}
// PortraitUpsideDown
if(t.a == 0 && t.b == -1.0 && t.c == 1.0 && t.d == 0) {
orientation = UIInterfaceOrientationPortraitUpsideDown;
}
// LandscapeRight
if(t.a == 1.0 && t.b == 0 && t.c == 0 && t.d == 1.0) {
orientation = UIInterfaceOrientationLandscapeRight;
}
// LandscapeLeft
if(t.a == -1.0 && t.b == 0 && t.c == 0 && t.d == -1.0) {
orientation = UIInterfaceOrientationLandscapeLeft;
}
}
return orientation;
}
These are pretty straightforward. The only thing to note is that in the getComplimentSize: method we have to manually adjust the ratio for 16:9, since the iPhone 5+ resolution is mathematically shy of true 16:9.
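For example (numbers for the iPhone 5 family, which is what that check guards against):
// iPhone 5 screen: 568 x 320 points
// 568.0 / 320.0 = 1.775               (what [UIScreen mainScreen].bounds yields)
// 16.0  / 9.0   = 1.77777777777778    (true 16:9)
// getComplimentSize: swaps in the exact 16:9 value so the render size isn't slightly off.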
AVCaptureVideoDataOutput is a concrete subclass of AVCaptureOutput that you use to process uncompressed frames from the video being captured, or to access compressed frames.
An instance of AVCaptureVideoDataOutput produces video frames that you can process using other media APIs. You can access the frames with the captureOutput:didOutputSampleBuffer:fromConnection: delegate method.
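For reference, here is a minimal setup sketch, assuming you already have an AVCaptureSession and that self adopts AVCaptureVideoDataOutputSampleBufferDelegate; the method and queue names are my own, not from the documentation quoted here.
// Sketch: attach a video data output to an existing capture session.
- (void)addVideoDataOutputToSession:(AVCaptureSession *)session
{
    AVCaptureVideoDataOutput *videoDataOutput = [[AVCaptureVideoDataOutput alloc] init];
    // Ask for BGRA pixel buffers and drop frames if processing falls behind.
    videoDataOutput.videoSettings = @{ (id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA) };
    videoDataOutput.alwaysDiscardsLateVideoFrames = YES;
    dispatch_queue_t queue = dispatch_queue_create("video.data.output.queue", DISPATCH_QUEUE_SERIAL);
    [videoDataOutput setSampleBufferDelegate:self queue:queue]; // self adopts AVCaptureVideoDataOutputSampleBufferDelegate
    if ([session canAddOutput:videoDataOutput]) {
        [session addOutput:videoDataOutput];
    }
}
// Delegate callback: each captured frame arrives here as a CMSampleBuffer.
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    CVImageBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    // Process or encode the pixel buffer with other media APIs here.
}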
Configuring a Session
You use a preset on the session to specify the image quality and resolution you want. A preset is a constant that identifies one of a number of possible configurations; in some cases the actual configuration is device-specific:
https://developer.apple.com/library/mac/documentation/AudioVideo/Conceptual/AVFoundationPG/Articles/04_MediaCapture.html
For the actual values these presets represent on various devices, see "Saving to a Movie File" and "Capturing Still Images."
If you want to set a size-specific configuration, you should check whether it is supported before setting it:
if ([session canSetSessionPreset:AVCaptureSessionPreset1280x720]) {
session.sessionPreset = AVCaptureSessionPreset1280x720;
}
else {
// Handle the failure.
}