The goal is to capture full-screen video on the device in Swift. In the code below, capture appears to happen at full screen (the camera preview used while recording fills the screen), but the video is rendered at a different resolution. On a 5S specifically, capture seems to happen at 320×568, but rendering happens at 320×480.
How can I capture and render full-screen video?
Video capture code:
private func initPBJVision() {
    // Store PBJVision in var for convenience
    let vision = PBJVision.sharedInstance()

    // Configure PBJVision
    vision.delegate = self
    vision.cameraMode = PBJCameraMode.Video
    vision.cameraOrientation = PBJCameraOrientation.Portrait
    vision.focusMode = PBJFocusMode.ContinuousAutoFocus
    vision.outputFormat = PBJOutputFormat.Preset
    vision.cameraDevice = PBJCameraDevice.Back

    // Let taps start/pause recording
    let tapHandler = UITapGestureRecognizer(target: self, action: "doTap:")
    view.addGestureRecognizer(tapHandler)

    // Log status
    print("Configured PBJVision")
}

private func startCameraPreview() {
    // Store PBJVision in var for convenience
    let vision = PBJVision.sharedInstance()

    // Connect PBJVision camera preview to <videoView>
    // -- Get preview width
    let deviceWidth = CGRectGetWidth(view.frame)
    let deviceHeight = CGRectGetHeight(view.frame)

    // -- Configure PBJVision's preview layer
    let previewLayer = vision.previewLayer
    previewLayer.frame = CGRectMake(0, 0, deviceWidth, deviceHeight)
    previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
    ...
}
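For diagnosis, it is worth confirming what resolution actually lands in the recorded file, independent of the preview. A minimal sketch (pure AVFoundation; no PBJVision-specific API assumed) that logs the recorded video track's upright dimensions:

import AVFoundation
import UIKit

/// Logs the pixel dimensions the camera actually recorded, so the
/// capture resolution can be compared against the screen size.
func logCapturedSize(fileUrl: NSURL) {
    let asset = AVURLAsset(URL: fileUrl, options: nil)
    guard let track = asset.tracksWithMediaType(AVMediaTypeVideo).first else {
        print("No video track found")
        return
    }
    // naturalSize is the encoded size; portrait footage is often stored
    // rotated, so apply preferredTransform to get the upright dimensions.
    let size = CGSizeApplyAffineTransform(track.naturalSize, track.preferredTransform)
    print("Captured size: \(abs(size.width)) x \(abs(size.height))")
}

If this already reports 480-line (4:3) footage, the size is coming from the capture session itself, and no composition math will recover the missing pixels; the capture-side configuration (e.g. PBJVision's outputFormat above) is the place to look first.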
Video rendering code:
func exportVideo(fileUrl: NSURL) {
    // Create main composition object
    let videoAsset = AVURLAsset(URL: fileUrl, options: nil)
    let mainComposition = AVMutableComposition()
    let compositionVideoTrack = mainComposition.addMutableTrackWithMediaType(AVMediaTypeVideo,
        preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))
    let compositionAudioTrack = mainComposition.addMutableTrackWithMediaType(AVMediaTypeAudio,
        preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))

    // -- Extract and apply video & audio tracks to composition
    let sourceVideoTrack = videoAsset.tracksWithMediaType(AVMediaTypeVideo)[0]
    let sourceAudioTrack = videoAsset.tracksWithMediaType(AVMediaTypeAudio)[0]
    do {
        try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration),
            ofTrack: sourceVideoTrack, atTime: kCMTimeZero)
    } catch {
        print("Error with insertTimeRange. Video error: \(error).")
    }
    do {
        try compositionAudioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration),
            ofTrack: sourceAudioTrack, atTime: kCMTimeZero)
    } catch {
        print("Error with insertTimeRange. Audio error: \(error).")
    }

    // Add text to video
    // -- Create video composition object
    let renderSize = compositionVideoTrack.naturalSize
    let videoComposition = AVMutableVideoComposition()
    videoComposition.renderSize = renderSize
    videoComposition.frameDuration = CMTimeMake(Int64(1), Int32(videoFrameRate))

    // -- Add instruction to video composition object
    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRangeMake(kCMTimeZero, videoAsset.duration)
    let videoLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: compositionVideoTrack)
    instruction.layerInstructions = [videoLayerInstruction]
    videoComposition.instructions = [instruction]

    // -- Define video frame
    let videoFrame = CGRectMake(0, 0, renderSize.width, renderSize.height)
    print("Video Frame: \(videoFrame)") // <-- Prints frame of 320x480 so render size already wrong here
    ...
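My understanding is that compositionVideoTrack.naturalSize reflects what the capture session recorded, while AVLayerVideoGravityResizeAspectFill only crops the on-screen preview, so the exported composition keeps the 4:3 size regardless of what the preview showed. A hedged sketch of one workaround, assuming upright portrait footage: pick a render size with the screen's aspect ratio and scale-fill the source track into it via a layer-instruction transform. The crop-and-center math below is my own illustration of AspectFill-like behavior, not anything PBJVision does:

import AVFoundation
import UIKit

// Sketch: compute a render size with the screen's aspect ratio and a
// transform that scale-fills the source track into it, cropping the
// overflow the way AspectFill crops the preview. Assumes the track's
// naturalSize is already upright (portrait).
func aspectFillSettings(sourceSize: CGSize, screenSize: CGSize) -> (renderSize: CGSize, transform: CGAffineTransform) {
    // Keep the source height, but adopt the screen's width:height ratio.
    let renderSize = CGSizeMake(sourceSize.height * screenSize.width / screenSize.height,
                                sourceSize.height)
    // Scale up until both dimensions cover the render rect...
    let scale = max(renderSize.width / sourceSize.width,
                    renderSize.height / sourceSize.height)
    // ...then center the result, cropping whatever falls outside.
    var transform = CGAffineTransformMakeScale(scale, scale)
    transform = CGAffineTransformTranslate(transform,
        (renderSize.width / scale - sourceSize.width) / 2,
        (renderSize.height / scale - sourceSize.height) / 2)
    return (renderSize, transform)
}

// Hypothetical usage inside exportVideo, replacing the plain naturalSize assignment:
// let (renderSize, fillTransform) = aspectFillSettings(compositionVideoTrack.naturalSize,
//                                                      screenSize: UIScreen.mainScreen().bounds.size)
// videoComposition.renderSize = renderSize
// videoLayerInstruction.setTransform(fillTransform, atTime: kCMTimeZero)

Note this only reshapes what was recorded; on a 5S it would crop 320×480 footage to a 270×480 frame with the screen's proportions, not add the pixels a true 320×568-shaped capture would contain.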