根据自然大小的分辨率将视频与图像合并

问题描述

我正在尝试把贴纸合并到视频上。目前横向(landscape)视频可以正常工作,但纵向(portrait)视频存在问题。

在合并时,我无法根据视频的自然尺寸(naturalSize)保持贴纸的比例和原点(origin);对于纵向视频,结果完全错乱。如果有人想查看代码并对其进行测试,这里是 GitHub 链接

以下是最重要的两种方法

第一个方法获取视频的分辨率,并据此调整承载视频与图像的容器视图的大小

/// Sizes the video container to the asset's aspect-fitted dimensions,
/// attaches an `AVPlayerLayer` for the current player item, and starts playback.
/// The sticker overlay view is re-raised above the freshly added video layer.
private func setupVideoPlayer() {
    let fittedSize = currentVideoFrameSize()

    // Constrain the container so it exactly wraps the aspect-fitted video.
    videoWidthConstraint.constant = fittedSize.width
    videoeHeightConstraint.constant = fittedSize.height

    videoPlayer = AVPlayer(playerItem: playerItem)

    // Host the video in a layer filling the (already aspect-fitted) container.
    let playerLayer = AVPlayerLayer(player: videoPlayer)
    playerLayer.frame = CGRect(origin: .zero, size: fittedSize)
    playerLayer.videoGravity = .resizeAspect
    videoContentView.layer.addSublayer(playerLayer)

    // Keep the stickers above the video layer we just inserted.
    videoContentView.bringSubviewToFront(stickersContentView)
    videoPlayer?.play()
}

/// Returns the on-screen size of the current video, aspect-fitted into the
/// container's superview, taking the track's display orientation into account.
///
/// `AVAssetTrack.naturalSize` reports the *unrotated* buffer dimensions, so a
/// portrait-recorded video comes back landscape-oriented. The previous
/// implementation aspect-fitted the unrotated size and swapped width/height
/// afterwards, which yields a size that no longer fits the view for portrait
/// videos. Swapping the dimensions *before* the fit calculation fixes that.
///
/// - Returns: The aspect-fitted display size, or `.zero` when the asset,
///   video track, or superview is unavailable.
private func currentVideoFrameSize() -> CGSize {
    guard let asset = playerItem?.asset as? AVURLAsset,
          let track = asset.tracks(withMediaType: .video).first,
          let container = videoContentView.superview else { return .zero }

    // Display-oriented track size: swap width/height for portrait first.
    let assetInfo = VideoManager.shared.orientationFromTransform(transform: track.preferredTransform)
    var trackSize = track.naturalSize
    if assetInfo.isPortrait {
        trackSize = CGSize(width: trackSize.height, height: trackSize.width)
    }

    let viewSize = container.bounds.size
    // Guard against degenerate sizes to avoid division by zero.
    guard trackSize.width > 0, trackSize.height > 0,
          viewSize.width > 0, viewSize.height > 0 else { return .zero }

    let trackRatio = trackSize.width / trackSize.height
    let viewRatio = viewSize.width / viewSize.height

    // Aspect fit: the relatively smaller view dimension constrains the result.
    if viewRatio > trackRatio {
        // View is relatively wider than the video: height constrains.
        return CGSize(width: viewSize.height * trackRatio, height: viewSize.height)
    } else {
        // View is relatively taller (or equal): width constrains.
        return CGSize(width: viewSize.width, height: viewSize.width / trackRatio)
    }
}

第二个方法执行合并:

/// Composites animated image overlays (stickers) on top of `video` and exports
/// the result as `stickers_video_merge.mov` in the temporary directory.
///
/// - Parameters:
///   - video: The source video together with the preview frame it was shown in
///     (`video.frame`), used to map sticker coordinates into render space.
///   - images: Sticker overlays; each `frame` is in preview-view coordinates.
///   - completion: Delivered on the main queue via `exportDidFinish` once the
///     export finishes.
func makeVideoFrom(video: VideoData, images: [VideoOverlayImage], completion: @escaping Completion) -> Void {
    var outputSize: CGSize = .zero
    var insertTime: CMTime = .zero
    var arrayLayerInstructions: [AVMutableVideoCompositionLayerInstruction] = []
    var arrayLayerImages: [CALayer] = []

    // Init composition
    let mixComposition = AVMutableComposition()

    // Get video track.
    // NOTE(review): bailing out here silently drops `completion`, so callers
    // never hear back; consider reporting the failure. Left as-is to preserve
    // the existing contract.
    guard let videoTrack = video.asset.tracks(withMediaType: AVMediaType.video).first else { return }

    let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)

    // `naturalSize` is unrotated; swap for portrait so `outputSize` matches
    // the orientation the viewer actually sees.
    var videoSize = videoTrack.naturalSize
    if assetInfo.isPortrait == true {
        videoSize.width = videoTrack.naturalSize.height
        videoSize.height = videoTrack.naturalSize.width
    }

    if videoSize.height > outputSize.height {
        outputSize = videoSize
    }

    if outputSize.width == 0 || outputSize.height == 0 {
        outputSize = defaultSize
    }

    // Get audio track (optional — videos without audio are still merged).
    let audioTrack = video.asset.tracks(withMediaType: AVMediaType.audio).first

    // Init video & audio composition tracks.
    let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
    let audioCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))

    do {
        let startTime = CMTime.zero
        let duration = video.asset.duration

        // Add video track to the composition at the current insert time.
        try videoCompositionTrack?.insertTimeRange(CMTimeRangeMake(start: startTime, duration: duration), of: videoTrack, at: insertTime)

        // Add audio track. (FIX: the original call was missing the
        // `duration:` argument and a closing parenthesis — it did not compile.)
        if let audioTrack = audioTrack {
            try audioCompositionTrack?.insertTimeRange(CMTimeRangeMake(start: startTime, duration: duration), of: audioTrack, at: insertTime)
        }

        // Add the transform/scale instruction for the video track.
        let layerInstruction = videoCompositionInstructionForTrack(track: videoCompositionTrack!, asset: video.asset, standardSize: outputSize, atTime: insertTime)

        // Fade the track out over its last second.
        // (FIX: `setopacityRamp` → `setOpacityRamp`, the correct selector.)
        let endTime = CMTimeAdd(insertTime, duration)
        let timeScale = video.asset.duration.timescale
        let durationAnimation = CMTime.init(seconds: 1, preferredTimescale: timeScale)
        layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: CMTimeRange.init(start: endTime, duration: durationAnimation))

        arrayLayerInstructions.append(layerInstruction)

        // Advance the insert time past this clip.
        insertTime = CMTimeAdd(insertTime, duration)
    } catch {
        // NOTE(review): the error is only logged and the export continues;
        // consider surfacing it to the caller.
        print("Load track error")
    }

    // Build one layer per sticker, scaled from preview coordinates into
    // render (output) coordinates.
    for image in images {
        let animatedImageLayer = CALayer()

        // Preview-to-output scale. For portrait assets the preview frame's
        // width/height are swapped relative to the output.
        let aspectWidth  = assetInfo.isPortrait ? outputSize.width / video.frame.height : outputSize.width / video.frame.width
        let aspectHeight = assetInfo.isPortrait ? outputSize.height / video.frame.width : outputSize.height / video.frame.height
        let aspectRatio = min(aspectWidth, aspectHeight)

        let scaledWidth  = image.frame.width * aspectRatio
        let scaledHeight = image.frame.height * aspectRatio

        // A CALayer's `position` is its centre point.
        let cx = (image.frame.minX * aspectRatio) + (scaledWidth / 2)
        let cy = (image.frame.minY * aspectRatio) + (scaledHeight / 2)

        var iFrame = image.frame
        iFrame.size.width = scaledWidth
        // FIX: the original assigned `scaledWidth` to the height as well,
        // distorting every non-square sticker.
        iFrame.size.height = scaledHeight
        animatedImageLayer.frame = iFrame
        animatedImageLayer.position = CGPoint(x: assetInfo.isPortrait ? cy : cx, y: assetInfo.isPortrait ? cx : cy)

        if let animatedURL = URL(string: image.url), let animation = animatedImage(with: animatedURL) {
            animatedImageLayer.add(animation, forKey: "contents")
        }

        arrayLayerImages.append(animatedImageLayer)
    }

    // Init video layer. (FIX: the original `CGRect` initializers were missing
    // the `y:` / `width:` arguments and did not compile.)
    let videoLayer = CALayer()
    videoLayer.frame = CGRect(x: 0, y: 0, width: outputSize.width, height: outputSize.height)

    let parentlayer = CALayer()
    parentlayer.frame = CGRect(x: 0, y: 0, width: outputSize.width, height: outputSize.height)

    parentlayer.addSublayer(videoLayer)

    // Sticker layers sit above the video layer.
    arrayLayerImages.forEach { parentlayer.addSublayer($0) }

    // Main video composition instruction.
    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(start: .zero, duration: insertTime)
    mainInstruction.layerInstructions = arrayLayerInstructions

    // Main video composition.
    let mainComposition = AVMutableVideoComposition()
    mainComposition.instructions = [mainInstruction]
    mainComposition.renderSize = outputSize
    mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentlayer)
    mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)

    // Export destination in the temp directory.
    // (FIX: `NstemporaryDirectory` → `NSTemporaryDirectory`.)
    let path = NSTemporaryDirectory().appending("stickers_video_merge.mov")
    let exportURL = URL(fileURLWithPath: path)

    // Remove a previous export if it exists.
    FileManager.default.removeItemIfExisted(exportURL)

    let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
    exporter?.outputURL = exportURL
    exporter?.outputFileType = .mov
    // FIX: `shouldOptimizeforNetworkUse` → `shouldOptimizeForNetworkUse`.
    exporter?.shouldOptimizeForNetworkUse = true
    exporter?.videoComposition = mainComposition

    // Export asynchronously and report back on the main queue.
    // (FIX: `dispatchQueue` → `DispatchQueue`.)
    exporter?.exportAsynchronously() {
        DispatchQueue.main.async {
            self.exportDidFinish(exporter: exporter, videoURL: exportURL, completion: completion)
        }
    }
}

主要问题:

  1. 在横向视频中,图像的原点(origin)未能保持,并且按自然尺寸换算后的图像大小看起来比预期的要大
  2. 在纵向视频中,我无法在背景中获取视频层,尽管它在那里并正在播放。它在最终结果中显示黑色视图。此外,图像原点和大小未根据原始视频大小进行维护。

您也可以检查 dev 分支,在那里您不需要保持任何比例,但图像帧和纵向视频存在问题。任何人都可以在您的设备上运行它并了解实际问题。

解决方法

暂无找到可以解决该程序问题的有效方法,小编努力寻找整理中!

如果你已经找到好的解决方法,欢迎将解决方案带上本链接一起发送给小编。

小编邮箱:dio#foxmail.com (将#修改为@)