Trying to overlay an image onto a CALayer while preserving the position of a subview UITextView

Problem description

First-time poster, looooooong-time user. I'm using SwiftUI for the layout and UIRepresentables for the camera work (Xcode 11.7), and I'm attempting to overlay an image onto a CALayer (to eventually export to video). The image is converted from a UITextView, so the user can edit it freely, pinch/zoom it, and drag it around to their heart's content. After several days of searching and reading the Ray Wenderlich tutorials, I've hit a wall. Screenshots below.

Before: freeform text 'coffee' added to the view

After: exported movie still, 'coffee' text position is incorrect
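(For context, the editable overlay described above is assumed to be set up roughly along the lines of the sketch below; the coordinator and handler names are illustrative and do not appear in the original post. Note that a pinch applied as a transform also changes textView.frame, which matters later when mapping the overlay into the video.)

import UIKit

// Hypothetical setup for the draggable/pinchable text overlay (not from the original post).
final class TextOverlayCoordinator: NSObject {
    func attachGestures(to textView: UITextView) {
        textView.isUserInteractionEnabled = true
        textView.addGestureRecognizer(UIPanGestureRecognizer(target: self, action: #selector(handlePan(_:))))
        textView.addGestureRecognizer(UIPinchGestureRecognizer(target: self, action: #selector(handlePinch(_:))))
    }

    // Move the text view by the pan translation, then reset the translation.
    @objc private func handlePan(_ gesture: UIPanGestureRecognizer) {
        guard let view = gesture.view else { return }
        let translation = gesture.translation(in: view.superview)
        view.center = CGPoint(x: view.center.x + translation.x, y: view.center.y + translation.y)
        gesture.setTranslation(.zero, in: view.superview)
    }

    // Scale the text view with the pinch, then reset the gesture's scale.
    @objc private func handlePinch(_ gesture: UIPinchGestureRecognizer) {
        guard let view = gesture.view else { return }
        view.transform = view.transform.scaledBy(x: gesture.scale, y: gesture.scale)
        gesture.scale = 1.0
    }
}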

Below is the export function. I suspect I'm doing something wrong with relativePosition.

Thanks for any suggestions; this is my first attempt at writing an iOS app.

static func exportLayersToVideo(_ fileUrl: String, _ textView: UITextView) {
    let fileURL = NSURL(fileURLWithPath: fileUrl)
    let composition = AVMutableComposition()
    let vidAsset = AVURLAsset(url: fileURL as URL, options: nil)
    
    // get video track
    let vtrack =  vidAsset.tracks(withMediaType: AVMediaType.video)
    let videoTrack: AVAssetTrack = vtrack[0]
    let vid_timerange = CMTimeRangeMake(start: CMTime.zero, duration: vidAsset.duration)
    
    let tr: CMTimeRange = CMTimeRange(start: CMTime.zero, duration: CMTime(seconds: 10.0, preferredTimescale: 600))
    composition.insertEmptyTimeRange(tr)
    
    let trackID: CMPersistentTrackID = CMPersistentTrackID(kCMPersistentTrackID_Invalid)
    
    if let compositionvideoTrack: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: trackID) {
        
        do {
            try compositionvideoTrack.insertTimeRange(vid_timerange, of: videoTrack, at: CMTime.zero)
        } catch {
            print("Error inserting video track: \(error)")
        }
        
        compositionvideoTrack.preferredTransform = videoTrack.preferredTransform
        
    } else {
        print("unable to add video track")
        return
    }
    
    let size = videoTrack.naturalSize
    
    let parentlayer = CALayer()
    parentlayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
    
    let videolayer = CALayer()
    videolayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
    
    // Convert UITextView to Image
    let renderer = UIGraphicsImageRenderer(size: textView.bounds.size)
    let image = renderer.image { ctx in
        textView.drawHierarchy(in: textView.bounds, afterScreenUpdates: true)
    }
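    // Note: drawHierarchy(in:afterScreenUpdates:) captures reliably only while the view is
    // part of an on-screen hierarchy; calling textView.layer.render(in:) on the renderer's
    // CGContext inside the closure is a common alternative for offscreen snapshots.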
    
    let imglayer = CALayer()
    let scaledAspect: CGFloat = image.size.width / image.size.height
    let scaledWidth = size.width
    let scaledHeight = scaledWidth / scaledAspect
    let relativePosition = parentlayer.convert(textView.frame.origin, from: textView.layer)
    imglayer.frame = CGRect(x: relativePosition.x, y: relativePosition.y, width: scaledWidth, height: scaledHeight)
    imglayer.contents = image.cgImage

    // Adding videolayer and imglayer
    parentlayer.addSublayer(videolayer)
    parentlayer.addSublayer(imglayer)

    let layercomposition = AVMutableVideoComposition()
    layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
    layercomposition.renderSize = size
    layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)
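    // With postProcessingAsVideoLayer, AVFoundation renders each video frame into videolayer
    // and then composites the rest of parentlayer's sublayers (imglayer here) on top, using
    // the video's pixel dimensions rather than the on-screen view's coordinate space.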
    
    // instruction for overlay
    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: composition.duration)
    let videotrack = composition.tracks(withMediaType: AVMediaType.video)[0] as AVAssetTrack
    let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
    instruction.layerInstructions = [layerinstruction]
    layercomposition.instructions = [instruction]
    
    //  create new file to receive data
    let dirPaths = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
    let docsDir = dirPaths[0] as NSString
    let movieFilePath = docsDir.appendingPathComponent("result.mov")
    let movieDestinationUrl = NSURL(fileURLWithPath: movieFilePath)
    
    // use AVAssetExportSession to export video
    let assetExport = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality)
    assetExport?.outputFileType = AVFileType.mov
    assetExport?.videoComposition = layercomposition
    
    // Check exist and remove old files
    do { // delete old video
        try FileManager.default.removeItem(at: movieDestinationUrl as URL)
    } catch { print("Error Removing Existing File: \(error.localizedDescription).") }
    
    do { // delete old video
        try FileManager.default.removeItem(at: fileURL as URL)
    } catch { print("Error Removing Existing File: \(error.localizedDescription).") }
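    // Caution: the composition still reads samples from fileURL during export, so deleting
    // the source file here (before exportAsynchronously finishes) can make the export fail;
    // moving this cleanup into the completion handler is safer.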
    
    assetExport?.outputURL = movieDestinationUrl as URL
    assetExport?.exportAsynchronously(completionHandler: {
        switch assetExport!.status {
        case AVAssetExportSession.Status.failed:
            print("Failed")
            print(assetExport?.error ?? "unknown error")
        case AVAssetExportSession.Status.cancelled:
            print("cancelled")
            print(assetExport?.error ?? "unknown error")
        default:
            print("Movie complete")
            
            PHPhotoLibrary.shared().performChanges({
                PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: movieDestinationUrl as URL)
            }) { saved, error in
                if saved {
                    print("Saved")
                }
            }
            
        }
    })
}


Solution

It looks like the x position is correct, but the y is off. I think that's because the origin is at the bottom left rather than the top left. Try this:

var relativePosition = parentlayer.convert(textView.frame.origin, from: textView.layer)
relativePosition.y = size.height - relativePosition.y
imglayer.frame = CGRect(x: relativePosition.x, y: relativePosition.y, width: scaledWidth, height: scaledHeight)
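(Not part of the original answer: if flipping y alone still leaves the overlay offset, the mapping usually also needs to subtract the overlay's own height when flipping and to scale from the on-screen preview's coordinate space to the video's pixel size. A rough sketch, where previewSize is an assumed, illustrative name for the size of the view the text view is dragged around in:)

// Sketch under assumptions: previewSize is the on-screen container's size (illustrative name).
// UIKit's origin is top-left; the exported layer space here has its origin at the bottom left.
let scaleX = size.width / previewSize.width
let scaleY = size.height / previewSize.height
let overlayWidth = textView.frame.width * scaleX
let overlayHeight = textView.frame.height * scaleY
let overlayX = textView.frame.origin.x * scaleX
// Flip y and subtract the overlay's own height so its top edge lands where it was on screen.
let overlayY = size.height - (textView.frame.origin.y * scaleY) - overlayHeight
imglayer.frame = CGRect(x: overlayX, y: overlayY, width: overlayWidth, height: overlayHeight)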