Audio is missing when adding text to the pixelBuffer

I'm trying to add a text overlay to a video. When recording on an iPhone 5s (or older) at high quality and drawing the text onto each frame, the audio drops out after 1 or 2 seconds. This does not happen on newer devices such as the iPhone 6/6s. If I remove the text-drawing step, recording works correctly on all devices, and if I lower the video quality on the 5s it also works fine. How can I record video with audio on the iPhone 5s while keeping the text overlay?

Here is my code:

import Foundation
import AVFoundation
import AssetsLibrary
import UIKit
import CoreImage

class VideoWriter : NSObject{
    var fileWriter: AVAssetWriter!
    var videoInput: AVAssetWriterInput!
    var audioInput: AVAssetWriterInput!
    var assetWriterPixelBufferInput: AVAssetWriterInputPixelBufferAdaptor?
    var presentationTime = kCMTimeZero
    var wod:WOD!
    var watermark =  Watermark()
    var watermarkData: Dictionary<String, Any>?

    init(fileUrl: URL!, height: Int, width: Int, channels: Int, samples: Float64) {

        fileWriter = try? AVAssetWriter(outputURL: fileUrl, fileType: AVFileType.mp4)

        // Video input: H.264 at the requested dimensions.
        let videoOutputSettings: [String: Any] = [
            AVVideoCodecKey : AVVideoCodecH264 as AnyObject,
            AVVideoWidthKey : width as AnyObject,
            AVVideoHeightKey : height as AnyObject
        ]
        videoInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoOutputSettings)
        videoInput.expectsMediaDataInRealTime = true
        fileWriter.add(videoInput)

        // Pixel buffer adaptor used to append the frames that carry the text overlay.
        let sourcePixelBufferAttributes: [String: Any] = [
            kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA,
            kCVPixelBufferWidthKey as String: width,
            kCVPixelBufferHeightKey as String: height
        ]

        assetWriterPixelBufferInput = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoInput, sourcePixelBufferAttributes: sourcePixelBufferAttributes)

        // Audio input: AAC with the capture device's channel count and sample rate.
        let audioOutputSettings: [String: Any] = [
            AVFormatIDKey : Int(kAudioFormatMPEG4AAC) as AnyObject,
            AVNumberOfChannelsKey : channels as AnyObject,
            AVSampleRateKey : samples as AnyObject,
            AVEncoderBitRateKey : 128000 as AnyObject
        ]
        audioInput = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: audioOutputSettings)
        audioInput.expectsMediaDataInRealTime = true
        fileWriter.add(audioInput)
    }

    func write(_ sample: CMSampleBuffer, isVideo: Bool) {
        if CMSampleBufferDataIsReady(sample) {
            if fileWriter.status == AVAssetWriterStatus.unknown {
                print("Start writing, isVideo = \(isVideo), status = \(fileWriter.status.rawValue)")
                let startTime = CMSampleBufferGetPresentationTimeStamp(sample)
                fileWriter.startWriting()
                fileWriter.startSession(atSourceTime: startTime)
            }
            if fileWriter.status == AVAssetWriterStatus.failed {
                print("Error occurred, isVideo = \(isVideo), status = \(fileWriter.status.rawValue), \(fileWriter.error!.localizedDescription)")
                return
            }

            if isVideo {
                if videoInput.isReadyForMoreMediaData {
                    // Draw the text overlay into the frame's pixel buffer, then append it.
                    let time = CMSampleBufferGetPresentationTimeStamp(sample)
                    let pixelBuffer = self.watermark.addWatermark(data: sample, values: self.watermarkData!)
                    self.assetWriterPixelBufferInput!.append(pixelBuffer, withPresentationTime: time)
                }
            } else {
                if audioInput.isReadyForMoreMediaData {
                    audioInput.append(sample)
                }
            }
        }
    }
}
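
For context, the writer above is fed from an AVCaptureSession's video and audio data outputs. A simplified sketch of that delegate (CaptureDelegate and its property names are placeholders, not my exact code):

import AVFoundation

class CaptureDelegate: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate {

    let videoWriter: VideoWriter

    init(videoWriter: VideoWriter) {
        self.videoWriter = videoWriter
        super.init()
    }

    func captureOutput(_ output: AVCaptureOutput,
                       didOutput sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection) {
        // Both the video and the audio data output share this delegate;
        // the writer only needs to know which kind of buffer arrived.
        let isVideo = output is AVCaptureVideoDataOutput
        videoWriter.write(sampleBuffer, isVideo: isVideo)
    }
}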

If, instead of going through assetWriterPixelBufferInput, we use videoInput.append(sample), it works, but we lose the text overlay.
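
In other words, the video branch that keeps the audio but drops the overlay looks like this (same write(_:isVideo:) method, with the watermark step removed):

if isVideo {
    if videoInput.isReadyForMoreMediaData {
        // Appending the untouched sample buffer keeps audio intact on every device,
        // but the frame is written without the text overlay.
        videoInput.append(sample)
    }
}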

Watermark (text overlay) sample code:

class Watermark {

    func addWatermark(data: CMSampleBuffer, values: Dictionary<String, Any>) -> CVPixelBuffer{
        let pixelBuffer = CMSampleBufferGetImageBuffer(data)
        CVPixelBufferLockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))

......

        self.writeImage(image: image, timerType: timerType, date: date, name: name, wod: wod, timer: timer, rounds: rounds, reps: reps, status: status, toBuffer: pixelBuffer!)
        CVPixelBufferUnlockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
        return pixelBuffer!
    }

    func writeImage(image overlayImage:UIImage, timerType:String, date:String, name:String, wod: String, timer:String, rounds:String, reps:String,status:String, toBuffer pixelBuffer:CVPixelBuffer){
        let textImage = self.createTextImage(image: overlayImage, timerType: timerType, date: date, userName: name, myWod: wod, timer: timer,  rounds:rounds, reps:reps, status: status, size: CGSize(width: CVPixelBufferGetWidth(pixelBuffer), height: CVPixelBufferGetHeight(pixelBuffer)))

        let maskImage = CIImage(image: textImage)
        let colorSpace = CGColorSpaceCreateDeviceRGB();
        let options = [kCIImageColorSpace: colorSpace]
        let inputImage = CIImage(cvImageBuffer: pixelBuffer, options: options)
        let filter = CIFilter(name: "CISourceOverCompositing")
        filter?.setValue(inputImage, forKey: kCIInputBackgroundImageKey)
        filter?.setValue(maskImage, forKey: kCIInputImageKey)
        let outputImage = filter?.outputImage

        var bitmapInfo: UInt32 = CGBitmapInfo.byteOrder32Little.rawValue
        bitmapInfo |= CGImageAlphaInfo.premultipliedFirst.rawValue & CGBitmapInfo.alphaInfoMask.rawValue

        let context = CGContext(data: CVPixelBufferGetBaseAddress(pixelBuffer), width: CVPixelBufferGetWidth(pixelBuffer), height: CVPixelBufferGetHeight(pixelBuffer), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer), space: CGColorSpaceCreateDeviceRGB(), bitmapInfo: bitmapInfo)
        if context != nil{
            let ciContext = CIContext(cgContext: context!, options: nil)
            ciContext.render(outputImage!, to: pixelBuffer, bounds: outputImage!.extent, colorSpace: CGColorSpaceCreateDeviceRGB())
        }
    }
}
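
createTextImage is not shown above; it just draws the overlay strings into a transparent UIImage that matches the frame size. A much simplified, hypothetical stand-in (with fewer parameters than the real helper) would be:

import UIKit

func createTextImage(text: String, size: CGSize) -> UIImage {
    // Hypothetical, reduced version of the real createTextImage(image:timerType:...) helper:
    // render the overlay text into a transparent image the size of the video frame.
    let renderer = UIGraphicsImageRenderer(size: size)
    return renderer.image { _ in
        let attributes: [NSAttributedString.Key: Any] = [
            .font: UIFont.boldSystemFont(ofSize: 48),
            .foregroundColor: UIColor.white
        ]
        (text as NSString).draw(at: CGPoint(x: 40, y: 40), withAttributes: attributes)
    }
}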
