// Provides the audio samples (captured from AVSpeechSynthesizer) and the video
// frames (built from Misc.obj.selectedPhotos) used to write the output video.
import UIKit
import AVFoundation

class SampleProvider {

    var audios = [CMSampleBuffer]()
    var infoVideo = [(photoIndex: Int, frameTime: Float64)]()

    private var frame: CVPixelBuffer?
    var qFrame = 0                                      // frames remaining for the current photo
    let frameDuration = CMTime(value: 1, timescale: 30) // one frame at 30 fps
    private var frameTime = CMTime.zero
    var extraFrameTime = Float64.zero
    var counter = 0                                     // index of the next audio buffer / photo entry

    init(audios: [CMSampleBuffer]) {
        self.audios = audios
    }

    init(infoVideo: [(photoIndex: Int, frameTime: Float64)]) {
        self.infoVideo = infoVideo
    }

    /// Returns the next audio sample buffer, or nil once all buffers have been consumed.
    func getNextAudio() -> CMSampleBuffer? {
        guard !audios.isEmpty, counter < audios.count else { return nil }
        let buffer = audios[counter]
        counter += 1
        return buffer
    }

    /// Loads the next photo into `frame` and returns how many video frames it should span.
    private func startNewFrame() -> Int? {
        guard !infoVideo.isEmpty, counter < infoVideo.count else { return nil }

        // Look up the photo for this entry and convert it to a pixel buffer.
        let photoIndex = infoVideo[counter].photoIndex
        let image = Misc.obj.selectedPhotos[photoIndex]
        frame = image.toCVPixelBuffer()

        counter += 1
        return 30 // one second of frames at 30 fps
    }

    /// Returns the pixel buffer for the next video frame, or nil when all photos are exhausted.
    func moreFrame() -> CVPixelBuffer? {
        if qFrame == 0 {
            // The current photo has used up its frames: advance to the next one.
            if let newQuantFrame = startNewFrame() {
                qFrame = newQuantFrame
            } else {
                return nil
            }
        }
        qFrame -= 1
        return frame
    }
}
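
// SampleProvider calls UIImage.toCVPixelBuffer(), which is not a UIKit API.
// The extension below is a minimal sketch of what that helper could look like,
// assuming a 32BGRA buffer rendered through Core Graphics; the project's real
// implementation may differ (e.g. in pixel format or scale handling).
extension UIImage {
    func toCVPixelBuffer() -> CVPixelBuffer? {
        let width = Int(size.width)
        let height = Int(size.height)
        let attrs: [CFString: Any] = [
            kCVPixelBufferCGImageCompatibilityKey: true,
            kCVPixelBufferCGBitmapContextCompatibilityKey: true
        ]

        var pixelBuffer: CVPixelBuffer?
        let status = CVPixelBufferCreate(kCFAllocatorDefault,
                                         width,
                                         height,
                                         kCVPixelFormatType_32BGRA,
                                         attrs as CFDictionary,
                                         &pixelBuffer)
        guard status == kCVReturnSuccess, let buffer = pixelBuffer else { return nil }

        CVPixelBufferLockBaseAddress(buffer, [])
        defer { CVPixelBufferUnlockBaseAddress(buffer, []) }

        // Draw the image into the pixel buffer's backing memory.
        guard let cgImage = cgImage,
              let context = CGContext(data: CVPixelBufferGetBaseAddress(buffer),
                                      width: width,
                                      height: height,
                                      bitsPerComponent: 8,
                                      bytesPerRow: CVPixelBufferGetBytesPerRow(buffer),
                                      space: CGColorSpaceCreateDeviceRGB(),
                                      bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue
                                        | CGBitmapInfo.byteOrder32Little.rawValue)
        else { return nil }

        context.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))
        return buffer
    }
}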
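
// A minimal sketch of a driver loop showing how moreFrame() and frameDuration
// might feed an AVAssetWriterInputPixelBufferAdaptor. The output URL, codec
// settings, and dimensions are assumptions for illustration, not part of the
// original code.
func writeVideo(to outputURL: URL,
                provider: SampleProvider,
                size: CGSize,
                completion: @escaping () -> Void) throws {
    let writer = try AVAssetWriter(outputURL: outputURL, fileType: .mp4)
    let settings: [String: Any] = [
        AVVideoCodecKey: AVVideoCodecType.h264,
        AVVideoWidthKey: Int(size.width),
        AVVideoHeightKey: Int(size.height)
    ]
    let input = AVAssetWriterInput(mediaType: .video, outputSettings: settings)
    let adaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: input,
                                                       sourcePixelBufferAttributes: nil)
    writer.add(input)

    guard writer.startWriting() else {
        throw writer.error ?? CocoaError(.fileWriteUnknown)
    }
    writer.startSession(atSourceTime: .zero)

    var presentationTime = CMTime.zero
    let queue = DispatchQueue(label: "video.writer")

    input.requestMediaDataWhenReady(on: queue) {
        while input.isReadyForMoreMediaData {
            guard let pixelBuffer = provider.moreFrame() else {
                // No more frames: finish the file and report completion.
                input.markAsFinished()
                writer.finishWriting(completionHandler: completion)
                return
            }
            if !adaptor.append(pixelBuffer, withPresentationTime: presentationTime) {
                input.markAsFinished()
                writer.finishWriting(completionHandler: completion)
                return
            }
            presentationTime = CMTimeAdd(presentationTime, provider.frameDuration)
        }
    }
}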