iOS audio and video capture is built on AVCaptureSession. Apple's documentation describes it roughly as follows:
AVCaptureSession manages the capture devices on iOS and macOS; you access a device through the session. A session takes AVCaptureInput objects as inputs and AVCaptureOutput objects as outputs; it connects the inputs to the outputs, pulling data from the inputs and delivering it to the outputs.
A single AVCaptureSession can contain multiple inputs and multiple outputs, with AVCaptureConnection objects managing the links between them. For the common system inputs and outputs you normally do not need to add an AVCaptureConnection yourself: when matching inputs and outputs are added, the session creates the connections automatically.
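To make the automatic wiring concrete, here is a minimal sketch (not part of the project code; the local session, input, and output names are placeholders): once a matching input and output have been added, a connection already exists without ever being created by hand.

// Minimal sketch: the session creates the AVCaptureConnection itself.
let session = AVCaptureSession()
if let device = AVCaptureDevice.default(for: .video),
   let input = try? AVCaptureDeviceInput(device: device),
   session.canAddInput(input) {
    session.addInput(input)
}
let output = AVCaptureVideoDataOutput()
if session.canAddOutput(output) {
    session.addOutput(output)
}
// No AVCaptureConnection was added explicitly, yet one is already there:
let videoConnection = output.connection(with: .video)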
The implementation snippet is as follows:
private func setup() {
    self.session.sessionPreset = .iFrame1280x720
    // Video input (front camera)
    if let input = self.getVideoInput(position: .front), self.session.canAddInput(input) {
        self.session.addInput(input)
        self.frontCameraInput = input
        self.videoInput = input
    }
    // Audio input
    if let input = self.audioInput, self.session.canAddInput(input) {
        self.session.addInput(input)
    }
    self.addVideoDataOutput()
    self.addAudioDataOutput()
}
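setup() only configures the session; capture does not begin until startRunning() is called. Since startRunning() blocks until the session is up, it is usually dispatched off the main thread. A small sketch of how that might look (sessionQueue and the start/stop helpers are assumptions, not part of the original code):

// startRunning()/stopRunning() block, so keep them off the main thread.
private let sessionQueue = DispatchQueue(label: "SessionQueue")

func start() {
    sessionQueue.async {
        if !self.session.isRunning {
            self.session.startRunning()
        }
    }
}

func stop() {
    sessionQueue.async {
        if self.session.isRunning {
            self.session.stopRunning()
        }
    }
}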
private func addVideoDataOutput() {
    let videoOutput = AVCaptureVideoDataOutput()
    videoOutput.alwaysDiscardsLateVideoFrames = false
    videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_420YpCbCr8BiPlanarFullRange]
    guard self.session.canAddOutput(videoOutput) else { return }
    self.session.addOutput(videoOutput)
    videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "VideoQueue"))
    self.setVideoOutputMirroring(videoOutput: videoOutput)
    self.videoOutput = videoOutput
}
private func setVideoOutputMirroring(videoOutput: AVCaptureVideoDataOutput) {
    // Mirror the video and force portrait orientation on the video connection
    videoOutput.connections.forEach { (connection) in
        for port in connection.inputPorts {
            if port.mediaType == .video {
                if connection.isVideoMirroringSupported {
                    connection.isVideoMirrored = true
                }
                if connection.isVideoOrientationSupported {
                    connection.videoOrientation = .portrait
                }
                break
            }
        }
    }
}
private func addAudioDataOutput() {
    let audioOutput = AVCaptureAudioDataOutput()
    guard self.session.canAddOutput(audioOutput) else { return }
    self.session.addOutput(audioOutput)
    audioOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "AudioQueue"))
}
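Both data outputs report their sample buffers through the same captureOutput(_:didOutput:from:) delegate callback, so the capture class has to tell video apart from audio by the output argument before handing the buffers on. One way that dispatch could look, assuming the capture class is TRTCAVCapture (as in the callbacks shown later) and that it forwards to a delegate of its own (the delegate property is an assumption):

extension TRTCAVCapture: AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput,
                       didOutput sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection) {
        // Decide by output type whether this is a video or an audio buffer.
        if output is AVCaptureVideoDataOutput {
            self.delegate?.captureVideoDataOutput(capture: self, sampleBuffer: sampleBuffer)
        } else if output is AVCaptureAudioDataOutput {
            self.delegate?.captureAudioDataOutput(capture: self, sampleBuffer: sampleBuffer)
        }
    }
}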
private func getVideoInput(position: AVCaptureDevice.Position) -> AVCaptureDeviceInput? {
    if let device = self.getVideoCaptureDevice(position: position) {
        return try? AVCaptureDeviceInput(device: device)
    }
    return nil
}

private func getVideoCaptureDevice(position: AVCaptureDevice.Position) -> AVCaptureDevice? {
    if #available(iOS 10.2, *) {
        var deviceTypes: [AVCaptureDevice.DeviceType] = [.builtInDualCamera, .builtInWideAngleCamera]
        if #available(iOS 11.1, *) {
            deviceTypes.append(.builtInTrueDepthCamera)
        }
        let discoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: deviceTypes,
                                                                mediaType: .video,
                                                                position: .unspecified)
        return discoverySession.devices.first(where: { $0.position == position })
    }
    return nil
}
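Because getVideoInput(position:) can build an input for either camera, switching cameras comes down to swapping inputs inside a beginConfiguration()/commitConfiguration() pair. A possible sketch (switchCamera itself is not in the original code):

func switchCamera(to position: AVCaptureDevice.Position) {
    guard let newInput = self.getVideoInput(position: position) else { return }
    self.session.beginConfiguration()
    if let currentInput = self.videoInput {
        self.session.removeInput(currentInput)
    }
    if self.session.canAddInput(newInput) {
        self.session.addInput(newInput)
        self.videoInput = newInput
        if position == .front {
            self.frontCameraInput = newInput
        }
    }
    self.session.commitConfiguration()
}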
TRTC supports custom audio and video capture. The idea is to first stop the SDK's own audio and video capture, and then pass true to TRTCCloud's enableCustomVideoCapture and enableCustomAudioCapture to switch to the custom-capture path.
The code looks like this:
@objc func customCapture() {
    // Stop the SDK's built-in capture first
    TRTCCloud.sharedInstance()?.stopLocalPreview()
    TRTCCloud.sharedInstance()?.stopScreenCapture() // only needed if screen capture is running
    TRTCCloud.sharedInstance()?.stopLocalAudio()
    // Then enable custom capture
    TRTCCloud.sharedInstance()?.enableCustomVideoCapture(true)
    TRTCCloud.sharedInstance()?.enableCustomAudioCapture(true)
}
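Switching back to the SDK's built-in capture is the reverse: disable custom capture and restart local preview and audio. A rough sketch; the exact startLocalPreview/startLocalAudio signatures vary between TRTC SDK versions, and previewView is a placeholder:

@objc func sdkCapture(previewView: UIView) {
    TRTCCloud.sharedInstance()?.enableCustomVideoCapture(false)
    TRTCCloud.sharedInstance()?.enableCustomAudioCapture(false)
    TRTCCloud.sharedInstance()?.startLocalPreview(true, view: previewView) // true = front camera
    TRTCCloud.sharedInstance()?.startLocalAudio()
}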
// Bind an AVCaptureVideoPreviewLayer to the capture session to render the locally captured video
func startCustomCapturePreview() {
    if let localPreviewView = self.getRenderView(userId: self.currentUserId), let session = self.dataCapture?.session {
        let layer = AVCaptureVideoPreviewLayer(session: session)
        layer.frame = localPreviewView.bounds
        localPreviewView.layer.addSublayer(layer)
    }
}
// Then wrap the captured data into the frame formats the SDK expects
func captureVideoDataOutput(capture: TRTCAVCapture, sampleBuffer: CMSampleBuffer) {
    let videoFrame = TRTCVideoFrame()
    videoFrame.bufferType = .pixelBuffer
    videoFrame.pixelFormat = ._NV12
    videoFrame.pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
    videoFrame.timestamp = 0 // 0 lets the SDK fill in the timestamp on receipt
    TRTCCloud.sharedInstance()?.sendCustomVideoData(videoFrame)
}
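Here timestamp is set to 0, which lets the SDK fill in the timestamp when the frame arrives. If you would rather supply your own, one option, mirroring what the audio path below does, is to derive a millisecond value from the sample buffer's presentation timestamp (a hypothetical helper, not in the original code):

// Sketch: millisecond timestamp derived from the sample buffer's PTS,
// usable as videoFrame.timestamp instead of 0.
func timestampMs(from sampleBuffer: CMSampleBuffer) -> UInt64 {
    let pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
    return UInt64(CMTimeGetSeconds(pts) * 1000)
}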
// Wrap the captured audio data into the frame format the SDK expects
func captureAudioDataOutput(capture: TRTCAVCapture, sampleBuffer: CMSampleBuffer) {
    var audioBufferList = AudioBufferList()
    var blockBuffer: CMBlockBuffer? = nil
    let result = CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer,
                                                                         bufferListSizeNeededOut: nil,
                                                                         bufferListOut: &audioBufferList,
                                                                         bufferListSize: MemoryLayout<AudioBufferList>.size,
                                                                         blockBufferAllocator: nil,
                                                                         blockBufferMemoryAllocator: nil,
                                                                         flags: 0,
                                                                         blockBufferOut: &blockBuffer)
    guard result == 0 else { return }
    let audioFrame = TRTCAudioFrame()
    if let format = CMSampleBufferGetFormatDescription(sampleBuffer) {
        let formatDescription = CMAudioFormatDescriptionGetStreamBasicDescription(format)
        audioFrame.channels = Int32(formatDescription?.pointee.mChannelsPerFrame ?? 0)
        audioFrame.sampleRate = TRTCAudioSampleRate(rawValue: Int(formatDescription?.pointee.mSampleRate ?? 0)) ?? .rate48000
    }
    if let data = audioBufferList.mBuffers.mData {
        audioFrame.data = Data(bytes: data, count: Int(audioBufferList.mBuffers.mDataByteSize))
    }
    audioFrame.timestamp = UInt64(CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(sampleBuffer)) * 1000)
    TRTCCloud.sharedInstance()?.sendCustomAudioData(audioFrame)
}
That is the TRTC custom-capture approach. Keep in mind that with custom audio capture you have to handle echo cancellation and similar processing yourself, so use it with caution.
TRTC official documentation: https://cloud.tencent.com/document/product/647/34066