avcapturesession - AVCaptureOutput 和对象识别
问题描述
到目前为止,我是新手,Swift
并且一直发现 Stackoverflow 对我的故障排除非常有帮助。在我当前的项目中,除了已经存在的捕获功能之外,我还试图创建一个类似于附加屏幕截图的置信度标签:
当我运行我的代码时,相机视图和捕获功能可以工作,但对象识别(inception V3)似乎没有接收来自AVCaptureOutput
(它也没有将结果打印到控制台)的数据。
我没有收到任何错误消息,所以我不知道我做错了什么。任何反馈将不胜感激
谢谢!
func setupCaptureSession() {
captureSession.sessionPreset = AVCaptureSession.Preset.photo
}
func setupDevice() {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.Position.unspecified)
let device = deviceDiscoverySession.devices
for device in device {
if device.position == AVCaptureDevice.Position.back {
backCamera = device
}else if device.position == AVCaptureDevice.Position.front {
frontCamera = device
}
}
currentCamera = backCamera
}
func setupInputOutput() {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
captureSession.addInput(captureDeviceInput)
photoOutput = AVCapturePhotoOutput()
photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
captureSession.addOutput(photoOutput!)
} catch {
print(error)
}
}
func setupPreviewLayer() {
camerapreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
camerapreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
camerapreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
camerapreviewLayer?.frame = self.view.frame
self.view.layer.insertSublayer(camerapreviewLayer!, at: 0)
}
func setupRunningCaptureSession() {
captureSession.startRunning()
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
// print("Camera was able to capture a frame:", Date())
guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
guard let model = try? VNCoreMLModel(for: Inceptionv3().model) else { return }
let request = VNCoreMLRequest(model: model) { (finishedReq, err) in
//perhaps check the err
// print(finishedReq.results)
guard let results = finishedReq.results as? [VNClassificationObservation] else { return }
guard let firstObservation = results.first else { return }
print(firstObservation.identifier, firstObservation.confidence)
DispatchQueue.main.async {
self.confidenceLabel.text = "\(firstObservation.identifier) \(firstObservation.confidence * 100)"
}
}
try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}
解决方案
推荐阅读
- python - 无法使用两个线程在一个脚本中执行两个函数
- google-pay - Google PAY API - LoyalityObject 与 LoyalityClass
- android - 将应用程序 gradle API 更新到目标级别 26 失败 nes app
- php - Laravel 5.6 项目部署到共享主机
- r - 用先前的值填充特定数量的 data.table 行
- java - 处理 Blanked EditText 和 EditText With Character 的异常?(更好地了解)
- jquery - 如何更改背景 svg 分配的设置?
- css - 如何使用 CSS 使自定义嵌入代码具有响应性
- mongodb - 猫鼬 - 如何在不插入的情况下获取模式的最终文档
- javascript - 如何从 html 表中仅获取一列值?