Custom camera image capture and cropping with Objective-C

Problem description

I am using a custom camera to capture images (I am using AVCaptureStillImageOutput's jpegStillImageNSDataRepresentation to convert the output to a UIImage). My goal is to crop a specific region of the image. In my capture view I draw a dynamically sized rectangle, for example 50 px wide and 30 px high, and I only want to capture the area inside that rectangle.

How can I achieve this?

Tags: ios, objective-c

Solution


Try this code (note that it is written in Swift, not Objective-C); I think it will help you.

Add the frameworks:

        import UIKit
        import AVFoundation
        import GLKit
        import CoreImage

        // Extension to crop a UIImage to a rect given in points; the rect is
        // scaled into pixel coordinates using the image's scale factor.
        extension UIImage {
            func crop(rect: CGRect) -> UIImage {
                var rect = rect
                rect.origin.x*=self.scale
                rect.origin.y*=self.scale
                rect.size.width*=self.scale
                rect.size.height*=self.scale

                // cropping(to:) can fail (e.g. for a CIImage-backed UIImage),
                // so fall back to returning the receiver instead of crashing
                guard let imageRef = self.cgImage?.cropping(to: rect) else { return self }
                return UIImage(cgImage: imageRef, scale: self.scale, orientation: self.imageOrientation)
            }
        }
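        // A minimal usage sketch with hypothetical values: cropping a
        // 50x30-point region out of a captured image.
        // let cropped = capturedImage.crop(rect: CGRect(x: 0, y: 0, width: 50, height: 30))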
        // Renders a CIImage-backed UIImage into a CGImage-backed one so that
        // APIs that require a CGImage work correctly. Note: this relies on the
        // file-scope `originalImage` declared below and assumes the receiver
        // was created from a CIImage (the force unwraps crash otherwise).
        extension UIImage {
            func fixed() -> UIImage {

                let ciContext = CIContext(options: nil)

                let cgImg = ciContext.createCGImage(ciImage!, from: ciImage!.extent)

                let image = UIImage(cgImage: cgImg!, scale: originalImage.scale, orientation: originalImage.imageOrientation)

                return image
            }
        }
        // Creating a CIDetector is expensive, so shared instances are cached
        // as static constants. CIDetectorAccuracy, CIDetectorTracking and
        // CIDetectorMinFeatureSize are detector-creation options, so they
        // belong here rather than in features(in:options:).
        extension CIDetector {
            static let rectangle: CIDetector? = {
                let detector = CIDetector(ofType: CIDetectorTypeRectangle, context: nil,
                                          options: [CIDetectorAccuracy: CIDetectorAccuracyLow,
                                                    CIDetectorTracking: true,
                                                    CIDetectorMinFeatureSize: 0.2])
                return detector
            }()

            static let text: CIDetector? = {
                let detector = CIDetector(ofType: CIDetectorTypeText, context: nil, options: [:])
                return detector
            }()
            static let face: CIDetector? = {
                let detector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: [:])
                return detector
            }()
        }
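        // Usage sketch: the cached detector can then be reused for every frame,
        // e.g. let feature = CIDetector.rectangle?.features(in: ciImage).first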


        // Semi-transparent tint colors used for the detection overlay
        extension CIColor {
            static let redTone: CIColor = {
                return CIColor(red: 1.0, green: 0, blue: 0, alpha: 0.2)
            }()

            static let blueTone: CIColor = {
                return CIColor(red: 0.0, green: 0, blue: 1.0, alpha: 0.3)
            }()
        }

        // Measurable abstracts over the CIFeature subclasses that expose
        // corner points, so rectangle and text features can be handled uniformly.
        protocol Measurable {
            var bounds: CGRect { get }
            var topLeft: CGPoint { get }
            var topRight: CGPoint { get }
            var bottomLeft: CGPoint { get }
            var bottomRight: CGPoint { get }
        }

        extension CITextFeature: Measurable {}
        extension CIRectangleFeature: Measurable {}

        extension Measurable {
            // Builds a semi-transparent quadrilateral that matches the detected
            // feature's corners, to be drawn on top of the live preview
            func perspectiveOverlay(on image: CIImage, with color: CIColor) -> CIImage? {
                var overlay = CIImage(color: color)

                overlay = overlay.cropped(to: bounds)
                overlay = overlay.applyingFilter(
                    "CIPerspectiveTransformWithExtent",
                    parameters: [
                        "inputExtent": CIVector(cgRect: image.extent),
                        "inputTopLeft": CIVector(cgPoint: topLeft),
                        "inputTopRight": CIVector(cgPoint: topRight),
                        "inputBottomLeft": CIVector(cgPoint: bottomLeft),
                        "inputBottomRight": CIVector(cgPoint: bottomRight)])


                return overlay
            }
        }
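        // Variant sketch: to burn the overlay into the frame itself rather than
        // drawing it in a separate transparent view, composite it over the input:
        // let highlighted = overlay.composited(over: image)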
        // File-scope storage for the most recent camera frame as a UIImage;
        // shared with the `fixed()` extension above
        private var originalImage = UIImage()

        class ViewController: UIViewController {
            // Camera-capture related properties
        var videoDataOutput: AVCaptureVideoDataOutput!
        var videoDataOutputQueue: DispatchQueue!
        var previewLayer:AVCaptureVideoPreviewLayer!
        var captureDevice : AVCaptureDevice!
        let session = AVCaptureSession()
        var detector: CIDetector?
        var context: CIContext?
        private var rect: CIRectangleFeature = CIRectangleFeature()

        var testimatch: CIImage!
        fileprivate var testextent: CGRect!
        fileprivate let glkView: GLKView = {
            let view = GLKView(frame: .zero, context: EAGLContext(api: .openGLES2)!)
            view.transform = CGAffineTransform(rotationAngle: .pi / 2)
            return view
        }()
        var imatch: CIImage!
        @IBOutlet weak var imageView: UIImageView!
        var testMeasurable : Measurable!
        private var croppedImage: UIImage!
        fileprivate var extent: CGRect!
        fileprivate var ciContextt: CIContext!
        fileprivate var glContext: EAGLContext!
            override func viewDidLoad() {
                super.viewDidLoad()
                context = CIContext()
                detector = CIDetector(ofType: CIDetectorTypeFace, context: context, options: nil)
                self.setupAVCapture()
            }
         }
// AVCaptureVideoDataOutputSampleBufferDelegate protocol and related methods
extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
    func setupAVCapture(){
        session.sessionPreset = AVCaptureSession.Preset.vga640x480
        guard let device = AVCaptureDevice
            .default(AVCaptureDevice.DeviceType.builtInWideAngleCamera,
                     for: .video,
                     position: AVCaptureDevice.Position.back) else {
                        return
        }
        captureDevice = device
        beginSession()
    }
    func beginSession() {
        do {
            let deviceInput = try AVCaptureDeviceInput(device: captureDevice)

            if self.session.canAddInput(deviceInput){
                self.session.addInput(deviceInput)
            }

            videoDataOutput = AVCaptureVideoDataOutput()
            videoDataOutput.alwaysDiscardsLateVideoFrames = true
            videoDataOutputQueue = DispatchQueue(label: "VideoDataOutputQueue")
            videoDataOutput.setSampleBufferDelegate(self, queue: self.videoDataOutputQueue)
            videoDataOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
            if session.canAddOutput(self.videoDataOutput){
                session.addOutput(self.videoDataOutput)
            }
            videoDataOutput.connection(with: .video)?.isEnabled = true

            // let connection = videoDataOutput.connection(with: .video)
            // connection?.videoOrientation = .portrait

            previewLayer = AVCaptureVideoPreviewLayer(session: self.session)
            previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill

            let rootLayer: CALayer = self.view.layer
            rootLayer.masksToBounds = true
            previewLayer.frame = rootLayer.bounds

            glkView.frame = rootLayer.bounds
            glkView.bindDrawable()
            glkView.isOpaque = false
            glkView.layer.isOpaque = false
            self.previewLayer.addSublayer(glkView.layer)

            rootLayer.addSublayer(self.previewLayer)

            ciContextt = CIContext(eaglContext: glkView.context)
            // Empirically chosen insets so the GL drawable lines up with the
            // resizeAspectFill preview; tune these for your screen and preset.
            extent = CGRect(x: -5, y: -60, width: glkView.drawableWidth + 20, height: glkView.drawableHeight + 120)
            //  extent = CGRect(x: 0, y: 0, width: glkView.drawableWidth, height: glkView.drawableHeight)

            // Note: startRunning() blocks until the capture pipeline starts,
            // so Apple recommends calling it off the main queue in production.
            session.startRunning()
        } catch let error as NSError {
            print("error: \(error.localizedDescription)")
        }
    }



    // Perspective-corrects the current frame using the detected feature's
    // corner points and returns a cropped, CGImage-backed UIImage.
    func newimagerect(feature: Measurable) -> UIImage {

        let cgimg = self.context?.createCGImage(self.imatch!, from: testimatch!.extent)

        originalImage = UIImage(ciImage: self.imatch)

        let newImage = UIImage(cgImage: cgimg!, scale: originalImage.scale, orientation: originalImage.imageOrientation)
        self.imageView.image = newImage
        // crop(rect:) returns a new image, so the result must be assigned back
        self.imageView.image = self.imageView.image?.crop(rect: testextent)

        let perspectiveCorrection = CIFilter(name: "CIPerspectiveCorrection")!
        let docImage = self.imatch!
        perspectiveCorrection.setValue(CIVector(cgPoint:feature.topLeft),
                                       forKey: "inputTopLeft")
        perspectiveCorrection.setValue(CIVector(cgPoint:feature.topRight),
                                       forKey: "inputTopRight")
        perspectiveCorrection.setValue(CIVector(cgPoint:feature.bottomRight),
                                       forKey: "inputBottomRight")
        perspectiveCorrection.setValue(CIVector(cgPoint:feature.bottomLeft),
                                       forKey: "inputBottomLeft")
        perspectiveCorrection.setValue(docImage,
                                       forKey: kCIInputImageKey)

        let outputImage = perspectiveCorrection.outputImage

        let updatedImage = UIImage(ciImage: outputImage!, scale: originalImage.scale, orientation: originalImage.imageOrientation)


        croppedImage = updatedImage.fixed()
        return croppedImage

    }
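    // Equivalent, more compact form of the perspective correction above, using
    // CIImage.applyingFilter (a sketch; results should be identical):
    // let corrected = docImage.applyingFilter("CIPerspectiveCorrection", parameters: [
    //     "inputTopLeft": CIVector(cgPoint: feature.topLeft),
    //     "inputTopRight": CIVector(cgPoint: feature.topRight),
    //     "inputBottomRight": CIVector(cgPoint: feature.bottomRight),
    //     "inputBottomLeft": CIVector(cgPoint: feature.bottomLeft)])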

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {

        let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!
        var image = CIImage(cvPixelBuffer: pixelBuffer)

        DispatchQueue.main.async {
            self.imatch = image

            // Run rectangle detection on the frame; when a rectangle is found,
            // tint it and draw the overlay, otherwise hide the overlay view.
            if let feature = CIDetector.rectangle?.features(in: image).first as? Measurable,
               let overlay = feature.perspectiveOverlay(on: image, with: CIColor.redTone) {
                image = overlay
                self.glkView.isHidden = false
                self.testMeasurable = feature
                self.draw(image: image)
            } else {
                self.glkView.isHidden = true
            }
        }
    }
    func draw(image: CIImage) {
        DispatchQueue.main.async {
            // Draw the overlay into the transparent GLKView on top of the preview
            self.glkView.bindDrawable()
            if self.glkView.context != EAGLContext.current() {
                EAGLContext.setCurrent(self.glkView.context)
            }
            glClearColor(0.0, 0.0, 0.0, 0.0)
            glClear(GLbitfield(GL_COLOR_BUFFER_BIT))
            self.ciContextt.draw(image, in: self.extent, from: image.extent)
            self.testimatch = image
            self.testextent = self.extent
            self.glkView.display()

        }
    }
    // Clean up AVCapture
    func stopCamera() {
        session.stopRunning()
    }
}
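
If you only need the region under a rectangle you draw yourself (as in the original question), rather than an auto-detected one, a common approach is to convert the view rectangle into image coordinates through the preview layer. Below is a minimal sketch, assuming the previewLayer configured above and a CGImage-backed capturedImage; the cropImage and overlayRect names are hypothetical:

    func cropImage(_ capturedImage: UIImage, overlayRect: CGRect,
                   previewLayer: AVCaptureVideoPreviewLayer) -> UIImage? {
        // Convert the on-screen rect to a normalized (0...1) rect in the
        // capture device's coordinate space...
        let normalized = previewLayer.metadataOutputRectConverted(fromLayerRect: overlayRect)
        // ...then scale it up to pixel coordinates of the captured image.
        guard let cgImage = capturedImage.cgImage else { return nil }
        let pixelRect = CGRect(x: normalized.origin.x * CGFloat(cgImage.width),
                               y: normalized.origin.y * CGFloat(cgImage.height),
                               width: normalized.size.width * CGFloat(cgImage.width),
                               height: normalized.size.height * CGFloat(cgImage.height))
        guard let croppedCG = cgImage.cropping(to: pixelRect) else { return nil }
        return UIImage(cgImage: croppedCG, scale: capturedImage.scale,
                       orientation: capturedImage.imageOrientation)
    }

Image orientation can still shift the result; depending on how the photo was captured you may need to swap width and height for portrait images.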

You can then get a UIImage of the detected rectangle from the camera with a single call:

    let image = self.newimagerect(feature: self.testMeasurable)
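
Since testMeasurable is only assigned once a rectangle has actually been detected, it is safer to guard against a nil value first (a minimal sketch):

    guard let feature = self.testMeasurable else { return }
    let image = self.newimagerect(feature: feature)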

