
ios – CreateML model performing completely differently on macOS vs iPadOS


I’ve trained a face sentiment CoreML model using CreateML’s app. When using the live preview in the CreateML app I get a model that roughly works (i.e. changes in facial expression lead to changes in the prediction).
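
For reference, this is roughly how the same FaceEmotion model could be exercised on a single still image through Vision, outside the camera pipeline (the "test_face" asset name is only a placeholder), which might help check whether the model itself behaves the same on both platforms:

import CoreML
import UIKit
import Vision

func classifyStillImage() {
    // Placeholder asset name; any bundled face photo works for a quick check.
    guard let cgImage = UIImage(named: "test_face")?.cgImage else { return }
    do {
        let faceModel = try FaceEmotion(configuration: MLModelConfiguration())
        let visionModel = try VNCoreMLModel(for: faceModel.model)
        let request = VNCoreMLRequest(model: visionModel) { request, _ in
            guard let results = request.results as? [VNClassificationObservation],
                  let topResult = results.first else { return }
            print("\(topResult.identifier) \(topResult.confidence)")
        }
        // Orientation is .up because the test image is already upright.
        let handler = VNImageRequestHandler(cgImage: cgImage, orientation: .up, options: [:])
        try handler.perform([request])
    } catch {
        print("Still-image classification failed: \(error)")
    }
}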

However, when I use the model in my app (iPad + Mac) it only performs well in the Mac version, not the iPad version. The Mac version behaves similarly to the live preview in the CreateML app, but the iPad version gives a positive sentiment 98% of the time (see the logging sketch after the code). Below is my code for implementing the model, which largely follows an example app on Apple’s website.

import AVFoundation
import CoreML
import SwiftUI
import UIKit
import Vision

class CameraViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    var bufferSize: CGSize = .zero
    
    private let session = AVCaptureSession()
    private var previewLayer: AVCaptureVideoPreviewLayer! = nil
    private let videoDataOutput = AVCaptureVideoDataOutput()
    
    private let videoDataOutputQueue = DispatchQueue(label: "VideoDataOutput", qos: .userInitiated, attributes: [], autoreleaseFrequency: .workItem)
    
    let frameSize: CGSize = .zero
    
    private var requests = [VNRequest]()
    @Binding var sentiment: String
    
    init(sentiment: Binding<String>) {
        self._sentiment = sentiment
        super.init(nibName: nil, bundle: nil)
    }
    
    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been carried out")
    }
    
    override func viewDidLoad() {
        super.viewDidLoad()
        setupAVCapture()
        setupVision()
    }
    
    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }
    
    func setupVision() {
        do {
            let config = MLModelConfiguration()
            let faceModel = try FaceEmotion(configuration: config)
            let visionModel = try VNCoreMLModel(for: faceModel.model)
            let objectRecognition = VNCoreMLRequest(model: visionModel) { (request, error) in
                guard let results = request.results as? [VNClassificationObservation], let topResult = results.first else {
                    return
                }
                DispatchQueue.main.async {
                    self.sentiment = "\(topResult.identifier) \(topResult.confidence)"
                }
            }
            self.requests = [objectRecognition]
        } catch let error as NSError {
            print("Mannequin loading went mistaken: (error)")
        }
    }
    
    func setupAVCapture() {
        var deviceInput: AVCaptureDeviceInput!
        
        // Select a video device, make an input
        let videoDevice = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: .video, position: .front).devices.first
        do {
            deviceInput = try AVCaptureDeviceInput(device: videoDevice!)
        } catch {
            print("Could not create video device input: \(error)")
            return
        }
        
        session.beginConfiguration()
//            session.sessionPreset = .cif352x288 // Model image size is smaller.
        
        // Add a video input
        guard session.canAddInput(deviceInput) else {
            print("Could not add video device input to the session")
            session.commitConfiguration()
            return
        }
        session.addInput(deviceInput)
        if session.canAddOutput(videoDataOutput) {
            session.addOutput(videoDataOutput)
            // Add a video data output
            videoDataOutput.alwaysDiscardsLateVideoFrames = true
            videoDataOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)]
            videoDataOutput.setSampleBufferDelegate(self, queue: videoDataOutputQueue)
        } else {
            print("Couldn't add video knowledge output to the session")
            session.commitConfiguration()
            return
        }
        let captureConnection = videoDataOutput.connection(with: .video)
        // Always process the frames
        captureConnection?.isEnabled = true
        do {
            try videoDevice!.lockForConfiguration()
            let dimensions = CMVideoFormatDescriptionGetDimensions((videoDevice?.activeFormat.formatDescription)!)
            bufferSize.width = CGFloat(dimensions.width)
            bufferSize.height = CGFloat(dimensions.height)
            videoDevice!.unlockForConfiguration()
        } catch {
            print(error)
        }
        session.commitConfiguration()
        previewLayer = AVCaptureVideoPreviewLayer(session: session)
//            previewLayer.ro
        previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
        previewLayer.connection?.videoRotationAngle = 0
        previewLayer.body = view.bounds
        view.layer.addSublayer(previewLayer)
        session.startRunning()
    }

    
    // Clean up capture setup
    func teardownAVCapture() {
        previewLayer.removeFromSuperlayer()
        previewLayer = nil
    }
    
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
            return
        }
        
        let exifOrientation = exifOrientationFromDeviceOrientation()
        
        let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: exifOrientation, options: [:])
        do {
            try imageRequestHandler.perform(self.requests)
        } catch {
            print(error)
        }
    }
    
    public func exifOrientationFromDeviceOrientation() -> CGImagePropertyOrientation {
        let curDeviceOrientation = UIDevice.current.orientation
        let exifOrientation: CGImagePropertyOrientation
        
        switch curDeviceOrientation {
        case UIDeviceOrientation.portraitUpsideDown:  // Device oriented vertically, home button on the top
            exifOrientation = .left
        case UIDeviceOrientation.landscapeLeft:       // Device oriented horizontally, home button on the right
            exifOrientation = .upMirrored
        case UIDeviceOrientation.landscapeRight:      // Device oriented horizontally, home button on the left
            exifOrientation = .down
        case UIDeviceOrientation.portrait:            // Device oriented vertically, home button on the bottom
            exifOrientation = .up
        default:
            exifOrientation = .up
        }
        return exifOrientation
    }
}
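
In case it is useful, this is the kind of per-frame logging I could add inside captureOutput(_:didOutput:from:) to compare what each platform actually hands to the model (frame size, pixel format, and the orientation value returned by exifOrientationFromDeviceOrientation()); it is only a diagnostic sketch, not something the app needs:

func logFrame(_ pixelBuffer: CVPixelBuffer, orientation: CGImagePropertyOrientation) {
    // Log frame dimensions, pixel format, and orientation so Mac and iPad runs can be compared.
    let width = CVPixelBufferGetWidth(pixelBuffer)
    let height = CVPixelBufferGetHeight(pixelBuffer)
    let format = CVPixelBufferGetPixelFormatType(pixelBuffer)
    print("frame \(width)x\(height) format \(format) vision orientation \(orientation.rawValue) device orientation \(UIDevice.current.orientation.rawValue)")
}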

Any help would be much appreciated.
