I am trying to use Compose Multiplatform to create an app that uses ML Kit on Android and Vision on iOS to read words captured by the camera. On Android everything works fine. On iOS, however, there is a big problem: performance is terrible and the app does not respond. Put simply, I open the rear camera and try to read the words, but for some reason I cannot find the right thread on which to do the heavy lifting, because either the app crashes (BAD ACCESS) or the UI does not update even though on the log side I can see the recognized words.
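As far as I understand, any state that drives the composition has to be mutated on the main thread, so whatever the recognition callback does would have to be wrapped roughly like this (a minimal sketch; deliverOnMainThread is just an illustrative helper, not part of my code):

import kotlinx.cinterop.ExperimentalForeignApi
import platform.darwin.dispatch_async
import platform.darwin.dispatch_get_main_queue

// Hypothetical helper: hand recognized text back to the main queue before it touches
// any Compose state; otherwise the log shows results but the composition never updates.
@OptIn(ExperimentalForeignApi::class)
fun deliverOnMainThread(text: String, onTextDetected: (String) -> Unit) {
    dispatch_async(dispatch_get_main_queue()) {
        onTextDetected(text)
    }
}

Below is the full iOS actual implementation I have at the moment: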
import androidx.compose.foundation.layout.fillMaxSize
import androidx.compose.runtime.Composable
import androidx.compose.runtime.DisposableEffect
import androidx.compose.runtime.LaunchedEffect
import androidx.compose.runtime.getValue
import androidx.compose.runtime.mutableStateOf
import androidx.compose.runtime.remember
import androidx.compose.runtime.setValue
import androidx.compose.ui.Modifier
import androidx.compose.ui.interop.UIKitView
import kotlinx.cinterop.ExperimentalForeignApi
import kotlinx.cinterop.useContents
import platform.AVFoundation.AVAuthorizationStatusAuthorized
import platform.AVFoundation.AVAuthorizationStatusDenied
import platform.AVFoundation.AVAuthorizationStatusNotDetermined
import platform.AVFoundation.AVAuthorizationStatusRestricted
import platform.AVFoundation.AVCaptureConnection
import platform.AVFoundation.AVCaptureDevice
import platform.AVFoundation.AVCaptureDeviceInput
import platform.AVFoundation.AVCaptureInput
import platform.AVFoundation.AVCaptureOutput
import platform.AVFoundation.AVCaptureSession
import platform.AVFoundation.AVCaptureSessionPresetPhoto
import platform.AVFoundation.AVCaptureVideoDataOutput
import platform.AVFoundation.AVCaptureVideoDataOutputSampleBufferDelegateProtocol
import platform.AVFoundation.AVCaptureVideoPreviewLayer
import platform.AVFoundation.AVLayerVideoGravityResizeAspectFill
import platform.AVFoundation.AVMediaTypeVideo
import platform.AVFoundation.authorizationStatusForMediaType
import platform.AVFoundation.requestAccessForMediaType
import platform.CoreGraphics.CGRectMake
import platform.CoreMedia.CMSampleBufferGetImageBuffer
import platform.CoreMedia.CMSampleBufferRef
import platform.CoreVideo.kCVPixelBufferPixelFormatTypeKey
import platform.CoreVideo.kCVPixelFormatType_32BGRA
import platform.UIKit.UIScreen
import platform.UIKit.UIView
import platform.Vision.VNImageRequestHandler
import platform.Vision.VNRecognizeTextRequest
import platform.Vision.VNRecognizedText
import platform.Vision.VNRecognizedTextObservation
import platform.darwin.NSObject
import platform.darwin.dispatch_async
import platform.darwin.dispatch_get_main_queue
@OptIn(ExperimentalForeignApi::class)
actual class CameraPermissionManager {

    private lateinit var captureSession: AVCaptureSession
    private lateinit var textRecognitionRequest: VNRecognizeTextRequest
    private lateinit var videoLayer: AVCaptureVideoPreviewLayer

    @Composable
    actual fun RequestCameraPermission(
        onPermissionGranted: @Composable () -> Unit,
        onPermissionDenied: @Composable () -> Unit
    ) {
        var hasPermission by remember { mutableStateOf(false) }
        var permissionRequested by remember { mutableStateOf(false) }

        LaunchedEffect(Unit) {
            val status = AVCaptureDevice.authorizationStatusForMediaType(AVMediaTypeVideo)
            when (status) {
                AVAuthorizationStatusAuthorized -> {
                    hasPermission = true
                }
                AVAuthorizationStatusNotDetermined -> {
                    AVCaptureDevice.requestAccessForMediaType(AVMediaTypeVideo) { granted ->
                        dispatch_async(dispatch_get_main_queue()) {
                            hasPermission = granted
                            permissionRequested = true
                        }
                    }
                }
                AVAuthorizationStatusDenied, AVAuthorizationStatusRestricted -> {
                    hasPermission = false
                }
                else -> {}
            }
        }

        if (hasPermission) {
            onPermissionGranted()
        } else if (permissionRequested) {
            onPermissionDenied()
        }
    }
    @Composable
    actual fun StartCameraPreview(onTextDetected: (String) -> Unit) {
        val screenWidth = UIScreen.mainScreen.bounds.useContents { size.width }
        val screenHeight = UIScreen.mainScreen.bounds.useContents { size.height }

        val previewView = remember {
            UIView(frame = CGRectMake(0.0, 0.0, screenWidth, screenHeight))
        }

        LaunchedEffect(Unit) {
            // Setup AVCaptureSession
            captureSession = AVCaptureSession().apply {
                sessionPreset = AVCaptureSessionPresetPhoto
            }
            val device = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
            val input = device?.let { AVCaptureDeviceInput.deviceInputWithDevice(it, null) }
            captureSession.addInput(input as AVCaptureInput)

            videoLayer = AVCaptureVideoPreviewLayer(session = captureSession).apply {
                this.videoGravity = AVLayerVideoGravityResizeAspectFill
                this.frame = previewView.bounds
            }
            previewView.layer.addSublayer(videoLayer)

            // Setup Vision text recognition
            textRecognitionRequest = VNRecognizeTextRequest { request, _ ->
                val observations = request?.results?.filterIsInstance<VNRecognizedTextObservation>()
                observations?.forEach { observation ->
                    val topCandidate = observation.topCandidates(1u).firstOrNull()?.toString() ?: ""
                    onTextDetected(topCandidate)
                }
            }

            val videoOutput = AVCaptureVideoDataOutput().apply {
                videoSettings = mapOf(kCVPixelBufferPixelFormatTypeKey to kCVPixelFormatType_32BGRA)
                setSampleBufferDelegate(
                    createSampleBufferDelegate(onTextDetected),
                    dispatch_get_main_queue()
                )
            }
            captureSession.addOutput(videoOutput)
            captureSession.startRunning()
        }

        DisposableEffect(Unit) {
            onDispose {
                captureSession.stopRunning()
            }
        }

        UIKitView(
            factory = {
                previewView
            },
            modifier = Modifier.fillMaxSize()
        )
    }
    private fun createSampleBufferDelegate(onTextDetected: (String) -> Unit): AVCaptureVideoDataOutputSampleBufferDelegateProtocol {
        return object : NSObject(), AVCaptureVideoDataOutputSampleBufferDelegateProtocol {
            override fun captureOutput(
                output: AVCaptureOutput,
                didOutputSampleBuffer: CMSampleBufferRef?,
                fromConnection: AVCaptureConnection
            ) {
                if (didOutputSampleBuffer != null) {
                    processSampleBuffer(didOutputSampleBuffer, onTextDetected)
                }
            }
        }
    }
    private fun processSampleBuffer(
        sampleBuffer: CMSampleBufferRef,
        onTextDetected: (String) -> Unit
    ) {
        val pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        if (pixelBuffer == null) return

        val handler = VNImageRequestHandler(pixelBuffer, options = mapOf())
        val request = VNRecognizeTextRequest { request, error ->
            if (error != null) {
                println("Error recognizing text: $error")
                return@VNRecognizeTextRequest
            }
            val observations = request?.results?.filterIsInstance<VNRecognizedTextObservation>()
            observations?.forEach { observation ->
                val topCandidates = observation.topCandidates(1u)
                topCandidates.firstOrNull()?.let { candidate ->
                    val recognizedText = candidate as? VNRecognizedText
                    recognizedText?.string?.let { text ->
                        onTextDetected(text)
                    } ?: run { println("Recognized text is null") }
                }
            }
        }

        try {
            handler.performRequests(listOf(request), null)
        } catch (e: Exception) {
            println("Error performing text recognition request: $e")
        }
    }
}
What am I doing wrong? What should I change? Vision is supposed to be the best option on iOS, and yet as soon as I try to switch threads for the word-recognition part everything freezes and nothing works. Do you have any working suggestions?
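For completeness, this is roughly what I mean by "switching the thread" (a sketch only; the queue label, the helper name, and the comments are mine, not from any sample): the sample-buffer delegate is moved onto its own serial queue and startRunning() is kept off the main thread. Something along these lines is what freezes for me:

import kotlinx.cinterop.ExperimentalForeignApi
import platform.AVFoundation.AVCaptureSession
import platform.AVFoundation.AVCaptureVideoDataOutput
import platform.AVFoundation.AVCaptureVideoDataOutputSampleBufferDelegateProtocol
import platform.darwin.dispatch_async
import platform.darwin.dispatch_queue_create

@OptIn(ExperimentalForeignApi::class)
fun startSessionOffMainThread(
    session: AVCaptureSession,
    output: AVCaptureVideoDataOutput,
    delegate: AVCaptureVideoDataOutputSampleBufferDelegateProtocol
) {
    // One dedicated serial queue for frame delivery (kept as a property in real code).
    val videoProcessingQueue = dispatch_queue_create("camera.video.processing", null)
    // Frames, and therefore the Vision work inside the delegate, now arrive on this
    // queue instead of the main queue.
    output.setSampleBufferDelegate(delegate, videoProcessingQueue)
    // startRunning() is blocking, so it is kept off the main thread as well.
    dispatch_async(videoProcessingQueue) {
        session.startRunning()
    }
}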