As stated in this answer (https://stackoverflow.com/a/75902996/29516370), you should add the following simulator-only workaround:
// Simulator-only workaround: force the Vision request onto the CPU so it
// does not fail with "Could not create inference context".
#if targetEnvironment(simulator)
if #available(iOS 17.0, *) {
// iOS 17+: pick the CPU compute device explicitly from the available devices.
let allDevices = MLComputeDevice.allComputeDevices
for device in allDevices {
if(device.description.contains("MLCPUComputeDevice")){
request.setComputeDevice(.some(device), for: .main)
// Only one CPU device is needed; stop at the first match.
break
}
}
} else {
// Pre-iOS 17 fallback: the (deprecated) flag has the same effect.
request.usesCPUOnly = true
}
#endif
Tip: I found the cause simply by printing `error.localizedDescription`, which returned: "Could not create inference context". Here is the full code:
import UIKit
@preconcurrency import Vision
// Serial background queue dedicated to Vision work.
// NOTE(review): declared but not used by the code below — presumably intended
// for `queueFor(image:completion:)`, which dispatches to the main queue
// instead; confirm.
let visionQueue = DispatchQueue(label: "com.example.vision")
extension UIImage {
    /// Detects barcodes in this image and hands the observations to `completion`.
    ///
    /// On the simulator the request is forced onto the CPU, working around the
    /// "Could not create inference context" failure.
    /// - Parameter completion: Receives the detected `VNBarcodeObservation`s,
    ///   or an empty array when nothing was detected or the request failed.
    @MainActor func detectBarcodes(completion: @Sendable @escaping ([VNBarcodeObservation]) ->()) {
        let barcodeRequest = VNDetectBarcodesRequest()
        #if targetEnvironment(simulator)
        if #available(iOS 17.0, *) {
            // iOS 17+: select the CPU compute device explicitly; stop at the
            // first matching device.
            for computeDevice in MLComputeDevice.allComputeDevices
            where computeDevice.description.contains("MLCPUComputeDevice") {
                barcodeRequest.setComputeDevice(.some(computeDevice), for: .main)
                break
            }
        } else {
            // Pre-iOS 17 fallback: the (deprecated) flag has the same effect.
            barcodeRequest.usesCPUOnly = true
        }
        #endif
        barcodeRequest.queueFor(image: self) { observations in
            let barcodes = observations as? [VNBarcodeObservation]
            completion(barcodes ?? [])
        }
    }
}
extension VNDetectBarcodesRequest {
    /// Runs this request against `image` and delivers its results via
    /// `completion`.
    ///
    /// - Parameters:
    ///   - image: The image to analyze. If its `cgImage` is unavailable
    ///     (e.g. a CIImage-backed UIImage), `completion` receives `nil`.
    ///   - completion: Called with the request's `results` on success, or
    ///     `nil` on failure. Invoked from `visionQueue`.
    @MainActor func queueFor(image: UIImage, completion: @Sendable @escaping ([Any]?) -> ()) {
        // Fail gracefully instead of force-unwrapping: a CIImage-backed
        // UIImage has no cgImage.
        guard let cgImage = image.cgImage else {
            completion(nil)
            return
        }
        let handler = VNImageRequestHandler(cgImage: cgImage, orientation: .up, options: [:])
        // perform(_:) is synchronous and potentially slow; run it on the
        // dedicated vision queue instead of blocking the main queue.
        visionQueue.async {
            do {
                try handler.perform([self])
                // BUG FIX: the original never invoked `completion`, so callers
                // of detectBarcodes never received any results.
                completion(self.results)
            } catch {
                // Surface the failure instead of silently swallowing it.
                print("Vision request failed: \(error.localizedDescription)")
                completion(nil)
            }
        }
    }
}
// Demo: run barcode detection on a bundled test image.
// Force-unwrap is acceptable here — a missing bundled resource is a
// programmer error that should crash early.
let image = UIImage(named: "5.jpg")!
image.detectBarcodes { observations in
    observations.forEach { observation in
        _ = observation.payloadStringValue
    }
}