Skip to content

Instantly share code, notes, and snippets.

@davidseek
Last active January 19, 2021 01:52
Show Gist options
  • Save davidseek/8b638a825462900fc5f1d118cd4c579c to your computer and use it in GitHub Desktop.
Save davidseek/8b638a825462900fc5f1d118cd4c579c to your computer and use it in GitHub Desktop.
ImageRecognizer.swift
import Foundation
import Vision
struct ImageRecognizer {

    /// Runs the bundled `Suites` Core ML classifier on `image` and invokes
    /// `onComplete` with the identifier of the top classification result.
    ///
    /// On any failure — model loading, the Vision request erroring, an
    /// unexpected result type, or an empty result set — `onComplete` is
    /// simply not called. That matches the original contract, but without
    /// crashing the process the way the previous `try!` calls could.
    ///
    /// - Parameters:
    ///   - image: The image to classify.
    ///   - onComplete: Called with the top result's identifier on success.
    static func get(from image: CGImage, onComplete: @escaping (String) -> Void) {
        // A fresh configuration per call: MLModelConfiguration is cheap to
        // create, so sharing one instance across calls isn't worth the coupling.
        let configuration = MLModelConfiguration()

        // Loading the generated Suites model and wrapping it for Vision can
        // both throw (e.g. missing/corrupt model resources), so fail
        // gracefully instead of force-unwrapping.
        let model: VNCoreMLModel
        do {
            let classifier = try Suites(configuration: configuration).model
            model = try VNCoreMLModel(for: classifier)
        } catch {
            // NOTE(review): a future, source-breaking change could surface
            // this to the caller via a Result-based completion instead.
            print("ImageRecognizer: failed to load model — \(error.localizedDescription)")
            return
        }

        let request = VNCoreMLRequest(model: model) { finished, error in
            if let error = error {
                // Request failed; log and bail without calling onComplete.
                print("ImageRecognizer: classification failed — \(error.localizedDescription)")
                return
            }
            guard let results = finished.results as? [VNClassificationObservation],
                  let first = results.first else {
                // Either the results weren't classification observations
                // or the classifier returned nothing.
                print("ImageRecognizer: no classification results")
                return
            }
            onComplete(first.identifier)
        }

        handle(request, for: image)
    }

    // MARK: - Private

    /// Performs `request` against `image`. Extracted so that
    /// `get(from:onComplete:)` stays free of deep nesting.
    /// Failures are logged rather than fatal: a `perform` error is
    /// recoverable from the app's perspective, so the previous
    /// `fatalError` was overly aggressive.
    private static func handle(_ request: VNCoreMLRequest, for image: CGImage) {
        do {
            try VNImageRequestHandler(cgImage: image, options: [:]).perform([request])
        } catch {
            print("ImageRecognizer: request handler failed — \(error.localizedDescription)")
        }
    }
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment