I was able to convert the model and get results with the code below, and the Preview tab shows up in Xcode, but when I try an image in Preview it says "unable to retrieve results from the vision request".
import torch
import coremltools as ct
from fastai.vision.all import *
import json
from PIL import Image
from torchvision import transforms
# Load your Fastai model (replace with your actual path)
learn = load_learner('lemonmodel.pkl')
# Set the model to eval mode before tracing
learn.model.eval()
# Example input image (you can use any image from your dataset)
input_image = PILImage.create('example.jpg')
# Preprocess the image (assuming you used these transforms during training)
resize = transforms.Resize((192, 192)) # Resize image to match model input size (e.g., 192x192)
to_tensor = transforms.ToTensor()
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# Apply preprocessing: Resize, Convert to tensor, Normalize
input_image = resize(input_image) # Resize to expected size
input_tensor = to_tensor(input_image) # Convert to tensor
input_tensor = normalize(input_tensor) # Normalize with mean and std
# Add a batch dimension
input_tensor = input_tensor.unsqueeze(0)
# Ensure float32 type
input_tensor = input_tensor.float()
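# Note: this preprocessed tensor is only an example input for the tracing step below.
# torch.jit.trace records the model's ops, not these torchvision transforms, so the
# converted Core ML model has to reproduce them via the ImageType scale/bias.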
# Trace the model (using a batch size of 1)
trace = torch.jit.trace(learn.model, input_tensor)
# Define the Core ML input type (image type with correct shape for Core ML)
_input = ct.ImageType(
    name="input_1",
    shape=(1, 3, 192, 192),  # Correct shape for Core ML: [batch_size, channels, height, width]
    bias=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225],  # Mean normalization
    scale=1.0 / 255  # Scale normalization
)
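# Caveat (my assumption about Core ML's image preprocessing): the pixel values are
# transformed roughly as scale * pixel + bias per channel, with a single scalar scale.
# scale=1/255 only maps pixels to [0, 1] and never divides by the ImageNet std, so the
# model may not see the same normalization it was trained with. A common approximation
# folds an average std of ~0.226 into the scale, e.g.:
#
# _input = ct.ImageType(
#     name="input_1",
#     shape=(1, 3, 192, 192),
#     bias=[-0.485 / 0.226, -0.456 / 0.226, -0.406 / 0.226],
#     scale=1.0 / (255.0 * 0.226),
# )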
# Define the Core ML output type (we do NOT specify the shape; let Core ML infer it)
_output = ct.TensorType(
    name="output_1",  # Name for the output
)
# Convert the model to Core ML format
mlmodel = ct.convert(
    trace,
    inputs=[_input],
    outputs=[_output],  # Let Core ML infer the output shape
    minimum_deployment_target=ct.target.iOS14  # iOS deployment target
)
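# Note (assumption): with a plain TensorType output the saved model is a generic
# neural network, not a Core ML classifier with embedded labels, which may be why
# the Preview tab's Vision request cannot retrieve results; see the classifier-based
# sketch after this code.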
# Note: MLModel has no 'type' attribute in coremltools; the Preview tab type is
# set via the 'com.apple.coreml.model.preview.type' metadata key below.
# Define labels for classification
labels_json = {
    "labels": ["lemon", "lime"],  # Replace with your actual class labels
}
# Set the Preview tab metadata: preview params (class labels) and preview type
mlmodel.user_defined_metadata['com.apple.coreml.model.preview.params'] = json.dumps(labels_json)
mlmodel.user_defined_metadata["com.apple.coreml.model.preview.type"] = "imageClassifier"
mlmodel.input_description["input_1"] = "Input image to be classified"
mlmodel.output_description["output_1"] = "Classification probabilities for each label"
# Set additional metadata for the Xcode UI (optional)
mlmodel.author = "Your Name or Organization"
mlmodel.short_description = "A classifier for detecting lemon and lime in images."
mlmodel.version = "1.0"
# Save the model as .mlmodel
mlmodel.save("LemonClassifier333.mlmodel")
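One thing I suspect (not verified): the converted model exposes only a raw tensor output, while Xcode's imageClassifier preview and Vision seem to expect an actual classifier model with the class labels embedded. A possible alternative is to convert with coremltools' ClassifierConfig instead of a TensorType output; this is only a sketch under that assumption, reusing the same "lemon"/"lime" labels and an arbitrary filename:

# Sketch (untested): convert as a Core ML classifier so the Preview tab /
# Vision request can return labeled results.
classifier_config = ct.ClassifierConfig(["lemon", "lime"])  # embed class labels in the model

mlmodel_clf = ct.convert(
    trace,
    inputs=[_input],
    classifier_config=classifier_config,  # outputs become a class label + probability dict
    minimum_deployment_target=ct.target.iOS14,
)

# The Preview tab still needs the preview type metadata
mlmodel_clf.user_defined_metadata["com.apple.coreml.model.preview.type"] = "imageClassifier"
mlmodel_clf.short_description = "A classifier for detecting lemon and lime in images."
mlmodel_clf.save("LemonClassifier333_classifier.mlmodel")  # arbitrary name for this variant

Note that learn.model most likely ends in raw logits rather than softmax, so the scores shown in Preview would be unnormalized unless a softmax layer is appended to the model before tracing.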