From c0bc02fc48190ffa10ccf76030a98918bc3dda77 Mon Sep 17 00:00:00 2001 From: Mihail Yonchev <45242072+insertmike@users.noreply.github.com> Date: Fri, 12 Sep 2025 22:06:31 +0300 Subject: [PATCH] fix(models): use runtime device detection instead of hardcoded webgpu If device is not specified, transformers.js chooses based on environment (WebGPU when available, otherwise WASM). Avoids 'Unsupported device: webgpu' in non-WebGPU runtimes. --- next-vercel-ai-sdk/src/app/models.ts | 5 ----- 1 file changed, 5 deletions(-) diff --git a/next-vercel-ai-sdk/src/app/models.ts b/next-vercel-ai-sdk/src/app/models.ts index b92149c4..986006db 100644 --- a/next-vercel-ai-sdk/src/app/models.ts +++ b/next-vercel-ai-sdk/src/app/models.ts @@ -10,7 +10,6 @@ export const MODELS: ModelConfig[] = [ { id: "HuggingFaceTB/SmolLM2-360M-Instruct", name: "SmolLM2 360M", - device: "webgpu", dtype: "q4", supportsWorker: true, }, @@ -23,26 +22,22 @@ export const MODELS: ModelConfig[] = [ { id: "onnx-community/Qwen3-0.6B-ONNX", name: "Qwen3 0.6B", - device: "webgpu", dtype: "q4f16", supportsWorker: true, }, { id: "onnx-community/Llama-3.2-1B-Instruct-q4f16", name: "Llama 3.2 1B", - device: "webgpu", supportsWorker: true, }, { id: "onnx-community/DeepSeek-R1-Distill-Qwen-1.5B-ONNX", name: "Deepseek R1 Distill 1.5B", - device: "webgpu", dtype: "q4f16", }, { id: "HuggingFaceTB/SmolVLM-256M-Instruct", name: "SmolVLM 256M (Vision)", - device: "webgpu", dtype: "fp32", isVisionModel: true, supportsWorker: true,