From fbde7d28c238a3ae4b568953db23763aaf2fb03b Mon Sep 17 00:00:00 2001 From: Morgan Date: Thu, 17 Apr 2025 07:09:34 +0900 Subject: [PATCH] Added new models --- src/lib/providers/openai/models.json | 16 +++++ src/lib/providers/openai/models.svelte | 98 ++++---------------------- 2 files changed, 28 insertions(+), 86 deletions(-) create mode 100644 src/lib/providers/openai/models.json diff --git a/src/lib/providers/openai/models.json b/src/lib/providers/openai/models.json new file mode 100644 index 0000000..6c87045 --- /dev/null +++ b/src/lib/providers/openai/models.json @@ -0,0 +1,16 @@ +{ + "gpt-4.1": { "prompt": 2.0, "completion": 8.0, "max": 131072 }, + "gpt-4.1-mini": { "prompt": 0.4, "completion": 1.6, "max": 131072 }, + "gpt-4o-mini": { "prompt": 0.15, "completion": 0.6, "max": 131072 }, + "gpt-4o": { "prompt": 2.5, "completion": 10.0, "max": 131072 }, + "o1-mini": { "prompt": 1.1, "completion": 4.4, "max": 131072, "reasoning": true }, + "o1": { "prompt": 15.0, "completion": 60.0, "max": 200000, "reasoning": true }, + "o3-mini": { "prompt": 1.1, "completion": 4.4, "max": 200000, "reasoning": true }, + "o3": { "prompt": 10.0, "completion": 40.0, "max": 200000, "reasoning": true }, + "o4-mini": { "prompt": 1.1, "completion": 4.4, "max": 200000, "reasoning": true }, + "claude-3-7-sonnet-20250219": { "prompt": 3.0, "completion": 15.0, "max": 200000 }, + "claude-3-5-sonnet-20241022": { "prompt": 3.75, "completion": 15.0, "max": 200000 }, + "claude-3-5-haiku-20241022": { "prompt": 1.0, "completion": 4.0, "max": 200000 }, + "deepseek-r1-distill-qwen-32b": { "prompt": 0.69, "completion": 0.69, "max": 16384 }, + "deepseek-r1-distill-llama-70b": { "prompt": 3.0, "completion": 3.0, "max": 4096 } +} \ No newline at end of file diff --git a/src/lib/providers/openai/models.svelte b/src/lib/providers/openai/models.svelte index a20f6ea..57e9999 100644 --- a/src/lib/providers/openai/models.svelte +++ b/src/lib/providers/openai/models.svelte @@ -9,6 +9,7 @@ import { checkModel 
} from "./util.svelte"; import { encode } from "gpt-tokenizer"; import { get } from "svelte/store"; + import chatModelsJson from './models.json'; const hiddenSettings = { startSequence: true, @@ -45,7 +46,7 @@ }, countPromptTokens: (prompts: Message[], model: Model, chat: Chat): number => { return ( - prompts.reduce((a, m) => { + prompts.reduce((a, m) => { a += countMessageTokens(m, model, chat); return a; }, 0) + 3 @@ -53,92 +54,17 @@ }, } as ModelDetail; - export const chatModels: Record<string, ModelDetail> = { + export const chatModels: Record<string, ModelDetail> = {}; - // OpenAI Models - - "gpt-4o-mini": { - ...chatModelBase, - prompt: 0.15 / 1_000_000, - completion: 0.6 / 1_000_000, - max: 131072, - }, - "gpt-4o": { - ...chatModelBase, - prompt: 2.5 / 1_000_000, - completion: 10 / 1_000_000, - max: 131072, - }, - "gpt-4.1": { - ...chatModelBase, - prompt: 2.0 / 1_000_000, - completion: 8.0 / 1_000_000, - max: 131072, - }, - "gpt-4.1-mini": { - ...chatModelBase, - prompt: 0.4 / 1_000_000, - completion: 1.6 / 1_000_000, - max: 131072, - }, - "o1-mini": { - ...chatModelBase, - reasoning: true, - prompt: 1.1 / 1_000_000, - completion: 4.4 / 1_000_000, - max: 131072, - }, - "o1": { - ...chatModelBase, - reasoning: true, - prompt: 15 / 1_000_000, - completion: 60 / 1_000_000, - max: 200000, - }, - "o3-mini": { - ...chatModelBase, - reasoning: true, - prompt: 1.1 / 1_000_000, - completion: 4.4 / 1_000_000, - max: 200000, - }, - - // Anthropic Models - - "claude-3-7-sonnet-20250219": { - ...chatModelBase, - prompt: 3 / 1_000_000, - completion: 15 / 1_000_000, - max: 200000, - }, - "claude-3-5-sonnet-20241022": { - ...chatModelBase, - prompt: 3.75 / 1_000_000, - completion: 15.0 / 1_000_000, - max: 200000, - }, - "claude-3-5-haiku-20241022": { - ...chatModelBase, - prompt: 1 / 1_000_000, - completion: 4 / 1_000_000, - max: 200000, - }, - - // Groq Models - - "deepseek-r1-distill-qwen-32b": { - ...chatModelBase, - prompt: 0.69 / 1_000_000, - completion: 0.69 / 1_000_000, - max: 16384, - }, - 
"deepseek-r1-distill-llama-70b": { - ...chatModelBase, - prompt: 3 / 1_000_000, - completion: 3 / 1_000_000, - max: 4096, - }, - }; + for (const [key, { prompt, completion, max, reasoning }] of Object.entries(chatModelsJson)) { + chatModels[key] = { + ...chatModelBase, + prompt: prompt / 1_000_000, + completion: completion / 1_000_000, + max, + ...(reasoning ? { reasoning } : {}), + }; + } const imageModelBase = { type: "image",