Add correct pricing for GPT-4 model, store model in message
This commit is contained in:
parent
98fa296a97
commit
b6d2e4d1af
|
@ -7,10 +7,12 @@
|
||||||
type Response,
|
type Response,
|
||||||
type Message,
|
type Message,
|
||||||
type Settings,
|
type Settings,
|
||||||
supportedModels,
|
type Model,
|
||||||
type ResponseModels,
|
type ResponseModels,
|
||||||
type SettingsSelect,
|
type SettingsSelect,
|
||||||
type Chat
|
type Chat,
|
||||||
|
type Usage,
|
||||||
|
supportedModels
|
||||||
} from './Types.svelte'
|
} from './Types.svelte'
|
||||||
import Code from './Code.svelte'
|
import Code from './Code.svelte'
|
||||||
|
|
||||||
|
@ -23,22 +25,24 @@
|
||||||
|
|
||||||
export let params = { chatId: '' }
|
export let params = { chatId: '' }
|
||||||
const chatId: number = parseInt(params.chatId)
|
const chatId: number = parseInt(params.chatId)
|
||||||
let updating: boolean = false
|
|
||||||
|
|
||||||
|
let updating: boolean = false
|
||||||
let input: HTMLTextAreaElement
|
let input: HTMLTextAreaElement
|
||||||
let settings: HTMLDivElement
|
let settings: HTMLDivElement
|
||||||
let chatNameSettings: HTMLFormElement
|
let chatNameSettings: HTMLFormElement
|
||||||
let recognition: any = null
|
let recognition: any = null
|
||||||
let recording = false
|
let recording = false
|
||||||
|
|
||||||
const settingsMap: Settings[] = [
|
const modelSetting: Settings & SettingsSelect = {
|
||||||
{
|
|
||||||
key: 'model',
|
key: 'model',
|
||||||
name: 'Model',
|
name: 'Model',
|
||||||
default: 'gpt-3.5-turbo',
|
default: 'gpt-3.5-turbo',
|
||||||
options: supportedModels,
|
options: supportedModels,
|
||||||
type: 'select'
|
type: 'select'
|
||||||
},
|
}
|
||||||
|
|
||||||
|
let settingsMap: Settings[] = [
|
||||||
|
modelSetting,
|
||||||
{
|
{
|
||||||
key: 'temperature',
|
key: 'temperature',
|
||||||
name: 'Sampling Temperature',
|
name: 'Sampling Temperature',
|
||||||
|
@ -95,11 +99,23 @@
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
||||||
|
// Reference: https://openai.com/pricing#language-models
|
||||||
|
const tokenPrice : Record<string, [number, number]> = {
|
||||||
|
'gpt-4-32k': [0.00006, 0.00012], // $0.06 per 1000 tokens prompt, $0.12 per 1000 tokens completion
|
||||||
|
'gpt-4': [0.00003, 0.00006], // $0.03 per 1000 tokens prompt, $0.06 per 1000 tokens completion
|
||||||
|
'gpt-3.5': [0.000002, 0.000002] // $0.002 per 1000 tokens (both prompt and completion)
|
||||||
|
}
|
||||||
|
|
||||||
$: chat = $chatsStorage.find((chat) => chat.id === chatId) as Chat
|
$: chat = $chatsStorage.find((chat) => chat.id === chatId) as Chat
|
||||||
const tokenPrice = 0.000002 // $0.002 per 1000 tokens
|
|
||||||
|
onMount(async () => {
|
||||||
|
// Pre-select the last used model
|
||||||
|
if (chat.messages.length > 0) {
|
||||||
|
modelSetting.default = chat.messages[chat.messages.length - 1].model || modelSetting.default
|
||||||
|
settingsMap = settingsMap
|
||||||
|
}
|
||||||
|
|
||||||
// Focus the input on mount
|
// Focus the input on mount
|
||||||
onMount(async () => {
|
|
||||||
input.focus()
|
input.focus()
|
||||||
|
|
||||||
// Try to detect speech recognition support
|
// Try to detect speech recognition support
|
||||||
|
@ -208,6 +224,16 @@
|
||||||
return response
|
return response
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const getPrice = (tokens: Usage, model: Model) : number => {
|
||||||
|
for (const [key, [promptPrice, completionPrice]] of Object.entries(tokenPrice)) {
|
||||||
|
if (model.startsWith(key)) {
|
||||||
|
return ((tokens.prompt_tokens * promptPrice) + (tokens.completion_tokens * completionPrice))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
const submitForm = async (recorded: boolean = false): Promise<void> => {
|
const submitForm = async (recorded: boolean = false): Promise<void> => {
|
||||||
// Compose the input message
|
// Compose the input message
|
||||||
const inputMessage: Message = { role: 'user', content: input.value }
|
const inputMessage: Message = { role: 'user', content: input.value }
|
||||||
|
@ -229,7 +255,10 @@
|
||||||
})
|
})
|
||||||
} else {
|
} else {
|
||||||
response.choices.forEach((choice) => {
|
response.choices.forEach((choice) => {
|
||||||
|
// Store usage and model in the message
|
||||||
choice.message.usage = response.usage
|
choice.message.usage = response.usage
|
||||||
|
choice.message.model = response.model
|
||||||
|
|
||||||
// Remove whitespace around the message that the OpenAI API sometimes returns
|
// Remove whitespace around the message that the OpenAI API sometimes returns
|
||||||
choice.message.content = choice.message.content.trim()
|
choice.message.content = choice.message.content.trim()
|
||||||
addMessage(chatId, choice.message)
|
addMessage(chatId, choice.message)
|
||||||
|
@ -298,7 +327,7 @@
|
||||||
|
|
||||||
// Load available models from OpenAI
|
// Load available models from OpenAI
|
||||||
const allModels = (await (
|
const allModels = (await (
|
||||||
await fetch(import.meta.env.VITE_API_BASE + '/v1/models', {
|
await fetch(apiBase + '/v1/models', {
|
||||||
method: 'GET',
|
method: 'GET',
|
||||||
headers: {
|
headers: {
|
||||||
Authorization: `Bearer ${$apiKeyStorage}`,
|
Authorization: `Bearer ${$apiKeyStorage}`,
|
||||||
|
@ -306,10 +335,11 @@
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
).json()) as ResponseModels
|
).json()) as ResponseModels
|
||||||
const filteredModels = supportedModels.filter((model) => allModels.data.find((m) => m.id === model));
|
const filteredModels = supportedModels.filter((model) => allModels.data.find((m) => m.id === model))
|
||||||
|
|
||||||
// Update the models in the settings
|
// Update the models in the settings
|
||||||
(settingsMap[0] as SettingsSelect).options = filteredModels
|
modelSetting.options = filteredModels
|
||||||
|
settingsMap = settingsMap
|
||||||
}
|
}
|
||||||
|
|
||||||
const closeSettings = () => {
|
const closeSettings = () => {
|
||||||
|
@ -431,9 +461,9 @@
|
||||||
/>
|
/>
|
||||||
{#if message.usage}
|
{#if message.usage}
|
||||||
<p class="is-size-7">
|
<p class="is-size-7">
|
||||||
This message was generated using <span class="has-text-weight-bold">{message.usage.total_tokens}</span>
|
This message was generated on <em>{message.model || modelSetting.default}</em> using <span class="has-text-weight-bold">{message.usage.total_tokens}</span>
|
||||||
tokens ~=
|
tokens ~=
|
||||||
<span class="has-text-weight-bold">${(message.usage.total_tokens * tokenPrice).toFixed(6)}</span>
|
<span class="has-text-weight-bold">${getPrice(message.usage, message.model || modelSetting.default).toFixed(6)}</span>
|
||||||
</p>
|
</p>
|
||||||
{/if}
|
{/if}
|
||||||
</div>
|
</div>
|
||||||
|
|
|
@ -1,4 +1,14 @@
|
||||||
<script context="module" lang="ts">
|
<script context="module" lang="ts">
|
||||||
|
export const supportedModels = [ // See: https://platform.openai.com/docs/models/model-endpoint-compatibility
|
||||||
|
'gpt-4',
|
||||||
|
'gpt-4-0314',
|
||||||
|
'gpt-4-32k',
|
||||||
|
'gpt-4-32k-0314',
|
||||||
|
'gpt-3.5-turbo',
|
||||||
|
'gpt-3.5-turbo-0301'
|
||||||
|
]
|
||||||
|
export type Model = typeof supportedModels[number];
|
||||||
|
|
||||||
export type Usage = {
|
export type Usage = {
|
||||||
completion_tokens: number;
|
completion_tokens: number;
|
||||||
prompt_tokens: number;
|
prompt_tokens: number;
|
||||||
|
@ -9,6 +19,7 @@
|
||||||
role: 'user' | 'assistant' | 'system' | 'error';
|
role: 'user' | 'assistant' | 'system' | 'error';
|
||||||
content: string;
|
content: string;
|
||||||
usage?: Usage;
|
usage?: Usage;
|
||||||
|
model?: Model;
|
||||||
};
|
};
|
||||||
|
|
||||||
export type Chat = {
|
export type Chat = {
|
||||||
|
@ -17,17 +28,6 @@
|
||||||
messages: Message[];
|
messages: Message[];
|
||||||
};
|
};
|
||||||
|
|
||||||
// See: https://platform.openai.com/docs/models/model-endpoint-compatibility
|
|
||||||
export const supportedModels = [
|
|
||||||
'gpt-4',
|
|
||||||
'gpt-4-0314',
|
|
||||||
'gpt-4-32k',
|
|
||||||
'gpt-4-32k-0314',
|
|
||||||
'gpt-3.5-turbo',
|
|
||||||
'gpt-3.5-turbo-0301'
|
|
||||||
]
|
|
||||||
type Model = typeof supportedModels[number];
|
|
||||||
|
|
||||||
export type Request = {
|
export type Request = {
|
||||||
model?: Model;
|
model?: Model;
|
||||||
messages: Message[];
|
messages: Message[];
|
||||||
|
@ -72,6 +72,7 @@
|
||||||
finish_reason: string;
|
finish_reason: string;
|
||||||
}[];
|
}[];
|
||||||
usage: Usage;
|
usage: Usage;
|
||||||
|
model: Model;
|
||||||
};
|
};
|
||||||
|
|
||||||
type ResponseError = {
|
type ResponseError = {
|
||||||
|
|
Loading…
Reference in New Issue