<!-- chatgpt-web/src/lib/Stats.svelte -->
<script context="module" lang="ts">
import { countTokens, getModelDetail, getRoleTag } from './Models.svelte'
import type { ChatSettings, Message, Model, Usage } from './Types.svelte'
// Dollar cost of a usage record: tokens consumed times the model's
// per-token rates for prompt and completion respectively.
export const getPrice = (tokens: Usage, model: Model): number => {
  const rates = getModelDetail(model)
  const promptCost = tokens.prompt_tokens * rates.prompt
  const completionCost = tokens.completion_tokens * rates.completion
  return promptCost + completionCost
}
// Total token count for a list of prompt messages, including any
// per-request overhead the model type adds on top of the per-message counts.
export const countPromptTokens = (prompts:Message[], model:Model, settings: ChatSettings):number => {
  const detail = getModelDetail(model)
  let total = 0
  for (const prompt of prompts) {
    total += countMessageTokens(prompt, model, settings)
  }
  // Petals models have no extra request overhead.
  if (detail.type === 'Petals') return total
  // OpenAIChat (and any unknown type):
  // Not sure how OpenAI formats it, but this seems to get close to the right counts.
  // Would be nice to know. This works for gpt-3.5. gpt-4 could be different.
  // Complete stab in the dark here -- update if you know where all the extra tokens really come from.
  return total + 3 // Always seems to be message counts + 3
}
// Token count for a single message, serialized the way the target
// model type expects to see it.
export const countMessageTokens = (message:Message, model:Model, settings: ChatSettings):number => {
  const detail = getModelDetail(model)
  if (detail.type === 'Petals') {
    // Wrap the message in the model's start/stop markers (falling back to
    // '' and '###' when the model doesn't define them).
    const startTag = (detail.start && detail.start[0]) || ''
    const stopTag = (detail.stop && detail.stop[0]) || '###'
    const serialized = startTag + getRoleTag(message.role, model, settings) + ': ' + message.content + stopTag
    return countTokens(model, serialized)
  }
  // OpenAIChat (and any unknown type):
  // Not sure how OpenAI formats it, but this seems to get close to the right counts.
  // Would be nice to know. This works for gpt-3.5. gpt-4 could be different.
  // Complete stab in the dark here -- update if you know where all the extra tokens really come from.
  const approximation = '## ' + message.role + ' ##:\r\n\r\n' + message.content + '\r\n\r\n\r\n'
  return countTokens(model, approximation)
}
// Maximum context size (in tokens) reported for the given model.
export const getModelMaxTokens = (model:Model):number => {
  const { max } = getModelDetail(model)
  return max
}
</script>