Change max tokens on model change

Webifi committed 2023-06-02 06:05:50 -05:00
parent 0b8a997b0e
commit f5ef5ff5db
4 changed files with 8 additions and 5 deletions

File 1 of 4

@@ -36,7 +36,7 @@
 } from '@fortawesome/free-solid-svg-icons/index'
 // import { encode } from 'gpt-tokenizer'
 import { v4 as uuidv4 } from 'uuid'
-import { countPromptTokens, getMaxModelPrompt, getPrice } from './Stats.svelte'
+import { countPromptTokens, getModelMaxTokens, getPrice } from './Stats.svelte'
 import { autoGrowInputOnEvent, sizeTextElements } from './Util.svelte'
 import ChatSettingsModal from './ChatSettingsModal.svelte'
 import Footer from './Footer.svelte'
@@ -148,7 +148,7 @@
 updating = true
 const model = chat.settings.model || defaultModel
-const maxTokens = getMaxModelPrompt(model) // max tokens for model
+const maxTokens = getModelMaxTokens(model) // max tokens for model
 let response: Response
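
For orientation, the value returned by the renamed helper is the model's total context size, which the prompt and the completion have to share. A minimal sketch of that budget arithmetic; the reserve split below is an assumption for illustration, not code from this commit:

// Sketch only: how a caller might split the context window between prompt
// and reply. The helper call in the trailing comment mirrors the import
// shown above; the split itself is an illustrative assumption.
const promptBudget = (modelMaxTokens: number, replyTokens: number): number =>
  modelMaxTokens - replyTokens

// e.g. promptBudget(getModelMaxTokens(model), chatSettings.max_tokens)
// leaves whatever the reply may consume out of the space the prompt can use.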

File 2 of 4

@@ -27,6 +27,7 @@
 import { exportProfileAsJSON } from './Export.svelte'
 import { afterUpdate } from 'svelte'
 import ChatSettingField from './ChatSettingField.svelte'
+import { getModelMaxTokens } from './Stats.svelte';
 export let chatId:number
 export const show = () => { showSettings() }
@@ -123,6 +124,7 @@
 const profileSelect = getChatSettingObjectByKey('profile') as ChatSetting & SettingSelect
 profileSelect.options = getProfileSelect()
 chatDefaults.profile = getDefaultProfileKey()
+chatDefaults.max_tokens = getModelMaxTokens(chatSettings.model||'')
 // const defaultProfile = globalStore.defaultProfile || profileSelect.options[0].value
 }
@@ -192,7 +194,7 @@
 const setDirty = () => {
 chatSettings.isDirty = true
 }
 </script>
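
With the added line, the default for max_tokens is no longer a fixed constant: whenever the modal rebuilds its defaults it asks the Stats helper for the currently selected model's limit. A rough sketch of that behavior, assuming the Model type lives in a Types module (the import path below is a guess) and simplifying chatDefaults to the one field that matters here:

import { getModelMaxTokens } from './Stats.svelte'
import type { Model } from './Types.svelte'  // assumed path; adjust to wherever Model is declared

// Simplified stand-in for the real defaults object.
const chatDefaults = { max_tokens: 512 }

// Re-derive the default whenever the chat's model changes, falling back to an
// empty model id exactly as the added line above does (sketch only).
const syncMaxTokensDefault = (model: Model | undefined) => {
  chatDefaults.max_tokens = getModelMaxTokens(model || '' as Model)
}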

File 3 of 4

@@ -277,7 +277,8 @@ const chatSettingsList: ChatSetting[] = [
 max: 32768,
 step: 128,
 type: 'number',
-forceApi: true // Since default here is different than gpt default, will make sure we always send it
+forceApi: true, // Since default here is different than gpt default, will make sure we always send it
+afterChange: (chatId, setting) => true, // refresh settings
 },
 {
 key: 'presence_penalty',
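
The second added line gives the max_tokens setting an afterChange hook. Returning true presumably tells the settings modal to rebuild dependent values, such as the model-driven default above, after the field changes; that dispatch contract is an assumption, so the sketch below is illustrative rather than the modal's actual code:

// Assumed shape of the hook and of the code that consumes it; only the
// (chatId, setting) => true signature is taken from the diff itself.
type ChatSettingSketch = {
  key: string
  afterChange?: (chatId: number, setting: ChatSettingSketch) => boolean
}

const applySettingChange = (chatId: number, setting: ChatSettingSketch, refreshSettings: () => void) => {
  const needsRefresh = setting.afterChange?.(chatId, setting) ?? false
  if (needsRefresh) refreshSettings()  // e.g. re-run the defaults update shown in the modal
}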

File 4 of 4

@@ -38,7 +38,7 @@
 }, 0) + 3
 }
-export const getMaxModelPrompt = (model:Model):number => {
+export const getModelMaxTokens = (model:Model):number => {
 return getModelDetail(model)[2]
 }
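
getModelDetail(model)[2] implies a per-model detail tuple whose third slot is the token limit. A stand-in sketch of what that lookup might look like; the tuple layout and the figures are assumptions for illustration, not values taken from this commit:

type Model = string  // simplified stand-in for the repo's Model type

// Assumed layout: [prompt price per token, completion price per token, max tokens];
// the concrete numbers below are illustrative, not read from this repository.
const modelDetails: Record<Model, [number, number, number]> = {
  'gpt-3.5-turbo': [0.0000015, 0.000002, 4096],
  'gpt-4': [0.00003, 0.00006, 8192]
}

const getModelDetail = (model: Model) => modelDetails[model] || modelDetails['gpt-3.5-turbo']

// Mirrors the renamed export above: index 2 of the detail tuple is the model's
// maximum combined prompt + completion token count.
const getModelMaxTokens = (model: Model): number => getModelDetail(model)[2]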