Change max token on model change

parent 0b8a997b0e
commit f5ef5ff5db

@@ -36,7 +36,7 @@
   } from '@fortawesome/free-solid-svg-icons/index'
   // import { encode } from 'gpt-tokenizer'
   import { v4 as uuidv4 } from 'uuid'
-  import { countPromptTokens, getMaxModelPrompt, getPrice } from './Stats.svelte'
+  import { countPromptTokens, getModelMaxTokens, getPrice } from './Stats.svelte'
   import { autoGrowInputOnEvent, sizeTextElements } from './Util.svelte'
   import ChatSettingsModal from './ChatSettingsModal.svelte'
   import Footer from './Footer.svelte'

@@ -148,7 +148,7 @@
     updating = true

     const model = chat.settings.model || defaultModel
-    const maxTokens = getMaxModelPrompt(model) // max tokens for model
+    const maxTokens = getModelMaxTokens(model) // max tokens for model

     let response: Response

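getModelMaxTokens is consulted here so the request code knows the model's context limit before sending. The TypeScript sketch below only illustrates one way such a limit could be combined with countPromptTokens to keep a prompt inside that window; the Message type, the fitToContext helper, and the trimming strategy are assumptions for illustration, not code from this commit.

// Hypothetical helper: trim the oldest non-system messages until the prompt,
// plus a reserve for the reply, fits under the model's max token count.
type Message = { role: 'system' | 'user' | 'assistant', content: string }

const fitToContext = (
  messages: Message[],
  countPromptTokens: (msgs: Message[]) => number,
  maxTokens: number,
  reserveForReply: number
): Message[] => {
  const budget = maxTokens - reserveForReply
  const trimmed = [...messages]
  while (trimmed.length > 1 && countPromptTokens(trimmed) > budget) {
    const i = trimmed.findIndex(m => m.role !== 'system')
    if (i === -1) break // only system messages left; nothing safe to drop
    trimmed.splice(i, 1)
  }
  return trimmed
}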
				
			
@@ -27,6 +27,7 @@
   import { exportProfileAsJSON } from './Export.svelte'
   import { afterUpdate } from 'svelte'
   import ChatSettingField from './ChatSettingField.svelte'
+    import { getModelMaxTokens } from './Stats.svelte';

   export let chatId:number
   export const show = () => { showSettings() }

@@ -123,6 +124,7 @@
     const profileSelect = getChatSettingObjectByKey('profile') as ChatSetting & SettingSelect
     profileSelect.options = getProfileSelect()
     chatDefaults.profile = getDefaultProfileKey()
+    chatDefaults.max_tokens = getModelMaxTokens(chatSettings.model||'')
     // const defaultProfile = globalStore.defaultProfile || profileSelect.options[0].value
   }

@@ -277,7 +277,8 @@ const chatSettingsList: ChatSetting[] = [
         max: 32768,
         step: 128,
         type: 'number',
-        forceApi: true // Since default here is different than gpt default, will make sure we always send it
+        forceApi: true, // Since default here is different than gpt default, will make sure we always send it
+        afterChange: (chatId, setting) => true, // refresh settings
       },
       {
         key: 'presence_penalty',
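
A comma is added after forceApi so the new afterChange hook can follow it on the max_tokens setting; returning true from the hook asks the settings UI to refresh, which lets the field pick up the model-dependent default set above. The sketch below illustrates that contract only; the extra ChatSetting fields and the applyChatSetting / refreshSettingsView names are hypothetical, not part of this commit.

// Illustrative only: how a caller might honor an afterChange hook that
// returns true to request a settings refresh.
type ChatSetting = {
  key: string
  type: string
  forceApi?: boolean
  afterChange?: (chatId: number, setting: ChatSetting) => boolean
}

const refreshSettingsView = () => {
  // Stand-in for whatever re-renders the settings modal in the real app.
  console.log('settings refreshed')
}

const applyChatSetting = (chatId: number, setting: ChatSetting, value: unknown) => {
  console.log(`chat ${chatId}: ${setting.key} =`, value) // persistence omitted
  if (setting.afterChange?.(chatId, setting)) {
    refreshSettingsView()
  }
}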
				
			
@@ -38,7 +38,7 @@
     }, 0) + 3
   }

-  export const getMaxModelPrompt = (model:Model):number => {
+  export const getModelMaxTokens = (model:Model):number => {
     return getModelDetail(model)[2]
   }

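The rename spells out what the helper returns: the model's maximum token count, read from the third element of getModelDetail's result. The sketch below shows the shape that implies; only getModelDetail(model)[2] comes from the diff, while the tuple layout of the other slots and the example numbers are illustrative assumptions.

// Assumed layout: [prompt price per token, completion price per token, max tokens].
// The entries and values are placeholders, not the project's actual table.
type Model = string

const modelDetails: Record<Model, [number, number, number]> = {
  'gpt-3.5-turbo': [0.000002, 0.000002, 4096],
  'gpt-4': [0.00003, 0.00006, 8192]
}

const getModelDetail = (model: Model) => modelDetails[model]

export const getModelMaxTokens = (model: Model): number => {
  return getModelDetail(model)[2]
}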