Change default token limit to 512

commit 0b8a997b0e
parent 8e18a240ba
@@ -58,7 +58,7 @@ const gptDefaults = {
   n: 1,
   stream: false,
   stop: null,
-  max_tokens: 128,
+  max_tokens: 512,
   presence_penalty: 0,
   frequency_penalty: 0,
   logit_bias: null,
@@ -275,7 +275,7 @@ const chatSettingsList: ChatSetting[] = [
               'The token count of your prompt plus max_tokens cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).\n',
         min: 1,
         max: 32768,
-        step: 1024,
+        step: 128,
         type: 'number',
         forceApi: true // Since default here is different than gpt default, will make sure we always send it
       },
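The forceApi flag on the max_tokens setting is what makes this change work: the new UI default (512) no longer matches the OpenAI API default (128), so the value must always be sent rather than skipped as a default. Below is a minimal sketch of how such a flag could be honored when assembling the request body, assuming settings equal to the API default are normally omitted; every name here is hypothetical, not the repo's actual implementation.

interface ChatSetting {
  key: string
  forceApi?: boolean
}

// Hypothetical API defaults, mirroring gptDefaults in the diff above
const gptDefaults: Record<string, unknown> = { max_tokens: 128 }

function buildRequestBody (
  settings: ChatSetting[],
  values: Record<string, unknown>
): Record<string, unknown> {
  const body: Record<string, unknown> = {}
  for (const setting of settings) {
    const value = values[setting.key]
    if (value === undefined) continue
    // Omit values equal to the API default, unless forceApi is set
    if (!setting.forceApi && value === gptDefaults[setting.key]) continue
    body[setting.key] = value
  }
  return body
}

// With forceApi, max_tokens: 512 is always included in the request;
// without it, a value matching the API default of 128 would be dropped.
buildRequestBody([{ key: 'max_tokens', forceApi: true }], { max_tokens: 512 })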
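The setting description quoted in the second hunk also explains why the default cannot simply be made large: the prompt's token count plus max_tokens must not exceed the model's context length (2048 for most models, 4096 for the newest). A hypothetical helper, not taken from this repo, that enforces that constraint:

// Clamp a requested max_tokens to what the context window allows.
// 2048 is the common context length cited in the setting's description.
function clampMaxTokens (
  promptTokens: number,
  requested: number,
  contextLength = 2048
): number {
  return Math.max(1, Math.min(requested, contextLength - promptTokens))
}

clampMaxTokens(1700, 512) // 348: a 1700-token prompt leaves room for only 348 completion tokens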