Add a tooltip to each of the chat settings, showing the info about each setting from the official documentation.
parent 7924dd8bca
commit e84ebf8a2d
@@ -47,6 +47,9 @@
       key: 'temperature',
       name: 'Sampling Temperature',
       default: 1,
+      title: 'What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n' +
+              '\n' +
+              'We generally recommend altering this or top_p but not both.',
       min: 0,
       max: 2,
       step: 0.1,
@@ -56,6 +59,9 @@
       key: 'top_p',
       name: 'Nucleus Sampling',
       default: 1,
+      title: 'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n' +
+              '\n' +
+              'We generally recommend altering this or temperature but not both',
       min: 0,
       max: 1,
       step: 0.1,
@@ -65,6 +71,7 @@
       key: 'n',
       name: 'Number of Messages',
       default: 1,
+      title: 'How many chat completion choices to generate for each input message.',
       min: 1,
       max: 10,
       step: 1,
@@ -73,6 +80,9 @@
     {
       key: 'max_tokens',
       name: 'Max Tokens',
+      title: 'The maximum number of tokens to generate in the completion.\n' +
+              '\n' +
+              'The token count of your prompt plus max_tokens cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).\n',
       default: 0,
       min: 0,
       max: 32768,
@@ -83,6 +93,7 @@
       key: 'presence_penalty',
       name: 'Presence Penalty',
       default: 0,
+      title: 'Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics.',
       min: -2,
       max: 2,
       step: 0.2,
@@ -92,6 +103,7 @@
       key: 'frequency_penalty',
       name: 'Frequency Penalty',
       default: 0,
+      title: 'Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim.',
       min: -2,
       max: 2,
       step: 0.2,
@@ -538,6 +550,7 @@
                   class="input"
                   inputmode="decimal"
                   type={setting.type}
+                  title="{setting.title}"
                   id="settings-{setting.key}"
                   min={setting.min}
                   max={setting.max}
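
For context, the pattern this commit relies on is simple: each settings entry gains a title string copied from the official API documentation, and the Svelte template passes it to the input's native title attribute, which the browser renders as a hover tooltip. Below is a minimal, self-contained sketch of that pattern; the component structure and variable names are illustrative, not taken from the diff.

<script>
  // Illustrative settings list; the real array defines many more entries.
  const settings = [
    {
      key: 'temperature',
      name: 'Sampling Temperature',
      type: 'number',
      default: 1,
      min: 0,
      max: 2,
      step: 0.1,
      // Tooltip text copied from the official API documentation.
      title: 'What sampling temperature to use, between 0 and 2. ' +
             'We generally recommend altering this or top_p but not both.'
    }
  ]
</script>

{#each settings as setting}
  <label for="settings-{setting.key}">{setting.name}</label>
  <!-- The native title attribute makes the browser show the text on hover. -->
  <input
    class="input"
    inputmode="decimal"
    type={setting.type}
    title={setting.title}
    id="settings-{setting.key}"
    min={setting.min}
    max={setting.max}
    step={setting.step}
    value={setting.default}
  />
{/each}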