Merge pull request #59 from shivan2418/add_tooltips_for_settings
add tooltip to each of the chat settings, explaining what it does
commit 662676adc9
@@ -37,6 +37,7 @@
     key: 'model',
     name: 'Model',
     default: 'gpt-3.5-turbo',
+    title: 'The model to use - GPT-3.5 is cheaper, but GPT-4 is more powerful.',
     options: supportedModels,
     type: 'select'
   }
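For context: each chat setting in this file is a plain object that the settings UI iterates over, and this commit adds a title field to each one. Assembled from the context lines above, the model entry now reads roughly as follows (modelSetting is a hypothetical name for illustration; in the source these objects sit inline in an array, and supportedModels is defined elsewhere in the file):

// Sketch of the 'model' settings entry after this change.
const modelSetting = {
  key: 'model',
  name: 'Model',
  default: 'gpt-3.5-turbo',
  title: 'The model to use - GPT-3.5 is cheaper, but GPT-4 is more powerful.',
  options: supportedModels,
  type: 'select'
};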
@@ -47,6 +48,9 @@
     key: 'temperature',
     name: 'Sampling Temperature',
     default: 1,
+    title: 'What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n' +
+      '\n' +
+      'We generally recommend altering this or top_p but not both.',
     min: 0,
     max: 2,
     step: 0.1,
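A note on the '\n' + concatenation above: it embeds a blank line in the tooltip, and most browsers preserve line breaks when rendering title attributes. The same string as a single template literal (an equivalent formulation, not the code in this diff):

const temperatureTitle = `What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.

We generally recommend altering this or top_p but not both.`;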
@@ -56,6 +60,9 @@
     key: 'top_p',
     name: 'Nucleus Sampling',
     default: 1,
+    title: 'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n' +
+      '\n' +
+      'We generally recommend altering this or temperature but not both',
     min: 0,
     max: 1,
     step: 0.1,
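The top_p tooltip describes nucleus sampling. As a rough illustration of the mechanic it names (a hypothetical helper, not code from this repo): keep the most probable candidate tokens until their cumulative probability reaches top_p, and discard the rest.

// Illustrative only: how nucleus (top_p) sampling restricts the candidate set.
function nucleusFilter(probs: { token: string; p: number }[], topP: number) {
  // Sort candidates by probability, most likely first.
  const sorted = [...probs].sort((a, b) => b.p - a.p);
  const kept: typeof sorted = [];
  let mass = 0;
  for (const t of sorted) {
    kept.push(t);
    mass += t.p;
    if (mass >= topP) break; // stop once top_p probability mass is covered
  }
  return kept; // with topP = 0.1, only the top 10% probability mass survives
}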
@@ -65,6 +72,7 @@
     key: 'n',
     name: 'Number of Messages',
     default: 1,
+    title: 'How many chat completion choices to generate for each input message.',
     min: 1,
     max: 10,
     step: 1,
@@ -73,6 +81,9 @@
   {
     key: 'max_tokens',
     name: 'Max Tokens',
+    title: 'The maximum number of tokens to generate in the completion.\n' +
+      '\n' +
+      'The token count of your prompt plus max_tokens cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).\n',
     default: 0,
     min: 0,
     max: 32768,
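The constraint this tooltip describes is simple arithmetic: the prompt and the completion share one context window. With illustrative numbers (hypothetical, following the tooltip's own figures):

const contextLength = 2048;  // context window of the older models the tooltip mentions
const promptTokens = 1900;   // hypothetical size of the prompt being sent
const maxCompletion = contextLength - promptTokens; // at most 148 tokens can be generated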
@@ -83,6 +94,7 @@
     key: 'presence_penalty',
     name: 'Presence Penalty',
     default: 0,
+    title: 'Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics.',
     min: -2,
     max: 2,
     step: 0.2,
@@ -92,6 +104,7 @@
     key: 'frequency_penalty',
     name: 'Frequency Penalty',
     default: 0,
+    title: 'Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim.',
     min: -2,
     max: 2,
     step: 0.2,
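The two penalty tooltips describe the same mechanism from different angles: frequency_penalty scales with how often a token has already appeared, while presence_penalty fires once a token has appeared at all. A sketch of how they act on a token's score, inferred from those descriptions (not code from this repo):

// Hedged sketch: how the two penalties adjust a candidate token's logit.
function penalizedLogit(
  logit: number,           // the token's raw score
  countSoFar: number,      // how many times the token already appeared
  presencePenalty: number,
  frequencyPenalty: number
): number {
  return logit
    - countSoFar * frequencyPenalty            // grows with each repetition
    - (countSoFar > 0 ? presencePenalty : 0);  // flat hit once the token has appeared
}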
@@ -538,6 +551,7 @@
         class="input"
         inputmode="decimal"
         type={setting.type}
+        title="{setting.title}"
         id="settings-{setting.key}"
         min={setting.min}
         max={setting.max}
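This one-line addition is the whole mechanism of the PR: title is a standard HTML global attribute, and browsers render its value as a hover tooltip, so no dedicated tooltip component is needed. At the DOM level the added attribute amounts to something like the following (the element id is hypothetical, derived from the settings-{setting.key} pattern above):

const input = document.getElementById('settings-temperature') as HTMLInputElement | null;
if (input) {
  // Browsers display the title attribute as a native hover tooltip.
  input.title = 'What sampling temperature to use, between 0 and 2.';
}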
@@ -546,7 +560,7 @@
       />
     {:else if setting.type === 'select'}
       <div class="select">
-        <select id="settings-{setting.key}">
+        <select id="settings-{setting.key}" title="{setting.title}">
           {#each setting.options as option}
             <option value={option} selected={option === setting.default}>{option}</option>
           {/each}
@@ -60,6 +60,7 @@
 export type Settings = {
   key: string;
   name: string;
+  title: string;
 } & (SettingsNumber | SettingsSelect);

 type ResponseOK = {
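SettingsNumber and SettingsSelect are not shown in this diff; their real definitions live elsewhere in the file. A hypothetical reconstruction of the full type, with the variant fields inferred from the settings objects earlier in this commit:

// Inferred sketch — field names taken from the settings entries above.
type SettingsNumber = {
  type: 'number';
  default: number;
  min: number;
  max: number;
  step?: number; // max_tokens has no step in the diff, so it is optional here
};

type SettingsSelect = {
  type: 'select';
  default: string;
  options: string[];
};

// Shared fields (key, name, and the new title) intersect with one variant.
export type Settings = {
  key: string;
  name: string;
  title: string;
} & (SettingsNumber | SettingsSelect);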