Merge pull request #59 from shivan2418/add_tooltips_for_settings

add tooltip to each of the chat settings, explaining what it does
Niek van der Maas 2023-03-21 19:51:34 +01:00 committed by GitHub
commit 662676adc9
2 changed files with 16 additions and 1 deletion
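
Every chat setting in the list gains a `title` string, and the Svelte template binds that string to the `title` attribute of the rendered `<input>` or `<select>`. Browsers display a `title` attribute as a native tooltip on hover, so the change needs no extra CSS or JavaScript. A minimal TypeScript sketch of that mechanism (illustrative, not part of the diff):

// Minimal sketch (not from this diff) of the native-tooltip mechanism the
// change relies on: any element with a `title` attribute gets a
// browser-rendered tooltip on hover, with no extra CSS or JavaScript.
const input = document.createElement('input');
input.type = 'number';
input.title = 'What sampling temperature to use, between 0 and 2.';
document.body.appendChild(input); // hovering the input now shows the tooltip text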

View File

@@ -37,6 +37,7 @@
     key: 'model',
     name: 'Model',
     default: 'gpt-3.5-turbo',
+    title: 'The model to use - GPT-3.5 is cheaper, but GPT-4 is more powerful.',
     options: supportedModels,
     type: 'select'
   }
@@ -47,6 +48,9 @@
     key: 'temperature',
     name: 'Sampling Temperature',
     default: 1,
+    title: 'What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n' +
+      '\n' +
+      'We generally recommend altering this or top_p but not both.',
     min: 0,
     max: 2,
     step: 0.1,
@@ -56,6 +60,9 @@
     key: 'top_p',
     name: 'Nucleus Sampling',
     default: 1,
+    title: 'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n' +
+      '\n' +
+      'We generally recommend altering this or temperature but not both.',
     min: 0,
     max: 1,
     step: 0.1,
@@ -65,6 +72,7 @@
     key: 'n',
     name: 'Number of Messages',
     default: 1,
+    title: 'How many chat completion choices to generate for each input message.',
     min: 1,
     max: 10,
     step: 1,
@@ -73,6 +81,9 @@
   {
     key: 'max_tokens',
     name: 'Max Tokens',
+    title: 'The maximum number of tokens to generate in the completion.\n' +
+      '\n' +
+      'The token count of your prompt plus max_tokens cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).\n',
     default: 0,
     min: 0,
     max: 32768,
@@ -83,6 +94,7 @@
     key: 'presence_penalty',
     name: 'Presence Penalty',
     default: 0,
+    title: 'Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics.',
     min: -2,
     max: 2,
     step: 0.2,
@@ -92,6 +104,7 @@
     key: 'frequency_penalty',
     name: 'Frequency Penalty',
     default: 0,
+    title: 'Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim.',
     min: -2,
     max: 2,
     step: 0.2,
@@ -538,6 +551,7 @@
       class="input"
       inputmode="decimal"
       type={setting.type}
+      title="{setting.title}"
       id="settings-{setting.key}"
       min={setting.min}
       max={setting.max}
@@ -546,7 +560,7 @@
     />
   {:else if setting.type === 'select'}
     <div class="select">
-      <select id="settings-{setting.key}">
+      <select id="settings-{setting.key}" title="{setting.title}">
         {#each setting.options as option}
           <option value={option} selected={option === setting.default}>{option}</option>
         {/each}
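
Design note: for the numeric inputs the tooltip is a new attribute line, while for the `<select>` the existing tag is modified in place; either way the text comes from the same `setting.title` field, so each setting and its help text stay defined in one place.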

View File

@@ -60,6 +60,7 @@
 export type Settings = {
   key: string;
   name: string;
+  title: string;
 } & (SettingsNumber | SettingsSelect);

 type ResponseOK = {
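
For context, a sketch of the resulting Settings type and an entry that satisfies it. The SettingsNumber and SettingsSelect shapes here are assumptions inferred from the fields visible in the hunks above (min/max/step for numeric settings, options for selects); only the new title field is confirmed by this diff.

// Assumed shapes, inferred from the diff above; not verbatim from the repo.
type SettingsNumber = { default: number, min: number, max: number, step: number, type: 'number' };
type SettingsSelect = { default: string, options: string[], type: 'select' };

export type Settings = {
  key: string;
  name: string;
  title: string; // new: shown as a hover tooltip
} & (SettingsNumber | SettingsSelect);

// Example entry in the style of the settings changed above:
const temperatureSetting: Settings = {
  key: 'temperature',
  name: 'Sampling Temperature',
  title: 'What sampling temperature to use, between 0 and 2.',
  default: 1,
  min: 0,
  max: 2,
  step: 0.1,
  type: 'number'
};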