From fb2290308fff0d136df2570625484832fa5e3252 Mon Sep 17 00:00:00 2001
From: Webifi
-You are encouraged to set up a Petals server to share your GPU resources with the public swarm. Minimum requirements to contribute Llama 2 completions are a GTX 1080 8GB, but the larger/faster the better.
+You are encouraged to set up a Petals server to share your GPU resources with the public swarm. Minimum requirements to contribute Llama 2 completions are a GTX 1080 8GB, but the larger/faster the better.
-If you're receiving errors while using Petals, check swarm health and consider adding your GPU to the swarm to help.
+If you're receiving errors while using Petals, check swarm health and consider adding your GPU to the swarm to help.
 Because Petals uses a public swarm, do not send sensitive information when using Petals.
diff --git a/src/lib/Models.svelte b/src/lib/Models.svelte
index ad1b755..c33791a 100644
--- a/src/lib/Models.svelte
+++ b/src/lib/Models.svelte
@@ -1,403 +1,183 @@
\ No newline at end of file
diff --git a/src/lib/Settings.svelte b/src/lib/Settings.svelte
index 2f1c356..bd502c7 100644
--- a/src/lib/Settings.svelte
+++ b/src/lib/Settings.svelte
@@ -62,12 +62,8 @@ export const getExcludeFromProfile = () => {
   return excludeFromProfile
 }
 
-const isNotOpenAI = (chatId) => {
-  return getModelDetail(getChatSettings(chatId).model).type !== 'OpenAIChat'
-}
-
-const isNotPetals = (chatId) => {
-  return getModelDetail(getChatSettings(chatId).model).type !== 'Petals'
+const hideModelSetting = (chatId, setting) => {
+  return getModelDetail(getChatSettings(chatId).model).hideSetting(chatId, setting)
 }
 
 const gptDefaults = {
@@ -108,7 +104,7 @@ const defaults:ChatSettings = {
   hiddenPromptPrefix: '',
   hppContinuePrompt: '',
   hppWithSummaryPrompt: false,
-  imageGenerationSize: '',
+  imageGenerationModel: '',
   startSequence: '',
   stopSequence: '',
   aggressiveStop: true,
@@ -120,6 +116,7 @@ const defaults:ChatSettings = {
   systemMessageStart: '',
   systemMessageEnd: '',
   leadPrompt: '',
+  repetitionPenalty: 1,
   // useResponseAlteration: false,
   // responseAlterations: [],
   isDirty: false
@@ -142,12 +139,6 @@ const excludeFromProfile = {
   isDirty: true
 }
 
-export const imageGenerationSizes = [
-  '1024x1024', '512x512', '256x256'
-]
-
-export const imageGenerationSizeTypes = ['', ...imageGenerationSizes]
-
 export const chatSortOptions = {
   name: { text: 'Name', icon: faArrowDownAZ, value: '', sortFn: (a, b) => { return a.name < b.name ? -1 : a.name > b.name ? 1 : 0 } },
   created: { text: 'Created', icon: faArrowDown91, value: '', sortFn: (a, b) => { return ((b.created || 0) - (a.created || 0)) || (b.id - a.id) } },
@@ -363,16 +354,13 @@ const summarySettings: ChatSetting[] = [
     hide: (chatId) => getChatSettings(chatId).continuousChat !== 'summary'
   },
   {
-    key: 'imageGenerationSize',
-    name: 'Image Generation Size',
+    key: 'imageGenerationModel',
+    name: 'Image Generation Model',
     header: 'Image Generation',
     headerClass: 'is-info',
     title: 'Prompt an image with: show me an image of ...',
     type: 'select',
-    options: [
-      { value: '', text: 'OFF - Disable Image Generation' },
-      ...imageGenerationSizes.map(s => { return { value: s, text: s } })
-    ]
+    options: []
   }
 ]
 
@@ -427,13 +415,9 @@ const summarySettings: ChatSetting[] = [
 const modelSetting: ChatSetting & SettingSelect = {
   key: 'model',
   name: 'Model',
-  title: 'The model to use - GPT-3.5 is cheaper, but GPT-4 is more powerful.',
+  title: 'The model to use. Some may cost more than others.',
   header: (chatId) => {
-    if (isNotOpenAI(chatId)) {
-      return 'Below are the settings that can be changed for the API calls. See this overview to start, though not all settings translate to Petals.'
-    } else {
-      return 'Below are the settings that OpenAI allows to be changed for the API calls. See the OpenAI API docs for more details.'
-    }
+    return getModelDetail(getChatSettings(chatId).model).help
   },
   headerClass: 'is-warning',
   options: [],
@@ -453,7 +437,7 @@ const chatSettingsList: ChatSetting[] = [
     name: 'Stream Response',
     title: 'Stream responses as they are generated.',
     type: 'boolean',
-    hide: isNotOpenAI
+    hide: hideModelSetting
   },
   {
     key: 'temperature',
@@ -485,7 +469,7 @@ const chatSettingsList: ChatSetting[] = [
     max: 10,
     step: 1,
     type: 'number',
-    hide: isNotOpenAI
+    hide: hideModelSetting
   },
   {
     key: 'max_tokens',
@@ -497,7 +481,6 @@ const chatSettingsList: ChatSetting[] = [
     max: 32768,
     step: 1,
     type: 'number',
-    hide: isNotOpenAI,
     forceApi: true // Since default here is different than gpt default, will make sure we always send it
   },
   {
@@ -508,7 +491,7 @@ const chatSettingsList: ChatSetting[] = [
     max: 2,
     step: 0.2,
     type: 'number',
-    hide: isNotOpenAI
+    hide: hideModelSetting
   },
   {
     key: 'frequency_penalty',
@@ -518,8 +501,18 @@ const chatSettingsList: ChatSetting[] = [
     max: 2,
     step: 0.2,
     type: 'number',
-    hide: isNotOpenAI
+    hide: hideModelSetting
   },
+  // {
+  //   key: 'repetitionPenalty',
+  //   name: 'Repetition Penalty',
+  //   title: 'Number between 1.0 and infinity. Penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics.',
+  //   min: 0,
+  //   max: 1000,
+  //   step: 0.1,
+  //   type: 'number',
+  //   hide: isNotPetals
+  // },
   {
     key: 'startSequence',
     name: 'Start Sequence',
@@ -529,25 +522,25 @@ const chatSettingsList: ChatSetting[] = [
       const val = getModelDetail(getChatSettings(chatId).model).start
       return val || ''
     },
-    hide: isNotPetals
+    hide: hideModelSetting
   },
   {
     key: 'stopSequence',
-    name: 'Stop Sequence',
-    title: 'Characters used to signal end of message chain.',
-    type: 'text',
+    name: 'Stop Sequences',
+    title: 'Characters used to signal end of message chain. Separate multiple with a comma.',
+    type: 'textarea',
     placeholder: (chatId) => {
       const val = getModelDetail(getChatSettings(chatId).model).stop
-      return (val && val[0]) || ''
+      return (val && val.join(',')) || ''
     },
-    hide: isNotPetals
+    hide: hideModelSetting
   },
   {
     key: 'aggressiveStop',
     name: 'Use aggressive stop',
     title: 'Sometimes generation can continue even after a stop sequence. This will stop generation client side if generation continues after stop sequence.',
     type: 'boolean',
-    hide: isNotPetals
+    hide: hideModelSetting
   },
   {
     key: 'deliminator',
@@ -558,7 +551,7 @@ const chatSettingsList: ChatSetting[] = [
       const val = getModelDetail(getChatSettings(chatId).model).deliminator
       return val || ''
     },
-    hide: isNotPetals
+    hide: hideModelSetting
   },
   {
     key: 'userMessageStart',
@@ -569,7 +562,7 @@ const chatSettingsList: ChatSetting[] = [
      const val = getModelDetail(getChatSettings(chatId).model).userStart
       return val || ''
     },
-    hide: isNotPetals
+    hide: hideModelSetting
   },
   {
     key: 'userMessageEnd',
@@ -580,7 +573,7 @@ const chatSettingsList: ChatSetting[] = [
       const val = getModelDetail(getChatSettings(chatId).model).userEnd
       return val || ''
     },
-    hide: isNotPetals
+    hide: hideModelSetting
   },
   {
     key: 'assistantMessageStart',
@@ -591,7 +584,7 @@ const chatSettingsList: ChatSetting[] = [
       const val = getModelDetail(getChatSettings(chatId).model).assistantStart
       return val || ''
     },
-    hide: isNotPetals
+    hide: hideModelSetting
   },
   {
     key: 'assistantMessageEnd',
@@ -602,7 +595,7 @@ const chatSettingsList: ChatSetting[] = [
       const val = getModelDetail(getChatSettings(chatId).model).assistantEnd
       return val || ''
     },
-    hide: isNotPetals
+    hide: hideModelSetting
   },
   {
     key: 'leadPrompt',
@@ -613,7 +606,7 @@ const chatSettingsList: ChatSetting[] = [
       const val = getModelDetail(getChatSettings(chatId).model).leadPrompt
       return val || ''
     },
-    hide: isNotPetals
+    hide: hideModelSetting
   },
   {
     key: 'systemMessageStart',
@@ -624,7 +617,7 @@ const chatSettingsList: ChatSetting[] = [
       const val = getModelDetail(getChatSettings(chatId).model).systemStart
       return val || ''
     },
-    hide: isNotPetals
+    hide: hideModelSetting
   },
   {
     key: 'systemMessageEnd',
@@ -635,7 +628,7 @@ const chatSettingsList: ChatSetting[] = [
       const val = getModelDetail(getChatSettings(chatId).model).systemEnd
       return val || ''
     },
-    hide: isNotPetals
+    hide: hideModelSetting
   },
   {
     // logit bias editor not implemented yet
diff --git a/src/lib/Sidebar.svelte b/src/lib/Sidebar.svelte
index 29be54e..e090c9a 100644
--- a/src/lib/Sidebar.svelte
+++ b/src/lib/Sidebar.svelte
@@ -1,7 +1,7 @@
diff --git a/src/lib/providers/openai/models.svelte b/src/lib/providers/openai/models.svelte
new file mode 100644
index 0000000..bd15b64
--- /dev/null
+++ b/src/lib/providers/openai/models.svelte
@@ -0,0 +1,119 @@
+
\ No newline at end of file
diff --git a/src/lib/ChatRequestOpenAi.svelte b/src/lib/providers/openai/request.svelte
similarity index 51%
rename from src/lib/ChatRequestOpenAi.svelte
rename to src/lib/providers/openai/request.svelte
index 37495ef..d7be6a5 100644
--- a/src/lib/ChatRequestOpenAi.svelte
+++ b/src/lib/providers/openai/request.svelte
@@ -1,24 +1,24 @@
\ No newline at end of file
diff --git a/src/lib/providers/openai/util.svelte b/src/lib/providers/openai/util.svelte
new file mode 100644
index 0000000..10a46e2
--- /dev/null
+++ b/src/lib/providers/openai/util.svelte
@@ -0,0 +1,60 @@
+
\ No newline at end of file
diff --git a/src/lib/providers/petals/models.svelte b/src/lib/providers/petals/models.svelte
new file mode 100644
index 0000000..f23b269
--- /dev/null
+++ b/src/lib/providers/petals/models.svelte
@@ -0,0 +1,72 @@
+
\ No newline at end of file
diff --git a/src/lib/ChatRequestPetals.svelte b/src/lib/providers/petals/request.svelte
similarity index 83%
rename from src/lib/ChatRequestPetals.svelte
rename to src/lib/providers/petals/request.svelte
index 8df2175..815b799 100644
--- a/src/lib/ChatRequestPetals.svelte
+++ b/src/lib/providers/petals/request.svelte
@@ -1,22 +1,23 @@
\ No newline at end of file
diff --git a/src/lib/providers/petals/util.svelte b/src/lib/providers/petals/util.svelte
new file mode 100644
index 0000000..9da7d56
--- /dev/null
+++ b/src/lib/providers/petals/util.svelte
@@ -0,0 +1,16 @@
+
\ No newline at end of file
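
The bodies of the new provider modules are truncated above, but the Settings.svelte hunks show the shape of the refactor: instead of hard-coded isNotOpenAI/isNotPetals checks, each model's provider now supplies the settings help text and decides per-setting visibility through hideSetting. The TypeScript sketch below reconstructs that pattern, plus the "aggressive stop" truncation the patch describes. It is an illustration only: the ModelDetail interface, the model ids, the setting-key lists, and the chat-format values are assumptions inferred from the calls visible in this patch (getModelDetail, getChatSettings, hideSetting, help, start, stop), not code from the elided files.

// Sketch of the provider-delegation pattern introduced by this patch.
// Everything below marked "placeholder" is hypothetical, not the project's code.

interface ModelDetail {
  type: 'OpenAIChat' | 'Petals'
  help: string                                  // header text for the settings panel
  hideSetting: (chatId: number, setting: { key: string }) => boolean
  start?: string                                // chat-format placeholder values
  stop?: string[]
}

// Settings the OpenAI chat API accepts; everything else is hidden for it.
const openAiKeys = new Set([
  'stream', 'temperature', 'top_p', 'n', 'max_tokens',
  'presence_penalty', 'frequency_penalty'
])

// Petals-style models instead expose the chat-format settings.
const petalsKeys = new Set([
  'temperature', 'top_p', 'max_tokens', 'startSequence', 'stopSequence',
  'aggressiveStop', 'deliminator', 'userMessageStart', 'userMessageEnd',
  'assistantMessageStart', 'assistantMessageEnd', 'leadPrompt',
  'systemMessageStart', 'systemMessageEnd'
])

const modelDetails: Record<string, ModelDetail> = {
  'gpt-3.5-turbo': {
    type: 'OpenAIChat',
    help: 'Below are the settings that OpenAI allows to be changed for the API calls.',
    hideSetting: (_chatId, setting) => !openAiKeys.has(setting.key)
  },
  'llama-2-70b-chat': {                         // placeholder model id
    type: 'Petals',
    help: 'Below are the settings that can be changed for the API calls.',
    hideSetting: (_chatId, setting) => !petalsKeys.has(setting.key),
    start: '<s>',                               // placeholder chat-format strings
    stop: ['</s>', '[INST]']
  }
}

const getModelDetail = (model: string): ModelDetail => modelDetails[model]

// Stand-in for the app's per-chat settings store.
const getChatSettings = (_chatId: number) => ({ model: 'llama-2-70b-chat' })

// The single helper that replaces isNotOpenAI/isNotPetals in the hunks above:
// every setting's `hide:` callback defers to the active model's provider.
const hideModelSetting = (chatId: number, setting: { key: string }): boolean =>
  getModelDetail(getChatSettings(chatId).model).hideSetting(chatId, setting)

// The 'Stop Sequences' placeholder, as changed above: arrays are comma-joined.
const stopPlaceholder = (chatId: number): string => {
  const val = getModelDetail(getChatSettings(chatId).model).stop
  return (val && val.join(',')) || ''
}

// Sketch of the 'aggressive stop' behavior described above: if a streamed
// completion keeps going past a stop sequence, truncate it client side.
const applyAggressiveStop = (text: string, stops: string[]): string => {
  let cut = text.length
  for (const stop of stops) {
    const i = text.indexOf(stop)
    if (i >= 0 && i < cut) cut = i
  }
  return text.slice(0, cut)
}

console.log(hideModelSetting(1, { key: 'stream' }))            // true for a Petals model
console.log(stopPlaceholder(1))                                // "</s>,[INST]"
console.log(applyAggressiveStop('Hi there</s> extra', ['</s>'])) // "Hi there"

The payoff of this shape is visible throughout the Settings.svelte hunks: every per-provider `hide:` predicate collapses to the one hideModelSetting helper, and supporting a third provider means adding one ModelDetail entry rather than another isNotX check scattered across the settings list.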