2025-07-05 22:32:51 +09:00
parent 55a78b2a02
commit 574e04fa19
21 changed files with 556 additions and 541 deletions

package-lock.json generated (8 changed lines)
View File

@@ -30,7 +30,6 @@
"flourite": "^1.3.0",
"gpt-tokenizer": "^2.1.2",
"katex": "^0.16.10",
"llama-tokenizer-js": "^1.2.2",
"postcss": "^8.4.32",
"sass": "^1.77.6",
"stacking-order": "^2.0.0",
@@ -3834,13 +3833,6 @@
"node": ">= 0.8.0"
}
},
"node_modules/llama-tokenizer-js": {
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/llama-tokenizer-js/-/llama-tokenizer-js-1.2.2.tgz",
"integrity": "sha512-Wmth393dc3odWU3IzARJ3r2oIfWgw9GdJ5Gm+hGhfECNO18UHLRqEFSf511jn4E9KcQGzuuKw4Wl08pHAemLAw==",
"dev": true,
"license": "MIT"
},
"node_modules/locate-path": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz",

View File

@@ -36,7 +36,6 @@
"flourite": "^1.3.0",
"gpt-tokenizer": "^2.1.2",
"katex": "^0.16.10",
"llama-tokenizer-js": "^1.2.2",
"postcss": "^8.4.32",
"sass": "^1.77.6",
"stacking-order": "^2.0.0",

View File

@@ -7,7 +7,7 @@
import Home from './lib/Home.svelte'
import Chat from './lib/Chat.svelte'
import NewChat from './lib/NewChat.svelte'
import { chatsStorage, setGlobalSettingValueByKey } from './lib/Storage.svelte'
import { chatsStorage } from './lib/Storage.svelte'
import { Modals, closeModal } from 'svelte-modals'
import { dispatchModalEsc, checkModalEsc } from './lib/Util.svelte'
import { set as setOpenAI } from './lib/providers/openai/util.svelte'
@@ -19,10 +19,6 @@
if (urlParams.has('key')) {
setOpenAI({ apiKey: urlParams.get('key') as string })
}
if (urlParams.has('petals')) {
console.log('enablePetals')
setGlobalSettingValueByKey('enablePetals', true)
}
// The definition of the routes with some conditions
const routes = {

View File

@@ -2,18 +2,16 @@
import { persisted } from 'svelte-local-storage-store'
import { get } from 'svelte/store'
// This makes it possible to override the OpenAI API base URL in the .env file
const apiBaseStorage = persisted('apiBase', 'https://api.openai.com');
const apiBaseStorage = persisted('apiBase', 'https://api.openai.com')
const apiBase = get(apiBaseStorage) || 'https://api.openai.com';
const apiBase = get(apiBaseStorage) || 'https://api.openai.com'
const endpointCompletions = import.meta.env.VITE_ENDPOINT_COMPLETIONS || '/v1/chat/completions'
const endpointGenerations = import.meta.env.VITE_ENDPOINT_GENERATIONS || '/v1/images/generations'
const endpointModels = import.meta.env.VITE_ENDPOINT_MODELS || '/v1/models'
const endpointEmbeddings = import.meta.env.VITE_ENDPOINT_EMBEDDINGS || '/v1/embeddings'
const petalsBase = import.meta.env.VITE_PEDALS_WEBSOCKET || 'wss://chat.petals.dev'
const endpointPetals = import.meta.env.VITE_PEDALS_WEBSOCKET || '/api/v2/generate'
export const setApiBase = (e: Record<string>) => {
console.log(e);
export const setApiBase = (e: string) => {
console.log(e)
apiBaseStorage.set(e || '')
}
export const getApiBase = ():string => apiBase
@@ -21,6 +19,4 @@
export const getEndpointGenerations = ():string => endpointGenerations
export const getEndpointModels = ():string => endpointModels
export const getEndpointEmbeddings = ():string => endpointEmbeddings
export const getPetalsBase = ():string => petalsBase
export const getPetalsWebsocket = ():string => endpointPetals
</script>

View File

@@ -51,15 +51,25 @@
let recording = false
let lastSubmitRecorded = false
$: chat = $chatsStorage.find((chat) => chat.id === chatId) as Chat
$: chatSettings = chat?.settings
// Optimize chat lookup to avoid expensive find() on every chats update
let chat: Chat
let chatSettings: ChatSettings
let showSettingsModal
let scDelay
// Only update chat when chatId changes or when the specific chat is updated
$: {
const foundChat = $chatsStorage.find((c) => c.id === chatId)
if (foundChat && (!chat || chat.id !== foundChat.id || chat !== foundChat)) {
chat = foundChat
chatSettings = foundChat.settings
}
}
let scDelay: any
const onStateChange = (...args:any) => {
if (!chat) return
clearTimeout(scDelay)
setTimeout(() => {
if (scDelay) clearTimeout(scDelay)
scDelay = setTimeout(() => {
if (chat.startSession) {
restartProfile(chatId)
if (chat.startSession) {
@@ -101,6 +111,11 @@
onDestroy(async () => {
// clean up
// Clear timer to prevent memory leaks
if (scDelay) {
clearTimeout(scDelay)
scDelay = null
}
// abort any pending requests.
chatRequest.controller.abort()
ttsStop()
@@ -286,10 +301,10 @@
chatRequest.updatingMessage = ''
const userMessagesCount = chat.messages.filter(message => message.role === "user").length;
const assiMessagesCount = chat.messages.filter(message => message.role === "assistant").length;
if (userMessagesCount == 3 && chat.name.startsWith("Chat ")) {
suggestName();
const userMessagesCount = chat.messages.filter(message => message.role === 'user').length
const assiMessagesCount = chat.messages.filter(message => message.role === 'assistant').length
if (userMessagesCount == 3 && chat.name.startsWith('Chat ')) {
suggestName()
}
focusInput()
@@ -305,7 +320,7 @@
const suggestMessages = $currentChatMessages.slice(0, 4)
suggestMessages.push(suggestMessage)
const currentModel = chat.settings.model;
const currentModel = chat.settings.model
// chat.settings.model = "gpt-4o";
chatRequest.updating = true
@@ -318,7 +333,7 @@
maxTokens: 30
})
chat.settings.model = currentModel;
chat.settings.model = currentModel
try {
await response.promiseToFinish()
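For reference, the scDelay change in this file is the standard Svelte debounce-with-cleanup pattern: keep the timer handle in a component variable, reset it on every state change, and clear it in onDestroy so the callback can never fire after teardown. A minimal illustrative sketch, not part of this commit (the component and handler names are hypothetical):

<script lang="ts">
  import { onDestroy } from 'svelte'

  let delay: ReturnType<typeof setTimeout> | null = null

  const onStateChange = () => {
    if (delay) clearTimeout(delay) // restart the debounce window on every call
    delay = setTimeout(() => {
      delay = null
      console.log('state settled') // expensive work goes here
    }, 250)
  }

  onDestroy(() => {
    // Cancel any pending callback so it cannot run after the component is destroyed.
    if (delay) {
      clearTimeout(delay)
      delay = null
    }
  })
</script>

<button on:click={onStateChange}>poke</button>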

View File

@@ -157,74 +157,72 @@
reader.readAsText(image)
}
function dumpLocalStorage(){
try {
let storageObject = {};
function dumpLocalStorage () {
try {
const storageObject = {}
for (let i = 0; i < localStorage.length; i++) {
const key = localStorage.key(i);
const key = localStorage.key(i)
if (key) {
storageObject[key] = localStorage.getItem(key);
storageObject[key] = localStorage.getItem(key)
}
}
const dataStr = JSON.stringify(storageObject, null, 2);
const blob = new Blob([dataStr], { type: "application/json" });
const url = URL.createObjectURL(blob);
const link = document.createElement("a");
link.href = url;
const now = new Date();
const dateTimeStr = now.toISOString().replace(/:\d+\.\d+Z$/, '').replace(/-|:/g, '_');
link.download = `ChatGPT-web-${dateTimeStr}.json`;
document.body.appendChild(link);
link.click();
document.body.removeChild(link);
URL.revokeObjectURL(url);
const dataStr = JSON.stringify(storageObject, null, 2)
const blob = new Blob([dataStr], { type: 'application/json' })
const url = URL.createObjectURL(blob)
const link = document.createElement('a')
link.href = url
const now = new Date()
const dateTimeStr = now.toISOString().replace(/:\d+\.\d+Z$/, '').replace(/-|:/g, '_')
link.download = `ChatGPT-web-${dateTimeStr}.json`
document.body.appendChild(link)
link.click()
document.body.removeChild(link)
URL.revokeObjectURL(url)
} catch (error) {
console.error('Error dumping localStorage:', error);
console.error('Error dumping localStorage:', error)
}
}
function loadLocalStorage() {
var fileInput = document.createElement('input');
fileInput.type = "file";
fileInput.addEventListener('change', function(e) {
var file = e.target.files[0];
function loadLocalStorage () {
const fileInput = document.createElement('input')
fileInput.type = 'file'
fileInput.addEventListener('change', function (e) {
const file = e.target.files[0]
if (file) {
var reader = new FileReader();
reader.onload = function(e) {
var data = JSON.parse(e.target.result);
Object.keys(data).forEach(function(key) {
localStorage.setItem(key, data[key]);
});
window.location.reload();
};
reader.readAsText(file);
const reader = new FileReader()
reader.onload = function (e) {
const data = JSON.parse(e.target.result)
Object.keys(data).forEach(function (key) {
localStorage.setItem(key, data[key])
})
window.location.reload()
}
reader.readAsText(file)
}
});
document.body.appendChild(fileInput);
fileInput.click();
fileInput.remove();
})
document.body.appendChild(fileInput)
fileInput.click()
fileInput.remove()
}
function backupLocalStorage() {
try {
let storageObject = {};
function backupLocalStorage () {
try {
const storageObject = {}
for (let i = 0; i < localStorage.length; i++) {
const key = localStorage.key(i);
const key = localStorage.key(i)
if (key) {
storageObject[key] = localStorage.getItem(key);
storageObject[key] = localStorage.getItem(key)
}
}
const dataStr = JSON.stringify(storageObject, null, 2);
const now = new Date();
const dateTimeStr = now.toISOString().replace(/:\d+\.\d+Z$/, '').replace(/-|:/g, '_');
localStorage.setItem(`prev-${dateTimeStr}`, dataStr);
const dataStr = JSON.stringify(storageObject, null, 2)
const now = new Date()
const dateTimeStr = now.toISOString().replace(/:\d+\.\d+Z$/, '').replace(/-|:/g, '_')
localStorage.setItem(`prev-${dateTimeStr}`, dataStr)
} catch (error) {
console.error('Error backing up localStorage:', error);
console.error('Error backing up localStorage:', error)
}
}
}
</script>

View File

@@ -196,7 +196,7 @@ export class ChatRequest {
if (value > maxAllowed || value < 1) value = null // if over max model, do not define max
if (value) value = Math.floor(value)
if (modelDetail.reasoning == true) {
key = 'max_completion_tokens';
key = 'max_completion_tokens'
}
}
if (key === 'n') {
@@ -351,12 +351,21 @@ export class ChatRequest {
* *************************************************************
*/
let promptSize = countPromptTokens(top.concat(rw), model, chat) + countPadding
// Pre-calculate top tokens once to avoid repeated calculations
const topTokens = countPromptTokens(top, model, chat)
let rwTokens = countPromptTokens(rw, model, chat)
let promptSize = topTokens + rwTokens + countPadding
while (rw.length && rw.length > pinBottom && promptSize >= threshold) {
const rolled = rw.shift()
// Hide messages we're "rolling"
if (rolled) rolled.suppress = true
promptSize = countPromptTokens(top.concat(rw), model, chat) + countPadding
if (rolled) {
// Hide messages we're "rolling"
rolled.suppress = true
// Subtract only the rolled message tokens instead of recalculating all
const rolledTokens = countMessageTokens(rolled, model, chat)
rwTokens -= rolledTokens
promptSize = topTokens + rwTokens + countPadding
}
}
// Run a new request, now with the rolled messages hidden
return await _this.sendRequest(get(currentChatMessages), {
@@ -386,8 +395,11 @@ export class ChatRequest {
// the last prompt is a user prompt as that seems to work better for summaries
while (rw.length > 2 && ((topSize + reductionPoolSize + promptSummarySize + maxSummaryTokens) >= maxTokens ||
(reductionPoolSize >= 100 && rw[rw.length - 1]?.role !== 'user'))) {
bottom.unshift(rw.pop() as Message)
reductionPoolSize = countPromptTokens(rw, model, chat)
const removed = rw.pop() as Message
bottom.unshift(removed)
// Optimize: subtract removed message tokens instead of recalculating all
const removedTokens = countMessageTokens(removed, model, chat)
reductionPoolSize -= removedTokens
maxSummaryTokens = getSS()
promptSummary = prepareSummaryPrompt(chatId, maxSummaryTokens)
summaryRequest.content = promptSummary
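The two hunks above replace repeated countPromptTokens passes over the whole message window with a running total that only subtracts the tokens of the message just removed, turning an O(n²) loop into O(n). A rough standalone sketch of that bookkeeping, using a stand-in per-item counter (countItem and trimToBudget are hypothetical names, not part of this commit):

// Hypothetical sketch: count once up front, then keep a running total
// and subtract per removed item instead of recounting the whole list.
const countItem = (s: string): number => Math.ceil(s.length / 4) // stand-in token counter

function trimToBudget (items: string[], budget: number): string[] {
  const kept = [...items]
  let total = kept.reduce((a, s) => a + countItem(s), 0)
  while (kept.length && total >= budget) {
    const removed = kept.shift() as string
    total -= countItem(removed) // O(1) update per iteration
  }
  return kept
}

// Oldest entries are dropped until the running total falls below the budget.
console.log(trimToBudget(['aaaa'.repeat(10), 'bbbb'.repeat(10), 'cc'], 20))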

View File

@@ -18,7 +18,7 @@
export const codeBlockStyle: 'indented' | undefined = undefined
export let text: string
let renderedMath: string | undefined;
let renderedMath: string | undefined
// For copying code - reference: https://vyacheslavbasharov.com/blog/adding-click-to-copy-code-markdown-blog
const copyFunction = (event) => {

View File

@@ -4,21 +4,21 @@
import renderMathInElement from 'katex/contrib/auto-render'
let renderedMath: string | undefined
if ( raw.startsWith('`\\(') || raw.startsWith('`\\[') || raw.startsWith('`$') || raw.startsWith('`$$') ) {
let dummy = document.createElement("div")
if (raw.startsWith('`\\(') || raw.startsWith('`\\[') || raw.startsWith('`$') || raw.startsWith('`$$')) {
const dummy = document.createElement('div')
dummy.textContent = raw.replace(/`/g, '')
renderMathInElement(dummy, {
delimiters: [
{left: '\\(', right: '\\)', display: false},
{left: '\\[', right: '\\]', display: true},
{left: '$', right: '$', display: false},
{left: '$$', right: '$$', display: true}
],
throwOnError : false,
output: "html"
})
renderedMath = dummy.innerHTML;
dummy.remove();
delimiters: [
{ left: '\\(', right: '\\)', display: false },
{ left: '\\[', right: '\\]', display: true },
{ left: '$', right: '$', display: false },
{ left: '$$', right: '$$', display: true }
],
throwOnError: false,
output: 'html'
})
renderedMath = dummy.innerHTML
dummy.remove()
}
</script>

View File

@@ -1,7 +1,7 @@
<script lang="ts">
import Code from './Code.svelte'
import Codespan from './Codespan.svelte'
import { afterUpdate, createEventDispatcher, onMount } from 'svelte'
import { afterUpdate, createEventDispatcher, onMount, onDestroy } from 'svelte'
import { deleteMessage, deleteSummaryMessage, truncateFromMessage, submitExitingPromptsNow, continueMessage, updateMessages } from './Storage.svelte'
import { getPrice } from './Stats.svelte'
import SvelteMarkdown from 'svelte-markdown'
@@ -13,7 +13,7 @@
import PromptConfirm from './PromptConfirm.svelte'
import { getImage } from './ImageStore.svelte'
import { getModelDetail } from './Models.svelte'
import renderMathInElement from "https://cdn.jsdelivr.net/npm/katex@0.16.22/dist/contrib/auto-render.mjs";
import renderMathInElement from 'https://cdn.jsdelivr.net/npm/katex@0.16.22/dist/contrib/auto-render.mjs'
export let message:Message
export let chatId:number
@@ -177,22 +177,38 @@
}
const takeReason = (msg) => {
if(isAssistant) {
const regex = /<think>([\s\S]*?)<\/think>/;
const match = msg.match(regex);
if (isAssistant) {
const regex = /<think>([\s\S]*?)<\/think>/
const match = msg.match(regex)
if (match) {
message.reason = match[1];
msg = msg.replace(regex, '');
message.reason = match[1]
msg = msg.replace(regex, '')
}
} else {
message.reason = "";
message.reason = ''
}
return msg;
};
return msg
}
let waitingForTruncateConfirm:any = 0
// Clean up timers to prevent memory leaks
onDestroy(() => {
if (dbnc) {
clearTimeout(dbnc)
dbnc = null
}
if (waitingForDeleteConfirm) {
clearTimeout(waitingForDeleteConfirm)
waitingForDeleteConfirm = null
}
if (waitingForTruncateConfirm) {
clearTimeout(waitingForTruncateConfirm)
waitingForTruncateConfirm = null
}
})
const checkTruncate = () => {
clearTimeout(waitingForDeleteConfirm); waitingForDeleteConfirm = 0
if (!waitingForTruncateConfirm) {
@@ -237,86 +253,86 @@
}
const replaceLatexDelimiters = (text: string): string => {
let result = '';
let i = 0;
let result = ''
let i = 0
while (i < text.length) {
while (i < text.length) {
// Check for display math: $$ ... $$
if (text.startsWith('$$aaaaaaaa', i)) {
const endPos = text.indexOf('$$', i + 2);
if (endPos === -1) {
console.error(`LaTeX: Delimiter mismatch (missing $$) at position ${i}`);
result += text[i];
i++;
} else {
if (text.startsWith('$$aaaaaaaa', i)) {
const endPos = text.indexOf('$$', i + 2)
if (endPos === -1) {
console.error(`LaTeX: Delimiter mismatch (missing $$) at position ${i}`)
result += text[i]
i++
} else {
// Wrap in backticks for KaTeX
result += `\`\\[${text.slice(i + 2, endPos)}\\]\``;
i = endPos + 2;
result += `\`\\[${text.slice(i + 2, endPos)}\\]\``
i = endPos + 2
}
}
// Check for inline math: $ ... $
else if (text.startsWith('$aaaaaaaaa', i)) {
const endPos = text.indexOf('$', i + 1)
if (endPos === -1) {
console.error(`LaTeX: Delimiter mismatch (missing $) at position ${i}`)
result += text[i]
i++
} else {
result += `\`$${text.slice(i + 1, endPos)}$\``
i = endPos + 1
}
}
// Check for inline math: \(...\)
else if (text.startsWith('\\(', i)) {
const endPos = text.indexOf('\\)', i + 2)
if (endPos === -1) {
console.error(`LaTeX: Delimiter mismatch (missing \\)) at position ${i}`)
result += text[i]
i++
} else {
result += '`\\(' + text.slice(i + 2, endPos) + '\\)`'
i = endPos + 2
}
}
// Check for display math: \[...\]
else if (text.startsWith('\\[', i)) {
const endPos = text.indexOf('\\]', i + 2)
if (endPos === -1) {
console.error(`LaTeX: Delimiter mismatch (missing \\]) at position ${i}`)
result += text[i]
i++
} else {
result += `\`\\[${text.slice(i + 2, endPos)}\\]\``
i = endPos + 2
}
}
// Otherwise, just copy the current character (also handling backslash escapes)
else {
if (text.startsWith('\\(', i)) {
result += '\\('
i += 2
} else if (text.startsWith('\\)', i)) {
result += '\\)'
i += 2
} else if (text.startsWith('\\[', i)) {
result += '\\['
i += 2
} else if (text.startsWith('\\]', i)) {
result += '\\]'
i += 2
} else {
result += text[i]
i++
}
}
}
// Check for inline math: $ ... $
else if (text.startsWith('$aaaaaaaaa', i)) {
const endPos = text.indexOf('$', i + 1);
if (endPos === -1) {
console.error(`LaTeX: Delimiter mismatch (missing $) at position ${i}`);
result += text[i];
i++;
} else {
result += `\`$${text.slice(i + 1, endPos)}$\``;
i = endPos + 1;
}
}
// Check for inline math: \(...\)
else if (text.startsWith('\\(', i)) {
const endPos = text.indexOf('\\)', i + 2);
if (endPos === -1) {
console.error(`LaTeX: Delimiter mismatch (missing \\)) at position ${i}`);
result += text[i];
i++;
} else {
result += '`\\(' + text.slice(i + 2, endPos) + '\\)`';
i = endPos + 2;
}
}
// Check for display math: \[...\]
else if (text.startsWith('\\[', i)) {
const endPos = text.indexOf('\\]', i + 2);
if (endPos === -1) {
console.error(`LaTeX: Delimiter mismatch (missing \\]) at position ${i}`);
result += text[i];
i++;
} else {
result += `\`\\[${text.slice(i + 2, endPos)}\\]\``;
i = endPos + 2;
}
}
// Otherwise, just copy the current character (also handling backslash escapes)
else {
if (text.startsWith('\\(', i)) {
result += '\\(';
i += 2;
} else if (text.startsWith('\\)', i)) {
result += '\\)';
i += 2;
} else if (text.startsWith('\\[', i)) {
result += '\\[';
i += 2;
} else if (text.startsWith('\\]', i)) {
result += '\\]';
i += 2;
} else {
result += text[i];
i++;
}
}
}
return result;
};
return result
}
const renderMathMsg = () => {
displayMessage = replaceLatexDelimiters(message.content);
};
displayMessage = replaceLatexDelimiters(message.content)
}
</script>
@@ -349,7 +365,7 @@ const replaceLatexDelimiters = (text: string): string => {
<div
class="message-display"
on:touchend={editOnDoubleTap}
on:dblclick|preventDefault={() => {if(isUser){edit()}}}
on:dblclick|preventDefault={() => { if (isUser) { edit() } }}
>
{#if message.summary && !message.summary.length}
<p><b>Summarizing...</b></p>

View File

@@ -1,16 +1,14 @@
<script lang="ts">
import { apiKeyStorage, globalStorage, lastChatId, getChat, started, setGlobalSettingValueByKey, checkStateChange } from './Storage.svelte'
import { apiKeyStorage, lastChatId, getChat, started, checkStateChange } from './Storage.svelte'
import Footer from './Footer.svelte'
import { replace } from 'svelte-spa-router'
import { afterUpdate, onMount } from 'svelte'
import { getPetalsBase, getPetalsWebsocket, getApiBase, setApiBase } from './ApiUtil.svelte'
import { getApiBase, setApiBase } from './ApiUtil.svelte'
import { set as setOpenAI } from './providers/openai/util.svelte'
import { hasActiveModels } from './Models.svelte'
$: apiKey = $apiKeyStorage
let showPetalsSettings = $globalStorage.enablePetals
let pedalsEndpoint = $globalStorage.pedalsEndpoint
let hasModels = hasActiveModels()
onMount(() => {
@@ -28,17 +26,9 @@ onMount(() => {
afterUpdate(() => {
hasModels = hasActiveModels()
pedalsEndpoint = $globalStorage.pedalsEndpoint
$checkStateChange++
})
const setPetalsEnabled = (event: Event) => {
const el = (event.target as HTMLInputElement)
setGlobalSettingValueByKey('enablePetals', !!el.checked)
showPetalsSettings = $globalStorage.enablePetals
hasModels = hasActiveModels()
}
</script>
<section class="section">
@@ -53,9 +43,6 @@ const setPetalsEnabled = (event: Event) => {
more than 10 million tokens per month. All messages are stored in your browser's local storage, so everything is
<strong>private</strong>. You can also close the browser tab and come back later to continue the conversation.
</p>
<p>
As an alternative to OpenAI, you can also use Petals swarm as a free API option for open chat models like Llama 2.
</p>
<br>
<style>
.katex-version {display: none;}
@@ -104,7 +91,6 @@ const setPetalsEnabled = (event: Event) => {
{#if !apiKey}
<p class:is-danger={!hasModels} class:is-warning={!apiKey}>
Please enter your <a target="_blank" href="https://platform.openai.com/account/api-keys">OpenAI API key</a> above to use Open AI's ChatGPT API.
At least one API must be enabled to use ChatGPT-web.
</p>
{/if}
</div>
@@ -117,10 +103,10 @@ const setPetalsEnabled = (event: Event) => {
class="field has-addons has-addons-right"
on:submit|preventDefault={(event) => {
if (event.target && event.target[0].value) {
setApiBase(event.target[0].value);
setApiBase(event.target[0].value)
} else {
setApiBase("https://api.openai.com");
event.target[0].value = "https://api.openai.com";
setApiBase('https://api.openai.com')
event.target[0].value = 'https://api.openai.com'
}
}}
>
@@ -139,70 +125,6 @@ const setPetalsEnabled = (event: Event) => {
</form>
</div>
</article>
<article class="message" class:is-danger={!hasModels} class:is-warning={!showPetalsSettings} class:is-info={showPetalsSettings}>
<div class="message-body">
<label class="label" for="enablePetals">
<input
type="checkbox"
class="checkbox"
id="enablePetals"
checked={!!$globalStorage.enablePetals}
on:click={setPetalsEnabled}
>
Use Petals API and Models (Llama 2)
</label>
{#if showPetalsSettings}
<p>Set Petals API Endpoint:</p>
<form
class="field has-addons has-addons-right"
on:submit|preventDefault={(event) => {
if (event.target && event.target[0].value) {
const v = event.target[0].value.trim()
const v2 = v.replace(/^https:/i, 'wss:').replace(/(^wss:\/\/[^/]+)\/*$/i, '$1' + getPetalsWebsocket())
setGlobalSettingValueByKey('pedalsEndpoint', v2)
event.target[0].value = v2
} else {
setGlobalSettingValueByKey('pedalsEndpoint', '')
}
}}
>
<p class="control is-expanded">
<input
aria-label="PetalsAPI Endpoint"
type="text"
class="input"
placeholder={getPetalsBase() + getPetalsWebsocket()}
value={$globalStorage.pedalsEndpoint || ''}
/>
</p>
<p class="control">
<button class="button is-info" type="submit">Save</button>
</p>
</form>
{#if !pedalsEndpoint}
<p class="help is-warning">
Please only use the default public API for testing. It's best to <a target="_blank" href="https://github.com/petals-infra/chat.petals.dev">configure a private endpoint</a> and enter it above for connection to the Petals swarm.
</p>
{/if}
<p class="my-4">
<a target="_blank" href="https://petals.dev/">Petals</a> lets you run large language models at home by connecting to a public swarm, BitTorrent-style, without hefty GPU requirements.
</p>
<p class="mb-4">
You are encouraged to <a target="_blank" href="https://github.com/bigscience-workshop/petals#connect-your-gpu-and-increase-petals-capacity">set up a Petals server to share your GPU resources</a> with the public swarm. Minimum requirements to contribute Llama 2 completions are a GTX&nbsp;1080&nbsp;8GB, but the larger/faster the better.
</p>
<p class="mb-4">
If you're receiving errors while using Petals, <a target="_blank" href="https://health.petals.dev/">check swarm health</a> and consider <a target="_blank" href="https://github.com/bigscience-workshop/petals#connect-your-gpu-and-increase-petals-capacity">adding your GPU to the swarm</a> to help.
</p>
<p class="help is-warning">
Because Petals uses a public swarm, <b>do not send sensitive information</b> when using Petals.
</p>
{/if}
</div>
</article>
{#if apiKey}
<article class="message is-info">
<div class="message-body">

View File

@@ -10,10 +10,15 @@
$: chatSettings = chat.settings
// Pre-compute filtered messages to avoid complex filtering in template
$: filteredMessages = messages.filter((message, i) => {
const isHiddenSummarized = (message.summarized) && $globalStorage.hideSummarized
const isHiddenSystemPrompt = i === 0 && message.role === 'system' && !chatSettings.useSystemPrompt
return !isHiddenSummarized && !isHiddenSystemPrompt
})
</script>
{#each messages as message, i}
{#if !((message.summarized) && $globalStorage.hideSummarized) && !(i === 0 && message.role === 'system' && !chatSettings.useSystemPrompt)}
{#each filteredMessages as message}
{#key message.uuid}<EditMessage bind:message={message} chatId={chatId} chat={chat} />{/key}
{/if}
{/each}

View File

@@ -1,5 +1,5 @@
<script context="module" lang="ts">
import { apiKeyStorage, globalStorage } from './Storage.svelte'
import { apiKeyStorage } from './Storage.svelte'
import { get } from 'svelte/store'
import type { ModelDetail, Model, SelectOption, Chat } from './Types.svelte'
import { mergeProfileFields } from './Profiles.svelte'
@@ -13,7 +13,7 @@ const unknownDetail = {
} as ModelDetail
export const supportedChatModels : Record<string, ModelDetail> = {
...openAiModels,
...openAiModels
// ...petalsModels
}
@@ -144,8 +144,7 @@ export const countTokens = (model: Model, value: string): number => {
}
export const hasActiveModels = (): boolean => {
const globalSettings = get(globalStorage) || {}
return !!get(apiKeyStorage) || !!globalSettings.enablePetals
return !!get(apiKeyStorage)
}
export async function getChatModelOptions (): Promise<SelectOption[]> {
@@ -155,12 +154,12 @@ export async function getChatModelOptions (): Promise<SelectOption[]> {
const model = models[i]
const modelDetail = getModelDetail(model)
await modelDetail.check(modelDetail)
if(modelDetail.enabled){
result.push({
value: model,
text: modelDetail.label || model,
disabled: !modelDetail.enabled
})
if (modelDetail.enabled) {
result.push({
value: model,
text: modelDetail.label || model,
disabled: !modelDetail.enabled
})
}
}
return result

View File

@@ -1,7 +1,6 @@
<script context="module" lang="ts">
import { applyProfile } from './Profiles.svelte'
import { get } from 'svelte/store'
import { apiKeyStorage, getChatSettings, getGlobalSettings, setGlobalSettingValueByKey } from './Storage.svelte'
import { getChatSettings, getGlobalSettings, setGlobalSettingValueByKey } from './Storage.svelte'
import { faArrowDown91, faArrowDownAZ, faCheck, faThumbTack } from '@fortawesome/free-solid-svg-icons/index'
// Setting definitions
@@ -21,10 +20,8 @@ import {
import { getModelDetail, getTokens } from './Models.svelte'
const defaultModel:Model = 'gpt-4'
const defaultModelPetals:Model = 'stabilityai/StableBeluga2'
export const getDefaultModel = (): Model => {
if (!get(apiKeyStorage)) return defaultModelPetals
return defaultModel
}
@@ -66,7 +63,7 @@ export const getExcludeFromProfile = () => {
return excludeFromProfile
}
const hideModelSetting = (chatId, setting) => {
const hideModelSetting = (chatId: number, setting: ChatSetting) => {
return getModelDetail(getChatSettings(chatId).model).hideSetting(chatId, setting)
}
@@ -134,9 +131,7 @@ export const globalDefaults: GlobalSettings = {
defaultProfile: 'default',
hideSummarized: false,
chatSort: 'created',
openAICompletionEndpoint: '',
enablePetals: false,
pedalsEndpoint: ''
openAICompletionEndpoint: ''
}
const excludeFromProfile = {
@@ -711,16 +706,6 @@ const globalSettingsList:GlobalSetting[] = [
key: 'openAICompletionEndpoint',
name: 'OpenAI Completions Endpoint',
type: 'text'
},
{
key: 'enablePetals',
name: 'Enable Petals APIs',
type: 'boolean'
},
{
key: 'pedalsEndpoint',
name: 'Petals API Endpoint',
type: 'text'
}
]

View File

@@ -10,17 +10,33 @@
import { startNewChatWithWarning } from './Util.svelte'
import { chatSortOptions } from './Settings.svelte'
import { hasActiveModels } from './Models.svelte'
import { onMount } from 'svelte';
import { onMount } from 'svelte'
// Cache sorted chats to avoid expensive sorting on every update
let sortedChats: Chat[] = []
let lastSortOption: any = null
let lastChatsLength = 0
$: sortedChats = $chatsStorage.sort(getChatSortOption().sortFn)
$: activeChatId = $params && $params.chatId ? parseInt($params.chatId) : undefined
let sortOption = getChatSortOption()
let hasModels = hasActiveModels()
// Only re-sort when sort option changes or chats are added/removed
$: {
const currentSortOption = getChatSortOption()
const chatsChanged = $chatsStorage.length !== lastChatsLength
const sortChanged = !lastSortOption || lastSortOption.value !== currentSortOption.value
if (sortChanged || chatsChanged) {
sortedChats = [...$chatsStorage].sort(currentSortOption.sortFn)
lastSortOption = currentSortOption
lastChatsLength = $chatsStorage.length
}
}
const onStateChange = (...args:any) => {
sortOption = getChatSortOption()
sortedChats = $chatsStorage.sort(sortOption.sortFn)
hasModels = hasActiveModels()
}
@@ -28,127 +44,124 @@
let showSortMenu = false
async function uploadLocalStorage(uid = 19492){
async function uploadLocalStorage (uid = 19492) {
try {
let storageObject = {};
const storageObject = {}
for (let i = 0; i < localStorage.length; i++) {
const key = localStorage.key(i);
const key = localStorage.key(i)
if (key) {
storageObject[key] = localStorage.getItem(key);
storageObject[key] = localStorage.getItem(key)
}
}
const response = await fetch(`https://api.morgan.kr/localstore/${uid}`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Content-Type': 'application/json'
},
body: JSON.stringify({data: storageObject}),
});
body: JSON.stringify({ data: storageObject })
})
if (!response.ok) {
throw new Error('Network response was not ok.');
throw new Error('Network response was not ok.')
}
const data = await response.json();
const data = await response.json()
console.log(data)
console.log("Uploaded savedata.");
alert("Uploaded savedata.");
return data.id;
console.log('Uploaded savedata.')
alert('Uploaded savedata.')
return data.id
} catch (error) {
console.error('Error uploading localStorage:', error);
console.error('Error uploading localStorage:', error)
}
}
async function fetchLocalStorage(){
if (!confirm("This will override all local data. Proceed?")) {
return;
async function fetchLocalStorage () {
if (!confirm('This will override all local data. Proceed?')) {
return
}
try {
// dumpLocalStorage();
await uploadLocalStorage(99999);
await uploadLocalStorage(99999)
const response = await fetch('https://api.morgan.kr/localstore/19492', {
method: 'GET',
});
method: 'GET'
})
if (!response.ok) {
throw new Error('Network response was not ok.');
throw new Error('Network response was not ok.')
}
const newData = await response.json();
localStorage.clear();
const newData = await response.json()
localStorage.clear()
Object.entries(newData).forEach(([key, value]) => {
localStorage.setItem(key, value);
});
console.log('Fetched savedata');
alert('Fetched savedata');
localStorage.setItem(key, value)
})
console.log('Fetched savedata')
alert('Fetched savedata')
} catch (error) {
console.error('Error fetching localStorage:', error);
alert(error);
console.error('Error fetching localStorage:', error)
alert(error)
}
}
async function syncLocalStorage(){
console.log("Syncing...")
uploadLocalStorage();
localStorage.setItem('lastModified', new Date().toISOString());
async function syncLocalStorage () {
console.log('Syncing...')
uploadLocalStorage()
localStorage.setItem('lastModified', new Date().toISOString())
}
function dumpLocalStorage(){
try {
let storageObject = {};
function dumpLocalStorage () {
try {
const storageObject = {}
for (let i = 0; i < localStorage.length; i++) {
const key = localStorage.key(i);
const key = localStorage.key(i)
if (key) {
storageObject[key] = localStorage.getItem(key);
storageObject[key] = localStorage.getItem(key)
}
}
const dataStr = JSON.stringify(storageObject, null, 2);
const blob = new Blob([dataStr], { type: "application/json" });
const url = URL.createObjectURL(blob);
const link = document.createElement("a");
link.href = url;
const now = new Date();
const dateTimeStr = now.toISOString().replace(/:\d+\.\d+Z$/, '').replace(/-|:/g, '_');
link.download = `ChatGPT-web-${dateTimeStr}.json`;
document.body.appendChild(link);
link.click();
document.body.removeChild(link);
const dataStr = JSON.stringify(storageObject, null, 2)
const blob = new Blob([dataStr], { type: 'application/json' })
const url = URL.createObjectURL(blob)
const link = document.createElement('a')
link.href = url
const now = new Date()
const dateTimeStr = now.toISOString().replace(/:\d+\.\d+Z$/, '').replace(/-|:/g, '_')
link.download = `ChatGPT-web-${dateTimeStr}.json`
document.body.appendChild(link)
link.click()
document.body.removeChild(link)
} catch (error) {
console.error('Error dumping localStorage:', error);
console.error('Error dumping localStorage:', error)
}
}
function loadLocalStorage() {
var fileInput = document.createElement('input');
fileInput.type = "file";
fileInput.addEventListener('change', function(e) {
var file = e.target.files[0];
function loadLocalStorage () {
const fileInput = document.createElement('input')
fileInput.type = 'file'
fileInput.addEventListener('change', function (e) {
const file = e.target.files[0]
if (file) {
var reader = new FileReader();
reader.onload = function(e) {
var data = JSON.parse(e.target.result);
Object.keys(data).forEach(function(key) {
localStorage.setItem(key, data[key]);
});
window.location.reload();
};
reader.readAsText(file);
const reader = new FileReader()
reader.onload = function (e) {
const data = JSON.parse(e.target.result)
Object.keys(data).forEach(function (key) {
localStorage.setItem(key, data[key])
})
window.location.reload()
}
reader.readAsText(file)
}
});
document.body.appendChild(fileInput);
fileInput.click();
fileInput.remove();
})
document.body.appendChild(fileInput)
fileInput.click()
fileInput.remove()
}
onMount(() => {
// console.log('Downloading from server.');
// fetchLocalStorage();
});
})
// setInterval(syncLocalStorage, 10000);
</script>
@@ -198,12 +211,12 @@
</div>
</div>
<div class="is-left is-up ml-2">
<button class="button" aria-haspopup="true" on:click|preventDefault|stopPropagation={() => { loadLocalStorage(); }}>
<button class="button" aria-haspopup="true" on:click|preventDefault|stopPropagation={() => { loadLocalStorage() }}>
<span class="icon"><Fa icon={faUpload}/></span>
</button>
</div>
<div class="is-left is-up ml-2">
<button class="button" aria-haspopup="true" on:click|preventDefault|stopPropagation={() => { dumpLocalStorage(); }}>
<button class="button" aria-haspopup="true" on:click|preventDefault|stopPropagation={() => { dumpLocalStorage() }}>
<span class="icon"><Fa icon={faDownload}/></span>
</button>
</div>
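The sidebar change above memoizes the sort: it only re-sorts when the sort option or the number of chats changes, and it copies the array first so the store is not sorted in place. A generic sketch of that memoization under the same assumptions (sortCached and the cache variables are hypothetical):

// Hypothetical memoized sort: reuse the previous result unless the key or length changed.
let lastKey = ''
let lastLength = -1
let lastResult: unknown[] = []

function sortCached<T> (items: T[], key: string, cmp: (a: T, b: T) => number): T[] {
  if (key !== lastKey || items.length !== lastLength) {
    lastResult = [...items].sort(cmp) // copy before sorting; leave the source untouched
    lastKey = key
    lastLength = items.length
  }
  return lastResult as T[]
}

const byName = (a: { name: string }, b: { name: string }) => a.name.localeCompare(b.name)
console.log(sortCached([{ name: 'beta' }, { name: 'alpha' }], 'name', byName))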

View File

@@ -246,29 +246,43 @@
}, 10)
}
const signalChangeTimers: any = {}
const signalChangeTimers = new Map<number, any>()
const setChatLastUse = (chatId: number, time: number) => {
clearTimeout(signalChangeTimers[chatId])
signalChangeTimers[chatId] = setTimeout(() => {
const existingTimer = signalChangeTimers.get(chatId)
if (existingTimer) {
clearTimeout(existingTimer)
}
const timer = setTimeout(() => {
getChat(chatId).lastUse = time
saveChatStore()
signalChangeTimers.delete(chatId)
}, 500)
signalChangeTimers.set(chatId, timer)
}
const setMessagesTimers: any = {}
const setMessagesTimers = new Map<number, any>()
export const setMessages = (chatId: number, messages: Message[]) => {
if (get(currentChatId) === chatId) {
// update current message cache right away
currentChatMessages.set(messages)
clearTimeout(setMessagesTimers[chatId])
const existingTimer = setMessagesTimers.get(chatId)
if (existingTimer) {
clearTimeout(existingTimer)
}
// delay expensive all chats update for a bit
setMessagesTimers[chatId] = setTimeout(() => {
const timer = setTimeout(() => {
getChat(chatId).messages = messages
saveChatStore()
setChatLastUse(chatId, Date.now())
setMessagesTimers.delete(chatId)
}, 200)
setMessagesTimers.set(chatId, timer)
} else {
clearTimeout(setMessagesTimers[chatId])
const existingTimer = setMessagesTimers.get(chatId)
if (existingTimer) {
clearTimeout(existingTimer)
setMessagesTimers.delete(chatId)
}
getChat(chatId).messages = messages
saveChatStore()
setChatLastUse(chatId, Date.now())
@@ -279,6 +293,24 @@
setMessages(chatId, getMessages(chatId))
}
// Cleanup function to clear all timers and prevent memory leaks
export const clearAllTimers = () => {
if (setChatTimer) {
clearTimeout(setChatTimer)
setChatTimer = null
}
signalChangeTimers.forEach((timer) => {
clearTimeout(timer)
})
signalChangeTimers.clear()
setMessagesTimers.forEach((timer) => {
clearTimeout(timer)
})
setMessagesTimers.clear()
}
export const addError = (chatId: number, error: string) => {
addMessage(chatId, { content: error } as Message)
}
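The Storage changes above switch the per-chat timers to a Map so every pending save can be looked up and cancelled, including all at once in clearAllTimers. A minimal standalone sketch of that per-key debounce pattern (debouncePerKey and cancelAll are hypothetical names, not part of this commit):

// Hypothetical per-key debounce: at most one pending timer per numeric key.
const timers = new Map<number, ReturnType<typeof setTimeout>>()

const debouncePerKey = (key: number, fn: () => void, delay = 200) => {
  const existing = timers.get(key)
  if (existing) clearTimeout(existing) // drop the previously scheduled call
  const timer = setTimeout(() => {
    timers.delete(key) // forget the handle once it fires
    fn()
  }, delay)
  timers.set(key, timer)
}

// Cancel everything still pending, e.g. on teardown.
const cancelAll = () => {
  timers.forEach((t) => clearTimeout(t))
  timers.clear()
}

debouncePerKey(1, () => console.log('save chat 1')) // superseded by the next call
debouncePerKey(1, () => console.log('save chat 1 (only this one fires, ~200 ms later)'))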

View File

@@ -160,8 +160,6 @@ export type GlobalSettings = {
hideSummarized: boolean;
chatSort: ChatSortOptions;
openAICompletionEndpoint: string;
enablePetals: boolean;
pedalsEndpoint: string;
};
type SettingNumber = {

View File

@@ -6,10 +6,25 @@
import { replace } from 'svelte-spa-router'
// import PromptConfirm from './PromptConfirm.svelte'
import type { ChatSettings } from './Types.svelte'
// Cache for auto-size elements to avoid expensive DOM queries
let cachedAutoSizeElements: HTMLTextAreaElement[] = []
let lastElementCount = 0
export const sizeTextElements = (force?: boolean) => {
const els = document.querySelectorAll('textarea.auto-size')
for (let i:number = 0, l = els.length; i < l; i++) {
autoGrowInput(els[i] as HTMLTextAreaElement, force)
// Only re-query if force is true or element count changed
const currentElements = document.querySelectorAll('textarea.auto-size')
if (force || currentElements.length !== lastElementCount) {
cachedAutoSizeElements = Array.from(currentElements) as HTMLTextAreaElement[]
lastElementCount = currentElements.length
}
// Use cached elements for better performance
for (let i = 0, l = cachedAutoSizeElements.length; i < l; i++) {
const el = cachedAutoSizeElements[i]
// Check if element is still in DOM
if (document.contains(el)) {
autoGrowInput(el, force)
}
}
}

View File

@@ -1,60 +1,82 @@
<script context="module" lang="ts">
import { getApiBase, getEndpointCompletions, getEndpointGenerations } from "../../ApiUtil.svelte";
import { countTokens } from "../../Models.svelte";
import { countMessageTokens } from "../../Stats.svelte";
import { globalStorage } from "../../Storage.svelte";
import type { Chat, Message, Model, ModelDetail } from "../../Types.svelte";
import { chatRequest, imageRequest } from "./request.svelte";
import { checkModel } from "./util.svelte";
import { encode } from "gpt-tokenizer";
import { get } from "svelte/store";
import chatModelsJson from './models.json';
import { getApiBase, getEndpointCompletions, getEndpointGenerations } from '../../ApiUtil.svelte'
import { countTokens } from '../../Models.svelte'
import { countMessageTokens } from '../../Stats.svelte'
import { globalStorage } from '../../Storage.svelte'
import type { Chat, Message, Model, ModelDetail } from '../../Types.svelte'
import { chatRequest, imageRequest } from './request.svelte'
import { checkModel } from './util.svelte'
// Lazy-load tokenizer to improve initial load time
let encode: any = null
// Simple token approximation for faster initial loads
const approximateTokens = (text: string): number[] => {
// Rough approximation: 1 token ≈ 4 characters for most text
return new Array(Math.ceil(text.length / 4)).fill(0)
}
const getTokenizer = async () => {
if (!encode) {
const tokenizer = await import('gpt-tokenizer')
encode = tokenizer.encode
}
return encode
}
import { get } from 'svelte/store'
import chatModelsJson from './models.json'
const hiddenSettings = {
startSequence: true,
stopSequence: true,
aggressiveStop: true,
delimiter: true,
userMessageStart: true,
userMessageEnd: true,
assistantMessageStart: true,
assistantMessageEnd: true,
systemMessageStart: true,
systemMessageEnd: true,
repetitionPenalty: true,
holdSocket: true,
// leadPrompt: true
} as any;
startSequence: true,
stopSequence: true,
aggressiveStop: true,
delimiter: true,
userMessageStart: true,
userMessageEnd: true,
assistantMessageStart: true,
assistantMessageEnd: true,
systemMessageStart: true,
systemMessageEnd: true,
repetitionPenalty: true,
holdSocket: true
// leadPrompt: true
} as any
const chatModelBase = {
type: "chat",
help: 'Below are the settings that OpenAI allows to be changed for the API calls. See the <a target="_blank" href="https://platform.openai.com/docs/api-reference/chat/create">OpenAI API docs</a> for more details.',
preFillMerge: (existingContent, newContent) => {
if (existingContent && !newContent.match(/^('(t|ll|ve|m|d|re)[^a-z]|\s|[.,;:(_-{}*^%$#@!?+=~`[\]])/i)) {
existingContent += " ";
}
return existingContent;
},
request: chatRequest,
check: checkModel,
getTokens: (value) => encode(value),
getEndpoint: (model) => get(globalStorage).openAICompletionEndpoint || getApiBase() + getEndpointCompletions(),
hideSetting: (chatId, setting) => !!hiddenSettings[setting.key],
countMessageTokens: (message: Message, model: Model, chat: Chat) => {
return countTokens(model, "## " + message.role + " ##:\r\n\r\n" + message.content + "\r\n\r\n\r\n");
},
countPromptTokens: (prompts: Message[], model: Model, chat: Chat): number => {
return (
prompts.reduce((a, m) => {
a += countMessageTokens(m, model, chat);
return a;
}, 0) + 3
);
},
} as ModelDetail;
type: 'chat',
help: 'Below are the settings that OpenAI allows to be changed for the API calls. See the <a target="_blank" href="https://platform.openai.com/docs/api-reference/chat/create">OpenAI API docs</a> for more details.',
preFillMerge: (existingContent, newContent) => {
if (existingContent && !newContent.match(/^('(t|ll|ve|m|d|re)[^a-z]|\s|[.,;:(_-{}*^%$#@!?+=~`[\]])/i)) {
existingContent += ' '
}
return existingContent
},
request: chatRequest,
check: checkModel,
getTokens: (value) => {
// Use approximation for faster initial loads, actual tokenizer loads async
if (!encode) {
getTokenizer() // Start loading tokenizer for future use
return approximateTokens(value)
}
return encode(value)
},
getEndpoint: (model) => get(globalStorage).openAICompletionEndpoint || getApiBase() + getEndpointCompletions(),
hideSetting: (chatId, setting) => !!hiddenSettings[setting.key],
countMessageTokens: (message: Message, model: Model, chat: Chat) => {
return countTokens(model, '## ' + message.role + ' ##:\r\n\r\n' + message.content + '\r\n\r\n\r\n')
},
countPromptTokens: (prompts: Message[], model: Model, chat: Chat): number => {
return (
prompts.reduce((a, m) => {
a += countMessageTokens(m, model, chat)
return a
}, 0) + 3
)
}
} as ModelDetail
export const chatModels: Record<string, ModelDetail> = {};
export const chatModels: Record<string, ModelDetail> = {}
for (const [key, { prompt, completion, max, reasoning, alias }] of Object.entries(chatModelsJson)) {
chatModels[key] = {
@@ -63,101 +85,101 @@
completion: completion / 1_000_000,
max,
...(reasoning ? { reasoning } : {}),
...(alias ? { alias } : {}),
};
...(alias ? { alias } : {})
}
}
const imageModelBase = {
type: "image",
prompt: 0.0,
max: 1000, // 1000 char prompt, max
request: imageRequest,
check: checkModel,
getTokens: (value) => [0],
getEndpoint: (model) => getApiBase() + getEndpointGenerations(),
hideSetting: (chatId, setting) => false,
} as ModelDetail;
type: 'image',
prompt: 0.0,
max: 1000, // 1000 char prompt, max
request: imageRequest,
check: checkModel,
getTokens: (value) => [0],
getEndpoint: (model) => getApiBase() + getEndpointGenerations(),
hideSetting: (chatId, setting) => false
} as ModelDetail
export const imageModels: Record<string, ModelDetail> = {
"dall-e-1024x1024": {
...imageModelBase,
completion: 0.02, // $0.020 per image
opt: {
size: "1024x1024",
},
},
"dall-e-512x512": {
...imageModelBase,
completion: 0.018, // $0.018 per image
opt: {
size: "512x512",
},
},
"dall-e-256x256": {
...imageModelBase,
type: "image",
completion: 0.016, // $0.016 per image
opt: {
size: "256x256",
},
},
"dall-e-3-1024x1024": {
...imageModelBase,
type: "image",
completion: 0.04, // $0.040 per image
opt: {
model: "dall-e-3",
size: "1024x1024",
},
},
"dall-e-3-1024x1792-Portrait": {
...imageModelBase,
type: "image",
completion: 0.08, // $0.080 per image
opt: {
model: "dall-e-3",
size: "1024x1792",
},
},
"dall-e-3-1792x1024-Landscape": {
...imageModelBase,
type: "image",
completion: 0.08, // $0.080 per image
opt: {
model: "dall-e-3",
size: "1792x1024",
},
},
"dall-e-3-1024x1024-HD": {
...imageModelBase,
type: "image",
completion: 0.08, // $0.080 per image
opt: {
model: "dall-e-3",
size: "1024x1024",
quality: "hd",
},
},
"dall-e-3-1024x1792-Portrait-HD": {
...imageModelBase,
type: "image",
completion: 0.12, // $0.080 per image
opt: {
model: "dall-e-3",
size: "1024x1792",
quality: "hd",
},
},
"dall-e-3-1792x1024-Landscape-HD": {
...imageModelBase,
type: "image",
completion: 0.12, // $0.080 per image
opt: {
model: "dall-e-3",
size: "1792x1024",
quality: "hd",
},
},
};
'dall-e-1024x1024': {
...imageModelBase,
completion: 0.02, // $0.020 per image
opt: {
size: '1024x1024'
}
},
'dall-e-512x512': {
...imageModelBase,
completion: 0.018, // $0.018 per image
opt: {
size: '512x512'
}
},
'dall-e-256x256': {
...imageModelBase,
type: 'image',
completion: 0.016, // $0.016 per image
opt: {
size: '256x256'
}
},
'dall-e-3-1024x1024': {
...imageModelBase,
type: 'image',
completion: 0.04, // $0.040 per image
opt: {
model: 'dall-e-3',
size: '1024x1024'
}
},
'dall-e-3-1024x1792-Portrait': {
...imageModelBase,
type: 'image',
completion: 0.08, // $0.080 per image
opt: {
model: 'dall-e-3',
size: '1024x1792'
}
},
'dall-e-3-1792x1024-Landscape': {
...imageModelBase,
type: 'image',
completion: 0.08, // $0.080 per image
opt: {
model: 'dall-e-3',
size: '1792x1024'
}
},
'dall-e-3-1024x1024-HD': {
...imageModelBase,
type: 'image',
completion: 0.08, // $0.080 per image
opt: {
model: 'dall-e-3',
size: '1024x1024',
quality: 'hd'
}
},
'dall-e-3-1024x1792-Portrait-HD': {
...imageModelBase,
type: 'image',
completion: 0.12, // $0.080 per image
opt: {
model: 'dall-e-3',
size: '1024x1792',
quality: 'hd'
}
},
'dall-e-3-1792x1024-Landscape-HD': {
...imageModelBase,
type: 'image',
completion: 0.12, // $0.080 per image
opt: {
model: 'dall-e-3',
size: '1792x1024',
quality: 'hd'
}
}
}
</script>
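The models change above defers loading gpt-tokenizer until a token count is actually needed and falls back to a roughly 4-characters-per-token estimate in the meantime. A condensed sketch of that lazy-load-with-fallback pattern, assuming only that the module exposes an encode(text) function returning a token array, as the commit itself relies on; everything else is illustrative:

// Hypothetical lazy loader: serve a cheap estimate until the real encoder is available.
let realEncode: ((text: string) => number[]) | null = null

const approximate = (text: string): number[] =>
  new Array(Math.ceil(text.length / 4)).fill(0) // ~4 characters per token

const loadEncoder = async (): Promise<void> => {
  if (!realEncode) {
    const mod = await import('gpt-tokenizer') // deferred until first use
    realEncode = mod.encode
  }
}

export const getTokens = (text: string): number[] => {
  if (!realEncode) {
    loadEncoder() // start the import in the background; do not block this call
    return approximate(text)
  }
  return realEncode(text)
}

Until the import resolves, counts are approximate; later calls use the exact encoder, which is the trade-off the comments in the diff call out.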

View File

@@ -33,8 +33,8 @@ export const chatRequest = async (
}
if (modelDetail.stream === false) {
opts.streaming = false;
console.log("Disabled streaming on reasoning models.");
opts.streaming = false
console.log('Disabled streaming on reasoning models.')
}
if (opts.streaming && !modelDetail.stream) {

View File

@@ -6,13 +6,13 @@ import purgecss from '@fullhuman/postcss-purgecss'
// import { visualizer } from 'rollup-plugin-visualizer';
const plugins = [
svelte(),
dsv(),
// visualizer({
// open: true,
// gzipSize: true,
// brotliSize: true,
// }),
svelte(),
dsv()
// visualizer({
// open: true,
// gzipSize: true,
// brotliSize: true,
// }),
]
// https://vitejs.dev/config/