Removed Petal for size

2024-06-24 04:28:24 +09:00
parent 2dfeb877d7
commit 4d8e28c44e
7 changed files with 265 additions and 457 deletions

package-lock.json (generated; 266 changed lines)

@@ -30,8 +30,8 @@
"flourite": "^1.2.4",
"gpt-tokenizer": "^2.1.2",
"katex": "^0.16.10",
"llama-tokenizer-js": "^1.1.3",
"postcss": "^8.4.32",
"rollup-plugin-visualizer": "^5.12.0",
"sass": "^1.69.7",
"stacking-order": "^2.0.0",
"svelte": "^3.59.2",
@@ -1483,12 +1483,13 @@
}
},
"node_modules/braces": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
"integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
"integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
"dev": true,
"license": "MIT",
"dependencies": {
"fill-range": "^7.0.1"
"fill-range": "^7.1.1"
},
"engines": {
"node": ">=8"
@@ -1607,6 +1608,61 @@
"node": ">= 6"
}
},
"node_modules/cliui": {
"version": "8.0.1",
"resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
"integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
"dev": true,
"license": "ISC",
"dependencies": {
"string-width": "^4.2.0",
"strip-ansi": "^6.0.1",
"wrap-ansi": "^7.0.0"
},
"engines": {
"node": ">=12"
}
},
"node_modules/cliui/node_modules/emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
"dev": true,
"license": "MIT"
},
"node_modules/cliui/node_modules/string-width": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"dev": true,
"license": "MIT",
"dependencies": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/cliui/node_modules/wrap-ansi": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
"integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"ansi-styles": "^4.0.0",
"string-width": "^4.1.0",
"strip-ansi": "^6.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
"node_modules/color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
@@ -1799,6 +1855,16 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/define-lazy-prop": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz",
"integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/define-properties": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz",
@@ -2058,6 +2124,16 @@
"@esbuild/win32-x64": "0.18.20"
}
},
"node_modules/escalade": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz",
"integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6"
}
},
"node_modules/escape-string-regexp": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
@@ -2626,10 +2702,11 @@
}
},
"node_modules/fill-range": {
"version": "7.0.1",
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
"integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
"version": "7.1.1",
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
"integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
"dev": true,
"license": "MIT",
"dependencies": {
"to-regex-range": "^5.0.1"
},
@@ -2782,6 +2859,16 @@
"node": ">= 0.6.0"
}
},
"node_modules/get-caller-file": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
"integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
"dev": true,
"license": "ISC",
"engines": {
"node": "6.* || 8.* || >= 10.*"
}
},
"node_modules/get-intrinsic": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz",
@@ -3265,6 +3352,22 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-docker": {
"version": "2.2.1",
"resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz",
"integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==",
"dev": true,
"license": "MIT",
"bin": {
"is-docker": "cli.js"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/is-extglob": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
@@ -3313,6 +3416,7 @@
"resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
"integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=0.12.0"
}
@@ -3437,6 +3541,19 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-wsl": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz",
"integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==",
"dev": true,
"license": "MIT",
"dependencies": {
"is-docker": "^2.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/isarray": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz",
@@ -3573,12 +3690,6 @@
"node": ">= 0.8.0"
}
},
"node_modules/llama-tokenizer-js": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/llama-tokenizer-js/-/llama-tokenizer-js-1.2.1.tgz",
"integrity": "sha512-SEVVc++cXR0D0Wv30AzMVWzPCAKM701vZYU31h5lCTIn4k5cfZpJ070YDcb2nPq2Ts3xgu44L19wIrq1z/XjXQ==",
"dev": true
},
"node_modules/locate-path": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz",
@@ -3861,6 +3972,24 @@
"wrappy": "1"
}
},
"node_modules/open": {
"version": "8.4.2",
"resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz",
"integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"define-lazy-prop": "^2.0.0",
"is-docker": "^2.1.1",
"is-wsl": "^2.2.0"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/optionator": {
"version": "0.9.4",
"resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz",
@@ -4169,6 +4298,16 @@
"url": "https://github.com/sponsors/mysticatea"
}
},
"node_modules/require-directory": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
"integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/resolve": {
"version": "1.22.8",
"resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz",
@@ -4265,6 +4404,33 @@
"fsevents": "~2.3.2"
}
},
"node_modules/rollup-plugin-visualizer": {
"version": "5.12.0",
"resolved": "https://registry.npmjs.org/rollup-plugin-visualizer/-/rollup-plugin-visualizer-5.12.0.tgz",
"integrity": "sha512-8/NU9jXcHRs7Nnj07PF2o4gjxmm9lXIrZ8r175bT9dK8qoLlvKTwRMArRCMgpMGlq8CTLugRvEmyMeMXIU2pNQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"open": "^8.4.0",
"picomatch": "^2.3.1",
"source-map": "^0.7.4",
"yargs": "^17.5.1"
},
"bin": {
"rollup-plugin-visualizer": "dist/bin/cli.js"
},
"engines": {
"node": ">=14"
},
"peerDependencies": {
"rollup": "2.x || 3.x || 4.x"
},
"peerDependenciesMeta": {
"rollup": {
"optional": true
}
}
},
"node_modules/run-parallel": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
@@ -4547,6 +4713,16 @@
"sorcery": "bin/sorcery"
}
},
"node_modules/source-map": {
"version": "0.7.4",
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.4.tgz",
"integrity": "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==",
"dev": true,
"license": "BSD-3-Clause",
"engines": {
"node": ">= 8"
}
},
"node_modules/source-map-js": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz",
@@ -4983,6 +5159,7 @@
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
"integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"is-number": "^7.0.0"
},
@@ -5443,12 +5620,73 @@
"integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
"dev": true
},
"node_modules/y18n": {
"version": "5.0.8",
"resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
"integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==",
"dev": true,
"license": "ISC",
"engines": {
"node": ">=10"
}
},
"node_modules/yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
"dev": true
},
"node_modules/yargs": {
"version": "17.7.2",
"resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
"integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
"dev": true,
"license": "MIT",
"dependencies": {
"cliui": "^8.0.1",
"escalade": "^3.1.1",
"get-caller-file": "^2.0.5",
"require-directory": "^2.1.1",
"string-width": "^4.2.3",
"y18n": "^5.0.5",
"yargs-parser": "^21.1.1"
},
"engines": {
"node": ">=12"
}
},
"node_modules/yargs-parser": {
"version": "21.1.1",
"resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
"integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
"dev": true,
"license": "ISC",
"engines": {
"node": ">=12"
}
},
"node_modules/yargs/node_modules/emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
"dev": true,
"license": "MIT"
},
"node_modules/yargs/node_modules/string-width": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"dev": true,
"license": "MIT",
"dependencies": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/yocto-queue": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",

package.json

@@ -38,6 +38,7 @@
"katex": "^0.16.10",
"llama-tokenizer-js": "^1.1.3",
"postcss": "^8.4.32",
"rollup-plugin-visualizer": "^5.12.0",
"sass": "^1.69.7",
"stacking-order": "^2.0.0",
"svelte": "^3.59.2",

Models.svelte

@@ -6,7 +6,7 @@
import { getChatSettingObjectByKey } from './Settings.svelte'
import { valueOf } from './Util.svelte'
import { chatModels as openAiModels, imageModels as openAiImageModels } from './providers/openai/models.svelte'
import { chatModels as petalsModels } from './providers/petals/models.svelte'
// import { chatModels as petalsModels } from './providers/petals/models.svelte'
const unknownDetail = {
...Object.values(openAiModels)[0]
@@ -14,7 +14,7 @@ const unknownDetail = {
export const supportedChatModels : Record<string, ModelDetail> = {
...openAiModels,
...petalsModels
// ...petalsModels
}
export const supportedImageModels : Record<string, ModelDetail> = {

providers/petals/models.svelte (deleted)

@@ -1,98 +0,0 @@
<script context="module" lang="ts">
import { getPetalsBase, getPetalsWebsocket } from '../../ApiUtil.svelte'
import { countTokens, getDelimiter, getLeadPrompt, getRoleEnd, getRoleTag, getStartSequence } from '../../Models.svelte'
import { countMessageTokens } from '../../Stats.svelte'
import { globalStorage } from '../../Storage.svelte'
import type { Chat, Message, Model, ModelDetail } from '../../Types.svelte'
import { chatRequest } from './request.svelte'
import { checkModel } from './util.svelte'
import llamaTokenizer from 'llama-tokenizer-js'
import { get } from 'svelte/store'
const hideSettings = {
stream: true,
n: true,
presence_penalty: true,
frequency_penalty: true
} as any
const chatModelBase = {
type: 'instruct', // Used for chat, but these models operate like instruct models -- you have to manually structure the messages sent to them
help: `Below are the settings that can be changed for the API calls.
See <a target="_blank" href="https://platform.openai.com/docs/api-reference/chat/create">this overview</a> to start, though not all settings translate to Petals.
<i>Note that some models may not be functional. See <a target="_blank" href="https://health.petals.dev">https://health.petals.dev</a> for current status.</i>`,
check: checkModel,
start: '###',
stop: ['###', '</s>'],
delimiter: '\n###\n###',
userStart: ' User: ',
userEnd: '',
assistantStart: ' [[CHARACTER_NAME]]: ',
assistantEnd: '',
leadPrompt: ' [[CHARACTER_NAME]]: ',
systemEnd: '',
prompt: 0.000000, // $0.000 per 1000 tokens prompt
completion: 0.000000, // $0.000 per 1000 tokens completion
max: 4096, // 4k max token buffer
request: chatRequest,
getEndpoint: (model) => get(globalStorage).pedalsEndpoint || (getPetalsBase() + getPetalsWebsocket()),
getTokens: (value) => llamaTokenizer.encode(value),
hideSetting: (chatId, setting) => !!hideSettings[setting.key],
countMessageTokens: (message:Message, model:Model, chat: Chat):number => {
const delim = getDelimiter(chat)
return countTokens(model, getRoleTag(message.role, model, chat) + ': ' +
message.content + getRoleEnd(message.role, model, chat) + (delim || '###'))
},
countPromptTokens: (prompts:Message[], model:Model, chat: Chat):number => {
return prompts.reduce((a, m) => {
a += countMessageTokens(m, model, chat)
return a
}, 0) + countTokens(model, getStartSequence(chat)) + countTokens(model, getLeadPrompt(chat))
}
} as ModelDetail
export const chatModels : Record<string, ModelDetail> = {
'enoch/llama-65b-hf': {
...chatModelBase,
label: 'Petals - Llama-65b',
max: 2048
},
'timdettmers/guanaco-65b': {
...chatModelBase,
label: 'Petals - Guanaco-65b',
max: 2048
},
// 'codellama/CodeLlama-34b-Instruct-hf ': {
// ...chatModelBase,
// label: 'Petals - CodeLlama-34b',
// max: 2048
// },
// 'meta-llama/Llama-2-70b-hf': {
// ...chatModelBase,
// label: 'Petals - Llama-2-70b'
// },
'meta-llama/Llama-2-70b-chat-hf': {
...chatModelBase,
label: 'Petals - Llama-2-70b-chat',
start: '<s>',
stop: ['</s>', '[INST]', '[/INST]', '<<SYS>>', '<</SYS>>'],
delimiter: '</s><s>',
userStart: '[INST] User: ',
userEnd: ' [/INST]',
systemStart: '[INST] <<SYS>>\n',
systemEnd: '\n<</SYS>> [/INST]'
// leadPrompt: ''
},
'stabilityai/StableBeluga2': {
...chatModelBase,
label: 'Petals - StableBeluga-2-70b'
}
// 'tiiuae/falcon-180B-chat': {
// ...chatModelBase,
// start: '###',
// stop: ['###', '</s>', '<|endoftext|>'],
// label: 'Petals - Falcon-180b-chat'
// }
}
</script>
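
The fields in chatModelBase above (start, stop, delimiter, userStart/userEnd, leadPrompt) are what let these instruct-style models emulate chat: the message history is flattened into a single raw prompt string. A minimal sketch of that assembly for the Llama-2 template, with a literal 'Assistant' tag standing in for the [[CHARACTER_NAME]] placeholder (illustrative only, not the repo's actual helpers):

type Role = 'system' | 'user' | 'assistant'
interface Msg { role: Role, content: string }

// Per-role wrappers, copied from the Llama-2-70b-chat entry above.
const roleTags: Record<Role, [string, string]> = {
  system: ['[INST] <<SYS>>\n', '\n<</SYS>> [/INST]'],
  user: ['[INST] User: ', ' [/INST]'],
  assistant: [' Assistant: ', '']
}

// start: '<s>', delimiter: '</s><s>', lead prompt: ' Assistant: '
const buildPrompt = (history: Msg[]): string =>
  '<s>' +
  history.map(m => roleTags[m.role][0] + m.content + roleTags[m.role][1]).join('</s><s>') +
  '</s><s> Assistant: ' // the lead prompt cues the model to reply

// buildPrompt([{ role: 'user', content: 'Hello!' }])
// => '<s>[INST] User: Hello! [/INST]</s><s> Assistant: '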

providers/petals/request.svelte (deleted)

@@ -1,326 +0,0 @@
<script context="module" lang="ts">
import { ChatCompletionResponse } from '../../ChatCompletionResponse.svelte'
import { ChatRequest } from '../../ChatRequest.svelte'
import { countTokens, getDelimiter, getEndpoint, getLeadPrompt, getModelDetail, getRoleEnd, getRoleTag, getStartSequence, getStopSequence } from '../../Models.svelte'
import type { ChatCompletionOpts, Message, Request } from '../../Types.svelte'
import { getModelMaxTokens } from '../../Stats.svelte'
import { updateMessages } from '../../Storage.svelte'
import { escapeRegex } from '../../Util.svelte'
const levenshteinDistance = (str1 = '', str2 = '') => {
const track = Array(str2.length + 1).fill(null).map(() =>
Array(str1.length + 1).fill(null))
for (let i = 0; i <= str1.length; i += 1) {
track[0][i] = i
}
for (let j = 0; j <= str2.length; j += 1) {
track[j][0] = j
}
for (let j = 1; j <= str2.length; j += 1) {
for (let i = 1; i <= str1.length; i += 1) {
const indicator = str1[i - 1] === str2[j - 1] ? 0 : 1
track[j][i] = Math.min(
track[j][i - 1] + 1, // deletion
track[j - 1][i] + 1, // insertion
track[j - 1][i - 1] + indicator // substitution
)
}
}
return track[str2.length][str1.length]
}
export const chatRequest = async (
request: Request,
chatRequest: ChatRequest,
chatResponse: ChatCompletionResponse,
opts: ChatCompletionOpts): Promise<ChatCompletionResponse> => {
// Petals
const chat = chatRequest.getChat()
const chatSettings = chat.settings
const model = chatRequest.getModel()
const modelDetail = getModelDetail(model)
const signal = chatRequest.controller.signal
const providerData = chatRequest.providerData.petals || {}
chatRequest.providerData.petals = providerData
const modelChanged = model !== providerData.lastModel
providerData.lastModel = model
let ws: WebSocket = providerData.ws
const abortListener = (e:Event) => {
chatRequest.updating = false
chatRequest.updatingMessage = ''
chatResponse.updateFromError('User aborted request.')
signal.removeEventListener('abort', abortListener)
ws.close()
}
signal.addEventListener('abort', abortListener)
const startSequence = getStartSequence(chat)
let stopSequences = [...new Set(getStopSequence(chat).split(',').filter(s => s.trim()).concat((modelDetail.stop || ['###', '</s>']).slice()))]
let stopSequence = stopSequences[0] || '###'
if (startSequence.length) {
const sld = stopSequences.slice()
.filter(s => s === '###' || s === '</s>' || countTokens(model, s) === 1)
.sort((a, b) => levenshteinDistance(a, startSequence) - levenshteinDistance(b, startSequence))
stopSequence = sld[0] || stopSequence
}
stopSequences.push(stopSequence)
const delimiter = getDelimiter(chat)
const leadPromptSequence = getLeadPrompt(chat)
if (delimiter) stopSequences.unshift(delimiter.trim())
stopSequences = stopSequences.sort((a, b) => b.length - a.length)
const stopSequencesC = stopSequences.filter(s => s !== stopSequence)
const maxTokens = getModelMaxTokens(model)
const userAfterSystem = true
// Enforce strict order of messages
const fMessages = (request.messages || [] as Message[])
const rMessages = fMessages.reduce((a, m, i) => {
a.push(m)
// if (m.role === 'system') m.content = m.content.trim()
const nm = fMessages[i + 1]
if (userAfterSystem && m.role === 'system' && (!nm || nm.role !== 'user')) {
const nc = {
role: 'user',
content: ''
} as Message
a.push(nc)
}
return a
},
[] as Message[])
// make sure top_p and temperature are set the way we need
let temperature = request.temperature
if (temperature === undefined || isNaN(temperature as any)) temperature = 1
if (!temperature || temperature <= 0) temperature = 0.01
let topP = request.top_p
if (topP === undefined || isNaN(topP as any)) topP = 1
if (!topP || topP <= 0) topP = 0.01
// build the message array
const buildMessage = (m: Message): string => {
return getRoleTag(m.role, model, chat) + m.content + getRoleEnd(m.role, model, chat)
}
const buildInputArray = (a: Message[]) => {
return a.reduce((a, m, i) => {
let c = buildMessage(m)
let replace = false
const lm = a[a.length - 1]
// Merge content if needed
if (lm) {
if (lm.role === 'system' && m.role === 'user' && c.includes('[[SYSTEM_PROMPT]]')) {
c = c.replaceAll('[[SYSTEM_PROMPT]]', lm.content)
replace = true
} else {
c = c.replaceAll('[[SYSTEM_PROMPT]]', '')
}
if (lm.role === 'user' && m.role === 'assistant' && c.includes('[[USER_PROMPT]]')) {
c = c.replaceAll('[[USER_PROMPT]]', lm.content)
replace = true
} else {
c = c.replaceAll('[[USER_PROMPT]]', '')
}
}
// Clean up merge fields on last
if (!rMessages[i + 1]) {
c = c.replaceAll('[[USER_PROMPT]]', '').replaceAll('[[SYSTEM_PROMPT]]', '')
}
const result = {
role: m.role,
content: c.trim()
} as Message
if (replace) {
a[a.length - 1] = result
} else {
a.push(result)
}
return a
}, [] as Message[])
}
const lastMessage = rMessages[rMessages.length - 1]
let doLead = true
if (lastMessage && lastMessage.role === 'assistant') {
lastMessage.content = leadPromptSequence + lastMessage.content
doLead = false
}
// const inputArray = buildInputArray(rMessages).map(m => m.content)
const lInputArray = doLead
? (rMessages.length > 1 ? buildInputArray(rMessages.slice(0, -1)).map(m => m.content) : [])
: buildInputArray(rMessages.slice()).map(m => m.content)
const nInputArray = buildInputArray(rMessages.slice(-1)).map(m => m.content)
const leadPrompt = (leadPromptSequence && doLead) ? delimiter + leadPromptSequence : ''
const lastPrompt = startSequence + lInputArray.join(delimiter)
const nextPrompt = doLead ? nInputArray.slice(-1).join('') + leadPrompt : ''
// set up the request
chatResponse.onFinish(() => {
const message = chatResponse.getMessages()[0]
if (message) {
for (let i = 0, l = stopSequences.length; i < l; i++) {
const ss = stopSequences[i].trim()
if (message.content.trim().endsWith(ss)) {
message.content = message.content.trim().slice(0, message.content.trim().length - ss.length)
updateMessages(chat.id)
}
}
}
!chatSettings.holdSocket && ws.close()
})
let maxLen = Math.min(opts.maxTokens || chatSettings.max_tokens || maxTokens, maxTokens)
let midDel = ''
for (let i = 0, l = delimiter.length; i < l; i++) {
const chk = delimiter.slice(0, i)
if ((providerData.knownBuffer || '').slice(0 - (i + 1)) === chk) midDel = chk
}
midDel = midDel.length ? delimiter.slice(0, 0 - midDel.length) : delimiter
let inputPrompt = doLead ? midDel : ''
const getNewWs = ():Promise<WebSocket> => new Promise<WebSocket>((resolve, reject) => {
// console.warn('requesting new ws')
const nws = new WebSocket(getEndpoint(model))
let opened = false
let done = false
nws.onmessage = event => {
if (done) return
done = true
const response = JSON.parse(event.data)
if (!response.ok) {
const err = new Error('Error opening socket: ' + response.traceback)
chatResponse.updateFromError(err.message)
console.error(err)
reject(err)
}
nws.onerror = err => {
console.error(err)
throw err
}
// console.warn('got new ws')
inputPrompt = lastPrompt + (doLead && lInputArray.length ? delimiter : '')
providerData.knownBuffer = ''
providerData.ws = nws
resolve(nws)
}
nws.onclose = () => {
chatResponse.updateFromClose()
}
nws.onerror = err => {
if (done) return
done = true
console.error(err)
reject(err)
}
nws.onopen = () => {
if (opened) return
opened = true
const promptTokenCount = countTokens(model, lastPrompt + delimiter + nextPrompt)
if (promptTokenCount > maxLen) {
maxLen = Math.min(maxLen + promptTokenCount, maxTokens)
}
// update with real count
chatResponse.setPromptTokenCount(promptTokenCount)
const req = {
type: 'open_inference_session',
model,
max_length: chatSettings.holdSocket ? maxTokens : maxLen
} as any
nws.send(JSON.stringify(req))
}
})
const wsOpen = (ws && ws.readyState === WebSocket.OPEN)
if (!chatSettings.holdSocket || wsOpen) {
const rgxp = new RegExp('(<s>|</s>|\\s|' + escapeRegex(stopSequence) + ')', 'g')
const kb = providerData.knownBuffer.replace(rgxp, '')
const lp = lastPrompt.replace(rgxp, '')
const lm = kb === lp
if (!chatSettings.holdSocket || modelChanged || !lm ||
countTokens(model, providerData.knownBuffer + inputPrompt) >= maxTokens) {
wsOpen && ws.close()
ws = await getNewWs()
}
}
if (!ws || ws.readyState !== WebSocket.OPEN) {
ws = await getNewWs()
}
inputPrompt += nextPrompt
providerData.knownBuffer += inputPrompt
// console.log(
// '\n\n*** inputPrompt: ***\n\n',
// inputPrompt
// )
const petalsRequest = {
type: 'generate',
inputs: inputPrompt,
max_new_tokens: 1, // wait for up to 1 token before displaying
stop_sequence: stopSequence,
do_sample: 1, // enable top p and the like
temperature,
top_p: topP,
repetition_penalty: chatSettings.repetitionPenalty
} as any
if (stopSequencesC.length) petalsRequest.extra_stop_sequences = stopSequencesC
// Update token count
chatResponse.setPromptTokenCount(countTokens(model, providerData.knownBuffer))
ws.onmessage = event => {
// Remove updating indicator
chatRequest.updating = chatRequest.updating && 1 // hide indicator, but still signal we're updating
chatRequest.updatingMessage = ''
const response = JSON.parse(event.data)
if (!response.ok) {
if (response.traceback.includes('Maximum length exceeded')) {
return chatResponse.finish('length')
}
if (!chatRequest.updating) return
const err = new Error('Error in response: ' + response.traceback)
console.error(err)
chatResponse.updateFromError(err.message)
throw err
}
providerData.knownBuffer += response.outputs
chatResponse.updateFromAsyncResponse(
{
model,
choices: [{
delta: {
content: response.outputs,
role: 'assistant'
},
finish_reason: (response.stop ? 'stop' : null)
}]
} as any
)
if (chatSettings.aggressiveStop && !response.stop) {
// check if we should've stopped
const message = chatResponse.getMessages()[0]
const pad = 10 // look back 10 characters + stop sequence
if (message) {
const mc = (message.content).trim()
for (let i = 0, l = stopSequences.length; i < l; i++) {
const ss = stopSequences[i].trim()
const ind = mc.slice(0 - (ss.length + pad)).indexOf(ss)
if (ind > -1) {
const offset = (ss.length + pad) - ind
message.content = mc.slice(0, mc.length - offset)
response.stop = true
updateMessages(chat.id)
chatResponse.finish()
if (ss !== stopSequence) {
providerData.knownBuffer += stopSequence
}
ws.close()
}
}
}
}
}
ws.send(JSON.stringify(petalsRequest))
return chatResponse
}
</script>
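
Distilled from the removed request code, the Petals exchange is two WebSocket messages: open_inference_session to bind a model, then generate, with tokens streaming back in response.outputs until response.stop. A bare-bones sketch (field names are taken from the code above; the public swarm endpoint is an assumption standing in for getEndpoint(model)):

const ws = new WebSocket('wss://chat.petals.dev/api/v2/generate')

ws.onopen = () => {
  // 1. Open an inference session for a model hosted on the swarm.
  ws.send(JSON.stringify({
    type: 'open_inference_session',
    model: 'meta-llama/Llama-2-70b-chat-hf',
    max_length: 2048
  }))
}

let sessionOpen = false
let output = ''
ws.onmessage = event => {
  const response = JSON.parse(event.data)
  if (!response.ok) { console.error(response.traceback); ws.close(); return }
  if (!sessionOpen) {
    sessionOpen = true
    // 2. Request a completion; max_new_tokens: 1 streams each token as generated.
    ws.send(JSON.stringify({
      type: 'generate',
      inputs: '<s>[INST] User: Hello! [/INST]</s><s> Assistant: ',
      max_new_tokens: 1,
      do_sample: 1,
      temperature: 0.7,
      top_p: 0.9,
      stop_sequence: '</s>'
    }))
    return
  }
  output += response.outputs
  if (response.stop) { console.log(output); ws.close() }
}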

providers/petals/util.svelte (deleted)

@@ -1,16 +0,0 @@
<script context="module" lang="ts">
import { globalStorage } from '../../Storage.svelte'
import { get } from 'svelte/store'
import type { ModelDetail } from '../../Types.svelte'
export const set = (opt: Record<string, any>) => {
//
}
export const checkModel = async (modelDetail: ModelDetail) => {
if (modelDetail.type === 'chat' || modelDetail.type === 'instruct') {
modelDetail.enabled = get(globalStorage).enablePetals
}
}
</script>

vite.config.ts

@@ -3,8 +3,17 @@ import { svelte } from '@sveltejs/vite-plugin-svelte'
import dsv from '@rollup/plugin-dsv'
import purgecss from '@fullhuman/postcss-purgecss'
// import { visualizer } from 'rollup-plugin-visualizer';
const plugins = [svelte(), dsv()]
const plugins = [
svelte(),
dsv(),
// visualizer({
// open: true,
// gzipSize: true,
// brotliSize: true,
// }),
]
// https://vitejs.dev/config/
export default defineConfig(({ command, mode, ssrBuild }) => {
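
To measure what dropping Petals saves, the commented-out visualizer can be re-enabled. A sketch of the uncommented form (options as in the comment above; the plugin writes stats.html after a build, and an npm build script is assumed):

import { defineConfig } from 'vite'
import { svelte } from '@sveltejs/vite-plugin-svelte'
import dsv from '@rollup/plugin-dsv'
import { visualizer } from 'rollup-plugin-visualizer'

export default defineConfig({
  plugins: [
    svelte(),
    dsv(),
    // Opens an interactive treemap of bundle size after `npm run build`.
    visualizer({ open: true, gzipSize: true, brotliSize: true })
  ]
})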