Add hidden prompt prefix setting
parent 3cb56e9477
commit ac7d99948f
@@ -24,7 +24,7 @@
 } from './Types.svelte'
 import Prompts from './Prompts.svelte'
 import Messages from './Messages.svelte'
-import { prepareSummaryPrompt, restartProfile } from './Profiles.svelte'
+import { mergeProfileFields, prepareSummaryPrompt, restartProfile } from './Profiles.svelte'
 
 import { afterUpdate, onMount, onDestroy } from 'svelte'
 import Fa from 'svelte-fa/src/fa.svelte'
@@ -188,6 +188,8 @@
 
     let summarySize = chatSettings.summarySize
 
+    const hiddenPromptPrefix = mergeProfileFields(chatSettings, chatSettings.hiddenPromptPrefix).trim()
+
     // console.log('Estimated',promptTokenCount,'prompt token for this request')
 
     if (chatSettings.continuousChat && !opts.didSummary &&
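The prefix is resolved once per request from the chat's settings; passing it through mergeProfileFields and trim() means an unset or whitespace-only setting collapses to an empty string, which the message mapping below treats as "no prefix". A minimal sketch of that guard, using an assumed settings object:

// Sketch only: a blank setting yields '' and disables injection further down.
const chatSettings = { characterName: 'ChatGPT', hiddenPromptPrefix: '   ' } as any
const hiddenPromptPrefix = mergeProfileFields(chatSettings, chatSettings.hiddenPromptPrefix).trim()
// hiddenPromptPrefix === '' → the map() over messages leaves user prompts untouched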
@@ -342,7 +344,14 @@
 
   try {
     const request: Request = {
-      messages: filtered.map(m => { return { role: m.role, content: m.content } }) as Message[],
+      messages: filtered.map((m, i) => {
+        const r = { role: m.role, content: m.content }
+        if (i === filtered.length - 1 && m.role === 'user' && hiddenPromptPrefix && !opts.summaryRequest) {
+          // If the last prompt is a user prompt, and we have a hiddenPromptPrefix, inject it
+          r.content = hiddenPromptPrefix + '\n\n' + m.content
+        }
+        return r
+      }) as Message[],
 
       // Provide the settings by mapping the settingsMap to key/value pairs
       ...getRequestSettingList().reduce((acc, setting) => {
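A standalone sketch of the injection added above, with hypothetical sample data (the Message shape is reduced to role/content): only the final message, and only when it is a user prompt and a non-empty prefix is configured, gets the prefix prepended.

// Sketch only: mirrors the map() added above with assumed sample messages.
type Role = 'system' | 'user' | 'assistant'
type Msg = { role: Role, content: string }

const hiddenPromptPrefix = 'Stay in character and answer concisely.'
const filtered: Msg[] = [
  { role: 'system', content: 'You are a helpful assistant.' },
  { role: 'assistant', content: 'How can I help?' },
  { role: 'user', content: 'Summarize the plan.' }
]

const outgoing = filtered.map((m, i) => {
  const r = { role: m.role, content: m.content }
  // Only the last message is touched, and only if it is a user prompt.
  if (i === filtered.length - 1 && m.role === 'user' && hiddenPromptPrefix) {
    r.content = hiddenPromptPrefix + '\n\n' + m.content
  }
  return r
})

// outgoing[2].content === 'Stay in character and answer concisely.\n\nSummarize the plan.'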
@@ -351,16 +360,15 @@
         if (typeof setting.apiTransform === 'function') {
           value = setting.apiTransform(chatId, setting, value)
         }
-        if (opts.summaryRequest && opts.maxTokens) {
-          // requesting summary. do overrides
-          if (key === 'max_tokens') value = opts.maxTokens // only as large as we need for summary
-          if (key === 'n') value = 1 // never more than one completion for summary
+        if (opts.maxTokens) {
+          if (key === 'max_tokens') value = opts.maxTokens // only as large as requested
         }
-        if (opts.streaming) {
+        if (opts.streaming || opts.summaryRequest) {
           /*
           Streaming goes insane with more than one completion.
           Doesn't seem like there's any way to separate the jumbled mess of deltas for the
           different completions.
+          Summary should only have one completion
           */
           if (key === 'n') value = 1
         }
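The override logic now clamps max_tokens whenever opts.maxTokens is given, not only for summary requests, and forces a single completion for both streaming and summary requests. A hedged sketch of that decision logic, with key, value, and opts as assumed stand-ins for what flows through the surrounding reduce:

// Sketch only: assumed minimal shape for the request options.
type Opts = { streaming?: boolean, summaryRequest?: boolean, maxTokens?: number }

const applyOverrides = (key: string, value: number, opts: Opts): number => {
  if (opts.maxTokens && key === 'max_tokens') value = opts.maxTokens // only as large as requested
  if ((opts.streaming || opts.summaryRequest) && key === 'n') value = 1 // one completion only
  return value
}

// e.g. a summary request caps the token budget and disables multiple completions:
applyOverrides('max_tokens', 1024, { summaryRequest: true, maxTokens: 512 }) // 512
applyOverrides('n', 3, { summaryRequest: true, maxTokens: 512 }) // 1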
@@ -70,22 +70,25 @@ export const getProfile = (key:string, forReset:boolean = false):ChatSettings =>
   return clone
 }
 
+export const mergeProfileFields = (settings: ChatSettings, content: string|undefined, maxWords: number|undefined = undefined): string => {
+  if (!content?.toString) return ''
+  content = (content + '').replaceAll('[[CHARACTER_NAME]]', settings.characterName || 'ChatGPT')
+  if (maxWords) content = (content + '').replaceAll('[[MAX_WORDS]]', maxWords.toString())
+  return content
+}
+
 export const prepareProfilePrompt = (chatId:number) => {
   const settings = getChatSettings(chatId)
-  const characterName = settings.characterName
-  const currentProfilePrompt = settings.systemPrompt
-  return currentProfilePrompt.replaceAll('[[CHARACTER_NAME]]', characterName)
+  return mergeProfileFields(settings, settings.systemPrompt).trim()
 }
 
 export const prepareSummaryPrompt = (chatId:number, promptsSize:number, maxTokens:number|undefined = undefined) => {
   const settings = getChatSettings(chatId)
-  const characterName = settings.characterName || 'ChatGPT'
   maxTokens = maxTokens || settings.summarySize
   maxTokens = Math.min(Math.floor(promptsSize / 4), maxTokens) // Make sure we're shrinking by at least a 4th
   const currentSummaryPrompt = settings.summaryPrompt
-  return currentSummaryPrompt
-    .replaceAll('[[CHARACTER_NAME]]', characterName)
-    .replaceAll('[[MAX_WORDS]]', Math.floor(maxTokens * 0.75).toString()) // ~.75 words per token. May need to reduce
+  // ~.75 words per token. May need to reduce
+  return mergeProfileFields(settings, currentSummaryPrompt, Math.floor(maxTokens * 0.75)).trim()
 }
 
 // Restart currently loaded profile
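mergeProfileFields is the new shared helper: it substitutes [[CHARACTER_NAME]] (falling back to 'ChatGPT') and, when a word budget is supplied, [[MAX_WORDS]]. A small usage sketch against assumed settings values:

// Sketch only: ChatSettings is reduced to the one field the helper reads here.
const settings = { characterName: 'Marvin' } as any

mergeProfileFields(settings, 'You are [[CHARACTER_NAME]].')
// => 'You are Marvin.'

mergeProfileFields(settings, 'Summarize in [[MAX_WORDS]] words or less.', 96)
// => 'Summarize in 96 words or less.'

mergeProfileFields(settings, undefined)
// => '' (missing content collapses to an empty string)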
@@ -84,6 +84,7 @@ const defaults:ChatSettings = {
   systemPrompt: '',
   autoStartSession: false,
   trainingPrompts: [],
+  hiddenPromptPrefix: '',
   // useResponseAlteration: false,
   // responseAlterations: [],
   isDirty: false
@@ -167,6 +168,14 @@ const systemPromptSettings: ChatSetting[] = [
     type: 'textarea',
     hide: (chatId) => !getChatSettings(chatId).useSystemPrompt
   },
+  {
+    key: 'hiddenPromptPrefix',
+    name: 'Hidden Prompt Prefix',
+    title: 'A prompt that will be silently injected before every user prompt.',
+    placeholder: 'Enter user prompt prefix here. You can remind ChatGPT how to act.',
+    type: 'textarea',
+    hide: (chatId) => !getChatSettings(chatId).useSystemPrompt
+  },
   {
     key: 'trainingPrompts',
     name: 'Training Prompts',
@@ -58,7 +58,6 @@
   profileName: string,
   profileDescription: string,
   continuousChat: (''|'fifo'|'summary');
-  // useSummarization: boolean;
   summaryThreshold: number;
   summarySize: number;
   pinTop: number;
@@ -67,6 +66,7 @@
   useSystemPrompt: boolean;
   systemPrompt: string;
   autoStartSession: boolean;
+  hiddenPromptPrefix: string;
   trainingPrompts?: Message[];
   useResponseAlteration?: boolean;
   responseAlterations?: ResponseAlteration[];
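With the new hiddenPromptPrefix field in ChatSettings, a profile can carry a standing reminder that is injected before each user prompt without ever appearing in the visible chat. A hedged example of what such a profile fragment could look like (field values are illustrative, not from the source):

// Sketch only: a partial profile using the new field; placeholder substitution
// happens through mergeProfileFields before the prefix is injected.
const profileFragment: Partial<ChatSettings> = {
  characterName: 'Marvin',
  useSystemPrompt: true,
  systemPrompt: 'You are [[CHARACTER_NAME]], a depressed but brilliant robot.',
  hiddenPromptPrefix: 'Remember: stay in character as [[CHARACTER_NAME]] and keep answers short.'
}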