Add hidden prompt prefix setting

Webifi 2023-06-09 17:21:01 -05:00
parent 3cb56e9477
commit ac7d99948f
4 changed files with 35 additions and 15 deletions

[file 1 of 4: the chat component that builds the completion request]

@@ -24,7 +24,7 @@
   } from './Types.svelte'
   import Prompts from './Prompts.svelte'
   import Messages from './Messages.svelte'
-  import { prepareSummaryPrompt, restartProfile } from './Profiles.svelte'
+  import { mergeProfileFields, prepareSummaryPrompt, restartProfile } from './Profiles.svelte'
   import { afterUpdate, onMount, onDestroy } from 'svelte'
   import Fa from 'svelte-fa/src/fa.svelte'

@@ -188,6 +188,8 @@
     let summarySize = chatSettings.summarySize
+    const hiddenPromptPrefix = mergeProfileFields(chatSettings, chatSettings.hiddenPromptPrefix).trim()
+
     // console.log('Estimated',promptTokenCount,'prompt token for this request')
     if (chatSettings.continuousChat && !opts.didSummary &&

@@ -342,7 +344,14 @@
     try {
       const request: Request = {
-        messages: filtered.map(m => { return { role: m.role, content: m.content } }) as Message[],
+        messages: filtered.map((m, i) => {
+          const r = { role: m.role, content: m.content }
+          if (i === filtered.length - 1 && m.role === 'user' && hiddenPromptPrefix && !opts.summaryRequest) {
+            // If the last prompt is a user prompt, and we have a hiddenPromptPrefix, inject it
+            r.content = hiddenPromptPrefix + '\n\n' + m.content
+          }
+          return r
+        }) as Message[],
         // Provide the settings by mapping the settingsMap to key/value pairs
         ...getRequestSettingList().reduce((acc, setting) => {

@@ -351,16 +360,15 @@
           if (typeof setting.apiTransform === 'function') {
             value = setting.apiTransform(chatId, setting, value)
           }
-          if (opts.summaryRequest && opts.maxTokens) {
-            // requesting summary. do overrides
-            if (key === 'max_tokens') value = opts.maxTokens // only as large as we need for summary
-            if (key === 'n') value = 1 // never more than one completion for summary
+          if (opts.maxTokens) {
+            if (key === 'max_tokens') value = opts.maxTokens // only as large as requested
           }
-          if (opts.streaming) {
+          if (opts.streaming || opts.summaryRequest) {
             /*
               Streaming goes insane with more than one completion.
               Doesn't seem like there's any way to separate the jumbled mess of deltas for the
               different completions.
+              Summary should only have one completion
             */
             if (key === 'n') value = 1
           }
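
For context, a minimal sketch of the message rewrite this file's hunks introduce. The standalone injectHiddenPrefix helper and the trimmed-down Message type are illustrative only; the commit itself does this inline in the filtered.map call above.

    type Message = { role: 'system' | 'user' | 'assistant', content: string }

    // Prepend the hidden prefix to the final message, but only when that message is a
    // user prompt and a non-empty prefix is configured; everything else passes through.
    const injectHiddenPrefix = (messages: Message[], hiddenPromptPrefix: string): Message[] =>
      messages.map((m, i) => {
        const r = { ...m }
        if (i === messages.length - 1 && m.role === 'user' && hiddenPromptPrefix) {
          r.content = hiddenPromptPrefix + '\n\n' + m.content
        }
        return r
      })

    // injectHiddenPrefix([{ role: 'user', content: 'Hi' }], 'Stay in character.')
    //   -> [{ role: 'user', content: 'Stay in character.\n\nHi' }]

Because the prefix is merged in only while the request is being built from copies of the filtered messages, it reaches the API on each turn but is never written into the stored chat history.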

[file 2 of 4: Profiles.svelte]

@@ -70,22 +70,25 @@ export const getProfile = (key:string, forReset:boolean = false):ChatSettings =>
   return clone
 }
 
+export const mergeProfileFields = (settings: ChatSettings, content: string|undefined, maxWords: number|undefined = undefined): string => {
+  if (!content?.toString) return ''
+  content = (content + '').replaceAll('[[CHARACTER_NAME]]', settings.characterName || 'ChatGPT')
+  if (maxWords) content = (content + '').replaceAll('[[MAX_WORDS]]', maxWords.toString())
+  return content
+}
+
 export const prepareProfilePrompt = (chatId:number) => {
   const settings = getChatSettings(chatId)
-  const characterName = settings.characterName
-  const currentProfilePrompt = settings.systemPrompt
-  return currentProfilePrompt.replaceAll('[[CHARACTER_NAME]]', characterName)
+  return mergeProfileFields(settings, settings.systemPrompt).trim()
 }
 
 export const prepareSummaryPrompt = (chatId:number, promptsSize:number, maxTokens:number|undefined = undefined) => {
   const settings = getChatSettings(chatId)
-  const characterName = settings.characterName || 'ChatGPT'
   maxTokens = maxTokens || settings.summarySize
   maxTokens = Math.min(Math.floor(promptsSize / 4), maxTokens) // Make sure we're shrinking by at least a 4th
   const currentSummaryPrompt = settings.summaryPrompt
-  return currentSummaryPrompt
-    .replaceAll('[[CHARACTER_NAME]]', characterName)
-    .replaceAll('[[MAX_WORDS]]', Math.floor(maxTokens * 0.75).toString()) // ~.75 words per token. May need to reduce
+  // ~.75 words per token. May need to reduce
+  return mergeProfileFields(settings, currentSummaryPrompt, Math.floor(maxTokens * 0.75)).trim()
 }
 
 // Restart currently loaded profile
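
The new mergeProfileFields helper centralizes the placeholder substitution that both prompt builders now share. A short usage sketch follows; the settings fragment is hypothetical and only characterName is read here:

    const settings = { characterName: 'Jenny' } as ChatSettings  // illustrative profile fragment

    mergeProfileFields(settings, 'You are [[CHARACTER_NAME]].')
    // -> 'You are Jenny.'  (falls back to 'ChatGPT' when characterName is empty)

    mergeProfileFields(settings, 'Summarize the above in [[MAX_WORDS]] words or less.', 75)
    // -> 'Summarize the above in 75 words or less.'

    mergeProfileFields(settings, undefined)
    // -> ''  (missing content becomes an empty string instead of throwing)

prepareSummaryPrompt passes Math.floor(maxTokens * 0.75) as maxWords, so the requested word count stays roughly within the summary's token budget at about 0.75 words per token.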

[file 3 of 4: the chat settings definitions]

@@ -84,6 +84,7 @@ const defaults:ChatSettings = {
   systemPrompt: '',
   autoStartSession: false,
   trainingPrompts: [],
+  hiddenPromptPrefix: '',
   // useResponseAlteration: false,
   // responseAlterations: [],
   isDirty: false

@@ -167,6 +168,14 @@ const systemPromptSettings: ChatSetting[] = [
     type: 'textarea',
     hide: (chatId) => !getChatSettings(chatId).useSystemPrompt
   },
+  {
+    key: 'hiddenPromptPrefix',
+    name: 'Hidden Prompt Prefix',
+    title: 'A prompt that will be silently injected before every user prompt.',
+    placeholder: 'Enter user prompt prefix here. You can remind ChatGPT how to act.',
+    type: 'textarea',
+    hide: (chatId) => !getChatSettings(chatId).useSystemPrompt
+  },
   {
     key: 'trainingPrompts',
     name: 'Training Prompts',
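
Taken together with the chat component hunks in the first file above: the default of '' keeps the feature inert, and the textarea only appears when the system prompt section is enabled. A rough sketch of the intended flow, with illustrative values:

    // With the default, the trimmed merge result is '' and the
    // `hiddenPromptPrefix && ...` guard never injects anything.
    const hiddenPromptPrefix = mergeProfileFields(chatSettings, chatSettings.hiddenPromptPrefix).trim()

    // A profile that opts in might set something like:
    //   useSystemPrompt: true,  // otherwise hide() above keeps the field out of the UI
    //   hiddenPromptPrefix: 'Remember to answer as [[CHARACTER_NAME]], briefly and in plain language.'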

[file 4 of 4: Types.svelte]

@@ -58,7 +58,6 @@
   profileName: string,
   profileDescription: string,
   continuousChat: (''|'fifo'|'summary');
-  // useSummarization: boolean;
   summaryThreshold: number;
   summarySize: number;
   pinTop: number;

@@ -67,6 +66,7 @@
   useSystemPrompt: boolean;
   systemPrompt: string;
   autoStartSession: boolean;
+  hiddenPromptPrefix: string;
   trainingPrompts?: Message[];
   useResponseAlteration?: boolean;
   responseAlterations?: ResponseAlteration[];