fix prompt continuation for petals
parent 54aac40788
commit 3b72f73080
@@ -45,7 +45,7 @@ const chatModelBase = {
     return prompts.reduce((a, m) => {
       a += countMessageTokens(m, model, chat)
       return a
-    }, 0) + countTokens(model, getStartSequence(chat)) + ((prompts[prompts.length - 1] || {}).role !== 'assistant' ? countTokens(model, getLeadPrompt(chat)) : 0)
+    }, 0) + countTokens(model, getStartSequence(chat)) + countTokens(model, getLeadPrompt(chat))
   }
 } as ModelDetail
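The counting change above pairs with the continuation logic added in the next hunk: the lead sequence is now always emitted at request time, either appended as a lead prompt or prepended to a trailing assistant message, so its tokens are counted unconditionally. A minimal sketch of the new count, assuming stub signatures for the project's helpers (only the reducer body is taken from the diff; the types and declarations are illustrative guesses):

type Message = { role: 'user' | 'assistant' | 'system'; content: string }
type Chat = unknown
type Model = string

// Assumed stubs standing in for the project's helper functions.
declare function countMessageTokens (m: Message, model: Model, chat: Chat): number
declare function countTokens (model: Model, text: string): number
declare function getStartSequence (chat: Chat): string
declare function getLeadPrompt (chat: Chat): string

// After this commit the lead prompt always contributes to the estimate.
const countPromptTokens = (prompts: Message[], model: Model, chat: Chat): number => {
  return prompts.reduce((a, m) => {
    a += countMessageTokens(m, model, chat)
    return a
  }, 0) + countTokens(model, getStartSequence(chat)) + countTokens(model, getLeadPrompt(chat))
}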
@@ -62,6 +62,12 @@ export const chatRequest = async (
   const buildMessage = (m: Message): string => {
     return getRoleTag(m.role, model, chat) + m.content + getRoleEnd(m.role, model, chat)
   }
+  const lastMessage = rMessages[rMessages.length - 1]
+  let doLead = true
+  if (lastMessage && lastMessage.role === 'assistant') {
+    lastMessage.content = leadPromptSequence + lastMessage.content
+    doLead = false
+  }
   const inputArray = rMessages.reduce((a, m, i) => {
     let c = buildMessage(m)
     let replace = false
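The six added lines are the heart of the fix. Isolated as a hypothetical helper (prepareContinuation is not a name from the codebase; Message mirrors the sketch above): when the transcript already ends with an assistant message, the lead sequence is prepended to it so the model continues that reply in place, and doLead is cleared so no second lead prompt gets appended during final assembly.

type Message = { role: 'user' | 'assistant' | 'system'; content: string }

// Hypothetical wrapper around the added lines; returns the doLead flag.
function prepareContinuation (rMessages: Message[], leadPromptSequence: string): boolean {
  const lastMessage = rMessages[rMessages.length - 1]
  let doLead = true
  if (lastMessage && lastMessage.role === 'assistant') {
    // Prepend the lead sequence so the request reads as a continuation of
    // this message instead of opening a fresh assistant turn at the end.
    lastMessage.content = leadPromptSequence + lastMessage.content
    doLead = false
  }
  return doLead
}

Because rMessages is mutated before inputArray is built, the prepended sequence flows through buildMessage and the joined prompt like ordinary message content.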
@@ -96,7 +102,7 @@
     }
     return a
   }, [] as Message[])
-  const leadPrompt = (leadPromptSequence && ((inputArray[inputArray.length - 1] || {}) as Message).role !== 'assistant') ? delimiter + leadPromptSequence : ''
+  const leadPrompt = (leadPromptSequence && doLead) ? delimiter + leadPromptSequence : ''
   const fullPromptInput = getStartSequence(chat) + inputArray.map(m => m.content).join(delimiter) + leadPrompt
 
   let maxLen = Math.min(opts.maxTokens || chatSettings.max_tokens || maxTokens, maxTokens)
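Final assembly then keys off the precomputed flag instead of re-inspecting the last element of inputArray. A sketch with assumed parameter names (buildFullPrompt is hypothetical; the leadPrompt and join expressions are lifted from the diff):

type Message = { role: 'user' | 'assistant' | 'system'; content: string }

// Hypothetical assembly helper: append a lead prompt only when we are not
// already continuing a trailing assistant message (doLead is false then).
function buildFullPrompt (
  inputArray: Message[],
  startSequence: string,
  leadPromptSequence: string,
  delimiter: string,
  doLead: boolean
): string {
  const leadPrompt = (leadPromptSequence && doLead) ? delimiter + leadPromptSequence : ''
  return startSequence + inputArray.map(m => m.content).join(delimiter) + leadPrompt
}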