Change default request config for Llama instruct
This commit is contained in:
parent
4f18d40000
commit
01f7a657db
|
@ -22,14 +22,14 @@ const chatModelBase = {
|
||||||
See <a target="_blank" href="https://platform.openai.com/docs/api-reference/chat/create">this overview</a> to start, though not all settings translate to Petals.
|
See <a target="_blank" href="https://platform.openai.com/docs/api-reference/chat/create">this overview</a> to start, though not all settings translate to Petals.
|
||||||
<i>Note that some models may not be functional. See <a target="_blank" href="https://health.petals.dev">https://health.petals.dev</a> for current status.</i>`,
|
<i>Note that some models may not be functional. See <a target="_blank" href="https://health.petals.dev">https://health.petals.dev</a> for current status.</i>`,
|
||||||
check: checkModel,
|
check: checkModel,
|
||||||
start: '<s>',
|
start: '###',
|
||||||
stop: ['###', '</s>'],
|
stop: ['###', '</s>'],
|
||||||
delimiter: '\n###\n\n',
|
delimiter: '###',
|
||||||
userStart: 'User:\n',
|
userStart: ' User: ',
|
||||||
userEnd: '',
|
userEnd: '',
|
||||||
assistantStart: '[[CHARACTER_NAME]]:\n',
|
assistantStart: ' [[CHARACTER_NAME]]: ',
|
||||||
assistantEnd: '',
|
assistantEnd: '',
|
||||||
leadPrompt: '[[CHARACTER_NAME]]:\n',
|
leadPrompt: ' [[CHARACTER_NAME]]: ',
|
||||||
systemEnd: '',
|
systemEnd: '',
|
||||||
prompt: 0.000000, // $0.000 per 1000 tokens prompt
|
prompt: 0.000000, // $0.000 per 1000 tokens prompt
|
||||||
completion: 0.000000, // $0.000 per 1000 tokens completion
|
completion: 0.000000, // $0.000 per 1000 tokens completion
|
||||||
|
|
Loading…
Reference in New Issue