Add StableBeluga2. Update prompt structures
commit 7c588ce212
parent 8b2f2515f9
@@ -57,11 +57,15 @@ const modelDetails : Record<string, ModelDetail> = {
       'timdettmers/guanaco-65b': {
         type: 'Petals',
         label: 'Petals - Guanaco-65b',
-        stop: ['###', '</s>'],
-        deliminator: '###',
-        userStart: '<|user|>',
-        assistantStart: '<|[[CHARACTER_NAME]]|>',
-        systemStart: '',
+        start: '',
+        stop: ['###', 'System:', 'Assistant:', 'User:', '</s>'],
+        deliminator: '\n###\n\n',
+        userStart: 'User:\n',
+        userEnd: '',
+        assistantStart: 'Assistant:\n',
+        assistantEnd: '',
+        systemStart: 'System:\n',
+        systemEnd: '',
         prompt: 0.000000, // $0.000 per 1000 tokens prompt
         completion: 0.000000, // $0.000 per 1000 tokens completion
         max: 2048 // 2k max token buffer
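The reworked structure drops the <|user|> and <|[[CHARACTER_NAME]]|> markers in favour of role-labelled turns ("System:", "User:", "Assistant:") separated by '\n###\n\n', and adds the role labels themselves to the stop list so a completion ends when the model tries to open a new turn. Below is a minimal sketch of how such a structure could be flattened into a single prompt string; the Message type and buildPrompt helper are assumptions for illustration, not the project's actual code.

// Minimal sketch (not the project's actual builder): how the prompt-structure
// fields touched in this hunk could be flattened into one Petals prompt string.
// The field names (start, deliminator, userStart, ...) come from the diff above;
// Message and buildPrompt are illustrative assumptions.
type Role = 'system' | 'user' | 'assistant'

interface Message {
  role: Role
  content: string
}

interface PromptShape {
  start: string
  deliminator: string
  userStart: string
  userEnd: string
  assistantStart: string
  assistantEnd: string
  systemStart: string
  systemEnd: string
}

const buildPrompt = (shape: PromptShape, messages: Message[]): string => {
  // Wrap each message in its role's start/end markers.
  const wrapped = messages.map((m) => {
    if (m.role === 'system') return shape.systemStart + m.content + shape.systemEnd
    if (m.role === 'user') return shape.userStart + m.content + shape.userEnd
    return shape.assistantStart + m.content + shape.assistantEnd
  })
  // Join the turns with the deliminator and leave a trailing assistantStart
  // so the model continues the conversation as the assistant.
  return shape.start + wrapped.join(shape.deliminator) + shape.deliminator + shape.assistantStart
}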
@@ -85,10 +89,31 @@ const modelDetails : Record<string, ModelDetail> = {
       'meta-llama/Llama-2-70b-hf': {
         type: 'Petals',
         label: 'Petals - Llama-2-70b',
-        stop: ['###', '</s>'],
-        userStart: '<|user|>',
-        assistantStart: '<|[[CHARACTER_NAME]]|>',
-        systemStart: '',
+        start: '',
+        stop: ['###', 'System:', 'Assistant:', 'User:', '</s>'],
+        deliminator: '\n###\n\n',
+        userStart: 'User:\n',
+        userEnd: '',
+        assistantStart: 'Assistant:\n',
+        assistantEnd: '',
+        systemStart: 'System:\n',
+        systemEnd: '',
+        prompt: 0.000000, // $0.000 per 1000 tokens prompt
+        completion: 0.000000, // $0.000 per 1000 tokens completion
+        max: 4096 // 4k max token buffer
+      },
+      'stabilityai/StableBeluga2': {
+        type: 'Petals',
+        label: 'Petals - StableBeluga2',
+        start: '',
+        stop: ['###', 'System:', 'Assistant:', 'User:', '</s>'],
+        deliminator: '\n###\n\n',
+        userStart: 'User:\n',
+        userEnd: '',
+        assistantStart: 'Assistant:\n',
+        assistantEnd: '',
+        systemStart: 'System:\n',
+        systemEnd: '',
         prompt: 0.000000, // $0.000 per 1000 tokens prompt
         completion: 0.000000, // $0.000 per 1000 tokens completion
         max: 4096 // 4k max token buffer
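The new 'stabilityai/StableBeluga2' entry reuses the same role-labelled shape as the updated Llama-2-70b entry, with a 4k token buffer and the expanded stop list. Using the assumed buildPrompt helper from the earlier sketch, a short exchange would render as shown in the trailing comment; this illustrates the prompt format and is not output captured from the application.

// Continuing the earlier sketch: render a short exchange with the shape given
// to 'stabilityai/StableBeluga2' in this commit (buildPrompt is the assumed helper).
const belugaShape: PromptShape = {
  start: '',
  deliminator: '\n###\n\n',
  userStart: 'User:\n',
  userEnd: '',
  assistantStart: 'Assistant:\n',
  assistantEnd: '',
  systemStart: 'System:\n',
  systemEnd: ''
}

const prompt = buildPrompt(belugaShape, [
  { role: 'system', content: 'You are a helpful assistant.' },
  { role: 'user', content: 'Hello!' }
])

// prompt is now:
// 'System:\nYou are a helpful assistant.\n###\n\nUser:\nHello!\n###\n\nAssistant:\n'
// The stop sequences ('###', 'System:', 'Assistant:', 'User:', '</s>') cut the
// completion off before the model starts writing the next turn itself.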
@@ -139,6 +164,7 @@ export const supportedModels : Record<string, ModelDetail> = {
       // 'enoch/llama-65b-hf': modelDetails['enoch/llama-65b-hf'],
       // 'timdettmers/guanaco-65b': modelDetails['timdettmers/guanaco-65b'],
       'meta-llama/Llama-2-70b-hf': modelDetails['meta-llama/Llama-2-70b-hf'],
+      'stabilityai/StableBeluga2': modelDetails['stabilityai/StableBeluga2'],
       'meta-llama/Llama-2-70b-chat-hf': modelDetails['meta-llama/Llama-2-70b-chat-hf']
 }
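Adding the entry to supportedModels is presumably what makes StableBeluga2 selectable alongside the existing Llama-2 entries; the commented-out guanaco-65b and llama-65b lines stay disabled. An illustrative lookup, written as if inside the module shown in the diff (supportedModels and ModelDetail are the names it already uses):

// Illustrative only; not code from the commit.
const beluga: ModelDetail | undefined = supportedModels['stabilityai/StableBeluga2']
// beluga?.label === 'Petals - StableBeluga2'
// beluga?.type === 'Petals'
// beluga?.max === 4096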