Merge pull request #287 from Webifi/main
Fix bug with profile URL generation
Commit 4070025b99
@@ -102,8 +102,8 @@
     // location.protocol + '//' + location.host + location.pathname
     const uri = '#/chat/new?petals=true&' + Object.entries(chatSettings).reduce((a, [k, v]) => {
       const t = typeof v
-      if (hasChatSetting(k) && (t === 'boolean' || t === 'string' || t === 'number')) {
-        a.push(encodeURI(k) + '=' + encodeURI(v as any))
+      if (hasChatSetting(k as any) && (t === 'boolean' || t === 'string' || t === 'number')) {
+        a.push(encodeURIComponent(k) + '=' + encodeURIComponent(v as any))
       }
       return a
     }, [] as string[]).join('&')
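Note on the fix above: encodeURI leaves URI-reserved characters such as '&', '=' and '?' unescaped, so a setting value containing them corrupted the generated profile URL's query string; encodeURIComponent escapes them. A quick TypeScript illustration:

    // encodeURI preserves reserved characters; encodeURIComponent escapes them
    const v = 'a&b=c'
    console.log(encodeURI(v))          // 'a&b=c'     -- would break k=v parsing
    console.log(encodeURIComponent(v)) // 'a%26b%3Dc' -- safe as a query value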
@@ -21,7 +21,7 @@ import {
     import { getModelDetail, getTokens } from './Models.svelte'

 const defaultModel:Model = 'gpt-3.5-turbo'
-const defaultModelPetals:Model = 'meta-llama/Llama-2-70b-chat-hf'
+const defaultModelPetals:Model = 'stabilityai/StableBeluga2'

 export const getDefaultModel = (): Model => {
   if (!get(apiKeyStorage)) return defaultModelPetals
@@ -23,7 +23,7 @@ const hiddenSettings = {
       repetitionPenalty: true,
       holdSocket: true
       // leadPrompt: true
-}
+} as any

 const chatModelBase = {
   type: 'chat',
@@ -14,7 +14,7 @@ const hideSettings = {
   n: true,
   presence_penalty: true,
   frequency_penalty: true
-}
+} as any

 const chatModelBase = {
   type: 'instruct', // Used for chat, but these models operate like instruct models -- you have to manually structure the messages sent to them
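One plausible reading of the two '} as any' changes (this hunk and the previous one): without a widened type, TypeScript rejects indexing these settings literals with an arbitrary string key. A minimal sketch of the error being avoided, with illustrative names:

    // Sketch only; 'hidden' and 'k' are illustrative names, not the real code.
    const hidden = { n: true, presence_penalty: true } as any
    const k: string = 'frequency_penalty'
    console.log(!!hidden[k]) // compiles with 'as any'; errors (TS7053) without it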
@@ -85,8 +85,14 @@ export const chatModels : Record<string, ModelDetail> = {
       },
       'stabilityai/StableBeluga2': {
         ...chatModelBase,
-        label: 'Petals - StableBeluga-2'
+        label: 'Petals - StableBeluga-2-70b'
       }
+      // 'tiiuae/falcon-180B-chat': {
+      //   ...chatModelBase,
+      //   start: '###',
+      //   stop: ['###', '</s>', '<|endoftext|>'],
+      //   label: 'Petals - Falcon-180b-chat'
+      // }
 }

 </script>
@@ -70,13 +70,15 @@ export const chatRequest = async (
       stopSequences = stopSequences.sort((a, b) => b.length - a.length)
       const stopSequencesC = stopSequences.filter(s => s !== stopSequence)
       const maxTokens = getModelMaxTokens(model)
+      const userAfterSystem = true

       // Enforce strict order of messages
       const fMessages = (request.messages || [] as Message[])
       const rMessages = fMessages.reduce((a, m, i) => {
         a.push(m)
+        // if (m.role === 'system') m.content = m.content.trim()
         const nm = fMessages[i + 1]
-        if (m.role === 'system' && (!nm || nm.role !== 'user')) {
+        if (userAfterSystem && m.role === 'system' && (!nm || nm.role !== 'user')) {
          const nc = {
            role: 'user',
            content: ''
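The new userAfterSystem flag gates the existing rule that a system message must be followed by a user message (an empty one is inserted when it is not). A self-contained sketch of that rule, with simplified types:

    type Msg = { role: string, content: string }
    // Insert an empty user message after any system message not already
    // followed by one, so roles alternate the way the model expects.
    const enforceUserAfterSystem = (msgs: Msg[]): Msg[] =>
      msgs.reduce((a: Msg[], m, i) => {
        a.push(m)
        const next = msgs[i + 1]
        if (m.role === 'system' && (!next || next.role !== 'user')) {
          a.push({ role: 'user', content: '' })
        }
        return a
      }, [])
    console.log(enforceUserAfterSystem([{ role: 'system', content: 'sys' }]))
    // -> [{ role: 'system', ... }, { role: 'user', content: '' }]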
@@ -97,7 +99,7 @@ export const chatRequest = async (
       const buildMessage = (m: Message): string => {
         return getRoleTag(m.role, model, chat) + m.content + getRoleEnd(m.role, model, chat)
       }
-      const buildInputArray = (a) => {
+      const buildInputArray = (a: Message[]) => {
         return a.reduce((a, m, i) => {
           let c = buildMessage(m)
           let replace = false
@@ -141,7 +143,7 @@ export const chatRequest = async (
       }
       // const inputArray = buildInputArray(rMessages).map(m => m.content)
       const lInputArray = doLead
-        ? buildInputArray(rMessages.slice(0, -1)).map(m => m.content)
+        ? (rMessages.length > 1 ? buildInputArray(rMessages.slice(0, -1)).map(m => m.content) : [])
         : buildInputArray(rMessages.slice()).map(m => m.content)
       const nInputArray = buildInputArray(rMessages.slice(-1)).map(m => m.content)
       const leadPrompt = (leadPromptSequence && doLead) ? delimiter + leadPromptSequence : ''
@@ -194,7 +196,7 @@ export const chatRequest = async (
            throw err
          }
          // console.warn('got new ws')
-          inputPrompt = lastPrompt + (doLead ? delimiter : '')
+          inputPrompt = lastPrompt + (doLead && lInputArray.length ? delimiter : '')
          providerData.knownBuffer = ''
          providerData.ws = nws
          resolve(nws)
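This guard works together with the change two hunks up: when there is only one message, rMessages.slice(0, -1) is empty, so lInputArray is [] and no delimiter should be prepended before the final prompt. A sketch of the edge case, with illustrative values:

    // With a single message there is no lead-up prompt, so no delimiter.
    const rMessages = ['only']
    const lInputArray: string[] = rMessages.length > 1 ? rMessages.slice(0, -1) : []
    const doLead = true
    const delimiter = '\n'
    const inputPrompt = 'last prompt' + (doLead && lInputArray.length ? delimiter : '')
    console.log(JSON.stringify(inputPrompt)) // "last prompt" -- no dangling delimiter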
@@ -217,11 +219,12 @@ export const chatRequest = async (
          }
          // update with real count
          chatResponse.setPromptTokenCount(promptTokenCount)
-          nws.send(JSON.stringify({
+          const req = {
            type: 'open_inference_session',
            model,
            max_length: chatSettings.holdSocket ? maxTokens : maxLen
-          }))
+          } as any
+          nws.send(JSON.stringify(req))
        }
      })
