Begin refactoring model providers to be less anti-pattern

parent 7624ef7999
commit fb2290308f
src/App.svelte
@@ -7,15 +7,17 @@
 import Home from './lib/Home.svelte'
 import Chat from './lib/Chat.svelte'
 import NewChat from './lib/NewChat.svelte'
-import { chatsStorage, apiKeyStorage } from './lib/Storage.svelte'
+import { chatsStorage } from './lib/Storage.svelte'
 import { Modals, closeModal } from 'svelte-modals'
 import { dispatchModalEsc, checkModalEsc } from './lib/Util.svelte'
+import { set as setOpenAI } from './lib/providers/openai/util.svelte'
+import { hasActiveModels } from './lib/Models.svelte'

 // Check if the API key is passed in as a "key" query parameter - if so, save it
 // Example: https://niek.github.io/chatgpt-web/#/?key=sk-...
 const urlParams: URLSearchParams = new URLSearchParams($querystring)
 if (urlParams.has('key')) {
-  apiKeyStorage.set(urlParams.get('key') as string)
+  setOpenAI({ apiKey: urlParams.get('key') as string })
 }

 // The definition of the routes with some conditions
@@ -25,7 +27,7 @@
   '/chat/new': wrap({
     component: NewChat,
     conditions: () => {
-      return !!$apiKeyStorage
+      return hasActiveModels()
     }
   }),

src/lib/ApiUtil.svelte
@@ -5,12 +5,14 @@
 const endpointGenerations = import.meta.env.VITE_ENDPOINT_GENERATIONS || '/v1/images/generations'
 const endpointModels = import.meta.env.VITE_ENDPOINT_MODELS || '/v1/models'
 const endpointEmbeddings = import.meta.env.VITE_ENDPOINT_EMBEDDINGS || '/v1/embeddings'
-const endpointPetals = import.meta.env.VITE_PEDALS_WEBSOCKET || 'wss://chat.petals.dev/api/v2/generate'
+const petalsBase = import.meta.env.VITE_PEDALS_WEBSOCKET || 'wss://chat.petals.dev'
+const endpointPetals = import.meta.env.VITE_PEDALS_WEBSOCKET || '/api/v2/generate'

 export const getApiBase = ():string => apiBase
 export const getEndpointCompletions = ():string => endpointCompletions
 export const getEndpointGenerations = ():string => endpointGenerations
 export const getEndpointModels = ():string => endpointModels
 export const getEndpointEmbeddings = ():string => endpointEmbeddings
-export const getPetals = ():string => endpointPetals
+export const getPetalsBase = ():string => petalsBase
+export const getPetalsWebsocket = ():string => endpointPetals
 </script>
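The single Petals constant is split into a base URL and a websocket path so the two can vary independently. The composed default matches the old single value; a quick sketch of how callers put them back together (see the placeholder change in Home.svelte further down):

    import { getPetalsBase, getPetalsWebsocket } from './ApiUtil.svelte'

    // 'wss://chat.petals.dev' + '/api/v2/generate'
    const defaultPetalsEndpoint = getPetalsBase() + getPetalsWebsocket()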
src/lib/Chat.svelte
@@ -230,7 +230,8 @@
       // Compose the input message
       const inputMessage: Message = { role: 'user', content: input.value, uuid: uuidv4() }
       addMessage(chatId, inputMessage)
-    } else if (!fillMessage && $currentChatMessages.length && $currentChatMessages[$currentChatMessages.length - 1].finish_reason === 'length') {
+    } else if (!fillMessage && $currentChatMessages.length &&
+      $currentChatMessages[$currentChatMessages.length - 1].role === 'assistant') {
       fillMessage = $currentChatMessages[$currentChatMessages.length - 1]
     }

src/lib/ChatCompletionResponse.svelte
@@ -1,9 +1,9 @@
 <script context="module" lang="ts">
 import { setImage } from './ImageStore.svelte'
-import { countTokens } from './Models.svelte'
+import { countTokens, getModelDetail } from './Models.svelte'
 // TODO: Integrate API calls
 import { addMessage, getLatestKnownModel, setLatestKnownModel, subtractRunningTotal, updateMessages, updateRunningTotal } from './Storage.svelte'
-import type { Chat, ChatCompletionOpts, ChatImage, Message, Model, Response, ResponseImage, Usage } from './Types.svelte'
+import type { Chat, ChatCompletionOpts, ChatImage, Message, Model, Response, Usage } from './Types.svelte'
 import { v4 as uuidv4 } from 'uuid'

 export class ChatCompletionResponse {
@@ -53,9 +53,9 @@ export class ChatCompletionResponse {
   private finishListeners: ((m: Message[]) => void)[] = []

   private initialFillMerge (existingContent:string, newContent:string):string {
-    if (!this.didFill && this.isFill && existingContent && !newContent.match(/^'(t|ll|ve|m|d|re)[^a-z]/i)) {
-      // add a trailing space if our new content isn't a contraction
-      existingContent += ' '
+    const modelDetail = getModelDetail(this.model)
+    if (!this.didFill && this.isFill && modelDetail.preFillMerge) {
+      existingContent = modelDetail.preFillMerge(existingContent, newContent)
     }
     this.didFill = true
     return existingContent
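The contraction heuristic that used to live inline here becomes an optional per-model preFillMerge hook. A sketch of what a provider could register — the hook name and its optionality come from the call site above; the body simply reuses the removed heuristic:

    // Hypothetical provider-side hook mirroring the old inline logic:
    // add a trailing space unless the new content starts with a contraction.
    const preFillMerge = (existingContent: string, newContent: string): string => {
      if (existingContent && !newContent.match(/^'(t|ll|ve|m|d|re)[^a-z]/i)) {
        existingContent += ' '
      }
      return existingContent
    }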
@@ -69,15 +69,15 @@ export class ChatCompletionResponse {
     return this.promptTokenCount
   }

-  async updateImageFromSyncResponse (response: ResponseImage, prompt: string, model: Model) {
+  async updateImageFromSyncResponse (images: string[], prompt: string, model: Model) {
     this.setModel(model)
-    for (let i = 0; i < response.data.length; i++) {
-      const d = response.data[i]
+    for (let i = 0; i < images.length; i++) {
+      const b64image = images[i]
       const message = {
         role: 'image',
         uuid: uuidv4(),
         content: prompt,
-        image: await setImage(this.chat.id, { b64image: d.b64_json } as ChatImage),
+        image: await setImage(this.chat.id, { b64image } as ChatImage),
         model,
         usage: {
           prompt_tokens: 0,
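updateImageFromSyncResponse now accepts plain base64 strings rather than OpenAI's ResponseImage shape, so non-OpenAI image providers can call it directly. A provider that still talks to the OpenAI endpoint would presumably unwrap the payload itself; a hypothetical call site:

    // json is an OpenAI images response: { data: [{ b64_json: '...' }, ...] }
    chatResponse.updateImageFromSyncResponse(
      json.data.map((d: { b64_json: string }) => d.b64_json),
      prompt,
      model
    )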
@@ -175,7 +175,7 @@ export class ChatCompletionResponse {
       } as Message)
     }
     this.notifyMessageChange()
-    setTimeout(() => this.finish(), 200) // give others a chance to signal the finish first
+    setTimeout(() => this.finish('abort'), 200) // give others a chance to signal the finish first
   }

   updateFromClose (force: boolean = false): void {
@@ -212,10 +212,13 @@ export class ChatCompletionResponse {
     })
   }

-  private finish = (): void => {
-    this.messages.forEach(m => { m.streaming = false }) // make sure all are marked stopped
-    updateMessages(this.chat.id)
+  finish = (reason: string = ''): void => {
+    if (this.finished) return
+    this.messages.forEach(m => {
+      m.streaming = false
+      if (reason) m.finish_reason = reason
+    }) // make sure all are marked stopped
+    updateMessages(this.chat.id)
     this.finished = true
     const message = this.messages[0]
     const model = this.model || getLatestKnownModel(this.chat.settings.model)
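finish() is now public and takes an optional reason that is stamped onto each message's finish_reason before storage is updated, with a guard against double-finishing. The abort path above calls it as:

    // Marks all in-flight messages with finish_reason 'abort', which the
    // EditMessage toolbar below treats like 'length' and offers "Continue".
    chatResponse.finish('abort')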
src/lib/ChatMenuItem.svelte
@@ -1,11 +1,12 @@
 <script lang="ts">
   import { replace } from 'svelte-spa-router'
   import type { Chat } from './Types.svelte'
-  import { deleteChat, hasActiveModels, pinMainMenu, saveChatStore } from './Storage.svelte'
+  import { deleteChat, pinMainMenu, saveChatStore } from './Storage.svelte'
   import Fa from 'svelte-fa/src/fa.svelte'
   import { faTrash, faCircleCheck, faPencil } from '@fortawesome/free-solid-svg-icons/index'
   import { faMessage } from '@fortawesome/free-regular-svg-icons/index'
   import { onMount } from 'svelte'
+  import { hasActiveModels } from './Models.svelte'

   export let chat:Chat
   export let activeChatId:number|undefined
src/lib/ChatOptionMenu.svelte
@@ -18,7 +18,7 @@
     faEyeSlash
   } from '@fortawesome/free-solid-svg-icons/index'
   import { faSquareMinus, faSquarePlus as faSquarePlusOutline } from '@fortawesome/free-regular-svg-icons/index'
-  import { addChatFromJSON, chatsStorage, checkStateChange, clearChats, clearMessages, copyChat, globalStorage, setGlobalSettingValueByKey, showSetChatSettings, pinMainMenu, getChat, deleteChat, saveChatStore, saveCustomProfile, hasActiveModels } from './Storage.svelte'
+  import { addChatFromJSON, chatsStorage, checkStateChange, clearChats, clearMessages, copyChat, globalStorage, setGlobalSettingValueByKey, showSetChatSettings, pinMainMenu, getChat, deleteChat, saveChatStore, saveCustomProfile } from './Storage.svelte'
   import { exportAsMarkdown, exportChatAsJSON } from './Export.svelte'
   import { newNameForProfile, restartProfile } from './Profiles.svelte'
   import { replace } from 'svelte-spa-router'
@@ -27,6 +27,7 @@
   import PromptConfirm from './PromptConfirm.svelte'
   import { startNewChatWithWarning, startNewChatFromChatId, errorNotice, encodeHTMLEntities } from './Util.svelte'
   import type { ChatSettings } from './Types.svelte'
+  import { hasActiveModels } from './Models.svelte'

   export let chatId
   export const show = (showHide:boolean = true) => {
@@ -223,7 +224,7 @@
       </a>
       <hr class="dropdown-divider">
       <a href={'#/'} class="dropdown-item" on:click={close}>
-        <span class="menu-icon"><Fa icon={faKey}/></span> API Key
+        <span class="menu-icon"><Fa icon={faKey}/></span> API Setting
       </a>
     </div>
   </div>
src/lib/ChatRequest.svelte
@@ -2,15 +2,13 @@
     import { ChatCompletionResponse } from './ChatCompletionResponse.svelte'
     import { mergeProfileFields, prepareSummaryPrompt } from './Profiles.svelte'
     import { countMessageTokens, countPromptTokens, getModelMaxTokens } from './Stats.svelte'
-    import type { Chat, ChatCompletionOpts, ChatSettings, Message, Model, Request, RequestImageGeneration } from './Types.svelte'
-    import { deleteMessage, getChatSettingValueNullDefault, insertMessages, getApiKey, addError, currentChatMessages, getMessages, updateMessages, deleteSummaryMessage } from './Storage.svelte'
+    import type { Chat, ChatCompletionOpts, ChatSettings, Message, Model, Request } from './Types.svelte'
+    import { deleteMessage, getChatSettingValueNullDefault, insertMessages, addError, currentChatMessages, getMessages, updateMessages, deleteSummaryMessage } from './Storage.svelte'
     import { scrollToBottom, scrollToMessage } from './Util.svelte'
     import { getDefaultModel, getRequestSettingList } from './Settings.svelte'
     import { v4 as uuidv4 } from 'uuid'
     import { get } from 'svelte/store'
-    import { getEndpoint, getModelDetail } from './Models.svelte'
-    import { runOpenAiCompletionRequest } from './ChatRequestOpenAi.svelte'
-    import { runPetalsCompletionRequest } from './ChatRequestPetals.svelte'
+    import { getModelDetail } from './Models.svelte'

 export class ChatRequest {
       constructor () {
@@ -53,59 +51,6 @@ export class ChatRequest {
         throw new Error(`${response.status} - ${errorResponse}`)
       }

-      async imageRequest (message: Message, prompt: string, count:number, messages: Message[], opts: ChatCompletionOpts, overrides: ChatSettings = {} as ChatSettings): Promise<ChatCompletionResponse> {
-        const _this = this
-        count = count || 1
-        _this.updating = true
-        _this.updatingMessage = 'Generating Image...'
-        const size = this.chat.settings.imageGenerationSize
-        const request: RequestImageGeneration = {
-          prompt,
-          response_format: 'b64_json',
-          size,
-          n: count
-        }
-        // fetchEventSource doesn't seem to throw on abort,
-        // so we deal with it ourselves
-        _this.controller = new AbortController()
-        const signal = _this.controller.signal
-        const abortListener = (e:Event) => {
-          chatResponse.updateFromError('User aborted request.')
-          signal.removeEventListener('abort', abortListener)
-        }
-        signal.addEventListener('abort', abortListener)
-        // Create request
-        const fetchOptions = {
-          method: 'POST',
-          headers: {
-            Authorization: `Bearer ${getApiKey()}`,
-            'Content-Type': 'application/json'
-          },
-          body: JSON.stringify(request),
-          signal
-        }
-        const chatResponse = new ChatCompletionResponse(opts)
-
-        try {
-          const response = await fetch(getEndpoint('dall-e-' + size), fetchOptions)
-          if (!response.ok) {
-            await _this.handleError(response)
-          } else {
-            const json = await response.json()
-            // Remove updating indicator
-            _this.updating = false
-            _this.updatingMessage = ''
-            // console.log('image json', json, json?.data[0])
-            chatResponse.updateImageFromSyncResponse(json, prompt, 'dall-e-' + size)
-          }
-        } catch (e) {
-          chatResponse.updateFromError(e)
-          throw e
-        }
-        message.suppress = true
-        return chatResponse
-      }
-
       /**
        * Send API request
        * @param messages
@@ -123,8 +68,10 @@ export class ChatRequest {
         _this.updating = true

         const lastMessage = messages[messages.length - 1]
+        const chatResponse = new ChatCompletionResponse(opts)
+        _this.controller = new AbortController()

-        if (chatSettings.imageGenerationSize && !opts.didSummary && !opts.summaryRequest && lastMessage?.role === 'user') {
+        if (chatSettings.imageGenerationModel && !opts.didSummary && !opts.summaryRequest && lastMessage?.role === 'user') {
           const im = lastMessage.content.match(imagePromptDetect)
           if (im) {
             // console.log('image prompt request', im)
@@ -136,11 +83,24 @@ export class ChatRequest {
             )
             if (isNaN(n)) n = 1
             n = Math.min(Math.max(1, n), 4)
-            return await this.imageRequest(lastMessage, im[9], n, messages, opts, overrides)
+            lastMessage.suppress = true
+
+            const imageModelDetail = getModelDetail(chatSettings.imageGenerationModel)
+            return await imageModelDetail.request({} as unknown as Request, _this, chatResponse, {
+              ...opts,
+              prompt: im[9],
+              count: n
+            })
+
+            // (lastMessage, im[9], n, messages, opts, overrides)
             // throw new Error('Image prompt:' + im[7])
           }
         }

+        const model = this.getModel()
+        const modelDetail = getModelDetail(model)
+        const maxTokens = getModelMaxTokens(model)
+
         const includedRoles = ['user', 'assistant'].concat(chatSettings.useSystemPrompt ? ['system'] : [])

         // Submit only the role and content of the messages, provide the previous messages as well for context
@@ -151,9 +111,6 @@ export class ChatRequest {

         // If we're doing continuous chat, do it
         if (!opts.didSummary && !opts.summaryRequest && chatSettings.continuousChat) return await this.doContinuousChat(filtered, opts, overrides)
-
-        const model = this.getModel()
-        const maxTokens = getModelMaxTokens(model)

         // Inject hidden prompts if requested
         // if (!opts.summaryRequest)
@@ -253,26 +210,13 @@ export class ChatRequest {
           stream: opts.streaming
         }

-        // Set-up and make the request
-        const chatResponse = new ChatCompletionResponse(opts)
-
-        const modelDetail = getModelDetail(model)
-
         // Make the chat completion request
         try {
           // Add out token count to the response handler
-          // (streaming doesn't return counts, so we need to do it client side)
+          // (some endpoints do not return counts, so we need to do it client side)
           chatResponse.setPromptTokenCount(promptTokenCount)

-          // fetchEventSource doesn't seem to throw on abort,
-          // so we deal with it ourselves
-          _this.controller = new AbortController()
-          const signal = _this.controller.signal
-
-          if (modelDetail.type === 'Petals') {
-            await runPetalsCompletionRequest(request, _this as any, chatResponse as any, signal, opts)
-          } else {
-            await runOpenAiCompletionRequest(request, _this as any, chatResponse as any, signal, opts)
-          }
+          // run request for given model
+          await modelDetail.request(request, _this, chatResponse, opts)
         } catch (e) {
         // console.error(e)
           _this.updating = false
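The hard-coded Petals/OpenAI branch becomes a per-model request hook, so adding a provider no longer means editing ChatRequest. From the two call sites in this commit (the chat completion above and the image generation earlier), the hook's shape is roughly as follows — inferred from usage, not the committed Types.svelte definition:

    // Inferred signature of the provider hook on ModelDetail; parameter
    // names are hypothetical, only the call order is taken from the diff.
    type RequestHandler = (
      request: Request,                      // completion payload; image models get a dummy {}
      chatRequest: ChatRequest,              // exposes updating flags and the shared AbortController
      chatResponse: ChatCompletionResponse,  // receives streamed or synchronous results
      opts: ChatCompletionOpts & { prompt?: string, count?: number } // image models read prompt/count
    ) => Promise<ChatCompletionResponse>

Note the AbortController is now created up front in the send path, so a provider's request implementation can read the signal from the ChatRequest instance instead of wiring its own.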
@@ -358,10 +302,15 @@ export class ChatRequest {
         // Get extra counts for when the prompts are finally sent.
         const countPadding = this.getTokenCountPadding(filtered, chat)

+        let threshold = chatSettings.summaryThreshold
+        if (threshold < 1) threshold = Math.round(maxTokens * threshold)
+
         // See if we have enough to apply any of the reduction modes
         const fullPromptSize = countPromptTokens(filtered, model, chat) + countPadding
-        if (fullPromptSize < chatSettings.summaryThreshold) return await continueRequest() // nothing to do yet
+        console.log('Check Continuous Chat', fullPromptSize, threshold)
+        if (fullPromptSize < threshold) return await continueRequest() // nothing to do yet
         const overMax = fullPromptSize > maxTokens * 0.95
+        console.log('Running Continuous Chat Reduction', fullPromptSize, threshold)

         // Isolate the pool of messages we're going to reduce
         const pinTop = chatSettings.pinTop
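summaryThreshold may now be given either as an absolute token count or as a fraction of the model's context size. A quick worked example of the conversion above:

    // maxTokens = 4096, summaryThreshold = 0.75
    // threshold  = Math.round(4096 * 0.75) = 3072 tokens
    // summaryThreshold = 3000 (>= 1) is used unchanged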
@@ -383,7 +332,7 @@ export class ChatRequest {
           */

          let promptSize = countPromptTokens(top.concat(rw), model, chat) + countPadding
-          while (rw.length && rw.length > pinBottom && promptSize >= chatSettings.summaryThreshold) {
+          while (rw.length && rw.length > pinBottom && promptSize >= threshold) {
            const rolled = rw.shift()
            // Hide messages we're "rolling"
            if (rolled) rolled.suppress = true
@@ -411,7 +360,7 @@ export class ChatRequest {
          const topSize = countPromptTokens(top, model, chat)
          let maxSummaryTokens = getSS()
          let promptSummary = prepareSummaryPrompt(chatId, maxSummaryTokens)
-          const summaryRequest = { role: 'system', content: promptSummary } as Message
+          const summaryRequest = { role: 'user', content: promptSummary } as Message
          let promptSummarySize = countMessageTokens(summaryRequest, model, chat)
          // Make sure there is enough room to generate the summary, and try to make sure
          // the last prompt is a user prompt as that seems to work better for summaries
@@ -458,7 +407,7 @@ export class ChatRequest {
              const mergedPrompts = rw.map(m => {
                return '[' + (m.role === 'assistant' ? '[[CHARACTER_NAME]]' : '[[USER_NAME]]') + ']\n' +
                  m.content
-              }).join('\n\n')
+              }).join('\n###\n\n')
                .replaceAll('[[CHARACTER_NAME]]', chatSettings.characterName)
                .replaceAll('[[USER_NAME]]', 'Me')
              summaryRequest.content = summaryRequestMessage.replaceAll('[[MERGED_PROMPTS]]', mergedPrompts)
src/lib/ChatSettingField.svelte
@@ -36,12 +36,12 @@
   buildFieldControls()

   onMount(() => {
-    show = (typeof setting.hide !== 'function') || !setting.hide(chatId)
+    show = (typeof setting.hide !== 'function') || !setting.hide(chatId, setting)
     buildFieldControls()
   })

   afterUpdate(() => {
-    show = (typeof setting.hide !== 'function') || !setting.hide(chatId)
+    show = (typeof setting.hide !== 'function') || !setting.hide(chatId, setting)
     header = valueOf(chatId, setting.header)
     headerClass = valueOf(chatId, setting.headerClass)
     placeholder = valueOf(chatId, setting.placeholder)
src/lib/ChatSettingsModal.svelte
@@ -34,7 +34,7 @@
   import { replace } from 'svelte-spa-router'
   import { openModal } from 'svelte-modals'
   import PromptConfirm from './PromptConfirm.svelte'
-  import { getModelOptions } from './Models.svelte'
+  import { getChatModelOptions, getImageModelOptions } from './Models.svelte'

   export let chatId:number
   export const show = () => { showSettings() }
@@ -47,6 +47,7 @@

   const settingsList = getChatSettingList()
   const modelSetting = getChatSettingObjectByKey('model') as ChatSetting & SettingSelect
+  const imageModelSetting = getChatSettingObjectByKey('imageGenerationModel') as ChatSetting & SettingSelect
   const chatDefaults = getChatDefaults()
   const excludeFromProfile = getExcludeFromProfile()

@@ -185,7 +186,8 @@

     // Update the models in the settings
     if (modelSetting) {
-      modelSetting.options = await getModelOptions()
+      modelSetting.options = await getChatModelOptions()
+      imageModelSetting.options = await getImageModelOptions()
     }
     // Refresh settings modal
     showSettingsModal++
src/lib/EditMessage.svelte
@@ -254,7 +254,7 @@
   <div class="tool-drawer-mask"></div>
   <div class="tool-drawer">
     <div class="button-pack">
-      {#if message.finish_reason === 'length'}
+      {#if message.finish_reason === 'length' || message.finish_reason === 'abort'}
       <a
         href={'#'}
         title="Continue "
src/lib/Home.svelte
@@ -1,10 +1,11 @@
 <script lang="ts">
-  import { apiKeyStorage, globalStorage, lastChatId, getChat, started, setGlobalSettingValueByKey, hasActiveModels, checkStateChange } from './Storage.svelte'
+  import { apiKeyStorage, globalStorage, lastChatId, getChat, started, setGlobalSettingValueByKey, checkStateChange } from './Storage.svelte'
   import Footer from './Footer.svelte'
   import { replace } from 'svelte-spa-router'
   import { afterUpdate, onMount } from 'svelte'
-  import { getPetals } from './ApiUtil.svelte'
-  import { clearModelOptionCache } from './Models.svelte'
+  import { getPetalsBase, getPetalsWebsocket } from './ApiUtil.svelte'
+  import { set as setOpenAI } from './providers/openai/util.svelte'
+  import { hasActiveModels } from './Models.svelte'

 $: apiKey = $apiKeyStorage

@@ -26,7 +27,6 @@ onMount(() => {
 })

 afterUpdate(() => {
-    clearModelOptionCache()
     hasModels = hasActiveModels()
     pedalsEndpoint = $globalStorage.pedalsEndpoint
     $checkStateChange++
@@ -36,6 +36,7 @@ const setPetalsEnabled = (event: Event) => {
     const el = (event.target as HTMLInputElement)
     setGlobalSettingValueByKey('enablePetals', !!el.checked)
     showPetalsSettings = $globalStorage.enablePetals
+    hasModels = hasActiveModels()
 }

 </script>
@@ -64,11 +65,12 @@ const setPetalsEnabled = (event: Event) => {
       <form
         class="field has-addons has-addons-right"
         on:submit|preventDefault={(event) => {
+          let val = ''
           if (event.target && event.target[0].value) {
-            apiKeyStorage.set((event.target[0].value).trim())
-          } else {
-            apiKeyStorage.set('') // remove api key
+            val = (event.target[0].value).trim()
           }
+          setOpenAI({ apiKey: val })
+          hasModels = hasActiveModels()
         }}
       >
         <p class="control is-expanded">
@@ -117,7 +119,10 @@ const setPetalsEnabled = (event: Event) => {
           class="field has-addons has-addons-right"
           on:submit|preventDefault={(event) => {
             if (event.target && event.target[0].value) {
-              setGlobalSettingValueByKey('pedalsEndpoint', (event.target[0].value).trim())
+              const v = event.target[0].value.trim()
+              const v2 = v.replace(/^https:/i, 'wss:').replace(/(^wss:\/\/[^/]+)\/*$/i, '$1' + getPetalsWebsocket())
+              setGlobalSettingValueByKey('pedalsEndpoint', v2)
+              event.target[0].value = v2
             } else {
               setGlobalSettingValueByKey('pedalsEndpoint', '')
             }
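Submitted Petals endpoints are normalized before being stored: an https scheme is rewritten to wss, and the websocket path is appended when the URL is just a host. Tracing the two replace calls:

    // 'https://chat.petals.dev'  -> 'wss://chat.petals.dev/api/v2/generate'
    // 'wss://chat.petals.dev/'   -> 'wss://chat.petals.dev/api/v2/generate'
    // 'wss://example.com/custom' -> unchanged (a path is already present)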
@@ -128,7 +133,7 @@ const setPetalsEnabled = (event: Event) => {
               aria-label="PetalsAPI Endpoint"
               type="text"
               class="input"
-              placeholder={getPetals()}
+              placeholder={getPetalsBase() + getPetalsWebsocket()}
               value={$globalStorage.pedalsEndpoint || ''}
             />
           </p>
@@ -148,10 +153,10 @@ const setPetalsEnabled = (event: Event) => {
           <a target="_blank" href="https://petals.dev/">Petals</a> lets you run large language models at home by connecting to a public swarm, BitTorrent-style, without hefty GPU requirements.
         </p>
         <p class="mb-4">
-          You are encouraged to <a target="_blank" href="https://github.com/bigscience-workshop/petals/wiki/FAQ:-Frequently-asked-questions#running-a-server">set up a Petals server to share your GPU resources</a> with the public swarm. Minimum requirements to contribute Llama 2 completions are a GTX 1080 8GB, but the larger/faster the better.
+          You are encouraged to <a target="_blank" href="https://github.com/bigscience-workshop/petals#connect-your-gpu-and-increase-petals-capacity">set up a Petals server to share your GPU resources</a> with the public swarm. Minimum requirements to contribute Llama 2 completions are a GTX 1080 8GB, but the larger/faster the better.
         </p>
         <p class="mb-4">
-          If you're receiving errors while using Petals, <a target="_blank" href="https://health.petals.dev/">check swarm health</a> and consider <a target="_blank" href="https://github.com/bigscience-workshop/petals/wiki/FAQ:-Frequently-asked-questions#running-a-server">adding your GPU to the swarm</a> to help.
+          If you're receiving errors while using Petals, <a target="_blank" href="https://health.petals.dev/">check swarm health</a> and consider <a target="_blank" href="https://github.com/bigscience-workshop/petals#connect-your-gpu-and-increase-petals-capacity">adding your GPU to the swarm</a> to help.
         </p>
         <p class="help is-warning">
           Because Petals uses a public swarm, <b>do not send sensitive information</b> when using Petals.
			@ -1,403 +1,183 @@
 | 
			
		|||
<script context="module" lang="ts">
 | 
			
		||||
    import { getApiBase, getEndpointCompletions, getEndpointGenerations, getEndpointModels, getPetals } from './ApiUtil.svelte'
 | 
			
		||||
    import { apiKeyStorage, globalStorage } from './Storage.svelte'
 | 
			
		||||
import { get, writable } from 'svelte/store'
 | 
			
		||||
    import type { ModelDetail, Model, ResponseModels, SelectOption, Chat } from './Types.svelte'
 | 
			
		||||
import { encode } from 'gpt-tokenizer'
 | 
			
		||||
import llamaTokenizer from 'llama-tokenizer-js'
 | 
			
		||||
    import { mergeProfileFields } from './Profiles.svelte'
 | 
			
		||||
    import { getChatSettingObjectByKey } from './Settings.svelte'
 | 
			
		||||
    import { valueOf } from './Util.svelte'
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
 * TODO: All of this + what's scattered about need to be refactored to interfaces and classes
 | 
			
		||||
 *       to make it all more modular
 | 
			
		||||
 */
 | 
			
		||||
const modelOptionCache = writable([] as SelectOption[])
 | 
			
		||||
 | 
			
		||||
// Reference: https://openai.com/pricing#language-models
 | 
			
		||||
// Eventually we'll add API hosts and endpoints to this
 | 
			
		||||
const modelDetails : Record<string, ModelDetail> = {
 | 
			
		||||
      'gpt-4-32k': {
 | 
			
		||||
        type: 'OpenAIChat',
 | 
			
		||||
        prompt: 0.00006, // $0.06 per 1000 tokens prompt
 | 
			
		||||
        completion: 0.00012, // $0.12 per 1000 tokens completion
 | 
			
		||||
        max: 32768 // 32k max token buffer
 | 
			
		||||
      },
 | 
			
		||||
      'gpt-4': {
 | 
			
		||||
        type: 'OpenAIChat',
 | 
			
		||||
        prompt: 0.00003, // $0.03 per 1000 tokens prompt
 | 
			
		||||
        completion: 0.00006, // $0.06 per 1000 tokens completion
 | 
			
		||||
        max: 8192 // 8k max token buffer
 | 
			
		||||
      },
 | 
			
		||||
      'gpt-3.5': {
 | 
			
		||||
        type: 'OpenAIChat',
 | 
			
		||||
        prompt: 0.0000015, // $0.0015 per 1000 tokens prompt
 | 
			
		||||
        completion: 0.000002, // $0.002 per 1000 tokens completion
 | 
			
		||||
        max: 4096 // 4k max token buffer
 | 
			
		||||
      },
 | 
			
		||||
      'gpt-3.5-turbo-16k': {
 | 
			
		||||
        type: 'OpenAIChat',
 | 
			
		||||
        prompt: 0.000003, // $0.003 per 1000 tokens prompt
 | 
			
		||||
        completion: 0.000004, // $0.004 per 1000 tokens completion
 | 
			
		||||
        max: 16384 // 16k max token buffer
 | 
			
		||||
      },
 | 
			
		||||
      'enoch/llama-65b-hf': {
 | 
			
		||||
        type: 'Petals',
 | 
			
		||||
        label: 'Petals - Llama-65b',
 | 
			
		||||
        stop: ['###', 'System:', 'Assistant:', 'User:', '</s>'],
 | 
			
		||||
        deliminator: '\n###\n\n',
 | 
			
		||||
        userStart: 'User:\n',
 | 
			
		||||
        userEnd: '',
 | 
			
		||||
        assistantStart: 'Assistant:\n',
 | 
			
		||||
        assistantEnd: '',
 | 
			
		||||
        leadPrompt: 'Assistant:\n',
 | 
			
		||||
        systemStart: 'System:\n',
 | 
			
		||||
        prompt: 0.000000, // $0.000 per 1000 tokens prompt
 | 
			
		||||
        completion: 0.000000, // $0.000 per 1000 tokens completion
 | 
			
		||||
        max: 2048 // 2k max token buffer
 | 
			
		||||
      },
 | 
			
		||||
      'timdettmers/guanaco-65b': {
 | 
			
		||||
        type: 'Petals',
 | 
			
		||||
        label: 'Petals - Guanaco-65b',
 | 
			
		||||
        start: '',
 | 
			
		||||
        stop: ['###', 'System:', 'Assistant:', 'User:', '</s>'],
 | 
			
		||||
        deliminator: '\n###\n\n',
 | 
			
		||||
        userStart: 'User:\n',
 | 
			
		||||
        userEnd: '',
 | 
			
		||||
        assistantStart: 'Assistant:\n',
 | 
			
		||||
        assistantEnd: '',
 | 
			
		||||
        leadPrompt: 'Assistant:\n',
 | 
			
		||||
        systemStart: 'System:\n',
 | 
			
		||||
        systemEnd: '',
 | 
			
		||||
        prompt: 0.000000, // $0.000 per 1000 tokens prompt
 | 
			
		||||
        completion: 0.000000, // $0.000 per 1000 tokens completion
 | 
			
		||||
        max: 2048 // 2k max token buffer
 | 
			
		||||
      },
 | 
			
		||||
      'meta-llama/Llama-2-70b-chat-hf': {
 | 
			
		||||
        type: 'Petals',
 | 
			
		||||
        label: 'Petals - Llama-2-70b-chat',
 | 
			
		||||
        start: '<s>',
 | 
			
		||||
        stop: ['</s>'],
 | 
			
		||||
        deliminator: ' </s><s>',
 | 
			
		||||
        userStart: '[INST][[SYSTEM_PROMPT]]',
 | 
			
		||||
        userEnd: ' [/INST]',
 | 
			
		||||
        assistantStart: '[[SYSTEM_PROMPT]][[USER_PROMPT]]',
 | 
			
		||||
        assistantEnd: '',
 | 
			
		||||
        systemStart: '<<SYS>>\n',
 | 
			
		||||
        systemEnd: '\n<</SYS>>\n\n',
 | 
			
		||||
        prompt: 0.000000, // $0.000 per 1000 tokens prompt
 | 
			
		||||
        completion: 0.000000, // $0.000 per 1000 tokens completion
 | 
			
		||||
        max: 4096 // 4k max token buffer
 | 
			
		||||
      },
 | 
			
		||||
      'meta-llama/Llama-2-70b-hf': {
 | 
			
		||||
        type: 'Petals',
 | 
			
		||||
        label: 'Petals - Llama-2-70b',
 | 
			
		||||
        start: '',
 | 
			
		||||
        stop: ['###', 'System:', 'Assistant:', 'User:', '</s>'],
 | 
			
		||||
        deliminator: '\n###\n\n',
 | 
			
		||||
        userStart: 'User:\n',
 | 
			
		||||
        userEnd: '',
 | 
			
		||||
        assistantStart: 'Assistant:\n',
 | 
			
		||||
        assistantEnd: '',
 | 
			
		||||
        leadPrompt: 'Assistant:\n',
 | 
			
		||||
        systemStart: 'System:\n',
 | 
			
		||||
        systemEnd: '',
 | 
			
		||||
        prompt: 0.000000, // $0.000 per 1000 tokens prompt
 | 
			
		||||
        completion: 0.000000, // $0.000 per 1000 tokens completion
 | 
			
		||||
        max: 4096 // 4k max token buffer
 | 
			
		||||
      },
 | 
			
		||||
      'stabilityai/StableBeluga2': {
 | 
			
		||||
        type: 'Petals',
 | 
			
		||||
        label: 'Petals - StableBeluga2',
 | 
			
		||||
        start: '',
 | 
			
		||||
        stop: ['###', 'System:', 'Assistant:', 'User:', '</s>'],
 | 
			
		||||
        deliminator: '\n###\n\n',
 | 
			
		||||
        userStart: 'User:\n',
 | 
			
		||||
        userEnd: '',
 | 
			
		||||
        assistantStart: 'Assistant:\n',
 | 
			
		||||
        assistantEnd: '',
 | 
			
		||||
        leadPrompt: 'Assistant:\n',
 | 
			
		||||
        systemStart: 'System:\n',
 | 
			
		||||
        systemEnd: '',
 | 
			
		||||
        prompt: 0.000000, // $0.000 per 1000 tokens prompt
 | 
			
		||||
        completion: 0.000000, // $0.000 per 1000 tokens completion
 | 
			
		||||
        max: 4096 // 4k max token buffer
 | 
			
		||||
      }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
export const imageModels : Record<string, ModelDetail> = {
 | 
			
		||||
      'dall-e-1024x1024': {
 | 
			
		||||
        type: 'OpenAIDall-e',
 | 
			
		||||
        prompt: 0.00,
 | 
			
		||||
        completion: 0.020, // $0.020 per image
 | 
			
		||||
        max: 1000 // 1000 char prompt, max
 | 
			
		||||
      },
 | 
			
		||||
      'dall-e-512x512': {
 | 
			
		||||
        type: 'OpenAIDall-e',
 | 
			
		||||
        prompt: 0.00,
 | 
			
		||||
        completion: 0.018, // $0.018 per image
 | 
			
		||||
        max: 1000 // 1000 char prompt, max
 | 
			
		||||
      },
 | 
			
		||||
      'dall-e-256x256': {
 | 
			
		||||
        type: 'OpenAIDall-e',
 | 
			
		||||
        prompt: 0.00,
 | 
			
		||||
        completion: 0.016, // $0.016 per image
 | 
			
		||||
        max: 1000 // 1000 char prompt, max
 | 
			
		||||
      }
 | 
			
		||||
}
 | 
			
		||||
  import { apiKeyStorage, globalStorage } from './Storage.svelte'
 | 
			
		||||
  import { get } from 'svelte/store'
 | 
			
		||||
  import type { ModelDetail, Model, SelectOption, Chat } from './Types.svelte'
 | 
			
		||||
  import { mergeProfileFields } from './Profiles.svelte'
 | 
			
		||||
  import { getChatSettingObjectByKey } from './Settings.svelte'
 | 
			
		||||
  import { valueOf } from './Util.svelte'
 | 
			
		||||
  import { chatModels as openAiModels, imageModels as openAiImageModels } from './providers/openai/models.svelte'
 | 
			
		||||
  import { chatModels as petalsModels } from './providers/petals/models.svelte'
 | 
			
		||||
 | 
			
		||||
const unknownDetail = {
 | 
			
		||||
  prompt: 0,
 | 
			
		||||
  completion: 0,
 | 
			
		||||
  max: 4096,
 | 
			
		||||
  type: 'OpenAIChat'
 | 
			
		||||
    ...Object.values(openAiModels)[0]
 | 
			
		||||
} as ModelDetail
 | 
			
		||||
 | 
			
		||||
// See: https://platform.openai.com/docs/models/model-endpoint-compatibility
 | 
			
		||||
// Eventually we'll add UI for managing this
 | 
			
		||||
export const supportedModels : Record<string, ModelDetail> = {
 | 
			
		||||
      'gpt-3.5-turbo': modelDetails['gpt-3.5'],
 | 
			
		||||
      'gpt-3.5-turbo-0301': modelDetails['gpt-3.5'],
 | 
			
		||||
      'gpt-3.5-turbo-0613': modelDetails['gpt-3.5'],
 | 
			
		||||
      'gpt-3.5-turbo-16k': modelDetails['gpt-3.5-turbo-16k'],
 | 
			
		||||
      'gpt-4': modelDetails['gpt-4'],
 | 
			
		||||
      'gpt-4-0314': modelDetails['gpt-4'],
 | 
			
		||||
      'gpt-4-0613': modelDetails['gpt-4'],
 | 
			
		||||
      'gpt-4-32k': modelDetails['gpt-4-32k'],
 | 
			
		||||
      'gpt-4-32k-0314': modelDetails['gpt-4-32k'],
 | 
			
		||||
      'gpt-4-32k-0613': modelDetails['gpt-4-32k'],
 | 
			
		||||
      // 'enoch/llama-65b-hf': modelDetails['enoch/llama-65b-hf'],
 | 
			
		||||
      // 'timdettmers/guanaco-65b': modelDetails['timdettmers/guanaco-65b'],
 | 
			
		||||
      'meta-llama/Llama-2-70b-hf': modelDetails['meta-llama/Llama-2-70b-hf'],
 | 
			
		||||
      'stabilityai/StableBeluga2': modelDetails['stabilityai/StableBeluga2'],
 | 
			
		||||
      'meta-llama/Llama-2-70b-chat-hf': modelDetails['meta-llama/Llama-2-70b-chat-hf']
 | 
			
		||||
export const supportedChatModels : Record<string, ModelDetail> = {
 | 
			
		||||
    ...openAiModels,
 | 
			
		||||
    ...petalsModels
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
export const supportedImageModels : Record<string, ModelDetail> = {
 | 
			
		||||
    ...openAiImageModels
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
const lookupList = {
 | 
			
		||||
  ...imageModels,
 | 
			
		||||
  ...modelDetails,
 | 
			
		||||
  ...supportedModels
 | 
			
		||||
    ...supportedChatModels,
 | 
			
		||||
    ...supportedImageModels
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
export const supportedModelKeys = Object.keys({ ...supportedModels, ...imageModels })
 | 
			
		||||
Object.entries(lookupList).forEach(([k, v]) => {
 | 
			
		||||
    v.id = k
 | 
			
		||||
    v.modelQuery = v.modelQuery || k
 | 
			
		||||
})
 | 
			
		||||
 | 
			
		||||
export const supportedChatModelKeys = Object.keys({ ...supportedChatModels })
 | 
			
		||||
 | 
			
		||||
const tpCache : Record<string, ModelDetail> = {}
 | 
			
		||||
 | 
			
		||||
export const getModelDetail = (model: Model): ModelDetail => {
 | 
			
		||||
      // First try to get exact match, then from cache
 | 
			
		||||
      let r = supportedModels[model] || tpCache[model]
 | 
			
		||||
      if (r) return r
 | 
			
		||||
      // If no exact match, find closest match
 | 
			
		||||
      const k = Object.keys(lookupList)
 | 
			
		||||
        .sort((a, b) => b.length - a.length) // Longest to shortest for best match
 | 
			
		||||
        .find((k) => model.startsWith(k))
 | 
			
		||||
      if (k) {
 | 
			
		||||
        r = lookupList[k]
 | 
			
		||||
      } else {
 | 
			
		||||
        r = unknownDetail
 | 
			
		||||
      }
 | 
			
		||||
      // Cache it so we don't need to do that again
 | 
			
		||||
      tpCache[model] = r
 | 
			
		||||
      return r
 | 
			
		||||
    // First try to get exact match, then from cache
 | 
			
		||||
    let r = lookupList[model] || tpCache[model]
 | 
			
		||||
    if (r) return r
 | 
			
		||||
    // If no exact match, find closest match
 | 
			
		||||
    const k = Object.keys(lookupList)
 | 
			
		||||
      .sort((a, b) => b.length - a.length) // Longest to shortest for best match
 | 
			
		||||
      .find((k) => model.startsWith(k))
 | 
			
		||||
    if (k) {
 | 
			
		||||
      r = lookupList[k]
 | 
			
		||||
    }
 | 
			
		||||
    if (!r) {
 | 
			
		||||
      console.warn('Unable to find model detail for:', model, lookupList)
 | 
			
		||||
      r = unknownDetail
 | 
			
		||||
    }
 | 
			
		||||
    // Cache it so we don't need to do that again
 | 
			
		||||
    tpCache[model] = r
 | 
			
		||||
    return r
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
export const getEndpoint = (model: Model): string => {
 | 
			
		||||
  const modelDetails = getModelDetail(model)
 | 
			
		||||
  const gSettings = get(globalStorage)
 | 
			
		||||
  switch (modelDetails.type) {
 | 
			
		||||
        case 'Petals':
 | 
			
		||||
          return gSettings.pedalsEndpoint || getPetals()
 | 
			
		||||
        case 'OpenAIDall-e':
 | 
			
		||||
          return getApiBase() + getEndpointGenerations()
 | 
			
		||||
        case 'OpenAIChat':
 | 
			
		||||
        default:
 | 
			
		||||
          return gSettings.openAICompletionEndpoint || (getApiBase() + getEndpointCompletions())
 | 
			
		||||
  }
 | 
			
		||||
    return getModelDetail(model).getEndpoint(model)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
export const getStartSequence = (chat: Chat): string => {
 | 
			
		||||
  return mergeProfileFields(
 | 
			
		||||
        chat.settings,
 | 
			
		||||
        chat.settings.startSequence || valueOf(chat.id, getChatSettingObjectByKey('startSequence').placeholder)
 | 
			
		||||
      )
 | 
			
		||||
    return mergeProfileFields(
 | 
			
		||||
      chat.settings,
 | 
			
		||||
      chat.settings.startSequence || valueOf(chat.id, getChatSettingObjectByKey('startSequence').placeholder)
 | 
			
		||||
    )
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
export const getStopSequence = (chat: Chat): string => {
 | 
			
		||||
  return chat.settings.stopSequence || valueOf(chat.id, getChatSettingObjectByKey('stopSequence').placeholder)
 | 
			
		||||
    return chat.settings.stopSequence || valueOf(chat.id, getChatSettingObjectByKey('stopSequence').placeholder)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
export const getDeliminator = (chat: Chat): string => {
 | 
			
		||||
  return chat.settings.deliminator || valueOf(chat.id, getChatSettingObjectByKey('deliminator').placeholder)
 | 
			
		||||
    return chat.settings.deliminator || valueOf(chat.id, getChatSettingObjectByKey('deliminator').placeholder)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
export const getLeadPrompt = (chat: Chat): string => {
 | 
			
		||||
  return mergeProfileFields(
 | 
			
		||||
        chat.settings,
 | 
			
		||||
        chat.settings.leadPrompt || valueOf(chat.id, getChatSettingObjectByKey('leadPrompt').placeholder)
 | 
			
		||||
      )
 | 
			
		||||
    return mergeProfileFields(
 | 
			
		||||
      chat.settings,
 | 
			
		||||
      chat.settings.leadPrompt || valueOf(chat.id, getChatSettingObjectByKey('leadPrompt').placeholder)
 | 
			
		||||
    )
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
export const getUserStart = (chat: Chat): string => {
 | 
			
		||||
  return mergeProfileFields(
 | 
			
		||||
        chat.settings,
 | 
			
		||||
        chat.settings.userMessageStart || valueOf(chat.id, getChatSettingObjectByKey('userMessageStart').placeholder)
 | 
			
		||||
      )
 | 
			
		||||
    return mergeProfileFields(
 | 
			
		||||
      chat.settings,
 | 
			
		||||
      chat.settings.userMessageStart || valueOf(chat.id, getChatSettingObjectByKey('userMessageStart').placeholder)
 | 
			
		||||
    )
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
export const getUserEnd = (chat: Chat): string => {
 | 
			
		||||
  return mergeProfileFields(
 | 
			
		||||
        chat.settings,
 | 
			
		||||
        chat.settings.userMessageEnd || valueOf(chat.id, getChatSettingObjectByKey('userMessageEnd').placeholder)
 | 
			
		||||
      )
 | 
			
		||||
    return mergeProfileFields(
 | 
			
		||||
      chat.settings,
 | 
			
		||||
      chat.settings.userMessageEnd || valueOf(chat.id, getChatSettingObjectByKey('userMessageEnd').placeholder)
 | 
			
		||||
    )
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
export const getAssistantStart = (chat: Chat): string => {
 | 
			
		||||
  return mergeProfileFields(
 | 
			
		||||
        chat.settings,
 | 
			
		||||
        chat.settings.assistantMessageStart || valueOf(chat.id, getChatSettingObjectByKey('assistantMessageStart').placeholder)
 | 
			
		||||
      )
 | 
			
		||||
    return mergeProfileFields(
 | 
			
		||||
      chat.settings,
 | 
			
		||||
      chat.settings.assistantMessageStart || valueOf(chat.id, getChatSettingObjectByKey('assistantMessageStart').placeholder)
 | 
			
		||||
    )
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
export const getAssistantEnd = (chat: Chat): string => {
 | 
			
		||||
  return mergeProfileFields(
 | 
			
		||||
        chat.settings,
 | 
			
		||||
        chat.settings.assistantMessageEnd || valueOf(chat.id, getChatSettingObjectByKey('assistantMessageEnd').placeholder)
 | 
			
		||||
      )
 | 
			
		||||
    return mergeProfileFields(
 | 
			
		||||
      chat.settings,
 | 
			
		||||
      chat.settings.assistantMessageEnd || valueOf(chat.id, getChatSettingObjectByKey('assistantMessageEnd').placeholder)
 | 
			
		||||
    )
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
export const getSystemStart = (chat: Chat): string => {
 | 
			
		||||
  return mergeProfileFields(
 | 
			
		||||
        chat.settings,
 | 
			
		||||
        chat.settings.systemMessageStart || valueOf(chat.id, getChatSettingObjectByKey('systemMessageStart').placeholder)
 | 
			
		||||
      )
 | 
			
		||||
    return mergeProfileFields(
 | 
			
		||||
      chat.settings,
 | 
			
		||||
      chat.settings.systemMessageStart || valueOf(chat.id, getChatSettingObjectByKey('systemMessageStart').placeholder)
 | 
			
		||||
    )
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
export const getSystemEnd = (chat: Chat): string => {
 | 
			
		||||
  return mergeProfileFields(
 | 
			
		||||
        chat.settings,
 | 
			
		||||
        chat.settings.systemMessageEnd || valueOf(chat.id, getChatSettingObjectByKey('systemMessageEnd').placeholder)
 | 
			
		||||
      )
 | 
			
		||||
    return mergeProfileFields(
 | 
			
		||||
      chat.settings,
 | 
			
		||||
      chat.settings.systemMessageEnd || valueOf(chat.id, getChatSettingObjectByKey('systemMessageEnd').placeholder)
 | 
			
		||||
    )
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
export const getRoleTag = (role: string, model: Model, chat: Chat): string => {
 | 
			
		||||
  const modelDetails = getModelDetail(model)
 | 
			
		||||
  switch (modelDetails.type) {
 | 
			
		||||
        case 'Petals':
 | 
			
		||||
          if (role === 'assistant') return getAssistantStart(chat) + ' '
 | 
			
		||||
          if (role === 'user') return getUserStart(chat) + ' '
 | 
			
		||||
          return getSystemStart(chat) + ' '
 | 
			
		||||
        case 'OpenAIDall-e':
 | 
			
		||||
          return role
 | 
			
		||||
        case 'OpenAIChat':
 | 
			
		||||
        default:
 | 
			
		||||
          return role
 | 
			
		||||
  }
 | 
			
		||||
    if (role === 'assistant') return getAssistantStart(chat) + ' '
 | 
			
		||||
    if (role === 'user') return getUserStart(chat) + ' '
 | 
			
		||||
    return getSystemStart(chat) + ' '
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
export const getRoleEnd = (role: string, model: Model, chat: Chat): string => {
 | 
			
		||||
  const modelDetails = getModelDetail(model)
 | 
			
		||||
  switch (modelDetails.type) {
 | 
			
		||||
        case 'Petals':
 | 
			
		||||
          if (role === 'assistant') return getAssistantEnd(chat)
 | 
			
		||||
          if (role === 'user') return getUserEnd(chat)
 | 
			
		||||
          return getSystemEnd(chat)
 | 
			
		||||
        case 'OpenAIDall-e':
 | 
			
		||||
          return ''
 | 
			
		||||
        case 'OpenAIChat':
 | 
			
		||||
        default:
 | 
			
		||||
          return ''
 | 
			
		||||
  }
 | 
			
		||||
    if (role === 'assistant') return getAssistantEnd(chat)
 | 
			
		||||
    if (role === 'user') return getUserEnd(chat)
 | 
			
		||||
    return getSystemEnd(chat)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
export const getTokens = (model: Model, value: string): number[] => {
 | 
			
		||||
  const modelDetails = getModelDetail(model)
 | 
			
		||||
  switch (modelDetails.type) {
 | 
			
		||||
        case 'Petals':
 | 
			
		||||
          return llamaTokenizer.encode(value)
 | 
			
		||||
        case 'OpenAIDall-e':
 | 
			
		||||
          return [0]
 | 
			
		||||
        case 'OpenAIChat':
 | 
			
		||||
        default:
 | 
			
		||||
          return encode(value)
 | 
			
		||||
  }
 | 
			
		||||
    return getModelDetail(model).getTokens(value)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
export const countTokens = (model: Model, value: string): number => {
 | 
			
		||||
  return getTokens(model, value).length
 | 
			
		||||
    return getTokens(model, value).length
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
export const clearModelOptionCache = () => {
  modelOptionCache.set([])
export const hasActiveModels = (): boolean => {
    const globalSettings = get(globalStorage) || {}
    return !!get(apiKeyStorage) || !!globalSettings.enablePetals
}

export async function getModelOptions (): Promise<SelectOption[]> {
  const gSettings = get(globalStorage)
  const openAiKey = get(apiKeyStorage)
  const cachedOptions = get(modelOptionCache)
  if (cachedOptions && cachedOptions.length) return cachedOptions
  // Load available models from OpenAI
  let openAiModels
  let allowCache = true
  if (openAiKey) {
        try {
          openAiModels = (await (
            await fetch(getApiBase() + getEndpointModels(), {
              method: 'GET',
              headers: {
                Authorization: `Bearer ${openAiKey}`,
                'Content-Type': 'application/json'
              }
            })
          ).json()) as ResponseModels
        } catch (e) {
          allowCache = false
          openAiModels = { data: [] }
        }
  } else {
        openAiModels = { data: [] }
  }
  // const filteredModels = Object.keys(supportedModels).filter((model) => {
  //       switch (getModelDetail(model).type) {
  //         case 'Petals':
  //           return gSettings.enablePetals
  //         case 'OpenAIChat':
  //         default:
  //           return openAiModels.data && openAiModels.data.find((m) => m.id === model)
  //       }
  // })
export async function getChatModelOptions (): Promise<SelectOption[]> {
    const models = Object.keys(supportedChatModels)
    const result:SelectOption[] = []
    for (let i = 0, l = models.length; i < l; i++) {
      const model = models[i]
      const modelDetail = getModelDetail(model)
      await modelDetail.check(modelDetail)
      result.push({
        value: model,
        text: modelDetail.label || model,
        disabled: !modelDetail.enabled
      })
    }
    return result
}

  const openAiModelsLookup = openAiModels.data.reduce((a, v) => {
        a[v.id] = v
        return a
  }, {})

  const modelOptions:SelectOption[] = Object.keys(supportedModels).reduce((a, m) => {
        let disabled
        const modelDetail = getModelDetail(m)
        switch (modelDetail.type) {
          case 'Petals':
            disabled = !gSettings.enablePetals
            break
          case 'OpenAIChat':
          default:
            disabled = !(openAiModelsLookup[m])
        }
        const o:SelectOption = {
          value: m,
          text: modelDetail.label || m,
          disabled
        }
        a.push(o)
        return a
  }, [] as SelectOption[])

  if (allowCache) modelOptionCache.set(modelOptions)

  // console.log('openAiModels', openAiModels, openAiModelsLookup)

  return modelOptions
export async function getImageModelOptions (): Promise<SelectOption[]> {
    const models = Object.keys(supportedImageModels)
    const result:SelectOption[] = [{ value: '', text: 'OFF - Disable Image Generation' }]
    for (let i = 0, l = models.length; i < l; i++) {
      const model = models[i]
      const modelDetail = getModelDetail(model)
      await modelDetail.check(modelDetail)
      result.push({
        value: model,
        text: modelDetail.label || model,
        disabled: !modelDetail.enabled
      })
    }
    return result
}

</script>
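
getChatModelOptions and getImageModelOptions above share one shape: walk the supported model keys, let each provider's check() resolve the enabled flag, and emit SelectOption entries. A minimal consumer sketch, assuming it runs where top-level await is available:

import { getChatModelOptions, getImageModelOptions } from './lib/Models.svelte'

// Disabled options stay listed but unselectable in the settings UI.
const chatOptions = await getChatModelOptions()
const imageOptions = await getImageModelOptions()
console.log(chatOptions.filter(o => !o.disabled).map(o => o.value))
console.log(imageOptions.length) // includes the leading 'OFF' entry
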
@@ -62,12 +62,8 @@ export const getExcludeFromProfile = () => {
  return excludeFromProfile
}

const isNotOpenAI = (chatId) => {
  return getModelDetail(getChatSettings(chatId).model).type !== 'OpenAIChat'
}

const isNotPetals = (chatId) => {
  return getModelDetail(getChatSettings(chatId).model).type !== 'Petals'
const hideModelSetting = (chatId, setting) => {
  return getModelDetail(getChatSettings(chatId).model).hideSetting(chatId, setting)
}

const gptDefaults = {
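
The isNotOpenAI/isNotPetals predicates are dropped for a single hideModelSetting that defers to the active model's provider. The provider side is just a key lookup; a sketch mirroring the hiddenSettings pattern this commit introduces in the provider modules (the keys shown are illustrative):

import type { ChatSetting } from './lib/Types.svelte'

// Each provider declares which chat settings its models don't support.
const hiddenSettings: Record<string, boolean> = { stream: true, n: true }
export const hideSetting = (chatId: number, setting: ChatSetting): boolean =>
  !!hiddenSettings[setting.key]
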
@@ -108,7 +104,7 @@ const defaults:ChatSettings = {
  hiddenPromptPrefix: '',
  hppContinuePrompt: '',
  hppWithSummaryPrompt: false,
  imageGenerationSize: '',
  imageGenerationModel: '',
  startSequence: '',
  stopSequence: '',
  aggressiveStop: true,
@@ -120,6 +116,7 @@ const defaults:ChatSettings = {
  systemMessageStart: '',
  systemMessageEnd: '',
  leadPrompt: '',
  repititionPenalty: 1,
  // useResponseAlteration: false,
  // responseAlterations: [],
  isDirty: false
@@ -142,12 +139,6 @@ const excludeFromProfile = {
  isDirty: true
}

export const imageGenerationSizes = [
  '1024x1024', '512x512', '256x256'
]

export const imageGenerationSizeTypes = ['', ...imageGenerationSizes]

export const chatSortOptions = {
  name: { text: 'Name', icon: faArrowDownAZ, value: '', sortFn: (a, b) => { return a.name < b.name ? -1 : a.name > b.name ? 1 : 0 } },
  created: { text: 'Created', icon: faArrowDown91, value: '', sortFn: (a, b) => { return ((b.created || 0) - (a.created || 0)) || (b.id - a.id) } },
@@ -363,16 +354,13 @@ const summarySettings: ChatSetting[] = [
        hide: (chatId) => getChatSettings(chatId).continuousChat !== 'summary'
      },
      {
        key: 'imageGenerationSize',
        name: 'Image Generation Size',
        key: 'imageGenerationModel',
        name: 'Image Generation Model',
        header: 'Image Generation',
        headerClass: 'is-info',
        title: 'Prompt an image with: show me an image of ...',
        type: 'select',
        options: [
          { value: '', text: 'OFF - Disable Image Generation' },
          ...imageGenerationSizes.map(s => { return { value: s, text: s } })
        ]
        options: []
      }
]

@@ -427,13 +415,9 @@ const summarySettings: ChatSetting[] = [
const modelSetting: ChatSetting & SettingSelect = {
      key: 'model',
      name: 'Model',
      title: 'The model to use - GPT-3.5 is cheaper, but GPT-4 is more powerful.',
      title: 'The model to use. Some may cost more than others.',
      header: (chatId) => {
        if (isNotOpenAI(chatId)) {
          return 'Below are the settings that can be changed for the API calls. See <a target="_blank" href="https://platform.openai.com/docs/api-reference/chat/create">this overview</a> to start, though not all settings translate to Petals.'
        } else {
          return 'Below are the settings that OpenAI allows to be changed for the API calls. See the <a target="_blank" href="https://platform.openai.com/docs/api-reference/chat/create">OpenAI API docs</a> for more details.'
        }
        return getModelDetail(getChatSettings(chatId).model).help
      },
      headerClass: 'is-warning',
      options: [],
@@ -453,7 +437,7 @@ const chatSettingsList: ChatSetting[] = [
        name: 'Stream Response',
        title: 'Stream responses as they are generated.',
        type: 'boolean',
        hide: isNotOpenAI
        hide: hideModelSetting
      },
      {
        key: 'temperature',
@@ -485,7 +469,7 @@ const chatSettingsList: ChatSetting[] = [
        max: 10,
        step: 1,
        type: 'number',
        hide: isNotOpenAI
        hide: hideModelSetting
      },
      {
        key: 'max_tokens',
@@ -497,7 +481,6 @@ const chatSettingsList: ChatSetting[] = [
        max: 32768,
        step: 1,
        type: 'number',
        hide: isNotOpenAI,
        forceApi: true // Since default here is different than gpt default, will make sure we always send it
      },
      {
@@ -508,7 +491,7 @@ const chatSettingsList: ChatSetting[] = [
        max: 2,
        step: 0.2,
        type: 'number',
        hide: isNotOpenAI
        hide: hideModelSetting
      },
      {
        key: 'frequency_penalty',
@@ -518,8 +501,18 @@ const chatSettingsList: ChatSetting[] = [
        max: 2,
        step: 0.2,
        type: 'number',
        hide: isNotOpenAI
        hide: hideModelSetting
      },
      // {
      //   key: 'repititionPenalty',
      //   name: 'Repitition Penalty',
      //   title: 'Number between 1.0 and infinity. Penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics.',
      //   min: 0,
      //   max: 1000,
      //   step: 0.1,
      //   type: 'number',
      //   hide: isNotPetals
      // },
      {
        key: 'startSequence',
        name: 'Start Sequence',
@@ -529,25 +522,25 @@ const chatSettingsList: ChatSetting[] = [
          const val = getModelDetail(getChatSettings(chatId).model).start
          return val || ''
        },
        hide: isNotPetals
        hide: hideModelSetting
      },
      {
        key: 'stopSequence',
        name: 'Stop Sequence',
        title: 'Characters used to signal end of message chain.',
        type: 'text',
        name: 'Stop Sequences',
        title: 'Characters used to signal end of message chain. Separate multiple with a comma.',
        type: 'textarea',
        placeholder: (chatId) => {
          const val = getModelDetail(getChatSettings(chatId).model).stop
          return (val && val[0]) || ''
          return (val && val.join(',')) || ''
        },
        hide: isNotPetals
        hide: hideModelSetting
      },
      {
        key: 'aggressiveStop',
        name: 'Use aggressive stop',
        title: 'Sometimes generation can continue even after a stop sequence. This will stop generation client side if generation continues after stop sequence.',
        type: 'boolean',
        hide: isNotPetals
        hide: hideModelSetting
      },
      {
        key: 'deliminator',
@@ -558,7 +551,7 @@ const chatSettingsList: ChatSetting[] = [
          const val = getModelDetail(getChatSettings(chatId).model).deliminator
          return val || ''
        },
        hide: isNotPetals
        hide: hideModelSetting
      },
      {
        key: 'userMessageStart',
@@ -569,7 +562,7 @@ const chatSettingsList: ChatSetting[] = [
          const val = getModelDetail(getChatSettings(chatId).model).userStart
          return val || ''
        },
        hide: isNotPetals
        hide: hideModelSetting
      },
      {
        key: 'userMessageEnd',
@@ -580,7 +573,7 @@ const chatSettingsList: ChatSetting[] = [
          const val = getModelDetail(getChatSettings(chatId).model).userEnd
          return val || ''
        },
        hide: isNotPetals
        hide: hideModelSetting
      },
      {
        key: 'assistantMessageStart',
@@ -591,7 +584,7 @@ const chatSettingsList: ChatSetting[] = [
          const val = getModelDetail(getChatSettings(chatId).model).assistantStart
          return val || ''
        },
        hide: isNotPetals
        hide: hideModelSetting
      },
      {
        key: 'assistantMessageEnd',
@@ -602,7 +595,7 @@ const chatSettingsList: ChatSetting[] = [
          const val = getModelDetail(getChatSettings(chatId).model).assistantEnd
          return val || ''
        },
        hide: isNotPetals
        hide: hideModelSetting
      },
      {
        key: 'leadPrompt',
@@ -613,7 +606,7 @@ const chatSettingsList: ChatSetting[] = [
          const val = getModelDetail(getChatSettings(chatId).model).leadPrompt
          return val || ''
        },
        hide: isNotPetals
        hide: hideModelSetting
      },
      {
        key: 'systemMessageStart',
@@ -624,7 +617,7 @@ const chatSettingsList: ChatSetting[] = [
          const val = getModelDetail(getChatSettings(chatId).model).systemStart
          return val || ''
        },
        hide: isNotPetals
        hide: hideModelSetting
      },
      {
        key: 'systemMessageEnd',
@@ -635,7 +628,7 @@ const chatSettingsList: ChatSetting[] = [
          const val = getModelDetail(getChatSettings(chatId).model).systemEnd
          return val || ''
        },
        hide: isNotPetals
        hide: hideModelSetting
      },
      {
        // logit bias editor not implemented yet

@@ -1,7 +1,7 @@
<script lang="ts">
  import { params } from 'svelte-spa-router'
  import ChatMenuItem from './ChatMenuItem.svelte'
  import { apiKeyStorage, chatsStorage, pinMainMenu, checkStateChange, getChatSortOption, setChatSortOption, hasActiveModels } from './Storage.svelte'
  import { chatsStorage, pinMainMenu, checkStateChange, getChatSortOption, setChatSortOption } from './Storage.svelte'
  import Fa from 'svelte-fa/src/fa.svelte'
  import { faSquarePlus, faKey } from '@fortawesome/free-solid-svg-icons/index'
  import ChatOptionMenu from './ChatOptionMenu.svelte'
@@ -9,6 +9,7 @@
  import { clickOutside } from 'svelte-use-click-outside'
  import { startNewChatWithWarning } from './Util.svelte'
  import { chatSortOptions } from './Settings.svelte'
  import { hasActiveModels } from './Models.svelte'

  $: sortedChats = $chatsStorage.sort(getChatSortOption().sortFn)
  $: activeChatId = $params && $params.chatId ? parseInt($params.chatId) : undefined
@@ -76,8 +77,8 @@
      <div class="level-right">
        {#if !hasModels}
        <div class="level-item">
          <a href={'#/'} class="panel-block" class:is-disabled={!$apiKeyStorage}
            ><span class="greyscale mr-1"><Fa icon={faKey} /></span> API key</a
          <a href={'#/'} class="panel-block" class:is-disabled={!hasModels}
            ><span class="greyscale mr-1"><Fa icon={faKey} /></span> API Setting</a
          ></div>
        {:else}
        <div class="level-item">

@@ -1,5 +1,5 @@
<script context="module" lang="ts">
  import { countTokens, getModelDetail, getRoleTag, getStopSequence } from './Models.svelte'
  import { countTokens, getDeliminator, getLeadPrompt, getModelDetail, getRoleEnd, getRoleTag, getStartSequence } from './Models.svelte'
  import type { Chat, Message, Model, Usage } from './Types.svelte'

  export const getPrice = (tokens: Usage, model: Model): number => {
@@ -15,7 +15,7 @@
    }, 0)
    switch (detail.type) {
      case 'Petals':
        return count
        return count + countTokens(model, getStartSequence(chat)) + countTokens(model, getLeadPrompt(chat))
      case 'OpenAIChat':
      default:
        // Not sure how OpenAI formats it, but this seems to get close to the right counts.
@@ -27,10 +27,11 @@

  export const countMessageTokens = (message:Message, model:Model, chat: Chat):number => {
    const detail = getModelDetail(model)
    const stop = getStopSequence(chat)
    const delim = getDeliminator(chat)
    switch (detail.type) {
      case 'Petals':
        return countTokens(model, getRoleTag(message.role, model, chat) + ': ' + message.content + (stop || '###'))
        return countTokens(model, getRoleTag(message.role, model, chat) + ': ' +
        message.content + getRoleEnd(message.role, model, chat) + (delim || '###'))
      case 'OpenAIChat':
      default:
        // Not sure how OpenAI formats it, but this seems to get close to the right counts.
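
For Petals, countMessageTokens now accounts for the role end and the deliminator instead of the stop sequence. A worked sketch of the string the Petals branch tokenizes for one message (all values illustrative, content invented):

import { countTokens } from './lib/Models.svelte'

// Illustrative values: role tag 'User', role end '', deliminator '\n###\n\n'.
const counted = 'User' + ': ' + 'Hello there!' + '' + '\n###\n\n'
const n = countTokens('meta-llama/Llama-2-70b-chat-hf', counted)
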
@@ -30,11 +30,6 @@
    return get(apiKeyStorage)
  }

  export const hasActiveModels = (): boolean => {
    const globalSettings = get(globalStorage) || {}
    return !!get(apiKeyStorage) || !!globalSettings.enablePetals
  }

  export const newChatID = (): number => {
    const chats = get(chatsStorage)
    const chatId = chats.reduce((maxId, chat) => Math.max(maxId, chat.id), 0) + 1

@@ -1,31 +1,12 @@
<script context="module" lang="ts">
    import type { IconDefinition } from '@fortawesome/free-solid-svg-icons'
import { supportedModelKeys } from './Models.svelte'
import { imageGenerationSizeTypes } from './Settings.svelte'
  import type { IconDefinition } from '@fortawesome/free-solid-svg-icons'
  import { supportedChatModelKeys } from './Models.svelte'
  import { ChatRequest } from './ChatRequest.svelte'
  import { ChatCompletionResponse } from './ChatCompletionResponse.svelte'

export type Model = typeof supportedModelKeys[number];
export type Model = typeof supportedChatModelKeys[number];

export type ImageGenerationSizes = typeof imageGenerationSizeTypes[number];

export type RequestType = 'OpenAIChat' | 'OpenAIDall-e' | 'Petals'

export type ModelDetail = {
    type: RequestType;
    label?: string;
    start?: string;
    stop?: string[];
    deliminator?: string;
    userStart?: string,
    userEnd?: string,
    assistantStart?: string,
    assistantEnd?: string,
    systemStart?: string,
    systemEnd?: string,
    leadPrompt?: string,
    prompt: number;
    completion: number;
    max: number;
  };
export type RequestType = 'chat' | 'image'

export type Usage = {
    completion_tokens: number;
@@ -63,23 +44,6 @@ export type ResponseAlteration = {
    replace: string;
  }

export type ResponseImageDetail = {
    url: string;
    b64_json: string;
  }

export type ResponseImage = {
    created: number;
    data: ResponseImageDetail[];
  }

export type RequestImageGeneration = {
    prompt: string;
    n?: number;
    size?: ImageGenerationSizes;
    response_format?: keyof ResponseImageDetail;
  }

export type Request = {
    model: Model;
    messages?: Message[];
@@ -115,7 +79,7 @@ export type ChatSettings = {
    hiddenPromptPrefix: string;
    hppContinuePrompt: string; // hiddenPromptPrefix used, optional glue when trying to continue truncated completion
    hppWithSummaryPrompt: boolean; // include hiddenPromptPrefix when before summary prompt
    imageGenerationSize: ImageGenerationSizes;
    imageGenerationModel: Model;
    trainingPrompts?: Message[];
    useResponseAlteration?: boolean;
    responseAlterations?: ResponseAlteration[];
@@ -130,6 +94,7 @@ export type ChatSettings = {
    leadPrompt: string;
    systemMessageStart: string;
    systemMessageEnd: string;
    repititionPenalty: number;
    isDirty?: boolean;
  } & Request;

@@ -171,13 +136,6 @@ export type Chat = {

export type Response = ResponseOK & ResponseError;

export type ResponseModels = {
    object: 'list';
    data: {
      id: string;
    }[];
  };

export type ChatCompletionOpts = {
    chat: Chat;
    autoAddMessages: boolean;
@@ -186,7 +144,9 @@ export type ChatCompletionOpts = {
    didSummary?:boolean;
    streaming?:boolean;
    onMessageChange?: (messages: Message[]) => void;
    fillMessage?:Message,
    fillMessage?:Message;
    count?:number;
    prompt?:string;
  };

export type ChatSortOptions = 'name'|'created'|'lastUse'|'lastAccess';
@@ -276,7 +236,7 @@ export type ChatSetting = {
    header?: string | ValueFn;
    headerClass?: string | ValueFn;
    placeholder?: string | ValueFn;
    hide?: (chatId:number) => boolean;
    hide?: (chatId:number, setting:ChatSetting) => boolean;
    apiTransform?: (chatId:number, setting:ChatSetting, value:any) => any;
    fieldControls?: FieldControl[];
    beforeChange?: (chatId:number, setting:ChatSetting, value:any) => boolean;
@@ -304,4 +264,34 @@ export type SettingPrompt = {
    passed: boolean;
  };

export type ModelDetail = {
    type: RequestType;
    id?: string;
    modelQuery?: string;
    label?: string;
    start?: string;
    stop?: string[];
    deliminator?: string;
    userStart?: string,
    userEnd?: string,
    assistantStart?: string,
    assistantEnd?: string,
    systemStart?: string,
    systemEnd?: string,
    leadPrompt?: string,
    prompt?: number;
    completion?: number;
    max?: number;
    opt?: Record<string, any>;
    preFillMerge?: (existingContent:string, newContent:string)=>string;
    enabled?: boolean;
    hide?: boolean;
    check: (modelDetail: ModelDetail) => Promise<void>;
    getTokens: (val: string) => number[];
    getEndpoint: (model: Model) => string;
    help: string;
    hideSetting: (chatId: number, setting: ChatSetting) => boolean;
    request: (request: Request, chatRequest: ChatRequest, chatResponse: ChatCompletionResponse, opts: ChatCompletionOpts) => Promise<ChatCompletionResponse>;
  };

</script>
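
The expanded ModelDetail type above is now the whole provider contract: pricing, prompt-format strings, and the check/getTokens/getEndpoint/hideSetting/request hooks. A minimal sketch of what a new provider could register against this interface (the 'echo' model, its endpoint, and the tokenizer are all invented for illustration):

import type { ModelDetail } from './lib/Types.svelte'

const echoModel: ModelDetail = {
  type: 'chat',
  label: 'Echo (example)',
  help: 'No adjustable settings.',
  prompt: 0,
  completion: 0,
  max: 4096,
  check: async (detail) => { detail.enabled = true },
  getTokens: (val) => Array.from(val).map((_, i) => i), // toy tokenizer, not a real one
  getEndpoint: () => 'https://example.invalid/v1/echo', // placeholder endpoint
  hideSetting: () => true,
  request: async (request, chatRequest, chatResponse, opts) => chatResponse
}
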
@@ -0,0 +1,119 @@
<script context="module" lang="ts">
    import { getApiBase, getEndpointCompletions, getEndpointGenerations } from '../../ApiUtil.svelte'
    import { globalStorage } from '../../Storage.svelte'
    import type { ModelDetail } from '../../Types.svelte'
    import { chatRequest, imageRequest } from './request.svelte'
    import { checkModel } from './util.svelte'
    import { encode } from 'gpt-tokenizer'
    import { get } from 'svelte/store'

const hiddenSettings = {
      startSequence: true,
      stopSequence: true,
      aggressiveStop: true,
      deliminator: true,
      userMessageStart: true,
      userMessageEnd: true,
      assistantMessageStart: true,
      assistantMessageEnd: true,
      leadPrompt: true,
      systemMessageStart: true,
      systemMessageEnd: true,
      repititionPenalty: true
}

const chatModelBase = {
  type: 'chat',
  help: 'Below are the settings that OpenAI allows to be changed for the API calls. See the <a target="_blank" href="https://platform.openai.com/docs/api-reference/chat/create">OpenAI API docs</a> for more details.',
  preFillMerge: (existingContent, newContent) => {
        // continuing assistant prompt. see if we need to add a space before we merge the new completion
        // there has to be a better way to do this
        if (existingContent && !newContent.match(/^('(t|ll|ve|m|d|re)[^a-z]|\s|[.,;:(_-{}*^%$#@!?+=~`[\]])/i)) {
          // add a trailing space if our new content isn't a contraction
          existingContent += ' '
        }
        return existingContent
  },
  request: chatRequest,
  check: checkModel,
  getTokens: (value) => encode(value),
  getEndpoint: (model) => get(globalStorage).openAICompletionEndpoint || (getApiBase() + getEndpointCompletions()),
  hideSetting: (chatId, setting) => !!hiddenSettings[setting.key]
} as ModelDetail

// Reference: https://openai.com/pricing#language-models
const gpt35 = {
      ...chatModelBase,
      prompt: 0.0000015, // $0.0015 per 1000 tokens prompt
      completion: 0.000002, // $0.002 per 1000 tokens completion
      max: 4096 // 4k max token buffer
}
const gpt3516k = {
      ...chatModelBase,
      prompt: 0.000003, // $0.003 per 1000 tokens prompt
      completion: 0.000004, // $0.004 per 1000 tokens completion
      max: 16384 // 16k max token buffer
}
const gpt4 = {
      ...chatModelBase,
      prompt: 0.00003, // $0.03 per 1000 tokens prompt
      completion: 0.00006, // $0.06 per 1000 tokens completion
      max: 8192 // 8k max token buffer
}
const gpt432k = {
      ...chatModelBase,
      prompt: 0.00006, // $0.06 per 1000 tokens prompt
      completion: 0.00012, // $0.12 per 1000 tokens completion
      max: 32768 // 32k max token buffer
}

export const chatModels : Record<string, ModelDetail> = {
  'gpt-3.5-turbo': { ...gpt35 },
  'gpt-3.5-turbo-0301': { ...gpt35 },
  'gpt-3.5-turbo-0613': { ...gpt35 },
  'gpt-3.5-turbo-16k': { ...gpt3516k },
  'gpt-4': { ...gpt4 },
  'gpt-4-0314': { ...gpt4 },
  'gpt-4-0613': { ...gpt4 },
  'gpt-4-32k': { ...gpt432k },
  'gpt-4-32k-0314': { ...gpt432k },
  'gpt-4-32k-0613': { ...gpt432k }
}

const imageModelBase = {
  type: 'image',
  prompt: 0.00,
  max: 1000, // 1000 char prompt, max
  request: imageRequest,
  check: checkModel,
  getTokens: (value) => [0],
  getEndpoint: (model) => getApiBase() + getEndpointGenerations(),
  hideSetting: (chatId, setting) => false
} as ModelDetail

export const imageModels : Record<string, ModelDetail> = {
      'dall-e-1024x1024': {
        ...imageModelBase,
        completion: 0.020, // $0.020 per image
        opt: {
          size: '1024x1024'
        }
      },
      'dall-e-512x512': {
        ...imageModelBase,
        completion: 0.018, // $0.018 per image
        opt: {
          size: '512x512'
        }
      },
      'dall-e-256x256': {
        ...imageModelBase,
        type: 'image',
        completion: 0.016, // $0.016 per image
        opt: {
          size: '256x256'
        }
      }
}

</script>
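
Because prompt and completion are stored as per-token dollar rates, cost is a straight multiply against usage counts, which is what getPrice in Stats.svelte does. A quick check against the gpt-4 numbers above:

import { chatModels } from './lib/providers/openai/models.svelte'

const gpt4Detail = chatModels['gpt-4']
// 1000 prompt tokens + 500 completion tokens:
const cost = 1000 * (gpt4Detail.prompt || 0) + 500 * (gpt4Detail.completion || 0)
console.log(cost.toFixed(4)) // 0.0600 = $0.03 + $0.03
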
@@ -1,24 +1,24 @@
<script context="module" lang="ts">
    import { EventStreamContentType, fetchEventSource } from '@microsoft/fetch-event-source'
    import ChatCompletionResponse from './ChatCompletionResponse.svelte'
    import ChatRequest from './ChatRequest.svelte'
    import { getEndpoint } from './Models.svelte'
    import { getApiKey } from './Storage.svelte'
    import type { ChatCompletionOpts, Request } from './Types.svelte'
    import { ChatCompletionResponse } from '../../ChatCompletionResponse.svelte'
    import { ChatRequest } from '../../ChatRequest.svelte'
    import { getEndpoint, getModelDetail } from '../../Models.svelte'
    import { getApiKey } from '../../Storage.svelte'
    import type { ChatCompletionOpts, Request } from '../../Types.svelte'

export const runOpenAiCompletionRequest = async (
export const chatRequest = async (
  request: Request,
  chatRequest: ChatRequest,
  chatResponse: ChatCompletionResponse,
  signal: AbortSignal,
  opts: ChatCompletionOpts) => {
  opts: ChatCompletionOpts): Promise<ChatCompletionResponse> => {
    // OpenAI Request
      const model = chatRequest.getModel()
      const signal = chatRequest.controller.signal
      const abortListener = (e:Event) => {
        chatRequest.updating = false
        chatRequest.updatingMessage = ''
        chatResponse.updateFromError('User aborted request.')
        chatRequest.removeEventListener('abort', abortListener)
        signal.removeEventListener('abort', abortListener)
      }
      signal.addEventListener('abort', abortListener)
      const fetchOptions = {
@@ -93,8 +93,82 @@ export const runOpenAiCompletionRequest = async (
          // Remove updating indicator
          chatRequest.updating = false
          chatRequest.updatingMessage = ''
          chatResponse.updateFromSyncResponse(json)
          const images = json?.data.map(d => d.b64_json)
          chatResponse.updateFromSyncResponse(images || [])
        }
      }
      return chatResponse
}

type ResponseImageDetail = {
    url: string;
    b64_json: string;
  }

type RequestImageGeneration = {
    prompt: string;
    n?: number;
    size?: string;
    response_format?: keyof ResponseImageDetail;
  }

export const imageRequest = async (
  na: Request,
  chatRequest: ChatRequest,
  chatResponse: ChatCompletionResponse,
  opts: ChatCompletionOpts): Promise<ChatCompletionResponse> => {
  const chat = chatRequest.getChat()
  const chatSettings = chat.settings
  const count = opts.count || 1
  const prompt = opts.prompt || ''
  chatRequest.updating = true
  chatRequest.updatingMessage = 'Generating Image...'
  const imageModel = chatSettings.imageGenerationModel
  const imageModelDetail = getModelDetail(imageModel)
  const size = imageModelDetail.opt?.size || '256x256'
  const request: RequestImageGeneration = {
        prompt,
        response_format: 'b64_json',
        size,
        n: count
  }
  // fetchEventSource doesn't seem to throw on abort,
  // so we deal with it ourselves
  const signal = chatRequest.controller.signal
  const abortListener = (e:Event) => {
        chatResponse.updateFromError('User aborted request.')
        signal.removeEventListener('abort', abortListener)
  }
  signal.addEventListener('abort', abortListener)
  // Create request
  const fetchOptions = {
        method: 'POST',
        headers: {
          Authorization: `Bearer ${getApiKey()}`,
          'Content-Type': 'application/json'
        },
        body: JSON.stringify(request),
        signal
  }

  try {
        const response = await fetch(getEndpoint(imageModel), fetchOptions)
        if (!response.ok) {
          await chatRequest.handleError(response)
        } else {
          const json = await response.json()
          // Remove updating indicator
          chatRequest.updating = false
          chatRequest.updatingMessage = ''
          // console.log('image json', json, json?.data[0])
          const images = json?.data.map(d => d.b64_json)
          chatResponse.updateImageFromSyncResponse(images, prompt, imageModel)
        }
  } catch (e) {
        chatResponse.updateFromError(e)
        throw e
  }
  return chatResponse
}

</script>
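
With chatRequest and imageRequest exported per provider, the caller stops branching on provider type and simply invokes whatever request function the model's detail registered. A sketch of the dispatch, written as a hypothetical helper since the real call site lives in ChatRequest.svelte:

import { getModelDetail } from './lib/Models.svelte'
import type { ChatCompletionOpts, Model, Request } from './lib/Types.svelte'
import type { ChatRequest } from './lib/ChatRequest.svelte'
import type { ChatCompletionResponse } from './lib/ChatCompletionResponse.svelte'

async function dispatch (model: Model, request: Request, chatReq: ChatRequest,
  chatRes: ChatCompletionResponse, opts: ChatCompletionOpts): Promise<ChatCompletionResponse> {
  // Each provider registered its own request() on the ModelDetail.
  return getModelDetail(model).request(request, chatReq, chatRes, opts)
}
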
@@ -0,0 +1,60 @@
<script context="module" lang="ts">
    import { apiKeyStorage } from '../../Storage.svelte'
    import { get } from 'svelte/store'
    import type { ModelDetail } from '../../Types.svelte'
    import { getApiBase, getEndpointModels } from '../../ApiUtil.svelte'

type ResponseModels = {
  object?: string;
  data: {
    id: string;
  }[];
}

let availableModels: Record<string, boolean> | undefined

let _resetSupportedModelsTimer

export const set = (opt: Record<string, any>) => {
  availableModels = undefined
  apiKeyStorage.set(opt.apiKey || '')
}

const getSupportedModels = async (): Promise<Record<string, boolean>> => {
  if (availableModels) return availableModels
  const openAiKey = get(apiKeyStorage)
  if (!openAiKey) return {}
  try {
        const result = (await (
          await fetch(getApiBase() + getEndpointModels(), {
            method: 'GET',
            headers: {
              Authorization: `Bearer ${openAiKey}`,
              'Content-Type': 'application/json'
            }
          })
        ).json()) as ResponseModels
        availableModels = result.data.reduce((a, v) => {
          a[v.id] = v
          return a
        }, {})
        return availableModels
  } catch (e) {
        availableModels = {}
        clearTimeout(_resetSupportedModelsTimer)
        _resetSupportedModelsTimer = setTimeout(() => { availableModels = undefined }, 1000)
        return availableModels
  }
}

export const checkModel = async (modelDetail: ModelDetail) => {
  const supportedModels = await getSupportedModels()
  if (modelDetail.type === 'chat') {
        modelDetail.enabled = !!supportedModels[modelDetail.modelQuery || '']
  } else {
        // image request.  If we have any models, allow image endpoint
        modelDetail.enabled = !!Object.keys(supportedModels).length
  }
}

</script>
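
checkModel resolves each model's enabled flag from the live /v1/models listing: chat models by id lookup, image models whenever the key returns any models at all. Usage mirrors the await modelDetail.check(modelDetail) calls in Models.svelte:

import { chatModels } from './lib/providers/openai/models.svelte'
import { checkModel } from './lib/providers/openai/util.svelte'

const detail = chatModels['gpt-4']
await checkModel(detail) // fetches /v1/models once, then serves the cached map
console.log(detail.enabled)
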
@@ -0,0 +1,72 @@
<script context="module" lang="ts">
    import { getPetalsBase, getPetalsWebsocket } from '../../ApiUtil.svelte'
    import { globalStorage } from '../../Storage.svelte'
    import type { ModelDetail } from '../../Types.svelte'
    import { chatRequest } from './request.svelte'
    import { checkModel } from './util.svelte'
    import llamaTokenizer from 'llama-tokenizer-js'
    import { get } from 'svelte/store'

const hideSettings = {
  stream: true,
  n: true,
  presence_penalty: true,
  frequency_penalty: true
}

const chatModelBase = {
  type: 'chat',
  help: 'Below are the settings that can be changed for the API calls. See <a target="_blank" href="https://platform.openai.com/docs/api-reference/chat/create">this overview</a> to start, though not all settings translate to Petals.',
  check: checkModel,
  start: '<s>',
  stop: ['###', '</s>'],
  deliminator: '\n###\n\n',
  userStart: 'User:\n',
  userEnd: '',
  assistantStart: '[[CHARACTER_NAME]]:\n',
  assistantEnd: '',
  leadPrompt: '[[CHARACTER_NAME]]:\n',
  systemEnd: '',
  prompt: 0.000000, // $0.000 per 1000 tokens prompt
  completion: 0.000000, // $0.000 per 1000 tokens completion
  max: 4096, // 4k max token buffer
  request: chatRequest,
  getEndpoint: (model) => get(globalStorage).pedalsEndpoint || (getPetalsBase() + getPetalsWebsocket()),
  getTokens: (value) => llamaTokenizer.encode(value),
  hideSetting: (chatId, setting) => !!hideSettings[setting.key]
} as ModelDetail

export const chatModels : Record<string, ModelDetail> = {
      // 'enoch/llama-65b-hf': {
      //   ...chatModelBase,
      //   label: 'Petals - Llama-65b'
      // },
      'timdettmers/guanaco-65b': {
        ...chatModelBase,
        label: 'Petals - Guanaco-65b',
        max: 2048
      },
      'meta-llama/Llama-2-70b-hf': {
        ...chatModelBase,
        label: 'Petals - Llama-2-70b'
      },
      'meta-llama/Llama-2-70b-chat-hf': {
        ...chatModelBase,
        label: 'Petals - Llama-2-70b-chat',
        start: '<s>',
        stop: ['</s>', '[INST]', '[/INST]', '<<SYS>>', '<</SYS>>'],
        deliminator: ' </s><s>',
        userStart: '[INST][[SYSTEM_PROMPT]]',
        userEnd: ' [/INST]',
        assistantStart: '[[SYSTEM_PROMPT]][[USER_PROMPT]]',
        systemStart: '<<SYS>>\n',
        systemEnd: '\n<</SYS>>\n\n'
      },
      'stabilityai/StableBeluga2': {
        ...chatModelBase,
        label: 'Petals - StableBeluga-2',
        max: 2048
      }
}

</script>
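
These per-model strings drive the prompt assembly in the Petals request below: messages are wrapped with their role tags, joined on the deliminator, prefixed with the start sequence, and followed by the lead prompt when the last message is not from the assistant. A worked sketch with the chatModelBase defaults, assuming [[CHARACTER_NAME]] has been resolved to 'Assistant' (the chat content is invented):

// start '<s>', deliminator '\n###\n\n', userStart 'User:\n', leadPrompt 'Assistant:\n'
const wrapped = ['User:\nHello there!']
const inputs = '<s>' + wrapped.join('\n###\n\n') + '\n###\n\n' + 'Assistant:\n'
// => '<s>User:\nHello there!\n###\n\nAssistant:\n'
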
@@ -1,22 +1,23 @@
<script context="module" lang="ts">
    import ChatCompletionResponse from './ChatCompletionResponse.svelte'
    import ChatRequest from './ChatRequest.svelte'
    import { getDeliminator, getEndpoint, getLeadPrompt, getModelDetail, getRoleEnd, getRoleTag, getStartSequence, getStopSequence } from './Models.svelte'
    import type { ChatCompletionOpts, Message, Request } from './Types.svelte'
    import { getModelMaxTokens } from './Stats.svelte'
    import { updateMessages } from './Storage.svelte'
    import { ChatCompletionResponse } from '../../ChatCompletionResponse.svelte'
    import { ChatRequest } from '../../ChatRequest.svelte'
    import { getDeliminator, getEndpoint, getLeadPrompt, getModelDetail, getRoleEnd, getRoleTag, getStartSequence, getStopSequence } from '../../Models.svelte'
    import type { ChatCompletionOpts, Message, Request } from '../../Types.svelte'
    import { getModelMaxTokens } from '../../Stats.svelte'
    import { updateMessages } from '../../Storage.svelte'

export const runPetalsCompletionRequest = async (
export const chatRequest = async (
  request: Request,
  chatRequest: ChatRequest,
  chatResponse: ChatCompletionResponse,
  signal: AbortSignal,
  opts: ChatCompletionOpts) => {
  opts: ChatCompletionOpts): Promise<ChatCompletionResponse> => {
      // Petals
      const chat = chatRequest.getChat()
      const chatSettings = chat.settings
      const model = chatRequest.getModel()
      const modelDetail = getModelDetail(model)
      const ws = new WebSocket(getEndpoint(model))
      const signal = chatRequest.controller.signal
      const abortListener = (e:Event) => {
        chatRequest.updating = false
        chatRequest.updatingMessage = ''
@@ -25,23 +26,16 @@ export const runPetalsCompletionRequest = async (
        ws.close()
      }
      signal.addEventListener('abort', abortListener)
      const stopSequences = (modelDetail.stop || ['###', '</s>']).slice()
      const stopSequence = getStopSequence(chat)
      let stopSequences = [...new Set(getStopSequence(chat).split(',').filter(s => s.trim()).concat((modelDetail.stop || ['###', '</s>']).slice()))]
      const stopSequence = '</s>'
      stopSequences.push(stopSequence)
      const deliminator = getDeliminator(chat)
      if (deliminator) stopSequences.unshift(deliminator)
      let stopSequenceC = stopSequence
      if (stopSequence !== '###') {
        stopSequences.push(stopSequence)
        stopSequenceC = '</s>'
      }
      const haveSeq = {}
      const stopSequencesC = stopSequences.filter((ss) => {
        const have = haveSeq[ss]
        haveSeq[ss] = true
        return !have && ss !== '###' && ss !== stopSequenceC
      })
      const leadPromptSequence = getLeadPrompt(chat)
      if (deliminator) stopSequences.unshift(deliminator.trim())
      stopSequences = stopSequences.sort((a, b) => b.length - a.length)
      const stopSequencesC = stopSequences.filter(s => s !== stopSequence)
      const maxTokens = getModelMaxTokens(model)
      let maxLen = Math.min(opts.maxTokens || chatRequest.chat.max_tokens || maxTokens, maxTokens)
      let maxLen = Math.min(opts.maxTokens || chatSettings.max_tokens || maxTokens, maxTokens)
      const promptTokenCount = chatResponse.getPromptTokenCount()
      if (promptTokenCount > maxLen) {
        maxLen = Math.min(maxLen + promptTokenCount, maxTokens)
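
The rewritten stop handling merges the user's comma-separated stop setting with the model's defaults, dedupes with a Set, prepends the trimmed deliminator, and sorts longest-first so longer sequences are checked before their shorter prefixes. A worked trace with an invented user value:

// getStopSequence(chat) -> 'STOP', modelDetail.stop -> ['###', '</s>']
let stopSequences = [...new Set('STOP'.split(',').filter(s => s.trim()).concat(['###', '</s>']))]
const stopSequence = '</s>'                // sent to Petals as stop_sequence
stopSequences.push(stopSequence)           // duplicate entries are harmless here
stopSequences.unshift('\n###\n\n'.trim())  // deliminator
stopSequences = stopSequences.sort((a, b) => b.length - a.length)
// -> ['STOP', '</s>', '</s>', '###', '###']; extra_stop_sequences gets all but '</s>'
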
@@ -135,15 +129,16 @@ export const runPetalsCompletionRequest = async (
            }
            return a
          }, [] as Message[])
          const leadPrompt = ((inputArray[inputArray.length - 1] || {}) as Message).role !== 'assistant' ? getLeadPrompt(chat) : ''
          const leadPrompt = (leadPromptSequence && ((inputArray[inputArray.length - 1] || {}) as Message).role !== 'assistant') ? deliminator + leadPromptSequence : ''
          const petalsRequest = {
            type: 'generate',
            inputs: getStartSequence(chat) + inputArray.map(m => m.content).join(deliminator) + leadPrompt,
            max_new_tokens: 1, // wait for up to 1 tokens before displaying
            stop_sequence: stopSequenceC,
            stop_sequence: stopSequence,
            do_sample: 1, // enable top p and the like
            temperature,
            top_p: topP
            // repitition_penalty: chatSettings.repititionPenalty
          } as any
          if (stopSequencesC.length) petalsRequest.extra_stop_sequences = stopSequencesC
          ws.send(JSON.stringify(petalsRequest))
@@ -170,7 +165,7 @@ export const runPetalsCompletionRequest = async (
                  }]
                } as any
            )
            if (chat.settings.aggressiveStop && !response.stop) {
            if (chatSettings.aggressiveStop && !response.stop) {
              // check if we should've stopped
              const message = chatResponse.getMessages()[0]
              const pad = 10 // look back 10 characters + stop sequence
@@ -202,5 +197,6 @@ export const runPetalsCompletionRequest = async (
          throw err
        }
      }
      return chatResponse
}
</script>

@@ -0,0 +1,16 @@
<script context="module" lang="ts">
    import { globalStorage } from '../../Storage.svelte'
    import { get } from 'svelte/store'
    import type { ModelDetail } from '../../Types.svelte'

export const set = (opt: Record<string, any>) => {
  //
}

export const checkModel = async (modelDetail: ModelDetail) => {
  if (modelDetail.type === 'chat') {
        modelDetail.enabled = get(globalStorage).enablePetals
  }
}

</script>