From e3006742cc83a4bcb990f8446356e0df136e4b45 Mon Sep 17 00:00:00 2001
From: extremeheat
Date: Sun, 18 Feb 2024 20:50:19 -0500
Subject: [PATCH] Add HTML viz for side-by-side comparing LLM output

---
 .gitignore     |   4 +-
 README.md      |   1 +
 src/index.d.ts |  14 +++++--
 src/index.js   |   3 +-
 src/tools.js   | 111 +++++++++++++++++++++++++++++++++++++++++++++++++
 test/viz.js    |  10 +++++
 6 files changed, 138 insertions(+), 5 deletions(-)
 create mode 100644 src/tools.js
 create mode 100644 test/viz.js

diff --git a/.gitignore b/.gitignore
index 07353d7..039847f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -130,4 +130,6 @@ dist
 .pnp.*
 playground
 package-lock.json
-*.key
\ No newline at end of file
+*.key
+
+test/*.html
\ No newline at end of file
diff --git a/README.md b/README.md
index 6cc4675..23aeb2d 100644
--- a/README.md
+++ b/README.md
@@ -25,6 +25,7 @@ const { ChatSession, CompletionService } = require('langxlang')
 ```js
 const service = new CompletionService({ openai: [key], gemini: [key] })
 const response = await service.requestCompletion('gpt-3.5-turbo-16k', /* empty system prompt */ '', 'Tell me about yourself')
+console.log(response.text)
 ```
 
 #### Chatting with a model
diff --git a/src/index.d.ts b/src/index.d.ts
index a19e37b..b15a96f 100644
--- a/src/index.d.ts
+++ b/src/index.d.ts
@@ -1,4 +1,5 @@
 declare module 'langxlang' {
+  type Model = 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo' | 'gpt-4' | 'gpt-4-turbo-preview' | 'gemini-1.0-pro'
   class CompletionService {
     // Creates an instance of completion service.
     // Note: as an alternative to explicitly passing the API keys in the constructor you can:
@@ -10,14 +11,21 @@ declare module 'langxlang' {
     cachePath: string
 
     // Request a non-streaming completion from the model.
-    requestCompletion(model: string, systemPrompt: string, userPrompt: string): Promise
+    requestCompletion(model: Model, systemPrompt: string, userPrompt: string): Promise<{ text: string }>
   }
 
   class ChatSession {
     // ChatSession is for back and forth conversation between a user and an LLM.
-    constructor(completionService: CompletionService, model: string, systemPrompt: string)
+    constructor(completionService: CompletionService, model: Model, systemPrompt: string)
     // Send a message to the LLM and receive a response as return value. The chunkCallback
     // can be defined to listen to bits of the message stream as it's being written by the LLM.
-    sendMessage(message: string, chunkCallback: ({ content: string }) => void): Promise
+    sendMessage(message: string, chunkCallback: ({ content: string }) => void): Promise<string>
   }
+
+  interface Tools {
+    // Generate HTML that shows side-by-side outputs for the system/user prompt across different models.
+    makeVizForPrompt(systemPrompt: string, userPrompt: string, models: Model[]): Promise<string>
+  }
+
+  const tools: Tools
 }
\ No newline at end of file
diff --git a/src/index.js b/src/index.js
index b691f2b..30b9c60 100644
--- a/src/index.js
+++ b/src/index.js
@@ -10,5 +10,6 @@ module.exports = {
   ChatSession,
   openai,
   palm2,
-  gemini
+  gemini,
+  tools: require('./tools')
 }
diff --git a/src/tools.js b/src/tools.js
new file mode 100644
index 0000000..81e9be5
--- /dev/null
+++ b/src/tools.js
@@ -0,0 +1,111 @@
+const { CompletionService } = require('./CompletionService')
+
+function makeVizHtml (data) {
+  // The placeholder text (SYS PROMPT, USR PROMPT, MODEL OUTPUT) is replaced by the inline script at the bottom of the page.
+  return `<!DOCTYPE html>
+<html>
+<head>
+  <meta charset="UTF-8">
+  <meta name="viewport" content="width=device-width, initial-scale=1.0">
+  <title>LLM Output Viz</title>
+  <style>
+    body { font-family: sans-serif; }
+    .label { font-weight: bold; margin-bottom: 4px; }
+    .prompt, .model { border: 1px solid #ccc; margin: 8px; padding: 8px; }
+    .row { display: flex; }
+    .model { flex: 1; }
+    pre { white-space: pre-wrap; margin: 0; }
+  </style>
+</head>
+<body>
+  <div class="prompt">
+    <div class="label">System Prompt</div>
+    <pre id="system">SYS PROMPT</pre>
+  </div>
+  <div class="prompt">
+    <div class="label">User Prompt</div>
+    <pre id="user">USR PROMPT</pre>
+  </div>
+  <div class="row">
+  ${
+    data.models.map(([modelName, modelId]) =>
+      `<div class="model"><div class="label">${modelName}</div><pre id="${modelId}">MODEL OUTPUT</pre></div>`).join('\n')
+  }
+  </div>
+  <script>
+    // Inject the prompts and each model's output as plain text
+    const data = ${JSON.stringify(data)}
+    document.getElementById('system').textContent = data.system
+    document.getElementById('user').textContent = data.user
+    for (const [, modelId] of data.models) {
+      document.getElementById(modelId).textContent = data.outputs[modelId]
+    }
+  </script>
+</body>
+</html>`
+}
+
+async function makeVizForPrompt (system, user, models) {
+  const service = new CompletionService()
+  const data = { models: [], outputs: {} }
+  for (const model of models) {
+    const { text } = await service.requestCompletion(model, system, user)
+    switch (model) {
+      case 'gpt-3.5-turbo-16k':
+        data.models.push(['GPT-3.5 Turbo 16k', '3516turbo'])
+        data.outputs['3516turbo'] = text
+        break
+      case 'gpt-3.5-turbo':
+        data.models.push(['GPT-3.5 Turbo', '35turbo'])
+        data.outputs['35turbo'] = text
+        break
+      case 'gpt-4':
+        data.models.push(['GPT-4', 'gpt4'])
+        data.outputs.gpt4 = text
+        break
+      case 'gpt-4-turbo-preview':
+        data.models.push(['GPT-4 Turbo Preview', 'gpt4turbo'])
+        data.outputs.gpt4turbo = text
+        break
+      case 'gemini-1.0-pro':
+        data.models.push(['Gemini 1.0 Pro', 'gemini'])
+        data.outputs.gemini = text
+        break
+      default:
+        data.models.push([model, model])
+        data.outputs[model] = text
+    }
+  }
+  data.system = system
+  data.user = user
+  return makeVizHtml(data)
+}
+
+module.exports = { makeVizForPrompt }
diff --git a/test/viz.js b/test/viz.js
new file mode 100644
index 0000000..23bb006
--- /dev/null
+++ b/test/viz.js
@@ -0,0 +1,10 @@
+const { tools } = require('langxlang')
+const fs = require('fs')
+const path = require('path')
+
+async function main () {
+  // const all = ['gpt-3.5-turbo-16k', 'gpt-3.5-turbo', 'gpt-4', 'gpt-4-turbo-preview', 'gemini-1.0-pro']
+  const viz = await tools.makeVizForPrompt('', 'Why is the sky blue?', ['gpt-3.5-turbo'])
+  fs.writeFileSync(path.join(__dirname, 'viz.html'), viz)
+}
+main()
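
For reference, a minimal driver in the same vein as test/viz.js — a hypothetical `compare-all.js`, not part of this patch — that feeds the commented-out `all` list to `tools.makeVizForPrompt` so every model in the `Model` union is compared side by side. It assumes API keys are already configured through one of the alternatives mentioned in the CompletionService notes:

```js
// compare-all.js — hypothetical usage sketch, not included in this patch.
const { tools } = require('langxlang')
const fs = require('fs')
const path = require('path')

async function main () {
  // Mirrors the Model union in src/index.d.ts.
  const all = ['gpt-3.5-turbo-16k', 'gpt-3.5-turbo', 'gpt-4', 'gpt-4-turbo-preview', 'gemini-1.0-pro']
  const viz = await tools.makeVizForPrompt('', 'Why is the sky blue?', all)
  fs.writeFileSync(path.join(__dirname, 'all-models.html'), viz)
}

main()
```

Open the written HTML file in a browser to compare the outputs column by column.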