removed langchain

Took 1 hour 25 minutes

mikepsinn committed Nov 30, 2024
1 parent 3a1f1fa · commit 5be748b
Showing 44 changed files with 1,014 additions and 3,783 deletions.
5 changes: 0 additions & 5 deletions .env.example
@@ -38,11 +38,6 @@ OLLAMA_MODEL="llama3"
 
 REDIS_URL="redis://localhost:6379"
 
-# Get key from https://smith.langchain.com if you want to trace
-LANGCHAIN_TRACING_V2=true
-LANGCHAIN_API_KEY=<your-api-key>
-LANGCHAIN_CALLBACKS_BACKGROUND=true
-
 # Get key from https://www.perplexity.ai/settings/api
 PERPLEXITY_API_KEY="KEY_HERE"
 # Get key from https://dashboard.exa.ai/api-keys
4 changes: 0 additions & 4 deletions app/docs/[org]/[repo]/lib/cache.ts
@@ -1,9 +1,5 @@
 import { getRedisClient } from "@/lib/utils/redis"
 
-
-
-
-
 const CACHE_TTL = 5 * 60 // 5 minutes in seconds
 
 export async function getCachedData<T>(
223 changes: 83 additions & 140 deletions lib/chain.ts
@@ -1,174 +1,117 @@
-import { Document } from "@langchain/core/documents"
-import { BaseLanguageModel } from "@langchain/core/language_models/base"
-import { BaseChatModel } from "@langchain/core/language_models/chat_models"
-import { AIMessage, BaseMessage, HumanMessage } from "@langchain/core/messages"
-import { StringOutputParser } from "@langchain/core/output_parsers"
-import {
-  ChatPromptTemplate,
-  MessagesPlaceholder,
-  PromptTemplate,
-} from "@langchain/core/prompts"
-import {
-  Runnable,
-  RunnableBranch,
-  RunnableLambda,
-  RunnableMap,
-  RunnableSequence,
-} from "@langchain/core/runnables"
+// Local type definitions
+type Message = {
+  content: string;
+  role: 'human' | 'ai' | 'system';
+}
 
 type RetrievalChainInput = {
-  chat_history: string
-  question: string
+  chat_history: Message[];
+  question: string;
 }
 
+type Document = {
+  pageContent: string;
+  metadata?: Record<string, any>;
+}
+
+interface LLMInterface {
+  call: (messages: Message[]) => Promise<string>;
+}
+
+interface RetrieverInterface {
+  getRelevantDocs: (query: string) => Promise<Document[]>;
+}
+
-export function groupMessagesByConversation(messages: any[]) {
-  // check if messages are in even numbers if not remove the last message
+export function groupMessagesByConversation(messages: Message[]) {
   if (messages.length % 2 !== 0) {
-    messages.pop()
+    messages.pop();
   }
 
-  const groupedMessages = []
-  // [ { human: "", ai: "" } ]
+  const groupedMessages = [];
   for (let i = 0; i < messages.length; i += 2) {
     groupedMessages.push({
       human: messages[i].content,
       ai: messages[i + 1].content,
-    })
+    });
   }
 
-  return groupedMessages
+  return groupedMessages;
 }
 
-const formatChatHistoryAsString = (history: BaseMessage[]) => {
+const formatChatHistory = (history: Message[]): string => {
   return history
-    .map((message) => `${message._getType()}: ${message.content}`)
-    .join("\n")
+    .map((message) => `${message.role}: ${message.content}`)
+    .join('\n');
 }
 
-const formatDocs = (docs: Document[]) => {
+const formatDocs = (docs: Document[]): string => {
   return docs
     .map((doc, i) => `<doc id='${i}'>${doc.pageContent}</doc>`)
-    .join("\n")
+    .join('\n');
 }
 
-const serializeHistory = (input: any) => {
-  const chatHistory = input.chat_history || []
-  const convertedChatHistory = []
-  for (const message of chatHistory) {
-    if (message.human !== undefined) {
-      convertedChatHistory.push(new HumanMessage({ content: message.human }))
-    }
-    if (message["ai"] !== undefined) {
-      convertedChatHistory.push(new AIMessage({ content: message.ai }))
-    }
-  }
-  return convertedChatHistory
+const serializeHistory = (input: any): Message[] => {
+  const chatHistory = input.chat_history || [];
+  return chatHistory.map((msg: any) => ({
+    content: msg.human ? msg.human : msg.ai,
+    role: msg.human ? 'human' : 'ai'
+  }));
 }
 
-const createRetrieverChain = (
-  llm: BaseLanguageModel,
-  retriever: Runnable,
-  question_template: string
-) => {
-  const CONDENSE_QUESTION_PROMPT =
-    PromptTemplate.fromTemplate(question_template)
-  const condenseQuestionChain = RunnableSequence.from([
-    CONDENSE_QUESTION_PROMPT,
-    llm,
-    new StringOutputParser(),
-  ]).withConfig({
-    runName: "CondenseQuestion",
-  })
-  const hasHistoryCheckFn = RunnableLambda.from(
-    (input: RetrievalChainInput) => input.chat_history.length > 0
-  ).withConfig({ runName: "HasChatHistoryCheck" })
-  const conversationChain = condenseQuestionChain.pipe(retriever).withConfig({
-    runName: "RetrievalChainWithHistory",
-  })
-  const basicRetrievalChain = RunnableLambda.from(
-    (input: RetrievalChainInput) => input.question
-  )
-    .withConfig({
-      runName: "Itemgetter:question",
-    })
-    .pipe(retriever)
-    .withConfig({ runName: "RetrievalChainWithNoHistory" })
-
-  return RunnableBranch.from([
-    [hasHistoryCheckFn, conversationChain],
-    basicRetrievalChain,
-  ]).withConfig({
-    runName: "FindDocs",
-  })
+async function createCondensedQuestion(
+  llm: LLMInterface,
+  question: string,
+  chatHistory: string,
+  template: string
+): Promise<string> {
+  const prompt = template
+    .replace('{chat_history}', chatHistory)
+    .replace('{question}', question);
+
+  return await llm.call([{ role: 'human', content: prompt }]);
 }
 
-export const createChain = ({
+export async function createChain({
   llm,
   question_template,
   question_llm,
   retriever,
   response_template,
 }: {
-  llm: BaseLanguageModel<any> | BaseChatModel<any>
-  question_llm: BaseLanguageModel<any> | BaseChatModel<any>
-  retriever: Runnable
-  question_template: string
-  response_template: string
-}) => {
-  const retrieverChain = createRetrieverChain(
-    question_llm,
-    retriever,
-    question_template
-  )
-  const context = RunnableMap.from({
-    context: RunnableSequence.from([
-      ({ question, chat_history }) => {
-        return {
-          question: question,
-          chat_history: formatChatHistoryAsString(chat_history),
-        }
-      },
-      retrieverChain,
-      RunnableLambda.from(formatDocs).withConfig({
-        runName: "FormatDocumentChunks",
-      }),
-    ]),
-    question: RunnableLambda.from(
-      (input: RetrievalChainInput) => input.question
-    ).withConfig({
-      runName: "Itemgetter:question",
-    }),
-    chat_history: RunnableLambda.from(
-      (input: RetrievalChainInput) => input.chat_history
-    ).withConfig({
-      runName: "Itemgetter:chat_history",
-    }),
-  }).withConfig({ tags: ["RetrieveDocs"] })
-  const prompt = ChatPromptTemplate.fromMessages([
-    ["system", response_template],
-    new MessagesPlaceholder("chat_history"),
-    ["human", "{question}"],
-  ])
+  llm: LLMInterface;
+  question_llm: LLMInterface;
+  retriever: RetrieverInterface;
+  question_template: string;
+  response_template: string;
+}) {
+  return async function(input: RetrievalChainInput): Promise<string> {
+    // Convert chat history to proper format
+    const formattedHistory = formatChatHistory(input.chat_history);
+
+    // Get condensed question if there's chat history
+    let searchQuery = input.question;
+    if (input.chat_history.length > 0) {
+      searchQuery = await createCondensedQuestion(
+        question_llm,
+        input.question,
+        formattedHistory,
+        question_template
+      );
+    }
+
+    // Retrieve relevant documents
+    const docs = await retriever.getRelevantDocs(searchQuery);
+    const formattedDocs = formatDocs(docs);
+
+    // Prepare final prompt
+    const messages: Message[] = [
+      { role: 'system', content: response_template },
+      ...input.chat_history,
+      { role: 'human', content: input.question }
+    ];
 
-  const responseSynthesizerChain = RunnableSequence.from([
-    prompt,
-    llm,
-    new StringOutputParser(),
-  ]).withConfig({
-    tags: ["GenerateResponse"],
-  })
-  return RunnableSequence.from([
-    {
-      question: RunnableLambda.from(
-        (input: RetrievalChainInput) => input.question
-      ).withConfig({
-        runName: "Itemgetter:question",
-      }),
-      chat_history: RunnableLambda.from(serializeHistory).withConfig({
-        runName: "SerializeHistory",
-      }),
-    },
-    context,
-    responseSynthesizerChain,
-  ])
+    // Get final response
+    const response = await llm.call(messages);
+    return response;
+  };
 }
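For reference, a minimal sketch of how the rewritten createChain could be called after this commit. The stub LLM and retriever below are hypothetical placeholders written against the LLMInterface and RetrieverInterface shapes in the diff, and the "@/lib/chain" import assumes the project's existing "@/" path alias; none of this is part of the commit itself.

    // Usage sketch (not part of the commit): stubs satisfying the
    // interfaces declared in lib/chain.ts via structural typing.
    import { createChain } from "@/lib/chain"

    // Local copy of the (unexported) Message shape from lib/chain.ts.
    type Msg = { content: string; role: "human" | "ai" | "system" }

    // Hypothetical echo LLM: returns the last message it receives.
    const stubLlm = {
      call: async (messages: Msg[]) =>
        `echo: ${messages[messages.length - 1].content}`,
    }

    // Hypothetical retriever: one canned document for any query.
    const stubRetriever = {
      getRelevantDocs: async (query: string) => [
        { pageContent: `Background material related to: ${query}` },
      ],
    }

    async function main() {
      // createChain is async and resolves to a plain async function,
      // so the whole pipeline is just awaited function calls.
      const chain = await createChain({
        llm: stubLlm,
        question_llm: stubLlm,
        retriever: stubRetriever,
        question_template:
          "Chat history:\n{chat_history}\n\nRephrase as a standalone question: {question}",
        response_template: "Answer the question using the retrieved context.",
      })

      const answer = await chain({
        chat_history: [
          { role: "human", content: "What is this repo?" },
          { role: "ai", content: "A docs generator." },
        ],
        question: "How does caching work?",
      })
      console.log(answer)
    }

    main().catch(console.error)

Two things worth noting about the rewritten chain: the langchain tracing hooks (withConfig, runName, tags) have no equivalent here, since the chain is now an ordinary closure; and formatDocs(docs) is computed but not yet interpolated into the final messages, so a caller that wants retrieved context in the answer would need to bake it into response_template.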
