diff --git a/.changeset/pink-days-appear.md b/.changeset/pink-days-appear.md new file mode 100644 index 0000000000..95c4e7e3d2 --- /dev/null +++ b/.changeset/pink-days-appear.md @@ -0,0 +1,7 @@ +--- +"@llamaindex/anthropic": patch +"llamaindex": patch +"@llamaindex/core": patch +--- + +added support for tool calls with results in message history for anthropic agent diff --git a/packages/llamaindex/tests/llm/index.test.ts b/packages/llamaindex/tests/llm/index.test.ts index 4a88cf9c5e..1509df3a19 100644 --- a/packages/llamaindex/tests/llm/index.test.ts +++ b/packages/llamaindex/tests/llm/index.test.ts @@ -1,5 +1,6 @@ +import type { MessageParam } from "@anthropic-ai/sdk/resources/messages"; import { setEnvs } from "@llamaindex/env"; -import { Anthropic } from "llamaindex"; +import { Anthropic, OpenAI, type ChatMessage } from "llamaindex"; import { beforeAll, describe, expect, test } from "vitest"; beforeAll(() => { @@ -8,11 +9,25 @@ beforeAll(() => { }); }); -describe("Anthropic llm", () => { - test("format messages", () => { - const anthropic = new Anthropic(); - expect( - anthropic.formatMessages([ +describe("Message Formatting", () => { + describe("Basic Message Formatting", () => { + test("OpenAI formats basic user and assistant messages correctly", () => { + const inputMessages: ChatMessage[] = [ + { content: "Hello", role: "user" }, + { content: "Hi there!", role: "assistant" }, + { content: "Be helpful", role: "system" }, + ]; + const expectedOutput = [ + { role: "user", content: "Hello" }, + { role: "assistant", content: "Hi there!" 
}, + { role: "system", content: "Be helpful" }, + ]; + expect(OpenAI.toOpenAIMessage(inputMessages)).toEqual(expectedOutput); + }); + + test("Anthropic formats basic messages correctly", () => { + const anthropic = new Anthropic(); + const inputMessages: ChatMessage[] = [ { content: "You are a helpful assistant.", role: "assistant", @@ -21,20 +36,53 @@ describe("Anthropic llm", () => { content: "Hello?", role: "user", }, - ]), - ).toEqual([ - { - content: "You are a helpful assistant.", - role: "assistant", - }, - { - content: "Hello?", - role: "user", - }, - ]); + ]; + const expectedOutput: MessageParam[] = [ + { + content: "You are a helpful assistant.", + role: "assistant", + }, + { + content: "Hello?", + role: "user", + }, + ]; + + expect(anthropic.formatMessages(inputMessages)).toEqual(expectedOutput); + }); + + test("OpenAI handles system messages correctly", () => { + const inputMessages: ChatMessage[] = [ + { content: "You are a coding assistant", role: "system" }, + { content: "Hello", role: "user" }, + ]; + const expectedOutput = [ + { role: "system", content: "You are a coding assistant" }, + { role: "user", content: "Hello" }, + ]; + expect(OpenAI.toOpenAIMessage(inputMessages)).toEqual(expectedOutput); + }); - expect( - anthropic.formatMessages([ + test("Anthropic handles multi-turn conversation correctly", () => { + const anthropic = new Anthropic(); + const inputMessages: ChatMessage[] = [ + { content: "Hi", role: "user" }, + { content: "Hello! How can I help?", role: "assistant" }, + { content: "What's the weather?", role: "user" }, + ]; + const expectedOutput: MessageParam[] = [ + { content: "Hi", role: "user" }, + { content: "Hello! 
How can I help?", role: "assistant" }, + { content: "What's the weather?", role: "user" }, + ]; + expect(anthropic.formatMessages(inputMessages)).toEqual(expectedOutput); + }); + }); + + describe("Advanced Message Formatting", () => { + test("Anthropic filters out system messages", () => { + const anthropic = new Anthropic(); + const inputMessages: ChatMessage[] = [ { content: "You are a helpful assistant.", role: "assistant", @@ -51,24 +99,58 @@ describe("Anthropic llm", () => { content: "What is your name?", role: "user", }, - ]), - ).toEqual([ - { - content: "You are a helpful assistant.", - role: "assistant", - }, - { - content: "Hello?\nWhat is your name?", - role: "user", - }, - ]); - - expect( - anthropic.formatMessages([ + ]; + const expectedOutput: MessageParam[] = [ { content: "You are a helpful assistant.", role: "assistant", }, + { + content: "Hello?\nWhat is your name?", + role: "user", + }, + ]; + + expect(anthropic.formatMessages(inputMessages)).toEqual(expectedOutput); + }); + + test("Anthropic merges consecutive messages from the same role", () => { + const anthropic = new Anthropic(); + const inputMessages: ChatMessage[] = [ + { + content: "Hello?", + role: "user", + }, + { + content: "How are you?", + role: "user", + }, + { + content: "I am fine, thank you!", + role: "assistant", + }, + { + content: "And you?", + role: "assistant", + }, + ]; + const expectedOutput: MessageParam[] = [ + { + content: "Hello?\nHow are you?", + role: "user", + }, + { + content: "I am fine, thank you!\nAnd you?", + role: "assistant", + }, + ]; + + expect(anthropic.formatMessages(inputMessages)).toEqual(expectedOutput); + }); + + test("Anthropic handles image content", () => { + const anthropic = new Anthropic(); + const inputMessages: ChatMessage[] = [ { content: [ { @@ -84,29 +166,180 @@ describe("Anthropic llm", () => { ], role: "user", }, - ]), - ).toEqual([ + ]; + const expectedOutput: MessageParam[] = [ + { + role: "user", + content: [ + { + type: "text", + text: 
"What do you see in the image?", + }, + { + type: "image", + source: { + type: "base64", + media_type: "image/jpeg", + data: "/9j/4AAQSkZJRgABAQEASABIAAD/2wBDAAQDAwQDAwQEAwQFBAQFBgoHBgYGBg0JCggKDw0QEA8NDw4RExgUERIXEg4PFRwVFxkZGxsbEBQdHx0aHxgaGxr/2wBDAQQFBQYFBgwHBwwaEQ8RGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhr/wAARCAAgACADASIAAhEBAxEB/8QAGAABAAMBAAAAAAAAAAAAAAAACAQHCQb/xAAvEAABAgUCBAUDBQEAAAAAAAACAQMEBQYHERIhAAgTYSIxMkJxI2KCFBVBUVKh/8QAGAEAAwEBAAAAAAAAAAAAAAAAAwQFAQL/xAAnEQABBAECAwkAAAAAAAAAAAACAQMEEQAFMiExYRITFCJBcXKBof/aAAwDAQACEQMRAD8Aufmb5mnbWREFRdvIMZ3cWcaBh2NHUGEFwtIKQp63CX0h+S7YQgRzGSq6kgqGAS8NQRc6fmkIMWwSxJEyP+m0bwggQr5iIom6KnnxXty61jK+uJUVUxzxm/M5g5EASr6G9WGwTsIIIp2FOHJfi0kyvzS9Cv0zGwEF+2whOAUY4a6mnm2lREURLPoTggNG5tS6xpmOT4GQptwNUZc6sbexzcZRVSTKTOgudMPEL0j7E2uQNOxIqcaYcqXNaxe2HKnauBiAraDZ6n0k0tTBpPNwE9pptqDP3DtlBC1Q8qNw5K4AwLEunYkWMwcYg6fnqoH/ADPHA2/qeZWquhJJ3pODmEhmg/qGl2XAloebL5HWK/K8dOMOM7xVPfJrMhmQiq0SFXOlyPc+jIq3lwakpeYNq27K491kfvbzls07ECiSdlThhWKvj1LLx0VVLWGqSBuFJ1jc3WBEUb8K4TUieHz3xni7ea3lSZvZDhUVImxAVtBso39VdLUe0nk2a+0030n+K7YUc95/J66tRIp3SVXUpGyUI7wvPxDBoJ/UaLIuIqtuInRwiiqp4z3XbBYr3cGp9P30zJXiSjk1HLsqdIvxvzV1q8ZtB3ppa5bkwZkDz7LsF09Qxgi0Roa6UUU1LnxYH5JP74D1LUjNrkXigabc6kZM5vPFZi3NPi3dVXnFT+EQUM17IvEi1tL1xUkcEHb+lo6duvRUO644wwSDpaPWgG7sAApIKqqqm4jvxo1yvcrjdoTiqtrQ2I+u5nr19ItbUA2a5IAX3GvuP8U2ypMS5pSwFC5peTtM0lnSkMWVVUJb48a+8//Z", + }, + }, + ], + }, + ]; + + expect(anthropic.formatMessages(inputMessages)).toEqual(expectedOutput); + }); + }); + + describe("Tool Message Formatting", () => { + const toolCallMessages: ChatMessage[] = [ + { + role: "user", + content: "What's the weather in London?", + }, { - content: "You are a helpful assistant.", role: "assistant", + content: "Let me check the weather.", + options: { + toolCall: [ + { + id: "call_123", + name: "weather", + input: JSON.stringify({ location: "London" }), + }, + ], + }, }, { - content: [ - { - text: "What do you see in the image?", - type: "text", + role: 
"assistant", + content: "The weather in London is sunny, +20°C", + options: { + toolResult: { + id: "call_123", }, - { - source: { - data: "/9j/4AAQSkZJRgABAQEASABIAAD/2wBDAAQDAwQDAwQEAwQFBAQFBgoHBgYGBg0JCggKDw0QEA8NDw4RExgUERIXEg4PFRwVFxkZGxsbEBQdHx0aHxgaGxr/2wBDAQQFBQYFBgwHBwwaEQ8RGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhr/wAARCAAgACADASIAAhEBAxEB/8QAGAABAAMBAAAAAAAAAAAAAAAACAQHCQb/xAAvEAABAgUCBAUDBQEAAAAAAAACAQMEBQYHERIhAAgTYSIxMkJxI2KCFBVBUVKh/8QAGAEAAwEBAAAAAAAAAAAAAAAAAwQFAQL/xAAnEQABBAECAwkAAAAAAAAAAAACAQMEEQAFMiExYRITFCJBcXKBof/aAAwDAQACEQMRAD8Aufmb5mnbWREFRdvIMZ3cWcaBh2NHUGEFwtIKQp63CX0h+S7YQgRzGSq6kgqGAS8NQRc6fmkIMWwSxJEyP+m0bwggQr5iIom6KnnxXty61jK+uJUVUxzxm/M5g5EASr6G9WGwTsIIIp2FOHJfi0kyvzS9Cv0zGwEF+2whOAUY4a6mnm2lREURLPoTggNG5tS6xpmOT4GQptwNUZc6sbexzcZRVSTKTOgudMPEL0j7E2uQNOxIqcaYcqXNaxe2HKnauBiAraDZ6n0k0tTBpPNwE9pptqDP3DtlBC1Q8qNw5K4AwLEunYkWMwcYg6fnqoH/ADPHA2/qeZWquhJJ3pODmEhmg/qGl2XAloebL5HWK/K8dOMOM7xVPfJrMhmQiq0SFXOlyPc+jIq3lwakpeYNq27K491kfvbzls07ECiSdlThhWKvj1LLx0VVLWGqSBuFJ1jc3WBEUb8K4TUieHz3xni7ea3lSZvZDhUVImxAVtBso39VdLUe0nk2a+0030n+K7YUc95/J66tRIp3SVXUpGyUI7wvPxDBoJ/UaLIuIqtuInRwiiqp4z3XbBYr3cGp9P30zJXiSjk1HLsqdIvxvzV1q8ZtB3ppa5bkwZkDz7LsF09Qxgi0Roa6UUU1LnxYH5JP74D1LUjNrkXigabc6kZM5vPFZi3NPi3dVXnFT+EQUM17IvEi1tL1xUkcEHb+lo6duvRUO644wwSDpaPWgG7sAApIKqqqm4jvxo1yvcrjdoTiqtrQ2I+u5nr19ItbUA2a5IAX3GvuP8U2ypMS5pSwFC5peTtM0lnSkMWVVUJb48a+8//Z", - media_type: "image/jpeg", - type: "base64", + }, + }, + ]; + + test("OpenAI formats tool calls correctly", () => { + const expectedOutput = [ + { + role: "user", + content: "What's the weather in London?", + }, + { + role: "assistant", + content: "Let me check the weather.", + tool_calls: [ + { + id: "call_123", + type: "function", + function: { + name: "weather", + arguments: JSON.stringify({ location: "London" }), + }, + }, + ], + }, + { + role: "tool", + content: "The weather in London is sunny, +20°C", + tool_call_id: "call_123", + }, + ]; + + 
expect(OpenAI.toOpenAIMessage(toolCallMessages)).toEqual(expectedOutput); + }); + + test("Anthropic formats tool calls correctly", () => { + const anthropic = new Anthropic(); + const expectedOutput: MessageParam[] = [ + { + role: "user", + content: "What's the weather in London?", + }, + { + role: "assistant", + content: [ + { + type: "text", + text: "Let me check the weather.", }, - type: "image", + { + type: "tool_use", + id: "call_123", + name: "weather", + input: { + location: "London", + }, + }, + ], + }, + { + role: "user", // anthropic considers all that comes not from their inference API is user role + content: [ + { + type: "tool_result", + tool_use_id: "call_123", + content: "The weather in London is sunny, +20°C", + }, + ], + }, + ]; + + expect(anthropic.formatMessages(toolCallMessages)).toEqual( + expectedOutput, + ); + }); + + test("OpenAI formats multiple tool calls correctly", () => { + const multiToolMessages: ChatMessage[] = [ + { + role: "assistant", + content: "Let me check both weather and time.", + options: { + toolCall: [ + { + id: "weather_123", + name: "weather", + input: JSON.stringify({ location: "London" }), + }, + { + id: "time_456", + name: "time", + input: JSON.stringify({ timezone: "GMT" }), + }, + ], }, - ], - role: "user", - }, - ]); + }, + ]; + + const expectedOutput = [ + { + role: "assistant", + content: "Let me check both weather and time.", + tool_calls: [ + { + id: "weather_123", + type: "function", + function: { + name: "weather", + arguments: JSON.stringify({ location: "London" }), + }, + }, + { + id: "time_456", + type: "function", + function: { + name: "time", + arguments: JSON.stringify({ timezone: "GMT" }), + }, + }, + ], + }, + ]; + + expect(OpenAI.toOpenAIMessage(multiToolMessages)).toEqual(expectedOutput); + }); }); }); diff --git a/packages/providers/anthropic/src/llm.ts b/packages/providers/anthropic/src/llm.ts index 251f7b2953..853d3238ad 100644 --- a/packages/providers/anthropic/src/llm.ts +++ 
b/packages/providers/anthropic/src/llm.ts @@ -4,19 +4,12 @@ import type { BetaCacheControlEphemeral, BetaTextBlockParam, } from "@anthropic-ai/sdk/resources/beta/index"; +import type { TextBlock } from "@anthropic-ai/sdk/resources/index"; import type { - TextBlock, - TextBlockParam, -} from "@anthropic-ai/sdk/resources/index"; -import type { - ImageBlockParam, - MessageCreateParamsNonStreaming, MessageParam, Model, Tool, - ToolResultBlockParam, ToolUseBlock, - ToolUseBlockParam, } from "@anthropic-ai/sdk/resources/messages"; import { wrapLLMEvent } from "@llamaindex/core/decorator"; import type { @@ -193,99 +186,128 @@ export class Anthropic extends ToolCallLLM< formatMessages( messages: ChatMessage[], ): MessageParam[] { - const result: MessageParam[] = messages - .filter( - (message) => message.role === "user" || message.role === "assistant", - ) - .map((message) => { - const options = message.options ?? {}; - if ("toolResult" in options) { - const { id, isError } = options.toolResult; - return { - role: "user", - content: [ - { - type: "tool_result", - is_error: isError, - content: [ - { - type: "text", - text: extractText(message.content), - }, - ], - tool_use_id: id, - }, - ] satisfies ToolResultBlockParam[], - } satisfies MessageParam; - } else if ("toolCall" in options) { - const aiThinkingText = extractText(message.content); - return { - role: "assistant", - content: [ - // this could be empty when you call two tools in one query - ...(aiThinkingText.trim() - ? [ - { - type: "text", - text: aiThinkingText, - } satisfies TextBlockParam, - ] - : []), - ...options.toolCall.map( - (toolCall) => - ({ - type: "tool_use", - id: toolCall.id, - name: toolCall.name, - input: toolCall.input, - }) satisfies ToolUseBlockParam, - ), - ], - } satisfies MessageParam; - } + const formattedMessages = messages.flatMap((message) => { + const options = message.options ?? 
{}; + if (message.role === "system") { + // Skip system messages + return []; + } + + if ("toolCall" in options) { + const formattedMessage: MessageParam = { + role: "assistant", + content: [ + { + type: "text" as const, + text: extractText(message.content), + }, + ...options.toolCall.map((tool) => ({ + type: "tool_use" as const, + id: tool.id, + name: tool.name, + input: + typeof tool.input === "string" + ? JSON.parse(tool.input) + : tool.input, + })), + ], + }; + + return formattedMessage; + } + + // Handle tool results + if ("toolResult" in options) { + const formattedMessage: MessageParam = { + role: "user", + content: [ + { + type: "tool_result" as const, + tool_use_id: options.toolResult.id, + content: extractText(message.content), + }, + ], + }; + + return formattedMessage; + } + + // Handle regular messages + if (typeof message.content === "string") { + const role: "user" | "assistant" = + message.role === "assistant" ? "assistant" : "user"; return { - content: - typeof message.content === "string" - ? message.content - : message.content.map( - (content): TextBlockParam | ImageBlockParam => - content.type === "text" - ? 
{ - type: "text", - text: content.text, - } - : { - type: "image", - source: { - data: content.image_url.url.substring( - content.image_url.url.indexOf(",") + 1, - ), - media_type: - `image/${content.image_url.url.substring("data:image/".length, content.image_url.url.indexOf(";base64"))}` as - | "image/jpeg" - | "image/png" - | "image/gif" - | "image/webp", - type: "base64", - }, - }, - ), - role: message.role as "user" | "assistant", + role, + content: message.content, } satisfies MessageParam; - }); - // merge messages with the same role - // in case of 'messages: roles must alternate between "user" and "assistant", but found multiple "user" roles in a row' - const realResult: MessageParam[] = []; - for (let i = 0; i < result.length; i++) { + } + + // Handle multi-modal content + const role: "user" | "assistant" = + message.role === "assistant" ? "assistant" : "user"; + + return { + role, + content: message.content.map((content) => { + if (content.type === "text") { + return { + type: "text" as const, + text: content.text, + }; + } + return { + type: "image" as const, + source: { + type: "base64" as const, + media_type: `image/${content.image_url.url.substring( + "data:image/".length, + content.image_url.url.indexOf(";base64"), + )}` as "image/jpeg" | "image/png" | "image/gif" | "image/webp", + data: content.image_url.url.substring( + content.image_url.url.indexOf(",") + 1, + ), + }, + }; + }), + } satisfies MessageParam; + }); + + return this.mergeConsecutiveMessages(formattedMessages); + } + + // Add helper method to prepare tools for API call + private prepareToolsForAPI(tools: BaseTool[]): Tool[] { + return tools.map((tool) => { + if (tool.metadata.parameters?.type !== "object") { + throw new TypeError("Tool parameters must be an object"); + } + return { + input_schema: { + type: "object", + properties: tool.metadata.parameters.properties, + required: tool.metadata.parameters.required, + }, + name: tool.metadata.name, + description: tool.metadata.description, 
+ }; + }); + } + + private mergeConsecutiveMessages(messages: MessageParam[]): MessageParam[] { + const result: MessageParam[] = []; + + for (let i = 0; i < messages.length; i++) { if (i === 0) { - realResult.push(result[i]!); + result.push(messages[i]!); continue; } - const current = result[i]!; - const previous = result[i - 1]!; + + const current = messages[i]!; + const previous = result[result.length - 1]!; + if (current.role === previous.role) { - // merge two messages with the same role + // Merge content based on type if (Array.isArray(previous.content)) { if (Array.isArray(current.content)) { previous.content.push(...current.content); @@ -298,25 +320,19 @@ export class Anthropic extends ToolCallLLM< } else { if (Array.isArray(current.content)) { previous.content = [ - { - type: "text", - text: previous.content, - }, + { type: "text", text: previous.content }, ...current.content, ]; } else { - previous.content += `\n${current.content}`; + previous.content = `${previous.content}\n${current.content}`; } } - // no need to push the message - } - // if the roles are different, just push the message - else { - realResult.push(current); + } else { + result.push(current); } } - return realResult; + return result; } chat( @@ -336,130 +352,104 @@ export class Anthropic extends ToolCallLLM< @wrapLLMEvent async chat( params: - | LLMChatParamsNonStreaming< - AnthropicAdditionalChatOptions, - AnthropicToolCallLLMMessageOptions - > - | LLMChatParamsStreaming< - AnthropicAdditionalChatOptions, - AnthropicToolCallLLMMessageOptions - >, + | LLMChatParamsNonStreaming + | LLMChatParamsStreaming, ): Promise< | ChatResponse | AsyncIterable> > { - let { messages } = params; - - const { stream, tools } = params; - - let systemPrompt: string | Array | null = null; + const { messages, stream, tools } = params; + // Handle system messages + let systemPrompt: string | BetaTextBlockParam[] | null = null; const systemMessages = messages.filter( (message) => message.role === "system", ); 
if (systemMessages.length > 0) { - systemPrompt = systemMessages.map((message) => - message.options && "cache_control" in message.options - ? { - type: "text", - text: extractText(message.content), - cache_control: message.options.cache_control, - } - : { - type: "text", - text: extractText(message.content), - }, - ); - messages = messages.filter((message) => message.role !== "system"); + systemPrompt = systemMessages.map((message): BetaTextBlockParam => { + const textContent = extractText(message.content); + if (message.options && "cache_control" in message.options) { + return { + type: "text" as const, + text: textContent, + cache_control: message.options + .cache_control as BetaCacheControlEphemeral, + }; + } + return { + type: "text" as const, + text: textContent, + }; + }); } + const beta = - systemPrompt?.find((message) => "cache_control" in message) !== undefined; + Array.isArray(systemPrompt) && + systemPrompt.some((message) => "cache_control" in message); - // case: Non-streaming let anthropic = this.session.anthropic; if (beta) { // @ts-expect-error type casting anthropic = anthropic.beta.promptCaching; } - // case: Streaming if (stream) { if (tools) { console.error("Tools are not supported in streaming mode"); } - return this.streamChat(messages, systemPrompt, anthropic); + return this.streamChat( + messages.filter((m) => m.role !== "system"), + systemPrompt, + anthropic, + ); } - if (tools) { - const params: MessageCreateParamsNonStreaming = { - messages: this.formatMessages(messages), - tools: tools.map(Anthropic.toTool), - model: this.getModelName(this.model), - temperature: this.temperature, - max_tokens: this.maxTokens ?? 
4096, - top_p: this.topP, - ...(systemPrompt && { system: systemPrompt }), - }; - // Remove tools if there are none, as it will cause an error - if (tools.length === 0) { - delete params.tools; - } - const response = await anthropic.messages.create(params); - - const toolUseBlock = response.content.filter( - (content): content is ToolUseBlock => content.type === "tool_use", - ); + const apiParams = { + model: this.getModelName(this.model), + messages: this.mergeConsecutiveMessages( + this.formatMessages(messages.filter((m) => m.role !== "system")), + ), + max_tokens: this.maxTokens ?? 4096, + temperature: this.temperature, + top_p: this.topP, + ...(systemPrompt && { system: systemPrompt }), + }; - return { - raw: response, - message: { - content: response.content - .filter((content): content is TextBlock => content.type === "text") - .map((content) => ({ - type: "text", - text: content.text, - })), - role: "assistant", - options: - toolUseBlock.length > 0 - ? { - toolCall: toolUseBlock.map((block) => ({ - id: block.id, - name: block.name, - input: - typeof block.input === "object" - ? JSON.stringify(block.input) - : `${block.input}`, - })), - } - : {}, - }, - }; - } else { - const response = await anthropic.messages.create({ - model: this.getModelName(this.model), - messages: this.formatMessages(messages), - max_tokens: this.maxTokens ?? 
4096, - temperature: this.temperature, - top_p: this.topP, - ...(systemPrompt && { system: systemPrompt }), + if (tools?.length) { + Object.assign(apiParams, { + tools: this.prepareToolsForAPI(tools), }); - - return { - raw: response, - message: { - content: response.content - .filter((content): content is TextBlock => content.type === "text") - .map((content) => ({ - type: "text", - text: content.text, - })), - role: "assistant", - options: {}, - }, - }; } + + const response = await anthropic.messages.create(apiParams); + + const toolUseBlock = response.content.filter( + (content): content is ToolUseBlock => content.type === "tool_use", + ); + + return { + raw: response, + message: { + content: response.content + .filter((content): content is TextBlock => content.type === "text") + .map((content) => ({ + type: "text" as const, + text: content.text, + })), + role: "assistant", + options: + toolUseBlock.length > 0 + ? { + toolCall: toolUseBlock.map((block) => ({ + id: block.id, + name: block.name, + input: JSON.stringify(block.input), + })), + } + : {}, + }, + }; } protected async *streamChat(