diff --git a/scripts/jsdoc-automation/src/AIService/AIService.ts b/scripts/jsdoc-automation/src/AIService/AIService.ts
index b7429a5d2d9..fafb17e742e 100644
--- a/scripts/jsdoc-automation/src/AIService/AIService.ts
+++ b/scripts/jsdoc-automation/src/AIService/AIService.ts
@@ -7,3190 +7,49 @@ import type { Configuration } from "../Configuration.js";
 import { CodeFormatter } from "./utils/CodeFormatter.js";
 import { DocumentOrganizer } from "./utils/DocumentOrganizer.js";
 //import { CustomErrorParams, InputTypeOfTupleWithRest, IssueData, OutputTypeOfTupleWithRest, ParseParams, ParsePathComponent, ParseStatus, RefinementCtx, RefinementEffect, SafeParseReturnType, z, ZodBranded, ZodCatch, ZodCustomIssue, ZodDefault, ZodEffects, ZodError, ZodIntersection, ZodInvalidArgumentsIssue, ZodInvalidDateIssue, ZodInvalidEnumValueIssue, ZodInvalidIntersectionTypesIssue, ZodInvalidLiteralIssue, ZodInvalidReturnTypeIssue, ZodInvalidStringIssue, ZodInvalidUnionDiscriminatorIssue, ZodInvalidUnionIssue, ZodIssueBase, ZodIssueCode, ZodNotFiniteIssue, ZodNotMultipleOfIssue, ZodOptionalDef, ZodParsedType, ZodPipeline, ZodPromise, ZodReadonly, ZodTooBigIssue, ZodTooSmallIssue, ZodTupleDef, ZodUnion, ZodUnrecognizedKeysIssue } from "zod";
+//import { StructuredToolInterface, StructuredToolParams } from "@langchain/core/tools.js";
+//import { ToolChoice } from "@langchain/core/language_models/chat_models.js";
+//import { BaseChatModelParams } from "@langchain/core/language_models/chat_models.js";
+//import { FunctionDefinition } from "@langchain/core/language_models/base.js";
+//import { BaseFunctionCallOptions } from "@langchain/core/language_models/base.js";
+//import { Serialized } from "@langchain/core/dist/load/serializable.js";
-//export declare type InputTypeOfTupleWithRest = Rest extends ZodTypeAny ? [...InputTypeOfTuple, ...Rest["_input"][]] : InputTypeOfTuple;
-
-export declare type InputTypeOfTuple = AssertArray<{
-    [k in keyof T]: T[k] extends ZodType ? T[k]["_input"] : never;
-}>;
-
-type stripPath = T extends `${infer _Start}/${infer Rest}` ? stripPath : T;
-//export declare type OutputTypeOfTupleWithRest = Rest extends ZodTypeAny ? [...OutputTypeOfTuple, ...Rest["_output"][]] : OutputTypeOfTuple;
-export declare type ParseParams = {
-    path: (string | number)[];
-    errorMap: ZodErrorMap;
-    async: boolean;
-};
-export declare type ParsePathComponent = string | number;
-export declare class ParseStatus {
-    value: "aborted" | "dirty" | "valid";
-    dirty(): void;
-    abort(): void;
-    static mergeArray(status: ParseStatus, results: SyncParseReturnType[]): SyncParseReturnType;
-    static mergeObjectAsync(status: ParseStatus, pairs: {
-        key: ParseReturnType;
-        value: ParseReturnType;
-    }[]): Promise>;
-    static mergeObjectSync(status: ParseStatus, pairs: {
-        key: SyncParseReturnType;
-        value: SyncParseReturnType;
-        alwaysSet?: boolean;
-    }[]): SyncParseReturnType;
-}
-export declare type IssueData = stripPath & {
-    path?: (string | number)[];
-    fatal?: boolean;
-};
-export interface RefinementCtx {
-    addIssue: (arg: IssueData) => void;
-    path: (string | number)[];
-}
-
-export declare type RefinementEffect = {
-    type: "refinement";
-    refinement: (arg: T, ctx: RefinementCtx) => any;
-};
-//export declare class ZodBranded extends ZodType, ZodBrandedDef, T["_input"]> {
-    //_parse(input: ParseInput): ParseReturnType;
-// unwrap(): T;
-//}
-// export declare class ZodCatch extends ZodType, unknown> {
-//     _parse(input: ParseInput): ParseReturnType;
-//     removeCatch(): T;
-//     static create: (type: T_1, params: {
-//         errorMap?: ZodErrorMap | undefined;
-//         invalid_type_error?: string | undefined;
-//         required_error?: string | undefined;
-//         message?: string | undefined;
-//         description?: string | undefined;
-//     } & {
-//         catch: T_1["_output"] | (() => T_1["_output"]);
-//     }) => ZodCatch;
-// }
-export interface ZodCustomIssue extends ZodIssueBase {
-    code: typeof ZodIssueCode.custom;
-    params?: {
-        [k: string]: any;
-    };
-}
-export declare type SafeParseReturnType = SafeParseSuccess | SafeParseError;
-import {
-    AssertArray,
-    BRAND,
-    InputTypeOfTupleWithRest,
-
-    OutputTypeOfTuple,
-    SafeParseError,
-    SafeParseSuccess,
-    z,
-    ZodBrandedDef,
-    ZodCatchDef,
-    ZodDefault,
-    ZodEffects,
-    ZodError,
-    ZodIntersection,
-    ZodIssueBase,
-    ZodIssueCode,
-    ZodNullable,
-    ZodOptionalDef,
-    ZodPipeline,
-    ZodPromise,
-    ZodReadonly,
-    ZodTupleDef,
-    ZodType,
-    ZodUnion
-    // ZodDefault, ZodEffects, ZodError, ZodIntersection,
-    // ZodInvalidArgumentsIssue, ZodInvalidDateIssue, ZodInvalidEnumValueIssue,
-    // ZodInvalidIntersectionTypesIssue, ZodInvalidLiteralIssue, ZodInvalidReturnTypeIssue,
-    // ZodInvalidStringIssue, ZodInvalidUnionDiscriminatorIssue, ZodInvalidUnionIssue,
-    // ZodIssueBase, ZodIssueCode, ZodNotFiniteIssue, ZodNotMultipleOfIssue,
-    // ZodOptionalDef, ZodParsedType, ZodPipeline,
-    // ZodPromise, ZodReadonly, ZodTooBigIssue,
-    // ZodTooSmallIssue, ZodTupleDef, ZodUnion, ZodUnrecognizedKeysIssue
-    } from "zod";
-import { AsyncCaller } from "langsmith/dist/utils/async_caller.js";
-import { ClientOptions } from "ws";
 export declare type CustomErrorParams = Partial>;
-//import { ZodInvalidTypeIssue, ZodTypeDef } from "zod";
-export interface ZodTypeDef {
-    errorMap?: ZodErrorMap;
-    description?: string;
-}
-export interface ZodInvalidTypeIssue extends ZodIssueBase {
-    code: typeof ZodIssueCode.invalid_type;
-    expected: ZodParsedType;
-    received: ZodParsedType;
-}
-export interface AsyncLocalStorageInterface {
-    getStore: () => any | undefined;
-
-    run: (store: any, callback: () => T) => T;
-
-    enterWith: (store: any) => void;
- }
 export const TRACING_ALS_KEY = Symbol.for("ls:tracing_async_local_storage");
-export const
getGlobalAsyncLocalStorageInstance = (): - | AsyncLocalStorageInterface - | undefined => { - return (globalThis as any)[TRACING_ALS_KEY]; -}; -export const setGlobalAsyncLocalStorageInstance = ( - instance: AsyncLocalStorageInterface - ) => { - (globalThis as any)[TRACING_ALS_KEY] = instance; - }; -export async function consumeCallback( - promiseFn: () => Promise | T | void, - wait: boolean - ): Promise { - // - } - -export class BaseRunManager { - constructor( - public readonly runId: string, - public readonly handlers: BaseCallbackHandler[], - protected readonly inheritableHandlers: BaseCallbackHandler[], - protected readonly tags: string[], - protected readonly inheritableTags: string[], - protected readonly metadata: Record, - protected readonly inheritableMetadata: Record, - protected readonly _parentRunId?: string - ) {} - - get parentRunId() { - return this._parentRunId; - } - - async handleText(text: string): Promise { - await Promise.all( - this.handlers.map((handler) => - consumeCallback(async () => { - try { - await handler.handleText?.( - text, - this.runId, - this._parentRunId, - this.tags - ); - } catch (err) { - const logFunction = handler.raiseError - ? console.error - : console.warn; - logFunction( - `Error in handler ${handler.constructor.name}, handleText: ${err}` - ); - if (handler.raiseError) { - throw err; - } - } - }, handler.awaitHandlers) - ) - ); - } - - async handleCustomEvent( - eventName: string, - // eslint-disable-next-line @typescript-eslint/no-explicit-any - data: any, - _runId?: string, - _tags?: string[], - // eslint-disable-next-line @typescript-eslint/no-explicit-any - _metadata?: Record - ): Promise { - await Promise.all( - this.handlers.map((handler) => - consumeCallback(async () => { - try { - await handler.handleCustomEvent?.( - eventName, - data, - this.runId, - this.tags, - this.metadata - ); - } catch (err) { - const logFunction = handler.raiseError - ? console.error - : console.warn; - logFunction( - `Error in handler ${handler.constructor.name}, handleCustomEvent: ${err}` - ); - if (handler.raiseError) { - throw err; - } - } - }, handler.awaitHandlers) - ) - ); - } -} -export class CallbackManagerForChainRun - extends BaseRunManager - implements BaseCallbackManagerMethods -{ - getChild(tag?: string): CallbackManager { - // eslint-disable-next-line @typescript-eslint/no-use-before-define - const manager = new CallbackManager(this.runId); - manager.setHandlers(this.inheritableHandlers); - manager.addTags(this.inheritableTags); - manager.addMetadata(this.inheritableMetadata); - if (tag) { - manager.addTags([tag], false); - } - return manager; - } - - async handleChainError( - err: Error | unknown, - _runId?: string, - _parentRunId?: string, - _tags?: string[], - kwargs?: { inputs?: Record } - ): Promise { - await Promise.all( - this.handlers.map((handler) => - consumeCallback(async () => { - if (!handler.ignoreChain) { - try { - await handler.handleChainError?.( - err, - this.runId, - this._parentRunId, - this.tags, - kwargs - ); - } catch (err) { - const logFunction = handler.raiseError - ? 
console.error - : console.warn; - logFunction( - `Error in handler ${handler.constructor.name}, handleChainError: ${err}` - ); - if (handler.raiseError) { - throw err; - } - } - } - }, handler.awaitHandlers) - ) - ); - } - - async handleChainEnd( - output: ChainValues, - _runId?: string, - _parentRunId?: string, - _tags?: string[], - kwargs?: { inputs?: Record } - ): Promise { - await Promise.all( - this.handlers.map((handler) => - consumeCallback(async () => { - if (!handler.ignoreChain) { - try { - await handler.handleChainEnd?.( - output, - this.runId, - this._parentRunId, - this.tags, - kwargs - ); - } catch (err) { - const logFunction = handler.raiseError - ? console.error - : console.warn; - logFunction( - `Error in handler ${handler.constructor.name}, handleChainEnd: ${err}` - ); - if (handler.raiseError) { - throw err; - } - } - } - }, handler.awaitHandlers) - ) - ); - } - - async handleAgentAction(action: AgentAction): Promise { - await Promise.all( - this.handlers.map((handler) => - consumeCallback(async () => { - if (!handler.ignoreAgent) { - try { - await handler.handleAgentAction?.( - action, - this.runId, - this._parentRunId, - this.tags - ); - } catch (err) { - const logFunction = handler.raiseError - ? console.error - : console.warn; - logFunction( - `Error in handler ${handler.constructor.name}, handleAgentAction: ${err}` - ); - if (handler.raiseError) { - throw err; - } - } - } - }, handler.awaitHandlers) - ) - ); - } - - async handleAgentEnd(action: AgentFinish): Promise { - await Promise.all( - this.handlers.map((handler) => - consumeCallback(async () => { - if (!handler.ignoreAgent) { - try { - await handler.handleAgentEnd?.( - action, - this.runId, - this._parentRunId, - this.tags - ); - } catch (err) { - const logFunction = handler.raiseError - ? console.error - : console.warn; - logFunction( - `Error in handler ${handler.constructor.name}, handleAgentEnd: ${err}` - ); - if (handler.raiseError) { - throw err; - } - } - } - }, handler.awaitHandlers) - ) - ); - } -} -// import { wrapOpenAI } from "langsmith/wrappers"; -//import { FakeListChatModel } from "@langchain/core/utils/testing"; -//import { AIMessageChunk, BaseMessage, BaseMessageChunk, BaseMessageLike } from "@langchain/core/messages"; -//import { ChatGenerationChunk, ChatResult, LLMResult } from "@langchain/core/outputs"; -//import { BaseChatModel, BaseChatModelParams, LangSmithParams } from "@langchain/core/language_models/chat_models"; -//import { CallbackManager, CallbackManagerForChainRun, CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager"; -export declare type CatchallOutput = ZodType extends T ? unknown : { - [k: string]: T["_output"]; -}; -export declare type ZodIssue = ZodIssueOptionalMessage & { - fatal?: boolean; - message: string; -}; -export interface ParseContext { - readonly common: { - readonly issues: ZodIssue[]; - readonly contextualErrorMap?: ZodErrorMap; - readonly async: boolean; - }; - readonly path: ParsePath; - readonly schemaErrorMap?: ZodErrorMap; - readonly parent: ParseContext | null; - readonly data: any; - readonly parsedType: ZodParsedType; -} -export declare type PassthroughType = T extends "passthrough" ? { - [k: string]: unknown; -} : unknown; -// export declare class ZodArray extends ZodType, ZodArrayDef, Cardinality extends "atleastone" ? 
[T["_input"], ...T["_input"][]] : T["_input"][]> { -// _parse(input: ParseInput): ParseReturnType; -// get element(): T; -// min(minLength: number, message?: errorUtil.ErrMessage): this; -// max(maxLength: number, message?: errorUtil.ErrMessage): this; -// length(len: number, message?: errorUtil.ErrMessage): this; -// nonempty(message?: errorUtil.ErrMessage): ZodArray; -// static create: (schema: T_1, params?: RawCreateParams) => ZodArray; -// } -//declare function createZodEnum>(values: T, params?: RawCreateParams): ZodEnum>; -//declare function createZodEnum(values: T, params?: RawCreateParams): ZodEnum; -// export declare class ZodEnum extends ZodType, T[number]> { -// #private; -// _parse(input: ParseInput): ParseReturnType; -// get options(): T; -// get enum(): Values; -// get Values(): Values; -// get Enum(): Values; -// extract(values: ToExtract, newDef?: RawCreateParams): ZodEnum>; -// exclude(values: ToExclude, newDef?: RawCreateParams): ZodEnum>, [string, ...string[]]>>; -// static create: typeof createZodEnum; -// } - -export declare type ZodErrorMap = (issue: ZodIssueOptionalMessage, _ctx: ErrorMapCtx) => { - message: string; -}; -// export declare class ZodNullable extends ZodType, T["_input"] | null> { -// _parse(input: ParseInput): ParseReturnType; -// unwrap(): T; -// static create: (type: T_1, params?: RawCreateParams) => ZodNullable; -// } -export declare type typecast = A extends T ? A : never; -export declare type Values = { - [k in T[number]]: k; -}; -export declare type Writeable = { - -readonly [P in keyof T]: T[P]; -}; -export interface ZodArrayDef extends ZodTypeDef { - type: T; - typeName: ZodFirstPartyTypeKind.ZodArray; - exactLength: { - value: number; - message?: string; - } | null; - minLength: { - value: number; - message?: string; - } | null; - maxLength: { - value: number; - message?: string; - } | null; -} -export declare enum ZodFirstPartyTypeKind { - ZodString = "ZodString", - ZodNumber = "ZodNumber", - ZodNaN = "ZodNaN", - ZodBigInt = "ZodBigInt", - ZodBoolean = "ZodBoolean", - ZodDate = "ZodDate", - ZodSymbol = "ZodSymbol", - ZodUndefined = "ZodUndefined", - ZodNull = "ZodNull", - ZodAny = "ZodAny", - ZodUnknown = "ZodUnknown", - ZodNever = "ZodNever", - ZodVoid = "ZodVoid", - ZodArray = "ZodArray", - ZodObject = "ZodObject", - ZodUnion = "ZodUnion", - ZodDiscriminatedUnion = "ZodDiscriminatedUnion", - ZodIntersection = "ZodIntersection", - ZodTuple = "ZodTuple", - ZodRecord = "ZodRecord", - ZodMap = "ZodMap", - ZodSet = "ZodSet", - ZodFunction = "ZodFunction", - ZodLazy = "ZodLazy", - ZodLiteral = "ZodLiteral", - ZodEnum = "ZodEnum", - ZodEffects = "ZodEffects", - ZodNativeEnum = "ZodNativeEnum", - ZodOptional = "ZodOptional", - ZodNullable = "ZodNullable", - ZodDefault = "ZodDefault", - ZodCatch = "ZodCatch", - ZodPromise = "ZodPromise", - ZodBranded = "ZodBranded", - ZodPipeline = "ZodPipeline", - ZodReadonly = "ZodReadonly" -} -export declare type EnumValues = readonly [T, ...T[]]; -export interface ZodEnumDef extends ZodTypeDef { - values: T; - typeName: ZodFirstPartyTypeKind.ZodEnum; -} -export declare type ZodIssueOptionalMessage = ZodInvalidTypeIssue -// | ZodInvalidLiteralIssue | ZodUnrecognizedKeysIssue | ZodInvalidUnionIssue | ZodInvalidUnionDiscriminatorIssue | ZodInvalidEnumValueIssue | ZodInvalidArgumentsIssue | ZodInvalidReturnTypeIssue | ZodInvalidDateIssue | ZodInvalidStringIssue | ZodTooSmallIssue | ZodTooBigIssue | ZodInvalidIntersectionTypesIssue | ZodNotMultipleOfIssue | ZodNotFiniteIssue | ZodCustomIssue; -; - - -export interface 
ZodNullableDef extends ZodTypeDef { - innerType: T; - typeName: ZodFirstPartyTypeKind.ZodNullable; -} - -export interface ZodObjectDef extends ZodTypeDef { - typeName: ZodFirstPartyTypeKind.ZodObject; - shape: () => T; - catchall: Catchall; - unknownKeys: UnknownKeys; -} -export declare class ZodOptional { -// extends ZodType, T["_input"] | undefined> { - //_parse(input: ParseInput): ParseReturnType; - //unwrap(): T; - //static create: (type: T_1, params?: RawCreateParams) => ZodOptional; -} -export declare type ZodParsedType = {}//= keyof typeof ZodParsedType; -export declare type ZodRawShape = { - [k: string]: ZodTypeAny; -}; -// export declare class ZodTuple extends ZodType, ZodTupleDef, InputTypeOfTupleWithRest> { -// _parse(input: ParseInput): ParseReturnType; -// get items(): T; -// rest(rest: Rest): ZodTuple; -// static create: (schemas: T_1, params?: RawCreateParams) => ZodTuple; -// } -//type ZodOptional={} -export declare type ZodTupleItems = [ZodTypeAny, ...ZodTypeAny[]]; -// export declare abstract class ZodType { -// readonly _type: Output; -// readonly _output: Output; -// readonly _input: Input; -// readonly _def: Def; -// get description(): string | undefined; -// abstract _parse(input: ParseInput): ParseReturnType; -// _getType(input: ParseInput): string; -// _getOrReturnCtx(input: ParseInput, ctx?: ParseContext | undefined): ParseContext; -// _processInputParams(input: ParseInput): { -// status: ParseStatus; -// ctx: ParseContext; -// }; -// _parseSync(input: ParseInput): SyncParseReturnType; -// _parseAsync(input: ParseInput): AsyncParseReturnType; -// parse(data: unknown, params?: Partial): Output; -// safeParse(data: unknown, params?: Partial): SafeParseReturnType; -// parseAsync(data: unknown, params?: Partial): Promise; -// safeParseAsync(data: unknown, params?: Partial): Promise>; -// /** Alias of safeParseAsync */ -// spa: (data: unknown, params?: Partial | undefined) => Promise>; -// //refine(check: (arg: Output) => arg is RefinedOutput, refinementData: IssueData | ((arg: Output, ctx: RefinementCtx) => IssueData)): ZodEffects; -// //=> arg is RefinedOutput, message?: string | CustomErrorParams | ((arg: Output) => CustomErrorParams)): ZodEffects; -// //refine(check: (arg: Output) => unknown | Promise, message?: string | CustomErrorParams | ((arg: Output) => CustomErrorParams)): ZodEffects; -// //refinement(check: (arg: Output) => arg is RefinedOutput, refinementData: IssueData | ((arg: Output, ctx: RefinementCtx) => IssueData)): ZodEffects; -// //refinement(check: (arg: Output) => boolean, refinementData: IssueData | ((arg: Output, ctx: RefinementCtx) => IssueData)): ZodEffects; -// //_refinement(refinement: RefinementEffect["refinement"]): ZodEffects; -// //superRefine(refinement: (arg: Output, ctx: RefinementCtx) => arg is RefinedOutput): ZodEffects; -// //superRefine(refinement: (arg: Output, ctx: RefinementCtx) => void): ZodEffects; -// //superRefine(refinement: (arg: Output, ctx: RefinementCtx) => Promise): ZodEffects; -// constructor(def: Def); -// optional(): ZodOptional; -// nullable(): ZodNullable; -// nullish(): ZodOptional>; -// array(): ZodArray; -// promise():undefined -// //: ZodPromise; -// or(option: T): ZodUnion<[this, T]>; -// and(incoming: T): ZodIntersection; -// transform(transform: (arg: Output, ctx: RefinementCtx) => NewOut | Promise): ZodEffects; -// default(def: util.noUndefined): ZodDefault; -// default(def: () => util.noUndefined): ZodDefault; -// brand(brand?: B): ZodBranded; -// catch(def: Output): ZodCatch; -// catch(def: (ctx: { -// 
error: ZodError; -// input: Input; -// }) => Output): ZodCatch; -// describe(description: string): this; -// pipe(target: T): ZodPipeline; -// readonly(): ZodReadonly; -// isOptional(): boolean; -// isNullable(): boolean; -// } - -export declare type ZodTypeAny = ZodType; - - -export declare type ParsePath = ParsePathComponent[]; - -export declare type FilterEnum = Values extends [] ? [] : Values extends [infer Head, ...infer Rest] ? Head extends ToExclude ? FilterEnum : [Head, ...FilterEnum] : never; - -export declare type ErrorMapCtx = { - defaultError: string; - data: any; -}; - -export declare type arrayOutputType = Cardinality extends "atleastone" ? [T["_output"], ...T["_output"][]] : T["_output"][]; - -export declare type ArrayCardinality = "many" | "atleastone"; - -export declare type CatchallInput = ZodType extends T ? unknown : { - [k: string]: T["_input"]; -}; - -export declare namespace util { - type AssertEqual = (() => V extends T ? 1 : 2) extends () => V extends U ? 1 : 2 ? true : false; - export type isAny = 0 extends 1 & T ? true : false; - export const assertEqual: (val: AssertEqual) => AssertEqual; - export function assertIs(_arg: T): void; - export function assertNever(_x: never): never; - export type Omit = Pick>; - export type OmitKeys = Pick>; - export type MakePartial = Omit & Partial>; - export type Exactly = T & Record, never>; - export const arrayToEnum: (items: U) => { [k in U[number]]: k; }; - export const getValidEnumValues: (obj: any) => any[]; - export const objectValues: (obj: any) => any[]; - export const objectKeys: ObjectConstructor["keys"]; - export const find: (arr: T[], checker: (arg: T) => any) => T | undefined; - export type identity = objectUtil.identity; - export type flatten = objectUtil.flatten; - export type noUndefined = T extends undefined ? never : T; - export const isInteger: NumberConstructor["isInteger"]; - export function joinValues(array: T, separator?: string): string; - export const jsonStringifyReplacer: (_: string, value: any) => any; - export {}; -} -export declare type UnknownKeysParam = "passthrough" | "strict" | "strip"; -export interface ParseResult { - status: "aborted" | "dirty" | "valid"; - data: any; -} -export declare type INVALID = { - status: "aborted"; -}; -export declare const INVALID: INVALID; -export declare type DIRTY = { - status: "dirty"; - value: T; -}; -export declare const DIRTY: (value: T) => DIRTY; -export declare type OK = { - status: "valid"; - value: T; -}; -export declare type SyncParseReturnType = OK | DIRTY | INVALID; -export declare type AsyncParseReturnType = Promise>; -export declare type ParseReturnType = SyncParseReturnType | AsyncParseReturnType; - -export declare type RawCreateParams = { - errorMap?: ZodErrorMap; - invalid_type_error?: string; - required_error?: string; - message?: string; - description?: string; -} | undefined; - - - -export declare type ParseInput = { - data: any; - path: (string | number)[]; - parent: ParseContext; -}; - -export declare namespace objectUtil { - export type MergeShapes = { - [k in Exclude]: U[k]; - } & V; - type optionalKeys = { - [k in keyof T]: undefined extends T[k] ? k : never; - }[keyof T]; - type requiredKeys = { - [k in keyof T]: undefined extends T[k] ? 
never : k; - }[keyof T]; - export type addQuestionMarks = { - [K in requiredKeys]: T[K]; - } & { - [K in optionalKeys]?: T[K]; - } & { - [k in keyof T]?: unknown; - }; - export type identity = T; - export type flatten = identity<{ - [k in keyof T]: T[k]; - }>; - export type noNeverKeys = { - [k in keyof T]: [T[k]] extends [never] ? never : k; - }[keyof T]; - export type noNever = identity<{ - [k in noNeverKeys]: k extends keyof T ? T[k] : never; - }>; - export const mergeShapes: (first: U, second: T) => T & U; - export type extendShape = { - [K in keyof A as K extends keyof B ? never : K]: A[K]; - } & { - [K in keyof B]: B[K]; - }; - export {}; -} -export declare type objectOutputType = objectUtil.flatten>> & CatchallOutput & PassthroughType; - -export declare type objectInputType = objectUtil.flatten> & CatchallInput & PassthroughType; - -//export declare type deoptional = T extends ZodOptional ? deoptional : T extends ZodNullable ? ZodNullable> : T; - -export declare type baseObjectOutputType = { - [k in keyof Shape]: Shape[k]["_output"]; -}; - -export declare type baseObjectInputType = objectUtil.addQuestionMarks<{ - [k in keyof Shape]: Shape[k]["_input"]; -}>; -export declare type AnyZodObject = ZodObject; -//import { AgentAction, AgentFinish } from "@langchain/core/dist/agents.js"; -export type AgentAction = { - tool: string; - toolInput: string | Record; - log: string; -}; -export type AgentFinish = { - returnValues: Record; - log: string; -}; -export type AgentStep = { - action: AgentAction; - observation: string; -}; - -//import { NewTokenIndices, HandleLLMNewTokenCallbackFields, BaseCallbackHandler } from "@langchain/core/dist/callbacks/base.js"; -export interface BaseCallbackHandlerInput { - ignoreLLM?: boolean; - ignoreChain?: boolean; - ignoreAgent?: boolean; - ignoreRetriever?: boolean; - ignoreCustomEvent?: boolean; - _awaitHandler?: boolean; - raiseError?: boolean; -} -export declare abstract class BaseCallbackHandler extends BaseCallbackHandlerMethodsClass implements BaseCallbackHandlerInput, Serializable { - lc_serializable: boolean; - get lc_namespace(): ["langchain_core", "callbacks", string]; - get lc_secrets(): { - [key: string]: string; - } | undefined; - get lc_attributes(): { - [key: string]: string; - } | undefined; - get lc_aliases(): { - [key: string]: string; - } | undefined; - /** - * The name of the serializable. Override to provide an alias or - * to preserve the serialized module name in minified environments. - * - * Implemented as a static method to support loading logic. - */ - static lc_name(): string; - /** - * The final serialized identifier for the module. - */ - get lc_id(): string[]; - lc_kwargs: SerializedFields; - abstract name: string; - ignoreLLM: boolean; - ignoreChain: boolean; - ignoreAgent: boolean; - ignoreRetriever: boolean; - ignoreCustomEvent: boolean; - raiseError: boolean; - awaitHandlers: boolean; - constructor(input?: BaseCallbackHandlerInput); - copy(): BaseCallbackHandler; - toJSON(): Serialized; - toJSONNotImplemented(): SerializedNotImplemented; - static fromMethods(methods: CallbackHandlerMethods): { - name: string; - lc_serializable: boolean; - readonly lc_namespace: ["langchain_core", "callbacks", string]; - readonly lc_secrets: { - [key: string]: string; - } | undefined; - readonly lc_attributes: { - [key: string]: string; - } | undefined; - readonly lc_aliases: { - [key: string]: string; - } | undefined; - /** - * The final serialized identifier for the module. 
- */ - readonly lc_id: string[]; - lc_kwargs: SerializedFields; - ignoreLLM: boolean; - ignoreChain: boolean; - ignoreAgent: boolean; - ignoreRetriever: boolean; - ignoreCustomEvent: boolean; - raiseError: boolean; - awaitHandlers: boolean; - copy(): BaseCallbackHandler; - toJSON(): Serialized; - toJSONNotImplemented(): SerializedNotImplemented; - /** - * Called at the start of an LLM or Chat Model run, with the prompt(s) - * and the run ID. - */ - handleLLMStart?(llm: Serialized, prompts: string[], runId: string, parentRunId?: string | undefined, extraParams?: Record | undefined, tags?: string[] | undefined, metadata?: Record | undefined, runName?: string | undefined): any; - /** - * Called when an LLM/ChatModel in `streaming` mode produces a new token - */ - handleLLMNewToken?(token: string, idx: NewTokenIndices, runId: string, parentRunId?: string | undefined, tags?: string[] | undefined, fields?: HandleLLMNewTokenCallbackFields | undefined): any; - /** - * Called if an LLM/ChatModel run encounters an error - */ - handleLLMError?(err: any, runId: string, parentRunId?: string | undefined, tags?: string[] | undefined): any; - /** - * Called at the end of an LLM/ChatModel run, with the output and the run ID. - */ - handleLLMEnd?(output: LLMResult, runId: string, parentRunId?: string | undefined, tags?: string[] | undefined): any; - /** - * Called at the start of a Chat Model run, with the prompt(s) - * and the run ID. - */ - handleChatModelStart?(llm: Serialized, messages: BaseMessage[][], runId: string, parentRunId?: string | undefined, extraParams?: Record | undefined, tags?: string[] | undefined, metadata?: Record | undefined, runName?: string | undefined): any; - /** - * Called at the start of a Chain run, with the chain name and inputs - * and the run ID. - */ - handleChainStart?(chain: Serialized, inputs: ChainValues, runId: string, parentRunId?: string | undefined, tags?: string[] | undefined, metadata?: Record | undefined, runType?: string | undefined, runName?: string | undefined): any; - /** - * Called if a Chain run encounters an error - */ - handleChainError?(err: any, runId: string, parentRunId?: string | undefined, tags?: string[] | undefined, kwargs?: { - inputs?: Record | undefined; - } | undefined): any; - /** - * Called at the end of a Chain run, with the outputs and the run ID. - */ - handleChainEnd?(outputs: ChainValues, runId: string, parentRunId?: string | undefined, tags?: string[] | undefined, kwargs?: { - inputs?: Record | undefined; - } | undefined): any; - /** - * Called at the start of a Tool run, with the tool name and input - * and the run ID. - */ - handleToolStart?(tool: Serialized, input: string, runId: string, parentRunId?: string | undefined, tags?: string[] | undefined, metadata?: Record | undefined, runName?: string | undefined): any; - /** - * Called if a Tool run encounters an error - */ - handleToolError?(err: any, runId: string, parentRunId?: string | undefined, tags?: string[] | undefined): any; - /** - * Called at the end of a Tool run, with the tool output and the run ID. - */ - handleToolEnd?(output: any, runId: string, parentRunId?: string | undefined, tags?: string[] | undefined): any; - handleText?(text: string, runId: string, parentRunId?: string | undefined, tags?: string[] | undefined): void | Promise; - /** - * Called when an agent is about to execute an action, - * with the action and the run ID. 
- */ - handleAgentAction?(action: AgentAction, runId: string, parentRunId?: string | undefined, tags?: string[] | undefined): void | Promise; - /** - * Called when an agent finishes execution, before it exits. - * with the final output and the run ID. - */ - handleAgentEnd?(action: AgentFinish, runId: string, parentRunId?: string | undefined, tags?: string[] | undefined): void | Promise; - handleRetrieverStart?(retriever: Serialized, query: string, runId: string, parentRunId?: string | undefined, tags?: string[] | undefined, metadata?: Record | undefined, name?: string | undefined): any; - handleRetrieverEnd?(documents: DocumentInterface>[], runId: string, parentRunId?: string | undefined, tags?: string[] | undefined): any; - handleRetrieverError?(err: any, runId: string, parentRunId?: string | undefined, tags?: string[] | undefined): any; - handleCustomEvent?(eventName: string, data: any, runId: string, tags?: string[] | undefined, metadata?: Record | undefined): any; - }; -} -export interface NewTokenIndices { - prompt: number; - completion: number; -} -export type HandleLLMNewTokenCallbackFields = { - chunk?: GenerationChunk | ChatGenerationChunk; -}; -//import { DocumentInterface } from "@langchain/core/dist/documents/document.js"; -//import { ChainValues } from "@langchain/core/dist/utils/types/index.js"; -export interface DocumentInterface = Record> { - pageContent: string; - metadata: Metadata; - /** - * An optional identifier for the document. - * - * Ideally this should be unique across the document collection and formatted - * as a UUID, but this will not be enforced. - */ - id?: string; -} -export declare const RUN_KEY = "__run"; -export type LLMResult = { - /** - * List of the things generated. Each input could have multiple {@link Generation | generations}, hence this is a list of lists. - */ - generations: Generation[][]; - /** - * Dictionary of arbitrary LLM-provider specific output. - */ - llmOutput?: Record; - /** - * Dictionary of run metadata - */ - [RUN_KEY]?: Record; -}; -export type GenerationChunkFields = { - text: string; - generationInfo?: Record; -}; -export declare class GenerationChunk implements Generation { - text: string; - generationInfo?: Record; - constructor(fields: GenerationChunkFields); - concat(chunk: GenerationChunk): GenerationChunk; -} - -export interface ChatResult { - generations: ChatGeneration[]; - llmOutput?: Record; -} -export type ChatGenerationChunkFields = GenerationChunkFields & { - message: BaseMessageChunk; -}; -export interface ChatGeneration extends Generation { - message: BaseMessage; -} -//import { , , , , ToolDefinition } from "@langchain/core/language_models/base.js"; -export interface ToolDefinition { - type: "function"; - function: FunctionDefinition; -} - -export type StructuredOutputMethodOptions = { - name?: string; - method?: "functionCalling" | "jsonMode" | "jsonSchema" | string; - includeRaw?: IncludeRaw; - /** Whether to use strict mode. Currently only supported by OpenAI models. */ - strict?: boolean; -}; - -export type SerializedLLM = { - _model: string; - _type: string; -} & Record; - -export type BaseLanguageModelInput = BasePromptValueInterface | string | BaseMessageLike[]; -const getVerbosity = () => false; -export abstract class BaseLangChain< - RunInput, - RunOutput, - CallOptions extends RunnableConfig = RunnableConfig - > - extends Runnable - implements BaseLangChainParams -{ - /** - * Whether to print out response text. 
- */ - verbose: boolean; - - callbacks?: Callbacks; - - tags?: string[]; - - metadata?: Record; - - get lc_attributes(): { [key: string]: undefined } | undefined { - return { - callbacks: undefined, - verbose: undefined, - }; - } - - constructor(params: BaseLangChainParams) { - super(params); - this.verbose = params.verbose ?? getVerbosity(); - this.callbacks = params.callbacks; - this.tags = params.tags ?? []; - this.metadata = params.metadata ?? {}; - } -} -export interface BaseLanguageModelInterface< - // eslint-disable-next-line @typescript-eslint/no-explicit-any - RunOutput = any, - CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions -> extends RunnableInterface { - get callKeys(): string[]; - - generatePrompt( - promptValues: BasePromptValueInterface[], - options?: string[] | CallOptions, - callbacks?: Callbacks - ): Promise; - - /** - * @deprecated Use .invoke() instead. Will be removed in 0.2.0. - */ - predict( - text: string, - options?: string[] | CallOptions, - callbacks?: Callbacks - ): Promise; - - /** - * @deprecated Use .invoke() instead. Will be removed in 0.2.0. - */ - predictMessages( - messages: BaseMessage[], - options?: string[] | CallOptions, - callbacks?: Callbacks - ): Promise; - - _modelType(): string; - - _llmType(): string; - - getNumTokens(content: MessageContent): Promise; - - /** - * Get the identifying parameters of the LLM. - */ - // eslint-disable-next-line @typescript-eslint/no-explicit-any - _identifyingParams(): Record; - - serialize(): SerializedLLM; -} -export declare abstract class BaseLanguageModel extends BaseLangChain implements BaseLanguageModelParams, BaseLanguageModelInterface { - /** - * Keys that the language model accepts as call options. - */ - get callKeys(): string[]; - /** - * The async caller should be used by subclasses to make any async calls, - * which will thus benefit from the concurrency and retry logic. - */ - caller: AsyncCaller; - cache?: BaseCache; - constructor({ callbacks, callbackManager, ...params }: BaseLanguageModelParams); - abstract generatePrompt(promptValues: BasePromptValueInterface[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise; - /** - * @deprecated Use .invoke() instead. Will be removed in 0.2.0. - */ - abstract predict(text: string, options?: string[] | CallOptions, callbacks?: Callbacks): Promise; - /** - * @deprecated Use .invoke() instead. Will be removed in 0.2.0. - */ - abstract predictMessages(messages: BaseMessage[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise; - abstract _modelType(): string; - abstract _llmType(): string; - private _encoding?; - getNumTokens(content: MessageContent): Promise; - protected static _convertInputToPromptValue(input: BaseLanguageModelInput): BasePromptValueInterface; - /** - * Get the identifying parameters of the LLM. - */ - _identifyingParams(): Record; - /** - * Create a unique cache key for a specific call to a specific language model. - * @param callOptions Call options for the model - * @returns A unique cache key. - */ - _getSerializedCacheKeyParametersForCall({ config, ...callOptions }: CallOptions & { - config?: RunnableConfig; - }): string; - /** - * @deprecated - * Return a json-like object representing this LLM. - */ - serialize(): SerializedLLM; - /** - * @deprecated - * Load an LLM from a json-like object describing it. - */ - static deserialize(_data: SerializedLLM): Promise; - withStructuredOutput? 
= Record>(schema: z.ZodType | Record, config?: StructuredOutputMethodOptions): Runnable; - withStructuredOutput? = Record>(schema: z.ZodType | Record, config?: StructuredOutputMethodOptions): Runnable; - /** - * Model wrapper that returns outputs formatted to match the given schema. - * - * @template {BaseLanguageModelInput} RunInput The input type for the Runnable, expected to be the same input for the LLM. - * @template {Record} RunOutput The output type for the Runnable, expected to be a Zod schema object for structured output validation. - * - * @param {z.ZodEffects} schema The schema for the structured output. Either as a Zod schema or a valid JSON schema object. - * If a Zod schema is passed, the returned attributes will be validated, whereas with JSON schema they will not be. - * @param {string} name The name of the function to call. - * @param {"functionCalling" | "jsonMode"} [method=functionCalling] The method to use for getting the structured output. Defaults to "functionCalling". - * @param {boolean | undefined} [includeRaw=false] Whether to include the raw output in the result. Defaults to false. - * @returns {Runnable | Runnable} A new runnable that calls the LLM with structured output. - */ - withStructuredOutput? = Record>(schema: z.ZodType | Record, config?: StructuredOutputMethodOptions): Runnable | Runnable; -} - -//import { , RunnableMapLike } from "@langchain/core/dist/runnables/base.js"; -export type RunnableMapLike = { - [K in keyof RunOutput]: RunnableLike; -}; -//import { Graph } from "@langchain/core/dist/runnables/graph.js"; -export type RunnableIOSchema = { - name?: string; - schema: z.ZodType; - }; -export class Node{}; -export class Edge {}; -export declare class Graph { - nodes: Record; - edges: Edge[]; - constructor(params?: { - nodes: Record; - edges: Edge[]; - }); - toJSON(): Record; - addNode(data: RunnableInterface | RunnableIOSchema, id?: string, metadata?: Record): Node; - removeNode(node: Node): void; - addEdge(source: Node, target: Node, data?: string, conditional?: boolean): Edge; - firstNode(): Node | undefined; - lastNode(): Node | undefined; - /** - * Add all nodes and edges from another graph. - * Note this doesn't check for duplicates, nor does it connect the graphs. - */ - extend(graph: Graph, prefix?: string): ({ - id: string; - data: RunnableIOSchema | RunnableInterface>>; - } | undefined)[]; - trimFirstNode(): void; - trimLastNode(): void; - /** - * Return a new graph with all nodes re-identified, - * using their unique, readable names where possible. 
- */ - reid(): Graph; - drawMermaid(params?: { - withStyles?: boolean; - curveStyle?: string; - nodeColors?: Record; - wrapLabelNWords?: number; - }): string; - drawMermaidPng(params?: { - withStyles?: boolean; - curveStyle?: string; - nodeColors?: Record; - wrapLabelNWords?: number; - backgroundColor?: string; - }): Promise; -} - -// import { EventStreamCallbackHandlerInput, StreamEvent } from "@langchain/core/dist/tracers/event_stream.js"; -// import { LogStreamCallbackHandlerInput, RunLogPatch, LogStreamCallbackHandler } from "@langchain/core/dist/tracers/log_stream.js"; -// import { IterableReadableStream, IterableReadableStreamInterface } from "@langchain/core/dist/utils/stream.js"; -// import { RunnableIOSchema } from "@langchain/core/runnables.js"; -// import { BaseCallbackHandlerInput } from "@langchain/core/dist/callbacks/base.js"; -// import { BaseLangChain, BaseLanguageModelInterface } from "@langchain/core/language_models/base.js"; -// import { RUN_KEY, GenerationChunkFields } from "@langchain/core/outputs.js"; -// import { AsyncCaller } from "langsmith/dist/utils/async_caller.js"; -// import { EnumValues, ZodTypeDef, ZodInvalidTypeIssue, ZodInvalidLiteralIssue, ZodUnrecognizedKeysIssue, ZodInvalidUnionIssue, ZodInvalidUnionDiscriminatorIssue, ZodInvalidEnumValueIssue, ZodInvalidArgumentsIssue, ZodInvalidReturnTypeIssue, ZodInvalidDateIssue, ZodInvalidStringIssue, ZodTooSmallIssue, ZodTooBigIssue, ZodInvalidIntersectionTypesIssue, ZodNotMultipleOfIssue, ZodNotFiniteIssue, ZodCustomIssue, ZodOptionalDef, ZodParsedType, OutputTypeOfTupleWithRest, ZodTupleDef, InputTypeOfTupleWithRest, ParseStatus, ParseParams, SafeParseReturnType, CustomErrorParams, ZodEffects, IssueData, RefinementCtx, RefinementEffect, ZodPromise, ZodUnion, ZodIntersection, ZodDefault, ZodBranded, ZodCatch, ZodError, ZodPipeline, ZodReadonly, ParsePathComponent, z } from "zod"; -//import { , , , RunnableToolLikeArgs } from "@langchain/core/runnables.js"; -export interface RunnableToolLikeArgs extends Omit, RunOutput>, "config"> { - name: string; - description?: string; - schema: RunInput; - config?: RunnableConfig; -} -interface IterableReadableStreamInterface {} -class LogStreamCallbackHandlerInput {} -//class IterableReadableStream implements IterableReadableStreamInterface {} - -export class IterableReadableStream - extends ReadableStream - implements IterableReadableStreamInterface -{ - public reader!: ReadableStreamDefaultReader; - - ensureReader() { - if (!this.reader) { - this.reader = this.getReader(); - } - } - - async next(): Promise> { - this.ensureReader(); - try { - const result = await this.reader.read(); - if (result.done) { - this.reader.releaseLock(); // release lock when stream becomes closed - return { - done: true, - value: undefined, - }; - } else { - return { - done: false, - value: result.value, - }; - } - } catch (e) { - this.reader.releaseLock(); // release lock when stream becomes errored - throw e; - } - } - - async return(): Promise> { - this.ensureReader(); - // If wrapped in a Node stream, cancel is already called. 
- if (this.locked) { - const cancelPromise = this.reader.cancel(); // cancel first, but don't await yet - this.reader.releaseLock(); // release lock first - await cancelPromise; // now await it - } - return { done: true, value: undefined }; - } - - // eslint-disable-next-line @typescript-eslint/no-explicit-any - async throw(e: any): Promise> { - this.ensureReader(); - if (this.locked) { - const cancelPromise = this.reader.cancel(); // cancel first, but don't await yet - this.reader.releaseLock(); // release lock first - await cancelPromise; // now await it - } - throw e; - } - - [Symbol.asyncIterator]() { - return this; - } - - // eslint-disable-next-line @typescript-eslint/ban-ts-comment - // @ts-ignore Not present in Node 18 types, required in latest Node 22 - async [Symbol.asyncDispose]() { - await this.return(); - } - - static fromReadableStream(stream: ReadableStream) { - // From https://developer.mozilla.org/en-US/docs/Web/API/Streams_API/Using_readable_streams#reading_the_stream - const reader = stream.getReader(); - return new IterableReadableStream({ - start(controller) { - return pump(); - function pump(): Promise { - return reader.read().then(({ done, value }) => { - // When no more data needs to be consumed, close the stream - if (done) { - controller.close(); - return; - } - // Enqueue the next data chunk into our target stream - controller.enqueue(value); - return pump(); - }); - } - }, - cancel() { - reader.releaseLock(); - }, - }); - } - - static fromAsyncGenerator(generator: AsyncGenerator) { - return new IterableReadableStream({ - async pull(controller) { - const { value, done } = await generator.next(); - // When no more data needs to be consumed, close the stream - if (done) { - controller.close(); - } - // Fix: `else if (value)` will hang the streaming when nullish value (e.g. 
empty string) is pulled - controller.enqueue(value); - }, - async cancel(reason) { - await generator.return(reason); - }, - }); - } -} - -export function atee( - iter: AsyncGenerator, - length = 2 -): AsyncGenerator[] { - const buffers = Array.from( - { length }, - () => [] as Array | IteratorReturnResult> - ); - return buffers.map(async function* makeIter(buffer) { - while (true) { - if (buffer.length === 0) { - const result = await iter.next(); - for (const buffer of buffers) { - buffer.push(result); - } - } else if (buffer[0].done) { - return; - } else { - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - yield buffer.shift()!.value; - } - } - }); -} - -export function concat< - // eslint-disable-next-line @typescript-eslint/no-explicit-any - T extends Array | string | number | Record | any ->(first: T, second: T): T { - if (Array.isArray(first) && Array.isArray(second)) { - return first.concat(second) as T; - } else if (typeof first === "string" && typeof second === "string") { - return (first + second) as T; - } else if (typeof first === "number" && typeof second === "number") { - return (first + second) as T; - } else if ( - // eslint-disable-next-line @typescript-eslint/no-explicit-any - "concat" in (first as any) && - // eslint-disable-next-line @typescript-eslint/no-explicit-any - typeof (first as any).concat === "function" - ) { - // eslint-disable-next-line @typescript-eslint/no-explicit-any - return (first as any).concat(second) as T; - } else if (typeof first === "object" && typeof second === "object") { - // eslint-disable-next-line @typescript-eslint/no-explicit-any - const chunk = { ...first } as Record; - // eslint-disable-next-line @typescript-eslint/no-explicit-any - for (const [key, value] of Object.entries(second as Record)) { - if (key in chunk && !Array.isArray(chunk[key])) { - chunk[key] = concat(chunk[key], value); - } else { - chunk[key] = value; - } - } - return chunk as T; - } else { - throw new Error(`Cannot concat ${typeof first} and ${typeof second}`); - } -} -export type StreamEventData = { - /** - * The input passed to the runnable that generated the event. - * Inputs will sometimes be available at the *START* of the runnable, and - * sometimes at the *END* of the runnable. - * If a runnable is able to stream its inputs, then its input by definition - * won't be known until the *END* of the runnable when it has finished streaming - * its inputs. - */ - // eslint-disable-next-line @typescript-eslint/no-explicit-any - input?: any; - - /** - * The output of the runnable that generated the event. - * Outputs will only be available at the *END* of the runnable. - * For most runnables, this field can be inferred from the `chunk` field, - * though there might be some exceptions for special cased runnables (e.g., like - * chat models), which may return more information. - */ - // eslint-disable-next-line @typescript-eslint/no-explicit-any - output?: any; - - /** - * A streaming chunk from the output that generated the event. - * chunks support addition in general, and adding them up should result - * in the output of the runnable that generated the event. - */ - // eslint-disable-next-line @typescript-eslint/no-explicit-any - chunk?: any; - }; - -export type StreamEvent = { - /** - * Event names are of the format: on_[runnable_type]_(start|stream|end). 
- * - * Runnable types are one of: - * - llm - used by non chat models - * - chat_model - used by chat models - * - prompt -- e.g., ChatPromptTemplate - * - tool -- LangChain tools - * - chain - most Runnables are of this type - * - * Further, the events are categorized as one of: - * - start - when the runnable starts - * - stream - when the runnable is streaming - * - end - when the runnable ends - * - * start, stream and end are associated with slightly different `data` payload. - * - * Please see the documentation for `EventData` for more details. - */ - event: string; - /** The name of the runnable that generated the event. */ - name: string; - /** - * An randomly generated ID to keep track of the execution of the given runnable. - * - * Each child runnable that gets invoked as part of the execution of a parent runnable - * is assigned its own unique ID. - */ - run_id: string; - /** - * Tags associated with the runnable that generated this event. - * Tags are always inherited from parent runnables. - */ - tags?: string[]; - /** Metadata associated with the runnable that generated this event. */ - // eslint-disable-next-line @typescript-eslint/no-explicit-any - metadata: Record; - /** - * Event data. - * - * The contents of the event data depend on the event type. - */ - // eslint-disable-next-line @typescript-eslint/no-explicit-any - data: StreamEventData; - }; -export declare class RunnableBinding extends Runnable { - static lc_name(): string; - lc_namespace: string[]; - lc_serializable: boolean; - bound: Runnable; - config: RunnableConfig; - kwargs?: Partial; - configFactories?: Array<(config: RunnableConfig) => RunnableConfig | Promise>; - constructor(fields: RunnableBindingArgs); - getName(suffix?: string | undefined): string; - _mergeConfig(...options: (Partial | RunnableConfig | undefined)[]): Promise>; - bind(kwargs: Partial): RunnableBinding; - withConfig(config: RunnableConfig): Runnable; - withRetry(fields?: { - stopAfterAttempt?: number; - onFailedAttempt?: RunnableRetryFailedAttemptHandler; - }): RunnableRetry; - invoke(input: RunInput, options?: Partial): Promise; - batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions & { - returnExceptions?: false; - }): Promise; - batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions & { - returnExceptions: true; - }): Promise<(RunOutput | Error)[]>; - batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>; - _streamIterator(input: RunInput, options?: Partial | undefined): AsyncGenerator, void, unknown>; - //stream(input: RunInput, options?: Partial | undefined): Promise; - transform(generator: AsyncGenerator, options?: Partial): AsyncGenerator; - streamEvents(input: RunInput, options: Partial & { - version: "v1" | "v2"; - }, streamOptions?: Omit): IterableReadableStream; - streamEvents(input: RunInput, options: Partial & { - version: "v1" | "v2"; - encoding: "text/event-stream"; - }, streamOptions?: Omit): IterableReadableStream; - static isRunnableBinding(thing: any): thing is RunnableBinding; - /** - * Bind lifecycle listeners to a Runnable, returning a new Runnable. - * The Run object contains information about the run, including its id, - * type, input, output, error, startTime, endTime, and any tags or metadata - * added to the run. - * - * @param {Object} params - The object containing the callback functions. 
- * @param {(run: Run) => void} params.onStart - Called before the runnable starts running, with the Run object. - * @param {(run: Run) => void} params.onEnd - Called after the runnable finishes running, with the Run object. - * @param {(run: Run) => void} params.onError - Called if the runnable throws an error, with the Run object. - */ - withListeners({ onStart, onEnd, onError, }: { - onStart?: (run: Run, config?: RunnableConfig) => void | Promise; - onEnd?: (run: Run, config?: RunnableConfig) => void | Promise; - onError?: (run: Run, config?: RunnableConfig) => void | Promise; - }): Runnable; -} - -export type RunnableFunc = (input: RunInput, options: CallOptions | Record | (Record & CallOptions)) => RunOutput | Promise; -export type RunnableLike = RunnableInterface | RunnableFunc | RunnableMapLike; - -export type RunnableBatchOptions = { - /** @deprecated Pass in via the standard runnable config object instead */ - maxConcurrency?: number; - returnExceptions?: boolean; -}; - -export declare class RunnableWithFallbacks extends Runnable { - static lc_name(): string; - lc_namespace: string[]; - lc_serializable: boolean; - runnable: Runnable; - fallbacks: Runnable[]; - constructor(fields: { - runnable: Runnable; - fallbacks: Runnable[]; - }); - runnables(): Generator>>, void, unknown>; - invoke(input: RunInput, options?: Partial): Promise; - _streamIterator(input: RunInput, options?: Partial | undefined): AsyncGenerator; - batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions & { - returnExceptions?: false; - }): Promise; - batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions & { - returnExceptions: true; - }): Promise<(RunOutput | Error)[]>; - batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>; -} -export declare class RunnableRetry extends RunnableBinding { - static lc_name(): string; - lc_namespace: string[]; - protected maxAttemptNumber: number; - onFailedAttempt: RunnableRetryFailedAttemptHandler; - constructor(fields: RunnableBindingArgs & { - maxAttemptNumber?: number; - onFailedAttempt?: RunnableRetryFailedAttemptHandler; - }); - _patchConfigForRetry(attempt: number, config?: Partial, runManager?: CallbackManagerForChainRun): Partial; - protected _invoke(input: RunInput, config?: CallOptions, runManager?: CallbackManagerForChainRun): Promise; - /** - * Method that invokes the runnable with the specified input, run manager, - * and config. It handles the retry logic by catching any errors and - * recursively invoking itself with the updated config for the next retry - * attempt. - * @param input The input for the runnable. - * @param runManager The run manager for the runnable. - * @param config The config for the runnable. - * @returns A promise that resolves to the output of the runnable. 
- */ - invoke(input: RunInput, config?: CallOptions): Promise; - _batch(inputs: RunInput[], configs?: RunnableConfig[], runManagers?: (CallbackManagerForChainRun | undefined)[], batchOptions?: RunnableBatchOptions): Promise; - batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions & { - returnExceptions?: false; - }): Promise; - batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions & { - returnExceptions: true; - }): Promise<(RunOutput | Error)[]>; - batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>; -} -export type RunnableRetryFailedAttemptHandler = (error: any, input: any) => any; -export interface RunnableInterface extends SerializableInterface { - lc_serializable: boolean; - invoke(input: RunInput, options?: Partial): Promise; - batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions & { - returnExceptions?: false; - }): Promise; - batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions & { - returnExceptions: true; - }): Promise<(RunOutput | Error)[]>; - batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>; - stream(input: RunInput, options?: Partial): Promise>; - transform(generator: AsyncGenerator, options: Partial): AsyncGenerator; - getName(suffix?: string): string; -} - -//import { enumUtil } from "zod/lib/helpers/enumUtil.js"; -export declare namespace enumUtil { - type UnionToIntersectionFn = (T extends unknown ? (k: () => T) => void : never) extends (k: infer Intersection) => void ? Intersection : never; - type GetUnionLast = UnionToIntersectionFn extends () => infer Last ? Last : never; - type UnionToTuple = [T] extends [never] ? Tuple : UnionToTuple>, [GetUnionLast, ...Tuple]>; - type CastToStringTuple = T extends [string, ...string[]] ? T : never; - export type UnionToTupleString = CastToStringTuple>; - export {}; -} - -//import { errorUtil } from "zod/lib/helpers/errorUtil.js"; -export declare namespace errorUtil { - type ErrMessage = string | { - message?: string; - }; - const errToObj: (message?: ErrMessage | undefined) => { - message?: string | undefined; - }; - const toString: (message?: ErrMessage | undefined) => string | undefined; -} - -//import { partialUtil } from "zod/lib/helpers/partialUtil.js"; -export declare namespace partialUtil { - type DeepPartial = T extends ZodObject ? ZodObject<{ - [k in keyof T["shape"]]: ZodOptional>; - }, T["_def"]["unknownKeys"], T["_def"]["catchall"]> : T extends ZodArray ? ZodArray, Card> : T extends ZodOptional ? ZodOptional> : T extends ZodNullable ? ZodNullable> : T extends ZodTuple ? { - [k in keyof Items]: Items[k] extends ZodTypeAny ? DeepPartial : never; - } extends infer PI ? PI extends ZodTupleItems ? ZodTuple : never : never : T; -} - -//import { Attachments } from "langsmith/dist/schemas.js"; -export type KVMap = Record; -//import { BaseRun, KVMap } from "langsmith/dist/schemas.js"; -export interface TracerSession { - tenant_id: string; - id: string; - start_time: number; - end_time?: number; - description?: string; - name?: string; - /** Extra metadata for the project. 
*/ - extra?: KVMap; - reference_dataset_id?: string; -} -export interface TracerSessionResult extends TracerSession { - run_count?: number; - latency_p50?: number; - latency_p99?: number; - total_tokens?: number; - prompt_tokens?: number; - completion_tokens?: number; - last_run_start_time?: number; - feedback_stats?: Record; - run_facets?: KVMap[]; -} -//export type KVMap = Record; -export type RunType = "llm" | "chain" | "tool" | "retriever" | "embedding" | "prompt" | "parser"; -export type ScoreType = number | boolean | null; -export type ValueType = number | boolean | string | object | null; -export type DataType = "kv" | "llm" | "chat"; -export interface BaseExample { - dataset_id: string; - inputs: KVMap; - outputs?: KVMap; - metadata?: KVMap; - source_run_id?: string; -} -export interface AttachmentInfo { - presigned_url: string; - mime_type?: string; -} -export type AttachmentData = Uint8Array | ArrayBuffer; -export type AttachmentDescription = { - mimeType: string; - data: AttachmentData; -}; -export type Attachments = Record; - -export interface BaseRun { - /** Optionally, a unique identifier for the run. */ - id?: string; - /** A human-readable name for the run. */ - name: string; - /** The epoch time at which the run started, if available. */ - start_time?: number; - /** Specifies the type of run (tool, chain, llm, etc.). */ - run_type: string; - /** The epoch time at which the run ended, if applicable. */ - end_time?: number; - /** Any additional metadata or settings for the run. */ - extra?: KVMap; - /** Error message, captured if the run faces any issues. */ - error?: string; - /** Serialized state of the run for potential future use. */ - serialized?: object; - /** Events like 'start', 'end' linked to the run. */ - events?: KVMap[]; - /** Inputs that were used to initiate the run. */ - inputs: KVMap; - /** Outputs produced by the run, if any. */ - outputs?: KVMap; - /** ID of an example that might be related to this run. */ - reference_example_id?: string; - /** ID of a parent run, if this run is part of a larger operation. */ - parent_run_id?: string; - /** Tags for further categorizing or annotating the run. */ - tags?: string[]; - /** Unique ID assigned to every run within this nested trace. **/ - trace_id?: string; - /** - * The dotted order for the run. - * - * This is a string composed of {time}{run-uuid}.* so that a trace can be - * sorted in the order it was executed. - * - * Example: - * - Parent: 20230914T223155647Z1b64098b-4ab7-43f6-afee-992304f198d8 - * - Children: - * - 20230914T223155647Z1b64098b-4ab7-43f6-afee-992304f198d8.20230914T223155649Z809ed3a2-0172-4f4d-8a02-a64e9b7a0f8a - * - 20230915T223155647Z1b64098b-4ab7-43f6-afee-992304f198d8.20230914T223155650Zc8d9f4c5-6c5a-4b2d-9b1c-3d9d7a7c5c7c - */ - dotted_order?: string; - /** - * Attachments associated with the run. - * Each entry is a tuple of [mime_type, bytes] - */ - attachments?: Attachments; -} -type S3URL = { - ROOT: { - /** A pre-signed URL */ - presigned_url: string; - /** The S3 path to the object in storage */ - s3_url: string; - }; -}; -//import { Run } from "langsmith"; -export interface Run extends BaseRun { - /** A unique identifier for the run, mandatory when loaded from DB. */ - id: string; - /** The ID of the project that owns this run. */ - session_id?: string; - /** IDs of any child runs spawned by this run. */ - child_run_ids?: string[]; - /** Child runs, loaded explicitly via a heavier query. */ - child_runs?: Run[]; - /** Stats capturing feedback for this run. 
*/ - feedback_stats?: KVMap; - /** The URL path where this run is accessible within the app. */ - app_path?: string; - /** The manifest ID that correlates with this run. */ - manifest_id?: string; - /** The current status of the run, such as 'success'. */ - status?: string; - /** Number of tokens used in the prompt. */ - prompt_tokens?: number; - /** Number of tokens generated in the completion. */ - completion_tokens?: number; - /** Total token count, combining prompt and completion. */ - total_tokens?: number; - /** Time when the first token was processed. */ - first_token_time?: number; - /** IDs of parent runs, if multiple exist. */ - parent_run_ids?: string[]; - /** Whether the run is included in a dataset. */ - in_dataset?: boolean; - /** The output S3 URLs */ - outputs_s3_urls?: S3URL; - /** The input S3 URLs */ - inputs_s3_urls?: S3URL; -} -//import { BindToolsInput, LangSmithParams } from "@langchain/core/language_models/chat_models.js"; -export type BindToolsInput = StructuredToolInterface | Record | ToolDefinition | RunnableToolLike | StructuredToolParams; - -//import { BasePromptValueInterface } from "@langchain/core/prompt_values.js"; -export interface BasePromptValueInterface extends Serializable { - toString(): string; - toChatMessages(): BaseMessage[]; -} -export declare abstract class Runnable extends Serializable implements RunnableInterface { - protected lc_runnable: boolean; - name?: string; - getName(suffix?: string): string; - abstract invoke(input: RunInput, options?: Partial): Promise; - /** - * Bind arguments to a Runnable, returning a new Runnable. - * @param kwargs - * @returns A new RunnableBinding that, when invoked, will apply the bound args. - */ - bind(kwargs: Partial): Runnable; - /** - * Return a new Runnable that maps a list of inputs to a list of outputs, - * by calling invoke() with each input. - */ - map(): Runnable; - /** - * Add retry logic to an existing runnable. - * @param kwargs - * @returns A new RunnableRetry that, when invoked, will retry according to the parameters. - */ - withRetry(fields?: { - stopAfterAttempt?: number; - onFailedAttempt?: RunnableRetryFailedAttemptHandler; - }): RunnableRetry; - /** - * Bind config to a Runnable, returning a new Runnable. - * @param config New configuration parameters to attach to the new runnable. - * @returns A new RunnableBinding with a config matching what's passed. - */ - withConfig(config: RunnableConfig): Runnable; - /** - * Create a new runnable from the current one that will try invoking - * other passed fallback runnables if the initial invocation fails. - * @param fields.fallbacks Other runnables to call if the runnable errors. - * @returns A new RunnableWithFallbacks. - */ - withFallbacks(fields: { - fallbacks: Runnable[]; - } | Runnable[]): RunnableWithFallbacks; - protected _getOptionsList(options: Partial | Partial[], length?: number): Partial[]; - /** - * Default implementation of batch, which calls invoke N times. - * Subclasses should override this method if they can batch more efficiently. - * @param inputs Array of inputs to each batch call. - * @param options Either a single call options object to apply to each batch call or an array for each call. 
- * @param batchOptions.returnExceptions Whether to return errors rather than throwing on the first one - * @returns An array of RunOutputs, or mixed RunOutputs and errors if batchOptions.returnExceptions is set - */ - batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions & { - returnExceptions?: false; - }): Promise; - batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions & { - returnExceptions: true; - }): Promise<(RunOutput | Error)[]>; - batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>; - /** - * Default streaming implementation. - * Subclasses should override this method if they support streaming output. - * @param input - * @param options - */ - _streamIterator(input: RunInput, options?: Partial): AsyncGenerator; - /** - * Stream output in chunks. - * @param input - * @param options - * @returns A readable stream that is also an iterable. - */ - stream(input: RunInput, options?: Partial): Promise>; - protected _separateRunnableConfigFromCallOptions(options?: Partial): [RunnableConfig, Omit, keyof RunnableConfig>]; - protected _callWithConfig(func: ((input: T) => Promise) | ((input: T, config?: Partial, runManager?: CallbackManagerForChainRun) => Promise), input: T, options?: Partial & { - runType?: string; - }): Promise; - /** - * Internal method that handles batching and configuration for a runnable - * It takes a function, input values, and optional configuration, and - * returns a promise that resolves to the output values. - * @param func The function to be executed for each input value. - * @param input The input values to be processed. - * @param config Optional configuration for the function execution. - * @returns A promise that resolves to the output values. - */ - _batchWithConfig(func: (inputs: T[], options?: Partial[], runManagers?: (CallbackManagerForChainRun | undefined)[], batchOptions?: RunnableBatchOptions) => Promise<(RunOutput | Error)[]>, inputs: T[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>; - /** - * Helper method to transform an Iterator of Input values into an Iterator of - * Output values, with callbacks. - * Use this to implement `stream()` or `transform()` in Runnable subclasses. - */ - protected _transformStreamWithConfig(inputGenerator: AsyncGenerator, transformer: (generator: AsyncGenerator, runManager?: CallbackManagerForChainRun, options?: Partial) => AsyncGenerator, options?: Partial & { - runType?: string; - }): AsyncGenerator; - getGraph(_?: RunnableConfig): Graph; - /** - * Create a new runnable sequence that runs each individual runnable in series, - * piping the output of one runnable into another runnable or runnable-like. - * @param coerceable A runnable, function, or object whose values are functions or runnables. - * @returns A new runnable sequence. - */ - pipe(coerceable: RunnableLike): Runnable>; - /** - * Pick keys from the dict output of this runnable. Returns a new runnable. - */ - pick(keys: string | string[]): Runnable; - /** - * Assigns new fields to the dict output of this runnable. Returns a new runnable. - */ - assign(mapping: RunnableMapLike, Record>): Runnable; - /** - * Default implementation of transform, which buffers input and then calls stream. - * Subclasses should override this method if they can start producing output while - * input is still being generated. 
- * @param generator - * @param options - */ - transform(generator: AsyncGenerator, options: Partial): AsyncGenerator; - /** - * Stream all output from a runnable, as reported to the callback system. - * This includes all inner runs of LLMs, Retrievers, Tools, etc. - * Output is streamed as Log objects, which include a list of - * jsonpatch ops that describe how the state of the run has changed in each - * step, and the final state of the run. - * The jsonpatch ops can be applied in order to construct state. - * @param input - * @param options - * @param streamOptions - */ - streamLog(input: RunInput, options?: Partial, streamOptions?: Omit): AsyncGenerator; - protected _streamLog(input: RunInput, logStreamCallbackHandler: LogStreamCallbackHandler, config: Partial): AsyncGenerator; - /** - * Generate a stream of events emitted by the internal steps of the runnable. - * - * Use to create an iterator over StreamEvents that provide real-time information - * about the progress of the runnable, including StreamEvents from intermediate - * results. - * - * A StreamEvent is a dictionary with the following schema: - * - * - `event`: string - Event names are of the format: on_[runnable_type]_(start|stream|end). - * - `name`: string - The name of the runnable that generated the event. - * - `run_id`: string - Randomly generated ID associated with the given execution of - * the runnable that emitted the event. A child runnable that gets invoked as part of the execution of a - * parent runnable is assigned its own unique ID. - * - `tags`: string[] - The tags of the runnable that generated the event. - * - `metadata`: Record - The metadata of the runnable that generated the event. - * - `data`: Record - * - * Below is a table that illustrates some events that might be emitted by various - * chains. Metadata fields have been omitted from the table for brevity. - * Chain definitions have been included after the table. - * - * **ATTENTION** This reference table is for the V2 version of the schema. - * - * ```md - * +----------------------+-----------------------------+------------------------------------------+ - * | event | input | output/chunk | - * +======================+=============================+==========================================+ - * | on_chat_model_start | {"messages": BaseMessage[]} | | - * +----------------------+-----------------------------+------------------------------------------+ - * | on_chat_model_stream | | AIMessageChunk("hello") | - * +----------------------+-----------------------------+------------------------------------------+ - * | on_chat_model_end | {"messages": BaseMessage[]} | AIMessageChunk("hello world") | - * +----------------------+-----------------------------+------------------------------------------+ - * | on_llm_start | {'input': 'hello'} | | - * +----------------------+-----------------------------+------------------------------------------+ - * | on_llm_stream | | 'Hello' | - * +----------------------+-----------------------------+------------------------------------------+ - * | on_llm_end | 'Hello human!' | | - * +----------------------+-----------------------------+------------------------------------------+ - * | on_chain_start | | | - * +----------------------+-----------------------------+------------------------------------------+ - * | on_chain_stream | | "hello world!" | - * +----------------------+-----------------------------+------------------------------------------+ - * | on_chain_end | [Document(...)] | "hello world!, goodbye world!" 
| - * +----------------------+-----------------------------+------------------------------------------+ - * | on_tool_start | {"x": 1, "y": "2"} | | - * +----------------------+-----------------------------+------------------------------------------+ - * | on_tool_end | | {"x": 1, "y": "2"} | - * +----------------------+-----------------------------+------------------------------------------+ - * | on_retriever_start | {"query": "hello"} | | - * +----------------------+-----------------------------+------------------------------------------+ - * | on_retriever_end | {"query": "hello"} | [Document(...), ..] | - * +----------------------+-----------------------------+------------------------------------------+ - * | on_prompt_start | {"question": "hello"} | | - * +----------------------+-----------------------------+------------------------------------------+ - * | on_prompt_end | {"question": "hello"} | ChatPromptValue(messages: BaseMessage[]) | - * +----------------------+-----------------------------+------------------------------------------+ - * ``` - * - * The "on_chain_*" events are the default for Runnables that don't fit one of the above categories. - * - * In addition to the standard events above, users can also dispatch custom events. - * - * Custom events will be only be surfaced with in the `v2` version of the API! - * - * A custom event has following format: - * - * ```md - * +-----------+------+------------------------------------------------------------+ - * | Attribute | Type | Description | - * +===========+======+============================================================+ - * | name | str | A user defined name for the event. | - * +-----------+------+------------------------------------------------------------+ - * | data | Any | The data associated with the event. This can be anything. | - * +-----------+------+------------------------------------------------------------+ - * ``` - * - * Here's an example: - * - * ```ts - * import { RunnableLambda } from "@langchain/core/runnables"; - * import { dispatchCustomEvent } from "@langchain/core/callbacks/dispatch"; - * // Use this import for web environments that don't support "async_hooks" - * // and manually pass config to child runs. - * // import { dispatchCustomEvent } from "@langchain/core/callbacks/dispatch/web"; - * - * const slowThing = RunnableLambda.from(async (someInput: string) => { - * // Placeholder for some slow operation - * await new Promise((resolve) => setTimeout(resolve, 100)); - * await dispatchCustomEvent("progress_event", { - * message: "Finished step 1 of 2", - * }); - * await new Promise((resolve) => setTimeout(resolve, 100)); - * return "Done"; - * }); - * - * const eventStream = await slowThing.streamEvents("hello world", { - * version: "v2", - * }); - * - * for await (const event of eventStream) { - * if (event.event === "on_custom_event") { - * console.log(event); - * } - * } - * ``` - */ - streamEvents(input: RunInput, options: Partial & { - version: "v1" | "v2"; - }, streamOptions?: Omit): IterableReadableStream; - streamEvents(input: RunInput, options: Partial & { - version: "v1" | "v2"; - encoding: "text/event-stream"; - }, streamOptions?: Omit): IterableReadableStream; - private _streamEventsV2; - private _streamEventsV1; - static isRunnable(thing: any): thing is Runnable; - /** - * Bind lifecycle listeners to a Runnable, returning a new Runnable. 
- * The Run object contains information about the run, including its id, - * type, input, output, error, startTime, endTime, and any tags or metadata - * added to the run. - * - * @param {Object} params - The object containing the callback functions. - * @param {(run: Run) => void} params.onStart - Called before the runnable starts running, with the Run object. - * @param {(run: Run) => void} params.onEnd - Called after the runnable finishes running, with the Run object. - * @param {(run: Run) => void} params.onError - Called if the runnable throws an error, with the Run object. - */ - withListeners({ onStart, onEnd, onError, }: { - onStart?: (run: Run, config?: RunnableConfig) => void | Promise; - onEnd?: (run: Run, config?: RunnableConfig) => void | Promise; - onError?: (run: Run, config?: RunnableConfig) => void | Promise; - }): Runnable; - /** - * Convert a runnable to a tool. Return a new instance of `RunnableToolLike` - * which contains the runnable, name, description and schema. - * - * @template {T extends RunInput = RunInput} RunInput - The input type of the runnable. Should be the same as the `RunInput` type of the runnable. - * - * @param fields - * @param {string | undefined} [fields.name] The name of the tool. If not provided, it will default to the name of the runnable. - * @param {string | undefined} [fields.description] The description of the tool. Falls back to the description on the Zod schema if not provided, or undefined if neither are provided. - * @param {z.ZodType} [fields.schema] The Zod schema for the input of the tool. Infers the Zod type from the input type of the runnable. - * @returns {RunnableToolLike, RunOutput>} An instance of `RunnableToolLike` which is a runnable that can be used as a tool. - */ - asTool(fields: { - name?: string; - description?: string; - schema: z.ZodType; - }): RunnableToolLike, RunOutput>; -} -export type RunnableBindingArgs = { - bound: Runnable; - kwargs?: Partial; - config: RunnableConfig; - configFactories?: Array<(config: RunnableConfig) => RunnableConfig>; -}; - -export declare class RunnableToolLike extends RunnableBinding, RunOutput> { - name: string; - description?: string; - schema: RunInput; - constructor(fields: RunnableToolLikeArgs); - static lc_name(): string; -} -//import { Runnable, RunnableToolLike } from "@langchain/core/runnables.js"; -export declare class ZodObject, Input = objectInputType> extends ZodType, Input> { - private _cached; - _getCached(): { - shape: T; - keys: string[]; - }; - _parse(input: ParseInput): ParseReturnType; - get shape(): T; - strict(message?: errorUtil.ErrMessage): ZodObject; - strip(): ZodObject; - passthrough(): ZodObject; - /** - * @deprecated In most cases, this is no longer needed - unknown properties are now silently stripped. - * If you want to pass through unknown properties, use `.passthrough()` instead. - */ - nonstrict: () => ZodObject; - extend(augmentation: Augmentation): ZodObject, UnknownKeys, Catchall>; - /** - * @deprecated Use `.extend` instead - * */ - augment: (augmentation: Augmentation) => ZodObject, UnknownKeys, Catchall, objectOutputType, Catchall, UnknownKeys>, objectInputType, Catchall, UnknownKeys>>; - /** - * Prior to zod@1.0.12 there was a bug in the - * inferred type of merged objects. Please - * upgrade if you are experiencing issues. 
- */ - merge(merging: Incoming): ZodObject, Incoming["_def"]["unknownKeys"], Incoming["_def"]["catchall"]>; - setKey(key: Key, schema: Schema): ZodObject; - catchall(index: Index): ZodObject; - pick>(mask: Mask): ZodObject>, UnknownKeys, Catchall>; - omit>(mask: Mask): ZodObject, UnknownKeys, Catchall>; - /** - * @deprecated - */ - deepPartial(): partialUtil.DeepPartial; - partial(): ZodObject<{ - [k in keyof T]: ZodOptional; - }, UnknownKeys, Catchall>; - partial>(mask: Mask): ZodObject : T[k]; - }>, UnknownKeys, Catchall>; - required(): ZodObject<{ - [k in keyof T]: deoptional; - }, UnknownKeys, Catchall>; - required>(mask: Mask): ZodObject : T[k]; - }>, UnknownKeys, Catchall>; - keyof(): ZodEnum>; - static create: (shape: T_1, params?: RawCreateParams) => ZodObject, any>]: objectUtil.addQuestionMarks, any>[k]; }, { [k_1 in keyof baseObjectInputType]: baseObjectInputType[k_1]; }>; - static strictCreate: (shape: T_1, params?: RawCreateParams) => ZodObject, any>]: objectUtil.addQuestionMarks, any>[k]; }, { [k_1 in keyof baseObjectInputType]: baseObjectInputType[k_1]; }>; - static lazycreate: (shape: () => T_1, params?: RawCreateParams) => ZodObject, any>]: objectUtil.addQuestionMarks, any>[k]; }, { [k_1 in keyof baseObjectInputType]: baseObjectInputType[k_1]; }>; -} - -class ToolReturnType {} -export type ZodObjectAny = ZodObject; -export interface StructuredToolInterface extends RunnableInterface<(z.output extends string ? string : never) | z.input | ToolCall, ToolReturnType> { - lc_namespace: string[]; - /** - * A Zod schema representing the parameters of the tool. - */ - schema: T | z.ZodEffects; - /** - * @deprecated Use .invoke() instead. Will be removed in 0.3.0. - * - * Calls the tool with the provided argument, configuration, and tags. It - * parses the input according to the schema, handles any errors, and - * manages callbacks. - * @param arg The input argument for the tool. - * @param configArg Optional configuration or callbacks for the tool. - * @param tags Optional tags for the tool. - * @returns A Promise that resolves with a string. - */ - call(arg: (z.output extends string ? string : never) | z.input | ToolCall, configArg?: Callbacks | RunnableConfig, - /** @deprecated */ - tags?: string[]): Promise; - /** - * The name of the tool. - */ - name: string; - /** - * A description of the tool. - */ - description: string; - returnDirect: boolean; -} - -export interface StructuredToolParams extends Pick { - /** - * An optional description of the tool to pass to the model. 
- */ - description?: string; -} -//import { StructuredToolInterface, StructuredToolParams } from "@langchain/core/tools.js"; -//import { ToolChoice } from "@langchain/core/language_models/chat_models.js"; -//import { BaseChatModelParams } from "@langchain/core/language_models/chat_models.js"; -//import { FunctionDefinition } from "@langchain/core/language_models/base.js"; -//import { BaseFunctionCallOptions } from "@langchain/core/language_models/base.js"; -//import { Serialized } from "@langchain/core/dist/load/serializable.js"; - -//import { ChatOpenAIFields, ChatOpenAICallOptions, ChatOpenAIStructuredOutputMethodOptions } from "./types.js"; - -dotenv.config(); - -// export class ChatWrapper { -// // private chatModel: ChatOpenAI; -// callKeys: string[] = []; -// lc_serializeable = true; -// lc_secrets = ["apiKey"]; - -// lc_serializable = true; -// lc_aliases = ["chat"]; -// temperature = 0.5; -// topP = 1; -// frequencyPenalty= 0; -// presencePenalty= 0; -// n=1000; -// modelName = "gpt-4o"; -// model="gpt-4o"; - -// constructor({ apiKey, model = "gpt-4o" }: { apiKey: string; model?: string }) { -// this.chatModel = new ChatOpenAI({ apiKey, model }); -// } - -// async invoke(prompt: string) { -// return this.chatModel.invoke( prompt ); -// } -// }; -export type OpenAICoreRequestOptions> = { - path?: string; - query?: Req | undefined; - body?: Req | undefined; - headers?: Record | undefined; - maxRetries?: number; - stream?: boolean | undefined; - timeout?: number; - httpAgent?: any; - signal?: AbortSignal | undefined | null; - idempotencyKey?: string; -}; -export interface BaseSerialized { - lc: number; - type: T; - id: string[]; - name?: string; - graph?: Record; -} -export interface SerializedFields { - [key: string]: any; -} -export interface SerializedKeyAlias { - [key: string]: string; -} -export declare function keyToJson(key: string, map?: SerializedKeyAlias): string; -export declare function keyFromJson(key: string, map?: SerializedKeyAlias): string; -export declare function mapKeys(fields: SerializedFields, mapper: typeof keyToJson, map?: SerializedKeyAlias): SerializedFields; - -export interface SerializedConstructor extends BaseSerialized<"constructor"> { - kwargs: SerializedFields; -} -export interface SerializedSecret extends BaseSerialized<"secret"> { -} -export interface SerializedNotImplemented extends BaseSerialized<"not_implemented"> { -} -export interface FunctionCall { - /** - * The arguments to call the function with, as generated by the model in JSON - * format. Note that the model does not always generate valid JSON, and may - * hallucinate parameters not defined by your function schema. Validate the - * arguments in your code before calling your function. - */ - arguments: string; - /** - * The name of the function to call. - */ - name: string; -} -export type Serialized = SerializedConstructor | SerializedSecret | SerializedNotImplemented; -export type OpenAIToolCall = { - /** - * The ID of the tool call. - */ - id: string; - /** - * The function that the model called. - */ - function: FunctionCall; - /** - * The type of the tool. Currently, only `function` is supported. 
- */ - type: "function"; - index?: number; -}; - -export type BaseMessageFields = { - content: MessageContent; - name?: string; - additional_kwargs?: { - /** - * @deprecated Use "tool_calls" field on AIMessages instead - */ - function_call?: FunctionCall; - /** - * @deprecated Use "tool_calls" field on AIMessages instead - */ - tool_calls?: OpenAIToolCall[]; - [key: string]: unknown; - }; - /** Response metadata. For example: response headers, logprobs, token counts. */ - response_metadata?: Record; - /** - * An optional unique identifier for the message. This should ideally be - * provided by the provider/model which created the message. - */ - id?: string; -}; -export type MessageContentText = { - type: "text"; - text: string; -}; -export type ImageDetail = "auto" | "low" | "high"; -export type MessageContentImageUrl = { - type: "image_url"; - image_url: string | { - url: string; - detail?: ImageDetail; - }; -}; -export type MessageContentComplex = MessageContentText | MessageContentImageUrl | (Record & { - type?: "text" | "image_url" | string; -}) | (Record & { - type?: never; -}); -export type MessageContent = string | MessageContentComplex[]; -export interface SerializableInterface { - get lc_id(): string[]; -} -export declare abstract class Serializable implements SerializableInterface { - lc_serializable: boolean; - lc_kwargs: SerializedFields; - /** - * A path to the module that contains the class, eg. ["langchain", "llms"] - * Usually should be the same as the entrypoint the class is exported from. - */ - abstract lc_namespace: string[]; - /** - * The name of the serializable. Override to provide an alias or - * to preserve the serialized module name in minified environments. - * - * Implemented as a static method to support loading logic. - */ - static lc_name(): string; - /** - * The final serialized identifier for the module. - */ - get lc_id(): string[]; - /** - * A map of secrets, which will be omitted from serialization. - * Keys are paths to the secret in constructor args, e.g. "foo.bar.baz". - * Values are the secret ids, which will be used when deserializing. - */ - get lc_secrets(): { - [key: string]: string; - } | undefined; - /** - * A map of additional attributes to merge with constructor args. - * Keys are the attribute names, e.g. "foo". - * Values are the attribute values, which will be serialized. - * These attributes need to be accepted by the constructor as arguments. - */ - get lc_attributes(): SerializedFields | undefined; - /** - * A map of aliases for constructor args. - * Keys are the attribute names, e.g. "foo". - * Values are the alias that will replace the key in serialization. - * This is used to eg. make argument names match Python. - */ - get lc_aliases(): { - [key: string]: string; - } | undefined; - constructor(kwargs?: SerializedFields, ..._args: never[]); - toJSON(): Serialized; - toJSONNotImplemented(): SerializedNotImplemented; -} -export interface StoredMessageData { - content: string; - role: string | undefined; - name: string | undefined; - tool_call_id: string | undefined; - additional_kwargs?: Record; - /** Response metadata. For example: response headers, logprobs, token counts. 
*/ - response_metadata?: Record; - id?: string; -} -export interface StoredMessage { - type: string; - data: StoredMessageData; -} -export type MessageType = "human" | "ai" | "generic" | "developer" | "system" | "function" | "tool" | "remove"; -export declare abstract class BaseMessage extends Serializable implements BaseMessageFields { - lc_namespace: string[]; - lc_serializable: boolean; - get lc_aliases(): Record; - /** - * @deprecated - * Use {@link BaseMessage.content} instead. - */ - get text(): string; - /** The content of the message. */ - content: MessageContent; - /** The name of the message sender in a multi-user chat. */ - name?: string; - /** Additional keyword arguments */ - additional_kwargs: NonNullable; - /** Response metadata. For example: response headers, logprobs, token counts. */ - response_metadata: NonNullable; - /** - * An optional unique identifier for the message. This should ideally be - * provided by the provider/model which created the message. - */ - id?: string; - /** - * @deprecated Use .getType() instead or import the proper typeguard. - * For example: - * - * ```ts - * import { isAIMessage } from "@langchain/core/messages"; - * - * const message = new AIMessage("Hello!"); - * isAIMessage(message); // true - * ``` - */ - abstract _getType(): MessageType; - /** The type of the message. */ - getType(): MessageType; - constructor(fields: string | BaseMessageFields, - /** @deprecated */ - kwargs?: Record); - toDict(): StoredMessage; - static lc_name(): string; - get _printableFields(): Record; - _updateId(value: string | undefined): void; - get [Symbol.toStringTag](): any; -} - -declare abstract class BaseCallbackHandlerMethodsClass { - /** - * Called at the start of an LLM or Chat Model run, with the prompt(s) - * and the run ID. - */ - handleLLMStart?(llm: Serialized, prompts: string[], runId: string, parentRunId?: string, extraParams?: Record, tags?: string[], metadata?: Record, runName?: string): // eslint-disable-next-line @typescript-eslint/no-explicit-any - Promise | any; - /** - * Called when an LLM/ChatModel in `streaming` mode produces a new token - */ - handleLLMNewToken?(token: string, - /** - * idx.prompt is the index of the prompt that produced the token - * (if there are multiple prompts) - * idx.completion is the index of the completion that produced the token - * (if multiple completions per prompt are requested) - */ - idx: NewTokenIndices, runId: string, parentRunId?: string, tags?: string[], fields?: HandleLLMNewTokenCallbackFields): // eslint-disable-next-line @typescript-eslint/no-explicit-any - Promise | any; - /** - * Called if an LLM/ChatModel run encounters an error - */ - handleLLMError?(err: Error, runId: string, parentRunId?: string, tags?: string[]): // eslint-disable-next-line @typescript-eslint/no-explicit-any - Promise | any; - /** - * Called at the end of an LLM/ChatModel run, with the output and the run ID. - */ - handleLLMEnd?(output: LLMResult, runId: string, parentRunId?: string, tags?: string[]): // eslint-disable-next-line @typescript-eslint/no-explicit-any - Promise | any; - /** - * Called at the start of a Chat Model run, with the prompt(s) - * and the run ID. - */ - handleChatModelStart?(llm: Serialized, messages: BaseMessage[][], runId: string, parentRunId?: string, extraParams?: Record, tags?: string[], metadata?: Record, runName?: string): // eslint-disable-next-line @typescript-eslint/no-explicit-any - Promise | any; - /** - * Called at the start of a Chain run, with the chain name and inputs - * and the run ID. 
- */ - handleChainStart?(chain: Serialized, inputs: ChainValues, runId: string, parentRunId?: string, tags?: string[], metadata?: Record, runType?: string, runName?: string): // eslint-disable-next-line @typescript-eslint/no-explicit-any - Promise | any; - /** - * Called if a Chain run encounters an error - */ - handleChainError?(err: Error, runId: string, parentRunId?: string, tags?: string[], kwargs?: { - inputs?: Record; - }): // eslint-disable-next-line @typescript-eslint/no-explicit-any - Promise | any; - /** - * Called at the end of a Chain run, with the outputs and the run ID. - */ - handleChainEnd?(outputs: ChainValues, runId: string, parentRunId?: string, tags?: string[], kwargs?: { - inputs?: Record; - }): // eslint-disable-next-line @typescript-eslint/no-explicit-any - Promise | any; - /** - * Called at the start of a Tool run, with the tool name and input - * and the run ID. - */ - handleToolStart?(tool: Serialized, input: string, runId: string, parentRunId?: string, tags?: string[], metadata?: Record, runName?: string): // eslint-disable-next-line @typescript-eslint/no-explicit-any - Promise | any; - /** - * Called if a Tool run encounters an error - */ - handleToolError?(err: Error, runId: string, parentRunId?: string, tags?: string[]): // eslint-disable-next-line @typescript-eslint/no-explicit-any - Promise | any; - /** - * Called at the end of a Tool run, with the tool output and the run ID. - */ - handleToolEnd?(output: any, runId: string, parentRunId?: string, tags?: string[]): // eslint-disable-next-line @typescript-eslint/no-explicit-any - Promise | any; - handleText?(text: string, runId: string, parentRunId?: string, tags?: string[]): Promise | void; - /** - * Called when an agent is about to execute an action, - * with the action and the run ID. - */ - handleAgentAction?(action: AgentAction, runId: string, parentRunId?: string, tags?: string[]): Promise | void; - /** - * Called when an agent finishes execution, before it exits. - * with the final output and the run ID. - */ - handleAgentEnd?(action: AgentFinish, runId: string, parentRunId?: string, tags?: string[]): Promise | void; - handleRetrieverStart?(retriever: Serialized, query: string, runId: string, parentRunId?: string, tags?: string[], metadata?: Record, name?: string): // eslint-disable-next-line @typescript-eslint/no-explicit-any - Promise | any; - handleRetrieverEnd?(documents: DocumentInterface[], runId: string, parentRunId?: string, tags?: string[]): // eslint-disable-next-line @typescript-eslint/no-explicit-any - Promise | any; - handleRetrieverError?(err: Error, runId: string, parentRunId?: string, tags?: string[]): // eslint-disable-next-line @typescript-eslint/no-explicit-any - Promise | any; - handleCustomEvent?(eventName: string, data: any, runId: string, tags?: string[], metadata?: Record): // eslint-disable-next-line @typescript-eslint/no-explicit-any - Promise | any; -} - -export type CallbackHandlerMethods = BaseCallbackHandlerMethodsClass; - -type BaseCallbackManagerMethods = { - [K in keyof CallbackHandlerMethods]?: (...args: Parameters[K]>) => Promise; -}; -export interface CallbackManagerOptions { - verbose?: boolean; - tracing?: boolean; -} -export type Callbacks = CallbackManager | (BaseCallbackHandler | CallbackHandlerMethods)[]; -export interface BaseCallbackConfig { - /** - * Name for the tracer run for this call. Defaults to the name of the class. - */ - runName?: string; - /** - * Tags for this call and any sub-calls (eg. a Chain calling an LLM). 
- * You can use these to filter calls. - */ - tags?: string[]; - /** - * Metadata for this call and any sub-calls (eg. a Chain calling an LLM). - * Keys should be strings, values should be JSON-serializable. - */ - metadata?: Record; - /** - * Callbacks for this call and any sub-calls (eg. a Chain calling an LLM). - * Tags are passed to all callbacks, metadata is passed to handle*Start callbacks. - */ - callbacks?: Callbacks; - /** - * Unique identifier for the tracer run for this call. If not provided, a new UUID - * will be generated. - */ - runId?: string; -} -export interface RunnableConfig = Record> extends BaseCallbackConfig { - /** - * Runtime values for attributes previously made configurable on this Runnable, - * or sub-Runnables. - */ - configurable?: ConfigurableFieldType; - /** - * Maximum number of times a call can recurse. If not provided, defaults to 25. - */ - recursionLimit?: number; - /** Maximum number of parallel calls to make. */ - maxConcurrency?: number; - /** - * Timeout for this call in milliseconds. - */ - timeout?: number; - /** - * Abort signal for this call. - * If provided, the call will be aborted when the signal is aborted. - * @see https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal - */ - signal?: AbortSignal; -} - -export interface BaseLanguageModelCallOptions extends RunnableConfig { - /** - * Stop tokens to use for this call. - * If not provided, the default stop tokens for the model will be used. - */ - stop?: string[]; -} -export interface OpenAICallOptions extends BaseLanguageModelCallOptions { - /** - * Additional options to pass to the underlying axios request. - */ - options?: OpenAICoreRequestOptions; -} -export type FunctionCallOption = { - name: string; -}; -export interface FunctionDefinition { - /** - * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain - * underscores and dashes, with a maximum length of 64. - */ - name: string; - /** - * The parameters the functions accepts, described as a JSON Schema object. See the - * [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for - * examples, and the - * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for - * documentation about the format. - * - * To describe a function that accepts no parameters, provide the value - * `{"type": "object", "properties": {}}`. - */ - parameters: Record; - /** - * A description of what the function does, used by the model to choose when and - * how to call the function. - */ - description?: string; -} -export interface BaseFunctionCallOptions extends BaseLanguageModelCallOptions { - function_call?: FunctionCallOption; - functions?: FunctionDefinition[]; -} -class ChatOpenAIToolType{} -export interface ChatOpenAICallOptions extends OpenAICallOptions, BaseFunctionCallOptions { - tools?: ChatOpenAIToolType[]; - tool_choice?: OpenAIToolChoice; - promptIndex?: number; - response_format?: ChatOpenAIResponseFormat; - seed?: number; - /** - * Additional options to pass to streamed completions. - * If provided takes precedence over "streamUsage" set at initialization time. - */ - stream_options?: { - /** - * Whether or not to include token usage in the stream. - * If set to `true`, this will include an additional - * chunk at the end of the stream with the token usage. - */ - include_usage: boolean; - }; - /** - * Whether or not to restrict the ability to - * call multiple tools in one response. 
- */ - parallel_tool_calls?: boolean; - /** - * If `true`, model output is guaranteed to exactly match the JSON Schema - * provided in the tool definition. If `true`, the input schema will also be - * validated according to - * https://platform.openai.com/docs/guides/structured-outputs/supported-schemas. - * - * If `false`, input schema will not be validated and model output will not - * be validated. - * - * If `undefined`, `strict` argument will not be passed to the model. - * - * @version 0.2.6 - */ - strict?: boolean; - /** - * Output types that you would like the model to generate for this request. Most - * models are capable of generating text, which is the default: - * - * `["text"]` - * - * The `gpt-4o-audio-preview` model can also be used to - * [generate audio](https://platform.openai.com/docs/guides/audio). To request that - * this model generate both text and audio responses, you can use: - * - * `["text", "audio"]` - */ - modalities?: Array; - /** - * Parameters for audio output. Required when audio output is requested with - * `modalities: ["audio"]`. - * [Learn more](https://platform.openai.com/docs/guides/audio). - */ - audio?: OpenAIClient.Chat.ChatCompletionAudioParam; - /** - * Static predicted output content, such as the content of a text file that is being regenerated. - * [Learn more](https://platform.openai.com/docs/guides/latency-optimization#use-predicted-outputs). - */ - prediction?: OpenAIClient.ChatCompletionPredictionContent; - /** - * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. - * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. - */ - reasoning_effort?: OpenAIClient.Chat.ChatCompletionReasoningEffort; -} -export type FailedAttemptHandler = (error: any) => any; -export interface AsyncCallerParams { - /** - * The maximum number of concurrent calls that can be made. - * Defaults to `Infinity`, which means no limit. - */ - maxConcurrency?: number; - /** - * The maximum number of retries that can be made for a single call, - * with an exponential backoff between each attempt. Defaults to 6. - */ - maxRetries?: number; - /** - * Custom handler to handle failed attempts. Takes the originally thrown - * error object as input, and should itself throw an error if the input - * error is not retryable. - */ - onFailedAttempt?: FailedAttemptHandler; -} -export interface BaseLangChainParams { - verbose?: boolean; - callbacks?: Callbacks; - tags?: string[]; - metadata?: Record; -} -export interface Generation { - /** - * Generated text output - */ - text: string; - /** - * Raw generation info response from the provider. - * May include things like reason for finishing (e.g. in {@link OpenAI}) - */ - generationInfo?: Record; -} -export declare abstract class BaseCache { - abstract lookup(prompt: string, llmKey: string): Promise; - abstract update(prompt: string, llmKey: string, value: T): Promise; -} -export interface BaseLanguageModelParams extends AsyncCallerParams, BaseLangChainParams { - /** - * @deprecated Use `callbacks` instead - */ - callbackManager?: CallbackManager; - cache?: BaseCache | boolean; -} -export type BaseChatModelParams = BaseLanguageModelParams & { - /** - * Whether to disable streaming. - * - * If streaming is bypassed, then `stream()` will defer to - * `invoke()`. - * - * - If true, will always bypass streaming case. - * - If false (default), will always use streaming case if available. 
- */ - disableStreaming?: boolean; -}; -export interface ChatOpenAIFields extends Partial, Partial, BaseChatModelParams { - configuration?: ClientOptions & LegacyOpenAIInput; -} -export declare abstract class BaseMessageChunk extends BaseMessage { - abstract concat(chunk: BaseMessageChunk): BaseMessageChunk; -} -export declare class ChatGenerationChunk extends GenerationChunk implements ChatGeneration { - message: BaseMessageChunk; - constructor(fields: ChatGenerationChunkFields); - concat(chunk: ChatGenerationChunk): ChatGenerationChunk; - - // - - lc_namespace: string[]; - lc_serializable: boolean; - get lc_aliases(): Record; - /** - * @deprecated - * Use {@link BaseMessage.content} instead. - */ - text: string; - /** The content of the message. */ - content: MessageContent; - /** The name of the message sender in a multi-user chat. */ - name?: string; - /** Additional keyword arguments */ - additional_kwargs: NonNullable; - /** Response metadata. For example: response headers, logprobs, token counts. */ - response_metadata: NonNullable; - /** - * An optional unique identifier for the message. This should ideally be - * provided by the provider/model which created the message. - */ - id?: string; - /** - * @deprecated Use .getType() instead or import the proper typeguard. - * For example: - * - * ```ts - * import { isAIMessage } from "@langchain/core/messages"; - * - * const message = new AIMessage("Hello!"); - * isAIMessage(message); // true - * ``` - */ - _getType(): MessageType; - /** The type of the message. */ - lc_kwargs: SerializedFields; - lc_id: string[]; - lc_secrets: {} - lc_attributes: {} - toJSON(): Serialized; - toJSONNotImplemented(): SerializedNotImplemented; - getType(): MessageType; - constructor(fields: string | BaseMessageFields, - /** @deprecated */ - kwargs?: Record); - toDict(): StoredMessage; - static lc_name(): string; - get _printableFields(): Record; - _updateId(value: string | undefined): void; - get [Symbol.toStringTag](): any; -} -export type ToolChoice = string | Record | "auto" | "any"; -export type BaseChatModelCallOptions = BaseLanguageModelCallOptions & { - /** - * Specifies how the chat model should use tools. - * @default undefined - * - * Possible values: - * - "auto": The model may choose to use any of the provided tools, or none. - * - "any": The model must use one of the provided tools. - * - "none": The model must not use any tools. - * - A string (not "auto", "any", or "none"): The name of a specific tool the model must use. - * - An object: A custom schema specifying tool choice parameters. Specific to the provider. - * - * Note: Not all providers support tool_choice. An error will be thrown - * if used with an unsupported model. - */ - tool_choice?: ToolChoice; -}; -export type ToolCall = { - name: string; - args: Record; - id?: string; - type?: "tool_call"; -}; -export type ToolCallChunk = { - name?: string; - args?: string; - id?: string; - index?: number; - type?: "tool_call_chunk"; -}; -export type InvalidToolCall = { - name?: string; - args?: string; - id?: string; - error?: string; - type?: "invalid_tool_call"; -}; -export type InputTokenDetails = { - /** - * Audio input tokens. - */ - audio?: number; - /** - * Input tokens that were cached and there was a cache hit. - * - * Since there was a cache hit, the tokens were read from the cache. - * More precisely, the model state given these tokens was read from the cache. - */ - cache_read?: number; - /** - * Input tokens that were cached and there was a cache miss. 
- * - * Since there was a cache miss, the cache was created from these tokens. - */ - cache_creation?: number; -}; -export type OutputTokenDetails = { - /** - * Audio output tokens - */ - audio?: number; - /** - * Reasoning output tokens. - * - * Tokens generated by the model in a chain of thought process (i.e. by - * OpenAI's o1 models) that are not returned as part of model output. - */ - reasoning?: number; -}; -export type UsageMetadata = { - /** - * Count of input (or prompt) tokens. Sum of all input token types. - */ - input_tokens: number; - /** - * Count of output (or completion) tokens. Sum of all output token types. - */ - output_tokens: number; - /** - * Total token count. Sum of input_tokens + output_tokens. - */ - total_tokens: number; - /** - * Breakdown of input token counts. - * - * Does *not* need to sum to full input token count. Does *not* need to have all keys. - */ - input_token_details?: InputTokenDetails; - /** - * Breakdown of output token counts. - * - * Does *not* need to sum to full output token count. Does *not* need to have all keys. - */ - output_token_details?: OutputTokenDetails; -}; -export type AIMessageFields = BaseMessageFields & { - tool_calls?: ToolCall[]; - invalid_tool_calls?: InvalidToolCall[]; - usage_metadata?: UsageMetadata; -}; -export type AIMessageChunkFields = AIMessageFields & { - tool_call_chunks?: ToolCallChunk[]; -}; -export declare class AIMessageChunk extends BaseMessageChunk { - tool_calls?: ToolCall[]; - invalid_tool_calls?: InvalidToolCall[]; - tool_call_chunks?: ToolCallChunk[]; - /** - * If provided, token usage information associated with the message. - */ - usage_metadata?: UsageMetadata; - constructor(fields: string | AIMessageChunkFields); - get lc_aliases(): Record; - static lc_name(): string; - _getType(): MessageType; - get _printableFields(): Record; - concat(chunk: AIMessageChunk): AIMessageChunk; -} -export type StringWithAutocomplete = T | (string & Record); -export type InputValues = Record; -export type PartialValues = Record Promise) | (() => string)>; -export type ChainValues = Record; - - -export type MessageFieldWithRole = { - role: StringWithAutocomplete<"user" | "assistant" | MessageType>; - content: MessageContent; - name?: string; -} & Record; -export type BaseMessageLike = BaseMessage | MessageFieldWithRole | [ - StringWithAutocomplete, - MessageContent -] | string -export declare abstract class BaseChatModel extends BaseLanguageModel { - ParsedCallOptions: Omit>; - lc_namespace: string[]; - disableStreaming: boolean; - constructor(fields: BaseChatModelParams); - _combineLLMOutput?(...llmOutputs: LLMResult["llmOutput"][]): LLMResult["llmOutput"]; - protected _separateRunnableConfigFromCallOptionsCompat(options?: Partial): [RunnableConfig, this["ParsedCallOptions"]]; - /** - * Bind tool-like objects to this chat model. - * - * @param tools A list of tool definitions to bind to this chat model. - * Can be a structured tool, an OpenAI formatted tool, or an object - * matching the provider's specific tool schema. - * @param kwargs Any additional parameters to bind. - */ - bindTools?(tools: BindToolsInput[], kwargs?: Partial): Runnable; - /** - * Invokes the chat model with a single input. - * @param input The input for the language model. - * @param options The call options. - * @returns A Promise that resolves to a BaseMessageChunk. 
- */ - invoke(input: BaseLanguageModelInput, options?: CallOptions): Promise; - _streamResponseChunks(_messages: BaseMessage[], _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator; - _streamIterator(input: BaseLanguageModelInput, options?: CallOptions): AsyncGenerator; - getLsParams(options: this["ParsedCallOptions"]): LangSmithParams; - /** @ignore */ - _generateUncached(messages: BaseMessageLike[][], parsedOptions: this["ParsedCallOptions"], handledOptions: RunnableConfig): Promise; - _generateCached({ messages, cache, llmStringKey, parsedOptions, handledOptions, }: { - messages: BaseMessageLike[][]; - cache: BaseCache; - llmStringKey: string; - parsedOptions: any; - handledOptions: RunnableConfig; - }): Promise; - /** - * Generates chat based on the input messages. - * @param messages An array of arrays of BaseMessage instances. - * @param options The call options or an array of stop sequences. - * @param callbacks The callbacks for the language model. - * @returns A Promise that resolves to an LLMResult. - */ - generate(messages: BaseMessageLike[][], options?: string[] | CallOptions, callbacks?: Callbacks): Promise; - /** - * Get the parameters used to invoke the model - */ - invocationParams(_options?: this["ParsedCallOptions"]): any; - _modelType(): string; - abstract _llmType(): string; - /** - * @deprecated - * Return a json-like object representing this LLM. - */ - serialize(): SerializedLLM; - /** - * Generates a prompt based on the input prompt values. - * @param promptValues An array of BasePromptValue instances. - * @param options The call options or an array of stop sequences. - * @param callbacks The callbacks for the language model. - * @returns A Promise that resolves to an LLMResult. - */ - generatePrompt(promptValues: BasePromptValueInterface[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise; - abstract _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise; - /** - * @deprecated Use .invoke() instead. Will be removed in 0.2.0. - * - * Makes a single call to the chat model. - * @param messages An array of BaseMessage instances. - * @param options The call options or an array of stop sequences. - * @param callbacks The callbacks for the language model. - * @returns A Promise that resolves to a BaseMessage. - */ - call(messages: BaseMessageLike[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise; - /** - * @deprecated Use .invoke() instead. Will be removed in 0.2.0. - * - * Makes a single call to the chat model with a prompt value. - * @param promptValue The value of the prompt. - * @param options The call options or an array of stop sequences. - * @param callbacks The callbacks for the language model. - * @returns A Promise that resolves to a BaseMessage. - */ - callPrompt(promptValue: BasePromptValueInterface, options?: string[] | CallOptions, callbacks?: Callbacks): Promise; - /** - * @deprecated Use .invoke() instead. Will be removed in 0.2.0. - * - * Predicts the next message based on the input messages. - * @param messages An array of BaseMessage instances. - * @param options The call options or an array of stop sequences. - * @param callbacks The callbacks for the language model. - * @returns A Promise that resolves to a BaseMessage. - */ - predictMessages(messages: BaseMessage[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise; - /** - * @deprecated Use .invoke() instead. Will be removed in 0.2.0. 
- * - * Predicts the next message based on a text input. - * @param text The text input. - * @param options The call options or an array of stop sequences. - * @param callbacks The callbacks for the language model. - * @returns A Promise that resolves to a string. - */ - predict(text: string, options?: string[] | CallOptions, callbacks?: Callbacks): Promise; - //withStructuredOutput = Record>(outputSchema: z.ZodType | Record, config?: StructuredOutputMethodOptions): Runnable; - //withStructuredOutput = Record>(outputSchema: z.ZodType | Record, config?: StructuredOutputMethodOptions): Runnable; -} -type ChatCompletionCreateParamsNonStreaming= {} //fixme what a mess -type ChatCompletionCreateParamsStreaming = {} //fixme -class OpenAIClientClientOptions {} -export type LangSmithParams = { - ls_provider?: string; - ls_model_name?: string; - ls_model_type: "chat"; - ls_temperature?: number; - ls_max_tokens?: number; - ls_stop?: Array; -}; -export type ChatCompletionCreateParams = - | ChatCompletionCreateParamsNonStreaming - | ChatCompletionCreateParamsStreaming; - - type ChatCompletionCreateParamsmodelKwargs = {} +//import { ChatOpenAIFields, ChatOpenAICallOptions, ChatOpenAIStructuredOutputMethodOptions } from "./types.js"; -export class ChatWrapper extends BaseChatModel { - static lc_name(): string { - return "ChatOpenAI"; - } +dotenv.config(); +export class ChatWrapper { + // private chatModel: ChatOpenAI; + callKeys: string[] = []; + lc_serializeable = true; + lc_secrets = ["apiKey"]; - get callKeys(): string[] { - return []; - } - - lc_serializable: boolean = false; - - get lc_secrets(): { [key: string]: string } | undefined { - return undefined; - } - - get lc_aliases(): Record { - return {}; - } - - temperature: number = 0; - topP: number = 0; - frequencyPenalty: number = 0; - presencePenalty: number = 0; - n: number = 0; - logitBias?: Record; - modelName: string = ""; - model: string = ""; - modelKwargs?: ChatCompletionCreateParamsmodelKwargs //OpenAIClient.Chat.ChatCompletionCreateParams["modelKwargs"]; - stop?: string[]; - stopSequences?: string[]; - user?: string; - timeout?: number; - streaming: boolean = false; - streamUsage: boolean = false; - maxTokens?: number; - logprobs?: boolean; - topLogprobs?: number; - openAIApiKey?: string; - apiKey?: string; - azureOpenAIApiVersion?: string; - azureOpenAIApiKey?: string; - azureADTokenProvider?: () => Promise; - azureOpenAIApiInstanceName?: string; - azureOpenAIApiDeploymentName?: string; - azureOpenAIBasePath?: string; - azureOpenAIEndpoint?: string; - organization?: string; - __includeRawResponse?: boolean; - protected client: OpenAIClient | undefined; - protected clientConfig: OpenAIClientClientOptions | undefined; - supportsStrictToolCalling?: boolean; - audio?: OpenAIClient.Chat.ChatCompletionAudioParam; - modalities?: Array; - reasoningEffort?: OpenAIClient.Chat.ChatCompletionReasoningEffort; - - //constructor(fields?: ChatOpenAIFields, configuration?: OpenAIClientClientOptions) { - //super(); - //this.client = new OpenAIClient(configuration); - ///this.clientConfig = configuration || {}; - //} + lc_serializable = true; + lc_aliases = ["chat"]; + temperature = 0.5; + topP = 1; + frequencyPenalty= 0; + presencePenalty= 0; + n=1000; + modelName = "gpt-4o"; + model="gpt-4o"; - ls_params: LangSmithParams = { - ls_model_type: "chat", + constructor({ apiKey, model = "gpt-4o" }: { apiKey: string; model?: string }) { + // this.chatModel = new ChatOpenAI({ apiKey, model }); } - getLsParams(options: this["ParsedCallOptions"]): 
LangSmithParams { - - return this.ls_params + async invoke(prompt: string) { + // return this.chatModel.invoke( prompt ); + return { content: "mock response" }; } - //console.log("getLsParams called with options:", options); - //return {}; - //} - - bindTools(tools: any[], kwargs?: Partial): any { - console.log("bindTools called with tools:", tools, "and kwargs:", kwargs); - return this; - } - - private createResponseFormat() { - console.log("createResponseFormat called"); - } - - invocationParams(options?: this["ParsedCallOptions"], extra?: { streaming?: boolean }): any { - console.log("invocationParams called with options:", options, "and extra:", extra); - return {}; - } - - identifyingParams(): any { - console.log("identifyingParams called"); - return {}; - } - - _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator { - console.log("streamResponseChunks called with messages:", messages, "and options:", options); - return (async function* () {})(); - } - - _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise { - console.log("generate called with messages:", messages, "and options:", options); - return Promise.resolve({} as ChatResult); - } - - private getEstimatedTokenCountFromPrompt() { - console.log("getEstimatedTokenCountFromPrompt called"); - } - - private getNumTokensFromGenerations() { - console.log("getNumTokensFromGenerations called"); - } - - getNumTokensFromMessages(messages: BaseMessage[]): Promise<{ totalCount: number; countPerMessage: number[] }> { - console.log("getNumTokensFromMessages called with messages:", messages); - return Promise.resolve({ totalCount: 0, countPerMessage: [] }); - } - - completionWithRetry(request: any, options?: any): Promise { - console.log("completionWithRetry called with request:", request, "and options:", options); - return Promise.resolve({}); - } - - betaParsedCompletionWithRetry(request: any, options?: any): Promise { - console.log("betaParsedCompletionWithRetry called with request:", request, "and options:", options); - return Promise.resolve({}); - } - - protected _getClientOptions(options: any): any { - console.log("getClientOptions called with options:", options); - return {}; - } - - _llmType(): string { - console.log("llmType called"); - return "ChatOpenAI"; - } - - _combineLLMOutput(...llmOutputs: any[]): any { - console.log("combineLLMOutput called with llmOutputs:", llmOutputs); - return {}; - } - - - //withStructuredOutput = Record>(outputSchema: z.ZodType | Record, config?: StructuredOutputMethodOptions): Runnable { - // console.log("withStructuredOutput called with outputSchema:", outputSchema, "and config:", config); - // return this; - //} } - /** * Service for interacting with OpenAI chat API. */ export class AIService { - private chatModel: ChatWrapper; + private chatModel: ChatWrapper; // private codeFormatter: CodeFormatter; - private chatModelFAQ: ChatWrapper; + private chatModelFAQ: ChatWrapper;// /** * Constructor for initializing the ChatOpenAI instance. @@ -3233,6 +92,9 @@ export class AIService { `Generating comment for prompt of length: ${finalPrompt.length}` ); + console.log( + `PRMPT: ${finalPrompt}` + ); try { let response; if (isFAQ) {
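+            // Illustrative sketch (the env var name and local identifiers here are assumptions),
+            // showing how the stubbed ChatWrapper above behaves; its invoke() resolves to a canned payload:
+            //
+            //   const model = new ChatWrapper({ apiKey: process.env.OPENAI_API_KEY ?? "" });
+            //   const reply = await model.invoke(finalPrompt);
+            //   console.log(reply.content); // "mock response" until a real chat model is wired back in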