diff --git a/docs/.vuepress/sidebar.ts b/docs/.vuepress/sidebar.ts
index a505d7dddf8..1a2e2275ed6 100644
--- a/docs/.vuepress/sidebar.ts
+++ b/docs/.vuepress/sidebar.ts
@@ -695,7 +695,11 @@ export const getSidebar = (locale: string) =>
         text: "Guides",
         link: `${locale}/ai/guides/fancy-greeter.md`,
-        children: [`${locale}/ai/guides/fancy-greeter.md`],
+        children: [
+          `${locale}/ai/guides/fancy-greeter.md`,
+          `${locale}/ai/guides/subquery-docs-rag.md`,
+          `${locale}/ai/guides/delegation-helper.md`,
+        ],
       },
       {
         text: "Build",
@@ -716,8 +720,8 @@ export const getSidebar = (locale: string) =>
       ],
     },
     {
-      text: "Query",
-      link: `${locale}/ai/query/query.md`,
+      text: "API",
+      link: `${locale}/ai/api/api.md`,
     },
     {
       text: "Publish",
diff --git a/docs/ai/api/api.md b/docs/ai/api/api.md
new file mode 100644
index 00000000000..6202c6df771
--- /dev/null
+++ b/docs/ai/api/api.md
@@ -0,0 +1,8 @@
# AI App Query API

SubQuery AI Apps expose the industry-standard [OpenAI Completions API](https://platform.openai.com/docs/api-reference/), ensuring compatibility with a wide range of applications and tools. This makes it easier for developers to integrate AI capabilities into their projects while adhering to established standards.

The HTTP interface implements the following OpenAI API methods:

- [GET `/v1/models`](https://platform.openai.com/docs/api-reference/models)
- [POST `/v1/chat/completions`](https://platform.openai.com/docs/api-reference/chat/create)
diff --git a/docs/ai/guides/delegation-helper.md b/docs/ai/guides/delegation-helper.md
new file mode 100644
index 00000000000..dc8ea374c6f
--- /dev/null
+++ b/docs/ai/guides/delegation-helper.md
@@ -0,0 +1,321 @@
# SubQuery Network Delegation Helper - Complex Example with Advanced Function Tools

This is a more advanced example of a SubQuery AI application. It is an agent specifically designed to assist users with questions relating to their token delegation on the SubQuery Network. The agent utilizes multiple tools to extract and interpret relevant information about delegation from raw on-chain data (indexed using SubQuery, of course).

This showcases an excellent integration of an AI framework with the SubQuery Indexing SDK, enabling the structuring of natural language responses based on on-chain data.

:::info note
You can follow along in the tutorial with the [example code here](https://github.com/subquery/subql-ai-app-example/tree/main/network-delegation-helper).
:::

<!-- @include: ../snippets/prerequisites.md -->

## 1. Install the framework

<!-- @include: ../snippets/install-the-framework.md -->

## 2. Create a New App

<!-- @include: ../snippets/create-a-new-app.md -->

## 3. 
Configure Manifest File

<!-- @include: ../snippets/configure-manifest-file.md -->

We need to update the manifest file to the following:

```ts
/** Gets the host names of any urls in a record */
export function extractConfigHostNames(
  config: Record<string, unknown>
): string[] {
  const hosts = Object.values(config)
    .filter((v) => typeof v === "string")
    .map((v) => {
      try {
        return new URL(v).hostname;
      } catch (_e) {
        return undefined;
      }
    })
    .filter((v) => !!v) as string[]; // Cast should be unnecessary with latest TS versions

  // Make unique
  return [...new Set(hosts)];
}

const defaultConfig = Value.Default(ConfigType, {} as Config) as Config;

const project: ProjectManifest = {
  specVersion: "0.0.1",
  endpoints: extractConfigHostNames(defaultConfig),
  config: JSON.parse(JSON.stringify(ConfigType)), // Convert to JSON Schema
  model: "llama3.1",
  entry: "./project.ts",
};

export default project;
```

Alongside the imports of the necessary types and functions (omitted here), the file defines a utility function and creates the project manifest object. Since we're sharing the config logic between the manifest file and the app's code, we import `ConfigType`, which we will explain later when walking through the code.

## 4. Configure App's Logic

<!-- @include: ../snippets/configure-app-logic.md -->

<!-- @include: ../snippets/update-system-prompt.md -->

```ts
const PROMPT = `
You are an agent designed to help a user with their token delegation on the SubQuery Network.
Given an input question, use the available tools to answer the user's question quickly and concisely.
Your answer must use the results of the available tools.
Do not mention that you used a tool or the name of a tool.
If you need more information to answer the question, ask the user for more details.
All token amounts are in SQT.

If the question seems to be unrelated to the SubQuery Network, just return "I don't know" as the answer.
`;
```

<!-- @include: ../snippets/add-a-function-tool.md -->

Since delegation data can only be derived from chain events, it must be pre-indexed to enable faster and simpler querying. For this reason, we are including a relevant GraphQL endpoint that provides access to this data. Additionally, we are establishing a connection to an RPC node to retrieve token balances, although the same results could be achieved if the indexer were configured to include balance data.

Each tool can be assigned specific endpoints to ensure requests are routed correctly. Importantly, this data does not need to be hardcoded and can be provided dynamically in response to the LLM's request. 
In our case, the configuration might look like this:

```ts
export const ConfigType = Type.Object({
  GRAPHQL_ENDPOINT: Type.String({
    default: "https://api.subquery.network/sq/subquery/subquery-mainnet",
  }),
  BASE_RPC: Type.String({
    default: "https://gateway.subquery.network/rpc/base-full",
  }),
  BASE_SQT_ADDR: Type.String({
    default: "0x858c50C3AF1913b0E849aFDB74617388a1a5340d",
  }),
});

export type Config = Static<typeof ConfigType>;
```

You can now include the tools in the array and supply the configuration during initialisation:

```ts
const entrypoint: ProjectEntry = async (config: Config): Promise<Project> => {
  return {
    tools: [
      new TotalDelegation(config.GRAPHQL_ENDPOINT),
      new DelegatedIndexers(config.GRAPHQL_ENDPOINT),
      new UnclaimedDelegatorRewards(config.GRAPHQL_ENDPOINT),
      new CurrentDelegatorApy(config.GRAPHQL_ENDPOINT),
      new BetterIndexerApy(config.GRAPHQL_ENDPOINT),
      new TokenBalance(
        new JsonRpcProvider(config.BASE_RPC),
        config.BASE_SQT_ADDR
      ),
    ],
    systemPrompt: PROMPT,
  };
};

export default entrypoint;
```

Once the tools are added to the array, you can proceed with implementing them.

As mentioned earlier, there are two methods for obtaining data: using an SDK to access indexed data via GraphQL queries, and fetching data directly from the RPC node.

Given the number of tools included in the project, the logic can be modularised and split across multiple files. To achieve this, the original project includes a dedicated `tools.ts` file for managing these tools.

### 4.1 Obtaining Data from Indexers

Let's take the `TotalDelegation` tool as an example to illustrate its implementation in detail. This tool calculates the total delegation amount of SQT for a given user address. If no delegation is found, it returns `null`. The tool's implementation looks like this:

```ts
export class TotalDelegation extends FunctionTool {
  constructor(readonly endpoint: string) {
    super();
  }

  // name = 'total-delegation-amount';
  description = `This tool gets the total delegation amount of SQT for the given user address.
  If no delegation is found it will return null.
  `;
  parameters = {
    type: "object",
    required: ["account"],
    properties: {
      account: {
        type: "string",
        description:
          "The account or address of the user which to get delegation information for",
      },
    },
  };

  async call({ account }: { account: string }): Promise<string | null> {
    try {
      const res = await graphqlRequest<{
        delegator: null | { totalDelegations: Amount };
      }>(
        this.endpoint,
        `{
          delegator(id: "${account}") {
            totalDelegations
          }
        }`
      );

      if (!res.delegator) {
        return null;
      }

      return formatEther(res.delegator.totalDelegations.valueAfter.value);
    } catch (error) {
      return `${error}`;
    }
  }
}
```

The tool queries the configured GraphQL endpoint for the delegator record, then converts the raw on-chain token amount into a human-readable SQT value using `formatEther`. 
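Before wiring the tool into the agent, you can sanity-check it in isolation. The following is a minimal sketch, assuming the default endpoint from the config above and reusing the example address that appears in the prompts later in this guide:

```ts
// Standalone check of the tool, outside the LLM loop (sketch only).
const tool = new TotalDelegation(
  "https://api.subquery.network/sq/subquery/subquery-mainnet"
);

const total = await tool.call({
  account: "0x108A496cDC32DA84e4D5905bb02ED695BC1024cd",
});

// Logs a formatted SQT amount such as "1000.0", or null if there is no delegation
console.log(total);
```
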
Every tool utilising GraphQL depends on an external function named `graphqlRequest`, which can be relocated to a separate file and implemented as follows:

```ts
export async function graphqlRequest<T>(
  endpoint: string,
  query: string,
  variables?: unknown
): Promise<T> {
  const response = await fetch(endpoint, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      query,
      variables,
    }),
  });

  const res = await response.json();

  if (res.errors) {
    console.log(`Request failed\n${query}`);

    throw new Error(
      res.errors.map((e: { message: string }) => e.message).join("\n")
    );
  }

  return res.data;
}
```

This function is an asynchronous utility created to send GraphQL requests to a designated endpoint. It is a generic function, enabling the caller to define the expected structure of the response data.

### 4.2 Fetching Data Directly from RPC

In our example, only the `TokenBalance` tool retrieves data directly from the node. Let's review its implementation:

```ts
export class TokenBalance extends FunctionTool {
  constructor(
    readonly provider: AbstractProvider,
    readonly tokenAddress: string
  ) {
    super();
  }

  // name = 'token-balance';
  description = `This tool gets the current on chain SQT balance for the given address`;
  parameters = {
    type: "object",
    required: ["account"],
    properties: {
      account: {
        type: "string",
        description:
          "The account or address of the user which to get the balance for",
      },
    },
  };

  async call({ account }: { account: string }): Promise<string> {
    try {
      // Define the ERC-20 contract ABI (only the 'balanceOf' function is needed)
      const erc20Abi = [
        "function balanceOf(address owner) view returns (uint256)",
      ];

      const erc20Contract = new Contract(
        this.tokenAddress,
        erc20Abi,
        this.provider
      );

      const balance = await erc20Contract.balanceOf(account);

      return formatEther(balance);
    } catch (error) {
      return `${error}`;
    }
  }
}
```

The tool is built to fetch the current on-chain balance of a specific token (SQT) for a given user address. The class constructor accepts two parameters: `provider`, an instance of `AbstractProvider` used to interact with the blockchain, and `tokenAddress`, the address of the token contract, both of which are supplied from the app's configuration.

## 5. Run the AI App

<!-- @include: ../snippets/run-the-ai-app.md -->

Let's now try asking questions and obtaining responses using the previously demonstrated tools. For example, to extract the total delegations, you can use the following prompt:

```
What is the total delegation amount of SQT done for me?
```

This will return a response like:

```
The total delegation amount of SQT done for you is: 1000 SQT
```

This number is obtained from the indexer endpoint and can be cross-verified with a direct GraphQL query against the same endpoint to confirm its accuracy.

To test the second tool, you can use this prompt:

```
What is my balance?
```

Which will return:

```
Your balance is 12442989724000780042135 SQT
```

This number is fetched directly from the RPC node; you can follow up with further prompts to reformat it, for example converting the raw value into whole SQT.

Other useful prompts for this project could include:

```
"My address is 0x108A496cDC32DA84e4D5905bb02ED695BC1024cd, use this for any further prompts. What is my delegation?",
"Who am I delegating to?",
"What is my balance?",
"Do I have any unclaimed rewards?",
"What is my current APY?",
"Are there better indexers to delegate to?" 
+``` + +## Summary + +You now have a functional SubQuery AI App that utilizes the latest LLMs and integrates various function tools that access blockchain data. This example serves as a foundation for building your own tools that leverage on-chain data, allowing you to query it in a convenient way and enabling real-time data analysis through simple natural language prompts. + +[**A full version of the code for this guide can be found here**](https://github.com/subquery/subql-ai-app-example/tree/main/fancy-greeter). + + diff --git a/docs/ai/guides/fancy-greeter.md b/docs/ai/guides/fancy-greeter.md index cd8ad7650b0..682bbf1e9e4 100644 --- a/docs/ai/guides/fancy-greeter.md +++ b/docs/ai/guides/fancy-greeter.md @@ -2,61 +2,49 @@ This basic example AI App is a good starting point to learn about prompt engineering and function tooling. It's a perfect example that shows off the key features of SubQuery's AI App Framework. +:::info note You can follow along in the tutorial with the [example code here](https://github.com/subquery/subql-ai-app-example/tree/main/fancy-greeter). +::: -## Prerequisites - -You should have the following installed - -- [Docker](https://docker.com/): This tutorial will use Docker to run a local version of SubQuery's node. -- [Ollama](https://ollama.com/). An endpoint to an Ollama instance, this could be running on your local computer or a commercial endpoint online. -- [Deno](https://docs.deno.com/runtime/getting_started/installation/): A recent version of Deno, the JS engine for the SubQuery AI App Framework. + ## 1. Install the framework -Run the following command to install the SubQuery AI framework globally on your system: - -```bash -deno install -g -f --allow-env --allow-net --allow-import --allow-read --allow-write --allow-ffi --allow-run --unstable-worker-options -n subql-ai jsr:@subql/ai-app-framework/cli -``` - -This will install the CLI and Runner. Make sure you follow the suggested instructions to add it to your path. - -You can confirm installation by running `subql-ai --help`. + -## 2. Create a new App +## 2. Create a New App -You can initialise a new app using `subql-ai init`. It will ask you to provide a name and a Ollama model to use. + -![Init a new AI App](/assets/img/ai/guide-init.png) +## 3. Review the Manifest File -After you complete the initialisation process, you will see a folder with your project name created inside the directory. Please note that there should be three files, a `project.ts`, a `manifest.ts`, a `docker-compose.yml`, and a `README.md`. + -## 3. Run the AI App +The manifest file for a default project looks like the following: -We can run the project at any time using the following command, where the `-p` is the path to the `project.ts`, and `-h` is the URL of the Ollama endpoint. - -```bash -subql-ai -p ./project.ts -h http://host.docker.internal:11434 -``` - -Once the project is running you should see the following: `Listening on http://0.0.0.0:7827/` - -You can now interact with your application. The easiest way to do that is to run the repl in another terminal. +```ts +import type { ProjectManifest } from "jsr:@subql/ai-app-framework"; + +const project: ProjectManifest = { + specVersion: "0.0.1", + // Specify any hostnames your tools will make network requests too + endpoints: [], + // Your projects runtime configuration options + config: {}, + model: "llama3.2:1b", + entry: "./project.ts", +}; -```shell -subql-ai repl +export default project; ``` -This will start a CLI chat. You can type `/bye` to exit. 
-You should review the instructions on [running locally](../run/local.md) or via [Docker](../run/docker.md).
+As you can see, there are very few details to configure in our default example. The two most important settings are the `model` (a selection of models can be found [here](https://ollama.com/library)) and the `entry`, where you'll specify the path to your project's entry point.
 
-## 4. Update System Prompt
+## 4. Configure System Prompt Logic
 
-A good first place to start is by updating your system prompts in `project.ts`. System prompts are the basic way you customise the behaviour of your AI agent.
+<!-- @include: ../snippets/update-system-prompt.md -->
 
-We do this by editing the project entrypoint in the `project.ts` file, the [project entrypoint](../build/app.md#project-entrypoint) is how your tools and system prompt are initialised.
+<!-- @include: ../snippets/configure-app-logic.md -->
 
 ```ts
 const entrypoint: ProjectEntry = async (config: Config): Promise<Project> => {
@@ -72,26 +60,11 @@ const entrypoint: ProjectEntry = async (config: Config): Promise<Project> => {
 
 export default entrypoint;
 ```
 
-You can read more about edits to the `project.ts` [here](../build/app.md).
-
-## 5. Add a function tool
-
-Function tools are functions that extend the functionality of the LLM. They can be used to do many things like request data from external APIs and services, perform computations or analyse structured data outputs from the AI. You can [read more about function tooling here](../build/function_tools.md).
+## 5. Add a Function Tool
 
-We're going to add a simple function tool that does nothing more than take an input name, and reverse the name. For example, `alice` would become `ecila` and `bob` would remain `bob`.
+<!-- @include: ../snippets/add-a-function-tool.md -->
 
-Function tools consist of 4 parts:
-
-- `name`: The name of the tool, this is used to identify the tool and must be unique amongst the provided tools.
-- `description`: This is like a system prompt for the LLM to understand what the tool does and when it should be used, it should be as descriptive as possible as it allows the AI to determine when to use the tool and what it should be used for.
-- `parameters`: This defines what parameters the LLM needs to gather in order to run the tool.
-- `call`: This is the function implementation that takes an input that should match the defined parameters and return a string with the result.
-
-Note in the example below we have:
-
-- defined the function tool (`class ReverseNameTool extends FunctionTool { ... }`)
-- Added the new function tool to the list of tools (`tools: [new ReverseNameTool()],`)
-- Updated the system prompt to tell the AI to always reverse the name using the Reverse Name function (`ALWAYS REVERSE THEIR NAME USING THE REVERSE_NAME_TOOL BEFORE GREETING THEM!`).
+We're going to add a simple function tool that does nothing more than take an input name and reverse it. For example, `alice` would become `ecila`, while the palindromic `bob` would remain `bob`. To accomplish this, we need to modify the code as follows:
 
 ```ts
 class ReverseNameTool extends FunctionTool {
@@ -129,22 +102,14 @@ const entrypoint: ProjectEntry = async (config: Config): Promise<Project> => {
 
 export default entrypoint;
 ```
 
-Note that this is a very silly example of a function tool, but you can really do anything with function tools. For example:
+First, define the function tool by creating a class (`class ReverseNameTool extends FunctionTool { ... }`). Next, add this new function tool to the list of tools (`tools: [new ReverseNameTool()],`). 
Lastly, update the system prompt to instruct the AI to always reverse the name before greeting, using the Reverse Name tool (`ALWAYS USE THE REVERSE_NAME_TOOL TO REVERSE THEIR NAME BEFORE GREETING THEM!`).
+
+## 6. Run the App
 
-Make an API request to retrieve or update data
-Get the balance of a user's wallet
-Log results of certain conversations to an external service
-Potentially even create a transaction on chain
+<!-- @include: ../snippets/run-the-ai-app.md -->
 
 ## Summary
 
 You now have a running SubQuery AI App that uses the latest LLMs and also incorporates a function tool. This may be a simple and rather basic example, but it's a great starting point to building complex AI Apps and agents custom built for your application.
 
-[**A full version of the code for this guide can be found here**](https://github.com/subquery/subql-ai-app-example/tree/main/fancy-greeter).
-
-From here you may want to look at the following guides:
-
-- Detailed documentation on the [AI App Manifest](../build/app.md).
-- Enhance your AI App with [function tooling](../build/function_tools.md).
-- Give your AI App more knowledge with [RAG support](../build/rag.md).
-- [Publish your AI App](../publish/publish.md) so it can run on the [SubQuery Decentralised Network](https://app.subquery.network).
+<!-- @include: ../snippets/summary.md -->
diff --git a/docs/ai/guides/subquery-docs-rag.md b/docs/ai/guides/subquery-docs-rag.md
new file mode 100644
index 00000000000..4265171a8f9
--- /dev/null
+++ b/docs/ai/guides/subquery-docs-rag.md
@@ -0,0 +1,153 @@
# Project Documentation AI Assistant - Intermediate Example with RAG Support

This is an example of an AI app utilising RAG (Retrieval-Augmented Generation). [RAG tools](../build/rag.md) are a specialised type of [function tools](../build/function_tools.md) that enhance your LLM by integrating a vector database created from anything you choose to vectorise. In most cases this will be additional data from a knowledge base or a database; in this case, we're incorporating the SubQuery documentation website (which you're reading right now) as our RAG data.

:::info note
This tool is already in use in production for SubQuery documentation. Be sure to explore it now by clicking the corresponding button in the lower-right corner.
:::

You can follow along in the tutorial with the [example code here](https://github.com/subquery/subql-ai-app-example/tree/main/docs).

<!-- @include: ../snippets/prerequisites.md -->

## 1. Install the framework

<!-- @include: ../snippets/install-the-framework.md -->

## 2. Create a New App

<!-- @include: ../snippets/create-a-new-app.md -->

## 3. Configure Manifest File

<!-- @include: ../snippets/configure-manifest-file.md -->

To proceed, we need to define and add a RAG dataset. You can experiment using the [SubQuery documentation](https://github.com/subquery/documentation) or your own documentation, provided it can be vectorised (it's easiest if it's in Markdown format).

After downloading the documentation project to your local computer, you can define the dataset using the SubQuery CLI and the default tool by following [this guide](../build/rag.md#defining-rag). The logic for vectorisation can be found on [GitHub](https://github.com/subquery/subql-ai-app-framework/blob/main/src/embeddings/generator/generator.ts). Alternatively, you can use a custom vectorisation algorithm better suited to your specific needs.

Once the vectorisation process is complete, it will generate a folder. Copy the path to the root of this generated folder and include it in the manifest file to ingest and embed your chosen RAG source data.
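For reference, a typical invocation of the framework's embeddings generator looks roughly like the sketch below; the exact command name and flags are an assumption here, so confirm them against `subql-ai --help` and the guide linked above:

```shell
# Sketch only: verify the command and flags with `subql-ai --help`.
# -i: the markdown source directory, -o: the output vector DB folder,
# -t: the table name, --model: the embeddings model to use.
subql-ai embed-mdx -i ./documentation -o ./db -t subql-docs --model nomic-embed-text
```
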
After the modification, the manifest file will resemble the following structure:

```ts
import type { ProjectManifest } from "jsr:@subql/ai-app-framework@^0.0.5";

const project: ProjectManifest = {
  specVersion: "0.0.1",
  vectorStorage: {
    type: "lancedb",
    path: "../path-to-the-folder",
  },
  config: {},
  model: "llama3.1",
  embeddingsModel: "nomic-embed-text",
  entry: "./project.ts",
};

export default project;
```

## 4. Configure App's Logic

<!-- @include: ../snippets/configure-app-logic.md -->

<!-- @include: ../snippets/update-system-prompt.md -->

```ts
const PROMPT = `
You are designed to assist with answering questions about SubQuery, using its documentation as a reference.
Your answer must use the results of the available tools.
Do not mention that you used a tool or the name of a tool.
If you need more information to answer the question, ask the user for more details.
`;
```

<!-- @include: ../snippets/add-a-function-tool.md -->

You can now add the tool to the array:

```ts
// deno-lint-ignore require-await
const entrypoint: ProjectEntry = async (): Promise<Project> => {
  return {
    tools: [new SubqueryDocs()],
    systemPrompt: PROMPT,
  };
};

export default entrypoint;
```

After adding the tool to the array, you can implement it. No explicit configuration is needed, as vector search is handled automatically by the SubQuery AI Framework SDK in the background:

```ts
export class SubqueryDocs extends RagTool {
  constructor() {
    super("subql-docs", "content");
  }
}
```

## 5. Run the AI App

<!-- @include: ../snippets/run-the-ai-app.md -->

Let's now ask the app some specific questions:

```
How can I define a one-to-many relationship in a subquery project GraphQL schema?
```

This should return a response that looks like the following:

::: details Full Response

```
It seems like the SubQuery Docs API is still under development and doesn't support defining one-to-many relationships in GraphQL queries.

To answer your question, a one-to-many relationship can be achieved by using subqueries. Here's an example of how you could define a one-to-many relationship between two tables using a subquery:

type OneToOneExample {
  id: ID!
  anotherId: ID!
}

type Query {
  oneToMany: [OneToOneExample!] @client
}

In this query, we have a `OneToOneExample` type that has two fields: `id` and `anotherId`. The `@client` keyword is used to indicate that the field is not a built-in GraphQL field.

The subquery is defined like so:

query MyQuery {
  oneToMany: [one TO MANY ON ONE(id: $id, anotherId: $anotherId) FROM OneToOneExample!] WHERE $id IS NOT NULL AND $anotherId IS NOT NULL
}

In this query, we define a subquery that selects from the `OneToOneExample` type. The subquery is executed for each pair of distinct values in the `id` and `anotherId` fields. This will give us a list of records where one-to-many relationship between the two tables.

Please note that not all GraphQL databases support this syntax, so you may need to adjust it to fit your specific database schema.
```

:::

Another prompt:

```
Does subquery support the solana blockchain?
```

::: details Full Response

```
Subquery currently does not support Solana blockchain.
```

:::

## Summary

You now have a fully functional SubQuery AI App that leverages unstructured markdown data to deliver clearer and more concise responses about SubQuery.

<!-- @include: ../snippets/summary.md -->
diff --git a/docs/ai/query/query.md b/docs/ai/query/query.md
deleted file mode 100644
index ce38d3ecb56..00000000000
--- a/docs/ai/query/query.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# Querying your AI App
-
-SubQuery AI Apps expose the industry-standard OpenAI API, ensuring compatibility with a wide range of applications and tools. This makes it easier for developers to integrate AI capabilities into their projects while adhering to established standards.
-
-The http interface implements OpenAPI interface methods:
-
-- [GET `/v1/models`](https://platform.openai.com/docs/api-reference/models)
-- [POST `/v1/chat/completions`](https://platform.openai.com/docs/api-reference/chat/create)
diff --git a/docs/ai/run/cli.md b/docs/ai/run/cli.md
index 19472e6c42a..ff3af8ba1d8 100644
--- a/docs/ai/run/cli.md
+++ b/docs/ai/run/cli.md
@@ -51,7 +51,7 @@ These can also be specified with environment variables. They should be prefixed
 Run an AI app.
 
 ```shell
-subql-ai -p ./path/to/project.ts
+subql-ai -p ./path/to/manifest.ts
 ```
 
 ### `info`
@@ -59,7 +59,7 @@
 Get information on a project.
 
 ```shell
-subql-ai info -p ./path/to/project.ts
+subql-ai info -p ./path/to/manifest.ts
 ```
 
 ::: details Example output
@@ -143,7 +143,7 @@ subql-ai repl
 Publish your project to IPFS, this is how you can distribute your project
 
 ```shell
-subql-ai publish -p ./path/to/project.ts
+subql-ai publish -p ./path/to/manifest.ts
 ```
 
 ::: tip Info
diff --git a/docs/ai/run/local.md b/docs/ai/run/local.md
index ace2e803e03..6c16b3b6117 100644
--- a/docs/ai/run/local.md
+++ b/docs/ai/run/local.md
@@ -1,11 +1,9 @@
 # Running Locally
 
-Now that you have made your application and built it, you can run it locally to test it out.
-
-To do so run the following command, where the `-p` is the path to the `project.ts`, and `-h` is the URL of the Ollama endpoint:
+Now that you have made your application and built it, you can run it locally to test it out. To do so, run the following command, where `-p` is the path to the `manifest.ts` and `-h` is the URL of the Ollama endpoint:
 
 ```shell
-subql-ai -p ./path/to/project.ts -h http://ollama.public.url
+subql-ai -p ./path/to/manifest.ts -h http://ollama.public.url
 ```
 
 Once the project is running you should see the following: `Listening on http://0.0.0.0:7827/`
diff --git a/docs/ai/snippets/add-a-function-tool.md b/docs/ai/snippets/add-a-function-tool.md
new file mode 100644
index 00000000000..7c50bb5acb5
--- /dev/null
+++ b/docs/ai/snippets/add-a-function-tool.md
@@ -0,0 +1 @@
Adding function tools is an important step of any integrated AI App. Function tools are functions that extend the functionality of the LLM. They can be used to do many things, like requesting data from external APIs and services, performing computations, or analysing structured data outputs from the AI. You can read more about function tooling [here](../build/function_tools.md).
diff --git a/docs/ai/snippets/configure-app-logic.md b/docs/ai/snippets/configure-app-logic.md
new file mode 100644
index 00000000000..d5b9a802fde
--- /dev/null
+++ b/docs/ai/snippets/configure-app-logic.md
@@ -0,0 +1 @@
To configure the app, you'll need to edit the project entry point file (e.g., `project.ts` in this example). The [project entry point](../build/app.md#project-entrypoint) is where the tools and system prompt are initialised.
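In its most stripped-down form, an entry point has the following shape; a minimal sketch, where the system prompt text is a placeholder of our own:

```ts
import type { Project, ProjectEntry } from "jsr:@subql/ai-app-framework";

// A minimal entry point: no tools yet, just a placeholder system prompt.
const entrypoint: ProjectEntry = async (): Promise<Project> => {
  return {
    tools: [],
    systemPrompt: "You are a helpful assistant.",
  };
};

export default entrypoint;
```
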
diff --git a/docs/ai/snippets/configure-manifest-file.md b/docs/ai/snippets/configure-manifest-file.md
new file mode 100644
index 00000000000..96a94d5cccc
--- /dev/null
+++ b/docs/ai/snippets/configure-manifest-file.md
@@ -0,0 +1 @@
The file `manifest.ts` defines key configuration options for your app. You can find the configuration specifics [here](../build/app.md#project-manifest).
diff --git a/docs/ai/snippets/create-a-new-app.md b/docs/ai/snippets/create-a-new-app.md
new file mode 100644
index 00000000000..27228373c4e
--- /dev/null
+++ b/docs/ai/snippets/create-a-new-app.md
@@ -0,0 +1,5 @@
You can initialise a new app using `subql-ai init`. It will ask you to provide a name and either an OpenAI endpoint or an Ollama model to use.

![Init a new AI App](/assets/img/ai/guide-init.png)

After you complete the initialisation process, you will see a folder with your project name created inside the directory. Please note that there should be four files: a `project.ts`, a `manifest.ts`, a `docker-compose.yml`, and a `README.md`.
diff --git a/docs/ai/snippets/install-the-framework.md b/docs/ai/snippets/install-the-framework.md
new file mode 100644
index 00000000000..e25bd6ba6d7
--- /dev/null
+++ b/docs/ai/snippets/install-the-framework.md
@@ -0,0 +1,9 @@
Run the following command to install the SubQuery AI framework globally on your system:

```bash
deno install -g -f --allow-env --allow-net --allow-import --allow-read --allow-write --allow-ffi --allow-run --unstable-worker-options -n subql-ai jsr:@subql/ai-app-framework/cli
```

This will install the CLI and Runner. Make sure you follow the suggested instructions to add it to your path.

You can confirm installation by running `subql-ai --help`.
diff --git a/docs/ai/snippets/prerequisites.md b/docs/ai/snippets/prerequisites.md
new file mode 100644
index 00000000000..324e55c7ab4
--- /dev/null
+++ b/docs/ai/snippets/prerequisites.md
@@ -0,0 +1,11 @@
## Prerequisites

In order to run an AI App locally, you must have the following services installed:

- [Docker](https://docker.com/): This tutorial will use Docker to run a local version of SubQuery's node.
- [Deno](https://docs.deno.com/runtime/getting_started/installation/): A recent version of Deno, the JS engine for the SubQuery AI App Framework.

You will also need access to either an Ollama or OpenAI inference endpoint:

- [Ollama](https://ollama.com/): An endpoint to an Ollama instance; this could be running on your local computer or be a commercial endpoint online, or
- [OpenAI](https://platform.openai.com): You will need a paid API key.
diff --git a/docs/ai/snippets/run-the-ai-app.md b/docs/ai/snippets/run-the-ai-app.md
new file mode 100644
index 00000000000..a9901abbc32
--- /dev/null
+++ b/docs/ai/snippets/run-the-ai-app.md
@@ -0,0 +1,15 @@
We can run the project at any time using the following command, where `-p` is the path to the `manifest.ts` and `-h` is the URL of the Ollama endpoint.

```bash
subql-ai -p ./manifest.ts -h http://host.docker.internal:11434
```

Once the project is running you should see the following: `Listening on http://0.0.0.0:7827/`. You can now interact with your application. The easiest way to do that is to run the repl in another terminal:

```shell
subql-ai repl
```

This will start a CLI chat. You can type `/bye` to exit. You should also review the detailed instructions on [running locally](../run/local.md) or via [Docker](../run/docker.md).
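Alternatively, since every AI App exposes the standard [Query API](../api/api.md), you can interact with it over HTTP. A minimal sketch, where the model name is an assumption; substitute whatever your manifest specifies:

```shell
# List the models the running app exposes
curl http://localhost:7827/v1/models

# Send a chat message via the OpenAI-compatible completions endpoint
curl http://localhost:7827/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "llama3.1", "messages": [{"role": "user", "content": "Hello!"}]}'
```
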
diff --git a/docs/ai/snippets/summary.md b/docs/ai/snippets/summary.md
new file mode 100644
index 00000000000..8cdd36e8457
--- /dev/null
+++ b/docs/ai/snippets/summary.md
@@ -0,0 +1,7 @@
From here you may want to look at the following guides:

- Detailed documentation on the [AI App Manifest](../build/app.md).
- Enhance your AI App with [function tooling](../build/function_tools.md).
- Give your AI App more knowledge with [RAG support](../build/rag.md).
- Query your AI App through its [OpenAI-compatible API](../api/api.md).
- [Publish your AI App](../publish/publish.md) so it can run on the [SubQuery Decentralised Network](https://app.subquery.network).
diff --git a/docs/ai/snippets/update-system-prompt.md b/docs/ai/snippets/update-system-prompt.md
new file mode 100644
index 00000000000..69106c929b5
--- /dev/null
+++ b/docs/ai/snippets/update-system-prompt.md
@@ -0,0 +1 @@
A good first place to start is by updating your **system prompts**. System prompts are the basic way you customise the behaviour of your AI agent.
diff --git a/docs/ai/welcome.md b/docs/ai/welcome.md
index 6168506b178..d328154f0d0 100644
--- a/docs/ai/welcome.md
+++ b/docs/ai/welcome.md
@@ -13,7 +13,7 @@ AI apps are self contained and easily scalable AI agents that you can use to pow
 - **Your AI journey starts here:** The SubQuery AI App framework is designed with user-friendliness in mind, providing intuitive wrappers around core features. This lowers the barrier to entry for developers of all skill levels, making it easier to create, run, and deploy AI Apps.
 - **Connect, create, and integrate with function tooling:** You can extend your AI Apps with additional [function tooling](./build/function_tools.md), facilitating connections to external systems and tools. This capability enables rich integrations, allowing users to create versatile applications that can interact seamlessly with blockchains and other ecosystems.
 - **Choose your model:** By supporting a range of open-source Ollama LLM models as well as, OpenAI, the SubQuery AI App Framework ensures that users can choose the best model for their applications without being locked into a specific model ecosystem. This flexibility fosters open-source innovation.
-- **Proven standards for seamless integration:** SubQuery AI Apps expose the industry-standard [OpenAI API](./query/query.md), ensuring compatibility with a wide range of applications and tools. This makes it easier for developers to integrate AI capabilities into their projects while adhering to established standards.
+- **Proven standards for seamless integration:** SubQuery AI Apps expose the industry-standard [OpenAI API](./api/api.md), ensuring compatibility with a wide range of applications and tools. This makes it easier for developers to integrate AI capabilities into their projects while adhering to established standards.
 
 ![AI App Framework Features](/assets/img/ai/features.jpg)
 
@@ -31,14 +31,7 @@ For example, you could use it to build:
 
 ## Getting Started
 
-### Setup your environment
-
-To use the framework there are a couple of dependencies:
-
-- [Deno](https://deno.land/). The SubQuery AI framework is built on Deno and is needed to build your app.
-- An LLM
-  - [Ollama](https://ollama.com/). Alternatively an endpoint to an Ollama instance.
-  - [OpenAI](https://platform.openai.com). You will need a paid API Key.
+<!-- @include: ./snippets/prerequisites.md -->
 
 ### Install the framework