From 9c6ed2410d129510dd9c76da366de5a51164d0b5 Mon Sep 17 00:00:00 2001
From: J2-D2-3PO <188380414+J2-D2-3PO@users.noreply.github.com>
Date: Thu, 27 Feb 2025 14:55:53 -0700
Subject: [PATCH] docs(weave): Get started sidebar cleaning

---
 docs/docs/get-started.md              |  3 ++
 docs/docs/guides/core-types/models.md |  6 +++
 docs/docs/quickstart.md               | 58 ++++++++++++++++----------
 docs/docs/tutorial-eval.md            |  2 +-
 docs/docs/tutorial-tracing_2.md       | 39 +++++++++---------
 docs/docs/tutorial-weave_models.md    | 59 ++++++++++++---------------
 docs/sidebars.ts                      | 36 ++++++++--------
 7 files changed, 111 insertions(+), 92 deletions(-)
 create mode 100644 docs/docs/get-started.md

diff --git a/docs/docs/get-started.md b/docs/docs/get-started.md
new file mode 100644
index 000000000000..454dcbd53184
--- /dev/null
+++ b/docs/docs/get-started.md
@@ -0,0 +1,3 @@
+# Quickstart
+
+E pluribus unum...
\ No newline at end of file
diff --git a/docs/docs/guides/core-types/models.md b/docs/docs/guides/core-types/models.md
index 34d2434108e7..5dc0f73edb33 100644
--- a/docs/docs/guides/core-types/models.md
+++ b/docs/docs/guides/core-types/models.md
@@ -169,3 +169,9 @@ A `Model` is a combination of data (which can include configuration, trained mod
    ```
+
+
+## Usage notes
+- You can use `predict` instead of `invoke` for the name of the function in your Weave `Model`.
+- If you want other class methods to be tracked by Weave, decorate them with `weave.op()`.
+- Attributes with names that start with an underscore are ignored by Weave and won't be logged.
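The usage notes added to `models.md` above are easier to follow with a concrete sketch. The snippet below is illustrative only and is not part of the patch; the class, project name, and prompts are made up. It shows a `weave.Model` that uses `predict` in place of `invoke`, tracks an extra helper method with `weave.op()`, and keeps an underscore-prefixed attribute out of Weave's logs:

```python
import weave
from openai import OpenAI


class GrammarCorrector(weave.Model):  # hypothetical model, not taken from the docs
    model_name: str
    system_prompt: str
    _scratch_notes: str = "never logged"  # leading underscore: ignored by Weave

    @weave.op()
    def clean_input(self, text: str) -> str:
        # A helper method is tracked only because it is decorated with weave.op()
        return text.strip()

    @weave.op()
    def predict(self, text: str) -> str:
        # `predict` is accepted in place of `invoke` as the model's entry point
        client = OpenAI()
        response = client.chat.completions.create(
            model=self.model_name,
            messages=[
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": self.clean_input(text)},
            ],
        )
        return response.choices[0].message.content


weave.init("usage-notes-example")  # project name is arbitrary
corrector = GrammarCorrector(model_name="gpt-4o", system_prompt="Correct the grammar.")
print(corrector.predict("  i has a apple  "))
```

Changing `model_name` or `system_prompt` produces a new tracked version, while `_scratch_notes` never appears in the logged model.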
diff --git a/docs/docs/quickstart.md b/docs/docs/quickstart.md
index 0ec6ad3e151d..7b0dcbfa04cf 100644
--- a/docs/docs/quickstart.md
+++ b/docs/docs/quickstart.md
@@ -1,17 +1,22 @@
 import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';

-# Track LLM inputs & outputs
+# Log a trace

-Follow these steps to track your first call or Open In Colab
+Follow these steps to track your first call.

-## 1. Install Weave and create an API Key
+:::tip
+You can try the Quickstart as a Jupyter Notebook.
+Open In Colab
+:::

-**Install weave**
+## 1. Prerequisites

-First install the weave library:
+### Install weave
+
+First, install the `weave` library:

@@ -26,22 +31,37 @@ First install the weave library:

-**Get your API key**
+### Create a W&B account
+
+Next, create a Weights & Biases (W&B) account:
+1. Navigate to [https://wandb.ai](https://wandb.ai).
+2. Click **Sign Up**.
+3. In the sign-up modal, enter an email and password, or use one of the available authentication providers.
+
+### Get your API key
+
+Once you've created your account, copy and set your W&B API key:

-Then, create a Weights & Biases (W&B) account at https://wandb.ai and copy your API key from https://wandb.ai/authorize
+1. Navigate to [https://wandb.ai/authorize](https://wandb.ai/authorize).
+2. Copy your API key.
+3. Set the API key as the value of the `WANDB_API_KEY` environment variable.

 ## 2. Log a trace to a new project

-To get started with tracking your first project with Weave:
+To track LLM calls:

-- Import the `weave` library
-- Call `weave.init('project-name')` to start tracking
-  - You will be prompted to log in with your API key if you are not yet logged in on your machine.
-  - To log to a specific W&B Team name, replace `project-name` with `team-name/project-name`
-  - **NOTE:** In automated environments, you can define the environment variable `WANDB_API_KEY` with your API key to login without prompting.
-- Add the `@weave.op()` decorator to the python functions you want to track
+1. Import the `weave` library.
+2. Call `weave.init('project-name')`. You will be prompted to log in with your API key if you are not yet logged in on your machine.

-_In this example, we're using openai so you will need to add an OpenAI [API key](https://platform.openai.com/docs/quickstart/step-2-setup-your-api-key)._
+   :::tip
+   To log to a specific W&B team, replace `project-name` with `team-name/project-name`.
+   :::
+
+3. Add the `@weave.op()` decorator to the Python functions you want to track.
+
+:::important
+In the following example, you will need an OpenAI [API key](https://platform.openai.com/docs/quickstart/step-2-setup-your-api-key).
+:::

@@ -129,13 +149,9 @@ _In this example, we're using openai so you will need to add an OpenAI [API key]

-## 3. Automated LLM library logging
-
-Calls made to OpenAI, Anthropic and [many more LLM libraries](./guides/integrations/index.md) are automatically tracked with Weave, with **LLM metadata**, **token usage** and **cost** being logged automatically. If your LLM library isn't currently one of our integrations you can track calls to other LLMs libraries or frameworks easily by wrapping them with `@weave.op()`.
-
-## 4. See traces of your application in your project
+## 3. View traces in the UI

-🎉 Congrats! Now, every time you call this function, weave will automatically capture the input & output data and log any changes made to the code.
+🎉 Congrats! Now, every time you call this function, `weave` automatically captures the input and output data and logs any changes made to the code.

 ![Weave Trace Outputs 1](../static/img/tutorial_trace_1.png)

diff --git a/docs/docs/tutorial-eval.md b/docs/docs/tutorial-eval.md
index 73aa39f16889..184a74517878 100644
--- a/docs/docs/tutorial-eval.md
+++ b/docs/docs/tutorial-eval.md
@@ -1,7 +1,7 @@
 import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';

-# Tutorial: Build an Evaluation pipeline
+# Build a basic evaluation pipeline

 To iterate on an application, we need a way to evaluate if it's improving. To do so, a common practice is to test it against the same set of examples when there is a change. Weave has a first-class way to track evaluations with `Model` & `Evaluation` classes. We have built the APIs to make minimal assumptions to allow for the flexibility to support a wide array of use-cases.

diff --git a/docs/docs/tutorial-tracing_2.md b/docs/docs/tutorial-tracing_2.md
index b6e1cb46ab0b..774cc9e4e70c 100644
--- a/docs/docs/tutorial-tracing_2.md
+++ b/docs/docs/tutorial-tracing_2.md
@@ -1,20 +1,22 @@
 import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';

-# Track data flows and app metadata
+# Track nested functions and metadata

-In the [Track LLM inputs & outputs](/quickstart) tutorial, the basics of tracking the inputs and outputs of your LLMs was covered.
+In the [Log a trace](/quickstart) tutorial, you learned how to create a Weave project and log your first trace.

-In this tutorial you will learn how to:
+In this guide, you will learn how to:

-- **Track data** as it flows through your application
-- **Track metadata** at call time
+- Track nested function calls
+- Track metadata at call time

-## Tracking nested function calls
+## Track nested function calls

-LLM-powered applications can contain multiple LLMs calls and additional data processing and validation logic that is important to monitor. Even deep nested call structures common in many apps, Weave will keep track of the parent-child relationships in nested functions as long as `weave.op()` is added to every function you'd like to track.
+Creating LLM-powered applications often requires multiple functions: nested functions for LLM calls, additional data processing, and validation logic. It is important for LLM application developers to be able to monitor and analyze these nested functions.
+
+With Weave, you can automatically track the parent-child relationships in nested functions as long as `weave.op()` is added to every function you'd like to track.

-Building on our [basic tracing example](/quickstart), we will now add additional logic to count the returned items from our LLM and wrap them all in a higher level function. We'll then add `weave.op()` to trace every function, its call order and its parent-child relationship:
+Building on the [basic tracing example](/quickstart), the following example adds additional functions (`extract_dinos` and `count_dinos`) to extract and count the items returned by `gpt-4o`. The `weave.op()` decorator is added to every function for tracing. Now, Weave keeps track of every function in the application, including parent-child relationships.

@@ -74,9 +76,8 @@ Building on our [basic tracing example](/quickstart), we will now add additional
     result = dino_tracker(sentence)
     print(result)
     ```
-    **Nested functions**

-    When you run the above code you will see the the inputs and outputs from the two nested functions (`extract_dinos` and `count_dinos`), as well as the automatically-logged OpenAI trace.
+    To view the trace data for the inputs and outputs from the nested functions, as well as the automatically-logged OpenAI trace, run the code sample and navigate to your Weave **Traces** tab.

     ![Nested Weave Trace](../static/img/tutorial_tracing_2_nested_dinos.png)

@@ -141,11 +142,17 @@

-## Tracking metadata
+## Track metadata
+
+You can track metadata using the `weave.attributes` context manager. Pass it a dictionary of the metadata you want to track at call time.
+
+:::tip
+Using `weave.attributes` is only recommended for tracking runtime metadata such as user IDs and environment information (production, development, etc.).

-Tracking metadata can be done easily by using the `weave.attributes` context manager and passing it a dictionary of the metadata to track at call time.
+To track system attributes, such as a system prompt, use [Weave `Model`s](guides/core-types/models).
+:::

-Continuing our example from above:
+The following example builds on [Track nested function calls](#track-nested-function-calls). A dictionary containing `user_id` and `env` metadata is passed to `weave.attributes`. Now, when `dino_tracker` is called on `sentence`, Weave automatically logs the metadata.

-:::note
-It's recommended to use metadata tracking to track metadata at run time, e.g. user ids or whether or not the call is part of the development process or is in production etc.
-
-To track system attributes, such as a System Prompt, we recommend using [weave Models](guides/core-types/models)
-:::
-
 ## What's next?

 - Follow the [App Versioning tutorial](/tutorial-weave_models) to capture, version and organize ad-hoc prompt, model, and application changes.
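The metadata pattern described in the `tutorial-tracing_2.md` changes above comes down to wrapping tracked calls in a `weave.attributes` block. A minimal sketch follows; the op is a stand-in for the tutorial's OpenAI-backed `dino_tracker`, and the `user_id`/`env` values are placeholders:

```python
import weave

weave.init("jurassic-park")  # project name borrowed from the tutorial's example


@weave.op()
def dino_tracker(sentence: str) -> str:
    # Stand-in for the tutorial's dinosaur-counting pipeline, which calls OpenAI
    return sentence.upper()


# Every call made inside the block is logged with this metadata attached
with weave.attributes({"user_id": "lukas", "env": "production"}):
    result = dino_tracker("I watched as a Tyrannosaurus rex chased two Velociraptors.")
    print(result)
```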
diff --git a/docs/docs/tutorial-weave_models.md b/docs/docs/tutorial-weave_models.md
index 2a18e1ccc653..ea96e274d071 100644
--- a/docs/docs/tutorial-weave_models.md
+++ b/docs/docs/tutorial-weave_models.md
@@ -1,34 +1,30 @@
 import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';

-# App versioning
+# Track app versions

-Tracking the [inputs, outputs, metadata](/quickstart) as well as [data flowing through your app](/tutorial-tracing_2) is critical to understanding the performance of your system. However **versioning your app over time** is also critical to understand how modifications to your code or app attributes change your outputs. Weave's `Model` class is how these changes can be tracked in Weave.
-
-In this tutorial you'll learn:
-
-- How to use Weave `Model` to track and version your app and its attributes.
-- How to export, modify and re-use a Weave `Model` already logged.
-
-## Using `weave.Model`
+In [Log a trace](/quickstart) and [Track nested functions and metadata](/tutorial-tracing_2), you learned important Weave fundamentals: logging a call to Weave, tracking nested functions, and logging metadata. Building on this, it's critical to understand how modifications to your application code and/or attributes change application outputs. With Weave's `Model` class, you can track application versions, understand how changes between versions affect application behavior, and store and version changing application attributes like model vendor IDs, system prompts, temperature, and more.

 :::important
+The `Model` class is currently only available in Python.
+:::

-The `weave.Model` class is currently only supported in Python.
+In this guide, you'll learn:

-:::
+- How to use `Model` to track and version your app and its attributes.
+- How to export, modify and reuse a `Model` that you've already logged.

-Using Weave `Model`s means that attributes such as model vendor ids, prompts, temperature, and more are stored and versioned when they change.
+## Use `Model` to version an app

-To create a `Model` in Weave, you need the following:
+To create a `Model`, do the following:

-- a class that inherits from `weave.Model`
-- type definitions on all class attributes
-- a typed `invoke` function with the `@weave.op()` decorator
+1. Define a class that inherits from `weave.Model`.
+2. Add type definitions to all class attributes.
+3. Add a typed `invoke` function with the `@weave.op()` decorator to your class.

-When you change the class attributes or the code that defines your model, **these changes will be logged and the version will be updated**. This ensures that you can compare the generations across different versions of your app.
+When you change the class attributes or the code that defines your model, Weave automatically logs the changes and updates the application version. Now, you can easily compare output across different versions of your app.

-In the example below, the **model name, temperature and system prompt will be tracked and versioned**:
+In the example below, the model name, temperature and system prompt are tracked and versioned using `Model`.

@@ -82,7 +78,7 @@ In the example below, the **model name, temperature and system prompt will be tr

-Now you can instantiate and call the model with `invoke`:
+Now, you can instantiate and call the model with `.invoke`:

@@ -118,25 +114,22 @@ Now you can instantiate and call the model with `invoke`:

-Now after calling `.invoke` you can see the trace in Weave **now tracks the model attributes as well as the code** for the model functions that have been decorated with `weave.op()`. You can see the model is also versioned, "v21" in this case, and if you click on the model **you can see all of the calls** that have used that version of the model
+After calling `.invoke`, you can view the trace in Weave. Now, model attributes are tracked along with model functions that have been decorated with `weave.op()`. You can see the model is also versioned (in the example, `v21`). Click on the model to see all of the calls that have used that version of the model.

 ![Re-using a weave model](../static/img/tutorial-model_invoke3.png)

-**A note on using `weave.Model`:**
-
-- You can use `predict` instead of `invoke` for the name of the function in your Weave `Model` if you prefer.
-- If you want other class methods to be tracked by weave they need to be wrapped in `weave.op()`
-- Attributes starting with an underscore are ignored by weave and won't be logged
-
-## Exporting and re-using a logged `weave.Model`
+## Export and reuse a `Model`

-Because Weave stores and versions Models that have been invoked, it is possible to export and re-use these models.
+Because Weave stores and versions `Model`s that have been invoked, you can export and reuse these models. To do so, complete the following steps:

-**Get the Model ref**
-In the Weave UI you can get the Model ref for a particular version
+1. In the Weave UI, navigate to the **Models** tab.
+2. In the row for the `Model` with versions that you want to export or reuse, click the contents of the **Versions** column. The available versions display.
+3. In the **Object** column, click the name of the `Model` version that you want to reuse or export. A pop-up modal displays.
+4. Select the **Use** tab.
+5. Under `The ref for this model version is:`, copy the `Model` URI (e.g. `weave:///wandb/weave-intro-notebook/object/OpenAIGrammarCorrector:a21QVEgoDsNJKFHo7FkLd6S2gsf4frMXYMpwX2Qg7sw`).
+6. To retrieve the `Model` version for export or reuse, call `weave.ref("<your-uri>").get()`, replacing `<your-uri>` with the URI you copied.

-**Using the Model**
-Once you have the URI of the Model object, you can export and re-use it. Note that the exported model is already initialised and ready to use:
+The following code example builds on the example in [Use `Model` to version an app](#use-model-to-version-an-app) and shows reuse of the `Model` version specified by `weave:///morgan/jurassic-park/object/ExtractDinos:ey4udBU2MU23heQFJenkVxLBX4bmDsFk7vsGcOWPjY4`.

@@ -161,7 +154,7 @@ Once you have the URI of the Model object, you can export and re-use it. Note th

-Here you can now see the name Model version (v21) was used with the new input:
+In the Weave UI, you can now see that the same `Model` version (`v21`) was used with the new input:

 ![Re-using a weave model](../static/img/tutorial-model_re-use.png)

diff --git a/docs/sidebars.ts b/docs/sidebars.ts
index 848fcbe6622e..212be525c1be 100644
--- a/docs/sidebars.ts
+++ b/docs/sidebars.ts
@@ -11,30 +11,30 @@ const CATEGORY_SECTION_HEADER_MIXIN: SidebarItemCategoryBase = {
 const sidebars: SidebarsConfig = {
   documentationSidebar: [
     {
-      label: "👋 Getting Started",
+      label: "👋 Get Started",
       ...CATEGORY_SECTION_HEADER_MIXIN,
       items: [
-        "introduction",
         {
           type: "doc",
-          label: "Trace LLMs",
-          id: "quickstart",
+          id: "introduction",
+          label: "Overview",
         },
         {
-          type: "doc",
-          label: "Trace Applications",
-          id: "tutorial-tracing_2",
-        },
-        "tutorial-weave_models",
-        {
-          type: "doc",
-          label: "Build an Evaluation",
-          id: "tutorial-eval",
-        },
-        {
-          type: "doc",
-          label: "Evaluate a RAG App",
-          id: "tutorial-rag",
+          type: "category",
+          collapsible: true,
+          collapsed: true,
+          label: "Quickstart",
+          link: {
+            type: "doc",
+            id: "get-started",
+          },
+          items: [
+            "quickstart",
+            "tutorial-tracing_2",
+            "tutorial-weave_models",
+            "tutorial-eval",
+            "tutorial-rag"
+          ],
        },
      ],
    },
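For reference, the export-and-reuse flow described in the `tutorial-weave_models.md` changes above reduces to fetching the ref and calling `invoke`. The tutorial's own code block is not visible in this patch, so the following is only a sketch under that assumption; the ref URI is the one quoted in the hunk, and the input sentence is invented:

```python
import weave

weave.init("jurassic-park")

# The fetched object is the stored ExtractDinos model, already initialised with
# its versioned attributes (model name, temperature, system prompt).
new_dinos = weave.ref(
    "weave:///morgan/jurassic-park/object/ExtractDinos:ey4udBU2MU23heQFJenkVxLBX4bmDsFk7vsGcOWPjY4"
).get()

# Depending on how ExtractDinos is defined, a fresh OpenAI client may need to be
# re-attached before invoking.
result = new_dinos.invoke("I also saw an Ankylosaurus grazing on giant ferns.")
print(result)
```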