From c70a9e178dae7fd311f100e41fc0b0e98cfd12c4 Mon Sep 17 00:00:00 2001
From: Eden Reich
Date: Wed, 22 Jan 2025 18:05:47 +0000
Subject: [PATCH] docs: Enhance InferenceGatewayAPI with health check and detailed documentation

Signed-off-by: Eden Reich
---
 src/lib.rs | 43 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 43 insertions(+)

diff --git a/src/lib.rs b/src/lib.rs
index 38345dc..3394055 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,21 +1,35 @@
+//! Inference Gateway SDK for Rust
+//!
+//! This crate provides a Rust client for the Inference Gateway API, allowing interaction
+//! with various LLM providers through a unified interface.
+
 use reqwest::blocking::Client;
 use serde::{Deserialize, Serialize};
 use std::{error::Error, fmt};
 
+/// Represents a model available through a provider
 #[derive(Debug, Serialize, Deserialize)]
 pub struct Model {
+    /// Unique identifier of the model
     pub id: String,
+    /// Type of the object (always "model")
     pub object: String,
+    /// Organization that owns the model
     pub owned_by: String,
+    /// Unix timestamp of when the model was created
     pub created: i64,
 }
 
+/// Collection of models available from a specific provider
 #[derive(Debug, Serialize, Deserialize)]
 pub struct ProviderModels {
+    /// The LLM provider
     pub provider: Provider,
+    /// List of available models
     pub models: Vec<Model>,
 }
 
+/// Supported LLM providers
 #[derive(Debug, Serialize, Deserialize)]
 #[serde(rename_all = "lowercase")]
 pub enum Provider {
@@ -40,40 +54,60 @@ impl fmt::Display for Provider {
     }
 }
 
+/// A message in a conversation with an LLM
 #[derive(Debug, Serialize, Deserialize)]
 pub struct Message {
+    /// Role of the message sender ("system", "user" or "assistant")
     pub role: String,
+    /// Content of the message
     pub content: String,
 }
 
+/// Request payload for generating content
 #[derive(Debug, Serialize)]
 struct GenerateRequest {
+    /// Name of the model
     model: String,
+    /// Conversation history and prompt
     messages: Vec<Message>,
 }
 
 #[derive(Debug, Deserialize)]
 pub struct GenerateResponse {
+    /// Provider that generated the response
     pub provider: String,
+    /// Content of the response
     pub response: ResponseContent,
 }
 
 #[derive(Debug, Deserialize)]
 pub struct ResponseContent {
+    /// Role of the responder (typically "assistant")
     pub role: String,
+    /// Model that generated the response
     pub model: String,
+    /// Generated content
     pub content: String,
 }
 
+/// Client for interacting with the Inference Gateway API
 pub struct InferenceGatewayClient {
     base_url: String,
     client: Client,
     token: Option<String>,
 }
 
+/// Core API interface for the Inference Gateway
 pub trait InferenceGatewayAPI {
+    /// Lists available models from all providers
     fn list_models(&self) -> Result<Vec<ProviderModels>, Box<dyn Error>>;
 
+    /// Generates content using a specified model
+    ///
+    /// # Arguments
+    /// * `provider` - The LLM provider to use
+    /// * `model` - Name of the model
+    /// * `messages` - Conversation history and prompt
     fn generate_content(
         &self,
         provider: Provider,
@@ -81,10 +115,15 @@ pub trait InferenceGatewayAPI {
         messages: Vec<Message>,
     ) -> Result<GenerateResponse, Box<dyn Error>>;
 
+    /// Checks if the API is available
     fn health_check(&self) -> Result<bool, Box<dyn Error>>;
 }
 
 impl InferenceGatewayClient {
+    /// Creates a new client instance
+    ///
+    /// # Arguments
+    /// * `base_url` - Base URL of the Inference Gateway API
     pub fn new(base_url: &str) -> Self {
         Self {
             base_url: base_url.to_string(),
@@ -93,6 +132,10 @@ impl InferenceGatewayClient {
         }
     }
 
+    /// Sets an authentication token for the client
+    ///
+    /// # Arguments
+    /// * `token` - JWT token for authentication
     pub fn with_token(mut self, token: impl Into<String>) -> Self {
         self.token = Some(token.into());
         self
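
Below is a minimal usage sketch of the API documented in this patch; it is not part of the diff. The crate name `inference_gateway_sdk`, the gateway URL, and the token are assumed placeholders; the middle parameter of `generate_content` (elided between hunks) is assumed to be `model: &str`, mirroring the `model: String` field of `GenerateRequest`; and `InferenceGatewayClient` is assumed to implement the `InferenceGatewayAPI` trait, an impl that falls outside the hunks shown.

```rust
use std::error::Error;

// Assumed crate name; the patch only shows src/lib.rs, not Cargo.toml.
use inference_gateway_sdk::{InferenceGatewayAPI, InferenceGatewayClient, Message};

fn main() -> Result<(), Box<dyn Error>> {
    // Placeholder URL and token; point these at a real gateway deployment.
    let client = InferenceGatewayClient::new("http://localhost:8080")
        .with_token("my-jwt-token");

    // health_check is reconstructed above as returning Result<bool, _>.
    if !client.health_check()? {
        return Err("gateway is not available".into());
    }

    // Take the first provider/model pair the gateway advertises, so the
    // example never has to name a Provider variant (the enum body is
    // elided from the first hunk).
    let first = client
        .list_models()?
        .into_iter()
        .next()
        .ok_or("no providers available")?;
    let model_id = first.models.first().ok_or("no models available")?.id.clone();

    let response = client.generate_content(
        first.provider,
        &model_id, // assumed `model: &str` parameter
        vec![Message {
            role: "user".to_string(),
            content: "Summarize what an inference gateway does.".to_string(),
        }],
    )?;

    println!("[{}] {}", response.response.model, response.response.content);
    Ok(())
}
```

Deriving the provider from `list_models` keeps the sketch independent of the `Provider` enum's concrete variants, which this patch does not show.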