Skip to content

Commit

Permalink
add novita-ai provider
Browse files Browse the repository at this point in the history
Signed-off-by: AnyISalIn <[email protected]>
  • Loading branch information
AnyISalIn committed Apr 17, 2024
1 parent a50f600 commit 8398ad9
Show file tree
Hide file tree
Showing 7 changed files with 426 additions and 24 deletions.
49 changes: 25 additions & 24 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -147,22 +147,23 @@ Head over to [Portkey docs](https://portkey.ai/docs/welcome/integration-guides)

## Supported Providers

|| Provider | Support | Stream | Supported Endpoints |
|---|---|---|---|--|
| <img src="docs/images/openai.png" width=35 />| OpenAI ||| `/completions`, `/chat/completions`,`/embeddings`, `/assistants`, `/threads`, `/runs`, `/images/generations`, `/audio/*`|
| <img src="docs/images/azure.png" width=35>| Azure OpenAI ||| `/completions`, `/chat/completions`,`/embeddings` |
| <img src="docs/images/anyscale.png" width=35>| Anyscale ||| `/chat/completions` |
| <img src="https://upload.wikimedia.org/wikipedia/commons/2/2d/Google-favicon-2015.png" width=35>| Google Gemini & Palm ||| `/generateMessage`, `/generateText`, `/embedText` |
| <img src="docs/images/anthropic.png" width=35>| Anthropic ||| `/messages`, `/complete` |
| <img src="docs/images/cohere.png" width=35>| Cohere ||| `/generate`, `/embed`, `/rerank` |
| <img src="https://assets-global.website-files.com/64f6f2c0e3f4c5a91c1e823a/654693d569494912cfc0c0d4_favicon.svg" width=35>| Together AI ||| `/chat/completions`, `/completions`, `/inference` |
| <img src="https://www.perplexity.ai/favicon.svg" width=35>| Perplexity ||| `/chat/completions` |
| <img src="https://docs.mistral.ai/img/favicon.ico" width=35>| Mistral ||| `/chat/completions`, `/embeddings` |
| <img src="https://docs.nomic.ai/img/nomic-logo.png" width=35>| Nomic ||| `/embeddings` |
| <img src="https://files.readme.io/d38a23e-small-studio-favicon.png" width=35>| AI21 ||| `/complete`, `/chat`, `/embed` |
| <img src="https://platform.stability.ai/small-logo-purple.svg" width=35>| Stability AI ||| `/generation/{engine_id}/text-to-image` |
| <img src="https://deepinfra.com/_next/static/media/logo.4a03fd3d.svg" width=35>| DeepInfra ||| `/inference` |
| <img src="https://ollama.com/public/ollama.png" width=35>| Ollama ||| `/chat/completions` |
| | Provider | Support | Stream | Supported Endpoints |
| -------------------------------------------------------------------------------------------------------------------------- | -------------------- | ------- | ------ | ------------------------------------------------------------------------------------------------------------------------ |
| <img src="docs/images/openai.png" width=35 /> | OpenAI ||| `/completions`, `/chat/completions`,`/embeddings`, `/assistants`, `/threads`, `/runs`, `/images/generations`, `/audio/*` |
| <img src="docs/images/azure.png" width=35> | Azure OpenAI ||| `/completions`, `/chat/completions`,`/embeddings` |
| <img src="docs/images/anyscale.png" width=35> | Anyscale ||| `/chat/completions` |
| <img src="https://upload.wikimedia.org/wikipedia/commons/2/2d/Google-favicon-2015.png" width=35> | Google Gemini & Palm ||| `/generateMessage`, `/generateText`, `/embedText` |
| <img src="docs/images/anthropic.png" width=35> | Anthropic ||| `/messages`, `/complete` |
| <img src="docs/images/cohere.png" width=35> | Cohere ||| `/generate`, `/embed`, `/rerank` |
| <img src="https://assets-global.website-files.com/64f6f2c0e3f4c5a91c1e823a/654693d569494912cfc0c0d4_favicon.svg" width=35> | Together AI ||| `/chat/completions`, `/completions`, `/inference` |
| <img src="https://www.perplexity.ai/favicon.svg" width=35> | Perplexity ||| `/chat/completions` |
| <img src="https://docs.mistral.ai/img/favicon.ico" width=35> | Mistral ||| `/chat/completions`, `/embeddings` |
| <img src="https://docs.nomic.ai/img/nomic-logo.png" width=35> | Nomic ||| `/embeddings` |
| <img src="https://files.readme.io/d38a23e-small-studio-favicon.png" width=35> | AI21 ||| `/complete`, `/chat`, `/embed` |
| <img src="https://platform.stability.ai/small-logo-purple.svg" width=35> | Stability AI ||| `/generation/{engine_id}/text-to-image` |
| <img src="https://deepinfra.com/_next/static/media/logo.4a03fd3d.svg" width=35> | DeepInfra ||| `/inference` |
| <img src="https://ollama.com/public/ollama.png" width=35> | Ollama ||| `/chat/completions` |
| <img src="https://novita.ai/favicon.ico" width=35> | Novita AI ||| `/chat/completions`, `/completions` |

> [View the complete list of 100+ supported models here](https://portkey.ai/docs/welcome/what-is-portkey#ai-providers-supported)
<br>
Expand Down Expand Up @@ -304,14 +305,14 @@ Here's a guide to [use config object in your request](https://portkey.ai/docs/ap

## Supported SDKs

| Language | Supported SDKs |
|---|---|
| Node.js / JS / TS | [Portkey SDK](https://www.npmjs.com/package/portkey-ai) <br> [OpenAI SDK](https://www.npmjs.com/package/openai) <br> [LangchainJS](https://www.npmjs.com/package/langchain) <br> [LlamaIndex.TS](https://www.npmjs.com/package/llamaindex) |
| Python | [Portkey SDK](https://pypi.org/project/portkey-ai/) <br> [OpenAI SDK](https://portkey.ai/docs/welcome/integration-guides/openai) <br> [Langchain](https://portkey.ai/docs/welcome/integration-guides/langchain-python) <br> [LlamaIndex](https://portkey.ai/docs/welcome/integration-guides/llama-index-python) |
| Go | [go-openai](https://github.com/sashabaranov/go-openai) |
| Java | [openai-java](https://github.com/TheoKanning/openai-java) |
| Rust | [async-openai](https://docs.rs/async-openai/latest/async_openai/) |
| Ruby | [ruby-openai](https://github.com/alexrudall/ruby-openai) |
| Language | Supported SDKs |
| ----------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Node.js / JS / TS | [Portkey SDK](https://www.npmjs.com/package/portkey-ai) <br> [OpenAI SDK](https://www.npmjs.com/package/openai) <br> [LangchainJS](https://www.npmjs.com/package/langchain) <br> [LlamaIndex.TS](https://www.npmjs.com/package/llamaindex) |
| Python | [Portkey SDK](https://pypi.org/project/portkey-ai/) <br> [OpenAI SDK](https://portkey.ai/docs/welcome/integration-guides/openai) <br> [Langchain](https://portkey.ai/docs/welcome/integration-guides/langchain-python) <br> [LlamaIndex](https://portkey.ai/docs/welcome/integration-guides/llama-index-python) |
| Go | [go-openai](https://github.com/sashabaranov/go-openai) |
| Java | [openai-java](https://github.com/TheoKanning/openai-java) |
| Rust | [async-openai](https://docs.rs/async-openai/latest/async_openai/) |
| Ruby | [ruby-openai](https://github.com/alexrudall/ruby-openai) |
<br>


Expand Down
2 changes: 2 additions & 0 deletions src/globals.ts
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@ export const SEGMIND: string = 'segmind';
export const JINA: string = 'jina';
export const FIREWORKS_AI: string = 'fireworks-ai';
export const WORKERS_AI: string = 'workers-ai';
export const NOVITA_AI: string = 'novita-ai';

export const VALID_PROVIDERS = [
ANTHROPIC,
Expand All @@ -68,6 +69,7 @@ export const VALID_PROVIDERS = [
JINA,
FIREWORKS_AI,
WORKERS_AI,
NOVITA_AI,
];

export const CONTENT_TYPES = {
Expand Down
2 changes: 2 additions & 0 deletions src/providers/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ import SegmindConfig from './segmind';
import JinaConfig from './jina';
import FireworksAIConfig from './fireworks-ai';
import WorkersAiConfig from './workers-ai';
import NovitaAIConfig from './novita-ai';

const Providers: { [key: string]: ProviderConfigs } = {
openai: OpenAIConfig,
Expand All @@ -45,6 +46,7 @@ const Providers: { [key: string]: ProviderConfigs } = {
jina: JinaConfig,
'fireworks-ai': FireworksAIConfig,
'workers-ai': WorkersAiConfig,
'novita-ai': NovitaAIConfig,
};

export default Providers;
20 changes: 20 additions & 0 deletions src/providers/novita-ai/api.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
import { ProviderAPIConfig } from '../types';

// Endpoint and auth wiring for Novita AI's OpenAI-compatible API.
const NovitaAIApiConfig: ProviderAPIConfig = {
  // NOTE(review): the base URL already ends in /v3/openai while the routes
  // below prepend /v1 — confirm the combined /v3/openai/v1/... path is the
  // intended Novita route.
  getBaseURL: () => 'https://api.novita.ai/v3/openai',
  // Novita authenticates with a standard Bearer token.
  headers: ({ providerOptions }) => ({
    Authorization: `Bearer ${providerOptions.apiKey}`,
  }),
  // Resolve the route for a unified function name; unknown functions map to
  // the empty string (the request is sent to the bare base URL).
  getEndpoint: ({ fn }) => {
    const routes: Record<string, string> = {
      complete: '/v1/completions',
      chatComplete: '/v1/chat/completions',
    };
    return routes[fn] ?? '';
  },
};

export default NovitaAIApiConfig;
220 changes: 220 additions & 0 deletions src/providers/novita-ai/chatComplete.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,220 @@
import { NOVITA_AI } from '../../globals';
import {
ChatCompletionResponse,
ErrorResponse,
ProviderConfig,
} from '../types';
import {
generateErrorResponse,
generateInvalidProviderResponseError,
} from '../utils';

// TODO: this configuration does not enforce the model's maximum input-token limit. Enforcing it would require a custom validation function or a `max` property on the ParameterConfig interface, and even then an accurate check is non-trivial because token counts depend on the model's tokenizer, not on simple string length.

// Maps Portkey's unified chat-completion parameters onto Novita AI's
// OpenAI-compatible request body. Each key is the unified parameter name;
// `param` is the field name actually sent to Novita.
export const NovitaAIChatCompleteConfig: ProviderConfig = {
  model: {
    param: 'model',
    required: true,
    // Fallback model used when the caller does not specify one.
    default: 'lzlv_70b',
  },
  messages: {
    param: 'messages',
    required: true,
    default: '',
  },
  max_tokens: {
    param: 'max_tokens',
    required: true,
    // NOTE(review): 128 is a conservative default completion budget — confirm
    // it matches Novita's own server-side default.
    default: 128,
    min: 1,
  },
  stop: {
    param: 'stop',
  },
  temperature: {
    param: 'temperature',
  },
  top_p: {
    param: 'top_p',
  },
  n: {
    param: 'n',
  },
  top_k: {
    param: 'top_k',
  },
  // Penalty ranges mirror the OpenAI API's documented [-2, 2] bounds.
  presence_penalty: {
    param: 'presence_penalty',
    min: -2,
    max: 2,
  },
  frequency_penalty: {
    param: 'frequency_penalty',
    min: -2,
    max: 2,
  },
  stream: {
    param: 'stream',
    default: false,
  },
  logprobs: {
    param: 'logprobs',
  },
  tools: {
    param: 'tools',
  },
  tool_choice: {
    param: 'tool_choice',
  },
  response_format: {
    param: 'response_format',
  },
};

/**
 * Successful chat-completion payload from Novita AI. Identical to the
 * unified ChatCompletionResponse, except `usage` is redeclared as required.
 */
export interface NovitaAIChatCompleteResponse extends ChatCompletionResponse {
  usage: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
}

/**
 * Novita-native error payload, where `error` is a plain string.
 * `message`/`type` may appear instead on some failures.
 */
export interface NovitaAIErrorResponse {
  model: string;
  job_id: string;
  request_id: string;
  error: string;
  message?: string;
  type?: string;
}

/** OpenAI-style error payload with an object-valued `error` field. */
export interface NovitaAIOpenAICompatibleErrorResponse extends ErrorResponse {}

/** Shape of one parsed chunk from Novita's streaming (SSE `data:`) output. */
export interface NovitaAIChatCompletionStreamChunk {
  id: string;
  request_id: string;
  object: string;
  choices: {
    index: number;
    delta: {
      content: string;
    };
  }[];
}

/**
 * Normalizes a Novita failure payload into the unified ErrorResponse shape.
 * Novita can report failures as a bare string in `error`, as an OpenAI-style
 * structured `error` object, or as a top-level `message`/`type` pair.
 * Returns `false` when the payload matches none of these shapes.
 */
export const NovitaAIErrorResponseTransform: (
  response: NovitaAIErrorResponse | NovitaAIOpenAICompatibleErrorResponse
) => ErrorResponse | false = (response) => {
  if ('error' in response) {
    const err = response.error;

    if (typeof err === 'string') {
      return generateErrorResponse(
        { message: err, type: null, param: null, code: null },
        NOVITA_AI
      );
    }

    // Note: `err` may be null here (typeof null === 'object'); optional
    // chaining keeps that case safe, matching the original behavior.
    if (typeof err === 'object') {
      return generateErrorResponse(
        {
          message: err?.message || '',
          type: err?.type || null,
          param: err?.param || null,
          code: err?.code || null,
        },
        NOVITA_AI
      );
    }
  }

  if ('message' in response && response.message) {
    return generateErrorResponse(
      {
        message: response.message,
        type: response.type || null,
        param: null,
        code: null,
      },
      NOVITA_AI
    );
  }

  return false;
};

/**
 * Transforms a Novita AI chat-completion response into the unified format.
 * Non-200 statuses are first routed through the error transform; payloads
 * that match neither a known error shape nor a completion shape produce the
 * generic invalid-provider error.
 */
export const NovitaAIChatCompleteResponseTransform: (
  response:
    | NovitaAIChatCompleteResponse
    | NovitaAIErrorResponse
    | NovitaAIOpenAICompatibleErrorResponse,
  responseStatus: number
) => ChatCompletionResponse | ErrorResponse = (response, responseStatus) => {
  if (responseStatus !== 200) {
    const errorResponse = NovitaAIErrorResponseTransform(
      response as NovitaAIErrorResponse
    );
    if (errorResponse) return errorResponse;
  }

  if ('choices' in response) {
    return {
      id: response.id,
      object: response.object,
      created: response.created,
      model: response.model,
      provider: NOVITA_AI,
      choices: response.choices.map((choice) => {
        return {
          message: {
            role: 'assistant',
            content: choice.message.content,
            tool_calls: choice.message.tool_calls
              ? choice.message.tool_calls.map((toolCall: any) => ({
                  id: toolCall.id,
                  type: toolCall.type,
                  function: toolCall.function,
                }))
              : null,
          },
          // Preserve the provider-reported choice index; it was previously
          // hard-coded to 0, which corrupts multi-choice (n > 1) responses.
          index: choice.index ?? 0,
          logprobs: null,
          finish_reason: choice.finish_reason,
        };
      }),
      usage: {
        prompt_tokens: response.usage?.prompt_tokens,
        completion_tokens: response.usage?.completion_tokens,
        total_tokens: response.usage?.total_tokens,
      },
    };
  }

  return generateInvalidProviderResponseError(response, NOVITA_AI);
};

/**
 * Rewrites one raw SSE line from Novita's stream into a unified-format
 * `data: {...}\n\n` event. The terminal `[DONE]` sentinel passes through.
 */
export const NovitaAIChatCompleteStreamChunkTransform: (
  response: string
) => string = (responseChunk) => {
  // Strip surrounding whitespace and the SSE `data: ` prefix.
  const payload = responseChunk.trim().replace(/^data: /, '').trim();

  if (payload === '[DONE]') {
    return `data: ${payload}\n\n`;
  }

  const parsed: NovitaAIChatCompletionStreamChunk = JSON.parse(payload);

  // NOTE(review): `model` is emitted empty and `finish_reason` is always ''
  // (the chunk type exposes neither field) — confirm whether Novita chunks
  // carry these and should be passed through.
  const unifiedChunk = {
    id: parsed.id,
    object: parsed.object,
    created: Math.floor(Date.now() / 1000),
    model: '',
    provider: NOVITA_AI,
    choices: [
      {
        delta: {
          content: parsed.choices[0]?.delta.content,
        },
        index: 0,
        finish_reason: '',
      },
    ],
  };

  return `data: ${JSON.stringify(unifiedChunk)}\n\n`;
};
Loading

0 comments on commit 8398ad9

Please sign in to comment.