Skip to content

Commit

Permalink
Vector Index PRs to Elastic (#1909)
Browse files Browse the repository at this point in the history
  • Loading branch information
michaeljguarino authored Feb 14, 2025
1 parent 9c2cc3e commit 5dc6e13
Show file tree
Hide file tree
Showing 49 changed files with 857 additions and 105 deletions.
29 changes: 29 additions & 0 deletions assets/src/generated/graphql.ts
Original file line number Diff line number Diff line change
Expand Up @@ -282,13 +282,16 @@ export type AiSettingsAttributes = {
anthropic?: InputMaybe<AnthropicSettingsAttributes>;
azure?: InputMaybe<AzureOpenaiAttributes>;
bedrock?: InputMaybe<BedrockAiAttributes>;
/** ai provider to use with embeddings (for vector indexing) */
embeddingProvider?: InputMaybe<AiProvider>;
enabled?: InputMaybe<Scalars['Boolean']['input']>;
ollama?: InputMaybe<OllamaAttributes>;
openai?: InputMaybe<OpenaiSettingsAttributes>;
provider?: InputMaybe<AiProvider>;
/** ai provider to use with tool calls */
toolProvider?: InputMaybe<AiProvider>;
tools?: InputMaybe<ToolConfigAttributes>;
vectorStore?: InputMaybe<VectorStoreAttributes>;
vertex?: InputMaybe<VertexAiAttributes>;
};

Expand Down Expand Up @@ -353,6 +356,8 @@ export type AnthropicSettings = {

export type AnthropicSettingsAttributes = {
accessToken?: InputMaybe<Scalars['String']['input']>;
/** the model to use for vector embeddings */
embeddingModel?: InputMaybe<Scalars['String']['input']>;
model?: InputMaybe<Scalars['String']['input']>;
/** the model to use for tool calls, which are less frequent and require more complex reasoning */
toolModel?: InputMaybe<Scalars['String']['input']>;
Expand Down Expand Up @@ -599,6 +604,8 @@ export type AzureOpenaiAttributes = {
accessToken: Scalars['String']['input'];
/** the api version you want to use */
apiVersion?: InputMaybe<Scalars['String']['input']>;
/** the model to use for vector embeddings */
embeddingModel?: InputMaybe<Scalars['String']['input']>;
/** the endpoint of your azure openai version, should look like: https://{endpoint}/openai/deployments/{deployment-id} */
endpoint: Scalars['String']['input'];
/** the exact model you wish to use */
Expand Down Expand Up @@ -4166,6 +4173,8 @@ export enum OidcProviderType {
export type OllamaAttributes = {
/** An http authorization header to use on calls to the Ollama api */
authorization?: InputMaybe<Scalars['String']['input']>;
/** the model to use for vector embeddings */
embeddingModel?: InputMaybe<Scalars['String']['input']>;
model: Scalars['String']['input'];
/** the model to use for tool calls, which are less frequent and require more complex reasoning */
toolModel?: InputMaybe<Scalars['String']['input']>;
Expand Down Expand Up @@ -4196,6 +4205,8 @@ export type OpenaiSettings = {
export type OpenaiSettingsAttributes = {
accessToken?: InputMaybe<Scalars['String']['input']>;
baseUrl?: InputMaybe<Scalars['String']['input']>;
/** the model to use for vector embeddings */
embeddingModel?: InputMaybe<Scalars['String']['input']>;
model?: InputMaybe<Scalars['String']['input']>;
/** the model to use for tool calls, which are less frequent and require more complex reasoning */
toolModel?: InputMaybe<Scalars['String']['input']>;
Expand Down Expand Up @@ -5669,6 +5680,7 @@ export type RootMutationType = {
upsertObservabilityWebhook?: Maybe<ObservabilityWebhook>;
upsertObserver?: Maybe<Observer>;
upsertPolicyConstraints?: Maybe<Scalars['Int']['output']>;
upsertUser?: Maybe<User>;
upsertVirtualCluster?: Maybe<Cluster>;
upsertVulnerabilities?: Maybe<Scalars['Int']['output']>;
};
Expand Down Expand Up @@ -6613,6 +6625,11 @@ export type RootMutationTypeUpsertPolicyConstraintsArgs = {
};


/** Argument payload for the `upsertUser` mutation (generated from the GraphQL schema). */
export type RootMutationTypeUpsertUserArgs = {
  attributes: UserAttributes;
};


export type RootMutationTypeUpsertVirtualClusterArgs = {
attributes: ClusterAttributes;
parentId: Scalars['ID']['input'];
Expand Down Expand Up @@ -9251,6 +9268,16 @@ export enum ValidationUniqScope {
Project = 'PROJECT'
}

/** Backing stores available for AI vector indexing (generated from the GraphQL schema). */
export enum VectorStore {
  Elastic = 'ELASTIC'
}

/** Input attributes configuring the vector store used for AI embeddings. */
export type VectorStoreAttributes = {
  /** connection settings for an elasticsearch-backed vector store */
  elastic?: InputMaybe<ElasticsearchConnectionAttributes>;
  /** whether vector indexing is enabled */
  enabled?: InputMaybe<Scalars['Boolean']['input']>;
  /** which store implementation to use (only ELASTIC is defined in this schema) */
  store?: InputMaybe<VectorStore>;
};

/** a shortform reference to an addon by version */
export type VersionReference = {
__typename?: 'VersionReference';
Expand All @@ -9259,6 +9286,8 @@ export type VersionReference = {
};

export type VertexAiAttributes = {
/** the model to use for vector embeddings */
embeddingModel?: InputMaybe<Scalars['String']['input']>;
/** custom vertexai endpoint if for dedicated customer deployments */
endpoint?: InputMaybe<Scalars['String']['input']>;
/** the gcp region the model is hosted in */
Expand Down
3 changes: 2 additions & 1 deletion config/test.exs
Original file line number Diff line number Diff line change
Expand Up @@ -157,4 +157,5 @@ config :bamboo, :refute_timeout, 10

config :elasticsearch,
host: "http://localhost:9200",
index: "testindex"
index: "testindex",
vector_index: "plrl-vector-testindex"
2 changes: 1 addition & 1 deletion docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ services:
volumes:
- database_data:/var/lib/postgresql/data
es:
image: elasticsearch:8.8.2
image: elasticsearch:8.11.4
restart: always
ports:
- 9200:9200
Expand Down
74 changes: 66 additions & 8 deletions go/client/models_gen.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion lib/console/ai/evidence/logs.ex
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ defmodule Console.AI.Evidence.Logs do

@type parent :: Service.t | ClusterInsightComponent.t | Cluster.t

@base [query: "error fatal exception fail failed failure", limit: 10]
@base [query: "error fatal exception fail failed failure warning warn", limit: 10]
@format ~s({"timestamp": datetime, "log": string})

@preface """
Expand Down
8 changes: 8 additions & 0 deletions lib/console/ai/provider.ex
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,8 @@ defmodule Console.AI.Provider do

@callback tool_call(struct, history, [atom]) :: {:ok, binary | [tool_result]} | {:error, binary}

@callback embeddings(struct, binary) :: {:ok, [{binary, [float]}]} | {:error, binary}

@callback tools?() :: boolean

def tools?() do
Expand All @@ -49,6 +51,12 @@ defmodule Console.AI.Provider do
do: handle_tool_calls(result, tools)
end

@doc """
Generates vector embeddings for the given text via the configured AI provider.

Resolves a provider client from the cached deployment settings and delegates
to that provider module's `embeddings/2`.  Any client resolution failure is
returned unchanged.
"""
def embeddings(text) do
  case client(Console.Deployments.Settings.cached()) do
    {:ok, %provider{} = client} -> provider.embeddings(client, text)
    error -> error
  end
end

def summary(text), do: completion([{:user, text}], preface: @summary)

defp tool_client(%DeploymentSettings{ai: %AI{tool_provider: p}} = settings) when not is_nil(p),
Expand Down
2 changes: 2 additions & 0 deletions lib/console/ai/provider/anthropic.ex
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,8 @@ defmodule Console.AI.Anthropic do
end
end

def embeddings(_, _), do: {:error, "embedding not implemented for this provider"}

def tools?(), do: true

defp chat(%__MODULE__{access_key: token, model: model, stream: %Stream{} = stream}, history) do
Expand Down
14 changes: 13 additions & 1 deletion lib/console/ai/provider/azure.ex
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ defmodule Console.AI.Azure do

require Logger

defstruct [:access_token, :api_version, :base_url, :model, :tool_model]
defstruct [:access_token, :api_version, :base_url, :model, :tool_model, :embedding_model]

@api_vsn "2024-10-01-preview"

Expand All @@ -19,6 +19,7 @@ defmodule Console.AI.Azure do
api_version: opts.api_version,
model: opts.model,
tool_model: opts.tool_model,
embedding_model: opts.embedding_model,
base_url: "#{opts.endpoint}/openai"
}
end
Expand All @@ -45,5 +46,16 @@ defmodule Console.AI.Azure do
|> OpenAI.tool_call(messages, tools)
end

@doc """
Generates openai embeddings from the azure openai credentials chain.

Delegates to the OpenAI client built from these azure credentials, defaulting
the api version to the pinned `@api_vsn` and the embedding model to OpenAI's
default when either is unset.
"""
@spec embeddings(t(), binary) :: {:ok, [{binary, [float]}]} | {:error, binary}
def embeddings(%__MODULE__{api_version: vsn, embedding_model: model} = azure, text) do
  OpenAI.new(azure)
  |> Map.put(:params, %{"api-version" => vsn || @api_vsn})
  |> Map.put(:embedding_model, model || OpenAI.default_embedding_model())
  |> OpenAI.embeddings(text)
end

def tools?(), do: true
end
2 changes: 2 additions & 0 deletions lib/console/ai/provider/bedrock.ex
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,8 @@ defmodule Console.AI.Bedrock do

def tool_call(_, _, _), do: {:error, "tool calling not implemented for this provider"}

def embeddings(_, _), do: {:error, "embedding not implemented for this provider"}

def tools?(), do: false

defp build_req([{:system, system} | rest]) do
Expand Down
2 changes: 2 additions & 0 deletions lib/console/ai/provider/ollama.ex
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,8 @@ defmodule Console.AI.Ollama do

def tool_call(_, _, _), do: {:error, "tool calling not implemented for this provider"}

def embeddings(_, _), do: {:error, "embedding not implemented for this provider"}

def tools?(), do: false

defp chat(%__MODULE__{url: url, model: model} = ollama, history) do
Expand Down
Loading

0 comments on commit 5dc6e13

Please sign in to comment.