diff --git a/helm-charts/agentqna/values.yaml b/helm-charts/agentqna/values.yaml
index ae85c6224..f98ec2001 100644
--- a/helm-charts/agentqna/values.yaml
+++ b/helm-charts/agentqna/values.yaml
@@ -61,12 +61,6 @@ worker:
   toolPath: "/mnt/tools"
   service:
     port: 9095
-  image:
-    repository: opea/agent-langchain
-    # Uncomment the following line to set desired image pull policy if needed, as one of Always, IfNotPresent, Never.
-    # pullPolicy: ""
-    # Overrides the image tag whose default is the chart appVersion.
-    tag: "latest"
   strategy: rag_agent_llama
   recursion_limit: "12"
   llm_engine: tgi
@@ -83,12 +77,6 @@ supervisor:
   toolPath: "/mnt/tools"
   service:
     port: 9090
-  image:
-    repository: opea/agent-langchain
-    # Uncomment the following line to set desired image pull policy if needed, as one of Always, IfNotPresent, Never.
-    # pullPolicy: ""
-    # Overrides the image tag whose default is the chart appVersion.
-    tag: "latest"
   strategy: react_llama
   recursion_limit: 10
   llm_engine: tgi
diff --git a/helm-charts/common/agent/README.md b/helm-charts/common/agent/README.md
index 820de8951..5472ee8d8 100644
--- a/helm-charts/common/agent/README.md
+++ b/helm-charts/common/agent/README.md
@@ -37,10 +37,10 @@ curl http://localhost:9090/v1/chat/completions \

 For global options, see Global Options.

-| Key                             | Type   | Default                  | Description                     |
-| ------------------------------- | ------ | ------------------------ | ------------------------------- |
-| global.HUGGINGFACEHUB_API_TOKEN | string | `""`                     | Your own Hugging Face API token |
-| image.repository                | string | `"opea/agent-langchain"` |                                 |
-| service.port                    | string | `"9090"`                 |                                 |
-| llm_endpoint_url                | string | `""`                     | LLM endpoint                    |
-| global.monitoring               | bool   | false                    | Service usage metrics           |
+| Key                             | Type   | Default        | Description                     |
+| ------------------------------- | ------ | -------------- | ------------------------------- |
+| global.HUGGINGFACEHUB_API_TOKEN | string | `""`           | Your own Hugging Face API token |
+| image.repository                | string | `"opea/agent"` |                                 |
+| service.port                    | string | `"9090"`       |                                 |
+| llm_endpoint_url                | string | `""`           | LLM endpoint                    |
+| global.monitoring               | bool   | false          | Service usage metrics           |
diff --git a/helm-charts/common/agent/templates/configmap.yaml b/helm-charts/common/agent/templates/configmap.yaml
index 62970d7d2..2254585e4 100644
--- a/helm-charts/common/agent/templates/configmap.yaml
+++ b/helm-charts/common/agent/templates/configmap.yaml
@@ -23,7 +23,7 @@ data:
   model: {{ .Values.model | quote }}
   {{- end }}
   {{- if .Values.streaming }}
-  streaming: {{ .Values.streaming | quote }}
+  stream: {{ .Values.streaming | quote }}
   {{- end }}
   {{- if .Values.temperature }}
   temperature: {{ .Values.temperature | quote }}
diff --git a/helm-charts/common/agent/templates/tests/test-pod.yaml b/helm-charts/common/agent/templates/tests/test-pod.yaml
index 61515c7a3..d6ce3cca3 100644
--- a/helm-charts/common/agent/templates/tests/test-pod.yaml
+++ b/helm-charts/common/agent/templates/tests/test-pod.yaml
@@ -22,7 +22,7 @@ spec:
             curl http://{{ include "agent.fullname" . }}:{{ .Values.service.port }}/v1/chat/completions -sS --fail-with-body \
               -X POST \
               -H 'Content-Type: application/json' \
-              -d '{"query":"What is OPEA?"}' && break;
+              -d '{"messages":"What is OPEA?", "stream":"true"}' && break;
             curlcode=$?
             if [[ $curlcode -eq 7 ]]; then sleep 10; else echo "curl failed with code $curlcode"; exit 1; fi;
           done;
diff --git a/helm-charts/common/agent/values.yaml b/helm-charts/common/agent/values.yaml
index 0ebfd4c31..7516aef04 100644
--- a/helm-charts/common/agent/values.yaml
+++ b/helm-charts/common/agent/values.yaml
@@ -30,7 +30,7 @@ require_human_feedback: "false"
 LOGFLAG: "True"

 image:
-  repository: opea/agent-langchain
+  repository: opea/agent
   # Uncomment the following line to set desired image pull policy if needed, as one of Always, IfNotPresent, Never.
   # pullPolicy: ""
   # Overrides the image tag whose default is the chart appVersion.
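A quick manual check, assuming the agent service is reachable on localhost:9090 as in the chart README's example (release name and port-forwarding setup are not part of this diff), is to send the same payload shape the test pod now uses:

    # hypothetical verification command; adjust host/port for your deployment
    curl http://localhost:9090/v1/chat/completions \
      -X POST \
      -H 'Content-Type: application/json' \
      -d '{"messages":"What is OPEA?", "stream":"true"}'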