Add HPA scaling support for ChatQnA / vLLM
Signed-off-by: Eero Tamminen <[email protected]>
eero-t committed Nov 29, 2024
1 parent 10af11a commit c5741dc
Showing 9 changed files with 110 additions and 7 deletions.
1 change: 1 addition & 0 deletions helm-charts/chatqna/gaudi-vllm-values.yaml
@@ -9,6 +9,7 @@ tgi:

vllm:
enabled: true
accelDevice: "gaudi"
image:
repository: opea/vllm-gaudi
tag: "latest"
6 changes: 5 additions & 1 deletion helm-charts/chatqna/hpa-values.yaml
@@ -4,7 +4,7 @@
# Enable HorizontalPodAutoscaler (HPA)
#
# That will overwrite named PrometheusAdapter configMap with ChatQnA specific
# custom metric queries for embedding, reranking, tgi services.
# custom metric queries for embedding, reranking, and LLM services.
#
# Default upstream configMap is in:
# - https://github.com/kubernetes-sigs/prometheus-adapter/blob/master/deploy/manifests/config-map.yaml
@@ -15,6 +15,10 @@ autoscaling:
# Override values in specific subcharts

# Enabling "autoscaling" for any of the subcharts requires enabling it also above!
vllm:
autoscaling:
maxReplicas: 4
enabled: true
tgi:
autoscaling:
maxReplicas: 4
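A rough usage sketch of the overrides above (chart path, release name, and values-file layout are assumptions, not part of this commit): the HPA values are meant to be layered on top of the regular ChatQnA values at install time.

  # assumed checkout layout and release name; global.monitoring is required by the
  # subchart HPA templates and may already be switched on by hpa-values.yaml itself
  helm install chatqna ./helm-charts/chatqna \
    -f helm-charts/chatqna/gaudi-vllm-values.yaml \
    -f helm-charts/chatqna/hpa-values.yaml \
    --set global.monitoring=true
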
23 changes: 18 additions & 5 deletions helm-charts/chatqna/templates/custom-metrics-configmap.yaml
@@ -13,10 +13,27 @@ metadata:
data:
config.yaml: |
rules:
{{- if .Values.tgi.autoscaling.enabled }}
{{- if and .Values.vllm.enabled .Values.vllm.autoscaling.enabled }}
# check metric with:
# kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1/namespaces/default/service/*/<metric> | jq
#
- seriesQuery: '{__name__="vllm:time_per_output_token_seconds_sum",service="{{ include "vllm.fullname" .Subcharts.vllm }}"}'
# Average output token latency from vLLM histograms, over 1 min
# (interval should be at least 4x serviceMonitor query interval,
# 0.001 divider add is to make sure there's always a valid value)
metricsQuery: 'rate(vllm:time_per_output_token_seconds_sum{service="{{ include "vllm.fullname" .Subcharts.vllm }}",<<.LabelMatchers>>}[1m]) / (0.001+rate(vllm:time_per_output_token_seconds_count{service="{{ include "vllm.fullname" .Subcharts.vllm }}",<<.LabelMatchers>>}[1m]))'
name:
matches: ^vllm:time_per_output_token_seconds_sum
as: "{{ include "vllm.metricPrefix" .Subcharts.vllm }}_token_latency"
resources:
# HPA needs both namespace + suitable object resource for its query paths:
# /apis/custom.metrics.k8s.io/v1beta1/namespaces/default/service/*/<metric>
# (pod is not suitable object type for matching as each instance has different name)
overrides:
namespace: {resource: "namespace"}
service: {resource: "service"}
{{- end }}
{{- if and .Values.tgi.enabled .Values.tgi.autoscaling.enabled }}
{{- if .Values.tgi.accelDevice }}
- seriesQuery: '{__name__="tgi_queue_size",service="{{ include "tgi.fullname" .Subcharts.tgi }}"}'
# TGI instances queue_size sum
@@ -27,16 +44,12 @@ data:
{{- else }}
- seriesQuery: '{__name__="tgi_request_inference_duration_sum",service="{{ include "tgi.fullname" .Subcharts.tgi }}"}'
# Average request latency from TGI histograms, over 1 min
# (0.001 divider add is to make sure there's always a valid value)
metricsQuery: 'rate(tgi_request_inference_duration_sum{service="{{ include "tgi.fullname" .Subcharts.tgi }}",<<.LabelMatchers>>}[1m]) / (0.001+rate(tgi_request_inference_duration_count{service="{{ include "tgi.fullname" .Subcharts.tgi }}",<<.LabelMatchers>>}[1m]))'
name:
matches: ^tgi_request_inference_duration_sum
as: "{{ include "tgi.metricPrefix" .Subcharts.tgi }}_request_latency"
{{- end }}
resources:
# HPA needs both namespace + suitable object resource for its query paths:
# /apis/custom.metrics.k8s.io/v1beta1/namespaces/default/service/*/<metric>
# (pod is not suitable object type for matching as each instance has different name)
overrides:
namespace: {resource: "namespace"}
service: {resource: "service"}
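Once deployed with autoscaling enabled, the new vLLM rule can be sanity-checked through the custom metrics API, as the template comment above suggests. A minimal sketch, assuming a release whose vLLM fullname renders to "chatqna-vllm" (so the metric prefix becomes "chatqna_vllm"); adjust the namespace and metric name to your deployment:

  # the metric name below is an assumption derived from the release name
  kubectl get --raw \
    "/apis/custom.metrics.k8s.io/v1beta1/namespaces/default/services/*/chatqna_vllm_token_latency" | jq
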
3 changes: 2 additions & 1 deletion helm-charts/common/vllm/README.md
@@ -51,4 +51,5 @@ curl http://localhost:2080/v1/completions \
| global.modelUseHostPath | string | `""` | Cached models directory, vllm will not download if the model is cached here. The host path "modelUseHostPath" will be mounted to container as /data directory. Set this to null/empty will force it to download model. |
| image.repository | string | `"opea/vllm"` | |
| image.tag | string | `"latest"` | |
| global.monitoring | bool | `false` | Enable usage metrics for the service. See [monitoring instructions](../../monitoring.md) before enabling! |
| autoscaling.enabled | bool | `false` | Enable HPA autoscaling for the service deployment based on metrics it provides. See [HPA instructions](../../HPA.md) before enabling! |
| global.monitoring | bool | `false` | Enable usage metrics for the service. Required for HPA. See [monitoring instructions](../../monitoring.md) before enabling! |
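For the standalone vllm chart, a minimal sketch of turning on both of the new options documented above (release name and chart path are assumptions); note that the custom-metrics rules the HPA relies on are provided by the top-level application chart, not by this subchart alone:

  # monitoring must be enabled for the HPA template to render at all
  helm install vllm ./helm-charts/common/vllm \
    --set global.monitoring=true \
    --set autoscaling.enabled=true \
    --set autoscaling.maxReplicas=4
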
2 changes: 2 additions & 0 deletions helm-charts/common/vllm/gaudi-values.yaml
@@ -5,6 +5,8 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

accelDevice: "gaudi"

image:
repository: opea/vllm-gaudi
tag: "latest"
7 changes: 7 additions & 0 deletions helm-charts/common/vllm/templates/_helpers.tpl
@@ -30,6 +30,13 @@ Create chart name and version as used by the chart label.
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Convert chart name to a string suitable as metric prefix
*/}}
{{- define "vllm.metricPrefix" -}}
{{- include "vllm.fullname" . | replace "-" "_" | regexFind "[a-zA-Z_:][a-zA-Z0-9_:]*" }}
{{- end }}

{{/*
Common labels
*/}}
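To make the helper concrete: for a release whose vLLM fullname renders to "chatqna-vllm" (an assumed name), "vllm.metricPrefix" yields "chatqna_vllm", so the autoscaler added below ends up watching a metric named "chatqna_vllm_token_latency". One hedged way to inspect the rendered names:

  # assumed chart path and release name; run "helm dependency update" first if subcharts are not vendored
  helm template chatqna ./helm-charts/chatqna -f helm-charts/chatqna/hpa-values.yaml \
    | grep '_token_latency'
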
7 changes: 7 additions & 0 deletions helm-charts/common/vllm/templates/deployment.yaml
@@ -8,7 +8,10 @@ metadata:
labels:
{{- include "vllm.labels" . | nindent 4 }}
spec:
{{- if ne (int .Values.replicaCount) 1 }}
# remove if replica count should not be reset on pod update (e.g. with HPA)
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "vllm.selectorLabels" . | nindent 6 }}
@@ -125,3 +128,7 @@ spec:
matchLabels:
{{- include "vllm.selectorLabels" . | nindent 14 }}
{{- end }}
{{- if not .Values.accelDevice }}
# extra time to finish processing buffered requests on CPU before pod is forcibly terminated
terminationGracePeriodSeconds: 120
{{- end }}
57 changes: 57 additions & 0 deletions helm-charts/common/vllm/templates/horizontal-pod-autoscaler.yaml
@@ -0,0 +1,57 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

{{- if and .Values.global.monitoring .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "vllm.fullname" . }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "vllm.fullname" . }}
minReplicas: 1
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
- type: Object
object:
describedObject:
apiVersion: v1
# get metric for named object of given type (in same namespace)
kind: Service
name: {{ include "vllm.fullname" . }}
target:
# Metric is sum from all pods. "AverageValue" divides value returned from
# the custom metrics API by the number of Pods before comparing to the target:
# https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#algorithm-details
# https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/#autoscaling-on-multiple-metrics-and-custom-metrics
type: AverageValue
{{- if .Values.accelDevice }}
averageValue: 0.1
{{- else }}
# allow larger latencies with unaccelerated service
averageValue: 1.0
{{- end }}
metric:
name: {{ include "vllm.metricPrefix" . }}_token_latency
behavior:
scaleDown:
stabilizationWindowSeconds: 180
policies:
- type: Percent
value: 25
periodSeconds: 90
scaleUp:
selectPolicy: Max
stabilizationWindowSeconds: 0
policies:
# Slow linear rampup in case additional CPU pods go to same node
# (i.e. interfere with each other)
- type: Pods
value: 1
periodSeconds: 90
#- type: Percent
# value: 25
# periodSeconds: 90
{{- end }}
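After rollout, the scaling behavior defined here can be watched with standard HPA tooling; a small sketch, assuming the HPA object is named "chatqna-vllm" after the vLLM fullname:

  # shows current metric value, target, and replica count as load changes
  kubectl get hpa chatqna-vllm -w
  # shows scaling events and the per-metric status reported through the custom metrics API
  kubectl describe hpa chatqna-vllm
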
11 changes: 11 additions & 0 deletions helm-charts/common/vllm/values.yaml
@@ -7,6 +7,17 @@

replicaCount: 1

# Enabling HPA will:
# - Ignore above replica count, as it will be controlled by HPA
# - Add example HPA scaling rules with custom metrics thresholds
# - Require custom metrics ConfigMap available in the main application chart
autoscaling:
maxReplicas: 4
enabled: false

# empty for CPU (longer latencies are tolerated before HPA scaling unaccelerated service)
accelDevice: ""

port: 2080
shmSize: 1Gi
image:
