
Commit

Merge branch 'intel:main' into add_finetune_gaudi2
Deegue authored Jun 25, 2024
2 parents df3fe63 + 7b16ced commit 741488a
Showing 28 changed files with 325 additions and 122 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/config/llama-2-7b-chat-hf-vllm-fp32.yaml
@@ -14,7 +14,7 @@ ipex:
enabled: false
precision: bf16
model_description:
-model_id_or_path: meta-llama/Llama-2-7b-chat-hf
-tokenizer_name_or_path: meta-llama/Llama-2-7b-chat-hf
+model_id_or_path: NousResearch/Llama-2-7b-chat-hf
+tokenizer_name_or_path: NousResearch/Llama-2-7b-chat-hf
config:
use_auth_token: ''
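Net effect of this file: the gated meta-llama checkpoints are replaced by the ungated NousResearch mirrors, so the empty `use_auth_token` remains sufficient. As an illustrative check (not part of the commit), the updated config can be read with the same `yaml.load` pattern used elsewhere in this diff; the path and keys come from the hunk above, everything else is an assumption:

```python
# Illustrative only: confirm the serving config now points at the ungated mirror.
import yaml

conf_path = ".github/workflows/config/llama-2-7b-chat-hf-vllm-fp32.yaml"
with open(conf_path, encoding="utf-8") as reader:
    conf = yaml.load(reader, Loader=yaml.FullLoader)

print(conf["model_description"]["model_id_or_path"])         # NousResearch/Llama-2-7b-chat-hf
print(conf["model_description"]["config"]["use_auth_token"])  # '' (no token required)
```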
13 changes: 5 additions & 8 deletions .github/workflows/workflow_finetune.yml
@@ -11,10 +11,10 @@ on:
default: '10.1.2.13:5000/llmray-build'
http_proxy:
type: string
-default: 'http://10.24.221.169:911'
+default: 'http://10.24.221.169:912'
https_proxy:
type: string
-default: 'http://10.24.221.169:911'
+default: 'http://10.24.221.169:912'
runner_config_path:
type: string
default: '/home/ci/llm-ray-actions-runner'
@@ -34,15 +34,15 @@ jobs:
name: finetune
strategy:
matrix:
-model: [ EleutherAI/gpt-j-6b, meta-llama/Llama-2-7b-chat-hf, gpt2, bigscience/bloom-560m, facebook/opt-125m, mosaicml/mpt-7b, meta-llama/Llama-2-7b-hf, mistralai/Mistral-7B-v0.1, google/gemma-2b]
+model: [ EleutherAI/gpt-j-6b, NousResearch/Llama-2-7b-chat-hf, gpt2, bigscience/bloom-560m, facebook/opt-125m, mosaicml/mpt-7b, NousResearch/Llama-2-7b-hf, mistralai/Mistral-7B-v0.1, google/gemma-2b]
isPR:
- ${{inputs.ci_type == 'pr'}}

exclude:
- { isPR: true }
include:
- { model: "EleutherAI/gpt-j-6b"}
- { model: "meta-llama/Llama-2-7b-chat-hf"}
- { model: "NousResearch/Llama-2-7b-chat-hf"}
- { model: "mistralai/Mistral-7B-v0.1"}
- { model: "google/gemma-2b"}

@@ -65,9 +65,6 @@ jobs:
- name: Checkout
uses: actions/checkout@v4

-- name: Load environment variables
-run: cat /root/actions-runner-config/.env >> $GITHUB_ENV

- name: Build Docker Image
run: |
DF_SUFFIX=".cpu_and_deepspeed"
@@ -83,7 +80,7 @@ jobs:
model_cache_path=${{ inputs.model_cache_path }}
USE_PROXY="1"
source dev/scripts/ci-functions.sh
-start_docker ${TARGET} ${code_checkout_path} ${model_cache_path} ${USE_PROXY} ${{env.HF_ACCESS_TOKEN}}
+start_docker ${TARGET} ${code_checkout_path} ${model_cache_path} ${USE_PROXY}
- name: Run Finetune Test
run: |
6 changes: 3 additions & 3 deletions .github/workflows/workflow_finetune_gpu.yml
@@ -8,17 +8,17 @@ on:
default: '10.1.2.13:5000/llmray-build'
http_proxy:
type: string
-default: 'http://10.24.221.169:911'
+default: 'http://10.24.221.169:912'
https_proxy:
type: string
-default: 'http://10.24.221.169:911'
+default: 'http://10.24.221.169:912'

jobs:
finetune-gpu:
name: finetune-gpu
strategy:
matrix:
-model: [ meta-llama/Llama-2-7b-chat-hf ]
+model: [ NousResearch/Llama-2-7b-chat-hf ]
runs-on: self-hosted

defaults:
9 changes: 3 additions & 6 deletions .github/workflows/workflow_inference.yml
@@ -11,10 +11,10 @@ on:
default: '10.1.2.13:5000/llmray-build'
http_proxy:
type: string
-default: 'http://10.24.221.169:911'
+default: 'http://10.24.221.169:912'
https_proxy:
type: string
-default: 'http://10.24.221.169:911'
+default: 'http://10.24.221.169:912'
runner_config_path:
type: string
default: '/home/ci/llm-ray-actions-runner'
@@ -67,9 +67,6 @@ jobs:
- name: Checkout
uses: actions/checkout@v4

-- name: Load environment variables
-run: cat /root/actions-runner-config/.env >> $GITHUB_ENV

- name: Determine Target
id: "target"
run: |
@@ -94,7 +91,7 @@ jobs:
model_cache_path=${{ inputs.model_cache_path }}
USE_PROXY="1"
source dev/scripts/ci-functions.sh
-start_docker ${TARGET} ${code_checkout_path} ${model_cache_path} ${USE_PROXY} ${{env.HF_ACCESS_TOKEN}}
+start_docker ${TARGET} ${code_checkout_path} ${model_cache_path} ${USE_PROXY}
- name: Start Ray Cluster
run: |
6 changes: 0 additions & 6 deletions .github/workflows/workflow_inference_gaudi2.yml
@@ -73,9 +73,6 @@ jobs:
- name: Checkout
uses: actions/checkout@v4

-- name: Load environment variables
-run: cat /root/actions-runner-config/.env >> $GITHUB_ENV

- name: Build Docker Image
run: |
DF_SUFFIX=".gaudi2"
@@ -98,7 +95,6 @@ jobs:
cid=$(docker ps -a -q --filter "name=${TARGET}")
if [[ ! -z "$cid" ]]; then docker rm $cid; fi
docker run -tid --name="${TARGET}" --hostname="${TARGET}-container" --runtime=habana -v /home/yizhong/Model-References:/root/Model-References -v ${{ inputs.code_checkout_path }}:/root/llm-on-ray -v ${{ inputs.model_cache_path }}:/root/.cache/huggingface/hub/ -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --cap-add sys_ptrace --net=host --ipc=host ${TARGET}:habana
- name: Start Ray Cluster
run: |
TARGET=${{steps.target.outputs.target}}
@@ -117,7 +113,6 @@ jobs:
conf_path = "llm_on_ray/inference/models/hpu/llama-2-7b-chat-hf-vllm-hpu.yaml"
with open(conf_path, encoding="utf-8") as reader:
result = yaml.load(reader, Loader=yaml.FullLoader)
-result['model_description']["config"]["use_auth_token"] = "${{ env.HF_ACCESS_TOKEN }}"
with open(conf_path, 'w') as output:
yaml.dump(result, output, sort_keys=False)
EOF
@@ -128,7 +123,6 @@ jobs:
elif [[ ${{ matrix.model }} == "llama-2-70b-chat-hf" ]]; then
docker exec "${TARGET}" bash -c "llm_on_ray-serve --config_file llm_on_ray/inference/models/hpu/llama-2-70b-chat-hf-hpu.yaml --keep_serve_terminal"
elif [[ ${{ matrix.model }} == "llama-2-7b-chat-hf-vllm" ]]; then
docker exec "${TARGET}" bash -c "huggingface-cli login --token ${{ env.HF_ACCESS_TOKEN }}"
docker exec "${TARGET}" bash -c "llm_on_ray-serve --config_file llm_on_ray/inference/models/hpu/llama-2-7b-chat-hf-vllm-hpu.yaml --keep_serve_terminal"
fi
echo Streaming query:
22 changes: 2 additions & 20 deletions .github/workflows/workflow_test_benchmark.yml
@@ -11,10 +11,10 @@ on:
default: '10.1.2.13:5000/llmray-build'
http_proxy:
type: string
-default: 'http://10.24.221.169:911'
+default: 'http://10.24.221.169:912'
https_proxy:
type: string
-default: 'http://10.24.221.169:911'
+default: 'http://10.24.221.169:912'
runner_config_path:
type: string
default: '/home/ci/llm-ray-actions-runner'
@@ -92,24 +92,6 @@ jobs:
TARGET=${{steps.target.outputs.target}}
# Additional libraries required for pytest
docker exec "${TARGET}" bash -c "pip install -r tests/requirements.txt"
-CMD=$(cat << EOF
-import yaml
-conf_path = "llm_on_ray/inference/models/llama-2-7b-chat-hf.yaml"
-with open(conf_path, encoding="utf-8") as reader:
-result = yaml.load(reader, Loader=yaml.FullLoader)
-result['model_description']["config"]["use_auth_token"] = "${{ env.HF_ACCESS_TOKEN }}"
-with open(conf_path, 'w') as output:
-yaml.dump(result, output, sort_keys=False)
-conf_path = "llm_on_ray/inference/models/vllm/llama-2-7b-chat-hf-vllm.yaml"
-with open(conf_path, encoding="utf-8") as reader:
-result = yaml.load(reader, Loader=yaml.FullLoader)
-result['model_description']["config"]["use_auth_token"] = "${{ env.HF_ACCESS_TOKEN }}"
-with open(conf_path, 'w') as output:
-yaml.dump(result, output, sort_keys=False)
-EOF
-)
-docker exec "${TARGET}" python -c "$CMD"
-docker exec "${TARGET}" bash -c "huggingface-cli login --token ${{ env.HF_ACCESS_TOKEN }}"
docker exec "${TARGET}" bash -c "./tests/run-tests-benchmark.sh"
- name: Stop Ray
run: |
9 changes: 8 additions & 1 deletion README.md
@@ -71,7 +71,14 @@ Deploy a model on Ray and expose an endpoint for serving. This command uses GPT2
```bash
llm_on_ray-serve --config_file llm_on_ray/inference/models/gpt2.yaml
```

+You can also serve a model directly by its model_id:
+```bash
+llm_on_ray-serve --models gpt2
+```
+List all supported model_ids and their config file paths with:
+```bash
+llm_on_ray-serve --list_model_ids
+```
The default serving method provides an OpenAI-compatible API server ([OpenAI API Reference](https://platform.openai.com/docs/api-reference/chat)); you can access and test it in many ways:
```bash
# using curl
```
2 changes: 1 addition & 1 deletion benchmarks/run_benchmark.sh
@@ -229,4 +229,4 @@ then
fi
output_tokens_length=32
get_best_latency $iter "${input_tokens_length[*]}" $output_tokens_length $benchmark_dir
-fi
+fi
12 changes: 3 additions & 9 deletions dev/scripts/ci-functions.sh
@@ -1,8 +1,8 @@
#!/usr/bin/env bash
set -eo pipefail

-HTTP_PROXY='http://10.24.221.169:911'
-HTTPS_PROXY='http://10.24.221.169:911'
+HTTP_PROXY='http://10.24.221.169:912'
+HTTPS_PROXY='http://10.24.221.169:912'
MODEL_CACHE_PATH_LOACL='/root/.cache/huggingface/hub'
CODE_CHECKOUT_PATH_LOCAL='/root/llm-on-ray'

@@ -59,7 +59,6 @@ start_docker() {
local code_checkout_path=$2
local model_cache_path=$3
local USE_PROXY=$4
-local HF_TOKEN=$5

cid=$(docker ps -q --filter "name=${TARGET}")
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid; fi
@@ -86,12 +85,7 @@ start_docker() {
fi

echo "docker run -tid "${docker_args[@]}" "${TARGET}:latest""
-docker run -tid "${docker_args[@]}" "${TARGET}:latest"
-if [ -z "$HF_TOKEN" ]; then
-echo "no hf token"
-else
-docker exec "${TARGET}" bash -c "huggingface-cli login --token ${HF_TOKEN}"
-fi
+docker run -tid "${docker_args[@]}" "${TARGET}:latest"
}

start_docker_gaudi() {
20 changes: 11 additions & 9 deletions examples/inference/api_server_simple/query_dynamic_batch.py
@@ -18,6 +18,10 @@
import aiohttp
import argparse
from typing import Dict, Union
+from llm_on_ray.inference.api_simple_backend.simple_protocol import (
+SimpleRequest,
+SimpleModelResponse,
+)

parser = argparse.ArgumentParser(
description="Example script to query with multiple requests", add_help=True
@@ -63,9 +67,8 @@
config["top_k"] = float(args.top_k)


-async def send_query(session, endpoint, prompt, config):
-json_request = {"text": prompt, "config": config}
-async with session.post(endpoint, json=json_request) as resp:
+async def send_query(session, endpoint, req):
+async with session.post(endpoint, json=req.dict()) as resp:
return await resp.text()


@@ -86,16 +89,15 @@ async def send_query(session, endpoint, prompt, config):

configs = [config1] * 5 + [config2] * (len(prompts) - 5)

+reqs = [SimpleRequest(text=prompt, config=config) for prompt, config in zip(prompts, configs)]


-async def send_all_query(endpoint, prompts, configs):
+async def send_all_query(endpoint, reqs):
async with aiohttp.ClientSession() as session:
-tasks = [
-send_query(session, endpoint, prompt, config)
-for prompt, config in zip(prompts, configs)
-]
+tasks = [send_query(session, endpoint, req) for req in reqs]
responses = await asyncio.gather(*tasks)
print("\n--------------\n".join(responses))
print("\nTotal responses:", len(responses))


-asyncio.run(send_all_query(args.model_endpoint, prompts, configs))
+asyncio.run(send_all_query(args.model_endpoint, reqs))
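The effect of this diff is that payloads are now built as typed SimpleRequest objects and serialized with .dict() before posting. Below is a minimal sketch of that batched pattern, not part of the commit; the endpoint URL, prompts, and config values are placeholders, while the SimpleRequest usage mirrors the lines above:

```python
# Minimal sketch of the batched query pattern shown above (illustrative, not from the commit).
import asyncio

import aiohttp
from llm_on_ray.inference.api_simple_backend.simple_protocol import SimpleRequest

model_endpoint = "http://127.0.0.1:8000/my-model"  # placeholder endpoint
prompts = ["Once upon a time", "Tell me about Ray"]  # placeholder prompts
reqs = [SimpleRequest(text=p, config={"max_new_tokens": 32}) for p in prompts]


async def send_query(session, endpoint, req):
    # Serialize the typed request with .dict(), as in the example above.
    async with session.post(endpoint, json=req.dict()) as resp:
        return await resp.text()


async def send_all_query(endpoint, reqs):
    async with aiohttp.ClientSession() as session:
        responses = await asyncio.gather(*(send_query(session, endpoint, r) for r in reqs))
        print("\n--------------\n".join(responses))
        print("\nTotal responses:", len(responses))


asyncio.run(send_all_query(model_endpoint, reqs))
```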
14 changes: 10 additions & 4 deletions examples/inference/api_server_simple/query_single.py
@@ -17,6 +17,10 @@
import requests
import argparse
from typing import Dict, Union
+from llm_on_ray.inference.api_simple_backend.simple_protocol import (
+SimpleRequest,
+SimpleModelResponse,
+)

parser = argparse.ArgumentParser(
description="Example script to query with single request", add_help=True
@@ -66,20 +70,22 @@
if args.top_k:
config["top_k"] = float(args.top_k)

sample_input = {"text": prompt, "config": config, "stream": args.streaming_response}
sample_input = SimpleRequest(text=prompt, config=config, stream=args.streaming_response)

proxies = {"http": None, "https": None}
outputs = requests.post(
args.model_endpoint,
proxies=proxies, # type: ignore
-json=sample_input,
+json=sample_input.dict(),
stream=args.streaming_response,
)

+outputs.raise_for_status()

+simple_response = SimpleModelResponse.from_requests_response(outputs)
if args.streaming_response:
-for output in outputs.iter_content(chunk_size=None, decode_unicode=True):
+for output in simple_response.iter_content(chunk_size=1, decode_unicode=True):
print(output, end="", flush=True)
print()
else:
-print(outputs.text, flush=True)
+print(simple_response.text, flush=True)
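On the single-request side, SimpleRequest wraps the outgoing payload and SimpleModelResponse wraps the requests response. A condensed, non-streaming sketch of the flow above (illustrative; the endpoint URL and config values are placeholders, not from the commit):

```python
# Condensed non-streaming version of the single-request flow above (illustrative).
import requests
from llm_on_ray.inference.api_simple_backend.simple_protocol import (
    SimpleRequest,
    SimpleModelResponse,
)

model_endpoint = "http://127.0.0.1:8000/my-model"  # placeholder endpoint
sample_input = SimpleRequest(text="What is Ray?", config={"max_new_tokens": 32}, stream=False)

outputs = requests.post(
    model_endpoint,
    proxies={"http": None, "https": None},  # bypass local proxies, as the example does
    json=sample_input.dict(),
    stream=False,
)
outputs.raise_for_status()

# Wrap the raw response so .text behaves uniformly for streaming and non-streaming modes.
simple_response = SimpleModelResponse.from_requests_response(outputs)
print(simple_response.text, flush=True)
```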
