Add simple and full knowledge pipeline functional tests
This is a port of the old `scripts/test_knowledge.py` into functional
tests that we can run in CI. These tests need to run on a GPU, so they are
marked as 'gpu' in pytest and only execute under a new py3-functional-gpu
tox environment. This also adds a new workflow file to run these tests
on a GPU runner.

Signed-off-by: Ben Browning <[email protected]>
bbrowning committed Jan 8, 2025
1 parent 02ccaef commit 9e3133e
Showing 9 changed files with 379 additions and 61 deletions.
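
These GPU-only tests hook into pytest's marker mechanism so they stay out of ordinary CPU test runs. A minimal sketch of the pattern (the test name and body here are illustrative, not taken from this commit):

    # Third Party
    import pytest

    # In CI these tests are selected via the new tox environment (tox -e py3-functional-gpu);
    # elsewhere they can be skipped with: pytest -m "not gpu"
    @pytest.mark.gpu
    def test_needs_a_gpu():
        ...  # the real tests below exercise the SDG knowledge pipelines end to end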
155 changes: 155 additions & 0 deletions .github/workflows/functional-gpu-nvidia-t4-x1.yml
@@ -0,0 +1,155 @@
# SPDX-License-Identifier: Apache-2.0

name: Functional GPU (NVIDIA Tesla T4 x1)

on:
  # temporarily run for this PR for anything that changes this workflow file
  pull_request:
    paths:
      - ".github/workflows/functional-gpu-nvidia-t4-x1.yml" # This workflow
  # run against every merge commit to 'main' and release branches
  push:
    branches:
      - main
      - release-*
  # only run on PRs that touch certain regex paths
  pull_request_target:
    branches:
      - main
      - release-*
    paths:
      # note this should match the merging criteria in 'mergify.yml'
      - "**.py"
      - "pyproject.toml"
      - "requirements**.txt"
      - ".github/workflows/functional-gpu-nvidia-t4-x1.yml" # This workflow

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

env:
  LC_ALL: en_US.UTF-8

defaults:
  run:
    shell: bash

permissions:
  contents: read

jobs:
  start-small-ec2-runner:
    runs-on: ubuntu-latest
    outputs:
      label: ${{ steps.start-ec2-runner.outputs.label }}
      ec2-instance-id: ${{ steps.start-ec2-runner.outputs.ec2-instance-id }}
    steps:
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.AWS_REGION }}

      - name: Start EC2 runner
        id: start-ec2-runner
        uses: machulav/ec2-github-runner@1827d6ca7544d7044ddbd2e9360564651b463da2 # v2.3.7
        with:
          mode: start
          github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
          ec2-image-id: ${{ vars.AWS_EC2_AMI }}
          ec2-instance-type: g4dn.2xlarge
          subnet-id: subnet-02d230cffd9385bd4
          security-group-id: sg-06300447c4a5fbef3
          iam-role-name: instructlab-ci-runner
          aws-resource-tags: >
            [
              {"Key": "Name", "Value": "instructlab-ci-github-small-runner"},
              {"Key": "GitHubRepository", "Value": "${{ github.repository }}"},
              {"Key": "GitHubRef", "Value": "${{ github.ref }}"},
              {"Key": "GitHubPR", "Value": "${{ github.event.number }}"}
            ]

  functional-gpu-small-test:
    needs:
      - start-small-ec2-runner
    runs-on: ${{ needs.start-small-ec2-runner.outputs.label }}

    # It is important that this job has no write permissions and has
    # no access to any secrets. This part is where we are running
    # untrusted code from PRs.
    permissions: {}

    steps:
      - name: Install Packages
        run: |
          cat /etc/os-release
          sudo dnf install -y gcc gcc-c++ make git python3.11 python3.11-devel

      - name: Checkout instructlab/sdg
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          repository: "instructlab/sdg"
          path: "sdg"
          # https://github.com/actions/checkout/issues/249
          fetch-depth: 0

      - name: Fetch and checkout PR
        if: github.event_name == 'pull_request_target'
        run: |
          git fetch origin pull/${{ github.event.pull_request.number }}/head:pr-${{ github.event.pull_request.number }}
          git checkout pr-${{ github.event.pull_request.number }}

      - name: Install instructlab/sdg
        run: |
          export PATH="/home/ec2-user/.local/bin:/usr/local/cuda/bin:$PATH"
          python3.11 -m venv --upgrade-deps venv
          . venv/bin/activate
          nvidia-smi
          python3.11 -m pip cache remove llama_cpp_python
          CMAKE_ARGS="-DLLAMA_CUDA=on" python3.11 -m pip install -r requirements-dev.txt

      - name: Check disk before tests
        run: |
          df -h

      - name: Run functional gpu tests with tox
        run: |
          tox -e py3-functional-gpu

      - name: Check disk after tests
        run: |
          df -h

  stop-small-ec2-runner:
    needs:
      - start-small-ec2-runner
      - functional-gpu-small-test
    runs-on: ubuntu-latest
    if: ${{ always() }}
    steps:
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ vars.AWS_REGION }}

      - name: Stop EC2 runner
        uses: machulav/ec2-github-runner@1827d6ca7544d7044ddbd2e9360564651b463da2 # v2.3.7
        with:
          mode: stop
          github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
          label: ${{ needs.start-small-ec2-runner.outputs.label }}
          ec2-instance-id: ${{ needs.start-small-ec2-runner.outputs.ec2-instance-id }}

  functional-gpu-small-workflow-complete:
    # we don't want to block PRs on failed EC2 cleanup
    # so not requiring "stop-small-ec2-runner" as well
    needs: ["start-small-ec2-runner", "functional-gpu-small-test"]
    runs-on: ubuntu-latest
    steps:
      - name: Functional GPU Workflow Complete
        run: echo "Functional GPU Workflow Complete"
5 changes: 5 additions & 0 deletions pyproject.toml
@@ -102,3 +102,8 @@ exclude = [
]
# honor excludes by not following there through imports
follow_imports = "silent"

[tool.pytest.ini_options]
markers = [
"gpu: marks tests that should run with gpus (deselect with '-m \"not gpu\"')",
]
5 changes: 5 additions & 0 deletions requirements-dev-gpu.txt
@@ -0,0 +1,5 @@
# SPDX-License-Identifier: Apache-2.0

-r requirements-dev.txt

llama-cpp-python[server]>=0.3.0,<1.0.0
1 change: 1 addition & 0 deletions requirements-dev.txt
@@ -2,6 +2,7 @@

-r requirements.txt

llama-cpp-python[server]>=0.3.0,<1.0.0
pre-commit>=3.0.4,<5.0
pylint>=2.16.2,<4.0
pylint-pydantic
52 changes: 0 additions & 52 deletions scripts/test_knowledge.py

This file was deleted.

97 changes: 97 additions & 0 deletions tests/functional/conftest.py
@@ -1,14 +1,111 @@
# Standard
from importlib import resources
import pathlib
import typing

# Third Party
from datasets import Dataset
from llama_cpp.server.app import create_app
from llama_cpp.server.settings import ModelSettings, ServerSettings
from openai import OpenAI
from starlette.testclient import TestClient
import pytest

# First Party
from src.instructlab.sdg.pipeline import Pipeline, PipelineContext

TESTS_PATH = pathlib.Path(__file__).parent.parent.absolute()


@pytest.fixture
def testdata_path() -> typing.Generator[pathlib.Path, None, None]:
    """Path to local test data directory"""
    yield TESTS_PATH / "testdata"


@pytest.fixture
def num_gpu_layers():
    return 0


@pytest.fixture
def openai_client(model, model_repo_id, num_gpu_layers):
    server_settings = ServerSettings()
    model_settings = [
        ModelSettings(
            model=model,
            hf_model_repo_id=model_repo_id,
            n_gpu_layers=num_gpu_layers,  # just run on the CPU
            verbose=True,
        )
    ]
    app = create_app(
        server_settings=server_settings,
        model_settings=model_settings,
    )

    @app.get("/")
    def read_root():
        return {"message": "Hello from InstructLab! Visit us at https://instructlab.ai"}

    test_client = TestClient(app)
    return OpenAI(
        api_key="EMPTY",
        base_url="http://localhost:8000/v1",
        http_client=test_client,
    )


@pytest.fixture
def teacher_model(openai_client):
    models = openai_client.models.list()
    return models.data[0].id


@pytest.fixture
def max_num_tokens():
    return 256


@pytest.fixture
def pipeline_context(
    openai_client,
    model_family,
    teacher_model,
    num_instructions_to_generate,
    max_num_tokens,
):
    return PipelineContext(
        openai_client,
        model_family,
        teacher_model,
        num_instructions_to_generate,
        max_num_tokens=max_num_tokens,
    )


@pytest.fixture
def knowledge_dataset():
    return Dataset.from_list(
        [
            {
                "icl_query_1": "what is the location of the tubal tonsils?",
                "icl_response_1": "The location of the tubal tonsils is the roof of the pharynx.",
                "icl_query_2": "How long does the adenoid grow?",
                "task_description": "Teaching about human anatomy, specifically tonsils",
                "icl_response_2": "The adenoid grows until the age of 5, starts to shrink at the age of 7 and becomes small in adulthood.",
                "icl_query_3": "What is the immune systems first line of defense against ingested or inhaled foreign pathogens?",
                "icl_response_3": "The tonsils are the immune systems first line of defense.",
                "document": "The **tonsils** are a set of lymphoid organs facing into the aerodigestive tract, which is known as Waldeyer's tonsillar ring and consists of the adenoid tonsil or pharyngeal tonsil, two tubal tonsils, two palatine tonsils, and the lingual tonsils. These organs play an important role in the immune system. When used unqualified, the term most commonly refers specifically to the palatine tonsils, which are two lymphoid organs situated at either side of the back of the human throat. The palatine tonsils and the adenoid tonsil are organs consisting of lymphoepithelial tissue located near the oropharynx and nasopharynx parts of the throat",
                "icl_document": "The **tonsils** are a set of lymphoid organs facing into the aerodigestive tract, which is known as Waldeyer's tonsillar ring and consists of the adenoid tonsil or pharyngeal tonsil, two tubal tonsils, two palatine tonsils, and the lingual tonsils.",
                "domain": "textbook",
                "document_outline": "Medical description of tonsils",
            }
        ]
    )


@pytest.fixture
def knowledge_pipeline(pipeline_context, pipelines_package):
    yaml_path = resources.files(pipelines_package).joinpath("knowledge.yaml")
    return Pipeline.from_file(pipeline_context, yaml_path)
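
A note on the fixture wiring above: `openai_client` boots a llama-cpp-python server in-process and hands its Starlette `TestClient` to the OpenAI SDK as the HTTP transport, so the pipelines talk to a real completions endpoint without spawning a separate server process or opening a network port. A hedged sketch of how a test module could build on these fixtures, assuming it also defines the `model`, `model_family`, and `model_repo_id` fixtures shown in `test_full_pipeline.py` below (the `num_gpu_layers` override and the test body are illustrative, not part of this commit):

    # Third Party
    import pytest

    # Hypothetical override: offload all model layers to the GPU
    # (-1 is the llama.cpp convention for "all layers"; the default fixture returns 0).
    @pytest.fixture
    def num_gpu_layers():
        return -1

    @pytest.mark.gpu
    def test_chat_roundtrip(openai_client, teacher_model):
        # Exercise the in-process server through the standard OpenAI client API.
        resp = openai_client.chat.completions.create(
            model=teacher_model,
            messages=[{"role": "user", "content": "Where are the tubal tonsils located?"}],
            max_tokens=32,
        )
        assert resp.choices[0].message.content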
55 changes: 55 additions & 0 deletions tests/functional/test_full_pipeline.py
@@ -0,0 +1,55 @@
# Standard
import unittest

# Third Party
import pytest

# First Party
from src.instructlab.sdg.datamixing import _get_question_hack, _get_response_hack
from src.instructlab.sdg.pipeline import FULL_PIPELINES_PACKAGE


@pytest.fixture
def model():
    return "mistral-7b-instruct-v0.2.Q5_K_M.gguf"
    # return "mistral-7b-instruct-v0.2.Q4_K_M.gguf"
    # return "mistral-7b-instruct-v0.2.Q3_K_S.gguf"


@pytest.fixture
def model_family():
    return "mixtral"


@pytest.fixture
def model_repo_id():
    return "TheBloke/Mistral-7B-Instruct-v0.2-GGUF"


@pytest.fixture
def num_instructions_to_generate():
    return 1


@pytest.fixture
def pipelines_package():
    return FULL_PIPELINES_PACKAGE


@pytest.mark.gpu
class TestFullPipeline(unittest.TestCase):
    @pytest.fixture(autouse=True)
    def _setup_fixtures(self, knowledge_dataset, knowledge_pipeline):
        self.knowledge_dataset = knowledge_dataset
        self.knowledge_pipeline = knowledge_pipeline

    def test_knowledge(self):
        samples = self.knowledge_pipeline.generate(self.knowledge_dataset)
        print(samples)
        assert len(samples) > 0
        for sample in samples:
            print(sample)
            question = _get_question_hack(sample)
            response = _get_response_hack(sample)
            assert len(question) > 0
            assert len(response) > 0
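
The commit title also covers a simple-pipeline test; that file is among the nine changed files but is not rendered in this view. Assuming it mirrors the structure above and differs mainly in which pipelines package it loads and which quantized model it pulls, it would look roughly like the sketch below (`SIMPLE_PIPELINES_PACKAGE` is assumed to be the counterpart of `FULL_PIPELINES_PACKAGE` in `src.instructlab.sdg.pipeline`; all other names are illustrative):

    # Third Party
    import pytest

    # First Party
    from src.instructlab.sdg.pipeline import SIMPLE_PIPELINES_PACKAGE

    @pytest.fixture
    def pipelines_package():
        return SIMPLE_PIPELINES_PACKAGE

    @pytest.mark.gpu
    def test_simple_knowledge(knowledge_pipeline, knowledge_dataset):
        samples = knowledge_pipeline.generate(knowledge_dataset)
        assert len(samples) > 0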
