Merge branch 'main' into dyas-precommit
dyastremsky authored Jul 30, 2024
2 parents b1061ed + 5d1d883 commit 660fb33
Showing 6 changed files with 139 additions and 32 deletions.
5 changes: 3 additions & 2 deletions genai-perf/README.md
@@ -523,8 +523,9 @@ An option to enable the generation of plots. (default: False)
 ##### `--profile-export-file <path>`

 The path where the perf_analyzer profile export will be generated. By default,
-the profile export will be to `profile_export.json`. The genai-perf file will be
-exported to `<profile_export_file>_genai_perf.csv`. For example, if the profile
+the profile export will be to `profile_export.json`. The genai-perf files will be
+exported to `<profile_export_file>_genai_perf.json` and
+`<profile_export_file>_genai_perf.csv`. For example, if the profile
 export file is `profile_export.json`, the genai-perf file will be exported to
 `profile_export_genai_perf.csv`. (default: `profile_export.json`)

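A note for readers, not part of the commit: the naming rule described above can be reproduced with pathlib. This is a minimal sketch; the input path "custom_export.json" is hypothetical.

    from pathlib import Path

    # Derive both genai-perf output names from a profile export path,
    # following the rule the README describes.
    profile_export_file = Path("custom_export.json")
    stem = profile_export_file.stem  # "custom_export"

    assert f"{stem}_genai_perf.json" == "custom_export_genai_perf.json"
    assert f"{stem}_genai_perf.csv" == "custom_export_genai_perf.csv"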
20 changes: 10 additions & 10 deletions genai-perf/genai_perf/export_data/csv_exporter.py
@@ -30,8 +30,6 @@
 import genai_perf.logging as logging
 from genai_perf.export_data.exporter_config import ExporterConfig

-DEFAULT_OUTPUT_DATA_CSV = "profile_export_genai_perf.csv"
-
 logger = logging.getLogger(__name__)


@@ -65,14 +63,16 @@ def __init__(self, config: ExporterConfig):
         self._args = config.args

     def export(self) -> None:
-        csv_filename = self._output_dir / DEFAULT_OUTPUT_DATA_CSV
-        logger.info(f"Generating {csv_filename}")
-
-        with open(csv_filename, mode="w", newline="") as csvfile:
-            csv_writer = csv.writer(csvfile)
-            self._write_request_metrics(csv_writer)
-            csv_writer.writerow([])
-            self._write_system_metrics(csv_writer)
+        filename = (
+            self._output_dir / f"{self._args.profile_export_file.stem}_genai_perf.csv"
+        )
+        logger.info(f"Generating {filename}")
+
+        with open(filename, mode="w", newline="") as f:
+            writer = csv.writer(f)
+            self._write_request_metrics(writer)
+            writer.writerow([])
+            self._write_system_metrics(writer)

     def _write_request_metrics(self, csv_writer) -> None:
         csv_writer.writerow(self.REQUEST_METRICS_HEADER)
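Aside, not part of the commit: the `csv.writer` pattern used in `export()` emits `\r\n` row endings regardless of platform, which is why the tests later in this diff expect `\r\n` in every row. A self-contained sketch with hypothetical rows:

    import csv
    from io import StringIO

    # csv.writer defaults to "\r\n" line terminators; the empty
    # writerow([]) produces the blank separator row between sections.
    buffer = StringIO()
    writer = csv.writer(buffer)
    writer.writerow(["Metric", "Value"])
    writer.writerow([])
    writer.writerow(["Request Throughput (per sec)", "123.00"])

    assert buffer.getvalue() == (
        "Metric,Value\r\n" "\r\n" "Request Throughput (per sec),123.00\r\n"
    )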
8 changes: 5 additions & 3 deletions genai-perf/genai_perf/export_data/json_exporter.py
@@ -26,14 +26,13 @@


 import json
+import os
 from enum import Enum
 from typing import Dict

 import genai_perf.logging as logging
 from genai_perf.export_data.exporter_config import ExporterConfig

-DEFAULT_OUTPUT_DATA_JSON = "profile_export_genai_perf.json"
-
 logger = logging.getLogger(__name__)


@@ -52,7 +51,10 @@ def __init__(self, config: ExporterConfig):
         self._merge_stats_and_args()

     def export(self) -> None:
-        filename = self._output_dir / DEFAULT_OUTPUT_DATA_JSON
+        prefix = os.path.splitext(os.path.basename(self._args["profile_export_file"]))[
+            0
+        ]
+        filename = self._output_dir / f"{prefix}_genai_perf.json"
        logger.info(f"Generating {filename}")
        with open(str(filename), "w") as f:
            f.write(json.dumps(self._stats_and_args, indent=2))
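Aside, not part of the commit: the `splitext`/`basename` combination above produces the same prefix as pathlib's `Path.stem`, which the CSV exporter uses. A sketch of the equivalence; the example path is hypothetical.

    import os
    from pathlib import Path

    profile_export_file = "artifacts/run1/custom_export.json"  # hypothetical

    prefix = os.path.splitext(os.path.basename(profile_export_file))[0]
    assert prefix == "custom_export"
    assert prefix == Path(profile_export_file).stem  # same result via pathlib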
5 changes: 3 additions & 2 deletions genai-perf/genai_perf/parser.py
@@ -626,8 +626,9 @@ def _add_output_args(parser):
         default=Path("profile_export.json"),
         help="The path where the perf_analyzer profile export will be "
         "generated. By default, the profile export will be to profile_export.json. "
-        "The genai-perf file will be exported to <profile_export_file>_genai_perf.csv. "
-        "For example, if the profile export file is profile_export.json, the genai-perf file will be "
+        "The genai-perf files will be exported to <profile_export_file>_genai_perf.json and "
+        "<profile_export_file>_genai_perf.csv. "
+        "For example, if the profile export file is profile_export.json, the genai-perf CSV file will be "
         "exported to profile_export_genai_perf.csv.",
     )

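Aside, not part of the commit: a trimmed-down reconstruction of this option. Only the default is visible in the hunk above, so `type=Path` is an assumption, made because downstream code calls `.stem` on the parsed value.

    import argparse
    from pathlib import Path

    # Hypothetical minimal parser; the real _add_output_args defines many
    # more options. type=Path is assumed, not shown in the hunk above.
    p = argparse.ArgumentParser(prog="genai-perf")
    p.add_argument(
        "--profile-export-file",
        type=Path,
        default=Path("profile_export.json"),
    )

    args = p.parse_args(["--profile-export-file", "custom_export.json"])
    assert args.profile_export_file.stem == "custom_export"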
45 changes: 31 additions & 14 deletions genai-perf/tests/test_csv_exporter.py
@@ -24,9 +24,10 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+import os
 from io import StringIO
 from pathlib import Path
-from typing import Any, List
+from typing import Any, List, Tuple

 import pytest
 from genai_perf import parser
@@ -37,26 +38,22 @@

 class TestCsvExporter:
     @pytest.fixture
-    def mock_read_write(self, monkeypatch: pytest.MonkeyPatch) -> List[str]:
+    def mock_read_write(self, monkeypatch: pytest.MonkeyPatch) -> List[Tuple[str, str]]:
         """
         This function will mock the open function for specific files.
         """

         written_data = []

-        original_open = open
-
         def custom_open(filename, *args, **kwargs):
             def write(self: Any, content: str) -> int:
-                written_data.append(content)
+                print(f"Writing to {filename}")  # To help with debugging failures
+                written_data.append((str(filename), content))
                 return len(content)

-            if str(filename) == "profile_export_genai_perf.csv":
-                tmp_file = StringIO()
-                tmp_file.write = write.__get__(tmp_file)
-                return tmp_file
-            else:
-                return original_open(filename, *args, **kwargs)
+            tmp_file = StringIO()
+            tmp_file.write = write.__get__(tmp_file)
+            return tmp_file

         monkeypatch.setattr("builtins.open", custom_open)

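Aside, not part of the commit: outside pytest, the fixture's interception technique boils down to a few lines. A minimal sketch of the pattern; the file name and CSV row are hypothetical.

    import builtins
    from io import StringIO
    from typing import Any

    written_data = []

    def custom_open(filename, *args, **kwargs):
        # Return an in-memory buffer whose write() records
        # (filename, content) pairs instead of touching disk.
        def write(self: Any, content: str) -> int:
            written_data.append((str(filename), content))
            return len(content)

        tmp_file = StringIO()
        tmp_file.write = write.__get__(tmp_file)  # bind write() to this buffer
        return tmp_file

    original_open = builtins.open
    builtins.open = custom_open  # what monkeypatch.setattr does, minus auto-undo
    try:
        with open("example_genai_perf.csv", mode="w", newline="") as f:
            f.write("Metric,Value\r\n")
    finally:
        builtins.open = original_open  # always restore the real open()

    assert written_data == [("example_genai_perf.csv", "Metric,Value\r\n")]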
@@ -115,7 +112,16 @@ def test_streaming_llm_csv_output(
"Output Token Throughput (per sec),456.00\r\n",
"Request Throughput (per sec),123.00\r\n",
]
returned_data = mock_read_write
expected_filename = "profile_export_genai_perf.csv"
returned_data = [
data
for filename, data in mock_read_write
if os.path.basename(filename) == expected_filename
]
if returned_data == []:
raise Exception(
f"Expected file {expected_filename} not found in written data."
)
assert returned_data == expected_content

def test_nonstreaming_llm_csv_output(
@@ -125,6 +131,9 @@ def test_nonstreaming_llm_csv_output(
         Collect LLM metrics from profile export data and confirm correct values are
         printed in csv.
         """
+        artifacts_dir = "artifacts/model_name-openai-chat-concurrency1"
+        custom_filename = "custom_export.json"
+        expected_filename = f"{artifacts_dir}/custom_export_genai_perf.csv"
         argv = [
             "genai-perf",
             "profile",
@@ -134,6 +143,8 @@
"openai",
"--endpoint-type",
"chat",
"--profile-export-file",
custom_filename,
]
monkeypatch.setattr("sys.argv", argv)
args, _ = parser.parse_args()
@@ -168,7 +179,13 @@ def test_nonstreaming_llm_csv_output(
"Output Token Throughput (per sec),456.00\r\n",
"Request Throughput (per sec),123.00\r\n",
]
returned_data = mock_read_write
returned_data = [
data for filename, data in mock_read_write if filename == expected_filename
]
if returned_data == []:
raise Exception(
f"Expected file {expected_filename} not found in written data."
)
assert returned_data == expected_content

def test_embedding_csv_output(
@@ -209,5 +226,5 @@ def test_embedding_csv_output(
"Metric,Value\r\n",
"Request Throughput (per sec),123.00\r\n",
]
returned_data = mock_read_write
returned_data = [data for _, data in mock_read_write]
assert returned_data == expected_content
88 changes: 87 additions & 1 deletion genai-perf/tests/test_json_exporter.py
@@ -25,14 +25,42 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

 import json
+import os
+from io import StringIO
+from typing import Any, List, Tuple

 import genai_perf.parser as parser
+import pytest
 from genai_perf.export_data.exporter_config import ExporterConfig
 from genai_perf.export_data.json_exporter import JsonExporter


 class TestJsonExporter:
-    def test_generate_json(self, monkeypatch) -> None:
+    @pytest.fixture
+    def mock_read_write(self, monkeypatch: pytest.MonkeyPatch) -> List[Tuple[str, str]]:
+        """
+        This function will mock the open function for specific files.
+        """
+
+        written_data = []
+
+        def custom_open(filename, *args, **kwargs):
+            def write(self: Any, content: str) -> int:
+                print(f"Writing to {filename}")
+                written_data.append((str(filename), content))
+                return len(content)
+
+            tmp_file = StringIO()
+            tmp_file.write = write.__get__(tmp_file)
+            return tmp_file
+
+        monkeypatch.setattr("builtins.open", custom_open)
+
+        return written_data
+
+    def test_generate_json(
+        self, monkeypatch, mock_read_write: pytest.MonkeyPatch
+    ) -> None:
         cli_cmd = [
             "genai-perf",
             "profile",
@@ -55,6 +83,64 @@ def test_generate_json(self, monkeypatch) -> None:
         config.artifact_dir = args.artifact_dir
         json_exporter = JsonExporter(config)
         assert json_exporter._stats_and_args == json.loads(self.expected_json_output)
+        json_exporter.export()
+        expected_filename = "profile_export_genai_perf.json"
+        written_data = [
+            data
+            for filename, data in mock_read_write
+            if os.path.basename(filename) == expected_filename
+        ]
+        if written_data == []:
+            raise Exception(
+                f"Expected file {expected_filename} not found in written data."
+            )
+        assert len(written_data) == 1
+        assert json.loads(written_data[0]) == json.loads(self.expected_json_output)
+
+    def test_generate_json_custom_export(
+        self, monkeypatch, mock_read_write: pytest.MonkeyPatch
+    ) -> None:
+        artifacts_dir = "artifacts/gpt2_vllm-triton-vllm-concurrency1"
+        custom_filename = "custom_export.json"
+        expected_filename = f"{artifacts_dir}/custom_export_genai_perf.json"
+        expected_profile_filename = f"{artifacts_dir}/custom_export.json"
+        cli_cmd = [
+            "genai-perf",
+            "profile",
+            "-m",
+            "gpt2_vllm",
+            "--backend",
+            "vllm",
+            "--streaming",
+            "--extra-inputs",
+            "max_tokens:256",
+            "--extra-inputs",
+            "ignore_eos:true",
+            "--profile-export-file",
+            custom_filename,
+        ]
+        monkeypatch.setattr("sys.argv", cli_cmd)
+        args, _ = parser.parse_args()
+        config = ExporterConfig()
+        config.stats = self.stats
+        config.args = args
+        config.extra_inputs = parser.get_extra_inputs_as_dict(args)
+        config.artifact_dir = args.artifact_dir
+        json_exporter = JsonExporter(config)
+        json_exporter.export()
+        written_data = [
+            data for filename, data in mock_read_write if filename == expected_filename
+        ]
+        if written_data == []:
+            raise Exception(
+                f"Expected file {expected_filename} not found in written data."
+            )
+        assert len(written_data) == 1
+        expected_json_output = json.loads(self.expected_json_output)
+        expected_json_output["input_config"][
+            "profile_export_file"
+        ] = expected_profile_filename
+        assert json.loads(written_data[0]) == expected_json_output

     stats = {
         "request_throughput": {"unit": "requests/sec", "avg": "7"},
