Fix console outputs #217

Merged · 12 commits · Feb 12, 2025
@@ -2,7 +2,7 @@ name: MLPerf Inference Nvidia implementations

 on:
   schedule:
-    - cron: "35 01 * * *"
+    - cron: "05 01 * * *"

 jobs:
   run_nvidia:
@@ -23,7 +23,7 @@ jobs:
       model: [ "resnet50", "retinanet", "bert-99", "bert-99.9", "gptj-99.9", "3d-unet-99.9", "sdxl" ]
       exclude:
         - model: gptj-99.9
-        - system: phoenix
+        - system: phoenix1
         - system: GO-i9

     steps:
@@ -59,5 +59,6 @@ jobs:
           mlc pull repo mlcommons@mlperf-automations --branch=dev

           mlcr --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r5.0-dev --preprocess_submission=yes --pull_changes=yes --pull_inference_changes=yes --execution_mode=valid --gpu_name=$gpu_name --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=$category --division=closed --docker_dt --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean $docker_string $submission_preprocessor_args --quiet
           #mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name
           mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/GATEOverflow/mlperf_inference_submissions_v5.0 --repo_branch=main --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name
+          mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name
12 changes: 8 additions & 4 deletions script/app-mlperf-inference-mlcommons-python/customize.py
@@ -3,6 +3,7 @@
 import json
 import shutil
 import subprocess
+from utils import *


 def preprocess(i):
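The new wildcard import is what brings is_true into scope for the hunk below. A minimal sketch of its assumed behavior (the real helper lives in the repository's utils module; this re-implementation is illustrative only):

    def is_true(value) -> bool:
        # Assumed semantics: common truthy strings and booleans count as True.
        return str(value).strip().lower() in ("yes", "true", "1", "on")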
@@ -51,12 +52,14 @@ def preprocess(i):

     env['MLC_MLPERF_LOADGEN_EXTRA_OPTIONS'] += env['MLC_MLPERF_LOADGEN_QPS_OPT']

-    if 'MLC_NUM_THREADS' not in env:
-        if 'MLC_MINIMIZE_THREADS' in env:
+    if env.get('MLC_NUM_THREADS', '') == '':
+        if is_true(env.get('MLC_MINIMIZE_THREADS', '')) and env.get(
+                'MLC_HOST_CPU_TOTAL_CORES', '') != '':
             env['MLC_NUM_THREADS'] = str(int(env['MLC_HOST_CPU_TOTAL_CORES']) //
-                (int(env.get('MLC_HOST_CPU_SOCKETS', '1')) * int(env.get('MLC_HOST_CPU_TOTAL_CORES', '1'))))
+                (int(env.get('MLC_HOST_CPU_SOCKETS', '1'))))
         else:
             env['MLC_NUM_THREADS'] = env.get('MLC_HOST_CPU_TOTAL_CORES', '1')
+    env['CM_NUM_THREADS'] = env['MLC_NUM_THREADS']  # For inference code

     if env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '') != '' and str(env.get(
             'MLC_MLPERF_MODEL_SKIP_BATCHING', False)).lower() not in ["true", "1", "yes"]:
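The divisor fix above is easier to see with concrete numbers; a quick sketch with a hypothetical 2-socket, 32-core host:

    total_cores, sockets = 32, 2
    old_threads = total_cores // (sockets * total_cores)  # 32 // 64 = 0 threads (broken)
    new_threads = total_cores // sockets                  # 32 // 2  = 16 threads per socket
    print(old_threads, new_threads)  # 0 16

The old expression multiplied the socket count by the core count again, so the integer division always collapsed to zero; the corrected form yields cores per socket.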
@@ -270,12 +273,13 @@ def get_run_cmd_reference(
         env['MODEL_FILE'] = env.get(
             'MLC_MLPERF_CUSTOM_MODEL_PATH',
             env.get('MLC_ML_MODEL_FILE_WITH_PATH'))
+
         if not env['MODEL_FILE']:
             return {'return': 1, 'error': 'No valid model file found!'}

         env['LOG_PATH'] = env['MLC_MLPERF_OUTPUT_DIR']

-        extra_options = " --output " + env['MLC_MLPERF_OUTPUT_DIR'] + " --model-name resnet50 --dataset " + env['MLC_MLPERF_VISION_DATASET_OPTION'] + ' --max-batchsize ' + env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '1') + \
+        extra_options = " --output " + env['MLC_MLPERF_OUTPUT_DIR'] + " --model-name resnet50 --dataset " + env['MLC_MLPERF_VISION_DATASET_OPTION'] + f""" --max-batchsize {env.get('MLC_MLPERF_LOADGEN_MAX_BATCHSIZE', '1')}""" + \
             " --dataset-path " + env['MLC_DATASET_PREPROCESSED_PATH'] + " --model " + env['MODEL_FILE'] + \
             " --preprocessed_dir " + env['MLC_DATASET_PREPROCESSED_PATH']
8 changes: 8 additions & 0 deletions script/app-mlperf-inference-mlcommons-python/meta.yaml
@@ -888,6 +888,14 @@ variations:
       ml-model:
         tags: raw,_deepsparse

+  deepsparse,resnet50:
+    default_env:
+      DEEPSPARSE_NUM_STREAMS: 24
+      ENQUEUE_NUM_THREADS: 2
+      MLC_MLPERF_LOADGEN_MAX_BATCHSIZE: 16
+      MLC_MLPERF_VISION_DATASET_OPTION: imagenet_pytorch
+
+
   tvm-onnx:
     group: framework
     env:
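For context, a default_env entry in a variation is assumed to seed an environment key only when the user has not already supplied it. A sketch of that assumed merge rule (illustrative, not the automation's actual code):

    default_env = {"DEEPSPARSE_NUM_STREAMS": "24", "ENQUEUE_NUM_THREADS": "2"}
    env = {"DEEPSPARSE_NUM_STREAMS": "8"}  # hypothetical user override
    for key, value in default_env.items():
        env.setdefault(key, value)         # defaults never clobber user values
    print(env)  # {'DEEPSPARSE_NUM_STREAMS': '8', 'ENQUEUE_NUM_THREADS': '2'}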
24 changes: 23 additions & 1 deletion script/detect-sudo/customize.py
@@ -102,6 +102,24 @@ def is_user_in_sudo_group():
     return False


+def timeout_input(prompt, timeout=15, default=""):
+    """Prompt user for input with a timeout (cross-platform)."""
+    result = [default]  # Store the input result
+
+    def get_input():
+        try:
+            result[0] = getpass.getpass(prompt)
+        except EOFError:  # Handle Ctrl+D or unexpected EOF
+            result[0] = default
+
+    input_thread = threading.Thread(target=get_input)
+    input_thread.daemon = True  # Daemonize thread
+    input_thread.start()
+    input_thread.join(timeout)  # Wait for input with timeout
+
+    return result[0]  # Return user input or default
+
+
 def prompt_sudo():
     if os.geteuid() != 0 and not is_user_in_sudo_group():  # No sudo required for root user

@@ -112,7 +130,11 @@ def prompt_sudo():
             print("Skipping password prompt - non-interactive terminal detected!")
             password = None
         else:
-            password = getpass.getpass("Enter password (-1 to skip): ")
+            # password = getpass.getpass("Enter password (-1 to skip): ")
+            password = timeout_input(
+                "Enter password (-1 to skip): ",
+                timeout=15,
+                default=None)

         # Check if the input is -1
         if password == "-1":
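One caveat about the new helper: Thread.join(timeout) only stops waiting; the daemonized input thread is abandoned rather than killed, so after the timeout prompt_sudo() simply proceeds with the default value. A hypothetical standalone call:

    # Returns None if the user types nothing within 15 seconds, which the
    # caller above then handles the same way as a skipped password.
    password = timeout_input("Enter password (-1 to skip): ", timeout=15, default=None)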
3 changes: 2 additions & 1 deletion script/get-cudnn/customize.py
@@ -103,7 +103,8 @@ def preprocess(i):
         return {'return': 0}

     if env.get('MLC_CUDNN_TAR_FILE_PATH', '') == '':
-        return {'return': 1, 'error': 'Please envoke mlcr "get cudnn" --tar_file={full path to the cuDNN tar file}'}
+        return {
+            'return': 1, 'error': 'Please envoke mlcr get,cudnn --tar_file={full path to the cuDNN tar file}'}

     print('Untaring file - can take some time ...')
5 changes: 4 additions & 1 deletion script/get-ml-model-neuralmagic-zoo/run.sh
@@ -1,2 +1,5 @@
 #!/bin/bash
-${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/download_sparse.py
+cmd="${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/download_sparse.py"
+echo "$cmd"
+eval "$cmd"
+test $? -eq 0 || exit $?
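This echo-before-eval pattern is the core of the console-output fix: the exact command line is printed before it runs, and the script exits with the child's status on failure. A rough Python equivalent of the same pattern (environment-variable names taken from the script above; the fallback defaults are hypothetical):

    import os
    import subprocess
    import sys

    cmd = [os.environ.get("MLC_PYTHON_BIN_WITH_PATH", sys.executable),
           os.path.join(os.environ.get("MLC_TMP_CURRENT_SCRIPT_PATH", "."),
                        "download_sparse.py")]
    print(" ".join(cmd))                 # echo the command before running it
    rc = subprocess.run(cmd).returncode  # run it
    if rc != 0:
        sys.exit(rc)                     # mirror `test $? -eq 0 || exit $?`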
4 changes: 2 additions & 2 deletions script/get-mlperf-inference-src/meta.yaml
@@ -77,7 +77,7 @@ variations:
   deepsparse:
     base:
       - _branch.deepsparse
-      - _repo.https://github.com/neuralmagic/inference
+      - _repo.https://github.com/gateoverflow/nm-inference
   full-history:
     env:
       MLC_GIT_DEPTH: ''
@@ -139,7 +139,7 @@ versions:
     env:
       MLC_MLPERF_LAST_RELEASE: v5.0
       MLC_TMP_GIT_CHECKOUT: deepsparse
-      MLC_TMP_GIT_URL: https://github.com/neuralmagic/inference
+      MLC_TMP_GIT_URL: https://github.com/gateoverflow/nm-inference
   main:
     env:
       MLC_MLPERF_LAST_RELEASE: v5.0
2 changes: 1 addition & 1 deletion script/get-tensorrt/customize.py
@@ -92,7 +92,7 @@ def preprocess(i):
         if env.get('MLC_TENSORRT_REQUIRE_DEV', '') != 'yes':
             tags.append("_dev")
         return {'return': 1, 'error': 'Please envoke mlcr "' +
-                " ".join(tags) + '" --tar_file={full path to the TensorRT tar file}'}
+                ",".join(tags) + '" --tar_file={full path to the TensorRT tar file}'}

     print('Untaring file - can take some time ...')
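The one-character separator change matters because mlcr addresses scripts with a comma-separated tag list (compare the get-cudnn message fixed above), so the suggested command must join tags with commas rather than spaces. An illustrative before/after (tag values hypothetical):

    tags = ["get", "tensorrt", "_dev"]
    print('mlcr "' + " ".join(tags) + '" --tar_file=...')  # mlcr "get tensorrt _dev" ...  (wrong separator)
    print('mlcr "' + ",".join(tags) + '" --tar_file=...')  # mlcr "get,tensorrt,_dev" ...  (the form mlcr expects)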
1 change: 1 addition & 0 deletions script/run-mlperf-inference-app/meta.yaml
@@ -122,6 +122,7 @@ input_mapping:
   pointpillars_checkpoint_path: MLC_ML_MODEL_POINT_PILLARS_PATH
   deeplab_resnet50_path: MLC_ML_MODEL_DPLAB_RESNET50_PATH
   waymo_path: MLC_DATASET_WAYMO_PATH
+  nm_model_zoo_stub: MLC_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB

 new_state_keys:
   - app_mlperf_inference_*
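The new mapping exposes an --nm_model_zoo_stub input on the app. Each input_mapping entry is assumed to copy the matching command-line input into the listed environment key before the script runs; a sketch of that assumed behavior (stub value hypothetical):

    input_mapping = {"nm_model_zoo_stub": "MLC_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB"}

    def apply_input_mapping(cli_inputs, env):
        # Copy recognized CLI inputs into the script's environment.
        for flag, env_key in input_mapping.items():
            if flag in cli_inputs:
                env[env_key] = cli_inputs[flag]
        return env

    print(apply_input_mapping({"nm_model_zoo_stub": "zoo:some/stub"}, {}))
    # {'MLC_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB': 'zoo:some/stub'}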