diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..a7df87b --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,9 @@ +## Background + +## Description of Changes + +## Checklist + +- [ ] New features are documented +- [ ] Tests added for bug fixes and new features +- [ ] (@lanl.gov employees) Update copyright on changed files diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..fb593f8 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,93 @@ +# ======================================================================================== +# (C) (or copyright) 2024. Triad National Security, LLC. All rights reserved. +# +# This program was produced under U.S. Government contract 89233218CNA000001 for Los +# Alamos National Laboratory (LANL), which is operated by Triad National Security, LLC +# for the U.S. Department of Energy/National Nuclear Security Administration. All rights +# in the program are reserved by Triad National Security, LLC, and the U.S. Department +# of Energy/National Nuclear Security Administration. The Government is granted for +# itself and others acting on its behalf a nonexclusive, paid-up, irrevocable worldwide +# license in this material to reproduce, prepare derivative works, distribute copies to +# the public, perform publicly and display publicly, and to permit others to do so. 
+# ======================================================================================== + +# This file was created in part or in whole by one of OpenAI's generative AI models + +name: Continuous Integration + +on: + pull_request: + types: [opened, synchronize, reopened] + +# Cancel outdated workflows +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} + cancel-in-progress: true + +jobs: + format: + if: > + ${{ !contains(github.event.pull_request.title, 'Draft:') && + !contains(github.event.pull_request.title, 'WIP:') }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + submodules: recursive + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.x' # Specify the Python version you need + - name: Install dependencies + run: | + pip install black + - name: Run format check + run: | + source env/bash + VERBOSE=1 ./style/format.sh + git diff --exit-code --ignore-submodules + + cpu: + if: > + ${{ !contains(github.event.pull_request.title, 'Draft:') && + !contains(github.event.pull_request.title, 'WIP:') }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + submodules: recursive + - name: Install dependencies + run: | + sudo apt-get update -qq + sudo apt-get install -qq --no-install-recommends tzdata + sudo apt-get install -qq git + sudo apt-get install -qq make cmake g++ + sudo apt-get install -qq libopenmpi-dev libhdf5-openmpi-dev + sudo apt-get install -qq openssh-client + sudo apt-get install -qq python3 python3-numpy python3-h5py python3-matplotlib + - name: Run CPU tests + run: | + export MAKE_PROGRAM=make + cd tst + mkdir -p build + cd build + cmake --preset=cpu-release ../../ + make -j 4 + cd .. 
+ python3 run_tests.py regression.suite \ + --exe build/src/artemis \ + --log_file=ci_cpu_log.txt + - name: Upload CPU test log + if: always() + uses: actions/upload-artifact@v3 + with: + name: ci_cpu_log.txt + path: tst/ci_cpu_log.txt + retention-days: 3 + - name: Upload figures + if: always() + uses: actions/upload-artifact@v3 + with: + name: figs + path: tst/figs + retention-days: 3 diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index c04c77e..cdea1b4 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -1,3 +1,15 @@ +# ======================================================================================== +# (C) (or copyright) 2024. Triad National Security, LLC. All rights reserved. +# +# This program was produced under U.S. Government contract 89233218CNA000001 for Los +# Alamos National Laboratory (LANL), which is operated by Triad National Security, LLC +# for the U.S. Department of Energy/National Nuclear Security Administration. All rights +# in the program are reserved by Triad National Security, LLC, and the U.S. Department +# of Energy/National Nuclear Security Administration. The Government is granted for +# itself and others acting on its behalf a nonexclusive, paid-up, irrevocable worldwide +# license in this material to reproduce, prepare derivative works, distribute copies to +# the public, perform publicly and display publicly, and to permit others to do so. 
+# ======================================================================================== name: Build And Deploy Docs @@ -31,5 +43,5 @@ jobs: uses: peaceiris/actions-gh-pages@v4 with: github_token: ${{ secrets.GITHUB_TOKEN }} - publish_dir: ./public + publish_dir: ./public force_orphan: true diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml new file mode 100644 index 0000000..7609d8f --- /dev/null +++ b/.github/workflows/nightly.yml @@ -0,0 +1,70 @@ +# ======================================================================================== +# (C) (or copyright) 2024. Triad National Security, LLC. All rights reserved. +# +# This program was produced under U.S. Government contract 89233218CNA000001 for Los +# Alamos National Laboratory (LANL), which is operated by Triad National Security, LLC +# for the U.S. Department of Energy/National Nuclear Security Administration. All rights +# in the program are reserved by Triad National Security, LLC, and the U.S. Department +# of Energy/National Nuclear Security Administration. The Government is granted for +# itself and others acting on its behalf a nonexclusive, paid-up, irrevocable worldwide +# license in this material to reproduce, prepare derivative works, distribute copies to +# the public, perform publicly and display publicly, and to permit others to do so. 
+# ======================================================================================== + +# This file was created in part or in whole by one of OpenAI's generative AI models + +name: Nightly Tests + +on: + schedule: + - cron: '0 0 * * *' # Runs daily at midnight; adjust as needed + +jobs: + cpu: + runs-on: ubuntu-latest + env: + MAKE_PROGRAM: ${{ env.MAKE_PROGRAM || 'make' }} + steps: + - uses: actions/checkout@v3 + with: + submodules: recursive + - name: Install dependencies + run: | + sudo apt-get update -qq + sudo apt-get install -qq --no-install-recommends tzdata + sudo apt-get install -qq git + sudo apt-get install -qq make cmake g++ + sudo apt-get install -qq libopenmpi-dev libhdf5-openmpi-dev + sudo apt-get install -qq openssh-client + sudo apt-get install -qq python3 python3-numpy python3-h5py python3-matplotlib + - name: Update Parthenon submodule + run: | + cd external/parthenon + git pull origin develop + echo "==> Current Parthenon commit hash:" + git rev-parse HEAD + - name: Run CPU tests + run: | + cd tst + mkdir -p build + cd build + cmake -DCMAKE_CXX_COMPILER=g++ -DCMAKE_C_COMPILER=gcc ../../ + make -j 4 + cd .. 
+ python3 run_tests.py regression.suite \ + --exe build/src/artemis \ + --log_file=ci_cpu_log.txt + - name: Upload CPU test log + if: always() + uses: actions/upload-artifact@v3 + with: + name: ci_cpu_log.txt + path: tst/ci_cpu_log.txt + retention-days: 3 + - name: Upload figures + if: always() + uses: actions/upload-artifact@v3 + with: + name: figs + path: tst/figs + retention-days: 3 diff --git a/.gitignore b/.gitignore index 71ab273..c36f8b0 100644 --- a/.gitignore +++ b/.gitignore @@ -22,6 +22,10 @@ debug* doc/_build/* doc/src/parameters.rst +# CI files +artemis_ci_*.out +tst/figs + # Visualization package files .smhist diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml deleted file mode 100644 index 1fd46c7..0000000 --- a/.gitlab-ci.yml +++ /dev/null @@ -1,189 +0,0 @@ -# ======================================================================================== -# (C) (or copyright) 2023-2024. Triad National Security, LLC. All rights reserved. -# -# This program was produced under U.S. Government contract 89233218CNA000001 for Los -# Alamos National Laboratory (LANL), which is operated by Triad National Security, LLC -# for the U.S. Department of Energy/National Nuclear Security Administration. All rights -# in the program are reserved by Triad National Security, LLC, and the U.S. Department -# of Energy/National Nuclear Security Administration. The Government is granted for -# itself and others acting on its behalf a nonexclusive, paid-up, irrevocable worldwide -# license in this material to reproduce, prepare derivative works, distribute copies to -# the public, perform publicly and display publicly, and to permit others to do so. 
-# ======================================================================================== - -variables: - GIT_SUBMODULE_STRATEGY: recursive - -stages: - - ci_doc - - ci_format - - ci_test - - deploy - -.default-job: - tags: - - darwin-slurm-shared - id_tokens: - SITE_ID_TOKEN: - aud: https://gitlab.lanl.gov - -.default-ci-job: - extends: .default-job - rules: - - if: '$CI_MERGE_REQUEST_TITLE =~ /Draft:/ || $CI_MERGE_REQUEST_TITLE =~ /WIP:/' - when: never - - if: '$CI_PIPELINE_SOURCE == "merge_request_event"' - -.default-nightly-job: - extends: .default-job - rules: - - if: '$CI_PIPELINE_SOURCE == "schedule"' - - if: '$CI_PIPELINE_SOURCE == "merge_request_event"' - when: never - -ci_format-job: - extends: .default-ci-job - stage: ci_format - variables: - SCHEDULER_PARAMETERS: "--nodes=1 --partition=skylake-gold --time=04:00:00" - script: - - cd $CI_PROJECT_DIR - - source env/bash - - VERBOSE=1 ./style/format.sh - - git diff --exit-code --ignore-submodules - -ci_cpu-job: - extends: .default-ci-job - stage: ci_test - variables: - SCHEDULER_PARAMETERS: "--nodes=1 --partition=skylake-gold --time=04:00:00" - script: - - cd $CI_PROJECT_DIR - - source env/bash - - cd $CI_PROJECT_DIR/tst - - python3 run_tests.py - regression.suite - --save_build - --make_nproc=32 - --cmake=-DCMAKE_C_COMPILER=gcc - --cmake=-DCMAKE_CXX_COMPILER=g++ - --log_file=ci_cpu_log.txt - artifacts: - when: always - expire_in: 3 days - paths: - - tst/ci_cpu_log.txt - - tst/figs - -ci_gpu-job: - extends: .default-ci-job - stage: ci_test - variables: - SCHEDULER_PARAMETERS: "--nodes=1 --partition=volta-x86 --time=04:00:00" - script: - - cd $CI_PROJECT_DIR - - source env/bash - - cd $CI_PROJECT_DIR/tst - - python3 run_tests.py - gpu.suite - --save_build - --make_nproc=32 - --cmake=-DARTEMIS_ENABLE_CUDA=On - --cmake=-DKokkos_ARCH_VOLTA70=On - --cmake=-DCMAKE_CXX_COMPILER=$CI_PROJECT_DIR/external/parthenon/external/Kokkos/bin/nvcc_wrapper - --log_file=ci_gpu_log.txt - artifacts: - when: always - expire_in: 
3 days - paths: - - tst/ci_gpu_log.txt - - tst/figs - -ci_nightly-cpu-job: - extends: .default-nightly-job - stage: ci_test - variables: - SCHEDULER_PARAMETERS: "--nodes=1 --partition=skylake-gold --time=08:00:00" - script: - - cd $CI_PROJECT_DIR - - cd external/parthenon - - git pull origin develop - - echo "==> Current Parthenon commit hash:" - - git rev-parse HEAD - - cd ../../ - - source env/bash - - cd $CI_PROJECT_DIR/tst - - python3 run_tests.py - regression.suite - --make_nproc=32 - --cmake=-DCMAKE_C_COMPILER=gcc - --cmake=-DCMAKE_CXX_COMPILER=g++ - --log_file=ci_cpu_log.txt - artifacts: - when: always - expire_in: 3 days - paths: - - tst/ci_cpu_log.txt - - tst/figs - -ci_nightly-gpu-job: - extends: .default-nightly-job - stage: ci_test - variables: - SCHEDULER_PARAMETERS: "--nodes=1 --partition=volta-x86 --time=08:00:00" - script: - - cd $CI_PROJECT_DIR - - cd external/parthenon - - git pull origin develop - - echo "==> Current Parthenon commit hash:" - - git rev-parse HEAD - - cd ../../ - - source env/bash - - cd $CI_PROJECT_DIR/tst - - python3 run_tests.py - gpu.suite - --save_build - --make_nproc=32 - --make_nproc=32 - --cmake=-DARTEMIS_ENABLE_CUDA=On - --cmake=-DKokkos_ARCH_VOLTA70=On - --cmake=-DCMAKE_CXX_COMPILER=$CI_PROJECT_DIR/external/parthenon/external/Kokkos/bin/nvcc_wrapper - --log_file=ci_gpu_log.txt - artifacts: - when: always - expire_in: 3 days - paths: - - tst/ci_gpu_log.txt - - tst/figs - - -ci-doc-job: - extends: .default-ci-job - image: python:latest - stage: ci_doc - script: - - cd $CI_PROJECT_DIR/doc - - pip install -U sphinx-rtd-theme - - pip install -U pyyaml - - sphinx-build -b html . ../public - artifacts: - paths: - - public - rules: - - if: $CI_COMMIT_REF_NAME != $CI_DEFAULT_BRANCH - - -pages: - extends: .default-job - image: python:latest - stage: deploy - script: - - cd $CI_PROJECT_DIR/doc - - pip install -U sphinx-rtd-theme - - pip install -U pyyaml - - sphinx-build -b html . 
../public - artifacts: - paths: - - public - rules: - - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH diff --git a/CMakePresets.json b/CMakePresets.json index 0499c29..46502c2 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -10,6 +10,8 @@ "name": "cpu-debug", "cacheVariables": { "CMAKE_MAKE_PROGRAM": "$env{MAKE_PROGRAM}", + "CMAKE_C_COMPILER": "gcc", + "CMAKE_CXX_COMPILER": "g++", "CMAKE_BUILD_TYPE": "Debug", "Kokkos_ENABLE_DEBUG_BOUNDS_CHECK": "ON" } @@ -18,6 +20,8 @@ "name": "cpu-release", "cacheVariables": { "CMAKE_MAKE_PROGRAM": "$env{MAKE_PROGRAM}", + "CMAKE_C_COMPILER": "gcc", + "CMAKE_CXX_COMPILER": "g++", "CMAKE_BUILD_TYPE": "RelWithDebInfo" } }, diff --git a/README.md b/README.md index 82482f0..727e229 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,16 @@ Currently supported computers/partitions are: skylake-gold volta-x86 (gpu) - power9-rhel7 (gpu) + +## Chicoma + + cpu + gpu + +## Venado + + gg (cpu) + gh (gpu) # Installation @@ -83,7 +92,23 @@ script: ## Testing -There is a suite of tests in the `tst/` directory. To run the full regression suite, do +There is a suite of tests in the `tst/` directory. Tests are run with the included `run_tests.py` +script. This script can be run in three ways: + +1. With default arguments, where the current version of the source will be built. The resulting +executable can be saved for reuse with `--save_build`, and if saved can be reused in subsequent test +runs with `--reuse_build`. Note that `--save_build` must continue to be supplied as well to avoid +the reused build being deleted after the tests are run. +2. If the `run_tests.py` script is called from a directory with a valid `artemis` executable, that +executable will be used for testing and will not be cleaned up afterwards. +3. If the path to an `artemis` executable is provided to the `--exe` option of `run_tests.py`, that +executable will be used for testing and will not be cleaned up afterwards. 
+ +In all cases, the tests will be run from a `tst` directory created in the same folder as the +executable being used. Figures will be created in `artemis/tst/figs` and the log file in +`artemis/tst`. + +To run the full regression suite, do @@ -91,12 +116,23 @@ You can also pass a list of individual tests to the script, or create your own s ## CI -We use the gitlab CI for regression testing. The CI will not run if the PR is marked "Draft:" or +We use the GitHub CI for regression testing. The CI will not run if the PR is marked "Draft:" or "WIP:". Removing these labels from the title will not automatically launch the CI. To launch the CI with an empty commit, do git commit --allow-empty -m "trigger pipeline" && git push +A portion of the CI is run on LANL's internal Darwin platform. To launch this CI job, someone with +Darwin access (usually a LANL employee) must first create a GitHub Personal Access Token and store +it securely in their own environment as `ARTEMIS_GITHUB_TOKEN`, e.g. in their `~/.bashrc`: + + export ARTEMIS_GITHUB_TOKEN=[token] + +and then log in to Darwin and manually launch the CI runner: + + cd artemis + ./tst/launch_ci_runner.py [Number of the GitHub PR] + ## Release  Artemis is released under the BSD 3-Clause License. 
For more details see the LICENSE.md diff --git a/env/bash b/env/bash index fd8f4e5..f69ad81 100644 --- a/env/bash +++ b/env/bash @@ -160,7 +160,7 @@ function build_artemis { DEBUG_BUILD=false # Whether to configure for Debug build (default is cmake's RelWithDebInfo) FULL_BUILD=false # Whether to completely wipe the build directory, if non-empty ASAN_BUILD=false # Whether to configure with ASAN support for error checking - options='hb:cdfa' + options='hb:cdfaj:' while getopts $options opt; do case $opt in h) @@ -172,6 +172,7 @@ function build_artemis { echo " -c : Force re-configuration of build" echo " -d : Switch to debug build from default RelWithDebInfo" echo " -f : Force complete re-build" + echo " -j [N] : Number of ranks N to use for make" echo " WARNING uses rm -rf to remove build directory if it exists" return ;; @@ -190,6 +191,9 @@ function build_artemis { echo "Full build requested" FULL_BUILD=true ;; + j) + BUILD_RANKS="$OPTARG" + ;; a) echo "ASAN build requested" ASAN_BUILD=true @@ -259,9 +263,8 @@ function build_artemis { configure_artemis fi - make -j + make -j$BUILD_RANKS make_status=$? - echo "${ABS_BUILD_DIR}" return $make_status } diff --git a/tst/launch_ci_runner.py b/tst/launch_ci_runner.py new file mode 100755 index 0000000..d9ac746 --- /dev/null +++ b/tst/launch_ci_runner.py @@ -0,0 +1,199 @@ +#!/usr/bin/env python3 +# ======================================================================================== +# (C) (or copyright) 2024. Triad National Security, LLC. All rights reserved. +# +# This program was produced under U.S. Government contract 89233218CNA000001 for Los +# Alamos National Laboratory (LANL), which is operated by Triad National Security, LLC +# for the U.S. Department of Energy/National Nuclear Security Administration. All rights +# in the program are reserved by Triad National Security, LLC, and the U.S. Department +# of Energy/National Nuclear Security Administration. 
The Government is granted for +# itself and others acting on its behalf a nonexclusive, paid-up, irrevocable worldwide +# license in this material to reproduce, prepare derivative works, distribute copies to +# the public, perform publicly and display publicly, and to permit others to do so. +# ======================================================================================== + +# This file was created in part or in whole by one of OpenAI's generative AI models + +import subprocess +import socket +import fnmatch +import os +import requests +import sys +import json +import subprocess +import argparse +import tempfile +import shlex + +# The personal access token (PAT) with 'repo:status' permission +# Store your token securely and do not hardcode it in the script +GITHUB_TOKEN = os.environ.get("ARTEMIS_GITHUB_TOKEN") + + +def get_pr_info(pr_number): + url = f"https://api.github.com/repos/lanl/artemis/pulls/{pr_number}" + headers = {"Authorization": f"token {GITHUB_TOKEN}"} + response = requests.get(url, headers=headers) + if response.status_code != 200: + print(f"Error fetching PR info: {response.status_code}") + print(response.text) + sys.exit(1) + return response.json() + + +def update_status( + commit_sha, state, description, context="Continuous Integration / darwin_volta-x86" +): + url = f"https://api.github.com/repos/lanl/artemis/statuses/{commit_sha}" + headers = {"Authorization": f"token {GITHUB_TOKEN}"} + data = {"state": state, "description": description, "context": context} + response = requests.post(url, headers=headers, data=json.dumps(data)) + if response.status_code != 201: + print(f"Error setting status: {response.status_code}") + print(response.text) + sys.exit(1) + + +def run_tests_in_temp_dir(pr_number, head_repo, head_ref, commit_sha): + current_dir = os.getcwd() + + # Create a temporary directory + with tempfile.TemporaryDirectory() as temp_dir: + print(f"Using temporary directory: {temp_dir}") + + # Clone the repository into the temporary 
directory + subprocess.run(["git", "clone", head_repo, temp_dir], check=True) + os.chdir(temp_dir) + + # Checkout the PR branch + subprocess.run(["git", "pull", "--no-rebase", "origin", head_ref], check=True) + + # Update submodules + subprocess.run( + ["git", "submodule", "update", "--init", "--recursive"], check=True + ) + + # Run the tests + try: + os.chdir(os.path.join(temp_dir, "tst")) + build_dir = os.path.join(temp_dir, "build") + + # Run subprocess command to compile code and launch run_tests.py + test_command = [ + "bash", + "-c", + "source ../env/bash && build_artemis -b " + + build_dir + + " -j 20 -f && cd " + + os.path.join(temp_dir, "tst") + + " && python3 run_tests.py gpu.suite " + + "--exe " + + os.path.join(build_dir, "src", "artemis") + + " --log_file=ci_cpu_log.txt", + ] + ret = subprocess.run(test_command, check=True) + + # CI apparently succeeded; indicate that + return True + except subprocess.CalledProcessError: + # If CI failed, indicate that + return False + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Run CI tasks with optional Slurm submission." + ) + parser.add_argument( + "pr_number", type=int, help="Pull request number for the CI run." 
+ ) + parser.add_argument( + "--submission", + action="store_true", + help="Flag to indicate the script is running as a Slurm submission job.", + ) + args = parser.parse_args() + + # Fetch PR information + pr_info = get_pr_info(args.pr_number) + head_repo = pr_info["head"]["repo"]["clone_url"] + head_ref = pr_info["head"]["ref"] + commit_sha = pr_info["head"]["sha"] + + if args.submission: + # Update github PR status to indicate we have begun testing + update_status(commit_sha, "pending", "CI Slurm job running...") + + # Run the tests in a temporary directory + test_success = run_tests_in_temp_dir( + args.pr_number, head_repo, head_ref, commit_sha + ) + + # Update github PR status to indicate that testing has concluded + if test_success: + update_status(commit_sha, "success", "All tests passed.") + else: + update_status(commit_sha, "failure", "Tests failed.") + else: + # Check that we are on the right system + hostname = socket.gethostname() + if not fnmatch.fnmatch(hostname, "darwin-fe*"): + print("ERROR script must be run from Darwin frontend node!") + sys.exit(1) + + # Execute the sbatch command + try: + # Submit batch job with ci_runner script that will checkout and build the code and run + # tests + job_name = f"artemis_ci_darwin_volta-x86_PR{args.pr_number}" + + # Clean up existing jobs for same PR + squeue_command = f"squeue --name={shlex.quote(job_name)} --user=$(whoami) --noheader --format=%i" + squeue_result = subprocess.run( + squeue_command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + ) + + job_ids = squeue_result.stdout.strip().split() + if len(job_ids) >= 1: + print("Canceling jobs:") + for job_id in job_ids: + print(f" {job_id}") + + # Use scancel to cancel the jobs + scancel_command = ["scancel"] + job_ids + scancel_result = subprocess.run( + scancel_command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + ) + + sbatch_command = [ + "sbatch", + 
f"--job-name={job_name}", + f"--output={job_name}_%j.out", + f"--error={job_name}_%j.out", + "--partition=volta-x86", + "--time=04:00:00", + "--wrap", + f"python3 {sys.argv[0]} {args.pr_number} --submission", + ] + result = subprocess.run( + sbatch_command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + check=True, + universal_newlines=True, + ) + print(result.stdout.strip()) + + # Update PR status that we have successfully submitted to SLURM job + update_status(commit_sha, "pending", "CI SLURM job submitted...") + except subprocess.CalledProcessError: + # Update PR status that we have failed to submit the SLURM job + update_status(commit_sha, "failure", "SLURM job submission failed.") diff --git a/tst/run_tests.py b/tst/run_tests.py old mode 100644 new mode 100755 index 0a486cb..5888117 --- a/tst/run_tests.py +++ b/tst/run_tests.py @@ -12,6 +12,8 @@ # the public, perform publicly and display publicly, and to permit others to do so. # ======================================================================================== +# This file was created in part by one of OpenAI's generative AI models + # Regression test script for Artemis. # Usage: From this directory, call this script with python: @@ -64,7 +66,16 @@ def process_suite(filename): dir_test_names = [ name for _, name, _ in iter_modules( - path=["scripts/" + test_name], prefix=test_name + "." + # path=["scripts/" + test_name], prefix=test_name + "." + path=[ + os.path.join( + artemis.get_source_directory(), + "tst", + "scripts", + test_name, + ) + ], + prefix=test_name + ".", ) ] tests += dir_test_names @@ -82,7 +93,15 @@ def main(**kwargs): dir_test_names = [ name for _, name, _ in iter_modules( - path=["scripts/" + directory], prefix=directory + "." 
+ path=[ + os.path.join( + artemis.get_source_directory(), + "tst", + "scripts", + directory, + ) + ], + prefix=directory + ".", ) ] test_names.extend(dir_test_names) @@ -98,7 +117,12 @@ def main(**kwargs): dir_test_names = [ name for _, name, _ in iter_modules( - path=["scripts/" + test], prefix=test + "." + path=[ + os.path.join( + artemis.get_source_directory(), "tst", "scripts", test + ) + ], + prefix=test + ".", ) ] test_names.extend(dir_test_names) @@ -112,6 +136,10 @@ def main(**kwargs): test_times = [] test_results = [] test_errors = [] + + # Extract arguments + artemis_exe_path = kwargs.pop("exe") + try: # Check that required modules are installed for all test dependencies deps_installed = True @@ -130,8 +158,27 @@ def main(**kwargs): deps_installed = False if not deps_installed: logger.warning("WARNING! Not all required Python modules " "are available") + + if artemis_exe_path is not None: + # Check that path is valid + if not ( + os.path.exists(artemis_exe_path) + and os.access(artemis_exe_path, os.X_OK) + ): + logger.error("Exception occurred", exc_info=True) + test_errors.append("make()") + raise TestError(f'Provided executable "{artemis_exe_path}" not found!') + # Set the valid provided executable path + artemis.set_executable(os.path.abspath(artemis_exe_path)) + else: + # If we are in a directory with an executable, default to using that + local_path = os.path.join(os.getcwd(), "artemis") + if os.path.exists(local_path) and os.access(local_path, os.X_OK): + print(f"Found local executable {local_path}") + artemis.set_executable(local_path) + # Build Artemis - if not kwargs.pop("reuse_build"): + if not artemis.custom_exe and not kwargs.pop("reuse_build"): try: os.system("rm -rf {0}/build".format(current_dir)) # insert arguments for artemis.make() @@ -142,6 +189,7 @@ def main(**kwargs): logger.error("Exception occurred", exc_info=True) test_errors.append("make()") raise TestError("Unable to build Artemis") + # Run each test for name in test_names: t0 = 
timer() @@ -178,7 +226,7 @@ def main(**kwargs): # For CI, print after every individual test has finished logger.info("{} test: run(), analyze() finished".format(name)) finally: - if not kwargs.pop("save_build"): + if not kwargs.pop("save_build") and artemis_exe_path is None: os.system("rm -rf {0}/build".format(current_dir)) # Report test results @@ -232,7 +280,7 @@ def log_init(args): # setup log_file log_fn = kwargs.pop("log_file") if log_fn: - f_handler = logging.FileHandler(log_fn) + f_handler = logging.FileHandler(os.path.join(artemis.artemis_log_dir, log_fn)) f_handler.setLevel(0) # log everything f_format = logging.Formatter( "%(asctime)s|%(levelname)s" ":%(name)s: %(message)s" @@ -275,6 +323,13 @@ def log_init(args): help="do not recompile the code and reuse the build directory.", ) + parser.add_argument( + "--exe", + type=str, + default=None, + help="path to pre-built executable", + ) + args = parser.parse_args() log_init(args) diff --git a/tst/scripts/advection/advection.py b/tst/scripts/advection/advection.py index 4bb3d6c..608892f 100644 --- a/tst/scripts/advection/advection.py +++ b/tst/scripts/advection/advection.py @@ -18,7 +18,12 @@ # Modules import logging import numpy as np +import os import scripts.utils.artemis as artemis +import sys + +sys.path.append(os.path.join(artemis.artemis_dir, "analysis")) +from ahistory import ahistory logger = logging.getLogger("artemis" + __name__[7:]) # set logger name @@ -71,16 +76,21 @@ def analyze(): # error convergence rates, and error identicality between L- and R-going # advection. 
logger.debug("Analyzing test " + __name__) - data = np.loadtxt("build/src/" + _file_id + "-errs.dat", dtype=np.float64, ndmin=2) - history = np.loadtxt("build/src/" + _file_id + ".out0.hst") + err_path = os.path.join(artemis.get_run_directory(), _file_id + "-errs.dat") + data = np.loadtxt( + err_path, + dtype=np.float64, + ndmin=2, + ) + hist_path = os.path.join(artemis.get_run_directory(), _file_id + ".out0.hst") + history = ahistory(hist_path) + os.system(f"rm {err_path}") + os.system(f"rm {hist_path}") analyze_status = True - if np.isnan(data).any() or np.isnan(history).any(): + if np.isnan(data).any(): logger.warning("NaN encountered") analyze_status = False raise FloatingPointError("NaN encountered") - if history.shape != (44, 18): - analyze_status = False - history_line = history[-1] def history_equiv(a, b, tol=1.0e-4): if 2.0 * (np.fabs(a - b)) / (np.fabs(a) + np.fabs(b)) > tol: @@ -88,37 +98,42 @@ def history_equiv(a, b, tol=1.0e-4): else: return True - history_expected = [ - 1.00000e00, - 1.11612e-02, - 5.60000e01, - 1.60000e01, - 6.75000e00, - 2.25000e00, - 4.50000e00, - 4.50000e00, - 9.45000e00, - 6.07500e00, - 6.75000e00, - 6.75000e00, - 2.25000e00, - -2.25000e00, - 4.50000e00, - -4.50000e00, - 4.50000e00, - -4.50000e00, - ] - if len(history_line) != len(history_expected): - print( - f"Number of history rows ({len(history_line)}) do not equal expectation ({len(history_expected)})!" 
- ) - analyze_status = False - for n, val in enumerate(history_expected): - if not history_equiv(history_line[n], val): + history_expected = { + "time": 1.0, + "dt": 1.11612e-02, + "cycle": 56, + "nbtotal": 16, + "gas_mass_0": 6.75, + "gas_momentum_x1_0": 2.25, + "gas_momentum_x2_0": 4.5, + "gas_momentum_x3_0": 4.5, + "gas_energy_0": 9.45, + "gas_internal_energy_0": 6.075, + "dust_mass_0": 6.75, + "dust_mass_1": 6.75, + "dust_momentum_x1_0": 2.25, + "dust_momentum_x1_1": -2.25, + "dust_momentum_x2_0": 4.5, + "dust_momentum_x2_1": -4.5, + "dust_momentum_x3_0": 4.5, + "dust_momentum_x3_1": -4.5, + } + + for key in history_expected.keys(): + values = history.Get(key) + if len(values) != 11: + analyze_status = False + for value in values: + if np.isnan(value): + logger.warning("NaN encountered") + analyze_status = False + raise FloatingPointError("NaN encountered") + if not history_equiv(values[-1], history_expected[key]): print( - f"History entry {n} = {history_line[n]} does not match expectation = {val}!" + f"History entry {key} = {values[-1]} does not match expectation = {history_expected[key]}!" 
) analyze_status = False + data = data.reshape([len(_int), len(_recon), len(_flux), 2, data.shape[-1]]) for ii, iv in enumerate(_int): for ri, rv in enumerate(_recon): diff --git a/tst/scripts/advection/advection_mpi.py b/tst/scripts/advection/advection_mpi.py index b724605..0e66c17 100644 --- a/tst/scripts/advection/advection_mpi.py +++ b/tst/scripts/advection/advection_mpi.py @@ -18,6 +18,7 @@ # Modules import logging import numpy as np +import os import scripts.utils.artemis as artemis import scripts.advection.advection as advection diff --git a/tst/scripts/binary/binary.py b/tst/scripts/binary/binary.py index ef5852e..335c3d2 100644 --- a/tst/scripts/binary/binary.py +++ b/tst/scripts/binary/binary.py @@ -53,7 +53,7 @@ def analyze(): analyze_status = True time, r, phi, z, [d, u, v, w, T] = load_level( - "final", base="{}.out1".format(_file_id), dir="build/src" + "final", base="{}.out1".format(_file_id), dir=artemis.get_run_directory() ) rc = 0.5 * (r[1:] + r[:-1]) pc = 0.5 * (phi[1:] + phi[:-1]) diff --git a/tst/scripts/binary_adi/binary_adi.py b/tst/scripts/binary_adi/binary_adi.py index 31b0ba9..8fb80c1 100644 --- a/tst/scripts/binary_adi/binary_adi.py +++ b/tst/scripts/binary_adi/binary_adi.py @@ -69,7 +69,7 @@ def analyze(): for dv in _de_switch: problem_id = _file_id + "_{}_de{:d}_{}".format(fv, int(10 * dv), cv) time, r, phi, z, [d, u, v, w, T] = load_level( - "final", dir="build/src", base=problem_id + ".out1" + "final", dir=artemis.get_run_directory(), base=problem_id + ".out1" ) rc = 0.5 * (r[1:] + r[:-1]) pc = 0.5 * (phi[1:] + phi[:-1]) diff --git a/tst/scripts/collisions/collisions.py b/tst/scripts/collisions/collisions.py index 9e273a9..10aca79 100644 --- a/tst/scripts/collisions/collisions.py +++ b/tst/scripts/collisions/collisions.py @@ -18,6 +18,7 @@ # Modules import logging import numpy as np +import os import scripts.utils.artemis as artemis logger = logging.getLogger("artemis" + __name__[7:]) # set logger name @@ -58,7 +59,9 @@ def analyze(): 
logger.debug("Analyzing test " + __name__) - fname = "build/src/{}_{:d}.reb".format(_file_id, _nranks) + fname = os.path.join( + artemis.get_run_directory(), "{}_{:d}.reb".format(_file_id, _nranks) + ) logger.debug("Reading" + fname) d = np.loadtxt(fname) with open(fname, "r") as f: diff --git a/tst/scripts/collisions/collisions_mpi.py b/tst/scripts/collisions/collisions_mpi.py index d4d1992..608dee6 100644 --- a/tst/scripts/collisions/collisions_mpi.py +++ b/tst/scripts/collisions/collisions_mpi.py @@ -16,6 +16,7 @@ # Modules import logging import numpy as np +import os import scripts.utils.artemis as artemis import scripts.collisions.collisions as collisions diff --git a/tst/scripts/coords/blast.py b/tst/scripts/coords/blast.py index 9849bf6..edc9cd8 100644 --- a/tst/scripts/coords/blast.py +++ b/tst/scripts/coords/blast.py @@ -117,7 +117,10 @@ def analyze(): else interp1d(dat3[:, 0], dat3[:, 3]) ) res = load_snap( - "build/src/" + _file_id + "_{}{:d}.out1.final.phdf".format(g, 2) + os.path.join( + artemis.get_run_directory(), + _file_id + "_{}{:d}.out1.final.phdf".format(g, 2), + ) ) pres = res[4][-1] xc = 0.5 * (res[1][:, 1:] + res[1][:, :-1]) diff --git a/tst/scripts/diffusion/alpha_disk.py b/tst/scripts/diffusion/alpha_disk.py index 07b033b..0271c57 100644 --- a/tst/scripts/diffusion/alpha_disk.py +++ b/tst/scripts/diffusion/alpha_disk.py @@ -83,7 +83,7 @@ def analyze(): os.makedirs(artemis.artemis_fig_dir, exist_ok=True) time, x, y, z, [dens, u, v, w, T] = binary.load_level( - "final", dir="build/src", base=base + ".out1" + "final", dir=artemis.get_run_directory(), base=base + ".out1" ) r = 0.5 * (x[1:] + x[:-1]) diff --git a/tst/scripts/diffusion/thermal_diffusion.py b/tst/scripts/diffusion/thermal_diffusion.py index f4b22bc..9a98a2c 100644 --- a/tst/scripts/diffusion/thermal_diffusion.py +++ b/tst/scripts/diffusion/thermal_diffusion.py @@ -90,7 +90,7 @@ def analyze(): for ax, g in zip(axes, _geom): name = "{}_{}".format(_file_id, g[:3]) time, x, y, z, [d, 
u, v, w, T] = binary.load_level( - "final", dir="build/src", base="{}.out1".format(name) + "final", dir=artemis.get_run_directory(), base="{}.out1".format(name) ) xc = 0.5 * (x[1:] + x[:-1]) ans = Tans(xc.ravel(), f=_flux, T0=_gtemp, x0=1.2, xi=0.2, d=dind[g], k=_kcond) diff --git a/tst/scripts/diffusion/viscous_diffusion.py b/tst/scripts/diffusion/viscous_diffusion.py index 5ad55fd..579be17 100644 --- a/tst/scripts/diffusion/viscous_diffusion.py +++ b/tst/scripts/diffusion/viscous_diffusion.py @@ -90,13 +90,13 @@ def analyze(): os.makedirs(artemis.artemis_fig_dir, exist_ok=True) time, x, y, z, [dens, u, v, w, T] = binary.load_level( - "final", dir="build/src", base=base + ".out1" + "final", dir=artemis.get_run_directory(), base=base + ".out1" ) xc = 0.5 * (x[1:] + x[:-1]) yc = 0.5 * (y[1:] + y[:-1]) time0, x0, y0, z0, [dens0, u0, v0, w0, T0] = binary.load_level( - 0, dir="build/src", base=base + ".out1" + 0, dir=artemis.get_run_directory(), base=base + ".out1" ) vx3 = w[0, :] diff --git a/tst/scripts/disk/disk.py b/tst/scripts/disk/disk.py index 3058751..1f97404 100644 --- a/tst/scripts/disk/disk.py +++ b/tst/scripts/disk/disk.py @@ -17,6 +17,7 @@ # Modules import logging import numpy as np +import os import scripts.utils.artemis as artemis logger = logging.getLogger("artemis" + __name__[7:]) # set logger name @@ -47,6 +48,13 @@ def run(**kwargs): for b in _bc: for g in _geom: bc_args = [] + geom_args = [] + if g == "cart": + geom_args = [ + "parthenon/mesh/nx1=64", + "parthenon/mesh/nx2=64", + "parthenon/mesh/nx3=64", + ] for d in directions[g]: bc_args.append("parthenon/mesh/i{}_bc={}".format(d, b)) bc_args.append("parthenon/mesh/o{}_bc={}".format(d, b)) @@ -60,7 +68,8 @@ def run(**kwargs): g, int(10 * gam), b ), "problem/polytropic_index={:.2f}".format(gam), - ], + ] + + geom_args, ) artemis.run( _nranks, @@ -71,7 +80,8 @@ def run(**kwargs): g, int(10 * gam), b ), "problem/polytropic_index={:.2f}".format(gam), - ], + ] + + geom_args, 
restart="disk_{}_{:d}_{}.out2.final.rhdf".format( g, int(10 * gam), b ), @@ -90,15 +100,24 @@ def analyze(): for gam in _gamma: logger.debug("Analyzing test {}_{}".format(__name__, g)) logger.debug( - "build/src/disk_{}_{:d}_{}.out1".format(g, int(10 * gam), b) + os.path.join( + artemis.get_run_directory(), + "disk_{}_{:d}_{}.out1".format(g, int(10 * gam), b), + ) ) _, (x, y, z), (d0, _, _, _, _), sys, _ = loadf( 0, - base="build/src/disk_{}_{:d}_{}.out1".format(g, int(10 * gam), b), + base=os.path.join( + artemis.get_run_directory(), + "disk_{}_{:d}_{}.out1".format(g, int(10 * gam), b), + ), ) time, (x, y, z), (d, T, u, v, w), sys, dt = loadf( "final", - base="build/src/disk_{}_{:d}_{}.out1".format(g, int(10 * gam), b), + base=os.path.join( + artemis.get_run_directory(), + "disk_{}_{:d}_{}.out1".format(g, int(10 * gam), b), + ), ) mybad = False mybad |= np.any(np.isnan(d)) diff --git a/tst/scripts/disk/disk_mpi.py b/tst/scripts/disk/disk_mpi.py index d827254..fb97cf0 100644 --- a/tst/scripts/disk/disk_mpi.py +++ b/tst/scripts/disk/disk_mpi.py @@ -16,6 +16,7 @@ # Modules import logging import numpy as np +import os import scripts.utils.artemis as artemis import scripts.disk.disk as disk diff --git a/tst/scripts/disk_nbody/disk_nbody.py b/tst/scripts/disk_nbody/disk_nbody.py index 657c553..daf9ba9 100644 --- a/tst/scripts/disk_nbody/disk_nbody.py +++ b/tst/scripts/disk_nbody/disk_nbody.py @@ -17,6 +17,7 @@ # Modules import logging import numpy as np +import os import scripts.utils.artemis as artemis logger = logging.getLogger("artemis" + __name__[7:]) # set logger name @@ -87,18 +88,23 @@ def analyze(): for gam in _gamma: logger.debug("Analyzing test {}_{}".format(__name__, g)) logger.debug( - "build/src/disk_nbody_{}_{:d}_{}.out1".format(g, int(10 * gam), b) + os.path.join( + artemis.get_run_directory(), + "disk_nbody_{}_{:d}_{}.out1".format(g, int(10 * gam), b), + ) ) _, (x, y, z), (d0, _, _, _, _), sys, _ = loadf( 0, - 
base="build/src/disk_nbody_{}_{:d}_{}.out1".format( - g, int(10 * gam), b + base=os.path.join( + artemis.get_run_directory(), + "disk_nbody_{}_{:d}_{}.out1".format(g, int(10 * gam), b), + ), ) time, (x, y, z), (d, T, u, v, w), sys, dt = loadf( "final", - base="build/src/disk_nbody_{}_{:d}_{}.out1".format( - g, int(10 * gam), b + base=os.path.join( + artemis.get_run_directory(), + "disk_nbody_{}_{:d}_{}.out1".format(g, int(10 * gam), b), + ), ) mybad = False diff --git a/tst/scripts/disk_nbody/disk_nbody_mpi.py b/tst/scripts/disk_nbody/disk_nbody_mpi.py index eefd547..0ab31f5 100644 --- a/tst/scripts/disk_nbody/disk_nbody_mpi.py +++ b/tst/scripts/disk_nbody/disk_nbody_mpi.py @@ -16,6 +16,7 @@ # Modules import logging import numpy as np +import os import scripts.utils.artemis as artemis import scripts.disk_nbody.disk_nbody as disk diff --git a/tst/scripts/drag/drag.py b/tst/scripts/drag/drag.py index a3fa189..4a149eb 100644 --- a/tst/scripts/drag/drag.py +++ b/tst/scripts/drag/drag.py @@ -60,7 +60,9 @@ def analyze(): mom_tot = [] errors = [] for n in range(1, int(_tlim / 0.05)): - fname = "build/src/{}.out1.{:05d}.phdf".format(_file_id, n) + fname = os.path.join( + artemis.get_run_directory(), "{}.out1.{:05d}.phdf".format(_file_id, n) + ) with h5py.File(fname, "r") as f: t = f["Info"].attrs["Time"] vg = f["gas.prim.velocity_0"][...][:, 0, :].ravel() diff --git a/tst/scripts/hydro/linwave.py b/tst/scripts/hydro/linwave.py index ef719d8..2952e7d 100644 --- a/tst/scripts/hydro/linwave.py +++ b/tst/scripts/hydro/linwave.py @@ -18,6 +18,7 @@ # Modules import logging import numpy as np +import os import scripts.utils.artemis as artemis logger = logging.getLogger("artemis" + __name__[7:]) # set logger name @@ -77,7 +78,11 @@ def analyze(): # error convergence rates, and error identicality between L- and R-going # sound waves.
logger.debug("Analyzing test " + __name__) - data = np.loadtxt("build/src/" + _file_id + "-errs.dat", dtype=np.float64, ndmin=2) + data = np.loadtxt( + os.path.join(artemis.get_run_directory(), _file_id + "-errs.dat"), + dtype=np.float64, + ndmin=2, + ) analyze_status = True if np.isnan(data).any(): logger.warning("NaN encountered") diff --git a/tst/scripts/hydro/linwave_mpi.py b/tst/scripts/hydro/linwave_mpi.py index b013ec8..4ac361b 100644 --- a/tst/scripts/hydro/linwave_mpi.py +++ b/tst/scripts/hydro/linwave_mpi.py @@ -18,6 +18,7 @@ # Modules import logging import numpy as np +import os import scripts.utils.artemis as artemis import scripts.hydro.linwave as linwave diff --git a/tst/scripts/nbody/nbody.py b/tst/scripts/nbody/nbody.py index 3e06203..9b78caa 100644 --- a/tst/scripts/nbody/nbody.py +++ b/tst/scripts/nbody/nbody.py @@ -61,7 +61,7 @@ def analyze(): analyze_status = True time, r, phi, z, [d, u, v, w, T] = load_level( - "final", base="{}.out1".format(_file_id), dir="build/src" + "final", base="{}.out1".format(_file_id), dir=artemis.get_run_directory() ) rc = 0.5 * (r[1:] + r[:-1]) pc = 0.5 * (phi[1:] + phi[:-1]) diff --git a/tst/scripts/ssheet/ssheet.py b/tst/scripts/ssheet/ssheet.py index 7bc8e11..b643d43 100644 --- a/tst/scripts/ssheet/ssheet.py +++ b/tst/scripts/ssheet/ssheet.py @@ -51,7 +51,7 @@ def analyze(): analyze_status = True time, x, y, z, [d, u, v, w, T] = binary.load_level( - "final", dir="build/src", base="{}.out1".format(_file_id) + "final", dir=artemis.get_run_directory(), base="{}.out1".format(_file_id) ) xc = 0.5 * (x[1:] + x[:-1]) yc = 0.5 * (y[1:] + y[:-1]) diff --git a/tst/scripts/utils/artemis.py b/tst/scripts/utils/artemis.py index 4e00313..2e90e16 100644 --- a/tst/scripts/utils/artemis.py +++ b/tst/scripts/utils/artemis.py @@ -20,12 +20,43 @@ import logging import os import subprocess +import datetime from timeit import default_timer as timer from .log_pipe import LogPipe # Global variables -artemis_rel_path = "../" 
-artemis_fig_dir = "./figs/" +current_dir = os.getcwd() +artemis_dir = os.path.abspath( + os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "..") +) +artemis_executable = os.path.join(artemis_dir, "tst", "build", "src", "artemis") +artemis_inputs_dir = os.path.join(artemis_dir, "inputs") +# artemis_fig_dir = "./figs/" +artemis_run_dir = os.path.join(artemis_dir, "tst", "build", "src", "tst") +artemis_fig_dir = os.path.join(artemis_dir, "tst", "figs") +artemis_log_dir = os.path.join(artemis_dir, "tst") +custom_exe = False + + +# Optionally set custom path for executable, and update other variables related to where +# we run the code +def set_executable(executable_path): + global artemis_executable + global artemis_run_dir + global custom_exe + artemis_executable = executable_path + artemis_run_dir = os.path.join(os.path.dirname(artemis_executable), "tst") + custom_exe = True + + +# Function for returning the path to the run directory for this set of tests +def get_run_directory(): + return artemis_run_dir + + +# Provide base directory of artemis source tree +def get_source_directory(): + return artemis_dir # Function for compiling Artemis @@ -37,7 +68,7 @@ def make(cmake_args, make_nproc): subprocess.check_call(["mkdir", "build"], stdout=out_log) build_dir = current_dir + "/build/" os.chdir(build_dir) - cmake_command = ["cmake", "../" + artemis_rel_path] + cmake_args + cmake_command = ["cmake", artemis_dir] + cmake_args make_command = ["make", "-j" + str(make_nproc)] try: t0 = timer() @@ -60,27 +91,29 @@ def make(cmake_args, make_nproc): # Function for running Artemis (with MPI) def run(nproc, input_filename, arguments, restart=None): + # global run_directory out_log = LogPipe("artemis.run", logging.INFO) - current_dir = os.getcwd() - exe_dir = current_dir + "/build/src/" - os.chdir(exe_dir) + os.makedirs(artemis_run_dir, exist_ok=True) + + # Build the run command + run_command = ["mpiexec", "--oversubscribe", "-n", str(nproc), artemis_executable] 
+ if restart is not None: + run_command += ["-r", restart] + input_filename_full = os.path.join(artemis_inputs_dir, input_filename) + run_command += ["-i", input_filename_full] + try: - input_filename_full = "../../" + artemis_rel_path + "inputs/" + input_filename - run_command = ["mpiexec", "-n", str(nproc), "./artemis"] - if restart is not None: - run_command += ["-r", restart] - run_command += ["-i", input_filename_full] - try: - cmd = run_command + arguments - logging.getLogger("artemis.run").debug("Executing: " + " ".join(cmd)) - subprocess.check_call(cmd, stdout=out_log) - except subprocess.CalledProcessError as err: - raise ArtemisError( - "Return code {0} from command '{1}'".format( - err.returncode, " ".join(err.cmd) - ) + # os.chdir(run_directory) + os.chdir(artemis_run_dir) + cmd = run_command + arguments + logging.getLogger("artemis.run").debug("Executing: " + " ".join(cmd)) + subprocess.check_call(cmd, stdout=out_log) + except subprocess.CalledProcessError as err: + raise ArtemisError( + "Return code {0} from command '{1}'".format( + err.returncode, " ".join(err.cmd) ) - + ) finally: out_log.close() os.chdir(current_dir) diff --git a/tst/suites/gpu.suite b/tst/suites/gpu.suite index 93d32a1..9d18b97 100644 --- a/tst/suites/gpu.suite +++ b/tst/suites/gpu.suite @@ -19,4 +19,4 @@ binary/binary binary_adi/binary_adi nbody/nbody diffusion/viscous_diffusion -diffusion/thermal_diffusion +#diffusion/thermal_diffusion diff --git a/tst/suites/parallel.suite b/tst/suites/parallel.suite index d755c83..75304c3 100644 --- a/tst/suites/parallel.suite +++ b/tst/suites/parallel.suite @@ -14,8 +14,6 @@ # parallel suite advection/advection_mpi -binary/binary_mpi -binary_adi/binary_adi_mpi coords/blast_mpi disk/disk_mpi nbody/nbody_mpi @@ -23,5 +21,4 @@ hydro/linwave_mpi ssheet/ssheet_mpi diffusion/viscous_diffusion_mpi diffusion/alpha_disk_mpi -diffusion/thermal_diffusion_mpi drag/drag_mpi diff --git a/tst/suites/parallel_slow.suite b/tst/suites/parallel_slow.suite new 
file mode 100644 index 0000000..ea9ea11 --- /dev/null +++ b/tst/suites/parallel_slow.suite @@ -0,0 +1,27 @@ +# ======================================================================================== +# (C) (or copyright) 2023-2024. Triad National Security, LLC. All rights reserved. +# +# This program was produced under U.S. Government contract 89233218CNA000001 for Los +# Alamos National Laboratory (LANL), which is operated by Triad National Security, LLC +# for the U.S. Department of Energy/National Nuclear Security Administration. All rights +# in the program are reserved by Triad National Security, LLC, and the U.S. Department +# of Energy/National Nuclear Security Administration. The Government is granted for +# itself and others acting on its behalf a nonexclusive, paid-up, irrevocable worldwide +# license in this material to reproduce, prepare derivative works, distribute copies to +# the public, perform publicly and display publicly, and to permit others to do so. +# ======================================================================================== + +# parallel suite + +advection/advection_mpi +binary/binary_mpi +binary_adi/binary_adi_mpi +coords/blast_mpi +disk/disk_mpi +nbody/nbody_mpi +hydro/linwave_mpi +ssheet/ssheet_mpi +diffusion/viscous_diffusion_mpi +diffusion/alpha_disk_mpi +diffusion/thermal_diffusion_mpi +drag/drag_mpi