diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index 500d6abeea8b..aabe7432e51c 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -40,3 +40,14 @@ jobs: ref: ${{ github.event.pull_request.head.sha }} - name: Check DAOS logging macro use. run: ./utils/cq/d_logging_check.py --github src + + ftest-tags: + name: Ftest tag check + runs-on: ubuntu-22.04 + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + - name: Check DAOS ftest tags. + run: ./src/tests/ftest/tags.py lint diff --git a/Jenkinsfile b/Jenkinsfile index 291aa7447576..d23f631d4ba2 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -360,7 +360,7 @@ pipeline { stage('Get Commit Message') { steps { script { - env.COMMIT_MESSAGE = sh(script: 'git show -s --format=%B', + env.COMMIT_MESSAGE = sh(script: 'src/tests/ftest/tags.py pragmas --use-commit --target origin/' + target_branch, returnStdout: true).trim() Map pragmas = [:] // can't use eachLine() here: https://issues.jenkins.io/browse/JENKINS-46988/ diff --git a/src/container/srv_container.c b/src/container/srv_container.c index 75e1df44341e..fd011f167c0c 100644 --- a/src/container/srv_container.c +++ b/src/container/srv_container.c @@ -6,6 +6,7 @@ /** * \file * + * dummy change * ds_cont: Container Operations * * This file contains the server API methods and the RPC handlers that are both diff --git a/src/tests/ftest/datamover/dst_create.yaml b/src/tests/ftest/datamover/dst_create.yaml index 6e42a17d2b5e..d1e8eeb68b77 100644 --- a/src/tests/ftest/datamover/dst_create.yaml +++ b/src/tests/ftest/datamover/dst_create.yaml @@ -1,3 +1,4 @@ +# Dummy change hosts: test_servers: 1 test_clients: 1 diff --git a/src/tests/ftest/dfuse/mu_mount.py b/src/tests/ftest/dfuse/mu_mount.py index 8083cb684dbe..22d49045af6c 100644 --- a/src/tests/ftest/dfuse/mu_mount.py +++ b/src/tests/ftest/dfuse/mu_mount.py @@ -10,6 +10,8 @@ from dfuse_test_base import 
DfuseTestBase from run_utils import command_as_user, run_remote +# Dummy change + class DfuseMUMount(DfuseTestBase): """Verifies multi-user dfuse mounting""" diff --git a/src/tests/ftest/dfuse/mu_perms.py b/src/tests/ftest/dfuse/mu_perms.py index 639e797423cd..efabe7835136 100644 --- a/src/tests/ftest/dfuse/mu_perms.py +++ b/src/tests/ftest/dfuse/mu_perms.py @@ -13,6 +13,8 @@ from run_utils import command_as_user, run_remote from user_utils import get_chown_command +# Dummy change + class DfuseMUPerms(DfuseTestBase): """Verify dfuse multi-user basic permissions.""" diff --git a/src/tests/ftest/nvme/object.py b/src/tests/ftest/nvme/object.py index 4c473401731e..905a4c974d1f 100644 --- a/src/tests/ftest/nvme/object.py +++ b/src/tests/ftest/nvme/object.py @@ -64,7 +64,7 @@ def container_read(container, array_size=None): # read written objects and verify container.read_objects() - def test_runner(self, namespace, record_size, array_size, thread_per_size=4): + def run_test(self, namespace, record_size, array_size, thread_per_size=4): """Perform simultaneous writes of varying record size to a container. 
Args: @@ -143,7 +143,7 @@ def test_nvme_object_single_pool(self): :avocado: tags=NvmeObject,test_nvme_object_single_pool """ # perform multiple object writes to a single pool - self.test_runner("/run/pool_1/*", self.record_size[:-1], 0, self.array_size) + self.run_test("/run/pool_1/*", self.record_size[:-1], 0, self.array_size) report_errors(self, self.errors) @avocado.fail_on(DaosApiError) @@ -165,7 +165,7 @@ def test_nvme_object_multiple_pools(self): :avocado: tags=NvmeObject,test_nvme_object_multiple_pools """ # thread to perform simultaneous object writes to multiple pools - runner_manager = ThreadManager(self.test_runner, self.get_remaining_time() - 30) + runner_manager = ThreadManager(self.run_test, self.get_remaining_time() - 30) runner_manager.add( namespace='/run/pool_1/*', record_size=self.record_size, array_size=self.array_size) runner_manager.add( @@ -178,6 +178,6 @@ def test_nvme_object_multiple_pools(self): self.errors.append(result.result) report_errors(self, self.errors) - # run the test_runner after cleaning up all the pools for large nvme_pool size - self.test_runner("/run/pool_3/*", self.record_size, self.array_size) + # run again after cleaning up all the pools for large nvme_pool size + self.run_test("/run/pool_3/*", self.record_size, self.array_size) report_errors(self, self.errors) diff --git a/src/tests/ftest/tags.py b/src/tests/ftest/tags.py new file mode 100755 index 000000000000..6c17c510b8d9 --- /dev/null +++ b/src/tests/ftest/tags.py @@ -0,0 +1,604 @@ +#!/usr/bin/env python3 +""" + (C) Copyright 2023 Intel Corporation. 
+ + SPDX-License-Identifier: BSD-2-Clause-Patent +""" +import ast +import os +import re +import subprocess # nosec +import sys +from argparse import ArgumentParser +from collections import defaultdict +from copy import deepcopy +from pathlib import Path + +import yaml + +THIS_FILE = os.path.realpath(__file__) +FTEST_DIR = os.path.dirname(THIS_FILE) +ROOT_DIR = os.path.realpath(os.path.join(FTEST_DIR, '..', '..', '..')) + + +class LintFailure(Exception): + """Exception for lint failures.""" + + +def git_root_dir(): + """Get the git root directory. + + Returns: + str: the root directory path + """ + result = subprocess.run( + ['git', 'rev-parse', '--show-toplevel'], + stdout=subprocess.PIPE, check=True, cwd=FTEST_DIR) + return result.stdout.decode().rstrip('\n') + + +def git_files_changed(target=None): + """Get a list of file from git diff. + + Args: + target (str): target branch or commit. Defaults to None, which auto-detects + + Returns: + list: paths of modified files + """ + git_root = git_root_dir() + if not target: + find_base = os.path.join(git_root, 'utils', 'githooks', 'find_base.sh') + result = subprocess.run( + f'source {find_base} &>/dev/null && echo $TARGET', + shell=True, stdout=subprocess.PIPE, check=True, cwd=FTEST_DIR) + target = result.stdout.decode().rstrip('\n') + result = subprocess.run( + ['git', 'diff', target, '--name-only', '--relative'], + stdout=subprocess.PIPE, cwd=git_root, check=True) + return [os.path.join(git_root, path) for path in result.stdout.decode().split('\n') if path] + + +def git_commit_message(): + """Get the latest git commit message. + + Returns: + str: the commit message + """ + result = subprocess.run( + 'git show -s --format=%B', + shell=True, stdout=subprocess.PIPE, check=True, cwd=FTEST_DIR) + return result.stdout.decode().rstrip('\n') + + +def all_python_files(path): + """Get a list of all .py files recursively in a directory. 
+ + Args: + path (str): directory to look in + + Returns: + list: sorted path names of .py files + """ + return sorted(map(str, Path(path).rglob("*.py"))) + + +class FtestTagMap(): + """Represent tags for ftest/avocado.""" + + def __init__(self, path=None): + """Initialize the tag mapping. + + Args: + path (list/str, optional): the file or dir path(s) to update from + """ + self.__mapping = {} # str(file_name) : str(class_name) : str(test_name) : set(tags) + if path: + self.__update_from_path(path) + + def __iter__(self): + """Iterate over the mapping. + + Yields: + tuple: file_name, class_name mapping + """ + for item in self.__mapping.items(): + yield deepcopy(item) + + def methods(self): + """Get a mapping of methods to tags. + + Yields: + (str, set): method name and tags + """ + for _, classes in self.__mapping.items(): + for _, methods in classes.items(): + for method_name, tags in methods.items(): + yield (method_name, tags) + + def unique_tags(self, exclude=None): + """Get the set of unique tags, excluding one or more paths. + + Args: + exclude (list/str, optional): path(s) to exclude from the unique set. + Defaults to None. + + Returns: + set: the set of unique tags + """ + if not exclude: + exclude = [] + elif not isinstance(exclude, (list, tuple)): + exclude = [exclude] + unique_tags = set() + exclude = list(map(os.path.realpath, exclude)) + for file_path, classes in self.__mapping.items(): + if file_path in exclude: + continue + for functions in classes.values(): + for tags in functions.values(): + unique_tags.update(tags) + return unique_tags + + def minimal_tags(self, include_paths=None): + """Get the minimal tags representing files in the mapping. + + This computes an approximate minimal - not the absolute minimal. + + Args: + include_paths (list/str, optional): path(s) to include in the mapping. 
+ Defaults to None, which includes all paths + + Returns: + list: list of sets of tags + """ + if not include_paths: + include_paths = [] + elif not isinstance(include_paths, (list, tuple)): + include_paths = [include_paths] + + include_paths = list(map(os.path.realpath, include_paths)) + + minimal_sets = [] + + self.__sub_yaml_py(include_paths) + + for file_path, classes in self.__mapping.items(): + if include_paths and file_path not in include_paths: + continue + # Keep track of recommended tags for each method + file_recommended = [] + for class_name, functions in classes.items(): + for function_name, tags in functions.items(): + # Try the class name and function name first + if class_name in tags: + file_recommended.append(set([class_name])) + continue + if function_name in tags: + file_recommended.append(set([function_name])) + continue + # Try using a set of tags globally unique to this test + globally_unique_tags = tags - self.unique_tags(exclude=file_path) + if globally_unique_tags and globally_unique_tags.issubset(tags): + file_recommended.append(globally_unique_tags) + continue + # Fallback to just using all of this test's tags + file_recommended.append(tags) + + if not file_recommended: + continue + + # If all functions in the file have a common set of tags, use that set + file_recommended_intersection = set.intersection(*file_recommended) + if file_recommended_intersection: + minimal_sets.append(file_recommended_intersection) + continue + + # Otherwise, use tags unique to each function + file_recommended_unique = [] + for tags in file_recommended: + if tags not in file_recommended_unique: + file_recommended_unique.append(tags) + minimal_sets.extend(file_recommended_unique) + + # Combine the minimal sets into a single set representing what avocado expects + avocado_set = set(','.join(tags) for tags in minimal_sets) + + return avocado_set + + def is_test_subset(self, tags1, tags2): + """Determine whether a set of tags is a subset with respect to tests. 
+ + Args: + tags1 (list): list of sets of tags + tags2 (list): list of sets of tags + + Returns: + bool: whether tags1's tests is a subset of tags2's tests + """ + tests1 = set(self.__tags_to_tests(tags1)) + tests2 = set(self.__tags_to_tests(tags2)) + return tests1.issubset(tests2) + + def __tags_to_tests(self, tags): + """Convert a list of tags to the tests they would run. + + Args: + tags (list): list of sets of tags + """ + tests = [] + for method_name, test_tags in self.methods(): + for tag_set in tags: + if tag_set.issubset(test_tags): + tests.append(method_name) + break + return tests + + def __update_from_path(self, path): + """Update the mapping from a path. + + Args: + path (list/str, optional): the file or dir path(s) to update from + + Raises: + ValueError: if a path is not a file + """ + if not isinstance(path, (list, tuple)): + path = [path] + elif isinstance(path, tuple): + path = list(path) + + path = list(map(os.path.realpath, path)) + self.__sub_yaml_py(path) + + for _path in path: + if os.path.isdir(_path): + for __path in all_python_files(_path): + self.__parse_file(__path) + continue + if _path.endswith('.yaml'): + _path = re.sub(r'\.yaml$', '.py', _path) + if not os.path.isfile(_path): + raise ValueError(f'Expected file: {_path}') + self.__parse_file(_path) + + def __parse_file(self, path): + """Parse a file and update the internal mapping from avocado tags. + + Args: + path (str): file to parse + """ + with open(path, 'r') as file: + file_data = file.read() + + module = ast.parse(file_data) + for class_def in filter(lambda val: isinstance(val, ast.ClassDef), module.body): + for func_def in filter(lambda val: isinstance(val, ast.FunctionDef), class_def.body): + if not func_def.name.startswith('test_'): + continue + tags = self.__parse_avocado_tags(ast.get_docstring(func_def)) + self.__update(path, class_def.name, func_def.name, tags) + + @staticmethod + def __sub_yaml_py(paths): + """Replace ftest .yaml paths with .py equivalents. 
+ + Args: + paths (list): paths to check + """ + for idx, path in enumerate(paths): + if 'ftest/' in path and path.endswith('.yaml'): + paths[idx] = re.sub(r'\.yaml$', '.py', path) + + def __update(self, file_name, class_name, test_name, tags): + """Update the internal mapping by appending the tags. + + Args: + file_name (str): file name + class_name (str): class name + test_name (str): test name + tags (set): set of tags to update + """ + if file_name not in self.__mapping: + self.__mapping[file_name] = {} + if class_name not in self.__mapping[file_name]: + self.__mapping[file_name][class_name] = {} + if test_name not in self.__mapping[file_name][class_name]: + self.__mapping[file_name][class_name][test_name] = set() + self.__mapping[file_name][class_name][test_name].update(tags) + + @staticmethod + def __parse_avocado_tags(text): + """Parse avocado tags from a string. + + Args: + text (str): the string to parse for tags + + Returns: + set: the set of tags + """ + tag_strings = re.findall(':avocado: tags=(.*)', text) + if not tag_strings: + return set() + return set(','.join(tag_strings).split(',')) + + +def get_tag_map(): + """Map core files to tags. + + Returns: + dict: the mapping + """ + with open(os.path.join(FTEST_DIR, 'tags.yaml'), 'r') as file: + return yaml.safe_load(file.read()) + + +def sorted_tags(tags): + """Get a sorted list of tags. + + Args: + tags (set): original tags + + Returns: + list: sorted tags + """ + tags_tmp = set(tags) + new_tags = [] + for tag in ('all', 'vm', 'hw', 'medium', 'large', 'pr', 'daily_regression', 'full_regression'): + if tag in tags_tmp: + new_tags.append(tag) + tags_tmp.remove(tag) + new_tags.extend(sorted(tags_tmp)) + return new_tags + + +def recommended_map_tags(paths): + """Get the recommended tags based on the config mapping. 
+ + Args: + paths (list): paths to get tags for + + Returns: + set: the recommended tags + """ + all_mapping = get_tag_map() + recommended = set() + exclude = all_mapping['exclude'] + include = all_mapping['include'] + tags_per_file = all_mapping['map'] + + def _any_match(path, expressions): + for expression in expressions: + if re.search(rf'{expression}', path): + return True + return False + + for path in paths: + # Remove root path prefix + path = re.sub(f'{ROOT_DIR}{os.sep}', '', os.path.realpath(path)) + # Exclude any that match the exclude list but aren't in the include list + if _any_match(path, exclude) and not _any_match(path, include): + continue + # Get tags for all matches + for _pattern, _tags in tags_per_file.items(): + if re.search(rf'{_pattern}', path): + recommended.update(_tags.split(' ')) + return recommended + + +def run_linter(paths=None): + """Run the ftest tag linter. + + Args: + paths (list, optional): paths to lint. Defaults to all ftest python files + + Raises: + LintFailure: if linting fails + """ + if not paths: + paths = all_python_files(FTEST_DIR) + all_files = [] + all_classes = defaultdict(int) + all_methods = defaultdict(int) + test_wo_tags = [] + tests_wo_class_as_tag = [] + tests_wo_method_as_tag = [] + tests_wo_hw_vm_manual = [] + ftest_tag_map = FtestTagMap(paths) + for file_path, classes in iter(ftest_tag_map): + all_files.append(file_path) + for class_name, functions in classes.items(): + all_classes[class_name] += 1 + for method_name, tags in functions.items(): + all_methods[method_name] += 1 + if len(tags) == 0: + test_wo_tags.append(method_name) + if class_name not in tags: + tests_wo_class_as_tag.append(method_name) + if method_name not in tags: + tests_wo_method_as_tag.append(method_name) + if not set(tags).intersection(set(['vm', 'hw', 'manual'])): + tests_wo_hw_vm_manual.append(method_name) + + non_unique_classes = list(name for name, num in all_classes.items() if num > 1) + non_unique_methods = list(name for name, num 
in all_methods.items() if num > 1) + + # Make sure the tag map contains valid tags + valid_tags = ftest_tag_map.unique_tags() + map_tags = set() + for _, tags in get_tag_map()['map'].items(): + for _tags in filter(None, tags.split(' ')): + map_tags.update(_tags.split(',')) + invalid_tags = map_tags.difference(valid_tags) + + print('ftest tag lint') + + def _error_handler(_list, message, required=True): + """Exception handler for each class of failure.""" + _list_len = len(_list) + req_str = '(required)' if required else '(optional)' + print(f' {req_str} {len(_list)} {message}') + if _list_len == 0: + return None + for _test in _list: + print(f' {_test}') + if _list_len > 3: + remaining = _list_len - 3 + _list = _list[:3] + [f"... (+{remaining})"] + _list_str = ", ".join(_list) + if not required: + # Print but do not fail + return None + return LintFailure(f"{_list_len} {message}: {_list_str}") + + # Lint fails if any of the lists contain entries + errors = list(filter(None, [ + _error_handler(non_unique_classes, 'non-unique test classes'), + _error_handler(non_unique_methods, 'non-unique test methods'), + _error_handler(test_wo_tags, 'tests without tags'), + _error_handler(tests_wo_class_as_tag, 'tests without class as tag', required=False), + _error_handler(tests_wo_method_as_tag, 'tests without method name as tag'), + _error_handler(tests_wo_hw_vm_manual, 'tests without HW, VM, or manual tag'), + _error_handler(list(invalid_tags), 'invalid tags in tags.yaml')])) + if errors: + raise errors[0] + + +def run_dump(paths=None): + """Dump the tags per test. + + Formatted as + : + : + - + + Args: + paths (list, optional): path(s) to get tags for. 
Defaults to all ftest python files + """ + if not paths: + paths = all_python_files(FTEST_DIR) + for file_path, classes in iter(FtestTagMap(paths)): + short_file_path = re.findall(r'ftest/(.*$)', file_path)[0] + print(f'{short_file_path}:') + for class_name, functions in classes.items(): + print(f' {class_name}:') + all_methods = [] + longest_method_name = 0 + for method_name, tags in functions.items(): + longest_method_name = max(longest_method_name, len(method_name)) + all_methods.append((method_name, tags)) + for method_name, tags in all_methods: + method_name_fm = method_name.ljust(longest_method_name, " ") + tags_fm = ",".join(sorted_tags(tags)) + print(f' {method_name_fm} - {tags_fm}') + + +def run_pragmas(paths, use_commit=False): + """Get the recommended test pragmas. + + Args: + paths (list): paths to get pragmas for + use_commit (bool, optional): EXPERIMENTAL. whether to put pragmas in the + current commit message. Default only prints + + Returns: + bool: whether the commit test tags are a subset of the recommended. 
+ Always true for use_commit=False + """ + # Get recommended tags for ftest changes + ftest_tag_map = FtestTagMap(all_python_files(FTEST_DIR)) + ftest_tag_set = ftest_tag_map.minimal_tags(paths) + + # Get recommended tags from the map + core_tag_set = recommended_map_tags(paths) + rec_tags = ' '.join(sorted(ftest_tag_set | core_tag_set)) + all_tags = rec_tags + commit_tags = '' + + # Just print the tags + if not use_commit: + print('Test-tag: ', all_tags) + return True + + commit_message = git_commit_message() + + # Modify and print the new commit message + commit_features = re.findall(r'Features:.*$', commit_message, re.MULTILINE) + commit_test_tag = re.findall(r'Test-tag:.*$', commit_message, re.MULTILINE) + first_line = re.findall(r'^.*', commit_message) + if commit_features: + # Merge "Features" into "Test-tag" + features = commit_features[0].split(':', maxsplit=1)[1].strip() + commit_tags += ' ' + features + all_tags += ' ' + features + replacement = '\n'.join([ + '# Merged into Test-tag', f'# {commit_features[0]}']) + commit_message = re.sub(commit_features[0], replacement, commit_message) + if commit_test_tag: + commit_tags += ' ' + commit_test_tag[0].split(':', maxsplit=1)[1].strip() + replacement = '\n'.join([ + commit_test_tag[0], + '# Auto-recommended Test-tag', + f'# Test-tag: {all_tags}']) + commit_message = re.sub(commit_test_tag[0], replacement, commit_message) + elif first_line: + replacement = '\n'.join([ + first_line[0], + '', + '# Auto-recommended Test-tag', + f'Test-tag: {all_tags}']) + commit_message = re.sub(first_line[0], replacement, commit_message) + print(commit_message) + commit_tags = [set(tags.split(',')) for tags in commit_tags.split(' ') if tags] + rec_tags = [set(tags.split(',')) for tags in rec_tags.split(' ') if tags] + return ftest_tag_map.is_test_subset(commit_tags, rec_tags) + + +if __name__ == '__main__': + """Main execution. + + Exits with 0 on success, 1 on error, and 2 on warning. 
+ """ + parser = ArgumentParser() + parser.add_argument( + "command", + choices=("lint", "pragmas", "dump"), + help="command to run") + parser.add_argument( + "paths", + nargs="*", + help="file paths") + parser.add_argument( + "--target", + help="target branch or commit for pragmas. Default auto-detects") + parser.add_argument( + "--use-commit", + action="store_true", + help="print an updated commit message with pragmas. Without, just pragmas are printed") + args = parser.parse_args() + if args.target and args.command != 'pragmas': + print('--target only works with "pragmas"') + sys.exit(1) + if args.use_commit and args.command != 'pragmas': + print('--use-commit only works with "pragmas"') + sys.exit(1) + args.paths = list(map(os.path.realpath, args.paths)) + + if args.command == "lint": + run_linter(args.paths) + sys.exit(0) + + if args.command == "dump": + run_dump(args.paths) + sys.exit(0) + + if args.command == "pragmas": + if not args.paths: + args.paths = git_files_changed(args.target) + if run_pragmas(args.paths, args.use_commit): + sys.exit(0) + else: + # This could be made to return an error if a commit only uses + # a subset of the recommended tags + sys.exit(0) diff --git a/src/tests/ftest/tags.yaml b/src/tests/ftest/tags.yaml new file mode 100644 index 000000000000..13d56544751d --- /dev/null +++ b/src/tests/ftest/tags.yaml @@ -0,0 +1,25 @@ +# (C) Copyright 2023 Intel Corporation. 
+# +# SPDX-License-Identifier: BSD-2-Clause-Patent +exclude: + - src/tests/ftest* # special handling in tags.py +include: + - src/tests/ftest/util/* +map: + .*: pr + src/cart/: cart + src/client/dfuse/dfuse.*: dfuse + src/client/dfuse/il/: dfuse,il + src/client/dfuse/ops/: dfuse + src/client/dfuse/pil4dfs/: pil4dfs + src/client/dfs/: dfs + src/container/: container + src/control/: control + src/control/security/: security + src/object/: object + src/pool/: pool + src/rebuild/: rebuild + src/security/: security + src/tests/suite/daos.*: daos_test + src/tests/suite/dfs.*: dfs_test + src/tests/suite/dfuse.*: dfuse_test diff --git a/utils/cq/words.dict b/utils/cq/words.dict index e062dc6dcc99..ba62c6c7b9fb 100644 --- a/utils/cq/words.dict +++ b/utils/cq/words.dict @@ -331,6 +331,7 @@ posix postfix pparent ppn +pragmas prebuild prebuilding prebuilt diff --git a/utils/rpms/daos.spec b/utils/rpms/daos.spec index 745c24446d65..b681757ca346 100644 --- a/utils/rpms/daos.spec +++ b/utils/rpms/daos.spec @@ -15,7 +15,7 @@ Name: daos Version: 2.5.100 -Release: 10%{?relval}%{?dist} +Release: 11%{?relval}%{?dist} Summary: DAOS Storage Engine License: BSD-2-Clause-Patent @@ -364,7 +364,7 @@ install -m 644 utils/systemd/%{agent_svc_name} %{buildroot}/%{_unitdir} mkdir -p %{buildroot}/%{conf_dir}/certs/clients mv %{buildroot}/%{conf_dir}/bash_completion.d %{buildroot}/%{_sysconfdir} # fixup env-script-interpreters -sed -i -e '1s/env //' %{buildroot}{%{daoshome}/TESTING/ftest/{cart/cart_logtest,config_file_gen,launch,slurm_setup,verify_perms}.py,%{_bindir}/daos_storage_estimator.py,%{_datarootdir}/daos/control/setup_spdk.sh} +sed -i -e '1s/env //' %{buildroot}{%{daoshome}/TESTING/ftest/{cart/cart_logtest,config_file_gen,launch,slurm_setup,tags,verify_perms}.py,%{_bindir}/daos_storage_estimator.py,%{_datarootdir}/daos/control/setup_spdk.sh} # shouldn't have source files in a non-devel RPM rm -f 
%{buildroot}%{daoshome}/TESTING/ftest/cart/{test_linkage.cpp,utest_{hlc,portnumber,protocol,swim}.c,wrap_cmocka.h} @@ -585,6 +585,9 @@ getent passwd daos_agent >/dev/null || useradd -s /sbin/nologin -r -g daos_agent # No files in a shim package %changelog +* Wed Nov 08 2023 Dalton Bohning 2.5.100-11 +- Add ftest/tags.py + * Fri Nov 03 2023 Phillip Henderson 2.5.100-10 - Move verify_perms.py location