diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
index 00000000..1dd8613e
--- /dev/null
+++ b/.github/workflows/build.yml
@@ -0,0 +1,66 @@
+# This workflow will try to build and install the software in different ways.
+
+name: Build and installation tests
+
+on:
+  push:
+    branches: ['master', 'devel']
+  pull_request:
+    branches: '*'
+
+jobs:
+  pure-pip-installation:
+    # This stage only tests if the installation is possible.
+    # The environment created herein will be discarded and re-created in the test stage.
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        # Add multiple Python versions here to run tests on new(er) versions.
+        python-version: ["3.10"]
+      fail-fast: false
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v3
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Build and install
+        run: |
+          python -m pip install --upgrade pip
+          # Install with -e (in editable mode) to allow the tracking of the test coverage
+          pip install -e .
+          # Check result of installation
+          python -c "import loadskernel"
+          which loads-kernel
+          which model-viewer
+          which loads-compare
+
+  conda-and-pip-installation:
+    # This stage only tests if the installation is possible.
+    # The environment created herein will be discarded and re-created in the test stage.
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        # Add multiple Python versions here to run tests on new(er) versions.
+        python-version: ["3.10", "3.11"]
+      fail-fast: false
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v3
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Build and install
+        run: |
+          # Install the same requirements as used during regression testing
+          source $CONDA/etc/profile.d/conda.sh
+          conda activate
+          conda install -y -c conda-forge --file ./tests/requirements.txt
+          # Install with -e (in editable mode) to allow the tracking of the test coverage
+          pip install -e .
+          # Check result of installation
+          python -c "import loadskernel"
+          which loads-kernel
+          which model-viewer
+          which loads-compare
+
\ No newline at end of file
diff --git a/.github/workflows/coding-style.yml b/.github/workflows/coding-style.yml
index ac53f481..9f28bbf6 100644
--- a/.github/workflows/coding-style.yml
+++ b/.github/workflows/coding-style.yml
@@ -17,8 +17,8 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        # Add multiple Python versions here to run tests on new(er) versions.
-        python-version: ["3.8"]
+        # Add the Python versions here to run tests on new(er) versions.
+        python-version: ["3.11"]
     steps:
       - uses: actions/checkout@v3
      - name: Set up Python ${{ matrix.python-version }}
@@ -40,8 +40,8 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        # Add multiple Python versions here to run tests on new(er) versions.
-        python-version: ["3.8"]
+        # Add multiple Python versions here to run tests on new(er) versions.
+        python-version: ["3.11"]
     steps:
       - uses: actions/checkout@v3
       - name: Set up Python ${{ matrix.python-version }}
@@ -50,10 +50,14 @@
           python-version: ${{ matrix.python-version }}
       - name: Install dependencies
         run: |
-          python -m pip install --upgrade pip
-          pip install pylint
+          # Install the same requirements as used during regression testing
+          source $CONDA/etc/profile.d/conda.sh
+          conda activate
+          conda install -y -c conda-forge --file ./tests/requirements.txt
           # Install the package itself to make sure that all imports work.
           pip install .
       - name: Analysing the code with pylint
         run: |
+          source $CONDA/etc/profile.d/conda.sh
+          conda activate
           pylint $(git ls-files '*.py') --fail-under=7.0
\ No newline at end of file
diff --git a/.github/workflows/regression-tests.yml b/.github/workflows/regression-tests.yml
index 436a2803..78ba9d82 100644
--- a/.github/workflows/regression-tests.yml
+++ b/.github/workflows/regression-tests.yml
@@ -1,6 +1,4 @@
-# This workflow will install and then lint the code with Flake8 and Pylint.
-# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
-
+# This workflow will run some regression tests.
 name: Regression Tests
 
 on:
@@ -10,36 +8,51 @@
     branches: '*'
 
 jobs:
-  pip-installation:
-    # This stage only tests if the installation is possible.
-    # The evironment created herein will be discared and re-created in the test stage.
+
+  Pytest:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        # Add multiple Python versions here to run tests on new(er) versions.
-        python-version: ["3.8"]
+        # Add multiple Python versions here to run tests on new(er) versions.
+        python-version: ["3.10"]
     steps:
       - uses: actions/checkout@v3
       - name: Set up Python ${{ matrix.python-version }}
         uses: actions/setup-python@v3
         with:
           python-version: ${{ matrix.python-version }}
-      - name: Build and install
+      - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          # Install with -e (in editable mode) to allow the tracking of the test coverage
+          # Install the package itself to make sure that all imports work.
           pip install -e .
-          # Check result of installation
-          which loads-kernel
-          which model-viewer
-          which loads-compare
+      - name: Analysing the code with pytest
+        run: |
+          # Run the actual testing of the code with pytest
+          # Using python -m pytest is necessary because plain pytest may not pick up the site-packages of the venv
+          python -m pytest -v --basetemp=./tmp -k test_dummy --cov=loadskernel --cov=modelviewer --cov=loadscompare --junitxml=testresult.xml
+          # Create some reports
+          coverage report
+          coverage xml -o coverage.xml
+          coverage html --directory ./coverage
+      - name: Upload test results and coverage as an artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: test results and coverage
+          path: |
+            testresult.xml
+            coverage.xml
+            coverage
+          if-no-files-found: ignore
 
   Jupyter:
+    # Building the Jupyter book is not really a regression test. However, it has to be in this workflow due to the handling of
+    # the artifacts.
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        # Add multiple Python versions here to run tests on new(er) versions.
-        python-version: ["3.8"]
+        # Select the Python version to be used for compiling here.
+        python-version: ["3.10"]
     steps:
       - uses: actions/checkout@v3
       - name: Set up Python ${{ matrix.python-version }}
@@ -49,7 +62,7 @@
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          pip install jupyter-book
+          # Install the package itself to make sure that all imports work.
           pip install .
       - name: Assemble the tutorials to a jupyter book and build html pages
         run: |
@@ -58,23 +71,37 @@
           mkdir ./doc/html
           mv ./doc/tutorials/_build/html ./doc/html/tutorials
       - name: Upload Jupyter book as an artifact
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: tutorials
           path: ./doc/html
           if-no-files-found: ignore
-      - name: Upload Jupyter book for pages
+
+  combine-pages:
+    runs-on: ubuntu-latest
+    # Add a dependency to the build jobs
+    needs: [Jupyter, Pytest]
+    steps:
+      - uses: actions/download-artifact@v4
+        with:
+          merge-multiple: true
+      - name: See what we've got and merge artifacts
+        run: |
+          ls -la
+          mkdir pages
+          mv ./tutorials ./pages/tutorials
+          mv ./coverage ./pages/coverage
+      - name: Upload artifact for pages
         # This is not a normal artifact but one that can be deployed to the GitHub pages in the next step
         uses: actions/upload-pages-artifact@v3
         with:
-          name: github-pages # This name may not be changed according to the documentation
-          path: ./doc/html
+          name: github-pages  # This name must not be changed, according to the documentation
+          path: ./pages  # There must be only one path
           if-no-files-found: ignore
-
+
   deploy-pages:
     # Add a dependency to the build job
-    needs: Jupyter
-
+    needs: combine-pages
     # Grant GITHUB_TOKEN the permissions required to make a Pages deployment
     permissions:
       pages: write  # to deploy to Pages
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index d3f57367..c336ecce 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -7,7 +7,7 @@ stages:
   - deploy
 
 .virtenv: &virtualenv
-  - source /work/voss_ar/Software/miniconda3/etc/profile.d/conda.sh
+  - source /work/f_jwsb/software/miniconda-3.11/etc/profile.d/conda.sh
   - conda activate
   # To make things faster, re-use existing site packages.
   - python -m venv virtualenv --system-site-packages
@@ -17,9 +17,7 @@ stages:
   # Check python version
   - which python
   - which pytest
-  # Set-up MPI
-  - export PATH=/work/voss_ar/Software/mpich-3.4.2/bin:$PATH
-  - export LD_LIBRARY_PATH=/work/voss_ar/Software/mpich-3.4.2/lib
+  # Check MPI
   - which mpiexec
   # Check location
   - pwd
@@ -34,6 +32,7 @@ pip-installation:
   - *virtualenv
   - pip install -e .
   # Check result of installation
+  - python -c "import loadskernel"
   - which loads-kernel
   - which model-viewer
   - which loads-compare
@@ -86,10 +85,12 @@ Pytest:
   - *virtualenv
   # Install with -e (in editable mode) to allow the tracking of the test coverage
   - pip install -e .
+  - pip list
   # Get the examples repository
   - git clone https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.dlr.de/loads-kernel/loads-kernel-examples.git
   # Run the actual testing of the code with pytest
-  - pytest -v --basetemp=./test_tmp --cov=loadskernel --cov=modelviewer --cov=loadscompare --junitxml=testresult.xml
+  # Using python -m pytest is necessary because plain pytest may not pick up the site-packages of the venv
+  - python -m pytest -v --basetemp=./tmp --cov=loadskernel --cov=modelviewer --cov=loadscompare --junitxml=testresult.xml
   # Create some reports
   - coverage report
   - coverage xml -o coverage.xml
diff --git a/.pytest.ini b/.pytest.ini
new file mode 100644
index 00000000..c3d18474
--- /dev/null
+++ b/.pytest.ini
@@ -0,0 +1,10 @@
+[pytest]
+filterwarnings =
+    # Filter to avoid annoying deprecation warnings raised by third-party code
+    # Step 1: ignore all deprecation warnings
+    ignore::DeprecationWarning::
+    # Step 2: re-activate deprecation warnings for our own modules
+    default::DeprecationWarning:loadskernel.*
+    default::DeprecationWarning:loadscompare.*
+    default::DeprecationWarning:modelviewer.*
+    default::DeprecationWarning:tests.*
\ No newline at end of file
diff --git a/loadskernel/io_functions/read_bdf.py b/loadskernel/io_functions/read_bdf.py
index 1c087ebe..2f5d7d97 100644
--- a/loadskernel/io_functions/read_bdf.py
+++ b/loadskernel/io_functions/read_bdf.py
@@ -190,7 +190,7 @@ def aggregate_cards(self, card_names):
         for card_name in card_names:
             old_size = self.cards[card_name].shape[0]
             sort_by_field = self.card_interpreters[card_name].field_names[0]
-            self.cards[card_name] = self.cards[card_name].groupby(by=sort_by_field, as_index=False).agg(sum)
+            self.cards[card_name] = self.cards[card_name].groupby(by=sort_by_field, as_index=False).agg("sum")
             new_size = self.cards[card_name].shape[0]
             if old_size != new_size:
                 logging.info('Aggregating {} {}s'.format(old_size - new_size, card_name))
diff --git a/loadskernel/io_functions/read_mona.py b/loadskernel/io_functions/read_mona.py
index ef1a290c..75c6cd8f 100644
--- a/loadskernel/io_functions/read_mona.py
+++ b/loadskernel/io_functions/read_mona.py
@@ -416,7 +416,7 @@ def add_SET1(pandas_sets):
     set_values = []
     for _, row in pandas_sets[['values']].iterrows():
         # create a copy of the current row to work with
-        my_row = copy.deepcopy(row[0])
+        my_row = copy.deepcopy(row.iloc[0])
         # remove all None values
         my_row = [item for item in my_row if item is not None]
         values = []
@@ -448,8 +448,8 @@ def add_AECOMP(pandas_aecomps):
     # Loop over the rows to check for NaNs and None, which occur in case an empty field was in the list.
     # Then, select only the valid list items.
     for _, row in pandas_aecomps[['LISTID']].iterrows():
-        is_id = [pd.notna(x) for x in row[0]]
-        list_id.append(list(compress(row[0], is_id)))
+        is_id = [pd.notna(x) for x in row.iloc[0]]
+        list_id.append(list(compress(row.iloc[0], is_id)))
 
     aecomp = {}
     aecomp['name'] = pandas_aecomps['NAME'].to_list()
diff --git a/loadskernel/plotting_standard.py b/loadskernel/plotting_standard.py
index efb1b271..d6c32e7c 100755
--- a/loadskernel/plotting_standard.py
+++ b/loadskernel/plotting_standard.py
@@ -166,7 +166,7 @@ def plot_forces_deformation_interactive(self):
 
     def plot_monstations(self, filename_pdf):
         # launch plotting
-        self.pp = PdfPages(filename_pdf)
+        self.pp = PdfPages(filename_pdf, keep_empty=False)
         self.potato_plots()
         if self.cuttingforces_wing:
             self.cuttingforces_along_wing_plots()
@@ -570,7 +570,7 @@ def plot_eigenvalues(self):
         ax[1].minorticks_on()
         ax[1].axis([-1.0, 1.0, imin, imax])
         # connect with y-axis from left hand plot
-        ax[0].get_shared_y_axes().join(ax[0], ax[1])
+        ax[0].sharey(ax[1])
         ax[1].yaxis.set_tick_params(which='both', labelleft=False, labelright=False)
         ax[1].yaxis.offsetText.set_visible(False)
         # add legend
diff --git a/loadskernel/spline_rules.py b/loadskernel/spline_rules.py
index 91cf4857..bde8a1b2 100644
--- a/loadskernel/spline_rules.py
+++ b/loadskernel/spline_rules.py
@@ -32,7 +32,8 @@ def rules_point(grid_i, grid_d):
     # All dependent grids are mapped to one grid point, which might be CG or MAC
    # Assumption: the relevant point is expected to be the only/first point in the independent grid
     splinerules = {}
-    splinerules[int(grid_i['ID'])] = list(grid_d['ID'])
+    assert len(grid_i['ID']) == 1, "The independent grid 'grid_i' may have only one grid point for this kind of spline rule."
+    splinerules[grid_i['ID'][0]] = list(grid_d['ID'])
     return splinerules
 
 
diff --git a/tests/requirements.txt b/tests/requirements.txt
new file mode 100644
index 00000000..1ed6a6c5
--- /dev/null
+++ b/tests/requirements.txt
@@ -0,0 +1,20 @@
+# These are the requirements to be used during regression testing
+mayavi
+traits
+traitsui
+pyface
+pyfmi
+h5py
+mpi4py
+pytest-cov
+psutil
+pytables
+pyyaml
+matplotlib
+mamba
+pandas
+openpyxl
+jupyter
+jupyter-book
+flake8
+pylint
\ No newline at end of file
diff --git a/tests/test_dummy.py b/tests/test_dummy.py
new file mode 100644
index 00000000..19f01b4e
--- /dev/null
+++ b/tests/test_dummy.py
@@ -0,0 +1,6 @@
+import platform
+
+
+def test_dummy():
+    print('The dummy test is executed.')
+    print('Running on python version {}'.format(platform.python_version()))
diff --git a/tests/test_gui.py b/tests/test_gui.py
index 905a1d41..02c51eac 100644
--- a/tests/test_gui.py
+++ b/tests/test_gui.py
@@ -1,7 +1,10 @@
 import logging
 
-from loadscompare import compare
-from modelviewer import view
+try:
+    from loadscompare import compare
+    from modelviewer import view
+except ImportError:
+    pass
 
 
 class TestLoadsCompare():
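
Two remarks on the new test files above, each with an illustrative sketch that is not part of the patch:

1) tests/test_gui.py: the try/except guard keeps pytest collection from failing when the GUI dependencies (mayavi, traits, ...) are missing, but any test that then touches compare or view fails with a NameError. An alternative sketch using pytest.importorskip, which reports the affected tests as skipped instead (module paths taken from the patch, the class body is elided here):

    import pytest

    # importorskip() imports and returns the module, or raises a Skipped
    # exception so that every test in this file is reported as skipped
    # when the GUI dependencies are not installed.
    compare = pytest.importorskip('loadscompare.compare')
    view = pytest.importorskip('modelviewer.view')


    class TestLoadsCompare():
        # ... test methods unchanged ...
        pass

2) .pytest.ini: the two-step filter works because entries later in the filterwarnings list take precedence over earlier ones. A rough runtime equivalent with the standard warnings module, shown here only to illustrate the mechanism (warnings.filterwarnings() prepends to the filter list, so the call issued last is checked first):

    import warnings

    # Step 1: silence DeprecationWarnings raised from third-party code
    warnings.filterwarnings('ignore', category=DeprecationWarning)
    # Step 2: re-enable them for warnings raised from our own modules
    warnings.filterwarnings('default', category=DeprecationWarning,
                            module=r'loadskernel\..*')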