From 6e200c7c8720571d8821acbef9c13db48ad0750b Mon Sep 17 00:00:00 2001 From: "Jennifer A. Clark" Date: Sun, 15 Dec 2024 22:59:03 -0500 Subject: [PATCH 1/8] Address reviewer comments --- .../cubic_peng_robinson/acetone_hexane/input_xi_318.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/despasito/examples/cubic_peng_robinson/acetone_hexane/input_xi_318.json b/despasito/examples/cubic_peng_robinson/acetone_hexane/input_xi_318.json index 841f361..126b606 100755 --- a/despasito/examples/cubic_peng_robinson/acetone_hexane/input_xi_318.json +++ b/despasito/examples/cubic_peng_robinson/acetone_hexane/input_xi_318.json @@ -2,8 +2,8 @@ "bead_configuration": [[["acetone", 1]], [["hexane", 1]]], "eos": "cubic.peng_robinson", "EOSgroup": "../../library/PRgroup.json", - "EOScross": "PRcross_315.json", - "output_file": "out_xi_315.txt", + "EOScross": "PRcross_318.json", + "output_file": "out_xi_318.txt", "calculation_type" : "bubble_pressure", "pressure_options": {"maxiter":15}, "mole_fraction_options": {"maxiter":30}, From cc3fbe07cb29e59cd97f898e7cc17379d1d95304 Mon Sep 17 00:00:00 2001 From: "Jennifer A. Clark" Date: Tue, 17 Dec 2024 23:16:36 -0500 Subject: [PATCH 2/8] Convert setup.py to toml --- .gitignore | 3 + .vscode/settings.json | 3 +- despasito/__init__.py | 9 +- despasito/equations_of_state/eos_toolbox.py | 2 +- .../Fit_CH3/differential_evolution/input.json | 12 +- .../saturation_ethane.csv | 20 ---- .../Fit_CH3/grid_minimization/input.json | 9 +- .../grid_minimization/saturation_ethane.csv | 20 ---- .../butane_solubility/fit_grid.py | 36 ++++++ .../butane_solubility/input_fit_grid.json | 5 +- .../butane_solubility/input_test.json | 15 ++- .../decane_helmholtz/run_test.py | 4 +- .../hexane_heptane/hexane_heptane_test.py | 4 +- despasito/input_output/read_input.py | 2 +- despasito/parameter_fitting/__init__.py | 9 ++ despasito/parameter_fitting/fit_functions.py | 1 + despasito/parameter_fitting/global_methods.py | 8 +- despasito/thermodynamics/calc.py | 2 +- despasito/utils/general_toolbox.py | 16 ++- pyproject.toml | 104 ++++++++++++++++++ setup.py | 37 +------ src/despasito/_version.py | 1 + 22 files changed, 198 insertions(+), 124 deletions(-) create mode 100644 despasito/examples/saft_gamma_mie/butane_solubility/fit_grid.py create mode 100644 pyproject.toml create mode 100644 src/despasito/_version.py diff --git a/.gitignore b/.gitignore index 67dc286..0da6005 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,9 @@ __pycache__/ # C extensions *.so +# VS Code extension +.vscode/* + # Distribution / packaging .Python env/ diff --git a/.vscode/settings.json b/.vscode/settings.json index 7688f82..5069e33 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,5 +1,6 @@ { "python.testing.pytestArgs": [], "python.testing.unittestEnabled": false, - "python.testing.pytestEnabled": true + "python.testing.pytestEnabled": true, + "python.terminal.executeInFileDir": true } \ No newline at end of file diff --git a/despasito/__init__.py b/despasito/__init__.py index 5966875..8aac11f 100644 --- a/despasito/__init__.py +++ b/despasito/__init__.py @@ -7,14 +7,9 @@ import os import logging import logging.handlers +from importlib.metadata import version -# Handle versioneer -from ._version import get_versions - -versions = get_versions() -__version__ = versions["version"] -__git_revision__ = versions["full-revisionid"] -del get_versions, versions +__version__ = version("despasito") logger = logging.getLogger("despasito") logger.setLevel(30) diff --git 
a/despasito/equations_of_state/eos_toolbox.py b/despasito/equations_of_state/eos_toolbox.py index f7451d0..1e85523 100644 --- a/despasito/equations_of_state/eos_toolbox.py +++ b/despasito/equations_of_state/eos_toolbox.py @@ -130,7 +130,7 @@ def _partial_density_wrapper(rhoi, T, func): rho = np.array([np.sum(rhoi)]) xi = rhoi / rho - Ares = func(rho, T, xi) + Ares = func(rho, T, xi)[0] return Ares diff --git a/despasito/examples/saft_gamma_mie/Fit_CH3/differential_evolution/input.json b/despasito/examples/saft_gamma_mie/Fit_CH3/differential_evolution/input.json index dace6ec..1f21aa5 100644 --- a/despasito/examples/saft_gamma_mie/Fit_CH3/differential_evolution/input.json +++ b/despasito/examples/saft_gamma_mie/Fit_CH3/differential_evolution/input.json @@ -2,11 +2,8 @@ "EOSgroup": "SAFTgroup.json", "optimization_parameters": { "fit_bead" : "CH3", - "fit_parameter_names": ["epsilon", "sigma", "lambdar", "Sk"], - "lambdar_bounds" : [8.0, 60.0], - "epsilon_bounds" : [150.0, 1000.0], - "epsilon_CH3CH2_bounds" : [150.0, 1000.0], - "sigma_bounds" : [2.5e-1, 5.5e-1] + "fit_parameter_names": ["sigma"], + "lambdar_bounds" : [6.0, 10.0] }, "Ethane": { "bead_configuration": [[["CH3", 2]]], @@ -16,9 +13,10 @@ "weights": { "rhov" : 0.0} }, "global_opts": { - "popsize": 40, + "popsize": 10, "mutation": 1.5, "strategy": "best2bin", - "recombination": 0.1 + "recombination": 0.1, + "maxiter": 100 } } diff --git a/despasito/examples/saft_gamma_mie/Fit_CH3/differential_evolution/saturation_ethane.csv b/despasito/examples/saft_gamma_mie/Fit_CH3/differential_evolution/saturation_ethane.csv index 586eacc..5c1a7a3 100644 --- a/despasito/examples/saft_gamma_mie/Fit_CH3/differential_evolution/saturation_ethane.csv +++ b/despasito/examples/saft_gamma_mie/Fit_CH3/differential_evolution/saturation_ethane.csv @@ -1,33 +1,13 @@ # NIST Webbook #T, Psat, rhol, rhov 125, 696.69, 20411, 0.67027 -130, 1291.7, 20225, 1.1952 -135, 2275.5, 20038, 2.0284 140, 3831.5, 19850, 3.2959 -145, 6197, 19661, 5.1529 -150, 9669.8, 19470, 7.7851 155, 14612, 19277, 11.409 -160, 21452, 19083, 16.272 -165, 30688, 18886, 22.65 170, 42882, 18686, 30.849 -175, 58662, 18484, 41.2 -180, 78718, 18279, 54.065 185, 103800, 18071, 69.83 -190, 134700, 17860, 88.911 -195, 172260, 17644, 111.75 200, 217380, 17425, 138.83 -205, 270970, 17201, 170.66 -210, 334010, 16972, 207.8 215, 407460, 16738, 250.86 -220, 492350, 16498, 300.52 -225, 589710, 16252, 357.52 230, 700600, 15998, 422.72 -235, 826100, 15737, 497.07 -240, 967320, 15466, 581.71 245, 1125400, 15186, 677.96 -250, 1301500, 14894, 787.43 -255, 1496800, 14588, 912.05 260, 1712500, 14267, 1054.3 -265, 1950100, 13927, 1217.3 -270, 2210700, 13564, 1405.3 275, 2496000, 13172, 1623.9 diff --git a/despasito/examples/saft_gamma_mie/Fit_CH3/grid_minimization/input.json b/despasito/examples/saft_gamma_mie/Fit_CH3/grid_minimization/input.json index 77ebcf9..01a08b6 100644 --- a/despasito/examples/saft_gamma_mie/Fit_CH3/grid_minimization/input.json +++ b/despasito/examples/saft_gamma_mie/Fit_CH3/grid_minimization/input.json @@ -2,10 +2,8 @@ "EOSgroup": "SAFTgroup.json", "optimization_parameters": { "fit_bead" : "CH3", - "fit_parameter_names": ["epsilon", "sigma", "lambdar", "Sk"], + "fit_parameter_names": ["sigma", "lambdar"], "lambdar_bounds" : [8.0, 60.0], - "epsilon_bounds" : [150.0, 1000.0], - "epsilon_CH3CH2_bounds" : [150.0, 1000.0], "sigma_bounds" : [2.5e-1, 5.5e-1] }, "Ethane": { @@ -17,6 +15,7 @@ }, "global_opts": { "method": "grid_minimization", - "Ns": 4 - } + "Ns": 2 + }, + "minimizer_opts": 
{"xtol": 1e-4} } diff --git a/despasito/examples/saft_gamma_mie/Fit_CH3/grid_minimization/saturation_ethane.csv b/despasito/examples/saft_gamma_mie/Fit_CH3/grid_minimization/saturation_ethane.csv index 586eacc..5c1a7a3 100644 --- a/despasito/examples/saft_gamma_mie/Fit_CH3/grid_minimization/saturation_ethane.csv +++ b/despasito/examples/saft_gamma_mie/Fit_CH3/grid_minimization/saturation_ethane.csv @@ -1,33 +1,13 @@ # NIST Webbook #T, Psat, rhol, rhov 125, 696.69, 20411, 0.67027 -130, 1291.7, 20225, 1.1952 -135, 2275.5, 20038, 2.0284 140, 3831.5, 19850, 3.2959 -145, 6197, 19661, 5.1529 -150, 9669.8, 19470, 7.7851 155, 14612, 19277, 11.409 -160, 21452, 19083, 16.272 -165, 30688, 18886, 22.65 170, 42882, 18686, 30.849 -175, 58662, 18484, 41.2 -180, 78718, 18279, 54.065 185, 103800, 18071, 69.83 -190, 134700, 17860, 88.911 -195, 172260, 17644, 111.75 200, 217380, 17425, 138.83 -205, 270970, 17201, 170.66 -210, 334010, 16972, 207.8 215, 407460, 16738, 250.86 -220, 492350, 16498, 300.52 -225, 589710, 16252, 357.52 230, 700600, 15998, 422.72 -235, 826100, 15737, 497.07 -240, 967320, 15466, 581.71 245, 1125400, 15186, 677.96 -250, 1301500, 14894, 787.43 -255, 1496800, 14588, 912.05 260, 1712500, 14267, 1054.3 -265, 1950100, 13927, 1217.3 -270, 2210700, 13564, 1405.3 275, 2496000, 13172, 1623.9 diff --git a/despasito/examples/saft_gamma_mie/butane_solubility/fit_grid.py b/despasito/examples/saft_gamma_mie/butane_solubility/fit_grid.py new file mode 100644 index 0000000..9b571c3 --- /dev/null +++ b/despasito/examples/saft_gamma_mie/butane_solubility/fit_grid.py @@ -0,0 +1,36 @@ + +import numpy as np + +import despasito +import despasito.input_output.read_input as io +import despasito.parameter_fitting as fit +from despasito.equations_of_state import initiate_eos + +despasito.initiate_logger(console=True, verbose=10) + +Eos = initiate_eos( + eos="saft.gamma_mie", + beads=["CH2", "CH3"], + molecular_composition=np.array([[2.0, 2.0]]), + bead_library=io.json_to_dict("../../library/SAFTgroup.json"), +) + +fit.fit( + optimization_parameters={ + 'fit_bead': 'CH3', + 'fit_parameter_names': ['epsilon_CH2'], + 'epsilon_CH2_bounds': [150.0, 600.0], + 'parameters_guess': [300.0] + }, + exp_data={ + 'Knovel': { + 'data_class_type': 'liquid_density', + "eos_obj": Eos, + "calculation_type": 'liquid_properties', + "T": np.array([272.15, 323.15, 298.15]), + "rhol": np.array([10357. , 10364.8, 10140. 
]), + "delta": np.array([14453., 13700., 14100.]), + }, + }, + global_opts={'method': 'grid_minimization', 'Ns': 3}, +) \ No newline at end of file diff --git a/despasito/examples/saft_gamma_mie/butane_solubility/input_fit_grid.json b/despasito/examples/saft_gamma_mie/butane_solubility/input_fit_grid.json index 21e350a..b3cf304 100644 --- a/despasito/examples/saft_gamma_mie/butane_solubility/input_fit_grid.json +++ b/despasito/examples/saft_gamma_mie/butane_solubility/input_fit_grid.json @@ -1,14 +1,13 @@ { - "bead_configuration": [[["CH3", 2],["CH2", 2]]], "EOSgroup": "../../library/SAFTgroup.json", "optimization_parameters": { "fit_bead" : "CH3", "fit_parameter_names": ["epsilon_CH2"], - "epsilon_bounds" : [150.0, 400.0], + "epsilon_CH2_bounds" : [150.0, 600.0], "parameters_guess" : [300.0] }, "Knovel": { - "bead_configuration": [[["CH3", 2],["CH2", 4]]], + "bead_configuration": [[["CH3", 2],["CH2", 2]]], "data_class_type": "liquid_density", "calculation_type": "liquid_properties", "file": "butane_solubility.csv" diff --git a/despasito/examples/saft_gamma_mie/butane_solubility/input_test.json b/despasito/examples/saft_gamma_mie/butane_solubility/input_test.json index f490767..3e26cd2 100644 --- a/despasito/examples/saft_gamma_mie/butane_solubility/input_test.json +++ b/despasito/examples/saft_gamma_mie/butane_solubility/input_test.json @@ -1,15 +1,22 @@ { - "bead_configuration": [[["CH3", 2],["CH2", 2]]], "EOSgroup": "../../library/SAFTgroup.json", "optimization_parameters": { "fit_bead" : "CH3", "fit_parameter_names": ["epsilon_CH2","sigma"], - "epsilon_bounds" : [150.0, 400.0], + "epsilon_CH2_bounds" : [150.0, 400.0], + "sigma_bounds" : [0.3, 0.5], "parameters_guess" : [300.0, 3.4e-1] }, "Knovel": { - "bead_configuration": [[["CH3", 2],["CH2", 4]]], - "data_class_type": "test", + "bead_configuration": [[["CH3", 2],["CH2", 2]]], + "data_class_type": "solubility_parameter", "file": "butane_solubility.csv" + }, + "global_opts": { + "popsize": 10, + "mutation": 1.5, + "strategy": "best2bin", + "recombination": 0.1, + "maxiter": 100 } } diff --git a/despasito/examples/saft_gamma_mie/decane_helmholtz/run_test.py b/despasito/examples/saft_gamma_mie/decane_helmholtz/run_test.py index 928cf63..d6f2321 100644 --- a/despasito/examples/saft_gamma_mie/decane_helmholtz/run_test.py +++ b/despasito/examples/saft_gamma_mie/decane_helmholtz/run_test.py @@ -1,5 +1,5 @@ import numpy as np -import despasito +from despasito.equations_of_state import initiate_eos beads = ["CH3", "CH2"] beads_per_molecule = np.array([[2.0, 8.0]]) @@ -24,7 +24,7 @@ }, } cross_library = {"CH3": {"CH2": {"epsilon": 350.770}}} -Eos = despasito.equations_of_state.initiate_eos( +Eos = initiate_eos( eos="saft.gamma_mie", beads=beads, molecular_composition=beads_per_molecule, diff --git a/despasito/examples/saft_gamma_mie/hexane_heptane/hexane_heptane_test.py b/despasito/examples/saft_gamma_mie/hexane_heptane/hexane_heptane_test.py index 901eb9f..eca9922 100755 --- a/despasito/examples/saft_gamma_mie/hexane_heptane/hexane_heptane_test.py +++ b/despasito/examples/saft_gamma_mie/hexane_heptane/hexane_heptane_test.py @@ -3,11 +3,11 @@ import despasito import despasito.input_output.read_input as io import despasito.thermodynamics as thermo -import despasito.equations_of_state +from despasito.equations_of_state import initiate_eos despasito.initiate_logger(console=True, verbose=10) -Eos = despasito.equations_of_state.initiate_eos( +Eos = initiate_eos( eos="saft.gamma_mie", beads=["CH3", "CH2"], molecular_composition=np.array([[2.0, 4.0], 
[2.0, 5.0]]), diff --git a/despasito/input_output/read_input.py b/despasito/input_output/read_input.py index ecb7b72..203782b 100755 --- a/despasito/input_output/read_input.py +++ b/despasito/input_output/read_input.py @@ -332,7 +332,7 @@ def process_param_fit_inputs(thermo_dict): " an optimization_parameters dictionary with 'fit_bead' and " "'fit_parameter_names' must be provided." ) - + return new_thermo_dict diff --git a/despasito/parameter_fitting/__init__.py b/despasito/parameter_fitting/__init__.py index a70d155..6c3710c 100644 --- a/despasito/parameter_fitting/__init__.py +++ b/despasito/parameter_fitting/__init__.py @@ -131,6 +131,15 @@ def fit( if key not in exp_data[k2]: exp_data[k2][key] = value + if not isinstance(optimization_parameters["fit_parameter_names"], list): + if isinstance(optimization_parameters["fit_parameter_names"], str): + optimization_parameters["fit_parameter_names"] = [ + optimization_parameters["fit_parameter_names"] + ] + else: + raise ValueError( + f"'fit_parameter_names' must be a list not: {optimization_parameters["fit_parameter_names"]}") + # Generate initial guess and bounds for parameters if none was given optimization_parameters = ff.consolidate_bounds(optimization_parameters).copy() if "bounds" in optimization_parameters: diff --git a/despasito/parameter_fitting/fit_functions.py b/despasito/parameter_fitting/fit_functions.py index 670527d..071036e 100644 --- a/despasito/parameter_fitting/fit_functions.py +++ b/despasito/parameter_fitting/fit_functions.py @@ -529,6 +529,7 @@ def compute_obj( else: logger.info("One of provided parameters, {}, is NaN".format(beadparams)) obj_total = np.inf + obj_function = np.nan if obj_total == 0.0 and np.isnan(np.sum(obj_function)): obj_total = np.inf diff --git a/despasito/parameter_fitting/global_methods.py b/despasito/parameter_fitting/global_methods.py index 4754097..e18d9d9 100644 --- a/despasito/parameter_fitting/global_methods.py +++ b/despasito/parameter_fitting/global_methods.py @@ -448,7 +448,6 @@ def grid_minimization( ] lx = len(x0_array) - # Start computation if flag_use_mp_object: x0, results, fval = global_opts["MultiprocessingObject"].pool_job( @@ -742,12 +741,7 @@ def _grid_minimization_wrapper(args): ) except Exception: logger.info("Minimization Failed:", exc_info=True) - result = np.nan * np.ones(len(x0)) - - # Return NaN if the parameters didn't change - if np.sum(np.abs(result - x0)) < 1e-6: - logger.info("Minimization Failed:", exc_info=True) - result = np.nan * np.ones(len(x0)) + result = np.nan * np.ones(len(x0)) if gtb.isiterable(x0) else np.nan logger.info("Starting parameters: {}, converged to: {}".format(x0, result)) diff --git a/despasito/thermodynamics/calc.py b/despasito/thermodynamics/calc.py index d5dc0d2..18cb208 100644 --- a/despasito/thermodynamics/calc.py +++ b/despasito/thermodynamics/calc.py @@ -8,7 +8,7 @@ import numpy as np from scipy import interpolate import scipy.optimize as spo -from scipy.ndimage.filters import gaussian_filter1d +from scipy.ndimage import gaussian_filter1d import copy import logging diff --git a/despasito/utils/general_toolbox.py b/despasito/utils/general_toolbox.py index aaeb843..a9a302d 100644 --- a/despasito/utils/general_toolbox.py +++ b/despasito/utils/general_toolbox.py @@ -97,14 +97,10 @@ def solve_root(func, args=(), method="bisect", x0=None, bounds=None, options={}) ) method = "least_squares" - if ( - np.size(x0) == 1 - and np.any(bounds is not None) - and np.shape(x0) != np.shape(bounds)[0] - ): - bounds = tuple([bounds]) - - if np.any(bounds is 
None) and method in ["brentq", "bisect"]: + if not isiterable(bounds[0]): + bounds = [bounds] + + if np.any([np.any(x is None) for x in bounds]) and method in ["brentq", "bisect"]: if x0 is None: raise ValueError( "Optimization method, {}, requires bounds. ".format(method) @@ -117,9 +113,11 @@ def solve_root(func, args=(), method="bisect", x0=None, bounds=None, options={}) method = "hybr" if np.any(bounds is not None): - for bnd in bounds: + for i,bnd in enumerate(bounds): if len(bnd) != 2: raise ValueError("bounds are not of length two") + else: + bounds[i] = tuple(bnd) # ################### Root Finding without Boundaries ################### if method in ["broyden1", "broyden2"]: diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..03e0499 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,104 @@ +[build-system] +requires = ["setuptools>=61.0", "versioningit~=2.0", "cython", "numpy"] +build-backend = "setuptools.build_meta" + +[project] +name = "despasito" +description = "Determining Equilibrium State and Parametrization Application for SAFT, Intended for Thermodynamic Output" +dynamic = ["version"] +readme = "README.md" + +authors = [ + { name = "Jennifer A. Clark", email = "jennifer.clark@gnarlyoak.com" }, + { name = "Nathan Duff" } +] + +# See https://pypi.org/classifiers/ +classifiers = [ + "License :: OSI Approved :: The 3-Clause BSD License (BSD-3-Clause)", + "Programming Language :: Python :: 3", +] +requires-python = ">=3.8" +dependencies = [ + "numpy", + "scipy", + "numba", + "flake8" +] + +[project.optional-dependencies] +test = ["pytest>=6.1.2"] +docs = [ + "sphinx", + "sphinx_rtd_theme", + "sphinx-argparse", + "m2r2", + "sphinxcontrib-napoleon", +] + +# Update the urls once the hosting is set up. +[project.urls] +"GitHub Source" = "https://github.com/Santiso-Group/despasito" +"Documentation" = "https://despasito.readthedocs.io/en/latest/" + +[tool.setuptools] +zip-safe = false +include-package-data = false +license-files = ["LICENSES/*.md"] + +[tool.setuptools.packages.find] +namespaces = false +where = ["."] + +[tool.setuptools.package-data] +despasito = [ + "py.typed" +] + +[tool.versioningit] +default-version = "1+unknown" + +[tool.versioningit.format] +distance = "{base_version}+{distance}.{vcs}{rev}" +dirty = "{base_version}+{distance}.{vcs}{rev}.dirty" +distance-dirty = "{base_version}+{distance}.{vcs}{rev}.dirty" + +[tool.versioningit.vcs] +# The method key: +method = "git" +# Parameters to pass to the method: +match = ["*"] +default-tag = "1.0.0" + +[tool.versioningit.write] +file = "src/despasito/_version.py" + +[tool.black] +line-length = 120 + +[tool.ruff] +line-length = 120 +indent-width = 4 +exclude = [ + ".eggs", + ".git", + ".git-rewrite", + ".ipynb_checkpoints", + ".mypy_cache", + ".pyenv", + ".pytest_cache", + ".pytype", + ".ruff_cache", + ".venv", + ".vscode", + "__pypackages__", + "_build", + "build", + "dist", + "site-packages", + "venv", +] + +[tool.ruff.format] +indent-style = "space" +line-ending = "auto" diff --git a/setup.py b/setup.py index f6fbbc2..c809a99 100644 --- a/setup.py +++ b/setup.py @@ -2,10 +2,9 @@ DESPASITO DESPASITO: Determining Equilibrium State and Parametrization Application for SAFT, Intended for Thermodynamic Output """ -import sys + import os -from setuptools import find_packages, Extension, setup -import versioneer +from setuptools import Extension, setup import glob import numpy as np @@ -40,34 +39,4 @@ "boundscheck": True })) -# from https://github.com/pytest-dev/pytest-runner#conditional-requirement 
-needs_pytest = {"pytest", "test", "ptr"}.intersection(sys.argv) -pytest_runner = ["pytest-runner"] if needs_pytest else [] - -setup( - name="despasito", - author="Jennifer A Clark", - author_email="jennifer.clark@gnarlyoak.com", - description=short_description[0], - long_description_content_type="text/markdown", - version=versioneer.get_version(), - cmdclass=versioneer.get_cmdclass(), - license="BSD-3-Clause", - packages=find_packages(), - include_package_data=True, - setup_requires=["numpy", "scipy",] + pytest_runner, - ext_package=fpath, - ext_modules=extensions, - extras_require={ - "extra": ["cython"], - "tests": ["pytest"], - }, - install_requires=[ - "numpy", - "scipy", - "numba", - "flake8" - ], - python_requires=">=3.6", - zip_safe=False, -) +setup(ext_modules=extensions) diff --git a/src/despasito/_version.py b/src/despasito/_version.py new file mode 100644 index 0000000..6b61516 --- /dev/null +++ b/src/despasito/_version.py @@ -0,0 +1 @@ +__version__ = "0.3.0+15.g6e200c7.dirty" From e7299801e2cfb9a1b23c1cb9918fc5242680b76e Mon Sep 17 00:00:00 2001 From: "Jennifer A. Clark" Date: Sun, 22 Dec 2024 17:52:26 -0500 Subject: [PATCH 3/8] Update toml setup compilation of cython --- .github/workflows/CI.yaml | 2 +- despasito/_version.py | 557 +------------------------------- despasito/data/README.md | 21 -- despasito/data/look_and_say.dat | 15 - pyproject.toml | 4 +- setup.py | 54 ++-- src/despasito/_version.py | 1 - 7 files changed, 29 insertions(+), 625 deletions(-) delete mode 100644 despasito/data/README.md delete mode 100644 despasito/data/look_and_say.dat delete mode 100644 src/despasito/_version.py diff --git a/.github/workflows/CI.yaml b/.github/workflows/CI.yaml index 4895237..1088752 100644 --- a/.github/workflows/CI.yaml +++ b/.github/workflows/CI.yaml @@ -38,7 +38,7 @@ jobs: ulimit -a # More info on options: https://github.com/conda-incubator/setup-miniconda - - uses: conda-incubator/setup-miniconda@v3 + - uses: conda-incubator/setup-miniconda@v4 with: python-version: ${{ matrix.python-version }} environment-file: devtools/conda-envs/test_env.yaml diff --git a/despasito/_version.py b/despasito/_version.py index 734274c..593a34f 100644 --- a/despasito/_version.py +++ b/despasito/_version.py @@ -1,556 +1 @@ -# This file helps to compute a version number in source trees obtained from -# git-archive tarball (such as those provided by githubs download-from-tag -# feature). Distribution tarballs (built by setup.py sdist) and build -# directories (produced by setup.py build) will contain a much shorter file -# that just contains the computed version number. - -# This file is released into the public domain. Generated by -# versioneer-0.18 (https://github.com/warner/python-versioneer) - -"""Git implementation of _version.py.""" - -import errno -import os -import re -import subprocess -import sys - - -def get_keywords(): - """Get the keywords needed to look up the version information.""" - # these strings will be replaced by git during git-archive. - # setup.py/versioneer.py will grep for the variable names, so they must - # each be defined on a line of their own. _version.py will just call - # get_keywords(). 
- git_refnames = "$Format:%d$" - git_full = "$Format:%H$" - git_date = "$Format:%ci$" - keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} - return keywords - - -class VersioneerConfig: - """Container for Versioneer configuration parameters.""" - - -def get_config(): - """Create, populate and return the VersioneerConfig() object.""" - # these strings are filled in when 'setup.py versioneer' creates - # _version.py - cfg = VersioneerConfig() - cfg.VCS = "git" - cfg.style = "pep440" - cfg.tag_prefix = "" - cfg.parentdir_prefix = "None" - cfg.versionfile_source = "despasito/_version.py" - cfg.verbose = False - return cfg - - -class NotThisMethod(Exception): - """Exception raised if a method is not valid for the current scenario.""" - - -LONG_VERSION_PY = {} -HANDLERS = {} - - -def register_vcs_handler(vcs, method): # decorator - """Decorator to mark a method as the handler for a particular VCS.""" - - def decorate(f): - """Store f in HANDLERS[vcs][method].""" - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f - return f - - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): - """Call the given command(s).""" - assert isinstance(commands, list) - p = None - for c in commands: - try: - dispcmd = str([c] + args) - # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen( - [c] + args, - cwd=cwd, - env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr else None), - ) - break - except EnvironmentError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %s" % dispcmd) - print(e) - return None, None - else: - if verbose: - print("unable to find command, tried %s" % (commands,)) - return None, None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() - if p.returncode != 0: - if verbose: - print("unable to run %s (error)" % dispcmd) - print("stdout was %s" % stdout) - return None, p.returncode - return stdout, p.returncode - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - """Try to determine the version from the parent directory name. - - Source tarballs conventionally unpack into a directory that includes both - the project name and a version string. We will also support searching up - two directory levels for an appropriately named parent directory - """ - rootdirs = [] - - for i in range(3): - dirname = os.path.basename(root) - if dirname.startswith(parentdir_prefix): - return { - "version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, - "error": None, - "date": None, - } - else: - rootdirs.append(root) - root = os.path.dirname(root) # up a level - - if verbose: - print( - "Tried directories %s but none started with prefix %s" - % (str(rootdirs), parentdir_prefix) - ) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - """Extract version information from the given file.""" - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. 
- keywords = {} - try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - f.close() - except EnvironmentError: - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - """Get version information from git keywords.""" - if not keywords: - raise NotThisMethod("no keywords at all, weird") - date = keywords.get("date") - if date is not None: - # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant - # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 - # -like" string, which we must then edit to make compliant), because - # it's been around since git-1.5.3, and it's too difficult to - # discover which version we're using, or to work around using an - # older one. - date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. The old git %d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r"\d", r)]) - if verbose: - print("discarding '%s', no digits" % ",".join(refs - tags)) - if verbose: - print("likely tags: %s" % ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. "2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] - if verbose: - print("picking %s" % r) - return { - "version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, - "error": None, - "date": date, - } - # no suitable tags, so version is "0+unknown", but full hex is still there - if verbose: - print("no suitable tags, using unknown + full revision id") - return { - "version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, - "error": "no suitable tags", - "date": None, - } - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): - """Get version from 'git describe' in the root of the source tree. - - This only gets called if the git-archive 'subst' keywords were *not* - expanded, and _version.py hasn't already been rewritten with a short - version string, meaning we're inside a checked out source tree. 
- """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) - if rc != 0: - if verbose: - print("Directory %s not under git control" % root) - raise NotThisMethod("'git rev-parse --git-dir' returned error") - - # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] - # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command( - GITS, - [ - "describe", - "--tags", - "--dirty", - "--always", - "--long", - "--match", - "%s*" % tag_prefix, - ], - cwd=root, - ) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:7] # maybe improved later - pieces["error"] = None - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[: git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) - if not mo: - # unparseable. Maybe git-describe is misbehaving? - pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%s' doesn't start with prefix '%s'" - print(fmt % (full_tag, tag_prefix)) - pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( - full_tag, - tag_prefix, - ) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) - pieces["distance"] = int(count_out) # total number of commits - - # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[ - 0 - ].strip() - pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - - return pieces - - -def plus_or_dot(pieces): - """Return a + if we don't already have one, else return a .""" - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - """Build up version string, with post-release "local version identifier". - - Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - Exceptions: - 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_pre(pieces): - """TAG[.post.devDISTANCE] -- No -dirty. - - Exceptions: - 1: no tags. 0.post.devDISTANCE - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += ".post.dev%d" % pieces["distance"] - else: - # exception #1 - rendered = "0.post.dev%d" % pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX] . - - The ".dev0" means dirty. Note that .dev0 sorts backwards - (a dirty tree will appear "older" than the corresponding clean one), - but you shouldn't be releasing software with -dirty anyways. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%s" % pieces["short"] - return rendered - - -def render_pep440_old(pieces): - """TAG[.postDISTANCE[.dev0]] . - - The ".dev0" means dirty. - - Eexceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - """TAG[-DISTANCE-gHEX][-dirty]. - - Like 'git describe --tags --dirty --always'. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - """TAG-DISTANCE-gHEX[-dirty]. - - Like 'git describe --tags --dirty --always -long'. - The distance/hash is unconditional. - - Exceptions: - 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - """Render the given version pieces into the requested style.""" - if pieces["error"]: - return { - "version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None, - } - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%s'" % style) - - return { - "version": rendered, - "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], - "error": None, - "date": pieces.get("date"), - } - - -def get_versions(): - """Get version information or return default if unable to do so.""" - # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have - # __file__, we can work backwards from there to the root. Some - # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which - # case we can only use expanded keywords. - - cfg = get_config() - verbose = cfg.verbose - - try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) - except NotThisMethod: - pass - - try: - root = os.path.realpath(__file__) - # versionfile_source is the relative path from the top of the source - # tree (where the .git directory might live) to this file. Invert - # this to find the root from __file__. - for i in cfg.versionfile_source.split("/"): - root = os.path.dirname(root) - except NameError: - return { - "version": "0+unknown", - "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree", - "date": None, - } - - try: - pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) - return render(pieces, cfg.style) - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - except NotThisMethod: - pass - - return { - "version": "0+unknown", - "full-revisionid": None, - "dirty": None, - "error": "unable to compute version", - "date": None, - } +__version__ = "0.3.0+16.gcc3fbe0.dirty" diff --git a/despasito/data/README.md b/despasito/data/README.md deleted file mode 100644 index d465147..0000000 --- a/despasito/data/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# Sample Package Data - -This directory contains sample additional data you may want to include with your package. -This is a place where non-code related additional information (such as data files, molecular structures, etc.) can -go that you want to ship alongside your code. - -Please note that it is not recommended to place large files in your git directory. If your project requires files larger -than a few megabytes in size it is recommended to host these files elsewhere. 
This is especially true for binary files -as the `git` structure is unable to correctly take updates to these files and will store a complete copy of every version -in your `git` history which can quickly add up. As a note most `git` hosting services like GitHub have a 1 GB per repository -cap. - -## Including package data - -Modify your package's `setup.py` file and the `setup()` command. Include the -[`package_data`](http://setuptools.readthedocs.io/en/latest/setuptools.html#basic-use) keyword and point it at the -correct files. - -## Manifest - -* `look_and_say.dat`: first entries of the "Look and Say" integer series, sequence [A005150](https://oeis.org/A005150) diff --git a/despasito/data/look_and_say.dat b/despasito/data/look_and_say.dat deleted file mode 100644 index 97df452..0000000 --- a/despasito/data/look_and_say.dat +++ /dev/null @@ -1,15 +0,0 @@ -1 -11 -21 -1211 -111221 -312211 -13112221 -1113213211 -31131211131221 -13211311123113112211 -11131221133112132113212221 -3113112221232112111312211312113211 -1321132132111213122112311311222113111221131221 -11131221131211131231121113112221121321132132211331222113112211 -311311222113111231131112132112311321322112111312211312111322212311322113212221 \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 03e0499..d3bd0e5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [build-system] -requires = ["setuptools>=61.0", "versioningit~=2.0", "cython", "numpy"] +requires = ["setuptools>=61.0", "numpy >= 1.15", "versioningit~=2.0", "wheel", "Cython>=0.29"] build-backend = "setuptools.build_meta" [project] @@ -71,7 +71,7 @@ match = ["*"] default-tag = "1.0.0" [tool.versioningit.write] -file = "src/despasito/_version.py" +file = "despasito/_version.py" [tool.black] line-length = 120 diff --git a/setup.py b/setup.py index c809a99..d7fba8d 100644 --- a/setup.py +++ b/setup.py @@ -4,39 +4,35 @@ """ import os -from setuptools import Extension, setup import glob +from setuptools import Extension, setup + + import numpy as np +from Cython.Build import cythonize -short_description = __doc__.split("\n") fpath = os.path.join("despasito", "equations_of_state", "saft", "compiled_modules") extensions = [] -try: - from Cython.Build import cythonize - flag_cython = True -except Exception: - print( - 'Cython not available on your system. Dependencies will be run with numba.' 
-    )
-    flag_cython = False
-
-if flag_cython:
-    cython_list = glob.glob(os.path.join(fpath, "*.pyx"))
-    for cyext in cython_list:
-        name = os.path.split(cyext)[-1].split(".")[-2]
-        cy_ext_1 = Extension(
-            name=name,
-            sources=[cyext],
-            include_dirs=[fpath, np.get_include()],
-        )
-        extensions.extend(
-            cythonize(
-                [cy_ext_1],
-                compiler_directives={
-                    'language_level': 3,
-                    'cdivision': False,
-                    "boundscheck": True
-                }))
+cython_list = glob.glob(os.path.join(fpath, "*.pyx"))
+for cyext in cython_list:
+    name = os.path.split(cyext)[-1].split(".")[-2]
+    cy_ext_1 = Extension(
+        name=os.path.join(fpath,name).replace(os.sep,"."),
+        sources=[cyext],
+        include_dirs=[fpath, np.get_include()],
+    )
+    extensions.extend(
+        cythonize(
+            [cy_ext_1],
+            compiler_directives={
+                'language_level': 3,
+                'cdivision': False,
+                "boundscheck": True
+            }))
-setup(ext_modules=extensions)
+if __name__ == "__main__":
+    setup(
+        name='despasito',
+        ext_modules=extensions,
+    )
diff --git a/src/despasito/_version.py b/src/despasito/_version.py
deleted file mode 100644
index 6b61516..0000000
--- a/src/despasito/_version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.3.0+15.g6e200c7.dirty"

From a15dad4ead1085b05bdadc7050b42567a08010b3 Mon Sep 17 00:00:00 2001
From: "Jennifer A. Clark"
Date: Sun, 22 Dec 2024 18:04:35 -0500
Subject: [PATCH 4/8] Update ci

---
 .github/workflows/CI.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/CI.yaml b/.github/workflows/CI.yaml
index 1088752..4895237 100644
--- a/.github/workflows/CI.yaml
+++ b/.github/workflows/CI.yaml
@@ -38,7 +38,7 @@ jobs:
         ulimit -a

     # More info on options: https://github.com/conda-incubator/setup-miniconda
-    - uses: conda-incubator/setup-miniconda@v4
+    - uses: conda-incubator/setup-miniconda@v3
      with:
        python-version: ${{ matrix.python-version }}
        environment-file: devtools/conda-envs/test_env.yaml

From 810e538763cf58990fd1158b2a82cb6f23c2229c Mon Sep 17 00:00:00 2001
From: "Jennifer A. Clark"
Date: Sun, 22 Dec 2024 18:20:16 -0500
Subject: [PATCH 5/8] bug fix error, parameter_fitting

---
 despasito/parameter_fitting/__init__.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/despasito/parameter_fitting/__init__.py b/despasito/parameter_fitting/__init__.py
index 6c3710c..8dd922b 100644
--- a/despasito/parameter_fitting/__init__.py
+++ b/despasito/parameter_fitting/__init__.py
@@ -138,7 +138,8 @@ def fit(
             ]
         else:
             raise ValueError(
-                f"'fit_parameter_names' must be a list not: {optimization_parameters["fit_parameter_names"]}")
+                f"'fit_parameter_names' must be a list not: {optimization_parameters['fit_parameter_names']}"
+            )

     # Generate initial guess and bounds for parameters if none was given
     optimization_parameters = ff.consolidate_bounds(optimization_parameters).copy()

From ed6cb0b4129a140395d8b081dab47bdff82913fd Mon Sep 17 00:00:00 2001
From: "Jennifer A. Clark"
Date: Sun, 22 Dec 2024 18:36:26 -0500
Subject: [PATCH 6/8] import version

---
 despasito/__init__.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/despasito/__init__.py b/despasito/__init__.py
index 8aac11f..a80bef8 100644
--- a/despasito/__init__.py
+++ b/despasito/__init__.py
@@ -7,9 +7,9 @@
 import os
 import logging
 import logging.handlers
-from importlib.metadata import version
+from pkg_resources import get_distribution

-__version__ = version("despasito")
+__version__ = get_distribution("despasito")._version

 logger = logging.getLogger("despasito")
 logger.setLevel(30)

From e5ffa36ef51ad60f14ebb107e1cd93fa7ecda641 Mon Sep 17 00:00:00 2001
From: "Jennifer A. Clark"
Date: Sun, 22 Dec 2024 18:36:26 -0500
Subject: [PATCH 7/8] import version

---
 despasito/__init__.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/despasito/__init__.py b/despasito/__init__.py
index a80bef8..0d33f82 100644
--- a/despasito/__init__.py
+++ b/despasito/__init__.py
@@ -7,9 +7,7 @@
 import os
 import logging
 import logging.handlers
-from pkg_resources import get_distribution
-
-__version__ = get_distribution("despasito")._version
+from ._version import __version__ as __version__

 logger = logging.getLogger("despasito")
 logger.setLevel(30)

From 742c602315e3bcd274786c7e4f29e7d7ec0e227a Mon Sep 17 00:00:00 2001
From: "Jennifer A. Clark"
Date: Sun, 22 Dec 2024 19:06:53 -0500
Subject: [PATCH 8/8] black

---
 .github/workflows/CI.yaml | 2 +-
 despasito/__init__.py | 9 +-
 despasito/__main__.py | 8 +-
 despasito/_version.py | 2 +-
 despasito/equations_of_state/__init__.py | 28 +-
 .../combining_rule_types.py | 34 +-
 .../equations_of_state/cubic/peng_robinson.py | 60 +-
 despasito/equations_of_state/eos_toolbox.py | 38 +-
 despasito/equations_of_state/interface.py | 54 +-
 despasito/equations_of_state/saft/Aassoc.py | 96 +-
 despasito/equations_of_state/saft/Aideal.py | 4 +-
 .../saft/compiled_modules/ext_Aassoc_numba.py | 20 +-
 .../compiled_modules/ext_Aassoc_python.py | 14 +-
 .../compiled_modules/ext_gamma_mie_numba.py | 142 +--
 .../compiled_modules/ext_gamma_mie_python.py | 134 +--
 .../equations_of_state/saft/gamma_mie.py | 232 ++---
 despasito/equations_of_state/saft/gamma_sw.py | 135 +--
 despasito/equations_of_state/saft/saft.py | 83 +-
 .../equations_of_state/saft/saft_toolbox.py | 39 +-
 .../saft/saft_variant_example.py | 44 +-
 .../butane_solubility/fit_grid.py | 25 +-
 despasito/input_output/read_input.py | 26 +-
 despasito/input_output/write_output.py | 7 +-
 despasito/main.py | 27 +-
 despasito/parameter_fitting/__init__.py | 23 +-
 .../parameter_fitting/data_classes/TLVE.py | 44 +-
 .../parameter_fitting/data_classes/flash.py | 31 +-
 .../data_classes/liquid_density.py | 20 +-
 .../data_classes/saturation_properties.py | 41 +-
 .../data_classes/solubility_parameter.py | 31 +-
 despasito/parameter_fitting/fit_functions.py | 90 +-
 despasito/parameter_fitting/global_methods.py | 132 +--
 despasito/parameter_fitting/interface.py | 11 +-
 despasito/tests/test_fit_pure.py | 10 +-
 despasito/tests/test_peng_robinson.py | 6 +-
 despasito/tests/test_saft_gamma_mie.py | 18 +-
 despasito/tests/test_saft_gamma_sw.py | 8 +-
 despasito/tests/test_thermo.py | 50 +-
 despasito/thermodynamics/calc.py | 888 ++++--------------
 despasito/thermodynamics/calculation_types.py | 178 +---
 despasito/utils/general_toolbox.py | 94 +-
 despasito/utils/parallelization.py | 8 +-
 setup.cfg | 16 +-
 43 files changed, 715 insertions(+), 2247 deletions(-)

diff --git a/.github/workflows/CI.yaml b/.github/workflows/CI.yaml
index 
4895237..a4a1163 100644 --- a/.github/workflows/CI.yaml +++ b/.github/workflows/CI.yaml @@ -80,7 +80,7 @@ jobs: - name: Flake8 shell: bash -l {0} run: | - python -m flake8 despasito --count --ignore=E741,W503 --max-line-length=92 --show-source --statistics + python -m flake8 despasito --count --ignore=E741,W503 --max-line-length=120 --per-file-ignores="__init__.py:F401" --show-source --statistics paper: runs-on: ubuntu-latest diff --git a/despasito/__init__.py b/despasito/__init__.py index 0d33f82..a2a9635 100644 --- a/despasito/__init__.py +++ b/despasito/__init__.py @@ -53,9 +53,7 @@ def initiate_logger(console=None, log_file=None, verbose=30): # Set up logging to console if console and handler_console is None: console_handler = logging.StreamHandler() # sys.stderr - console_handler.setFormatter( - logging.Formatter("[%(levelname)s](%(name)s): %(message)s") - ) + console_handler.setFormatter(logging.Formatter("[%(levelname)s](%(name)s): %(message)s")) console_handler.setLevel(verbose) logger.addHandler(console_handler) elif console: @@ -75,10 +73,7 @@ def initiate_logger(console=None, log_file=None, verbose=30): log_file_handler = logging.handlers.RotatingFileHandler(log_file) log_file_handler.setFormatter( - logging.Formatter( - "%(asctime)s [%(levelname)s](%(name)s:%(funcName)s:%(lineno)d): " - + "%(message)s" - ) + logging.Formatter("%(asctime)s [%(levelname)s](%(name)s:%(funcName)s:%(lineno)d): " + "%(message)s") ) log_file_handler.setLevel(verbose) logger.addHandler(log_file_handler) diff --git a/despasito/__main__.py b/despasito/__main__.py index a984a0f..f90d937 100644 --- a/despasito/__main__.py +++ b/despasito/__main__.py @@ -27,9 +27,7 @@ os.remove(args.logFile) log_file_handler = logging.handlers.RotatingFileHandler(args.logFile) log_file_handler.setFormatter( - logging.Formatter( - "%(asctime)s [%(levelname)s](%(name)s:%(funcName)s:%(lineno)d): %(message)s" - ) + logging.Formatter("%(asctime)s [%(levelname)s](%(name)s:%(funcName)s:%(lineno)d): %(message)s") ) log_file_handler.setLevel(args.verbose) logger.addHandler(log_file_handler) @@ -37,9 +35,7 @@ if args.console: # Set up logging to console console_handler = logging.StreamHandler() # sys.stderr - console_handler.setFormatter( - logging.Formatter("[%(levelname)s](%(name)s): %(message)s") - ) + console_handler.setFormatter(logging.Formatter("[%(levelname)s](%(name)s): %(message)s")) console_handler.setLevel(args.verbose) logger.addHandler(console_handler) diff --git a/despasito/_version.py b/despasito/_version.py index 593a34f..0180397 100644 --- a/despasito/_version.py +++ b/despasito/_version.py @@ -1 +1 @@ -__version__ = "0.3.0+16.gcc3fbe0.dirty" +__version__ = "0.3.0+28.ge5ffa36.dirty" diff --git a/despasito/equations_of_state/__init__.py b/despasito/equations_of_state/__init__.py index e76043d..4d715c3 100644 --- a/despasito/equations_of_state/__init__.py +++ b/despasito/equations_of_state/__init__.py @@ -25,9 +25,7 @@ def __init__(self, numba=True, cython=False, python=False): def __str__(self): - string = "Compilation: numba {}, cython {}, python {}".format( - self.numba, self.cython, self.python - ) + string = "Compilation: numba {}, cython {}, python {}".format(self.numba, self.cython, self.python) return string @@ -35,9 +33,7 @@ def __str__(self): logger = logging.getLogger(__name__) -def initiate_eos( - eos="saft.gamma_mie", numba=True, cython=False, python=False, **kwargs -): +def initiate_eos(eos="saft.gamma_mie", numba=True, cython=False, python=False, **kwargs): """ Interface between the user and our library 
of equations of state (EOS). @@ -69,9 +65,7 @@ def initiate_eos( kwargs["method_stat"] = method_stat(numba=numba, cython=cython, python=python) - factory_families = [ - "saft" - ] # Eos families in this list have a general object with a factory to import + factory_families = ["saft"] # Eos families in this list have a general object with a factory to import # relevant modules logger.info("Using EOS: {}".format(eos)) @@ -79,29 +73,21 @@ def initiate_eos( try: eos_fam, eos_type = eos.split(".") except Exception: - raise ValueError( - "Input should be in the form EOSfamily.EOSname (e.g. saft.gamme_mie)." - ) + raise ValueError("Input should be in the form EOSfamily.EOSname (e.g. saft.gamme_mie).") class_name = "EosType" try: if eos_fam in factory_families: - eos_module = import_module( - "." + eos_fam, package="despasito.equations_of_state." + eos_fam - ) + eos_module = import_module("." + eos_fam, package="despasito.equations_of_state." + eos_fam) kwargs["saft_name"] = eos_type else: - eos_module = import_module( - "." + eos_type, package="despasito.equations_of_state." + eos_fam - ) + eos_module = import_module("." + eos_type, package="despasito.equations_of_state." + eos_fam) eos_class = getattr(eos_module, class_name) except AttributeError: raise ImportError( "Based on your input, '{}', we expect the class, {}, in a module, {}," - " found in the package, {}, which indicates the EOS family.".format( - eos, class_name, eos_type, eos_fam - ) + " found in the package, {}, which indicates the EOS family.".format(eos, class_name, eos_type, eos_fam) ) instance = eos_class(**kwargs) diff --git a/despasito/equations_of_state/combining_rule_types.py b/despasito/equations_of_state/combining_rule_types.py index e7955b0..e21f991 100644 --- a/despasito/equations_of_state/combining_rule_types.py +++ b/despasito/equations_of_state/combining_rule_types.py @@ -82,11 +82,7 @@ def volumetric_geometric_mean(beadA, beadB, parameter, weighting_parameters=[]): tmp1 = np.sqrt(beadA[parameter] * beadB[parameter]) param2 = weighting_parameters[0] - tmp2 = ( - np.sqrt((beadA[param2] ** 3) * (beadB[param2] ** 3)) - * 8 - / ((beadA[param2] + beadB[param2]) ** 3) - ) + tmp2 = np.sqrt((beadA[param2] ** 3) * (beadB[param2] ** 3)) * 8 / ((beadA[param2] + beadB[param2]) ** 3) return tmp1 * tmp2 @@ -115,9 +111,9 @@ def weighted_mean(beadA, beadB, parameter, weighting_parameters=[]): """ param2 = weighting_parameters[0] - parameter12 = ( - beadA[parameter] * beadA[param2] + beadB[parameter] * beadB[param2] - ) / (beadA[param2] + beadB[param2]) + parameter12 = (beadA[parameter] * beadA[param2] + beadB[parameter] * beadB[param2]) / ( + beadA[param2] + beadB[param2] + ) return parameter12 @@ -173,23 +169,15 @@ def square_well_berthelot(beadA, beadB, parameter, weighting_parameters=[]): param2, param3 = weighting_parameters[0], weighting_parameters[1] tmp1 = np.sqrt(beadA[parameter] * beadB[parameter]) - tmp2 = ( - np.sqrt((beadA[param2] ** 3) * (beadB[param2] ** 3)) - * 8 - / ((beadA[param2] + beadB[param2]) ** 3) - ) + tmp2 = np.sqrt((beadA[param2] ** 3) * (beadB[param2] ** 3)) * 8 / ((beadA[param2] + beadB[param2]) ** 3) param3_12 = weighted_mean(beadA, beadB, param3, weighting_parameters=[param2]) - tmp3 = np.sqrt((beadA[param3] ** 3 - 1) * (beadB[param3] ** 3 - 1)) / ( - param3_12**3 - 1 - ) + tmp3 = np.sqrt((beadA[param3] ** 3 - 1) * (beadB[param3] ** 3 - 1)) / (param3_12**3 - 1) return tmp1 * tmp2 * tmp3 -def multipole( - beadA, beadB, parameter, temperature=None, mode="curve fit", scaled=False -): +def multipole(beadA, 
beadB, parameter, temperature=None, mode="curve fit", scaled=False): r""" Calculates cross interaction parameter with the multipole combining rules from the plug-in `MAPSCI `_. @@ -239,17 +227,13 @@ def multipole( tmp[key]["sigma"] = value["sigma"] * 10 # convert from nm to angstroms if mode == "curve fit": - dict_cross, _ = mr.extended_combining_rules_fitting( - tmp, temperature, shape_factor_scale=shape_factor_scale - ) + dict_cross, _ = mr.extended_combining_rules_fitting(tmp, temperature, shape_factor_scale=shape_factor_scale) elif mode == "analytical": dict_cross, _ = mr.extended_combining_rules_analytical( tmp, temperature, shape_factor_scale=shape_factor_scale ) else: - raise ValueError( - "Multipole mixing rule must be either 'curve fit' or 'analytical'." - ) + raise ValueError("Multipole mixing rule must be either 'curve fit' or 'analytical'.") output = dict_cross["beadA"]["beadB"] else: logger.warning("Temperature is None, using geometric mean.") diff --git a/despasito/equations_of_state/cubic/peng_robinson.py b/despasito/equations_of_state/cubic/peng_robinson.py index 70345ca..23371e5 100644 --- a/despasito/equations_of_state/cubic/peng_robinson.py +++ b/despasito/equations_of_state/cubic/peng_robinson.py @@ -94,25 +94,14 @@ def __init__(self, **kwargs): self._test_critical = [False for _ in self.beads] self._test_parameters = [False for _ in self.beads] for i, bead in enumerate(self.beads): - if ( - "omega" in self.bead_library[bead] - and "kappa" not in self.bead_library[bead] - ): + if "omega" in self.bead_library[bead] and "kappa" not in self.bead_library[bead]: self._test_kappa[i] = True - self._test_critical[i] = ( - "Tc" in self.bead_library[bead] and "Pc" in self.bead_library[bead] - ) - self._test_parameters[i] = ( - "ai" in self.bead_library[bead] and "bi" in self.bead_library[bead] - ) + self._test_critical[i] = "Tc" in self.bead_library[bead] and "Pc" in self.bead_library[bead] + self._test_parameters[i] = "ai" in self.bead_library[bead] and "bi" in self.bead_library[bead] if not self._test_critical[i] and not self._test_parameters[i]: - raise ValueError( - "Either 'Tc' or 'Pc' was not provided for component: {}".format( - bead - ) - ) + raise ValueError("Either 'Tc' or 'Pc' was not provided for component: {}".format(bead)) # Cross interaction parameters if "cross_library" in kwargs: @@ -155,9 +144,7 @@ def _calc_temp_dependent_parameters(self, T): for i, bead in enumerate(self.beads): if "kappa" in self.bead_library[bead]: self.eos_dict["alpha"][i] = ( - 1 - + self.bead_library[bead]["kappa"] - * (1 - np.sqrt(T / self.bead_library[bead]["Tc"])) + 1 + self.bead_library[bead]["kappa"] * (1 - np.sqrt(T / self.bead_library[bead]["Tc"])) ) ** 2 else: self.eos_dict["alpha"][i] = 1.0 @@ -235,11 +222,8 @@ def pressure(self, rho, T, xi): elif not isinstance(rho, np.ndarray): rho = np.array(rho) - P = constants.R * self.T * rho / ( - 1 - self.eos_dict["bij"] * rho - ) - rho**2 * self.eos_dict["aij"] / ( - (1 + self.eos_dict["bij"] * rho) - + rho * self.eos_dict["bij"] * (1 - self.eos_dict["bij"] * rho) + P = constants.R * self.T * rho / (1 - self.eos_dict["bij"] * rho) - rho**2 * self.eos_dict["aij"] / ( + (1 + self.eos_dict["bij"] * rho) + rho * self.eos_dict["bij"] * (1 - self.eos_dict["bij"] * rho) ) return P @@ -296,11 +280,7 @@ def fugacity_coefficient(self, P, rho, xi, T): A = self.eos_dict["aij"] * P / tmp_RT**2 sqrt2 = np.sqrt(2.0) - tmp1 = ( - A - / (2.0 * sqrt2 * B) - * np.log((Z + (1 + sqrt2) * B) / (Z + (1 - sqrt2) * B)) - ) + tmp1 = A / (2.0 * sqrt2 * B) * 
np.log((Z + (1 + sqrt2) * B) / (Z + (1 - sqrt2) * B)) tmp3 = Bi * (Z - 1) / B - np.log(Z - B) tmp2 = np.zeros(len(xi)) @@ -363,15 +343,8 @@ def update_parameter(self, param_name, bead_names, param_value): Value of parameter """ - if ( - param_name in ["ai", "bi"] - and self._test_critical[self.beads.index(bead_names[0])] - ): - raise ValueError( - "Bead, {}, initialized with critical properties, not ai and bi".format( - bead_names[0] - ) - ) + if param_name in ["ai", "bi"] and self._test_critical[self.beads.index(bead_names[0])]: + raise ValueError("Bead, {}, initialized with critical properties, not ai and bi".format(bead_names[0])) super().update_parameter(param_name, bead_names, param_value) def parameter_refresh(self): @@ -384,10 +357,7 @@ def parameter_refresh(self): """ for i, bead in enumerate(self.beads): - if ( - "omega" in self.bead_library[bead] - and "kappa" not in self.bead_library[bead] - ): + if "omega" in self.bead_library[bead] and "kappa" not in self.bead_library[bead]: self.bead_library[bead]["kappa"] = ( 0.37464 + 1.54226 * self.bead_library[bead]["omega"] @@ -396,14 +366,10 @@ def parameter_refresh(self): if self._test_critical[i] and not self._test_parameters[i]: self.bead_library[bead]["ai"] = ( - 0.45723553 - * (constants.R * self.bead_library[bead]["Tc"]) ** 2 - / self.bead_library[bead]["Pc"] + 0.45723553 * (constants.R * self.bead_library[bead]["Tc"]) ** 2 / self.bead_library[bead]["Pc"] ) self.bead_library[bead]["bi"] = 0.07779607 * ( - constants.R - * self.bead_library[bead]["Tc"] - / self.bead_library[bead]["Pc"] + constants.R * self.bead_library[bead]["Tc"] / self.bead_library[bead]["Pc"] ) parameters = ["ai", "bi"] diff --git a/despasito/equations_of_state/eos_toolbox.py b/despasito/equations_of_state/eos_toolbox.py index 1e85523..5d83ed2 100644 --- a/despasito/equations_of_state/eos_toolbox.py +++ b/despasito/equations_of_state/eos_toolbox.py @@ -40,9 +40,7 @@ def remove_insignificant_components(xi_old, massi_old): return xi_new, massi_new -def partial_density_central_difference( - xi, rho, T, func, step_size=1e-2, log_method=False -): +def partial_density_central_difference(xi, rho, T, func, step_size=1e-2, log_method=False): """ Take the derivative of a dependent variable calculated with a given function using the central difference method. 
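A minimal sketch of the central-difference scheme this helper describes, assuming only NumPy; the function name and the relative step in each partial density rho_i = xi_i * rho are illustrative choices, not despasito's API.

import numpy as np

def central_difference_wrt_partial_density(rhoi, func, step_size=1e-2):
    # Approximate d f / d rho_i for each component with a symmetric finite
    # difference, using a step proportional to that component's partial density.
    rhoi = np.asarray(rhoi, dtype=float)
    derivative = np.zeros(len(rhoi))
    for i in range(len(rhoi)):
        step = np.zeros(len(rhoi))
        step[i] = rhoi[i] * step_size
        derivative[i] = (func(rhoi + step) - func(rhoi - step)) / (2.0 * step[i])
    return derivative

# Sanity check: f(rho) = (sum_i rho_i)**2 has gradient 2 * sum_i rho_i in every component.
print(central_difference_wrt_partial_density([1.0, 2.0], lambda r: np.sum(r) ** 2))  # ~[6.0, 6.0]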
@@ -164,9 +162,7 @@ def calc_massi(molecular_composition, bead_library, beads): if "mass" in bead_library[bead]: massi[i] += molecular_composition[i, k] * bead_library[bead]["mass"] else: - raise ValueError( - "The mass for bead, {}, was not provided.".format(bead) - ) + raise ValueError("The mass for bead, {}, was not provided.".format(bead)) return massi @@ -205,9 +201,7 @@ def extract_property(prop, bead_library, beads, default=None): prop_array[i] = bead_library[bead][prop] else: if default is None: - raise ValueError( - "The property {} for bead, {}, was not provided.".format(prop, bead) - ) + raise ValueError("The property {} for bead, {}, was not provided.".format(prop, bead)) else: prop_array[i] = default @@ -249,8 +243,7 @@ def check_bead_parameters(bead_library0, parameter_defaults): ) else: raise ValueError( - "Parameter, {}, should have been defined for parametrized " - "group, {}.".format(parameter, bead) + "Parameter, {}, should have been defined for parametrized " "group, {}.".format(parameter, bead) ) return bead_library @@ -299,27 +292,14 @@ def cross_interaction_from_dict(beads, bead_library, combining_dict, cross_libra for j, beadname2 in enumerate(beads): if j > i: for key in combining_dict: - if ( - cross_library.get(beadname, {}) - .get(beadname2, {}) - .get(key, None) - is not None - ): + if cross_library.get(beadname, {}).get(beadname2, {}).get(key, None) is not None: output[key][i, j] = cross_library[beadname][beadname2][key] - elif ( - cross_library.get(beadname2, {}) - .get(beadname, {}) - .get(key, None) - is not None - ): + elif cross_library.get(beadname2, {}).get(beadname, {}).get(key, None) is not None: output[key][i, j] = cross_library[beadname2][beadname][key] else: try: tmp = combining_rules( - bead_library[beadname], - bead_library[beadname2], - key, - **combining_dict[key] + bead_library[beadname], bead_library[beadname2], key, **combining_dict[key] ) except Exception: raise ValueError( @@ -369,9 +349,7 @@ def construct_dummy_bead_library(input_dict, keys=None): keys = [str(x) for x in range(len(input_dict[parameter]))] flag = True if len(keys) != len(input_dict[parameter]): - raise ValueError( - "Number of keys is not equal to the number of quantities given" - ) + raise ValueError("Number of keys is not equal to the number of quantities given") for i, bead in enumerate(keys): if bead not in output: diff --git a/despasito/equations_of_state/interface.py b/despasito/equations_of_state/interface.py index 551e834..2a1396c 100644 --- a/despasito/equations_of_state/interface.py +++ b/despasito/equations_of_state/interface.py @@ -65,9 +65,7 @@ def __init__(self, beads, bead_library, **kwargs): self.number_of_components = None for bead in beads: if bead not in bead_library: - raise ValueError( - "The group, '{}', was not found in parameter library".format(bead) - ) + raise ValueError("The group, '{}', was not found in parameter library".format(bead)) self.beads = None self.bead_library = None @@ -171,8 +169,7 @@ def guess_parameters(self, param_name, bead_names): if len(bead_names) > 2: raise ValueError( - "The bead names {} were given, but only a maximum of 2 are " - "permitted.".format(", ".join(bead_names)) + "The bead names {} were given, but only a maximum of 2 are " "permitted.".format(", ".join(bead_names)) ) if not set(bead_names).issubset(self.beads): raise ValueError( @@ -183,29 +180,16 @@ def guess_parameters(self, param_name, bead_names): param_value = None # Self interaction parameter if len(bead_names) == 1: - if ( - bead_names[0] in 
self.bead_library - and param_name in self.bead_library[bead_names[0]] - ): + if bead_names[0] in self.bead_library and param_name in self.bead_library[bead_names[0]]: param_value = self.bead_library[bead_names[0]][param_name] # Cross interaction parameter elif len(bead_names) == 2: - if ( - bead_names[1] in self.cross_library - and bead_names[0] in self.cross_library[bead_names[1]] - ): + if bead_names[1] in self.cross_library and bead_names[0] in self.cross_library[bead_names[1]]: if param_name in self.cross_library[bead_names[1]][bead_names[0]]: - param_value = self.cross_library[bead_names[1]][bead_names[0]][ - param_name - ] - elif ( - bead_names[0] in self.cross_library - and bead_names[1] in self.cross_library[bead_names[0]] - ): + param_value = self.cross_library[bead_names[1]][bead_names[0]][param_name] + elif bead_names[0] in self.cross_library and bead_names[1] in self.cross_library[bead_names[0]]: if param_name in self.cross_library[bead_names[0]][bead_names[1]]: - param_value = self.cross_library[bead_names[0]][bead_names[1]][ - param_name - ] + param_value = self.cross_library[bead_names[0]][bead_names[1]][param_name] if param_value is None: bounds = self.check_bounds(bead_names[0], param_name, np.empty(2)) @@ -331,8 +315,7 @@ def update_parameter(self, param_name, bead_names, param_value): if len(bead_names) > 2: raise ValueError( - "The bead names {} were given, but only a maximum of 2 are " - "permitted.".format(", ".join(bead_names)) + "The bead names {} were given, but only a maximum of 2 are " "permitted.".format(", ".join(bead_names)) ) if not set(bead_names).issubset(self.beads): raise ValueError( @@ -354,23 +337,12 @@ def update_parameter(self, param_name, bead_names, param_value): self.bead_library[bead_names[0]] = {param_name: param_value} # Cross interaction parameter elif len(bead_names) == 2: - if ( - bead_names[1] in self.cross_library - and bead_names[0] in self.cross_library[bead_names[1]] - ): - self.cross_library[bead_names[1]][bead_names[0]][ - param_name - ] = param_value + if bead_names[1] in self.cross_library and bead_names[0] in self.cross_library[bead_names[1]]: + self.cross_library[bead_names[1]][bead_names[0]][param_name] = param_value elif bead_names[0] in self.cross_library: if bead_names[1] in self.cross_library[bead_names[0]]: - self.cross_library[bead_names[0]][bead_names[1]][ - param_name - ] = param_value + self.cross_library[bead_names[0]][bead_names[1]][param_name] = param_value else: - self.cross_library[bead_names[0]][bead_names[1]] = { - param_name: param_value - } + self.cross_library[bead_names[0]][bead_names[1]] = {param_name: param_value} else: - self.cross_library[bead_names[0]] = { - bead_names[1]: {param_name: param_value} - } + self.cross_library[bead_names[0]] = {bead_names[1]: {param_name: param_value}} diff --git a/despasito/equations_of_state/saft/Aassoc.py b/despasito/equations_of_state/saft/Aassoc.py index d61ea96..d6a937f 100644 --- a/despasito/equations_of_state/saft/Aassoc.py +++ b/despasito/equations_of_state/saft/Aassoc.py @@ -148,10 +148,7 @@ def initiate_assoc_matrices(beads, bead_library, molecular_composition): if key.startswith("Nk"): tmp = key.split("-") if len(tmp) < 2: - raise ValueError( - "Association site names should be defined with hyphens " - + "(e.g. Nk-H)" - ) + raise ValueError("Association site names should be defined with hyphens " + "(e.g. 
Nk-H)") else: _, site = tmp @@ -163,11 +160,7 @@ def initiate_assoc_matrices(beads, bead_library, molecular_composition): else: ind = sitenames.index(site) nk[i][ind] = value - logger.debug( - "Bead {} has {} of the association site {}".format( - bead, value, site - ) - ) + logger.debug("Bead {} has {} of the association site {}".format(bead, value, site)) indices = assoc_site_indices(nk, molecular_composition) if indices.size == 0: @@ -176,9 +169,7 @@ def initiate_assoc_matrices(beads, bead_library, molecular_composition): flag_assoc = True if flag_assoc: - logger.info( - "The following association sites have been identified: {}".format(sitenames) - ) + logger.info("The following association sites have been identified: {}".format(sitenames)) else: logger.info("No association sites are used in this system.") @@ -262,9 +253,7 @@ def calc_assoc_matrices( nbeads = len(beads) if np.any(sitenames is None) or np.any(nk is None): - sitenames, nk, _ = initiate_assoc_matrices( - bead_library, beads, molecular_composition - ) + sitenames, nk, _ = initiate_assoc_matrices(bead_library, beads, molecular_composition) else: nsitesmax = len(sitenames) epsilonHB = np.zeros((nbeads, nbeads, nsitesmax, nsitesmax)) @@ -300,22 +289,21 @@ def calc_assoc_matrices( rd_tmp = "-".join(["rd", site2, site1]) if epsilon_tmp in bead_library[bead1] and ( - K_tmp not in bead_library[bead1] - and rc_tmp not in bead_library[bead1] + K_tmp not in bead_library[bead1] and rc_tmp not in bead_library[bead1] ): raise ValueError( "An association site energy parameter for {}-{}".format( - site1, site2, - ) - + " was given for bead {}, but not the bonding".format( - bead1 + site1, + site2, ) + + " was given for bead {}, but not the bonding".format(bead1) + " information. Either K-{}-{}/K-{}-{} or".format( - site1, site2, site2, site1, - ) - + " rc-{}-{}/rc-{}-{} must be given.".format( - site1, site2, site2, site1 + site1, + site2, + site2, + site1, ) + + " rc-{}-{}/rc-{}-{} must be given.".format(site1, site2, site2, site1) ) elif K_tmp in bead_library[bead1] and rc_tmp in bead_library[bead1]: raise ValueError( @@ -327,18 +315,14 @@ def calc_assoc_matrices( K_tmp in bead_library[bead1] or rc_tmp in bead_library[bead1] ): raise ValueError( - "An association site bonding information for {}".format( - "{}-{}".format(site1, site2) - ) + "An association site bonding information for {}".format("{}-{}".format(site1, site2)) + " was given for bead {}, but not the energy".format(bead1) + " parameter. epsilonHB must be given." 
) if epsilon_tmp in bead_library[bead1]: if a == b: - epsilonHB[i, i, a, b] = -1 * np.abs( - bead_library[bead1][epsilon_tmp] - ) + epsilonHB[i, i, a, b] = -1 * np.abs(bead_library[bead1][epsilon_tmp]) else: epsilonHB[i, i, a, b] = bead_library[bead1][epsilon_tmp] epsilonHB[i, i, b, a] = epsilonHB[i, i, a, b] @@ -446,45 +430,30 @@ def calc_assoc_matrices( rd_klab[j, i, b, a] = rd_klab[i, j, a, b] elif nk[j][b] != 0: - sitea = epsilon_tmp = "-".join( - ["epsilonHB", sitenames[a], sitenames[a]] - ) - siteb = epsilon_tmp = "-".join( - ["epsilonHB", sitenames[b], sitenames[b]] - ) + sitea = epsilon_tmp = "-".join(["epsilonHB", sitenames[a], sitenames[a]]) + siteb = epsilon_tmp = "-".join(["epsilonHB", sitenames[b], sitenames[b]]) if ( epsilonHB[j, i, b, a] == 0.0 and sitea in bead_library[beads[i]] and siteb in bead_library[beads[j]] ): - epsilonHB[i, j, a, b] = np.sqrt( - epsilonHB[i, i, a, a] * epsilonHB[j, j, b, b] - ) + epsilonHB[i, j, a, b] = np.sqrt(epsilonHB[i, i, a, a] * epsilonHB[j, j, b, b]) epsilonHB[i, j, a, b] *= -1 * np.sign( - bead_library[beads[i]][sitea] - * bead_library[beads[j]][siteb] + bead_library[beads[i]][sitea] * bead_library[beads[j]][siteb] ) epsilonHB[j, i, b, a] = epsilonHB[i, j, a, b] if flag_Kklab and Kklab[i, j, a, b] == 0.0: Kklab[i, j, a, b] = ( - ( - (Kklab[i, i, a, a]) ** (1.0 / 3.0) - + (Kklab[j, j, b, b]) ** (1.0 / 3.0) - ) - / 2.0 + ((Kklab[i, i, a, a]) ** (1.0 / 3.0) + (Kklab[j, j, b, b]) ** (1.0 / 3.0)) / 2.0 ) ** 3 Kklab[j, i, b, a] = Kklab[i, j, a, b] if flag_rc_klab and rc_klab[i, j, a, b] == 0.0: - rc_klab[i, j, a, b] = ( - rc_klab[i, i, a, a] + rc_klab[j, j, b, b] - ) / 2 + rc_klab[i, j, a, b] = (rc_klab[i, i, a, a] + rc_klab[j, j, b, b]) / 2 rc_klab[j, i, b, a] = rc_klab[i, j, a, b] if flag_rd_klab and rd_klab[i, j, a, b] == 0.0: - rd_klab[i, j, a, b] = ( - rd_klab[i, i, a, a] + rd_klab[j, j, b, b] - ) / 2 + rd_klab[i, j, a, b] = (rd_klab[i, i, a, a] + rd_klab[j, j, b, b]) / 2 rd_klab[j, i, b, a] = rd_klab[i, j, a, b] output = {"epsilonHB": epsilonHB} @@ -497,13 +466,10 @@ def calc_assoc_matrices( if flag_Kklab and flag_rc_klab: raise ValueError( - "Both association site bonding volumes and cutoff distances were provided." - " This is redundant." + "Both association site bonding volumes and cutoff distances were provided." " This is redundant." ) if flag_rd_klab and not flag_rc_klab: - raise ValueError( - "Association site position were provided, but not cutoff distances." 
- ) + raise ValueError("Association site position were provided, but not cutoff distances.") return output @@ -551,14 +517,8 @@ def calc_bonding_volume(rc_klab, dij_bar, rd_klab=None, reduction_ratio=0.25): rd = rd_klab[k, l, a, b] tmp0 = np.pi * dij_bar[i, j] ** 2 / (18 * rd**2) - tmp11 = np.log( - (rc_klab[k, l, a, b] + 2 * rd) / dij_bar[i, j] - ) - tmp12 = ( - 6 * rc_klab[k, l, a, b] ** 3 - + 18 * rc_klab[k, l, a, b] ** 2 * rd - - 24 * rd**3 - ) + tmp11 = np.log((rc_klab[k, l, a, b] + 2 * rd) / dij_bar[i, j]) + tmp12 = 6 * rc_klab[k, l, a, b] ** 3 + 18 * rc_klab[k, l, a, b] ** 2 * rd - 24 * rd**3 tmp21 = rc_klab[k, l, a, b] + 2 * rd - dij_bar[i, j] tmp22 = ( 22 * rd**2 @@ -569,8 +529,6 @@ def calc_bonding_volume(rc_klab, dij_bar, rd_klab=None, reduction_ratio=0.25): + dij_bar[i, j] ** 2 ) - Kijklab[i, j, k, l, a, b] = tmp0 * ( - tmp11 * tmp12 + tmp21 * tmp22 - ) + Kijklab[i, j, k, l, a, b] = tmp0 * (tmp11 * tmp12 + tmp21 * tmp22) return Kijklab diff --git a/despasito/equations_of_state/saft/Aideal.py b/despasito/equations_of_state/saft/Aideal.py index d8383c1..e858046 100644 --- a/despasito/equations_of_state/saft/Aideal.py +++ b/despasito/equations_of_state/saft/Aideal.py @@ -49,9 +49,7 @@ def Aideal_contribution(rho, T, xi, massi, method="Abroglie"): if method in functions: function = functions[method] else: - raise ValueError( - "Method, {}, was not found to calculate Aideal.".format(method) - ) + raise ValueError("Method, {}, was not found to calculate Aideal.".format(method)) return function(rho, T, xi, massi) diff --git a/despasito/equations_of_state/saft/compiled_modules/ext_Aassoc_numba.py b/despasito/equations_of_state/saft/compiled_modules/ext_Aassoc_numba.py index c4d47b8..25f369f 100644 --- a/despasito/equations_of_state/saft/compiled_modules/ext_Aassoc_numba.py +++ b/despasito/equations_of_state/saft/compiled_modules/ext_Aassoc_numba.py @@ -56,13 +56,9 @@ def calc_Xika(indices, rho, xi, molecular_composition, nk, Fklab, Kklab, gr_asso rho, xi, molecular_composition, nk, Fklab, Kklab, gr_assoc = tmp_array if l_K == 4: - Xika_final, err_array = calc_Xika_4( - indices, rho, xi, molecular_composition, nk, Fklab, Kklab, gr_assoc - ) + Xika_final, err_array = calc_Xika_4(indices, rho, xi, molecular_composition, nk, Fklab, Kklab, gr_assoc) if l_K == 6: - Xika_final, err_array = calc_Xika_6( - indices, rho, xi, molecular_composition, nk, Fklab, Kklab, gr_assoc - ) + Xika_final, err_array = calc_Xika_6(indices, rho, xi, molecular_composition, nk, Fklab, Kklab, gr_assoc) return Xika_final, err_array @@ -164,9 +160,7 @@ def calc_Xika_4( break else: if obj / np.max(Xika_elements) > 1e3: - Xika_elements = Xika_elements + damp * ( - Xika_elements_new - Xika_elements - ) + Xika_elements = Xika_elements + damp * (Xika_elements_new - Xika_elements) else: Xika_elements = Xika_elements_new @@ -253,9 +247,7 @@ def calc_Xika_6( jnd = 0 for jjnd in range(l_ind): j, l, b = indices[jjnd] - delta = ( - Fklab[k, l, a, b] * Kklab[i, j, k, l, a, b] * gr_assoc[r, i, j] - ) + delta = Fklab[k, l, a, b] * Kklab[i, j, k, l, a, b] * gr_assoc[r, i, j] Xika_elements_new[ind] += ( constants.molecule_per_nm3 * rho[r] @@ -275,9 +267,7 @@ def calc_Xika_6( break else: if obj / np.max(Xika_elements) > 1e3: - Xika_elements = Xika_elements + damp * ( - Xika_elements_new - Xika_elements - ) + Xika_elements = Xika_elements + damp * (Xika_elements_new - Xika_elements) else: Xika_elements = Xika_elements_new diff --git a/despasito/equations_of_state/saft/compiled_modules/ext_Aassoc_python.py 
b/despasito/equations_of_state/saft/compiled_modules/ext_Aassoc_python.py index e808c05..f4a2207 100644 --- a/despasito/equations_of_state/saft/compiled_modules/ext_Aassoc_python.py +++ b/despasito/equations_of_state/saft/compiled_modules/ext_Aassoc_python.py @@ -83,15 +83,9 @@ def calc_Xika( for jnd in range(l_ind): j, l, b = indices[jnd] if l_K == 4: - delta = ( - Fklab[k, l, a, b] * Kklab[k, l, a, b] * gr_assoc[r, i, j] - ) + delta = Fklab[k, l, a, b] * Kklab[k, l, a, b] * gr_assoc[r, i, j] elif l_K == 6: - delta = ( - Fklab[k, l, a, b] - * Kklab[i, j, k, l, a, b] - * gr_assoc[r, i, j] - ) + delta = Fklab[k, l, a, b] * Kklab[i, j, k, l, a, b] * gr_assoc[r, i, j] Xika_elements_new[ind] += ( constants.molecule_per_nm3 @@ -110,9 +104,7 @@ def calc_Xika( break else: if obj / max(Xika_elements_old) > 1e3: - Xika_elements_old = Xika_elements_old + damp * ( - Xika_elements_new - Xika_elements_old - ) + Xika_elements_old = Xika_elements_old + damp * (Xika_elements_new - Xika_elements_old) else: Xika_elements_old = Xika_elements_new diff --git a/despasito/equations_of_state/saft/compiled_modules/ext_gamma_mie_numba.py b/despasito/equations_of_state/saft/compiled_modules/ext_gamma_mie_numba.py index 51ffda2..f1136ca 100644 --- a/despasito/equations_of_state/saft/compiled_modules/ext_gamma_mie_numba.py +++ b/despasito/equations_of_state/saft/compiled_modules/ext_gamma_mie_numba.py @@ -172,11 +172,9 @@ def calc_Bkl(rho, l_kl, Cmol2seg, dkl, epsilonkl, x0kl, zetax): # compute Ikl(l_kl), eq. 23 Ikl = (1.0 - (x0kl ** (3.0 - l_kl))) / (l_kl - 3.0) # compute Jkl(l_kl), eq. 24 - Jkl = ( - 1.0 - - ((x0kl ** (4.0 - l_kl)) * (l_kl - 3.0)) - + ((x0kl ** (3.0 - l_kl)) * (l_kl - 4.0)) - ) / ((l_kl - 3.0) * (l_kl - 4.0)) + Jkl = (1.0 - ((x0kl ** (4.0 - l_kl)) * (l_kl - 3.0)) + ((x0kl ** (3.0 - l_kl)) * (l_kl - 4.0))) / ( + (l_kl - 3.0) * (l_kl - 4.0) + ) tmp11 = rhos * (2.0 * np.pi) tmp12 = (dkl**3 * constants.molecule_per_nm3**2) * epsilonkl @@ -254,11 +252,7 @@ def calc_a1ii(rho, Cmol2seg, dkl, l_akl, l_rkl, x0kl, epsilonkl, zetax): ) -@numba.njit( - numba.f8[:, :]( - numba.f8[:], numba.f8, numba.f8[:], numba.f8[:], numba.f8[:], numba.f8[:] - ) -) +@numba.njit(numba.f8[:, :](numba.f8[:], numba.f8, numba.f8[:], numba.f8[:], numba.f8[:], numba.f8[:])) def calc_a1s_eff(rho, Cmol2seg, l_ii_avg, zetax, epsilonii_avg, dii_avg): r""" Return a1s,(rho*Cmol2seg,l_ij) in K @@ -319,10 +313,7 @@ def calc_a1s_eff(rho, Cmol2seg, l_ii_avg, zetax, epsilonii_avg, dii_avg): * 2.0 * np.pi * Cmol2seg - * ( - (epsilonii_avg * (dii_avg**3 * constants.molecule_per_nm3**2)) - / (l_ii_avg - 3.0) - ) + * ((epsilonii_avg * (dii_avg**3 * constants.molecule_per_nm3**2)) / (l_ii_avg - 3.0)) ) output = np.transpose(np.transpose(a1s) * rho) @@ -380,9 +371,7 @@ def calc_Bkl_eff(rho, l_ii_avg, Cmol2seg, dii_avg, epsilonii_avg, x0ii, zetax): Iii_avg = (1.0 - (x0ii ** (3.0 - l_ii_avg))) / (l_ii_avg - 3.0) # compute Jii_avg(l_ii_avg), eq. 
24 Jii_avg = ( - 1.0 - - ((x0ii ** (4.0 - l_ii_avg)) * (l_ii_avg - 3.0)) - + ((x0ii ** (3.0 - l_ii_avg)) * (l_ii_avg - 4.0)) + 1.0 - ((x0ii ** (4.0 - l_ii_avg)) * (l_ii_avg - 3.0)) + ((x0ii ** (3.0 - l_ii_avg)) * (l_ii_avg - 4.0)) ) / ((l_ii_avg - 3.0) * (l_ii_avg - 4.0)) tmp11 = rhos * (2.0 * np.pi) @@ -398,11 +387,7 @@ def calc_Bkl_eff(rho, l_ii_avg, Cmol2seg, dii_avg, epsilonii_avg, x0ii, zetax): return Bii_avg -@numba.njit( - numba.f8[:, :]( - numba.f8[:], numba.f8, numba.f8[:], numba.f8[:], numba.f8[:], numba.f8[:] - ) -) +@numba.njit(numba.f8[:, :](numba.f8[:], numba.f8, numba.f8[:], numba.f8[:], numba.f8[:], numba.f8[:])) def calc_da1sii_drhos(rho, Cmol2seg, l_ii_avg, zetax, epsilonii_avg, dii_avg): r""" Return a1s,ii_avg(rho*Cmol2seg,l_ii_avg) in K as defined in eq. 25. @@ -459,30 +444,19 @@ def calc_da1sii_drhos(rho, Cmol2seg, l_ii_avg, zetax, epsilonii_avg, dii_avg): ), ) etaii_avg[:, k] = np.dot(zetax_pow, ciii_avg) - rhos_detaii_avg_drhos[:, k] = np.dot( - zetax_pow, ciii_avg * np.array([1.0, 2.0, 3.0, 4.0]) - ) + rhos_detaii_avg_drhos[:, k] = np.dot(zetax_pow, ciii_avg * np.array([1.0, 2.0, 3.0, 4.0])) - tmp1 = (1.0 - (etaii_avg / 2.0)) / ((1.0 - etaii_avg) ** 3) + ( - 5.0 - 2.0 * etaii_avg - ) / (2.0 * (1.0 - etaii_avg) ** 4) * rhos_detaii_avg_drhos - tmp2 = ( - -2.0 - * np.pi - * ( - (epsilonii_avg * (dii_avg**3 * constants.molecule_per_nm3**2)) - / (l_ii_avg - 3.0) - ) - ) + tmp1 = (1.0 - (etaii_avg / 2.0)) / ((1.0 - etaii_avg) ** 3) + (5.0 - 2.0 * etaii_avg) / ( + 2.0 * (1.0 - etaii_avg) ** 4 + ) * rhos_detaii_avg_drhos + tmp2 = -2.0 * np.pi * ((epsilonii_avg * (dii_avg**3 * constants.molecule_per_nm3**2)) / (l_ii_avg - 3.0)) da1s_drhos = tmp1 * tmp2 return da1s_drhos -@numba.njit( - numba.f8[:, :](numba.f8[:], numba.f8[:], numba.f8[:], numba.f8[:], numba.f8[:]) -) +@numba.njit(numba.f8[:, :](numba.f8[:], numba.f8[:], numba.f8[:], numba.f8[:], numba.f8[:])) def calc_dBkl_drhos(l_ii_avg, dii_avg, epsilonii_avg, x0ii, zetax): r""" Return derivative of Bkl(rho*Cmol2seg,l_ii_avg) with respect to :math:`\rho_S`. @@ -522,9 +496,7 @@ def calc_dBkl_drhos(l_ii_avg, dii_avg, epsilonii_avg, x0ii, zetax): Iii_avg = (1.0 - (x0ii ** (3.0 - l_ii_avg))) / (l_ii_avg - 3.0) # compute Jii_avg(l_ii_avg), eq. 
24 Jii_avg = ( - 1.0 - - ((x0ii ** (4.0 - l_ii_avg)) * (l_ii_avg - 3.0)) - + ((x0ii ** (3.0 - l_ii_avg)) * (l_ii_avg - 4.0)) + 1.0 - ((x0ii ** (4.0 - l_ii_avg)) * (l_ii_avg - 3.0)) + ((x0ii ** (3.0 - l_ii_avg)) * (l_ii_avg - 4.0)) ) / ((l_ii_avg - 3.0) * (l_ii_avg - 4.0)) tmp = 2.0 * np.pi * dii_avg**3 * epsilonii_avg @@ -534,11 +506,8 @@ def calc_dBkl_drhos(l_ii_avg, dii_avg, epsilonii_avg, x0ii, zetax): tmp1[:, k] = ((1.0 - (zetax / 2.0)) / ((1.0 - zetax) ** 3) * Iii_avg[k]) - ( ((9.0 * zetax * (1.0 + zetax)) / (2.0 * ((1 - zetax) ** 3))) * Jii_avg[k] ) - tmp2[:, k] = ( - (5.0 - 2.0 * zetax) * zetax / (2 * (1.0 - zetax) ** 4) * Iii_avg[k] - ) - ( - ((9.0 * zetax * (zetax**2 + 4.0 * zetax + 1)) / (2.0 * ((1 - zetax) ** 4))) - * Jii_avg[k] + tmp2[:, k] = ((5.0 - 2.0 * zetax) * zetax / (2 * (1.0 - zetax) ** 4) * Iii_avg[k]) - ( + ((9.0 * zetax * (zetax**2 + 4.0 * zetax + 1)) / (2.0 * ((1 - zetax) ** 4))) * Jii_avg[k] ) dBkl_drhos = tmp * (tmp1 + tmp2) * constants.molecule_per_nm3**2 @@ -557,9 +526,7 @@ def calc_dBkl_drhos(l_ii_avg, dii_avg, epsilonii_avg, x0ii, zetax): numba.f8[:], ) ) -def calc_da1iidrhos( - rho, Cmol2seg, dii_eff, l_aii_avg, l_rii_avg, x0ii, epsilonii_avg, zetax -): +def calc_da1iidrhos(rho, Cmol2seg, dii_eff, l_aii_avg, l_rii_avg, x0ii, epsilonii_avg, zetax): r""" Compute derivative of the term, :math:`\bar{a}_{1,ii}` with respect to :math:`\rho_s` @@ -597,19 +564,14 @@ def calc_da1iidrhos( Cii = prefactor_1d(l_rii_avg, l_aii_avg) - das1_drhos_a = calc_da1sii_drhos( - rho, Cmol2seg, l_aii_avg, zetax, epsilonii_avg, dii_eff - ) - das1_drhos_r = calc_da1sii_drhos( - rho, Cmol2seg, l_rii_avg, zetax, epsilonii_avg, dii_eff - ) + das1_drhos_a = calc_da1sii_drhos(rho, Cmol2seg, l_aii_avg, zetax, epsilonii_avg, dii_eff) + das1_drhos_r = calc_da1sii_drhos(rho, Cmol2seg, l_rii_avg, zetax, epsilonii_avg, dii_eff) dB_drhos_a = calc_dBkl_drhos(l_aii_avg, dii_eff, epsilonii_avg, x0ii, zetax) dB_drhos_r = calc_dBkl_drhos(l_rii_avg, dii_eff, epsilonii_avg, x0ii, zetax) da1iidrhos = Cii * ( - ((x0ii**l_aii_avg) * (das1_drhos_a + dB_drhos_a)) - - ((x0ii**l_rii_avg) * (das1_drhos_r + dB_drhos_r)) + ((x0ii**l_aii_avg) * (das1_drhos_a + dB_drhos_a)) - ((x0ii**l_rii_avg) * (das1_drhos_r + dB_drhos_r)) ) return da1iidrhos @@ -627,9 +589,7 @@ def calc_da1iidrhos( numba.f8[:], ) ) -def calc_da2ii_1pchi_drhos( - rho, Cmol2seg, epsilonii_avg, dii_eff, x0ii, l_rii_avg, l_aii_avg, zetax -): +def calc_da2ii_1pchi_drhos(rho, Cmol2seg, epsilonii_avg, dii_eff, x0ii, l_rii_avg, l_aii_avg, zetax): r""" Compute derivative of the term, :math:`\frac{\bar{a}_{2,ii}}{1+\bar{\chi}_{ii}}` with respect to :math:`\rho_s`. 
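The hunk that follows only reflows the K_HS expression and its chain-rule derivative with respect to the segment density (via d zeta_x / d rho_s = zeta_x / rho_s). The two forms can be checked for consistency with a short standalone snippet, assuming only NumPy and reusing the exact expressions from the hunk; the function names are illustrative.

import numpy as np

def calc_KHS(zetax):
    # Hard-sphere compressibility factor K_HS(zeta_x) as written in this hunk.
    return (1.0 - zetax) ** 4 / (1.0 + 4.0 * zetax + 4.0 * zetax**2 - 4.0 * zetax**3 + zetax**4)

def dKHS_dzetax(zetax):
    # Analytic derivative with respect to zeta_x; multiplying by zeta_x / rho_s
    # yields the dKHS_drhos factor that appears in the code.
    return (
        4.0 * (zetax**2 - 5.0 * zetax - 2.0) * (1.0 - zetax) ** 3
        / (zetax**4 - 4.0 * zetax**3 + 4.0 * zetax**2 + 4.0 * zetax + 1.0) ** 2
    )

zetax = np.linspace(0.05, 0.4, 5)
h = 1e-6
numeric = (calc_KHS(zetax + h) - calc_KHS(zetax - h)) / (2.0 * h)
assert np.allclose(numeric, dKHS_dzetax(zetax), rtol=1e-5)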
@@ -668,63 +628,35 @@ def calc_da2ii_1pchi_drhos( """ # Calculate terms and derivatives used in derivative chain rule - KHS = ((1.0 - zetax) ** 4) / ( - 1.0 + (4.0 * zetax) + (4.0 * (zetax**2)) - (4.0 * (zetax**3)) + (zetax**4) - ) + KHS = ((1.0 - zetax) ** 4) / (1.0 + (4.0 * zetax) + (4.0 * (zetax**2)) - (4.0 * (zetax**3)) + (zetax**4)) dKHS_drhos = ( (4.0 * (zetax**2 - 5.0 * zetax - 2.0) * (1.0 - zetax) ** 3) / (zetax**4 - 4.0 * zetax**3 + 4.0 * zetax**2 + 4.0 * zetax + 1.0) ** 2 * (zetax / (rho * Cmol2seg)) ) - a1sii_2l_aii_avg = calc_a1s_eff( - rho, Cmol2seg, 2.0 * l_aii_avg, zetax, epsilonii_avg, dii_eff - ) - a1sii_2l_rii_avg = calc_a1s_eff( - rho, Cmol2seg, 2.0 * l_rii_avg, zetax, epsilonii_avg, dii_eff - ) - a1sii_l_rii_avgl_aii_avg = calc_a1s_eff( - rho, Cmol2seg, l_aii_avg + l_rii_avg, zetax, epsilonii_avg, dii_eff - ) + a1sii_2l_aii_avg = calc_a1s_eff(rho, Cmol2seg, 2.0 * l_aii_avg, zetax, epsilonii_avg, dii_eff) + a1sii_2l_rii_avg = calc_a1s_eff(rho, Cmol2seg, 2.0 * l_rii_avg, zetax, epsilonii_avg, dii_eff) + a1sii_l_rii_avgl_aii_avg = calc_a1s_eff(rho, Cmol2seg, l_aii_avg + l_rii_avg, zetax, epsilonii_avg, dii_eff) - Bii_2l_aii_avg = calc_Bkl_eff( - rho, 2.0 * l_aii_avg, Cmol2seg, dii_eff, epsilonii_avg, x0ii, zetax - ) - Bii_2l_rii_avg = calc_Bkl_eff( - rho, 2.0 * l_rii_avg, Cmol2seg, dii_eff, epsilonii_avg, x0ii, zetax - ) - Bii_l_aii_avgl_rii_avg = calc_Bkl_eff( - rho, l_aii_avg + l_rii_avg, Cmol2seg, dii_eff, epsilonii_avg, x0ii, zetax - ) + Bii_2l_aii_avg = calc_Bkl_eff(rho, 2.0 * l_aii_avg, Cmol2seg, dii_eff, epsilonii_avg, x0ii, zetax) + Bii_2l_rii_avg = calc_Bkl_eff(rho, 2.0 * l_rii_avg, Cmol2seg, dii_eff, epsilonii_avg, x0ii, zetax) + Bii_l_aii_avgl_rii_avg = calc_Bkl_eff(rho, l_aii_avg + l_rii_avg, Cmol2seg, dii_eff, epsilonii_avg, x0ii, zetax) - da1sii_2l_aii_avg = calc_da1sii_drhos( - rho, Cmol2seg, 2.0 * l_aii_avg, zetax, epsilonii_avg, dii_eff - ) - da1sii_2l_rii_avg = calc_da1sii_drhos( - rho, Cmol2seg, 2.0 * l_rii_avg, zetax, epsilonii_avg, dii_eff - ) - da1sii_l_rii_avgl_aii_avg = calc_da1sii_drhos( - rho, Cmol2seg, l_aii_avg + l_rii_avg, zetax, epsilonii_avg, dii_eff - ) + da1sii_2l_aii_avg = calc_da1sii_drhos(rho, Cmol2seg, 2.0 * l_aii_avg, zetax, epsilonii_avg, dii_eff) + da1sii_2l_rii_avg = calc_da1sii_drhos(rho, Cmol2seg, 2.0 * l_rii_avg, zetax, epsilonii_avg, dii_eff) + da1sii_l_rii_avgl_aii_avg = calc_da1sii_drhos(rho, Cmol2seg, l_aii_avg + l_rii_avg, zetax, epsilonii_avg, dii_eff) - dBii_2l_aii_avg = calc_dBkl_drhos( - 2.0 * l_aii_avg, dii_eff, epsilonii_avg, x0ii, zetax - ) - dBii_2l_rii_avg = calc_dBkl_drhos( - 2.0 * l_rii_avg, dii_eff, epsilonii_avg, x0ii, zetax - ) - dBii_l_aii_avgl_rii_avg = calc_dBkl_drhos( - l_aii_avg + l_rii_avg, dii_eff, epsilonii_avg, x0ii, zetax - ) + dBii_2l_aii_avg = calc_dBkl_drhos(2.0 * l_aii_avg, dii_eff, epsilonii_avg, x0ii, zetax) + dBii_2l_rii_avg = calc_dBkl_drhos(2.0 * l_rii_avg, dii_eff, epsilonii_avg, x0ii, zetax) + dBii_l_aii_avgl_rii_avg = calc_dBkl_drhos(l_aii_avg + l_rii_avg, dii_eff, epsilonii_avg, x0ii, zetax) # Calculate Derivative Cii = prefactor_1d(l_rii_avg, l_aii_avg) B = ( x0ii ** (2.0 * l_aii_avg) * (a1sii_2l_aii_avg + Bii_2l_aii_avg) - - 2.0 - * x0ii ** (l_aii_avg + l_rii_avg) - * (a1sii_l_rii_avgl_aii_avg + Bii_l_aii_avgl_rii_avg) + - 2.0 * x0ii ** (l_aii_avg + l_rii_avg) * (a1sii_l_rii_avgl_aii_avg + Bii_l_aii_avgl_rii_avg) + x0ii ** (2.0 * l_rii_avg) * (a1sii_2l_rii_avg + Bii_2l_rii_avg) ) @@ -732,9 +664,7 @@ def calc_da2ii_1pchi_drhos( dB = ( x0ii ** (2.0 * l_aii_avg) * (da1sii_2l_aii_avg + 
dBii_2l_aii_avg) - - 2.0 - * x0ii ** (l_aii_avg + l_rii_avg) - * (da1sii_l_rii_avgl_aii_avg + dBii_l_aii_avgl_rii_avg) + - 2.0 * x0ii ** (l_aii_avg + l_rii_avg) * (da1sii_l_rii_avgl_aii_avg + dBii_l_aii_avgl_rii_avg) + x0ii ** (2.0 * l_rii_avg) * (da1sii_2l_rii_avg + dBii_2l_rii_avg) ) diff --git a/despasito/equations_of_state/saft/compiled_modules/ext_gamma_mie_python.py b/despasito/equations_of_state/saft/compiled_modules/ext_gamma_mie_python.py index 68425e9..d7a5500 100644 --- a/despasito/equations_of_state/saft/compiled_modules/ext_gamma_mie_python.py +++ b/despasito/equations_of_state/saft/compiled_modules/ext_gamma_mie_python.py @@ -155,11 +155,9 @@ def calc_Bkl(rho, l_kl, Cmol2seg, dkl, epsilonkl, x0kl, zetax): # compute Ikl(l_kl), eq. 23 Ikl = (1.0 - (x0kl ** (3.0 - l_kl))) / (l_kl - 3.0) # compute Jkl(l_kl), eq. 24 - Jkl = ( - 1.0 - - ((x0kl ** (4.0 - l_kl)) * (l_kl - 3.0)) - + ((x0kl ** (3.0 - l_kl)) * (l_kl - 4.0)) - ) / ((l_kl - 3.0) * (l_kl - 4.0)) + Jkl = (1.0 - ((x0kl ** (4.0 - l_kl)) * (l_kl - 3.0)) + ((x0kl ** (3.0 - l_kl)) * (l_kl - 4.0))) / ( + (l_kl - 3.0) * (l_kl - 4.0) + ) tmp11 = rhos * (2.0 * np.pi) tmp12 = (dkl**3 * constants.molecule_per_nm3**2) * epsilonkl @@ -287,10 +285,7 @@ def calc_a1s_eff(rho, Cmol2seg, l_ii_avg, zetax, epsilonii_avg, dii_avg): * 2.0 * np.pi * Cmol2seg - * ( - (epsilonii_avg * (dii_avg**3 * constants.molecule_per_nm3**2)) - / (l_ii_avg - 3.0) - ) + * ((epsilonii_avg * (dii_avg**3 * constants.molecule_per_nm3**2)) / (l_ii_avg - 3.0)) ) output = np.transpose(np.transpose(a1s) * rho) @@ -337,9 +332,7 @@ def calc_Bkl_eff(rho, l_ii_avg, Cmol2seg, dii_avg, epsilonii_avg, x0ii, zetax): Iii_avg = (1.0 - (x0ii ** (3.0 - l_ii_avg))) / (l_ii_avg - 3.0) # compute Jii_avg(l_ii_avg), eq. 24 Jii_avg = ( - 1.0 - - ((x0ii ** (4.0 - l_ii_avg)) * (l_ii_avg - 3.0)) - + ((x0ii ** (3.0 - l_ii_avg)) * (l_ii_avg - 4.0)) + 1.0 - ((x0ii ** (4.0 - l_ii_avg)) * (l_ii_avg - 3.0)) + ((x0ii ** (3.0 - l_ii_avg)) * (l_ii_avg - 4.0)) ) / ((l_ii_avg - 3.0) * (l_ii_avg - 4.0)) tmp11 = rhos * (2.0 * np.pi) @@ -412,21 +405,12 @@ def calc_da1sii_drhos(rho, Cmol2seg, l_ii_avg, zetax, epsilonii_avg, dii_avg): ), ) etaii_avg[:, k] = np.dot(zetax_pow, ciii_avg) - rhos_detaii_avg_drhos[:, k] = np.dot( - zetax_pow, ciii_avg * np.array([1.0, 2.0, 3.0, 4.0]) - ) + rhos_detaii_avg_drhos[:, k] = np.dot(zetax_pow, ciii_avg * np.array([1.0, 2.0, 3.0, 4.0])) - tmp1 = (1.0 - (etaii_avg / 2.0)) / ((1.0 - etaii_avg) ** 3) + ( - 5.0 - 2.0 * etaii_avg - ) / (2.0 * (1.0 - etaii_avg) ** 4) * rhos_detaii_avg_drhos - tmp2 = ( - -2.0 - * np.pi - * ( - (epsilonii_avg * (dii_avg**3 * constants.molecule_per_nm3**2)) - / (l_ii_avg - 3.0) - ) - ) + tmp1 = (1.0 - (etaii_avg / 2.0)) / ((1.0 - etaii_avg) ** 3) + (5.0 - 2.0 * etaii_avg) / ( + 2.0 * (1.0 - etaii_avg) ** 4 + ) * rhos_detaii_avg_drhos + tmp2 = -2.0 * np.pi * ((epsilonii_avg * (dii_avg**3 * constants.molecule_per_nm3**2)) / (l_ii_avg - 3.0)) da1s_drhos = tmp1 * tmp2 @@ -472,9 +456,7 @@ def calc_dBkl_drhos(l_ii_avg, dii_avg, epsilonii_avg, x0ii, zetax): Iii_avg = (1.0 - (x0ii ** (3.0 - l_ii_avg))) / (l_ii_avg - 3.0) # compute Jii_avg(l_ii_avg), eq. 
24 Jii_avg = ( - 1.0 - - ((x0ii ** (4.0 - l_ii_avg)) * (l_ii_avg - 3.0)) - + ((x0ii ** (3.0 - l_ii_avg)) * (l_ii_avg - 4.0)) + 1.0 - ((x0ii ** (4.0 - l_ii_avg)) * (l_ii_avg - 3.0)) + ((x0ii ** (3.0 - l_ii_avg)) * (l_ii_avg - 4.0)) ) / ((l_ii_avg - 3.0) * (l_ii_avg - 4.0)) tmp = 2.0 * np.pi * dii_avg**3 * epsilonii_avg @@ -484,20 +466,15 @@ def calc_dBkl_drhos(l_ii_avg, dii_avg, epsilonii_avg, x0ii, zetax): tmp1[:, k] = ((1.0 - (zetax / 2.0)) / ((1.0 - zetax) ** 3) * Iii_avg[k]) - ( ((9.0 * zetax * (1.0 + zetax)) / (2.0 * ((1 - zetax) ** 3))) * Jii_avg[k] ) - tmp2[:, k] = ( - (5.0 - 2.0 * zetax) * zetax / (2 * (1.0 - zetax) ** 4) * Iii_avg[k] - ) - ( - ((9.0 * zetax * (zetax**2 + 4.0 * zetax + 1)) / (2.0 * ((1 - zetax) ** 4))) - * Jii_avg[k] + tmp2[:, k] = ((5.0 - 2.0 * zetax) * zetax / (2 * (1.0 - zetax) ** 4) * Iii_avg[k]) - ( + ((9.0 * zetax * (zetax**2 + 4.0 * zetax + 1)) / (2.0 * ((1 - zetax) ** 4))) * Jii_avg[k] ) dBkl_drhos = tmp * (tmp1 + tmp2) * constants.molecule_per_nm3**2 return dBkl_drhos -def calc_da1iidrhos( - rho, Cmol2seg, dii_eff, l_aii_avg, l_rii_avg, x0ii, epsilonii_avg, zetax -): +def calc_da1iidrhos(rho, Cmol2seg, dii_eff, l_aii_avg, l_rii_avg, x0ii, epsilonii_avg, zetax): r""" Compute derivative of the term, :math:`\bar{a}_{1,ii}` with respect to :math:`\rho_s` @@ -535,27 +512,20 @@ def calc_da1iidrhos( Cii = prefactor(l_rii_avg, l_aii_avg) - das1_drhos_a = calc_da1sii_drhos( - rho, Cmol2seg, l_aii_avg, zetax, epsilonii_avg, dii_eff - ) - das1_drhos_r = calc_da1sii_drhos( - rho, Cmol2seg, l_rii_avg, zetax, epsilonii_avg, dii_eff - ) + das1_drhos_a = calc_da1sii_drhos(rho, Cmol2seg, l_aii_avg, zetax, epsilonii_avg, dii_eff) + das1_drhos_r = calc_da1sii_drhos(rho, Cmol2seg, l_rii_avg, zetax, epsilonii_avg, dii_eff) dB_drhos_a = calc_dBkl_drhos(l_aii_avg, dii_eff, epsilonii_avg, x0ii, zetax) dB_drhos_r = calc_dBkl_drhos(l_rii_avg, dii_eff, epsilonii_avg, x0ii, zetax) da1iidrhos = Cii * ( - ((x0ii**l_aii_avg) * (das1_drhos_a + dB_drhos_a)) - - ((x0ii**l_rii_avg) * (das1_drhos_r + dB_drhos_r)) + ((x0ii**l_aii_avg) * (das1_drhos_a + dB_drhos_a)) - ((x0ii**l_rii_avg) * (das1_drhos_r + dB_drhos_r)) ) return da1iidrhos -def calc_da2ii_1pchi_drhos( - rho, Cmol2seg, epsilonii_avg, dii_eff, x0ii, l_rii_avg, l_aii_avg, zetax -): +def calc_da2ii_1pchi_drhos(rho, Cmol2seg, epsilonii_avg, dii_eff, x0ii, l_rii_avg, l_aii_avg, zetax): r""" Compute derivative of the term, :math:`\frac{\bar{a}_{2,ii}}{1+\bar{\chi}_{ii}}` with respect to :math:`\rho_s`. 
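The Iii/Jii closed forms reflowed in the preceding hunks (noted as eqs. 23 and 24 in the nearby comments) can be sanity-checked against direct numerical quadrature. The integral interpretation below is inferred from the closed forms, and the values of lam and x0 are arbitrary illustrative inputs; only NumPy is assumed.

import numpy as np

def I_closed(lam, x0):
    # Matches the Iii/Ikl lines above: integral of x**(2 - lam) from 1 to x0.
    return (1.0 - x0 ** (3.0 - lam)) / (lam - 3.0)

def J_closed(lam, x0):
    # Matches the Jii/Jkl lines above: integral of x**(2 - lam) * (x - 1) from 1 to x0.
    return (1.0 - x0 ** (4.0 - lam) * (lam - 3.0) + x0 ** (3.0 - lam) * (lam - 4.0)) / (
        (lam - 3.0) * (lam - 4.0)
    )

lam, x0 = 15.0, 1.1  # arbitrary repulsive exponent and sigma/d ratio for illustration
x = np.linspace(1.0, x0, 20001)
dx = x[1] - x[0]

def trapezoid(y):
    # Simple uniform-grid trapezoidal rule to avoid extra dependencies.
    return dx * (y.sum() - 0.5 * (y[0] + y[-1]))

assert np.isclose(trapezoid(x ** (2.0 - lam)), I_closed(lam, x0), rtol=1e-6)
assert np.isclose(trapezoid(x ** (2.0 - lam) * (x - 1.0)), J_closed(lam, x0), rtol=1e-6)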
@@ -594,63 +564,35 @@ def calc_da2ii_1pchi_drhos( """ # Calculate terms and derivatives used in derivative chain rule - KHS = ((1.0 - zetax) ** 4) / ( - 1.0 + (4.0 * zetax) + (4.0 * (zetax**2)) - (4.0 * (zetax**3)) + (zetax**4) - ) + KHS = ((1.0 - zetax) ** 4) / (1.0 + (4.0 * zetax) + (4.0 * (zetax**2)) - (4.0 * (zetax**3)) + (zetax**4)) dKHS_drhos = ( (4.0 * (zetax**2 - 5.0 * zetax - 2.0) * (1.0 - zetax) ** 3) / (zetax**4 - 4.0 * zetax**3 + 4.0 * zetax**2 + 4.0 * zetax + 1.0) ** 2 * (zetax / (rho * Cmol2seg)) ) - a1sii_2l_aii_avg = calc_a1s_eff( - rho, Cmol2seg, 2.0 * l_aii_avg, zetax, epsilonii_avg, dii_eff - ) - a1sii_2l_rii_avg = calc_a1s_eff( - rho, Cmol2seg, 2.0 * l_rii_avg, zetax, epsilonii_avg, dii_eff - ) - a1sii_l_rii_avgl_aii_avg = calc_a1s_eff( - rho, Cmol2seg, l_aii_avg + l_rii_avg, zetax, epsilonii_avg, dii_eff - ) + a1sii_2l_aii_avg = calc_a1s_eff(rho, Cmol2seg, 2.0 * l_aii_avg, zetax, epsilonii_avg, dii_eff) + a1sii_2l_rii_avg = calc_a1s_eff(rho, Cmol2seg, 2.0 * l_rii_avg, zetax, epsilonii_avg, dii_eff) + a1sii_l_rii_avgl_aii_avg = calc_a1s_eff(rho, Cmol2seg, l_aii_avg + l_rii_avg, zetax, epsilonii_avg, dii_eff) - Bii_2l_aii_avg = calc_Bkl_eff( - rho, 2.0 * l_aii_avg, Cmol2seg, dii_eff, epsilonii_avg, x0ii, zetax - ) - Bii_2l_rii_avg = calc_Bkl_eff( - rho, 2.0 * l_rii_avg, Cmol2seg, dii_eff, epsilonii_avg, x0ii, zetax - ) - Bii_l_aii_avgl_rii_avg = calc_Bkl_eff( - rho, l_aii_avg + l_rii_avg, Cmol2seg, dii_eff, epsilonii_avg, x0ii, zetax - ) + Bii_2l_aii_avg = calc_Bkl_eff(rho, 2.0 * l_aii_avg, Cmol2seg, dii_eff, epsilonii_avg, x0ii, zetax) + Bii_2l_rii_avg = calc_Bkl_eff(rho, 2.0 * l_rii_avg, Cmol2seg, dii_eff, epsilonii_avg, x0ii, zetax) + Bii_l_aii_avgl_rii_avg = calc_Bkl_eff(rho, l_aii_avg + l_rii_avg, Cmol2seg, dii_eff, epsilonii_avg, x0ii, zetax) - da1sii_2l_aii_avg = calc_da1sii_drhos( - rho, Cmol2seg, 2.0 * l_aii_avg, zetax, epsilonii_avg, dii_eff - ) - da1sii_2l_rii_avg = calc_da1sii_drhos( - rho, Cmol2seg, 2.0 * l_rii_avg, zetax, epsilonii_avg, dii_eff - ) - da1sii_l_rii_avgl_aii_avg = calc_da1sii_drhos( - rho, Cmol2seg, l_aii_avg + l_rii_avg, zetax, epsilonii_avg, dii_eff - ) + da1sii_2l_aii_avg = calc_da1sii_drhos(rho, Cmol2seg, 2.0 * l_aii_avg, zetax, epsilonii_avg, dii_eff) + da1sii_2l_rii_avg = calc_da1sii_drhos(rho, Cmol2seg, 2.0 * l_rii_avg, zetax, epsilonii_avg, dii_eff) + da1sii_l_rii_avgl_aii_avg = calc_da1sii_drhos(rho, Cmol2seg, l_aii_avg + l_rii_avg, zetax, epsilonii_avg, dii_eff) - dBii_2l_aii_avg = calc_dBkl_drhos( - 2.0 * l_aii_avg, dii_eff, epsilonii_avg, x0ii, zetax - ) - dBii_2l_rii_avg = calc_dBkl_drhos( - 2.0 * l_rii_avg, dii_eff, epsilonii_avg, x0ii, zetax - ) - dBii_l_aii_avgl_rii_avg = calc_dBkl_drhos( - l_aii_avg + l_rii_avg, dii_eff, epsilonii_avg, x0ii, zetax - ) + dBii_2l_aii_avg = calc_dBkl_drhos(2.0 * l_aii_avg, dii_eff, epsilonii_avg, x0ii, zetax) + dBii_2l_rii_avg = calc_dBkl_drhos(2.0 * l_rii_avg, dii_eff, epsilonii_avg, x0ii, zetax) + dBii_l_aii_avgl_rii_avg = calc_dBkl_drhos(l_aii_avg + l_rii_avg, dii_eff, epsilonii_avg, x0ii, zetax) # Calculate Derivative Cii = prefactor(l_rii_avg, l_aii_avg) B = ( x0ii ** (2.0 * l_aii_avg) * (a1sii_2l_aii_avg + Bii_2l_aii_avg) - - 2.0 - * x0ii ** (l_aii_avg + l_rii_avg) - * (a1sii_l_rii_avgl_aii_avg + Bii_l_aii_avgl_rii_avg) + - 2.0 * x0ii ** (l_aii_avg + l_rii_avg) * (a1sii_l_rii_avgl_aii_avg + Bii_l_aii_avgl_rii_avg) + x0ii ** (2.0 * l_rii_avg) * (a1sii_2l_rii_avg + Bii_2l_rii_avg) ) @@ -658,9 +600,7 @@ def calc_da2ii_1pchi_drhos( dB = ( x0ii ** (2.0 * l_aii_avg) * (da1sii_2l_aii_avg + 
dBii_2l_aii_avg) - - 2.0 - * x0ii ** (l_aii_avg + l_rii_avg) - * (da1sii_l_rii_avgl_aii_avg + dBii_l_aii_avgl_rii_avg) + - 2.0 * x0ii ** (l_aii_avg + l_rii_avg) * (da1sii_l_rii_avgl_aii_avg + dBii_l_aii_avgl_rii_avg) + x0ii ** (2.0 * l_rii_avg) * (da1sii_2l_rii_avg + dBii_2l_rii_avg) ) @@ -871,13 +811,9 @@ def calc_Iij(rho, T, xi, epsilonii_avg, sigmaii_avg, sigmakl, xskl): # Iij += np.einsum("i,jk->ijk", cij[p, q] * ((sigmax3 * rho)**p), # ((T / epsilonij)**q)) if p == 0: - Iij += np.einsum( - "i,jk->ijk", cij[p, q] * np.ones(len(rho)), ((T / epsilonij) ** q) - ) + Iij += np.einsum("i,jk->ijk", cij[p, q] * np.ones(len(rho)), ((T / epsilonij) ** q)) elif p == 1: - Iij += np.einsum( - "i,jk->ijk", cij[p, q] * ((sigmax3 * rho)), ((T / epsilonij) ** q) - ) + Iij += np.einsum("i,jk->ijk", cij[p, q] * ((sigmax3 * rho)), ((T / epsilonij) ** q)) elif p == 2: rho2 = rho**2 Iij += np.einsum( diff --git a/despasito/equations_of_state/saft/gamma_mie.py b/despasito/equations_of_state/saft/gamma_mie.py index 7e6b3d9..f9854ba 100644 --- a/despasito/equations_of_state/saft/gamma_mie.py +++ b/despasito/equations_of_state/saft/gamma_mie.py @@ -16,12 +16,8 @@ import despasito.equations_of_state.saft.saft_toolbox as stb from despasito.equations_of_state.saft import Aassoc from .compiled_modules.ext_gamma_mie_python import prefactor, calc_Iij -from despasito.equations_of_state.saft.compiled_modules import ( - ext_gamma_mie_numba as ext_numba -) -from despasito.equations_of_state.saft.compiled_modules import ( - ext_gamma_mie_python as ext_python -) +from despasito.equations_of_state.saft.compiled_modules import ext_gamma_mie_numba as ext_numba +from despasito.equations_of_state.saft.compiled_modules import ext_gamma_mie_python as ext_python logger = logging.getLogger(__name__) if "cython" not in sys.modules: @@ -30,9 +26,7 @@ else: flag_cython = False try: - from despasito.equations_of_state.saft.compiled_modules import ( - ext_gamma_mie_cython as ext_cython - ) + from despasito.equations_of_state.saft.compiled_modules import ext_gamma_mie_cython as ext_cython except ImportError: raise ImportError( "Cython package is available but module: " @@ -231,19 +225,13 @@ def __init__(self, **kwargs): needed_attributes = ["molecular_composition", "beads", "bead_library"] for key in needed_attributes: if key not in kwargs: - raise ValueError( - "The one of the following inputs is missing: {}".format( - ", ".join(needed_attributes) - ) - ) + raise ValueError("The one of the following inputs is missing: {}".format(", ".join(needed_attributes))) elif key == "molecular_composition": self.eos_dict[key] = kwargs[key] elif not hasattr(self, key): setattr(self, key, kwargs[key]) - self.bead_library = tb.check_bead_parameters( - self.bead_library, self._parameter_defaults - ) + self.bead_library = tb.check_bead_parameters(self.bead_library, self._parameter_defaults) if "cross_library" not in kwargs: self.cross_library = {} @@ -251,13 +239,9 @@ def __init__(self, **kwargs): self.cross_library = kwargs["cross_library"] if "Vks" not in self.eos_dict: - self.eos_dict["Vks"] = tb.extract_property( - "Vks", self.bead_library, self.beads, default=1.0 - ) + self.eos_dict["Vks"] = tb.extract_property("Vks", self.bead_library, self.beads, default=1.0) if "Sk" not in self.eos_dict: - self.eos_dict["Sk"] = tb.extract_property( - "Sk", self.bead_library, self.beads, default=1.0 - ) + self.eos_dict["Sk"] = tb.extract_property("Sk", self.bead_library, self.beads, default=1.0) # Initialize temperature attribute if not hasattr(self, "T"): @@ 
-281,12 +265,9 @@ def __init__(self, **kwargs): self.eos_dict["lambdarkl"] = output["lambdar"] # compute alphakl eq. 33 - self.eos_dict["Ckl"] = prefactor( - self.eos_dict["lambdarkl"], self.eos_dict["lambdaakl"] - ) + self.eos_dict["Ckl"] = prefactor(self.eos_dict["lambdarkl"], self.eos_dict["lambdaakl"]) self.eos_dict["alphakl"] = self.eos_dict["Ckl"] * ( - (1.0 / (self.eos_dict["lambdaakl"] - 3.0)) - - (1.0 / (self.eos_dict["lambdarkl"] - 3.0)) + (1.0 / (self.eos_dict["lambdaakl"] - 3.0)) - (1.0 / (self.eos_dict["lambdarkl"] - 3.0)) ) # Initiate average interaction terms @@ -294,13 +275,9 @@ def __init__(self, **kwargs): if "num_rings" in kwargs: self.eos_dict["num_rings"] = kwargs["num_rings"] - logger.info( - "Accepted component ring structure: {}".format(kwargs["num_rings"]) - ) + logger.info("Accepted component ring structure: {}".format(kwargs["num_rings"])) else: - self.eos_dict["num_rings"] = np.zeros( - len(self.eos_dict["molecular_composition"]) - ) + self.eos_dict["num_rings"] = np.zeros(len(self.eos_dict["molecular_composition"])) def calc_component_averaged_properties(self): r""" @@ -335,9 +312,7 @@ def calc_component_averaged_properties(self): for i in range(self.ncomp): for k in range(self.nbeads): zki[i, k] = ( - self.eos_dict["molecular_composition"][i, k] - * self.eos_dict["Vks"][k] - * self.eos_dict["Sk"][k] + self.eos_dict["molecular_composition"][i, k] * self.eos_dict["Vks"][k] * self.eos_dict["Sk"][k] ) zkinorm[i] += zki[i, k] @@ -348,18 +323,10 @@ def calc_component_averaged_properties(self): for i in range(self.ncomp): for k in range(self.nbeads): for l in range(self.nbeads): - output["sigmaii_avg"][i] += ( - zki[i, k] * zki[i, l] * self.eos_dict["sigmakl"][k, l] ** 3 - ) - output["epsilonii_avg"][i] += ( - zki[i, k] * zki[i, l] * self.eos_dict["epsilonkl"][k, l] - ) - output["lambdarii_avg"][i] += ( - zki[i, k] * zki[i, l] * self.eos_dict["lambdarkl"][k, l] - ) - output["lambdaaii_avg"][i] += ( - zki[i, k] * zki[i, l] * self.eos_dict["lambdaakl"][k, l] - ) + output["sigmaii_avg"][i] += zki[i, k] * zki[i, l] * self.eos_dict["sigmakl"][k, l] ** 3 + output["epsilonii_avg"][i] += zki[i, k] * zki[i, l] * self.eos_dict["epsilonkl"][k, l] + output["lambdarii_avg"][i] += zki[i, k] * zki[i, l] * self.eos_dict["lambdarkl"][k, l] + output["lambdaaii_avg"][i] += zki[i, k] * zki[i, l] * self.eos_dict["lambdaakl"][k, l] output["sigmaii_avg"][i] = output["sigmaii_avg"][i] ** (1 / 3.0) self.eos_dict.update(output) @@ -394,10 +361,7 @@ def Ahard_sphere(self, rho, T, xi): * constants.molecule_per_nm3 * self.eos_dict["Cmol2seg"] * ( - np.sum( - np.sqrt(np.diag(self.eos_dict["xskl"])) - * (np.diag(self.eos_dict["dkl"]) ** m) - ) + np.sum(np.sqrt(np.diag(self.eos_dict["xskl"])) * (np.diag(self.eos_dict["dkl"]) ** m)) * (np.pi / 6.0) ) ) @@ -406,9 +370,7 @@ def Ahard_sphere(self, rho, T, xi): if self.ncomp == 1: tmp1 = 0 else: - tmp1 = np.log1p(-eta[:, 3]) * ( - eta[:, 2] ** 3 / (eta[:, 3] ** 2) - eta[:, 0] - ) + tmp1 = np.log1p(-eta[:, 3]) * (eta[:, 2] ** 3 / (eta[:, 3] ** 2) - eta[:, 0]) tmp2 = 3.0 * eta[:, 2] / (1 - eta[:, 3]) * eta[:, 1] tmp3 = eta[:, 2] ** 3 / (eta[:, 3] * ((1.0 - eta[:, 3]) ** 2)) @@ -630,11 +592,7 @@ def Asecond_order(self, rho, T, xi, zetaxstar=None, zetax=None, KHS=None): * (a1s_2la + B_2la) / constants.molecule_per_nm3 - ( - ( - 2.0 - * self.eos_dict["x0kl"] - ** (self.eos_dict["lambdaakl"] + self.eos_dict["lambdarkl"]) - ) + (2.0 * self.eos_dict["x0kl"] ** (self.eos_dict["lambdaakl"] + self.eos_dict["lambdarkl"])) * (a1s_lalr + B_lalr) / 
constants.molecule_per_nm3 ) @@ -644,9 +602,7 @@ def Asecond_order(self, rho, T, xi, zetaxstar=None, zetax=None, KHS=None): / constants.molecule_per_nm3 ) ) - a2kl *= ( - (1.0 + chikl) * self.eos_dict["epsilonkl"] * (self.eos_dict["Ckl"] ** 2) - ) # *(KHS/2.0) + a2kl *= (1.0 + chikl) * self.eos_dict["epsilonkl"] * (self.eos_dict["Ckl"] ** 2) # *(KHS/2.0) a2kl = np.einsum("i,ijk->ijk", KHS / 2.0, a2kl) @@ -693,11 +649,8 @@ def Athird_order(self, rho, T, xi, zetaxstar=None): # compute a3kl fmlist456 = self.calc_fm(self.eos_dict["alphakl"], np.array([4, 5, 6])) - a3kl = np.einsum( - "i,jk", zetaxstar, -(self.eos_dict["epsilonkl"] ** 3) * fmlist456[0] - ) * np.exp( - np.einsum("i,jk", zetaxstar, fmlist456[1]) - + np.einsum("i,jk", zetaxstar**2, fmlist456[2]) + a3kl = np.einsum("i,jk", zetaxstar, -(self.eos_dict["epsilonkl"] ** 3) * fmlist456[0]) * np.exp( + np.einsum("i,jk", zetaxstar, fmlist456[1]) + np.einsum("i,jk", zetaxstar**2, fmlist456[2]) ) # eq. 37 @@ -741,9 +694,7 @@ def Amonomer(self, rho, T, xi): self._check_temperature_dependent_parameters(T) self._check_composition_dependent_parameters(xi) - zetax = stb.calc_zetax( - rho, self.eos_dict["Cmol2seg"], self.eos_dict["xskl"], self.eos_dict["dkl"] - ) + zetax = stb.calc_zetax(rho, self.eos_dict["Cmol2seg"], self.eos_dict["xskl"], self.eos_dict["dkl"]) zetaxstar = stb.calc_zetaxstar( rho, self.eos_dict["Cmol2seg"], @@ -799,16 +750,12 @@ def gdHS(self, rho, T, xi, zetax=None): km = np.zeros((np.size(rho), 4)) gdHS = np.zeros((np.size(rho), np.size(xi))) - km[:, 0] = -np.log(1.0 - zetax) + ( - 42.0 * zetax - 39.0 * zetax**2 + 9.0 * zetax**3 - 2.0 * zetax**4 - ) / (6.0 * (1.0 - zetax) ** 3) - km[:, 1] = (zetax**4 + 6.0 * zetax**2 - 12.0 * zetax) / ( - 2.0 * (1.0 - zetax) ** 3 - ) - km[:, 2] = -3.0 * zetax**2 / (8.0 * (1.0 - zetax) ** 2) - km[:, 3] = (-(zetax**4) + 3.0 * zetax**2 + 3.0 * zetax) / ( + km[:, 0] = -np.log(1.0 - zetax) + (42.0 * zetax - 39.0 * zetax**2 + 9.0 * zetax**3 - 2.0 * zetax**4) / ( 6.0 * (1.0 - zetax) ** 3 ) + km[:, 1] = (zetax**4 + 6.0 * zetax**2 - 12.0 * zetax) / (2.0 * (1.0 - zetax) ** 3) + km[:, 2] = -3.0 * zetax**2 / (8.0 * (1.0 - zetax) ** 2) + km[:, 3] = (-(zetax**4) + 3.0 * zetax**2 + 3.0 * zetax) / (6.0 * (1.0 - zetax) ** 3) for i in range(self.ncomp): gdHS[:, i] = np.exp( @@ -938,28 +885,16 @@ def g1(self, rho, T, xi, zetax=None): Cii = prefactor(self.eos_dict["lambdarii_avg"], self.eos_dict["lambdaaii_avg"]) tmp1 = 1.0 / ( - 2.0 - * np.pi - * self.eos_dict["epsilonii_avg"] - * self.eos_dict["dii_eff"] ** 3 - * constants.molecule_per_nm3**2 + 2.0 * np.pi * self.eos_dict["epsilonii_avg"] * self.eos_dict["dii_eff"] ** 3 * constants.molecule_per_nm3**2 ) tmp11 = 3.0 * da1iidrhos - tmp21 = ( - Cii - * self.eos_dict["lambdaaii_avg"] - * (self.eos_dict["x0ii"] ** self.eos_dict["lambdaaii_avg"]) - ) + tmp21 = Cii * self.eos_dict["lambdaaii_avg"] * (self.eos_dict["x0ii"] ** self.eos_dict["lambdaaii_avg"]) tmp22 = np.einsum( "ij,i->ij", (a1sii_lambdaaii_avg + Bii_lambdaaii_avg), 1.0 / (rho * self.eos_dict["Cmol2seg"]), ) - tmp31 = ( - Cii - * self.eos_dict["lambdarii_avg"] - * (self.eos_dict["x0ii"] ** self.eos_dict["lambdarii_avg"]) - ) + tmp31 = Cii * self.eos_dict["lambdarii_avg"] * (self.eos_dict["x0ii"] ** self.eos_dict["lambdarii_avg"]) tmp32 = np.einsum( "ij,i->ij", (a1sii_lambdarii_avg + Bii_lambdarii_avg), @@ -1016,8 +951,7 @@ def g2(self, rho, T, xi, zetax=None): phi7 = np.array([10.0, 10.0, 0.57, -6.7, -8.0]) alphaii = Cii * ( - (1.0 / (self.eos_dict["lambdaaii_avg"] - 3.0)) - - (1.0 / 
(self.eos_dict["lambdarii_avg"] - 3.0)) + (1.0 / (self.eos_dict["lambdaaii_avg"] - 3.0)) - (1.0 / (self.eos_dict["lambdarii_avg"] - 3.0)) ) theta = np.exp(self.eos_dict["epsilonii_avg"] / T) - 1.0 @@ -1166,10 +1100,7 @@ def g2(self, rho, T, xi, zetax=None): * (a1sii_2lambdarii_avg + Bii_2lambdarii_avg) + eKC2 * (self.eos_dict["lambdarii_avg"] + self.eos_dict["lambdaaii_avg"]) - * ( - self.eos_dict["x0ii"] - ** (self.eos_dict["lambdarii_avg"] + self.eos_dict["lambdaaii_avg"]) - ) + * (self.eos_dict["x0ii"] ** (self.eos_dict["lambdarii_avg"] + self.eos_dict["lambdaaii_avg"])) * (a1sii_lambdarii_avglambdaaii_avg + Bii_lambdaaii_avglambdarii_avg) - eKC2 * self.eos_dict["lambdaaii_avg"] @@ -1204,9 +1135,7 @@ def Achain(self, rho, T, xi): self._check_temperature_dependent_parameters(T) self._check_composition_dependent_parameters(xi) - zetax = stb.calc_zetax( - rho, self.eos_dict["Cmol2seg"], self.eos_dict["xskl"], self.eos_dict["dkl"] - ) + zetax = stb.calc_zetax(rho, self.eos_dict["Cmol2seg"], self.eos_dict["xskl"], self.eos_dict["dkl"]) gdHS = self.gdHS(rho, T, xi, zetax=zetax) g1 = self.g1(rho, T, xi, zetax=zetax) g2 = self.g2(rho, T, xi, zetax=zetax) @@ -1221,9 +1150,7 @@ def Achain(self, rho, T, xi): beadsum = -1.0 + self.eos_dict["num_rings"][i] for k in range(self.nbeads): beadsum += ( - self.eos_dict["molecular_composition"][i, k] - * self.eos_dict["Vks"][k] - * self.eos_dict["Sk"][k] + self.eos_dict["molecular_composition"][i, k] * self.eos_dict["Vks"][k] * self.eos_dict["Sk"][k] ) Achain -= xi[i] * beadsum * np.log(gii[:, i]) @@ -1260,11 +1187,7 @@ def density_max(self, xi, T, maxpack=0.65): max_density = ( maxpack * 6.0 - / ( - self.eos_dict["Cmol2seg"] - * np.pi - * np.sum(self.eos_dict["xskl"] * (self.eos_dict["dkl"] ** 3)) - ) + / (self.eos_dict["Cmol2seg"] * np.pi * np.sum(self.eos_dict["xskl"] * (self.eos_dict["dkl"] ** 3))) / constants.molecule_per_nm3 ) @@ -1294,9 +1217,7 @@ def calc_fm(alphakl, mlist): """ if np.size(np.shape(alphakl)) == 2: - fmlist = np.zeros( - (np.size(mlist), np.size(alphakl, axis=0), np.size(alphakl, axis=0)) - ) + fmlist = np.zeros((np.size(mlist), np.size(alphakl, axis=0), np.size(alphakl, axis=0))) elif np.size(np.shape(alphakl)) == 1: fmlist = np.zeros((np.size(mlist), np.size(alphakl, axis=0))) else: @@ -1427,9 +1348,7 @@ def calc_gr_assoc(self, rho, T, xi, Ktype="ijklab"): elif Ktype == "ijklab": gr = self.calc_gdHS_assoc(rho, T, xi) else: - raise ValueError( - "Ktype does not indicate a known gr_assoc for this saft type." 
- ) + raise ValueError("Ktype does not indicate a known gr_assoc for this saft type.") return gr @@ -1466,10 +1385,7 @@ def calc_gdHS_assoc(self, rho, T, xi): * constants.molecule_per_nm3 * self.eos_dict["Cmol2seg"] * ( - np.sum( - np.sqrt(np.diag(self.eos_dict["xskl"])) - * (np.diag(self.eos_dict["dkl"]) ** m) - ) + np.sum(np.sqrt(np.diag(self.eos_dict["xskl"])) * (np.diag(self.eos_dict["dkl"]) ** m)) * (np.pi / 6.0) ) ) @@ -1521,13 +1437,9 @@ def calc_Kijklab(self, T, rc_klab, rd_klab=None, reduction_ratio=0.25): dij_bar = np.zeros((self.ncomp, self.ncomp)) for i in range(self.ncomp): for j in range(self.ncomp): - dij_bar[i, j] = np.mean( - [self.eos_dict["dii_eff"][i], self.eos_dict["dii_eff"][j]] - ) + dij_bar[i, j] = np.mean([self.eos_dict["dii_eff"][i], self.eos_dict["dii_eff"][j]]) - Kijklab = Aassoc.calc_bonding_volume( - rc_klab, dij_bar, rd_klab=rd_klab, reduction_ratio=reduction_ratio - ) + Kijklab = Aassoc.calc_bonding_volume(rc_klab, dij_bar, rd_klab=rd_klab, reduction_ratio=reduction_ratio) return Kijklab @@ -1566,9 +1478,7 @@ def parameter_refresh(self, bead_library, cross_library): self.bead_library.update(bead_library) self.cross_library.update(cross_library) - self.eos_dict["Sk"] = tb.extract_property( - "Sk", self.bead_library, self.beads, default=1.0 - ) + self.eos_dict["Sk"] = tb.extract_property("Sk", self.bead_library, self.beads, default=1.0) output = tb.cross_interaction_from_dict( self.beads, @@ -1591,21 +1501,16 @@ def parameter_refresh(self, bead_library, cross_library): self.calc_component_averaged_properties() if not np.any(np.isnan(self.xi)): - self.eos_dict["Cmol2seg"], self.eos_dict["xskl"] = ( - stb.calc_composition_dependent_variables( - self.xi, - self.eos_dict["molecular_composition"], - self.bead_library, - self.beads, - ) + self.eos_dict["Cmol2seg"], self.eos_dict["xskl"] = stb.calc_composition_dependent_variables( + self.xi, + self.eos_dict["molecular_composition"], + self.bead_library, + self.beads, ) - self.eos_dict["Ckl"] = prefactor( - self.eos_dict["lambdarkl"], self.eos_dict["lambdaakl"] - ) + self.eos_dict["Ckl"] = prefactor(self.eos_dict["lambdarkl"], self.eos_dict["lambdaakl"]) self.eos_dict["alphakl"] = self.eos_dict["Ckl"] * ( - (1.0 / (self.eos_dict["lambdaakl"] - 3.0)) - - (1.0 / (self.eos_dict["lambdarkl"] - 3.0)) + (1.0 / (self.eos_dict["lambdaakl"] - 3.0)) - (1.0 / (self.eos_dict["lambdarkl"] - 3.0)) ) def _check_density(self, rho): @@ -1697,23 +1602,18 @@ def _check_temperature_dependent_parameters(self, T): self.eos_dict["lambdarkl"] = output["lambdar"] # compute alphakl eq. 
33 - self.eos_dict["Ckl"] = prefactor( - self.eos_dict["lambdarkl"], self.eos_dict["lambdaakl"] - ) + self.eos_dict["Ckl"] = prefactor(self.eos_dict["lambdarkl"], self.eos_dict["lambdaakl"]) self.eos_dict["alphakl"] = self.eos_dict["Ckl"] * ( - (1.0 / (self.eos_dict["lambdaakl"] - 3.0)) - - (1.0 / (self.eos_dict["lambdarkl"] - 3.0)) + (1.0 / (self.eos_dict["lambdaakl"] - 3.0)) - (1.0 / (self.eos_dict["lambdarkl"] - 3.0)) ) self.calc_component_averaged_properties() - self.eos_dict["dkl"], self.eos_dict["x0kl"] = ( - stb.calc_hard_sphere_matricies( - T, - self.eos_dict["sigmakl"], - self.bead_library, - self.beads, - prefactor, - ) + self.eos_dict["dkl"], self.eos_dict["x0kl"] = stb.calc_hard_sphere_matricies( + T, + self.eos_dict["sigmakl"], + self.bead_library, + self.beads, + prefactor, ) self._update_chain_temperature_dependent_variables(T) @@ -1742,13 +1642,11 @@ def _check_composition_dependent_parameters(self, xi): """ xi = np.array(xi) if not np.all(self.xi == xi): - self.eos_dict["Cmol2seg"], self.eos_dict["xskl"] = ( - stb.calc_composition_dependent_variables( - xi, - self.eos_dict["molecular_composition"], - self.bead_library, - self.beads, - ) + self.eos_dict["Cmol2seg"], self.eos_dict["xskl"] = stb.calc_composition_dependent_variables( + xi, + self.eos_dict["molecular_composition"], + self.bead_library, + self.beads, ) self.xi = xi @@ -1784,9 +1682,7 @@ def _update_chain_temperature_dependent_variables(self, T): for i in range(self.ncomp): for k in range(self.nbeads): zki[i, k] = ( - self.eos_dict["molecular_composition"][i, k] - * self.eos_dict["Vks"][k] - * self.eos_dict["Sk"][k] + self.eos_dict["molecular_composition"][i, k] * self.eos_dict["Vks"][k] * self.eos_dict["Sk"][k] ) zkinorm[i] += zki[i, k] @@ -1797,9 +1693,7 @@ def _update_chain_temperature_dependent_variables(self, T): for i in range(self.ncomp): for k in range(self.nbeads): for l in range(self.nbeads): - dii_eff[i] += ( - zki[i, k] * zki[i, l] * self.eos_dict["dkl"][k, l] ** 3 - ) + dii_eff[i] += zki[i, k] * zki[i, l] * self.eos_dict["dkl"][k, l] ** 3 dii_eff[i] = dii_eff[i] ** (1 / 3.0) self.eos_dict["dii_eff"] = dii_eff diff --git a/despasito/equations_of_state/saft/gamma_sw.py b/despasito/equations_of_state/saft/gamma_sw.py index 8de5159..b9ac9b2 100644 --- a/despasito/equations_of_state/saft/gamma_sw.py +++ b/despasito/equations_of_state/saft/gamma_sw.py @@ -181,20 +181,14 @@ def __init__(self, **kwargs): needed_attributes = ["molecular_composition", "beads", "bead_library"] for key in needed_attributes: if key not in kwargs: - raise ValueError( - "The one of the following inputs is missing: {}".format( - ", ".join(needed_attributes) - ) - ) + raise ValueError("The one of the following inputs is missing: {}".format(", ".join(needed_attributes))) elif key == "molecular_composition": if "molecular_composition" not in self.eos_dict: self.eos_dict[key] = kwargs[key] elif not hasattr(self, key): setattr(self, key, kwargs[key]) - self.bead_library = tb.check_bead_parameters( - self.bead_library, self._parameter_defaults - ) + self.bead_library = tb.check_bead_parameters(self.bead_library, self._parameter_defaults) if "cross_library" not in kwargs: self.cross_library = {} @@ -202,13 +196,9 @@ def __init__(self, **kwargs): self.cross_library = kwargs["cross_library"] if "Vks" not in self.eos_dict: - self.eos_dict["Vks"] = tb.extract_property( - "Vks", self.bead_library, self.beads, default=1.0 - ) + self.eos_dict["Vks"] = tb.extract_property("Vks", self.bead_library, self.beads, default=1.0) if "Sk" not in 
self.eos_dict: - self.eos_dict["Sk"] = tb.extract_property( - "Sk", self.bead_library, self.beads, default=1.0 - ) + self.eos_dict["Sk"] = tb.extract_property("Sk", self.bead_library, self.beads, default=1.0) # Initialize component attribute if not hasattr(self, "xi"): @@ -229,13 +219,9 @@ def __init__(self, **kwargs): if "num_rings" in kwargs: self.eos_dict["num_rings"] = kwargs["num_rings"] - logger.info( - "Accepted component ring structure: {}".format(kwargs["num_rings"]) - ) + logger.info("Accepted component ring structure: {}".format(kwargs["num_rings"])) else: - self.eos_dict["num_rings"] = np.zeros( - len(self.eos_dict["molecular_composition"]) - ) + self.eos_dict["num_rings"] = np.zeros(len(self.eos_dict["molecular_composition"])) # Initiate average interaction terms self.calc_component_averaged_properties() @@ -278,9 +264,7 @@ def calc_component_averaged_properties(self): for i in range(ncomp): for k in range(nbeads): zki[i, k] = ( - self.eos_dict["molecular_composition"][i, k] - * self.eos_dict["Vks"][k] - * self.eos_dict["Sk"][k] + self.eos_dict["molecular_composition"][i, k] * self.eos_dict["Vks"][k] * self.eos_dict["Sk"][k] ) zkinorm[i] += zki[i, k] @@ -293,19 +277,13 @@ def calc_component_averaged_properties(self): sigmaii[i] += zki[i, k] * self.eos_dict["sigma_kl"][k, k] ** 3 for l in range(nbeads): - epsilonii[i] += ( - zki[i, k] * zki[i, l] * self.eos_dict["epsilon_kl"][k, l] - ) - lambdaii[i] += ( - zki[i, k] * zki[i, l] * self.eos_dict["lambda_kl"][k, l] - ) + epsilonii[i] += zki[i, k] * zki[i, l] * self.eos_dict["epsilon_kl"][k, l] + lambdaii[i] += zki[i, k] * zki[i, l] * self.eos_dict["lambda_kl"][k, l] sigmaii[i] = sigmaii[i] ** (1.0 / 3.0) input_dict = {"sigma": sigmaii, "lambda": lambdaii, "epsilon": epsilonii} dummy_dict, dummy_labels = tb.construct_dummy_bead_library(input_dict) - output_dict = tb.cross_interaction_from_dict( - dummy_labels, dummy_dict, self.combining_rules - ) + output_dict = tb.cross_interaction_from_dict(dummy_labels, dummy_dict, self.combining_rules) self.eos_dict["sigma_ij"] = output_dict["sigma"] self.eos_dict["lambda_ij"] = output_dict["lambda"] self.eos_dict["epsilon_ij"] = output_dict["epsilon"] @@ -337,10 +315,7 @@ def reduced_density(self, rho, xi): reduced_density = np.zeros((np.size(rho), 4)) for m in range(4): reduced_density[:, m] = rho2 * ( - np.sum( - np.sqrt(np.diag(self.eos_dict["xskl"])) - * (np.diag(self.eos_dict["sigma_kl"]) ** m) - ) + np.sum(np.sqrt(np.diag(self.eos_dict["xskl"])) * (np.diag(self.eos_dict["sigma_kl"]) ** m)) * (np.pi / 6.0) ) @@ -482,9 +457,7 @@ def Ahard_sphere(self, rho, T, xi): zeta = self.reduced_density(rho, xi) tmp = 6.0 / (np.pi * rho * constants.molecule_per_nm3) - tmp1 = np.log1p(-zeta[:, 3]) * ( - zeta[:, 2] ** 3 / (zeta[:, 3] ** 2) - zeta[:, 0] - ) + tmp1 = np.log1p(-zeta[:, 3]) * (zeta[:, 2] ** 3 / (zeta[:, 3] ** 2) - zeta[:, 0]) tmp2 = 3.0 * zeta[:, 2] / (1 - zeta[:, 3]) * zeta[:, 1] tmp3 = zeta[:, 2] ** 3 / (zeta[:, 3] * ((1.0 - zeta[:, 3]) ** 2)) AHS = tmp * (tmp1 + tmp2 + tmp3) @@ -521,12 +494,8 @@ def Afirst_order(self, rho, T, xi, zetax=None): zetax = self.reduced_density(rho, xi)[:, 3] g0HS = self.calc_g0HS(rho, xi, zetax=zetax) - a1kl_tmp = np.tensordot( - rho * constants.molecule_per_nm3, self.eos_dict["xskl"] * self.alphakl, 0 - ) - A1 = -(self.eos_dict["Cmol2seg"] ** 2 / T) * np.sum( - a1kl_tmp * g0HS, axis=(1, 2) - ) # Units of K + a1kl_tmp = np.tensordot(rho * constants.molecule_per_nm3, self.eos_dict["xskl"] * self.alphakl, 0) + A1 = -(self.eos_dict["Cmol2seg"] ** 2 / T) * 
np.sum(a1kl_tmp * g0HS, axis=(1, 2)) # Units of K return A1 @@ -576,13 +545,7 @@ def Asecond_order(self, rho, T, xi, zetax=None, KHS=None): tmp2 = self.eos_dict["epsilon_kl"] * self.alphakl * self.eos_dict["xskl"] a2kl_tmp = np.tensordot(tmp1, tmp2, 0) - a2 = a2kl_tmp * ( - g0HS - + zetax[:, np.newaxis, np.newaxis] - * dzetakl - * (2.5 - zeta_eff) - / (1 - zeta_eff) ** 4 - ) + a2 = a2kl_tmp * (g0HS + zetax[:, np.newaxis, np.newaxis] * dzetakl * (2.5 - zeta_eff) / (1 - zeta_eff) ** 4) # Lymperiadis 2007 has a disconnect where Eq. 24 != Eq. 30, as Eq. 24 is # missing a minus sign. (Same in Lymperiadis 2008 for Eq. 32 and Eq. 38) @@ -704,10 +667,7 @@ def calc_gHS(self, rho, xi): tmp = ( self.eos_dict["sigma_ij"][i, i] * self.eos_dict["sigma_ij"][j, j] - / ( - self.eos_dict["sigma_ij"][i, i] - + self.eos_dict["sigma_ij"][j, j] - ) + / (self.eos_dict["sigma_ij"][i, i] + self.eos_dict["sigma_ij"][j, j]) ) gHS[:, i, j] = tmp1 + 3 * tmp * tmp2 + 2 * tmp**2 * tmp3 @@ -745,22 +705,16 @@ def calc_gSW(self, rho, T, xi, zetax=None): g0HS = self.calc_g0HS(rho, xi, zetax=zetax, mode="effective") gHS = self.calc_gHS(rho, xi) - zeta_eff = self.effective_packing_fraction( - rho, xi, mode="effective", zetax=zetax - ) + zeta_eff = self.effective_packing_fraction(rho, xi, mode="effective", zetax=zetax) dg0HSdzetaeff = (2.5 - zeta_eff) / (1.0 - zeta_eff) ** 4 ncomp = len(xi) - dckl_coef = np.array( - [[-1.50349, 0.249434], [1.40049, -0.827739], [-15.0427, 5.30827]] - ) + dckl_coef = np.array([[-1.50349, 0.249434], [1.40049, -0.827739], [-15.0427, 5.30827]]) zetax_pow = np.transpose(np.array([zetax, zetax**2, zetax**3])) dzetaijdlambda = np.zeros((np.size(rho), ncomp, ncomp)) for i in range(ncomp): for j in range(ncomp): - cikl = np.dot( - dckl_coef, np.array([1.0, (2 * self.eos_dict["lambda_ij"][i, j])]) - ) + cikl = np.dot(dckl_coef, np.array([1.0, (2 * self.eos_dict["lambda_ij"][i, j])])) dzetaijdlambda[:, i, j] = np.dot(zetax_pow, cikl) dzetaijdzetax = self._dzetaeff_dzetax(rho, xi, zetax=zetax, mode="effective") @@ -770,10 +724,7 @@ def calc_gSW(self, rho, T, xi, zetax=None): ) gSW = gHS + self.eos_dict["epsilon_ij"][np.newaxis, :, :] / T * ( - g0HS - + (self.eos_dict["lambda_ij"][np.newaxis, :, :] ** 3 - 1.0) - * dg0HSdzetaeff - * dzetaeff + g0HS + (self.eos_dict["lambda_ij"][np.newaxis, :, :] ** 3 - 1.0) * dg0HSdzetaeff * dzetaeff ) return gSW @@ -806,9 +757,7 @@ def Achain(self, rho, T, xi): beadsum = -1.0 + self.eos_dict["num_rings"][i] for k in range(self.nbeads): beadsum += ( - self.eos_dict["molecular_composition"][i, k] - * self.eos_dict["Vks"][k] - * self.eos_dict["Sk"][k] + self.eos_dict["molecular_composition"][i, k] * self.eos_dict["Vks"][k] * self.eos_dict["Sk"][k] ) Achain -= xi[i] * beadsum * np.log(gii[:, i, i]) @@ -843,11 +792,7 @@ def density_max(self, xi, T, maxpack=0.65): max_density = ( maxpack * 6.0 - / ( - self.eos_dict["Cmol2seg"] - * np.pi - * np.sum(self.eos_dict["xskl"] * (self.eos_dict["sigma_kl"] ** 3)) - ) + / (self.eos_dict["Cmol2seg"] * np.pi * np.sum(self.eos_dict["xskl"] * (self.eos_dict["sigma_kl"] ** 3))) / constants.molecule_per_nm3 ) @@ -912,13 +857,9 @@ def calc_Kijklab(self, T, rc_klab, rd_klab=None, reduction_ratio=0.25): dij_bar = np.zeros((self.ncomp, self.ncomp)) for i in range(self.ncomp): for j in range(self.ncomp): - dij_bar[i, j] = np.mean( - [self.eos_dict["sigma_ij"][i], self.eos_dict["sigma_ij"][j]] - ) + dij_bar[i, j] = np.mean([self.eos_dict["sigma_ij"][i], self.eos_dict["sigma_ij"][j]]) - Kijklab = Aassoc.calc_bonding_volume( - rc_klab, dij_bar, 
rd_klab=rd_klab, reduction_ratio=reduction_ratio - ) + Kijklab = Aassoc.calc_bonding_volume(rc_klab, dij_bar, rd_klab=rd_klab, reduction_ratio=reduction_ratio) return Kijklab @@ -952,9 +893,7 @@ def parameter_refresh(self, bead_library, cross_library): self.bead_library.update(bead_library) self.cross_library.update(cross_library) - self.eos_dict["Sk"] = tb.extract_property( - "Sk", self.bead_library, self.beads, default=1.0 - ) + self.eos_dict["Sk"] = tb.extract_property("Sk", self.bead_library, self.beads, default=1.0) # Update Non bonded matrices output = tb.cross_interaction_from_dict( @@ -969,13 +908,11 @@ def parameter_refresh(self, bead_library, cross_library): self.calc_component_averaged_properties() if not np.any(np.isnan(self.xi)): - self.eos_dict["Cmol2seg"], self.eos_dict["xskl"] = ( - stb.calc_composition_dependent_variables( - self.xi, - self.eos_dict["molecular_composition"], - self.bead_library, - self.beads, - ) + self.eos_dict["Cmol2seg"], self.eos_dict["xskl"] = stb.calc_composition_dependent_variables( + self.xi, + self.eos_dict["molecular_composition"], + self.bead_library, + self.beads, ) self.alphakl = ( 2.0 @@ -1042,13 +979,11 @@ def _check_composition_dependent_parameters(self, xi): """ xi = np.array(xi) if not np.all(self.xi == xi): - self.eos_dict["Cmol2seg"], self.eos_dict["xskl"] = ( - stb.calc_composition_dependent_variables( - xi, - self.eos_dict["molecular_composition"], - self.bead_library, - self.beads, - ) + self.eos_dict["Cmol2seg"], self.eos_dict["xskl"] = stb.calc_composition_dependent_variables( + xi, + self.eos_dict["molecular_composition"], + self.bead_library, + self.beads, ) self.xi = xi diff --git a/despasito/equations_of_state/saft/saft.py b/despasito/equations_of_state/saft/saft.py index 8eab8ab..b9d00e3 100644 --- a/despasito/equations_of_state/saft/saft.py +++ b/despasito/equations_of_state/saft/saft.py @@ -187,9 +187,7 @@ class EosType(EosTemplate): """ - def __init__( - self, saft_name="gamma_mie", Aideal_method=None, combining_rules=None, **kwargs - ): + def __init__(self, saft_name="gamma_mie", Aideal_method=None, combining_rules=None, **kwargs): super().__init__(**kwargs) @@ -228,30 +226,20 @@ def __init__( else: self.eos_dict[key] = tmp except Exception: - raise ValueError( - "SAFT type, {}, is missing the variable {}.".format(saft_name, key) - ) + raise ValueError("SAFT type, {}, is missing the variable {}.".format(saft_name, key)) for res in self.eos_dict["residual_helmholtz_contributions"]: setattr(self, res, getattr(self.saft_source, res)) if Aideal_method is not None: - logger.info( - "Switching Aideal method from {} to {}.".format( - self.eos_dict["Aideal_method"], Aideal_method - ) - ) + logger.info("Switching Aideal method from {} to {}.".format(self.eos_dict["Aideal_method"], Aideal_method)) self.eos_dict["Aideal_method"] = Aideal_method # Extract needed values from kwargs needed_attributes = ["bead_library", "molecular_composition", "beads"] for key in needed_attributes: if key not in kwargs: - raise ValueError( - "The one of the following inputs is missing: {}".format( - ", ".join(tmp) - ) - ) + raise ValueError("The one of the following inputs is missing: {}".format(", ".join(tmp))) if key == "molecular_composition": self.eos_dict[key] = kwargs[key] @@ -273,10 +261,8 @@ def __init__( self.eos_dict["reduction_ratio"] = kwargs["reduction_ratio"] # Initiate association site terms - self.eos_dict["sitenames"], self.eos_dict["nk"], self.eos_dict["flag_assoc"] = ( - Aassoc.initiate_assoc_matrices( - self.beads, self.bead_library, 
self.eos_dict["molecular_composition"] - ) + self.eos_dict["sitenames"], self.eos_dict["nk"], self.eos_dict["flag_assoc"] = Aassoc.initiate_assoc_matrices( + self.beads, self.bead_library, self.eos_dict["molecular_composition"] ) assoc_output = Aassoc.calc_assoc_matrices( self.beads, @@ -322,9 +308,7 @@ def residual_helmholtz_energy(self, rho, T, xi): if len(xi) != self.number_of_components: raise ValueError( "Number of components in mole fraction list, {}, ".format(len(xi)) - + "doesn't match self.number_of_components, {}".format( - self.number_of_components - ) + + "doesn't match self.number_of_components, {}".format(self.number_of_components) ) rho = self._check_density(rho) @@ -364,16 +348,12 @@ def helmholtz_energy(self, rho, T, xi): if len(xi) != self.number_of_components: raise ValueError( "Number of components in mole fraction list, {}, ".format(len(xi)) - + "doesn't match self.number_of_components, {}".format( - self.number_of_components - ) + + "doesn't match self.number_of_components, {}".format(self.number_of_components) ) rho = self._check_density(rho) - A = self.residual_helmholtz_energy(rho, T, xi) + self.Aideal( - rho, T, xi, method=self.eos_dict["Aideal_method"] - ) + A = self.residual_helmholtz_energy(rho, T, xi) + self.Aideal(rho, T, xi, method=self.eos_dict["Aideal_method"]) return A @@ -405,16 +385,12 @@ def Aideal(self, rho, T, xi, method="Abroglie"): if len(xi) != self.number_of_components: raise ValueError( "Number of components in mole fraction list, {}, ".format(len(xi)) - + "doesn't match self.number_of_components, {}".format( - self.number_of_components - ) + + "doesn't match self.number_of_components, {}".format(self.number_of_components) ) rho = self._check_density(rho) - return Aideal.Aideal_contribution( - rho, T, xi, self.eos_dict["massi"], method=method - ) + return Aideal.Aideal_contribution(rho, T, xi, self.eos_dict["massi"], method=method) def Aassoc(self, rho, T, xi): r""" @@ -441,16 +417,12 @@ def Aassoc(self, rho, T, xi): if len(xi) != self.number_of_components: raise ValueError( "Number of components in mole fraction list, {}, ".format(len(xi)) - + "doesn't match self.number_of_components, {}".format( - self.number_of_components - ) + + "doesn't match self.number_of_components, {}".format(self.number_of_components) ) rho = self._check_density(rho) - indices = Aassoc.assoc_site_indices( - self.eos_dict["nk"], self.eos_dict["molecular_composition"], xi=xi - ) + indices = Aassoc.assoc_site_indices(self.eos_dict["nk"], self.eos_dict["molecular_composition"], xi=xi) # compute F_klab Fklab = np.exp(self.eos_dict["epsilonHB"] / T) - 1.0 @@ -461,9 +433,7 @@ def Aassoc(self, rho, T, xi): for key in keys: if key in self.eos_dict: opts[key] = self.eos_dict[key] - self.eos_dict["Kijklab"] = self.saft_source.calc_Kijklab( - T, self.eos_dict["rc_klab"], **opts - ) + self.eos_dict["Kijklab"] = self.saft_source.calc_Kijklab(T, self.eos_dict["rc_klab"], **opts) self.T = T Kklab = self.eos_dict["Kijklab"] Ktype = "ijklab" @@ -493,10 +463,7 @@ def Aassoc(self, rho, T, xi): if self.eos_dict["nk"][k, a] != 0.0: tmp = np.log(Xika[:, ind]) + ((1.0 - Xika[:, ind]) / 2.0) Assoc_contribution += ( - xi[i] - * self.eos_dict["molecular_composition"][i, k] - * self.eos_dict["nk"][k, a] - * tmp + xi[i] * self.eos_dict["molecular_composition"][i, k] * self.eos_dict["nk"][k, a] * tmp ) else: @@ -530,16 +497,12 @@ def pressure(self, rho, T, xi, step_size=1e-6): if len(xi) != self.number_of_components: raise ValueError( "Number of components in mole fraction list, {}, 
".format(len(xi)) - + "doesn't match self.number_of_components, {}".format( - self.number_of_components - ) + + "doesn't match self.number_of_components, {}".format(self.number_of_components) ) # derivative of Aideal_broglie here wrt to rho is 1/rho rho = self._check_density(rho) - P_tmp = gtb.central_difference( - rho, self.helmholtz_energy, args=(T, xi), step_size=step_size, relative=True - ) + P_tmp = gtb.central_difference(rho, self.helmholtz_energy, args=(T, xi), step_size=step_size, relative=True) pressure = P_tmp * T * constants.R * rho**2 return pressure @@ -570,9 +533,7 @@ def fugacity_coefficient(self, P, rho, xi, T, dy=1e-5, log_method=True): if len(xi) != self.number_of_components: raise ValueError( "Number of components in mole fraction list, {}, ".format(len(xi)) - + "doesn't match self.number_of_components, {}".format( - self.number_of_components - ) + + "doesn't match self.number_of_components, {}".format(self.number_of_components) ) if gtb.isiterable(T): @@ -623,9 +584,7 @@ def density_max(self, xi, T, maxpack=0.65): if len(xi) != self.number_of_components: raise ValueError( "Number of components in mole fraction list, {}, ".format(len(xi)) - + "doesn't match self.number_of_components, {}".format( - self.number_of_components - ) + + "doesn't match self.number_of_components, {}".format(self.number_of_components) ) max_density = self.saft_source.density_max(xi, T, maxpack=maxpack) @@ -686,9 +645,7 @@ def update_parameter(self, param_name, bead_names, param_value): if len(parameter_list) > 1 and len(parameter_list[1:]) != 2: raise ValueError( "Sitenames should be two different sites in the list:" - + " {}. You gave: {}".format( - self.eos_dict["sitenames"], ", ".join(parameter_list[1:]) - ) + + " {}. You gave: {}".format(self.eos_dict["sitenames"], ", ".join(parameter_list[1:])) ) super().update_parameter(param_name, bead_names, param_value) diff --git a/despasito/equations_of_state/saft/saft_toolbox.py b/despasito/equations_of_state/saft/saft_toolbox.py index 3c7e67c..d31187d 100644 --- a/despasito/equations_of_state/saft/saft_toolbox.py +++ b/despasito/equations_of_state/saft/saft_toolbox.py @@ -51,9 +51,7 @@ def calc_hard_sphere_matricies(T, sigmakl, bead_library, beads, Cprefactor_funci nbeads = np.size(beads) dkk = np.zeros(nbeads) for i in np.arange(nbeads): - prefactor = Cprefactor_funcion( - bead_library[beads[i]]["lambdar"], bead_library[beads[i]]["lambdaa"] - ) + prefactor = Cprefactor_funcion(bead_library[beads[i]]["lambdar"], bead_library[beads[i]]["lambdaa"]) dkk[i] = calc_dkk( bead_library[beads[i]]["epsilon"], bead_library[beads[i]]["sigma"], @@ -97,9 +95,7 @@ def _dkk_int(r, Ce_kT, sigma, lambdar, lambdaa): Integrand used to calculate the hard sphere diameter """ - dkk_int_tmp = 1.0 - np.exp( - -Ce_kT * (np.power(sigma / r, lambdar) - np.power(sigma / r, lambdaa)) - ) + dkk_int_tmp = 1.0 - np.exp(-Ce_kT * (np.power(sigma / r, lambdar) - np.power(sigma / r, lambdaa))) return dkk_int_tmp @@ -228,9 +224,7 @@ def calc_dkk(epsilon, sigma, T, Cprefactor, lambdar, lambdaa=6.0): return dkk -def calc_composition_dependent_variables( - xi, molecular_composition, bead_library, beads -): +def calc_composition_dependent_variables(xi, molecular_composition, bead_library, beads): r""" Calculate the factor for converting molar density to bead density and molecular mole fractions to bead fractions. 
@@ -268,10 +262,7 @@ def calc_composition_dependent_variables( for i in range(np.size(xi)): for j in range(np.size(beads)): Cmol2seg += ( - xi[i] - * molecular_composition[i, j] - * bead_library[beads[j]]["Vks"] - * bead_library[beads[j]]["Sk"] + xi[i] * molecular_composition[i, j] * bead_library[beads[j]]["Vks"] * bead_library[beads[j]]["Sk"] ) # initialize variables and arrays @@ -279,11 +270,7 @@ def calc_composition_dependent_variables( xsk = np.zeros(nbeads, float) # compute xsk for k in range(nbeads): - xsk[k] = ( - np.sum(xi * molecular_composition[:, k]) - * bead_library[beads[k]]["Vks"] - * bead_library[beads[k]]["Sk"] - ) + xsk[k] = np.sum(xi * molecular_composition[:, k]) * bead_library[beads[k]]["Vks"] * bead_library[beads[k]]["Sk"] xsk /= Cmol2seg # calculate xskl matrix @@ -319,11 +306,7 @@ def calc_zetaxstar(rho, Cmol2seg, xskl, sigmakl): """ # compute zetaxstar eq. 35 - zetaxstar = ( - rho - * Cmol2seg - * ((np.pi / 6.0) * np.sum(xskl * (sigmakl**3 * constants.molecule_per_nm3))) - ) + zetaxstar = rho * Cmol2seg * ((np.pi / 6.0) * np.sum(xskl * (sigmakl**3 * constants.molecule_per_nm3))) return zetaxstar @@ -351,11 +334,7 @@ def calc_zetax(rho, Cmol2seg, xskl, dkl): Matrix of hypothetical packing fraction based on hard sphere diameter for groups (k,l) """ - zetax = ( - rho - * Cmol2seg - * ((np.pi / 6.0) * np.sum(xskl * (dkl**3 * constants.molecule_per_nm3))) - ) + zetax = rho * Cmol2seg * ((np.pi / 6.0) * np.sum(xskl * (dkl**3 * constants.molecule_per_nm3))) return zetax @@ -377,8 +356,6 @@ def calc_KHS(zetax): (length of densities) isothermal compressibility of system with packing fraction zetax """ - KHS = ((1.0 - zetax) ** 4) / ( - 1.0 + (4.0 * zetax) + (4.0 * (zetax**2)) - (4.0 * (zetax**3)) + (zetax**4) - ) + KHS = ((1.0 - zetax) ** 4) / (1.0 + (4.0 * zetax) + (4.0 * (zetax**2)) - (4.0 * (zetax**3)) + (zetax**4)) return KHS diff --git a/despasito/equations_of_state/saft/saft_variant_example.py b/despasito/equations_of_state/saft/saft_variant_example.py index dcd1a53..e1377fc 100644 --- a/despasito/equations_of_state/saft/saft_variant_example.py +++ b/despasito/equations_of_state/saft/saft_variant_example.py @@ -151,11 +151,7 @@ def __init__(self, **kwargs): needed_attributes = ["molecular_composition", "beads", "bead_library"] for key in needed_attributes: if key not in kwargs: - raise ValueError( - "The one of the following inputs is missing: {}".format( - ", ".join(needed_attributes) - ) - ) + raise ValueError("The one of the following inputs is missing: {}".format(", ".join(needed_attributes))) elif key == "molecular_composition": if "molecular_composition" not in self.eos_dict: self.eos_dict[key] = kwargs[key] @@ -172,9 +168,7 @@ def __init__(self, **kwargs): "Sk": 1.0, "Vks": 1.0, } - self.bead_library = tb.check_bead_parameters( - self.bead_library, self._parameter_defaults - ) + self.bead_library = tb.check_bead_parameters(self.bead_library, self._parameter_defaults) if "cross_library" not in kwargs: self.cross_library = {} @@ -214,13 +208,9 @@ def __init__(self, **kwargs): # "eos_somekeyword" if "num_rings" in kwargs: self.eos_dict["num_rings"] = kwargs["num_rings"] - logger.info( - "Accepted component ring structure: {}".format(kwargs["num_rings"]) - ) + logger.info("Accepted component ring structure: {}".format(kwargs["num_rings"])) else: - self.eos_dict["num_rings"] = np.zeros( - len(self.eos_dict["molecular_composition"]) - ) + self.eos_dict["num_rings"] = np.zeros(len(self.eos_dict["molecular_composition"])) def Amonomer(self, rho, T, xi): r""" @@ -294,11 
+284,7 @@ def density_max(self, xi, T, maxpack=0.65): max_density = ( maxpack * 6.0 - / ( - self.eos_dict["Cmol2seg"] - * np.pi - * np.sum(self.eos_dict["xskl"] * (self.eos_dict["sigma_kl"] ** 3)) - ) + / (self.eos_dict["Cmol2seg"] * np.pi * np.sum(self.eos_dict["xskl"] * (self.eos_dict["sigma_kl"] ** 3))) / constants.molecule_per_nm3 ) @@ -368,13 +354,9 @@ def calc_Kijklab(self, T, rc_klab, rd_klab=None, reduction_ratio=0.25): dij_bar = np.zeros((self.ncomp, self.ncomp)) for i in range(self.ncomp): for j in range(self.ncomp): - dij_bar[i, j] = np.mean( - [self.eos_dict["sigma_ij"][i], self.eos_dict["sigma_ij"][j]] - ) + dij_bar[i, j] = np.mean([self.eos_dict["sigma_ij"][i], self.eos_dict["sigma_ij"][j]]) - Kijklab = Aassoc.calc_bonding_volume( - rc_klab, dij_bar, rd_klab=rd_klab, reduction_ratio=reduction_ratio - ) + Kijklab = Aassoc.calc_bonding_volume(rc_klab, dij_bar, rd_klab=rd_klab, reduction_ratio=reduction_ratio) return Kijklab @@ -423,13 +405,11 @@ def parameter_refresh(self, bead_library, cross_library): self.calc_component_averaged_properties() if not np.any(np.isnan(self.xi)): - self.eos_dict["Cmol2seg"], self.eos_dict["xskl"] = ( - stb.calc_composition_dependent_variables( - self.xi, - self.eos_dict["molecular_composition"], - self.bead_library, - self.beads, - ) + self.eos_dict["Cmol2seg"], self.eos_dict["xskl"] = stb.calc_composition_dependent_variables( + self.xi, + self.eos_dict["molecular_composition"], + self.bead_library, + self.beads, ) self.alphakl = ( 2.0 diff --git a/despasito/examples/saft_gamma_mie/butane_solubility/fit_grid.py b/despasito/examples/saft_gamma_mie/butane_solubility/fit_grid.py index 9b571c3..52d2238 100644 --- a/despasito/examples/saft_gamma_mie/butane_solubility/fit_grid.py +++ b/despasito/examples/saft_gamma_mie/butane_solubility/fit_grid.py @@ -1,4 +1,3 @@ - import numpy as np import despasito @@ -17,20 +16,20 @@ fit.fit( optimization_parameters={ - 'fit_bead': 'CH3', - 'fit_parameter_names': ['epsilon_CH2'], - 'epsilon_CH2_bounds': [150.0, 600.0], - 'parameters_guess': [300.0] + "fit_bead": "CH3", + "fit_parameter_names": ["epsilon_CH2"], + "epsilon_CH2_bounds": [150.0, 600.0], + "parameters_guess": [300.0], }, exp_data={ - 'Knovel': { - 'data_class_type': 'liquid_density', + "Knovel": { + "data_class_type": "liquid_density", "eos_obj": Eos, - "calculation_type": 'liquid_properties', - "T": np.array([272.15, 323.15, 298.15]), - "rhol": np.array([10357. , 10364.8, 10140. 
]), - "delta": np.array([14453., 13700., 14100.]), + "calculation_type": "liquid_properties", + "T": np.array([272.15, 323.15, 298.15]), + "rhol": np.array([10357.0, 10364.8, 10140.0]), + "delta": np.array([14453.0, 13700.0, 14100.0]), }, }, - global_opts={'method': 'grid_minimization', 'Ns': 3}, -) \ No newline at end of file + global_opts={"method": "grid_minimization", "Ns": 3}, +) diff --git a/despasito/input_output/read_input.py b/despasito/input_output/read_input.py index 203782b..9212e9b 100755 --- a/despasito/input_output/read_input.py +++ b/despasito/input_output/read_input.py @@ -101,16 +101,12 @@ def extract_calc_data(input_fname, path=".", **thermo_dict): # Make bead data dictionary for EOS # process input file if "bead_configuration" in input_dict: - beads, molecular_composition = process_bead_data( - input_dict["bead_configuration"] - ) + beads, molecular_composition = process_bead_data(input_dict["bead_configuration"]) eos_dict = {"beads": beads, "molecular_composition": molecular_composition} elif "optimization_parameters" in input_dict: eos_dict = {} else: - raise ValueError( - "Bead configuration line is missing for thermodynamic calculation." - ) + raise ValueError("Bead configuration line is missing for thermodynamic calculation.") # read EOS groups file eos_dict["bead_library"] = json_to_dict(input_dict["EOSgroup"]) @@ -320,19 +316,15 @@ def process_param_fit_inputs(thermo_dict): else: new_thermo_dict[key] = value - test1 = set(["exp_data", "optimization_parameters"]).issubset( - list(new_thermo_dict.keys()) - ) - test2 = set(["fit_bead", "fit_parameter_names"]).issubset( - list(new_thermo_dict["optimization_parameters"].keys()) - ) + test1 = set(["exp_data", "optimization_parameters"]).issubset(list(new_thermo_dict.keys())) + test2 = set(["fit_bead", "fit_parameter_names"]).issubset(list(new_thermo_dict["optimization_parameters"].keys())) if not all([test1, test2]): raise ValueError( "An exp_data dictionary (dictionary with 'data_class_type' key) as well as" " an optimization_parameters dictionary with 'fit_bead' and " "'fit_parameter_names' must be provided." ) - + return new_thermo_dict @@ -412,13 +404,9 @@ def process_exp_data_file(fname): """ try: - data = np.transpose( - np.genfromtxt(fname, delimiter=",", names=True, skip_header=1) - ) + data = np.transpose(np.genfromtxt(fname, delimiter=",", names=True, skip_header=1)) except Exception: - raise ValueError( - "Cannot import '{}', Check data file formatting.".format(fname) - ) + raise ValueError("Cannot import '{}', Check data file formatting.".format(fname)) file_dict = {name: data[name] for name in data.dtype.names} # Sort through properties diff --git a/despasito/input_output/write_output.py b/despasito/input_output/write_output.py index b3a12d1..9686dce 100644 --- a/despasito/input_output/write_output.py +++ b/despasito/input_output/write_output.py @@ -24,9 +24,7 @@ def write_EOSparameters(library, filename): # sort and write SAFT dict for i in library: - library[i] = collections.OrderedDict( - sorted(list(library[i].items()), key=lambda tup: tup[0].lower()) - ) + library[i] = collections.OrderedDict(sorted(list(library[i].items()), key=lambda tup: tup[0].lower())) f = open(filename, "w") json.dump(library, f, indent=4) @@ -119,8 +117,7 @@ def writeout_fit_dict(output_dict, output_file="fit_output.txt"): header = ( "DESPASITO was used to fit parameters for the bead {} Obj. 
Value:" - " {}\n".format(output_dict["fit_bead"], output_dict["objective_value"]) - + "Parameter, Value\n" + " {}\n".format(output_dict["fit_bead"], output_dict["objective_value"]) + "Parameter, Value\n" ) with open(output_file, "w") as f: f.write(header) diff --git a/despasito/main.py b/despasito/main.py index b2aae4d..51535ae 100644 --- a/despasito/main.py +++ b/despasito/main.py @@ -47,10 +47,7 @@ def get_parser(): "--verbose", action="count", default=0, - help=( - "Verbose level: repeat up to three times for Warning, Info, or Debug " - + "levels." - ), + help=("Verbose level: repeat up to three times for Warning, Info, or Debug " + "levels."), ) parser.add_argument( "--log", @@ -64,20 +61,14 @@ def get_parser(): "--ncores", dest="ncores", type=int, - help=( - "Set the number of cores used. A value of -1 will request all possible " - + "resources." - ), + help=("Set the number of cores used. A value of -1 will request all possible " + "resources."), default=1, ) parser.add_argument( "-p", "--path", default=".", - help=( - "Set the location of the data/library files (e.g. SAFTcross, etc.) where" - + " despasito will look." - ), + help=("Set the location of the data/library files (e.g. SAFTcross, etc.) where" + " despasito will look."), ) parser.add_argument( "-c", @@ -124,9 +115,7 @@ def run(filename="input.json", path=".", **kwargs): # read input file (need to add command line specification) logger.info("Begin processing input file: %s" % filename) - eos_dict, thermo_dict, output_file = read_input.extract_calc_data( - filename, path, **kwargs - ) + eos_dict, thermo_dict, output_file = read_input.extract_calc_data(filename, path, **kwargs) thermo_dict["MultiprocessingObject"] = kwargs["MultiprocessingObject"] @@ -160,9 +149,7 @@ def run(filename="input.json", path=".", **kwargs): output_dict.update( { "fit_bead": thermo_dict["optimization_parameters"]["fit_bead"], - "fit_parameter_names": thermo_dict["optimization_parameters"][ - "fit_parameter_names" - ], + "fit_parameter_names": thermo_dict["optimization_parameters"]["fit_parameter_names"], } ) logger.info("Finished parametrization") @@ -173,9 +160,7 @@ def run(filename="input.json", path=".", **kwargs): output_dict = thermo(Eos, **thermo_dict.copy()) logger.info("Finished thermodynamic calculation") try: - write_output.writeout_thermo_dict( - output_dict, thermo_dict["calculation_type"], **file_dict - ) + write_output.writeout_thermo_dict(output_dict, thermo_dict["calculation_type"], **file_dict) except Exception: logger.info("Final Output: {}".format(output_dict)) diff --git a/despasito/parameter_fitting/__init__.py b/despasito/parameter_fitting/__init__.py index 8dd922b..bfb82f9 100644 --- a/despasito/parameter_fitting/__init__.py +++ b/despasito/parameter_fitting/__init__.py @@ -26,7 +26,7 @@ def fit( global_opts={}, minimizer_opts=None, MultiprocessingObject=None, - **kwargs + **kwargs, ): r""" Fit parameters for an equation of state object with given experimental data. 
@@ -133,14 +133,12 @@ def fit( if not isinstance(optimization_parameters["fit_parameter_names"], list): if isinstance(optimization_parameters["fit_parameter_names"], str): - optimization_parameters["fit_parameter_names"] = [ - optimization_parameters["fit_parameter_names"] - ] + optimization_parameters["fit_parameter_names"] = [optimization_parameters["fit_parameter_names"]] else: raise ValueError( f"'fit_parameter_names' must be a list not: {optimization_parameters['fit_parameter_names']}" ) - + # Generate initial guess and bounds for parameters if none was given optimization_parameters = ff.consolidate_bounds(optimization_parameters).copy() if "bounds" in optimization_parameters: @@ -156,10 +154,7 @@ def fit( if "parameters_guess" in optimization_parameters: parameters_guess = optimization_parameters["parameters_guess"] if len(parameters_guess) != len(optimization_parameters["fit_parameter_names"]): - raise ValueError( - "The number of initial parameters given isn't the same number of " - "parameters to be fit." - ) + raise ValueError("The number of initial parameters given isn't the same number of " "parameters to be fit.") else: parameters_guess = ff.initial_guess(optimization_parameters, Eos) logger.info("Initial guess in parameters: {}".format(parameters_guess)) @@ -175,9 +170,7 @@ def fit( for key, data_dict in exp_data.items(): fittype = data_dict["data_class_type"] try: - exp_module = import_module( - "." + fittype, package="despasito.parameter_fitting.data_classes" - ) + exp_module = import_module("." + fittype, package="despasito.parameter_fitting.data_classes") data_class = getattr(exp_module, "Data") except Exception: if not type_list: @@ -196,9 +189,7 @@ def fit( exp_dict[key] = instance logger.info("Initiated exp. data object: {}".format(instance.name)) except Exception: - raise AttributeError( - "Data set, {}, did not properly initiate object".format(key) - ) + raise AttributeError("Data set, {}, did not properly initiate object".format(key)) # Check global optimization method if "method" in dicts["global_opts"]: @@ -216,7 +207,7 @@ def fit( optimization_parameters["fit_bead"], optimization_parameters["fit_parameter_names"], exp_dict, - **dicts + **dicts, ) logger.info("Fitting terminated:\n{}".format(result.message)) diff --git a/despasito/parameter_fitting/data_classes/TLVE.py b/despasito/parameter_fitting/data_classes/TLVE.py index 720b832..808c4bd 100644 --- a/despasito/parameter_fitting/data_classes/TLVE.py +++ b/despasito/parameter_fitting/data_classes/TLVE.py @@ -132,36 +132,23 @@ def __init__(self, data_dict): ) ) - self.weights.update( - gtb.check_length_dict(self.weights, self.result_keys, lx=self.npoints) - ) + self.weights.update(gtb.check_length_dict(self.weights, self.result_keys, lx=self.npoints)) self.weights.update(gtb.set_defaults(self.weights, self.result_keys, 1.0)) if "Tlist" not in self.thermodict: - raise ImportError( - "Given TLVE data, values for T should have been provided." - ) + raise ImportError("Given TLVE data, values for T should have been provided.") thermo_keys = ["xilist", "yilist", "Plist"] if not any([key in self.thermodict for key in thermo_keys]): - raise ImportError( - "Given TLVE data, mole fractions and/or pressure should have been " - "provided." 
- ) + raise ImportError("Given TLVE data, mole fractions and/or pressure should have been " "provided.") if self.thermodict["calculation_type"] is None: if self.thermodict["xilist"]: self.thermodict["calculation_type"] = "bubble_pressure" - logger.warning( - "No calculation type has been provided. Assume a calculation type " - "of bubble_pressure" - ) + logger.warning("No calculation type has been provided. Assume a calculation type " "of bubble_pressure") elif self.thermodict["yilist"]: self.thermodict["calculation_type"] = "dew_pressure" - logger.warning( - "No calculation type has been provided. Assume a calculation type" - " of dew_pressure" - ) + logger.warning("No calculation type has been provided. Assume a calculation type" " of dew_pressure") else: raise ValueError("Unknown calculation instructions") @@ -234,10 +221,7 @@ def objective(self): if "Plist" in self.thermodict: obj_value[0] = ff.obj_function_form( - phase_list[0], - self.thermodict["Plist"], - weights=self.weights["Plist"], - **self.obj_opts + phase_list[0], self.thermodict["Plist"], weights=self.weights["Plist"], **self.obj_opts ) if self.thermodict["calculation_type"] == "bubble_pressure": @@ -246,10 +230,7 @@ def objective(self): obj_value[1] = 0 for i in range(len(yi)): obj_value[1] += ff.obj_function_form( - phase_list[1 + i], - yi[i], - weights=self.weights["yilist"], - **self.obj_opts + phase_list[1 + i], yi[i], weights=self.weights["yilist"], **self.obj_opts ) elif self.thermodict["calculation_type"] == "dew_pressure": if "xilist" in self.thermodict: @@ -257,17 +238,10 @@ def objective(self): obj_value[1] = 0 for i in range(len(xi)): obj_value[1] += ff.obj_function_form( - phase_list[1 + i], - xi[i], - weights=self.weights["xilist"], - **self.obj_opts + phase_list[1 + i], xi[i], weights=self.weights["xilist"], **self.obj_opts ) - logger.info( - "Obj. breakdown for {}: P {}, zi {}".format( - self.name, obj_value[0], obj_value[1] - ) - ) + logger.info("Obj. breakdown for {}: P {}, zi {}".format(self.name, obj_value[0], obj_value[1])) if all([(np.isnan(x) or x == 0.0) for x in obj_value]): obj_total = np.inf diff --git a/despasito/parameter_fitting/data_classes/flash.py b/despasito/parameter_fitting/data_classes/flash.py index 024adf3..d0bc8bb 100644 --- a/despasito/parameter_fitting/data_classes/flash.py +++ b/despasito/parameter_fitting/data_classes/flash.py @@ -127,26 +127,16 @@ def __init__(self, data_dict): self.npoints = np.size(self.thermodict["Tlist"]) thermo_defaults = [constants.standard_pressure, constants.standard_temperature] - self.thermodict.update( - gtb.set_defaults( - self.thermodict, thermo_keys, thermo_defaults, lx=self.npoints - ) - ) + self.thermodict.update(gtb.set_defaults(self.thermodict, thermo_keys, thermo_defaults, lx=self.npoints)) - self.weights.update( - gtb.check_length_dict(self.weights, self.result_keys, lx=self.npoints) - ) + self.weights.update(gtb.check_length_dict(self.weights, self.result_keys, lx=self.npoints)) self.weights.update(gtb.set_defaults(self.weights, self.result_keys, 1.0)) if "Plist" not in self.thermodict and "Tlist" not in self.thermodict: - raise ImportError( - "Given flash data, values for P and T should have been provided." - ) + raise ImportError("Given flash data, values for P and T should have been provided.") if "xilist" not in self.thermodict or "yilist" not in self.thermodict: - raise ImportError( - "Given flash data, mole fractions should have been provided." 
- ) + raise ImportError("Given flash data, mole fractions should have been provided.") logger.info( "Data type 'flash' initiated with calculation_type, {}, and data " @@ -205,10 +195,7 @@ def objective(self): obj_value[0] = 0 for i in range(len(yi)): obj_value[0] += ff.obj_function_form( - phase_list[i], - yi[i], - weights=self.weights["yilist"], - **self.obj_opts + phase_list[i], yi[i], weights=self.weights["yilist"], **self.obj_opts ) if "xilist" in self.thermodict: @@ -219,14 +206,10 @@ def objective(self): phase_list[self.Eos.number_of_components + i], xi[i], weights=self.weights["xilist"], - **self.obj_opts + **self.obj_opts, ) - logger.info( - "Obj. breakdown for {}: xi {}, yi {}".format( - self.name, obj_value[0], obj_value[1] - ) - ) + logger.info("Obj. breakdown for {}: xi {}, yi {}".format(self.name, obj_value[0], obj_value[1])) if all([(np.isnan(x) or x == 0.0) for x in obj_value]): obj_total = np.inf diff --git a/despasito/parameter_fitting/data_classes/liquid_density.py b/despasito/parameter_fitting/data_classes/liquid_density.py index 31408e0..cdba8b5 100644 --- a/despasito/parameter_fitting/data_classes/liquid_density.py +++ b/despasito/parameter_fitting/data_classes/liquid_density.py @@ -128,22 +128,13 @@ def __init__(self, data_dict): np.array([[1.0] for x in range(self.npoints)]), constants.standard_temperature, ] - self.thermodict.update( - gtb.set_defaults( - self.thermodict, thermo_keys, thermo_defaults, lx=self.npoints - ) - ) + self.thermodict.update(gtb.set_defaults(self.thermodict, thermo_keys, thermo_defaults, lx=self.npoints)) - self.weights.update( - gtb.check_length_dict(self.weights, self.result_keys, lx=self.npoints) - ) + self.weights.update(gtb.check_length_dict(self.weights, self.result_keys, lx=self.npoints)) self.weights.update(gtb.set_defaults(self.weights, self.result_keys, 1.0)) if "Tlist" not in self.thermodict and "rhol" not in self.thermodict: - raise ImportError( - "Given liquid property data, values for T, xi, and rhol should have" - " been provided." - ) + raise ImportError("Given liquid property data, values for T, xi, and rhol should have" " been provided.") logger.info( "Data type 'liquid_properties' initiated with calculation_type, {}, and " @@ -197,10 +188,7 @@ def objective(self): # objective function obj_value = ff.obj_function_form( - phase_list, - self.thermodict["rhol"], - weights=self.weights["rhol"], - **self.obj_opts + phase_list, self.thermodict["rhol"], weights=self.weights["rhol"], **self.obj_opts ) logger.info("Obj. breakdown for {}: rhol {}".format(self.name, obj_value)) diff --git a/despasito/parameter_fitting/data_classes/saturation_properties.py b/despasito/parameter_fitting/data_classes/saturation_properties.py index eb2f25e..049be69 100644 --- a/despasito/parameter_fitting/data_classes/saturation_properties.py +++ b/despasito/parameter_fitting/data_classes/saturation_properties.py @@ -120,9 +120,7 @@ def __init__(self, data_dict): self.thermodict["Psat"] = data_dict["P"] del data_dict["P"] if "P" in self.weights: - if gtb.isiterable(self.weights["P"]) and len(self.weights["P"]) != len( - self.thermodict["Psat"] - ): + if gtb.isiterable(self.weights["P"]) and len(self.weights["P"]) != len(self.thermodict["Psat"]): raise ValueError( "Array of weights for '{}' values not equal to number of " "experimental values given.".format("P") @@ -144,35 +142,23 @@ def __init__(self, data_dict): if "xilist" not in self.thermodict and self.Eos.number_of_components > 1: raise ValueError( - "Ambiguous instructions. 
Include xi to define intended component to " - "obtain saturation properties" + "Ambiguous instructions. Include xi to define intended component to " "obtain saturation properties" ) thermo_defaults = [ np.array([[1.0] for x in range(self.npoints)]), constants.standard_temperature, ] - self.thermodict.update( - gtb.set_defaults( - self.thermodict, thermo_keys, thermo_defaults, lx=self.npoints - ) - ) + self.thermodict.update(gtb.set_defaults(self.thermodict, thermo_keys, thermo_defaults, lx=self.npoints)) - self.weights.update( - gtb.check_length_dict(self.weights, self.result_keys, lx=self.npoints) - ) + self.weights.update(gtb.check_length_dict(self.weights, self.result_keys, lx=self.npoints)) self.weights.update(gtb.set_defaults(self.weights, self.result_keys, 1.0)) if "Tlist" not in self.thermodict: - raise ImportError( - "Given saturation data, value(s) for T should have been provided." - ) + raise ImportError("Given saturation data, value(s) for T should have been provided.") tmp = ["Psat", "rhol", "rhov"] if not any([x in self.thermodict for x in tmp]): - raise ImportError( - "Given saturation data, values for Psat, rhol, and/or rhov should have " - "been provided." - ) + raise ImportError("Given saturation data, values for Psat, rhol, and/or rhov should have " "been provided.") logger.info( "Data type 'saturation_properties' initiated with calculation_type, {}, and" @@ -230,24 +216,15 @@ def objective(self): obj_value = np.zeros(3) if "Psat" in self.thermodict: obj_value[0] = ff.obj_function_form( - phase_list[0], - self.thermodict["Psat"], - weights=self.weights["Psat"], - **self.obj_opts + phase_list[0], self.thermodict["Psat"], weights=self.weights["Psat"], **self.obj_opts ) if "rhol" in self.thermodict: obj_value[1] = ff.obj_function_form( - phase_list[1], - self.thermodict["rhol"], - weights=self.weights["rhol"], - **self.obj_opts + phase_list[1], self.thermodict["rhol"], weights=self.weights["rhol"], **self.obj_opts ) if "rhov" in self.thermodict: obj_value[2] = ff.obj_function_form( - phase_list[2], - self.thermodict["rhov"], - weights=self.weights["rhov"], - **self.obj_opts + phase_list[2], self.thermodict["rhov"], weights=self.weights["rhov"], **self.obj_opts ) logger.info( diff --git a/despasito/parameter_fitting/data_classes/solubility_parameter.py b/despasito/parameter_fitting/data_classes/solubility_parameter.py index 1ca1596..3043ba4 100644 --- a/despasito/parameter_fitting/data_classes/solubility_parameter.py +++ b/despasito/parameter_fitting/data_classes/solubility_parameter.py @@ -126,22 +126,13 @@ def __init__(self, data_dict): np.array([[1.0] for x in range(self.npoints)]), constants.standard_temperature, ] - self.thermodict.update( - gtb.set_defaults( - self.thermodict, thermo_keys, thermo_defaults, lx=self.npoints - ) - ) + self.thermodict.update(gtb.set_defaults(self.thermodict, thermo_keys, thermo_defaults, lx=self.npoints)) - self.weights.update( - gtb.check_length_dict(self.weights, self.result_keys, lx=self.npoints) - ) + self.weights.update(gtb.check_length_dict(self.weights, self.result_keys, lx=self.npoints)) self.weights.update(gtb.set_defaults(self.weights, self.result_keys, 1.0)) if "Tlist" not in self.thermodict and "delta" not in self.thermodict: - raise ImportError( - "Given solubility data, value(s) for T and delta should have been " - "provided." 
- ) + raise ImportError("Given solubility data, value(s) for T and delta should have been " "provided.") logger.info( "Data type 'solubility parameter' initiated with calculation_type, {}, and" @@ -199,24 +190,14 @@ def objective(self): obj_value = np.zeros(2) if "delta" in self.thermodict: obj_value[0] = ff.obj_function_form( - phase_list[0], - self.thermodict["delta"], - weights=self.weights["delta"], - **self.obj_opts + phase_list[0], self.thermodict["delta"], weights=self.weights["delta"], **self.obj_opts ) if "rhol" in self.thermodict: obj_value[1] = ff.obj_function_form( - phase_list[1], - self.thermodict["rhol"], - weights=self.weights["rhol"], - **self.obj_opts + phase_list[1], self.thermodict["rhol"], weights=self.weights["rhol"], **self.obj_opts ) - logger.info( - "Obj. breakdown for {}: delta {}, rhol {}".format( - self.name, obj_value[0], obj_value[1] - ) - ) + logger.info("Obj. breakdown for {}: delta {}, rhol {}".format(self.name, obj_value[0], obj_value[1])) if all([(np.isnan(x) or x == 0.0) for x in obj_value]): obj_total = np.inf diff --git a/despasito/parameter_fitting/fit_functions.py b/despasito/parameter_fitting/fit_functions.py index 071036e..f04f4e4 100644 --- a/despasito/parameter_fitting/fit_functions.py +++ b/despasito/parameter_fitting/fit_functions.py @@ -102,15 +102,11 @@ def check_parameter_bounds(optimization_parameters, Eos, bounds): """ - new_bounds = [ - (0, 0) for x in range(len(optimization_parameters["fit_parameter_names"])) - ] + new_bounds = [(0, 0) for x in range(len(optimization_parameters["fit_parameter_names"]))] # Check boundary parameters to be sure they're in a reasonable range for i, param in enumerate(optimization_parameters["fit_parameter_names"]): fit_parameter_names_list = param.split("_") - new_bounds[i] = tuple( - Eos.check_bounds(fit_parameter_names_list[0], param, bounds[i]) - ) + new_bounds[i] = tuple(Eos.check_bounds(fit_parameter_names_list[0], param, bounds[i])) return new_bounds @@ -176,9 +172,7 @@ def consolidate_bounds(optimization_parameters): if "bounds" in key2: tmp = key2.replace("_bounds", "") if tmp in optimization_parameters["fit_parameter_names"]: - logger.info( - "Accepted bounds for parameter, '{}': {}".format(tmp, value2) - ) + logger.info("Accepted bounds for parameter, '{}': {}".format(tmp, value2)) ind = optimization_parameters["fit_parameter_names"].index(tmp) new_optimization_parameters["bounds"][ind] = value2 else: @@ -313,20 +307,14 @@ def global_minimization(global_method, *args, **kwargs): """ logger.info("Using global optimization method: {}".format(global_method)) - calc_list = [ - o[0] - for o in getmembers(global_methods_mod) - if (isfunction(o[1]) and o[0][0] != "_") - ] + calc_list = [o[0] for o in getmembers(global_methods_mod) if (isfunction(o[1]) and o[0][0] != "_")] try: func = getattr(global_methods_mod, global_method) except Exception: raise ImportError( "The global minimization type, '{}',".format(global_method) + " was not found\nThe following " - + "calculation types are supported: {}".format( - ", ".join(calc_list) - ) + + "calculation types are supported: {}".format(", ".join(calc_list)) ) output = func(*args, **kwargs) @@ -367,11 +355,7 @@ def initialize_constraints(constraints, constraint_type): """ - calc_list = [ - o[0] - for o in getmembers(constraints_mod) - if (isfunction(o[1]) and o[0][0] != "_") - ] + calc_list = [o[0] for o in getmembers(constraints_mod) if (isfunction(o[1]) and o[0][0] != "_")] new_constraints = [] for const_type, kwargs in constraints.items(): @@ -389,11 +373,7 @@ 
def initialize_constraints(constraints, constraint_type): ) if "args" not in kwargs: - raise ValueError( - "Constraint function, {}, is missing arguments".format( - kwargs["function"] - ) - ) + raise ValueError("Constraint function, {}, is missing arguments".format(kwargs["function"])) if constraint_type == "class": if "type" not in kwargs or kwargs["type"] in ["linear", "nonlinear"]: @@ -403,28 +383,19 @@ def initialize_constraints(constraints, constraint_type): ) if kwargs["type"] == "linear": if "kwargs" not in kwargs: - output = LinearConstraint( - func, kwargs["args"][0], kwargs["args"][1] - ) + output = LinearConstraint(func, kwargs["args"][0], kwargs["args"][1]) else: - output = LinearConstraint( - func, kwargs["args"][0], kwargs["args"][1], **kwargs - ) + output = LinearConstraint(func, kwargs["args"][0], kwargs["args"][1], **kwargs) elif kwargs["type"] == "nonlinear": if "kwargs" not in kwargs: - output = NonlinearConstraint( - func, kwargs["args"][0], kwargs["args"][1] - ) + output = NonlinearConstraint(func, kwargs["args"][0], kwargs["args"][1]) else: - output = NonlinearConstraint( - func, kwargs["args"][0], kwargs["args"][1], **kwargs - ) + output = NonlinearConstraint(func, kwargs["args"][0], kwargs["args"][1], **kwargs) elif constraint_type == "dict": if "type" not in kwargs or kwargs["type"] in ["eq", "ineq"]: raise ValueError( - "Constraint, {}, does not".format(kwargs["function"]) - + " have type. Type can be 'eq' or 'ineq'." + "Constraint, {}, does not".format(kwargs["function"]) + " have type. Type can be 'eq' or 'ineq'." ) output = {"type": kwargs["type"], "function": func, "args": kwargs["args"]} else: @@ -435,9 +406,7 @@ def initialize_constraints(constraints, constraint_type): return tuple(new_constraints) -def compute_obj( - beadparams, fit_bead, fit_parameter_names, exp_dict, bounds, frozen_parameters=None -): +def compute_obj(beadparams, fit_bead, fit_parameter_names, exp_dict, bounds, frozen_parameters=None): r""" Fit defined parameters for equation of state object with given experimental data. @@ -483,8 +452,7 @@ def compute_obj( beadparams = np.array(list(frozen_parameters) + list(beadparams)) else: raise ValueError( - "The length of initial guess vector should be the same number of " - + "parameters to be fit." + "The length of initial guess vector should be the same number of " + "parameters to be fit." 
) logger.info( @@ -501,11 +469,7 @@ def compute_obj( data_obj.update_parameters(fit_bead, fit_parameter_names, beadparams) obj_function.append(data_obj.objective()) except Exception: - logger.exception( - "Failed to evaluate objective function for {} of type {}.".format( - key, data_obj.name - ) - ) + logger.exception("Failed to evaluate objective function for {} of type {}.".format(key, data_obj.name)) obj_function.append(np.inf) obj_total = np.nansum(obj_function) @@ -513,17 +477,11 @@ def compute_obj( # Add penalty for being out of bounds for the sake of inner minimization for i, param in enumerate(beadparams): if param < bounds[i][0]: - logger.debug( - "Adding penalty to {} parameter for being lower than range".format( - fit_parameter_names[i] - ) - ) + logger.debug("Adding penalty to {} parameter for being lower than range".format(fit_parameter_names[i])) obj_total += (1e3 * (param - bounds[i][0])) ** 8 elif param > bounds[i][1]: logger.debug( - "Adding penalty to {} parameter for being higher than range".format( - fit_parameter_names[i] - ) + "Adding penalty to {} parameter for being higher than range".format(fit_parameter_names[i]) ) obj_total += (1e3 * (param - bounds[i][1])) ** 8 else: @@ -624,11 +582,7 @@ def obj_function_form( ) if gtb.isiterable(weights): weight_tmp = np.array( - [ - weights[i] - for i in range(len(data_test)) - if not np.isnan((data_test[i] - data0[i]) / data0[i]) - ] + [weights[i] for i in range(len(data_test)) if not np.isnan((data_test[i] - data0[i]) / data0[i])] ) else: weight_tmp = weights @@ -641,15 +595,11 @@ def obj_function_form( elif method == "sum-squared-deviation-boltz": data_min = np.min(data_tmp) - obj_value = np.sum( - data_tmp**2 * weight_tmp * np.exp((data_min - data_tmp) / np.abs(data_min)) - ) + obj_value = np.sum(data_tmp**2 * weight_tmp * np.exp((data_min - data_tmp) / np.abs(data_min))) elif method == "sum-deviation-boltz": data_min = np.min(data_tmp) - obj_value = np.sum( - data_tmp * weight_tmp * np.exp((data_min - data_tmp) / np.abs(data_min)) - ) + obj_value = np.sum(data_tmp * weight_tmp * np.exp((data_min - data_tmp) / np.abs(data_min))) elif method == "percent-absolute-average-deviation": obj_value = np.mean(np.abs(data_tmp) * weight_tmp) * 100 diff --git a/despasito/parameter_fitting/global_methods.py b/despasito/parameter_fitting/global_methods.py index e18d9d9..7718461 100644 --- a/despasito/parameter_fitting/global_methods.py +++ b/despasito/parameter_fitting/global_methods.py @@ -17,9 +17,7 @@ logger = logging.getLogger(__name__) -def single_objective( - parameters_guess, bounds, fit_bead, fit_parameter_names, exp_dict, global_opts={} -): +def single_objective(parameters_guess, bounds, fit_bead, fit_parameter_names, exp_dict, global_opts={}): r""" Evaluate parameter set for equation of state with given experimental data @@ -52,13 +50,9 @@ def single_objective( """ if len(global_opts) > 0: - logger.info( - "The fitting method 'single_objective' does not have further options" - ) + logger.info("The fitting method 'single_objective' does not have further options") - obj_value = ff.compute_obj( - parameters_guess, fit_bead, fit_parameter_names, exp_dict, bounds - ) + obj_value = ff.compute_obj(parameters_guess, fit_bead, fit_parameter_names, exp_dict, bounds) result = spo.OptimizeResult( x=parameters_guess, @@ -141,14 +135,9 @@ def differential_evolution( else: filename = None - if ( - "write_intermediate_file" in global_opts - and global_opts["write_intermediate_file"] - ): + if "write_intermediate_file" in global_opts and 
global_opts["write_intermediate_file"]: del global_opts["write_intermediate_file"] - global_opts["callback"] = _WriteParameterResults( - fit_parameter_names, obj_cut=obj_cut, filename=filename - ) + global_opts["callback"] = _WriteParameterResults(fit_parameter_names, obj_cut=obj_cut, filename=filename) # Options for differential evolution, set defaults in new_global_opts new_global_opts = {"init": "random"} @@ -157,11 +146,7 @@ def differential_evolution( if key == "MultiprocessingObject": flag_workers = "workers" in global_opts and global_opts["workers"] > 1 if value.ncores > 1 and flag_workers: - logger.info( - "Differential Evolution algorithm is using {} workers.".format( - value.ncores - ) - ) + logger.info("Differential Evolution algorithm is using {} workers.".format(value.ncores)) new_global_opts["workers"] = value._pool.map exp_dict = _del_Data_MultiprocessingObject(exp_dict) elif key not in obj_kwargs: @@ -173,10 +158,7 @@ def differential_evolution( logger.info("Differential Evolution Options: {}".format(global_opts)) result = spo.differential_evolution( - ff.compute_obj, - bounds, - args=(fit_bead, fit_parameter_names, exp_dict, bounds), - **global_opts + ff.compute_obj, bounds, args=(fit_bead, fit_parameter_names, exp_dict, bounds), **global_opts ) return result @@ -278,7 +260,7 @@ def shgo( bounds, args=(fit_bead, fit_parameter_names, exp_dict, bounds), minimizer_kwargs=minimizer_opts, - **global_opts + **global_opts, ) return result @@ -354,11 +336,7 @@ def grid_minimization( for key, value in global_opts.items(): if key == "MultiprocessingObject": if value.ncores > 1: - logger.info( - "Grid minimization algorithm is using {} workers.".format( - value.ncores - ) - ) + logger.info("Grid minimization algorithm is using {} workers.".format(value.ncores)) new_global_opts["MultiprocessingObject"] = value flag_use_mp_object = True exp_dict = _del_Data_MultiprocessingObject(exp_dict) @@ -396,13 +374,9 @@ def grid_minimization( for x0 in x0_array: tmp1 = x0[global_opts["split_grid_minimization"]:] tmp2 = x0[: global_opts["split_grid_minimization"]] - inputs.append( - (tmp1, (*args, tmp2), bounds, constraints, minimizer_opts) - ) + inputs.append((tmp1, (*args, tmp2), bounds, constraints, minimizer_opts)) else: - inputs = [ - (x0, args, bounds, constraints, minimizer_opts) for x0 in x0_array - ] + inputs = [(x0, args, bounds, constraints, minimizer_opts) for x0 in x0_array] else: # Initialization based on implementation in scipy.optimize.brute @@ -435,28 +409,17 @@ def grid_minimization( x0_array = np.reshape(x0_array, (inpt_shape[0], np.prod(inpt_shape[1:]))).T if global_opts["split_grid_minimization"] != 0: - min_parameters = list( - parameters_guess[global_opts["split_grid_minimization"]:] - ) - inputs = [ - (min_parameters, (*args, x0), bounds, constraints, minimizer_opts) - for x0 in x0_array - ] + min_parameters = list(parameters_guess[global_opts["split_grid_minimization"]:]) + inputs = [(min_parameters, (*args, x0), bounds, constraints, minimizer_opts) for x0 in x0_array] else: - inputs = [ - (x0, args, bounds, constraints, minimizer_opts) for x0 in x0_array - ] + inputs = [(x0, args, bounds, constraints, minimizer_opts) for x0 in x0_array] lx = len(x0_array) # Start computation if flag_use_mp_object: - x0, results, fval = global_opts["MultiprocessingObject"].pool_job( - _grid_minimization_wrapper, inputs - ) + x0, results, fval = global_opts["MultiprocessingObject"].pool_job(_grid_minimization_wrapper, inputs) else: - x0, results, fval = MultiprocessingJob.serial_job( - 
_grid_minimization_wrapper, inputs - ) + x0, results, fval = MultiprocessingJob.serial_job(_grid_minimization_wrapper, inputs) # Choose final output if global_opts["split_grid_minimization"] != 0: @@ -469,8 +432,7 @@ def grid_minimization( results_new[i] = np.array(list(x0_array[i]) + list(results[i])) else: results_new[i] = np.array( - list(x0_array[i][: global_opts["split_grid_minimization"]]) - + list(results[i]) + list(x0_array[i][: global_opts["split_grid_minimization"]]) + list(results[i]) ) results = results_new if "initial_guesses" not in global_opts: @@ -499,9 +461,7 @@ def grid_minimization( return result -def brute( - parameters_guess, bounds, fit_bead, fit_parameter_names, exp_dict, global_opts={} -): +def brute(parameters_guess, bounds, fit_bead, fit_parameter_names, exp_dict, global_opts={}): r""" Fit defined parameters for equation of state object using scipy.optimize.brute with given experimental data. @@ -544,9 +504,7 @@ def brute( if key == "MultiprocessingObject": flag_workers = "workers" in global_opts and global_opts["workers"] > 1 if value.ncores > 1 and flag_workers: - logger.info( - "Brute algorithm is using {} workers.".format(value.ncores) - ) + logger.info("Brute algorithm is using {} workers.".format(value.ncores)) new_global_opts["workers"] = value._pool.map exp_dict = _del_Data_MultiprocessingObject(exp_dict) else: @@ -556,10 +514,7 @@ def brute( logger.info("Brute Options: {}".format(global_opts)) x0, fval, grid, Jount = spo.brute( - ff.compute_obj, - bounds, - args=(fit_bead, fit_parameter_names, exp_dict, bounds), - **global_opts + ff.compute_obj, bounds, args=(fit_bead, fit_parameter_names, exp_dict, bounds), **global_opts ) result = spo.OptimizeResult( x=x0, @@ -656,14 +611,9 @@ def basinhopping( else: filename = None - if ( - "write_intermediate_file" in global_opts - and global_opts["write_intermediate_file"] - ): + if "write_intermediate_file" in global_opts and global_opts["write_intermediate_file"]: del global_opts["write_intermediate_file"] - global_opts["callback"] = _WriteParameterResults( - fit_parameter_names, obj_cut=obj_cut, filename=filename - ) + global_opts["callback"] = _WriteParameterResults(fit_parameter_names, obj_cut=obj_cut, filename=filename) # Options for basin hopping new_global_opts = {"niter": 10, "T": 0.5, "niter_success": 3} @@ -704,11 +654,7 @@ def basinhopping( minimizer_kwargs.update(global_opts["minimizer_kwargs"]) del global_opts["minimizer_kwargs"] result = spo.basinhopping( - ff.compute_obj, - parameters_guess, - **global_opts, - accept_test=custombounds, - minimizer_kwargs=minimizer_kwargs + ff.compute_obj, parameters_guess, **global_opts, accept_test=custombounds, minimizer_kwargs=minimizer_kwargs ) return result @@ -721,9 +667,7 @@ def _grid_minimization_wrapper(args): x0, obj_args, bounds, constraints, opts = args if constraints is not None: - logger.warning( - "Constraints defined, but grid_minimization does not support their use." - ) + logger.warning("Constraints defined, but grid_minimization does not support their use.") opts = opts.copy() if "method" in opts: @@ -860,17 +804,9 @@ def __call__(self, **kwargs): feasible2 = not np.isnan(kwargs["f_new"]) if tmax and tmin and feasible1 and feasible2: - logger.info( - "Accept parameters: {}, with obj. function: {}".format( - x, kwargs["f_new"] - ) - ) + logger.info("Accept parameters: {}, with obj. function: {}".format(x, kwargs["f_new"])) else: - logger.info( - "Reject parameters: {}, with obj. 
function: {}".format( - x, kwargs["f_new"] - ) - ) + logger.info("Reject parameters: {}, with obj. function: {}".format(x, kwargs["f_new"])) return tmax and tmin and feasible1 and feasible2 @@ -909,11 +845,7 @@ def __init__(self, beadnames, obj_cut=None, filename=None): for i in range(20): filename = "{}_{}".format(i, old_fname) if not os.path.isfile(filename): - logger.info( - "File '{}' already exists, using {}.".format( - old_fname, filename - ) - ) + logger.info("File '{}' already exists, using {}.".format(old_fname, filename)) break self.beadnames = beadnames @@ -957,9 +889,7 @@ def __call__(self, *args, **kwargs): if kwargs["convergence"] < self.obj_cut: if not os.path.isfile(self.filename): with open(self.filename, "w") as f: - f.write( - "# n, convergence, {}\n".format(", ".join(self.beadnames)) - ) + f.write("# n, convergence, {}\n".format(", ".join(self.beadnames))) with open(self.filename, "a") as f: tmp = [self.ninit, kwargs["convergence"]] + list(args[0]) @@ -969,11 +899,7 @@ def __call__(self, *args, **kwargs): if args[2] or args[1] < self.obj_cut: if not os.path.isfile(self.filename): with open(self.filename, "w") as f: - f.write( - "# n, obj. value, accepted, {}\n".format( - ", ".join(self.beadnames) - ) - ) + f.write("# n, obj. value, accepted, {}\n".format(", ".join(self.beadnames))) with open(self.filename, "a") as f: tmp = [self.ninit, args[1], args[2]] + list(args[0]) diff --git a/despasito/parameter_fitting/interface.py b/despasito/parameter_fitting/interface.py index b53fd1b..ab26b25 100644 --- a/despasito/parameter_fitting/interface.py +++ b/despasito/parameter_fitting/interface.py @@ -120,9 +120,7 @@ def update_parameters(self, fit_bead, param_names, param_values): bead_names.append(fit_parameter_names_list[1]) if len(fit_parameter_names_list) == 1: - self.Eos.update_parameter( - fit_parameter_names_list[0], [fit_bead], param_values[i] - ) + self.Eos.update_parameter(fit_parameter_names_list[0], [fit_bead], param_values[i]) elif len(fit_parameter_names_list) == 2: self.Eos.update_parameter( fit_parameter_names_list[0], @@ -147,10 +145,7 @@ def objective(self): def __str__(self): - string = ( - "Data Set Object\nName: {}\nCalculation_type: {}\nNumber of " - "Points: {}".format( - self.name, self.thermodict["calculation_type"], self.npoints - ) + string = "Data Set Object\nName: {}\nCalculation_type: {}\nNumber of " "Points: {}".format( + self.name, self.thermodict["calculation_type"], self.npoints ) return string diff --git a/despasito/tests/test_fit_pure.py b/despasito/tests/test_fit_pure.py index bb946e7..60ea711 100644 --- a/despasito/tests/test_fit_pure.py +++ b/despasito/tests/test_fit_pure.py @@ -144,10 +144,7 @@ def test_solubility_so(Eos=Eos, thermo_dict=thermo_dict0.copy()): thermo_dict = ri.process_param_fit_inputs(thermo_dict) output = fit.fit(**thermo_dict) - assert ( - output["parameters_final"][0] == pytest.approx(375.01, abs=1.0) - and output["objective_value"] < 1.1 - ) + assert output["parameters_final"][0] == pytest.approx(375.01, abs=1.0) and output["objective_value"] < 1.1 thermo_dict_mix["exp_data"] = exp_data_density @@ -158,10 +155,7 @@ def test_density_so(Eos=Eos_mix, thermo_dict=thermo_dict_mix.copy()): thermo_dict = ri.process_param_fit_inputs(thermo_dict) output = fit.fit(**thermo_dict) - assert ( - output["parameters_final"][0] == pytest.approx(350.0, abs=1.0) - and output["objective_value"] < 1.5 - ) + assert output["parameters_final"][0] == pytest.approx(350.0, abs=1.0) and output["objective_value"] < 1.5 thermo_dict0["exp_data"] = 
exp_data_sat diff --git a/despasito/tests/test_peng_robinson.py b/despasito/tests/test_peng_robinson.py index 5a9df06..cbe0a66 100644 --- a/despasito/tests/test_peng_robinson.py +++ b/despasito/tests/test_peng_robinson.py @@ -55,10 +55,8 @@ def test_PR_coefficients( Eos_class.bead_library[beads[1]]["kappa"], ] assert ( - Eos_class.eos_dict["ai"] - == pytest.approx(np.array([1.73993846, 1.66217026]), abs=1e-4) - and Eos_class.eos_dict["bi"] - == pytest.approx(np.array([7.00758212e-05, 6.34118233e-05]), abs=1e-9) + Eos_class.eos_dict["ai"] == pytest.approx(np.array([1.73993846, 1.66217026]), abs=1e-4) + and Eos_class.eos_dict["bi"] == pytest.approx(np.array([7.00758212e-05, 6.34118233e-05]), abs=1e-9) and tmp == pytest.approx(np.array([0.81854211, 0.70357958]), abs=1e-4) ) diff --git a/despasito/tests/test_saft_gamma_mie.py b/despasito/tests/test_saft_gamma_mie.py index 7c129fc..9e6e7e5 100644 --- a/despasito/tests/test_saft_gamma_mie.py +++ b/despasito/tests/test_saft_gamma_mie.py @@ -98,9 +98,7 @@ "Nk-a1": 1, }, } -cross_library_co2_h2o = { - "CO2": {"H2O": {"epsilon": 226.38, "epsilonHB-H-e1": 2200.0, "K-H-e1": 9.1419e-2}} -} +cross_library_co2_h2o = {"CO2": {"H2O": {"epsilon": 226.38, "epsilonHB-H-e1": 2200.0, "K-H-e1": 9.1419e-2}}} epsilonHB_co2_h2o = np.array( [ [ @@ -204,8 +202,7 @@ def test_saft_gamma_mie_class_assoc_fugacity_coeff( def test_numba_available(): - assert (path + ".ext_Aassoc_numba" in sys.modules - and path + ".ext_gamma_mie_numba" in sys.modules) + assert path + ".ext_Aassoc_numba" in sys.modules and path + ".ext_gamma_mie_numba" in sys.modules def test_saft_gamma_mie_class_assoc_P_numba( @@ -233,17 +230,12 @@ def test_saft_gamma_mie_class_assoc_P_numba( assert P == pytest.approx(15727315.77, abs=1e3) -@pytest.mark.skipif( - not flag_cython, reason="Cython is not installed with this version of python." -) +@pytest.mark.skipif(not flag_cython, reason="Cython is not installed with this version of python.") def test_cython_available(): - assert (path + ".ext_Aassoc_cython" in sys.modules - and path + ".ext_gamma_mie_cython" in sys.modules) + assert path + ".ext_Aassoc_cython" in sys.modules and path + ".ext_gamma_mie_cython" in sys.modules -@pytest.mark.skipif( - not flag_cython, reason="Cython is not installed with this version of python." 
-) +@pytest.mark.skipif(not flag_cython, reason="Cython is not installed with this version of python.") def test_saft_gamma_mie_class_assoc_P_cython( T=T, xi=xi_co2_h2o, diff --git a/despasito/tests/test_saft_gamma_sw.py b/despasito/tests/test_saft_gamma_sw.py index 94860ca..652793c 100644 --- a/despasito/tests/test_saft_gamma_sw.py +++ b/despasito/tests/test_saft_gamma_sw.py @@ -68,9 +68,7 @@ def test_saft_gamma_sw_class_assoc_P(T=T, xi=[1.0], Eos=Eos, density=density): assert P == pytest.approx(9447510.360679299, abs=1e3) -def test_saft_gamma_sw_class_assoc_fugacity_coefficient( - P=P, xi=[1.0], T=T, Eos=Eos, density=density -): +def test_saft_gamma_sw_class_assoc_fugacity_coefficient(P=P, xi=[1.0], T=T, Eos=Eos, density=density): # """Test ability to predict P with association sites""" phi = Eos.fugacity_coefficient(P, density, xi, T) assert phi == pytest.approx(np.array([0.8293442]), abs=1e-4) @@ -85,9 +83,7 @@ def test_saft_gamma_sw_class_assoc_fugacity_coefficient( ) -def test_saft_gamma_sw_class_assoc_P_numba( - T=T, xi=np.array([1.0]), Eos=Eos, density=density -): +def test_saft_gamma_sw_class_assoc_P_numba(T=T, xi=np.array([1.0]), Eos=Eos, density=density): # """Test ability to predict P with association sites""" P = Eos.pressure(density, T, xi)[0] assert P == pytest.approx(9447510.360679299, abs=1e3) diff --git a/despasito/tests/test_thermo.py b/despasito/tests/test_thermo.py index 32488e2..8b0b146 100644 --- a/despasito/tests/test_thermo.py +++ b/despasito/tests/test_thermo.py @@ -79,66 +79,54 @@ def test_thermo_import(): def test_saturation_properties(Eos=Eos_co2_h2o, Tlist=Tlist): output = thermo.thermo( - Eos, - calculation_type="saturation_properties", - **{"Tlist": Tlist, "xilist": [np.array([0.0, 1.0])]} + Eos, calculation_type="saturation_properties", **{"Tlist": Tlist, "xilist": [np.array([0.0, 1.0])]} ) - assert output["Psat"][0] == pytest.approx(46266.2, abs=1e1) and output["rhol"][ - 0 - ] == pytest.approx(53883.63, abs=1e-1), output["rhol"][0] == pytest.approx( - 2371.38970066, abs=1e-1 - ) + assert output["Psat"][0] == pytest.approx(46266.2, abs=1e1) and output["rhol"][0] == pytest.approx( + 53883.63, abs=1e-1 + ), output["rhol"][0] == pytest.approx(2371.38970066, abs=1e-1) def test_liquid_properties(Eos=Eos_co2_h2o, Tlist=Tlist, xilist=xilist, Plist=Plist): output = thermo.thermo( - Eos, - calculation_type="liquid_properties", - **{"Tlist": Tlist, "Plist": Plist, "xilist": xilist} + Eos, calculation_type="liquid_properties", **{"Tlist": Tlist, "Plist": Plist, "xilist": xilist} ) - assert output["rhol"][0] == pytest.approx(53831.6, abs=1e-1) and output["phil"][ - 0 - ] == pytest.approx(np.array([403.98, 6.8846e-03]), abs=1e-1) + assert output["rhol"][0] == pytest.approx(53831.6, abs=1e-1) and output["phil"][0] == pytest.approx( + np.array([403.98, 6.8846e-03]), abs=1e-1 + ) def test_vapor_properties(Eos=Eos_co2_h2o, Tlist=Tlist, yilist=yilist, Plist=Plist): output = thermo.thermo( - Eos, - calculation_type="vapor_properties", - **{"Tlist": Tlist, "Plist": Plist, "yilist": yilist} + Eos, calculation_type="vapor_properties", **{"Tlist": Tlist, "Plist": Plist, "yilist": yilist} ) - assert output["rhov"][0] == pytest.approx(2938.3, abs=1e-1) and output["phiv"][ - 0 - ] == pytest.approx(np.array([0.865397, 0.63848]), abs=1e-1) + assert output["rhov"][0] == pytest.approx(2938.3, abs=1e-1) and output["phiv"][0] == pytest.approx( + np.array([0.865397, 0.63848]), abs=1e-1 + ) -def test_activity_coefficient( - Eos=Eos_h2o_hexane, Tlist=Tlist, xilist=xilist, yilist=yilist, 
Plist=Plist -): +def test_activity_coefficient(Eos=Eos_h2o_hexane, Tlist=Tlist, xilist=xilist, yilist=yilist, Plist=Plist): output = thermo.thermo( Eos, calculation_type="activity_coefficient", - **{"Tlist": Tlist, "Plist": Plist, "yilist": yilist, "xilist": xilist} + **{"Tlist": Tlist, "Plist": Plist, "yilist": yilist, "xilist": xilist}, ) print(output["gamma"]) - assert output["gamma"][0] == pytest.approx( - np.array([7.23733364e04, 6.30243983e-01]), abs=1e-2 - ) + assert output["gamma"][0] == pytest.approx(np.array([7.23733364e04, 6.30243983e-01]), abs=1e-2) def test_bubble_pressure(Eos=Eos_co2_h2o, Tlist=Tlist, xilist=xilist): output = thermo.thermo( Eos, calculation_type="bubble_pressure", - **{"Tlist": Tlist, "xilist": xilist, "Pmin": [6900000], "Pmax": [7100000]} + **{"Tlist": Tlist, "xilist": xilist, "Pmin": [6900000], "Pmax": [7100000]}, + ) + assert output["P"][0] == pytest.approx(7005198.6, abs=5e1) and output["yi"][0] == pytest.approx( + [0.98779049, 0.01220951], abs=1e-4 ) - assert output["P"][0] == pytest.approx(7005198.6, abs=5e1) and output["yi"][ - 0 - ] == pytest.approx([0.98779049, 0.01220951], abs=1e-4) diff --git a/despasito/thermodynamics/calc.py b/despasito/thermodynamics/calc.py index 18cb208..041db9c 100644 --- a/despasito/thermodynamics/calc.py +++ b/despasito/thermodynamics/calc.py @@ -31,8 +31,8 @@ def pressure_vs_volume_arrays( extended_npts=20, max_density=None, density_max_opts={}, - max_array_length=int(1e+6), - **kwargs + max_array_length=int(1e6), + **kwargs, ): r""" Output arrays with specific volume and pressure arrays calculated from the given @@ -100,17 +100,11 @@ def pressure_vs_volume_arrays( if not max_density: max_density = Eos.density_max(xi, T, **density_max_opts) elif gtb.isiterable(max_density): - logger.error( - " Maxrho should be type float. Given value: {}".format(max_density) - ) + logger.error(" Maxrho should be type float. Given value: {}".format(max_density)) max_density = max_density[0] if max_density > 1e5: - raise ValueError( - "Max density of {} mol/m^3 is not feasible, check parameters.".format( - max_density - ) - ) + raise ValueError("Max density of {} mol/m^3 is not feasible, check parameters.".format(max_density)) # min rho is a fraction of max rho, such that minrho << rhogassat minrho = max_density * min_density_fraction @@ -126,12 +120,7 @@ def pressure_vs_volume_arrays( vspace = (1.0 / rholist[:-1]) - (1.0 / rholist[1:]) if np.amax(vspace) > max_volume_increment: vspaceswitch = np.where(vspace > max_volume_increment)[0][-1] - rholist_2 = ( - 1.0 - / np.arange( - 1.0 / rholist[vspaceswitch + 1], 1.0 / minrho, max_volume_increment - )[::-1] - ) + rholist_2 = 1.0 / np.arange(1.0 / rholist[vspaceswitch + 1], 1.0 / minrho, max_volume_increment)[::-1] rholist = np.append(rholist_2, rholist[(vspaceswitch + 2):]) if len(rholist) > max_array_length: @@ -246,9 +235,7 @@ def pressure_vs_volume_plot(vlist, Plist, Pvspline, markers=[], **kwargs): logger.error("Matplotlib package is not installed, could not plot") -def calc_saturation_properties( - T, xi, Eos, density_opts={}, tol=1e-6, Pconverged=1, **kwargs -): +def calc_saturation_properties(T, xi, Eos, density_opts={}, tol=1e-6, Pconverged=1, **kwargs): r""" Computes the saturated pressure, gas and liquid densities for a single component system. 
@@ -288,10 +275,7 @@ def calc_saturation_properties( if np.count_nonzero(xi) != 1: if np.count_nonzero(xi > 0.1) != 1: - raise ValueError( - "Multiple components have compositions greater than 10%, check code " - "for source" - ) + raise ValueError("Multiple components have compositions greater than 10%, check code " "for source") else: ind = np.where(xi > 0.1)[0] raise ValueError( @@ -360,12 +344,8 @@ def calc_saturation_properties( ) Psat, rhol, rhov = np.nan, np.nan, np.nan - tmpv, _, _ = calc_vapor_fugacity_coefficient( - Psat, T, xi, Eos, density_opts=density_opts - ) - tmpl, _, _ = calc_liquid_fugacity_coefficient( - Psat, T, xi, Eos, density_opts=density_opts - ) + tmpv, _, _ = calc_vapor_fugacity_coefficient(Psat, T, xi, Eos, density_opts=density_opts) + tmpl, _, _ = calc_liquid_fugacity_coefficient(Psat, T, xi, Eos, density_opts=density_opts) logger.debug(" phiv: {}, phil: {}".format(tmpv, tmpl)) return Psat, rhol, rhov @@ -407,23 +387,18 @@ def objective_saturation_pressure(shift, Pv, vlist): # triangle. This isn't super accurate but we are just using the saturation # pressure to get started. slope, yroot = np.polyfit(vlist[-4:], Pv[-4:] - shift, 1) - b = ( - Pvspline.integral(roots[1], vlist[-1]) - + (Pv[-1] - shift) * (-yroot / slope - vlist[-1]) / 2 - ) + b = Pvspline.integral(roots[1], vlist[-1]) + (Pv[-1] - shift) * (-yroot / slope - vlist[-1]) / 2 # raise ValueError("Pressure curve only has two roots. If the curve hasn't # fully decayed, either increase maximum specific volume or decrease # 'pressure_min' in # :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`.") elif np.any(np.isnan(roots)): raise ValueError( - "Pressure curve without cubic properties has wrongly been accepted. Try " - + "decreasing pressure." + "Pressure curve without cubic properties has wrongly been accepted. Try " + "decreasing pressure." ) else: raise ValueError( - "Pressure curve without cubic properties has wrongly been accepted. Try " - "decreasing min_density_fraction" + "Pressure curve without cubic properties has wrongly been accepted. Try " "decreasing min_density_fraction" ) # pressure_vs_volume_plot(vlist, Pv-shift, Pvspline, markers=extrema) @@ -475,8 +450,7 @@ def calc_vapor_density(P, T, xi, Eos, density_opts={}, **kwargs): rho_tmp = np.nan flag = 3 logger.warning( - " Flag 3: The T and yi, {} {}, won't produce a fluid (vapor or liquid)" - " at this pressure".format(T, xi) + " Flag 3: The T and yi, {} {}, won't produce a fluid (vapor or liquid)" " at this pressure".format(T, xi) ) elif l_roots == 0: if Pvspline(1 / vlist[-1]) < 0: @@ -536,9 +510,7 @@ def calc_vapor_density(P, T, xi, Eos, density_opts={}, **kwargs): else: logger.debug( " Flag 0: This T and yi, {} {}, combination produces a vapor" - " at this pressure. Warning! approaching critical fluid".format( - T, xi - ) + " at this pressure. Warning! approaching critical fluid".format(T, xi) ) else: logger.warning( @@ -559,8 +531,7 @@ def calc_vapor_density(P, T, xi, Eos, density_opts={}, **kwargs): flag = 1 rho_tmp = 1.0 / roots[0] logger.debug( - " Flag 1: The T and yi, {} {}, combination produces a liquid at " - "this pressure".format(T, xi) + " Flag 1: The T and yi, {} {}, combination produces a liquid at " "this pressure".format(T, xi) ) elif len(extrema) > 1: flag = 0 @@ -601,14 +572,11 @@ def calc_vapor_density(P, T, xi, Eos, density_opts={}, **kwargs): else: logger.debug( " Flag 0: This T and yi, {} {}, combination produces a vapor " - "at this pressure. Warning! 
approaching critical fluid".format( - T, xi - ) + "at this pressure. Warning! approaching critical fluid".format(T, xi) ) else: # 3 roots logger.debug( - " Flag 0: This T and yi, {} {}, combination produces a vapor at this" - " pressure.".format(T, xi) + " Flag 0: This T and yi, {} {}, combination produces a vapor at this" " pressure.".format(T, xi) ) rho_tmp = 1.0 / roots[2] flag = 0 @@ -618,10 +586,7 @@ def calc_vapor_density(P, T, xi, Eos, density_opts={}, **kwargs): if rho_tmp * 1.01 > Eos.density_max(xi, T, maxpack=0.99): tmp[1] = Eos.density_max(xi, T, maxpack=0.99) - if ( - pressure_spline_error(tmp[0], P, T, xi, Eos) - * pressure_spline_error(tmp[1], P, T, xi, Eos) - ) < 0: + if (pressure_spline_error(tmp[0], P, T, xi, Eos) * pressure_spline_error(tmp[1], P, T, xi, Eos)) < 0: rho_tmp = spo.brentq( pressure_spline_error, tmp[0], @@ -700,8 +665,7 @@ def calc_liquid_density(P, T, xi, Eos, density_opts={}, **kwargs): if extrema: if len(extrema) == 1: logger.warning( - " One extrema at {}, assume weird minima behavior. Check your " - "parameters.".format(1 / extrema[0]) + " One extrema at {}, assume weird minima behavior. Check your " "parameters.".format(1 / extrema[0]) ) # Assess roots, what is the liquid density @@ -710,8 +674,7 @@ def calc_liquid_density(P, T, xi, Eos, density_opts={}, **kwargs): rho_tmp = np.nan flag = 3 logger.warning( - " Flag 3: The T and xi, {} {}, won't produce a fluid (vapor or liquid) " - "at this pressure".format(T, xi) + " Flag 3: The T and xi, {} {}, won't produce a fluid (vapor or liquid) " "at this pressure".format(T, xi) ) elif l_roots == 0: if Pvspline(1 / vlist[-1]): @@ -745,9 +708,7 @@ def calc_liquid_density(P, T, xi, Eos, density_opts={}, **kwargs): xi, ) + "won't produce a fluid (vapor or liquid) at this pressure, " - + "without density greater than max, {}".format( - Eos.density_max(xi, T, maxpack=0.99) - ) + + "without density greater than max, {}".format(Eos.density_max(xi, T, maxpack=0.99)) ) flag_NoOpt = True elif min(Plist) + P > 0: @@ -809,8 +770,7 @@ def calc_liquid_density(P, T, xi, Eos, density_opts={}, **kwargs): flag = 1 rho_tmp = 1.0 / roots[0] logger.debug( - " Flag 1: The T and xi, {} {},".format(T, xi) - + "combination produces a liquid at this pressure" + " Flag 1: The T and xi, {} {},".format(T, xi) + "combination produces a liquid at this pressure" ) elif len(extrema) > 1: flag = 0 @@ -824,8 +784,7 @@ def calc_liquid_density(P, T, xi, Eos, density_opts={}, **kwargs): rho_tmp = 1.0 / roots[0] flag = 1 logger.debug( - " Flag 1: The T and xi, {} {},".format(T, xi) - + "combination produces a liquid at this pressure" + " Flag 1: The T and xi, {} {},".format(T, xi) + "combination produces a liquid at this pressure" ) if flag in [1, 2]: # liquid or critical fluid @@ -835,9 +794,7 @@ def calc_liquid_density(P, T, xi, Eos, density_opts={}, **kwargs): pressure_spline_error(tmp[1], P, T, xi, Eos), ] if (P_tmp[0] * P_tmp[1]) < 0: - rho_tmp = spo.brentq( - pressure_spline_error, tmp[0], tmp[1], args=(P, T, xi, Eos), rtol=1e-7 - ) + rho_tmp = spo.brentq(pressure_spline_error, tmp[0], tmp[1], args=(P, T, xi, Eos), rtol=1e-7) else: if P_tmp[0] < 0: logger.warning( @@ -1013,10 +970,7 @@ def calc_new_mole_fractions(phase_1_mole_fraction, phil, phiv, phase=None): """ if phase is None or phase not in ["vapor", "liquid"]: - raise ValueError( - "The user must specify the desired mole fraction as either 'vapor' or " - + "'liquid'." 
- ) + raise ValueError("The user must specify the desired mole fraction as either 'vapor' or " + "'liquid'.") if np.sum(phase_1_mole_fraction) != 1.0: raise ValueError("Given mole fractions must add up to one.") @@ -1066,10 +1020,7 @@ def equilibrium_objective(phase_1_mole_fraction, phil, phiv, phase=None): """ if phase is None or phase not in ["vapor", "liquid"]: - raise ValueError( - "The user must specify the desired mole fraction as either 'vapor' or " - + "'liquid'." - ) + raise ValueError("The user must specify the desired mole fraction as either 'vapor' or " + "'liquid'.") if np.sum(phase_1_mole_fraction) != 1.0: raise ValueError("Given mole fractions must add up to one.") @@ -1128,7 +1079,7 @@ def calc_Prange_xi( maxfactor=2, minfactor=0.5, Pmin_allowed=100, - **kwargs + **kwargs, ): r""" Obtain minimum and maximum pressure values for bubble point calculation. @@ -1187,9 +1138,7 @@ def calc_Prange_xi( if len(kwargs) > 0: logger.debug( - "'calc_Prange_xi' does not use the following keyword arguments: {}".format( - ", ".join(list(kwargs.keys())) - ) + "'calc_Prange_xi' does not use the following keyword arguments: {}".format(", ".join(list(kwargs.keys()))) ) global _yi_global @@ -1239,9 +1188,7 @@ def calc_Prange_xi( for z in range(maxiter): # Liquid properties - phil, rhol, flagl = calc_liquid_fugacity_coefficient( - p, T, xi, Eos, density_opts=density_opts - ) + phil, rhol, flagl = calc_liquid_fugacity_coefficient(p, T, xi, Eos, density_opts=density_opts) if any(np.isnan(phil)): logger.error("Estimated minimum pressure is too high.") @@ -1262,14 +1209,7 @@ def calc_Prange_xi( # Calculate vapor phase properties and obj value yi_range, phiv_min, flagv_min = calc_vapor_composition( - yi_range, - xi, - phil, - p, - T, - Eos, - density_opts=density_opts, - **mole_fraction_options + yi_range, xi, phil, p, T, Eos, density_opts=density_opts, **mole_fraction_options ) obj = equilibrium_objective(xi, phil, phiv_min, phase="vapor") @@ -1285,12 +1225,9 @@ def calc_Prange_xi( # If within tolerance of liquid mole fraction elif np.sum(np.abs(xi - yi_range) / xi) < xytol and flagv_min == 2: logger.info( - "Estimated minimum pressure reproduces xi: " - "{}, Obj. Func: {}, Range {}".format(p, obj, Prange) + "Estimated minimum pressure reproduces xi: " "{}, Obj. Func: {}, Range {}".format(p, obj, Prange) ) - if ( - flag_max or flag_hard_max - ) and flag_liquid: # If a liquid phase exists at a higher pressure, + if (flag_max or flag_hard_max) and flag_liquid: # If a liquid phase exists at a higher pressure, # this must bound the lower pressure flag_min = True ObjRange[0] = obj @@ -1308,18 +1245,14 @@ def calc_Prange_xi( if p > Prange[1]: Prange[1] = p ObjRange[1] = np.nan - elif ( - flag_min or flag_hard_min - ) and flag_vapor: # If the 'liquid' phase is vapor at a lower pressure, + elif (flag_min or flag_hard_min) and flag_vapor: # If the 'liquid' phase is vapor at a lower pressure, # this must bound the upper pressure flag_max = True ObjRange[1] = obj Prange[1] = p phiv_max, flagv_max = phiv_min, flagv_min p = (Prange[1] - Prange[0]) / 2 + Prange[0] - elif ( - flag_critical - ): # Couldn't find phase by lowering pressure, now raise it + elif flag_critical: # Couldn't find phase by lowering pressure, now raise it ObjRange[0] = obj Prange[0] = p if flag_hard_max: @@ -1358,8 +1291,7 @@ def calc_Prange_xi( # If 'vapor' phase is liquid or unattainable elif flagv_min not in [0, 2, 4]: logger.info( - "Estimated minimum pressure produces liquid: " - "{}, Obj. 
Func: {}, Range {}".format(p, obj, Prange) + "Estimated minimum pressure produces liquid: " "{}, Obj. Func: {}, Range {}".format(p, obj, Prange) ) if flag_hard_min and p <= Pmin: flag_critical = True @@ -1393,17 +1325,13 @@ def calc_Prange_xi( # Found minimum pressure! elif obj > 0: - logger.info( - "Found estimated minimum pressure: " - "{}, Obj. Func: {}, Range {}".format(p, obj, Prange) - ) + logger.info("Found estimated minimum pressure: " "{}, Obj. Func: {}, Range {}".format(p, obj, Prange)) Prange[0] = p ObjRange[0] = obj break elif obj < 0: logger.info( - "Estimated minimum pressure too high: " - "{}, Obj. Func: {}, Range {}".format(p, obj, Prange) + "Estimated minimum pressure too high: " "{}, Obj. Func: {}, Range {}".format(p, obj, Prange) ) flag_liquid = True flag_max = True @@ -1420,9 +1348,7 @@ def calc_Prange_xi( else: raise ValueError( "This shouldn't happen: " - + "xi {}, phil {}, flagl {}, yi {},".format( - xi, phil, flagl, yi_range - ) + + "xi {}, phil {}, flagl {}, yi {},".format(xi, phil, flagl, yi_range) + " phiv {}, flagv {}, obj {}, flags: {} {} {}".format( phiv_min, flagv_min, @@ -1434,8 +1360,7 @@ def calc_Prange_xi( ) else: logger.info( - "Estimated minimum pressure produced vapor as a 'liquid' phase: " - "{}, Range {}".format(p, Prange) + "Estimated minimum pressure produced vapor as a 'liquid' phase: " "{}, Range {}".format(p, Prange) ) flag_vapor = True flag_min = True @@ -1446,11 +1371,7 @@ def calc_Prange_xi( else: p = maxfactor * Prange[0] - if ( - (flag_hard_min or flag_min) - and (flag_hard_max or flag_max) - and (p < Prange[0] or p > Prange[1]) - ): + if (flag_hard_min or flag_min) and (flag_hard_max or flag_max) and (p < Prange[0] or p > Prange[1]): # if (p < Prange[0] and Prange[0] != Prange[1]) or (flag_max and # p > Prange[1]): p = (Prange[1] - Prange[0]) / 1 + Prange[0] @@ -1463,14 +1384,12 @@ def calc_Prange_xi( if flag_hard_min and Pmin == p: raise ValueError( - "In searching for the minimum pressure, the range " - "{}, converged without a solution".format(Prange) + "In searching for the minimum pressure, the range " "{}, converged without a solution".format(Prange) ) if z == maxiter - 1: raise ValueError( - "Maximum Number of Iterations Reached: Proper minimum pressure for " - "liquid density could not be found" + "Maximum Number of Iterations Reached: Proper minimum pressure for " "liquid density could not be found" ) # A flag value of 0 is vapor, 1 is liquid, 2 mean a critical fluid, 3 means @@ -1491,15 +1410,10 @@ def calc_Prange_xi( for z in range(maxiter): # Liquid properties - phil, rhol, flagl = calc_liquid_fugacity_coefficient( - p, T, xi, Eos, density_opts=density_opts - ) + phil, rhol, flagl = calc_liquid_fugacity_coefficient(p, T, xi, Eos, density_opts=density_opts) if any(np.isnan(phil)): - logger.info( - "Liquid fugacity coefficient should not be NaN, pressure could be " - + "too high." 
- ) + logger.info("Liquid fugacity coefficient should not be NaN, pressure could be " + "too high.") flag_max = True Prange[1] = p ObjRange[1] = obj @@ -1508,14 +1422,7 @@ def calc_Prange_xi( # Calculate vapor phase properties and obj value yi_range, phiv_max, flagv_max = calc_vapor_composition( - yi_range, - xi, - phil, - p, - T, - Eos, - density_opts=density_opts, - **mole_fraction_options + yi_range, xi, phil, p, T, Eos, density_opts=density_opts, **mole_fraction_options ) obj = equilibrium_objective(xi, phil, phiv_max, phase="vapor") @@ -1523,9 +1430,7 @@ def calc_Prange_xi( if flagv_max not in [0, 2, 4] or np.any(np.isnan(yi_range)): logger.info( "New Maximum Pressure: " - + "{} isn't vapor, flag={}, Obj Func: {}, Range {}".format( - p, flagv_max, obj, Prange - ) + + "{} isn't vapor, flag={}, Obj Func: {}, Range {}".format(p, flagv_max, obj, Prange) ) if flag_critical: # looking for critical fluid Prange[0] = p @@ -1545,21 +1450,14 @@ def calc_Prange_xi( # If 'liquid' composition is reproduced elif np.sum(np.abs(xi - yi_range) / xi) < xytol: # If less than 2% - logger.info( - "Estimated Maximum Pressure Reproduces xi: " - + "{}, Obj. Func: {}".format(p, obj) - ) + logger.info("Estimated Maximum Pressure Reproduces xi: " + "{}, Obj. Func: {}".format(p, obj)) flag_max = True ObjRange[1] = obj Prange[1] = p p = (Prange[1] - Prange[0]) / 2.0 + Prange[0] # Suitable objective value found elif obj < 0: - logger.info( - "New Max Pressure: {}, flag={}, Obj Func: {}, Range {}".format( - p, flagv_max, obj, Prange - ) - ) + logger.info("New Max Pressure: {}, flag={}, Obj Func: {}, Range {}".format(p, flagv_max, obj, Prange)) if Prange[1] < p: Prange[0] = Prange[1] ObjRange[0] = ObjRange[1] @@ -1607,11 +1505,7 @@ def calc_Prange_xi( density_opts=density_opts, mole_fraction_options=mole_fraction_options, ) - logger.info( - "New Max Pressure: {}, Obj Func: {}, Range {}".format( - p, obj, Prange - ) - ) + logger.info("New Max Pressure: {}, Obj Func: {}, Range {}".format(p, obj, Prange)) if p < 0: parray = np.linspace(Prange[0], Prange[1], 20) @@ -1631,11 +1525,7 @@ def calc_Prange_xi( if len(p_min) > 1: obj_tmp = [] for p_min_tmp in p_min: - obj_tmp.append( - objective_bubble_pressure( - p_min_tmp, xi, T, Eos, density_opts=density_opts - ) - ) + obj_tmp.append(objective_bubble_pressure(p_min_tmp, xi, T, Eos, density_opts=density_opts)) p_min = p_min[obj_tmp == np.nanmin(obj_tmp)] elif len(p_min) == 0: logger.error( @@ -1643,14 +1533,8 @@ def calc_Prange_xi( + " {}\n Obj Value: {}".format(parray, obj_array) ) p = p_min - obj = objective_bubble_pressure( - p, xi, T, Eos, density_opts=density_opts - ) - logger.info( - "New Max Pressure: {}, Obj Func: {}, Range {}".format( - p, obj, Prange - ) - ) + obj = objective_bubble_pressure(p, xi, T, Eos, density_opts=density_opts) + logger.info("New Max Pressure: {}, Obj Func: {}, Range {}".format(p, obj, Prange)) if obj > 0: Prange[1] = p @@ -1663,27 +1547,17 @@ def calc_Prange_xi( else: logger.error( "Could not find maximum in pressure range:\n Pressure " - + "range {} best {}\n Obj Value range {} best {}".format( - Prange, p, ObjRange, obj - ) + + "range {} best {}\n Obj Value range {} best {}".format(Prange, p, ObjRange, obj) ) break elif flag_max: - logger.info( - "New Minimum Pressure: {}, Obj. Func: {}, Range {}".format( - p, obj, Prange - ) - ) + logger.info("New Minimum Pressure: {}, Obj. 
Func: {}, Range {}".format(p, obj, Prange)) Prange[0] = p ObjRange[0] = obj p = (Prange[1] - Prange[0]) / 2.0 + Prange[0] else: - logger.info( - "New Maximum Pressure: {}, Obj. Func: {}, Range {}".format( - p, obj, Prange - ) - ) + logger.info("New Maximum Pressure: {}, Obj. Func: {}, Range {}".format(p, obj, Prange)) if not flag_hard_max: if Prange[1] < p: Prange[0] = Prange[1] @@ -1706,16 +1580,12 @@ def calc_Prange_xi( if np.abs(Prange[1] - Prange[0]) < ptol: raise ValueError( - "In searching for the minimum pressure, the range " - + "{}, converged without a solution".format(Prange) + "In searching for the minimum pressure, the range " + "{}, converged without a solution".format(Prange) ) if z == maxiter - 1 or flag_min: if flag_min: - logger.error( - "Cannot reach objective value of zero. Final Pressure: " - + "{}, Obj. Func: {}".format(p, obj) - ) + logger.error("Cannot reach objective value of zero. Final Pressure: " + "{}, Obj. Func: {}".format(p, obj)) else: logger.error( "Maximum Number of Iterations Reached: A change in sign for the" @@ -1724,9 +1594,7 @@ def calc_Prange_xi( Prange = np.array([np.nan, np.nan]) Pguess = np.nan else: - logger.info( - "[Pmin, Pmax]: {}, Obj. Values: {}".format(str(Prange), str(ObjRange)) - ) + logger.info("[Pmin, Pmax]: {}, Obj. Values: {}".format(str(Prange), str(ObjRange))) logger.info("Initial guess in pressure: {} Pa".format(Pguess)) _yi_global = yi_range @@ -1749,7 +1617,7 @@ def calc_Prange_yi( xytol=0.01, maxfactor=2, minfactor=0.5, - **kwargs + **kwargs, ): r""" Obtain min and max pressure values. @@ -1810,9 +1678,7 @@ def calc_Prange_yi( if len(kwargs) > 0: logger.debug( - "'calc_Prange_yi' does not use the following keyword arguments: {}".format( - ", ".join(list(kwargs.keys())) - ) + "'calc_Prange_yi' does not use the following keyword arguments: {}".format(", ".join(list(kwargs.keys()))) ) global _xi_global @@ -1861,9 +1727,7 @@ def calc_Prange_yi( for z in range(maxiter): # Vapor properties - phiv, _, flagv = calc_vapor_fugacity_coefficient( - p, T, yi, Eos, density_opts=density_opts - ) + phiv, _, flagv = calc_vapor_fugacity_coefficient(p, T, yi, Eos, density_opts=density_opts) if any(np.isnan(phiv)): logger.error("Estimated minimum pressure is too high.") flag_max = True @@ -1882,14 +1746,7 @@ def calc_Prange_yi( # Calculate the liquid phase properties xi_range, phil_min, flagl_min = calc_liquid_composition( - xi_range, - yi, - phiv, - p, - T, - Eos, - density_opts=density_opts, - **mole_fraction_options + xi_range, yi, phiv, p, T, Eos, density_opts=density_opts, **mole_fraction_options ) obj = equilibrium_objective(yi, phil_min, phiv, phase="liquid") @@ -1904,17 +1761,12 @@ def calc_Prange_yi( else: p = p * minfactor - elif ( - np.sum(np.abs(yi - xi_range) / yi) < xytol and flagl_min == 2 - ): # If within 2% of liquid mole fraction + elif np.sum(np.abs(yi - xi_range) / yi) < xytol and flagl_min == 2: # If within 2% of liquid mole fraction logger.info( - "Estimated Minimum Pressure Reproduces yi: " - + "{}, Obj. Func: {}, Range {}".format(p, obj, Prange) + "Estimated Minimum Pressure Reproduces yi: " + "{}, Obj. Func: {}, Range {}".format(p, obj, Prange) ) - if ( - flag_critical - ): # Couldn't find phase by lowering pressure, now raise it + if flag_critical: # Couldn't find phase by lowering pressure, now raise it ObjRange[0] = obj Prange[0] = p if flag_hard_max: @@ -1949,23 +1801,20 @@ def calc_Prange_yi( Prange[0] = p ObjRange[0] = obj logger.info( - "Obtained estimated Minimum Pressure: " - + "{}, Obj. 
Func: {}, Range {}".format(p, obj, Prange) + "Obtained estimated Minimum Pressure: " + "{}, Obj. Func: {}, Range {}".format(p, obj, Prange) ) break elif obj > 0: flag_max = True logger.info( - "Estimated Minimum Pressure too High: " - + "{}, Obj. Func: {}, Range {}".format(p, obj, Prange) + "Estimated Minimum Pressure too High: " + "{}, Obj. Func: {}, Range {}".format(p, obj, Prange) ) ObjRange[1] = obj Prange[1] = p p = (Prange[1] - Prange[0]) * minfactor + Prange[0] else: logger.info( - "Estimated Minimum Pressure Produced Liquid instead of Vapor Phase:" - + " {}, Range {}".format(p, Prange) + "Estimated Minimum Pressure Produced Liquid instead of Vapor Phase:" + " {}, Range {}".format(p, Prange) ) if flag_hard_min and p <= Pmin: flag_critical = True @@ -2005,17 +1854,12 @@ def calc_Prange_yi( else: raise ValueError("Pmin should never be greater than Pmax") - if ( - (flag_max or flag_hard_max) - and (flag_min or flag_hard_min) - and not Prange[0] <= p <= Prange[1] - ): + if (flag_max or flag_hard_max) and (flag_min or flag_hard_min) and not Prange[0] <= p <= Prange[1]: p = (Prange[1] - Prange[0]) * np.random.rand(1)[0] + Prange[0] if flag_hard_min and Pmin == p: raise ValueError( - "In searching for the minimum pressure, the range " - + "{}, converged without a solution".format(Prange) + "In searching for the minimum pressure, the range " + "{}, converged without a solution".format(Prange) ) if p <= 0.0: @@ -2027,8 +1871,7 @@ def calc_Prange_yi( if z == maxiter - 1: raise ValueError( - "Maximum Number of Iterations Reached: Proper minimum pressure for" - + " liquid density could not be found" + "Maximum Number of Iterations Reached: Proper minimum pressure for" + " liquid density could not be found" ) # Be sure guess in pressure is larger than lower bound @@ -2046,18 +1889,9 @@ def calc_Prange_yi( ObjArray = [ObjRange[1]] for z in range(maxiter): # Calculate objective value - phiv, _, flagv = calc_vapor_fugacity_coefficient( - p, T, yi, Eos, density_opts=density_opts - ) + phiv, _, flagv = calc_vapor_fugacity_coefficient(p, T, yi, Eos, density_opts=density_opts) xi_range, phil, flagl = calc_liquid_composition( - xi_range, - yi, - phiv, - p, - T, - Eos, - density_opts=density_opts, - **mole_fraction_options + xi_range, yi, phiv, p, T, Eos, density_opts=density_opts, **mole_fraction_options ) obj = equilibrium_objective(yi, phil, phiv, phase="liquid") @@ -2080,9 +1914,7 @@ def calc_Prange_yi( Prange[1] = p ObjRange[1] = obj logger.info( - "New Max Pressure: {}, flag={}, Obj Func: {}, Range {}".format( - Prange[1], flagv, ObjRange[1], Prange - ) + "New Max Pressure: {}, flag={}, Obj Func: {}, Range {}".format(Prange[1], flagv, ObjRange[1], Prange) ) logger.info("Got the pressure range!") slope = (ObjRange[1] - ObjRange[0]) / (Prange[1] - Prange[0]) @@ -2095,11 +1927,7 @@ def calc_Prange_yi( Prange[0] = p ObjRange[0] = obj p = (Prange[1] - Prange[0]) / 2.0 + Prange[0] - logger.info( - "New Max Pressure: {}, Obj. Func: {}, Range {}".format( - Prange[0], ObjRange[0], Prange - ) - ) + logger.info("New Max Pressure: {}, Obj. 
Func: {}, Range {}".format(Prange[0], ObjRange[0], Prange)) else: Parray.append(p) ObjArray.append(obj) @@ -2136,11 +1964,7 @@ def calc_Prange_yi( density_opts=density_opts, mole_fraction_options=mole_fraction_options, ) - logger.info( - "New Max Pressure: {}, Obj Func: {}, Range {}".format( - p, obj, Prange - ) - ) + logger.info("New Max Pressure: {}, Obj Func: {}, Range {}".format(p, obj, Prange)) if p < 0: parray = np.linspace(Prange[0], Prange[1], 20) @@ -2160,28 +1984,16 @@ def calc_Prange_yi( if len(p_min) > 1: obj_tmp = [] for p_min_tmp in p_min: - obj_tmp.append( - objective_bubble_pressure( - p_min_tmp, xi, T, Eos, density_opts=density_opts - ) - ) + obj_tmp.append(objective_bubble_pressure(p_min_tmp, xi, T, Eos, density_opts=density_opts)) p_min = p_min[obj_tmp == np.nanmin(obj_tmp)] elif len(p_min) == 0: logger.error( "Could not find minimum in pressure range:\n " - + "Pressure: {}\n Obj Value: {}".format( - parray, obj_array - ) + + "Pressure: {}\n Obj Value: {}".format(parray, obj_array) ) p = p_min - obj = objective_bubble_pressure( - p, xi, T, Eos, density_opts=density_opts - ) - logger.info( - "New Max Pressure: {}, Obj Func: {}, Range {}".format( - p, obj, Prange - ) - ) + obj = objective_bubble_pressure(p, xi, T, Eos, density_opts=density_opts) + logger.info("New Max Pressure: {}, Obj Func: {}, Range {}".format(p, obj, Prange)) if obj > 0: Prange[1] = p @@ -2194,27 +2006,17 @@ def calc_Prange_yi( else: logger.error( "Could not find maximum in pressure range:\n Pressure " - + "range {} best {}\n Obj Value range {} best {}".format( - Prange, p, ObjRange, obj - ) + + "range {} best {}\n Obj Value range {} best {}".format(Prange, p, ObjRange, obj) ) break elif flag_hard_max: - logger.info( - "New Minimum Pressure: {}, Obj. Func: {}, Range {}".format( - p, obj, Prange - ) - ) + logger.info("New Minimum Pressure: {}, Obj. Func: {}, Range {}".format(p, obj, Prange)) Prange[0] = p ObjRange[0] = obj p = (Prange[1] - Prange[0]) / 2.0 + Prange[0] else: - logger.info( - "New Maximum Pressure: {}, Obj. Func: {}, Range {}".format( - p, obj, Prange - ) - ) + logger.info("New Maximum Pressure: {}, Obj. Func: {}, Range {}".format(p, obj, Prange)) if not flag_hard_max: if Prange[1] < p: Prange[0] = Prange[1] @@ -2227,10 +2029,7 @@ def calc_Prange_yi( if z == maxiter - 1 or flag_min: if flag_min: - logger.error( - "Cannot reach objective value of zero. Final Pressure: " - + "{}, Obj. Func: {}".format(p, obj) - ) + logger.error("Cannot reach objective value of zero. Final Pressure: " + "{}, Obj. Func: {}".format(p, obj)) else: logger.error( "Maximum Number of Iterations Reached: A change in sign for the " @@ -2239,9 +2038,7 @@ def calc_Prange_yi( Prange = np.array([np.nan, np.nan]) Pguess = np.nan elif flag_sol: - logger.info( - "[Pmin, Pmax]: {}, Obj. Values: {}".format(str(Prange), str(ObjRange)) - ) + logger.info("[Pmin, Pmax]: {}, Obj. Values: {}".format(str(Prange), str(ObjRange))) logger.info("Initial guess in pressure: {} Pa".format(Pguess)) else: logger.error( @@ -2254,19 +2051,7 @@ def calc_Prange_yi( return Prange, Pguess -def calc_vapor_composition( - yi, - xi, - phil, - P, - T, - Eos, - density_opts={}, - maxiter=50, - tol=1e-6, - tol_trivial=0.05, - **kwargs -): +def calc_vapor_composition(yi, xi, phil, P, T, Eos, density_opts={}, maxiter=50, tol=1e-6, tol_trivial=0.05, **kwargs): r""" Find vapor mole fraction given pressure, liquid mole fraction, and temperature. 
@@ -2320,18 +2105,14 @@ def calc_vapor_composition( """ if np.any(np.isnan(phil)): - raise ValueError( - "Cannot obtain vapor mole fraction with fugacity coefficients of NaN" - ) + raise ValueError("Cannot obtain vapor mole fraction with fugacity coefficients of NaN") global _yi_global yi_total = [np.sum(yi)] yi /= np.sum(yi) flag_check_vapor = True # Make sure we only search for vapor compositions once - flag_trivial_sol = ( - True # Make sure we only try to find alternative to trivial solution once - ) + flag_trivial_sol = True # Make sure we only try to find alternative to trivial solution once logger.info(" Solve yi: P {}, T {}, xi {}, phil {}".format(P, T, xi, phil)) for z in range(maxiter): @@ -2339,28 +2120,20 @@ def calc_vapor_composition( yi_tmp = yi / np.sum(yi) # Try yi - phiv, _, flagv = calc_vapor_fugacity_coefficient( - P, T, yi_tmp, Eos, density_opts=density_opts - ) + phiv, _, flagv = calc_vapor_fugacity_coefficient(P, T, yi_tmp, Eos, density_opts=density_opts) - if ( - any(np.isnan(phiv)) or flagv == 1 - ) and flag_check_vapor: # If vapor density doesn't exist + if (any(np.isnan(phiv)) or flagv == 1) and flag_check_vapor: # If vapor density doesn't exist flag_check_vapor = False if all(yi_tmp != 0.0) and len(yi_tmp) == 2: logger.debug(" Composition doesn't produce a vapor, let's find one!") - yi_tmp = find_new_yi( - P, T, phil, xi, Eos, density_opts=density_opts, **kwargs - ) + yi_tmp = find_new_yi(P, T, phil, xi, Eos, density_opts=density_opts, **kwargs) flag_trivial_sol = False if np.any(np.isnan(yi_tmp)): phiv, _, flagv = [np.nan, np.nan, 3] yinew = yi_tmp break else: - phiv, _, flagv = calc_vapor_fugacity_coefficient( - P, T, yi_tmp, Eos, density_opts=density_opts - ) + phiv, _, flagv = calc_vapor_fugacity_coefficient(P, T, yi_tmp, Eos, density_opts=density_opts) yinew = calc_new_mole_fractions(xi, phil, phiv, phase="vapor") else: logger.debug( @@ -2371,19 +2144,11 @@ def calc_vapor_composition( elif np.sum(np.abs(xi - yi_tmp) / xi) < tol_trivial and flag_trivial_sol: flag_trivial_sol = False if all(yi_tmp != 0.0) and len(yi_tmp) == 2: - logger.debug( - " Composition produces trivial solution, let's find a " - + "different one!" - ) - yi_tmp = find_new_yi( - P, T, phil, xi, Eos, density_opts=density_opts, **kwargs - ) + logger.debug(" Composition produces trivial solution, let's find a " + "different one!") + yi_tmp = find_new_yi(P, T, phil, xi, Eos, density_opts=density_opts, **kwargs) flag_check_vapor = False else: - logger.debug( - " Composition produces trivial solution, using random guess" - + " to reset" - ) + logger.debug(" Composition produces trivial solution, using random guess" + " to reset") yi_tmp = np.random.rand(len(yi_tmp)) yi_tmp /= np.sum(yi_tmp) @@ -2392,42 +2157,27 @@ def calc_vapor_composition( yinew = yi_tmp break else: - phiv, _, flagv = calc_vapor_fugacity_coefficient( - P, T, yi_tmp, Eos, density_opts=density_opts - ) + phiv, _, flagv = calc_vapor_fugacity_coefficient(P, T, yi_tmp, Eos, density_opts=density_opts) yinew = calc_new_mole_fractions(xi, phil, phiv, phase="vapor") else: yinew = calc_new_mole_fractions(xi, phil, phiv, phase="vapor") yinew[np.isnan(yinew)] = 0.0 yi2 = yinew / np.sum(yinew) - phiv2, _, flagv2 = calc_vapor_fugacity_coefficient( - P, T, yi2, Eos, density_opts=density_opts - ) + phiv2, _, flagv2 = calc_vapor_fugacity_coefficient(P, T, yi2, Eos, density_opts=density_opts) if any(np.isnan(phiv)): phiv = np.nan - logger.error( - "Fugacity coefficient of vapor should not be NaN, pressure could be" - + " too high." 
- ) + logger.error("Fugacity coefficient of vapor should not be NaN, pressure could be" + " too high.") # Check for bouncing between values if len(yi_total) > 3: - tmp1 = np.abs(np.sum(yinew) - yi_total[-2]) + np.abs( - yi_total[-1] - yi_total[-3] - ) + tmp1 = np.abs(np.sum(yinew) - yi_total[-2]) + np.abs(yi_total[-1] - yi_total[-3]) if tmp1 < np.abs(np.sum(yinew) - yi_total[-1]) and flagv != flagv2: - logger.debug( - " Composition bouncing between values, let's find the answer!" - ) + logger.debug(" Composition bouncing between values, let's find the answer!") bounds = np.sort([yi_tmp[0], yi2[0]]) - yi2, obj = bracket_bounding_yi( - P, T, phil, xi, Eos, bounds=bounds, density_opts=density_opts - ) - phiv2, _, flagv2 = calc_vapor_fugacity_coefficient( - P, T, yi2, Eos, density_opts=density_opts - ) + yi2, obj = bracket_bounding_yi(P, T, phil, xi, Eos, bounds=bounds, density_opts=density_opts) + phiv2, _, flagv2 = calc_vapor_fugacity_coefficient(P, T, yi2, Eos, density_opts=density_opts) _yi_global = yi2 logger.info( " Inner Loop Final (from bracketing bouncing values) yi: " @@ -2435,11 +2185,7 @@ def calc_vapor_composition( ) break - logger.debug( - " yi guess {}, yi calc {}, phiv {}, flag {}".format( - yi_tmp, yinew, phiv, flagv - ) - ) + logger.debug(" yi guess {}, yi calc {}, phiv {}, flag {}".format(yi_tmp, yinew, phiv, flagv)) logger.debug( " Old yi_total: {}, New yi_total: {}, Change: {}".format( yi_total[-1], np.sum(yinew), np.sum(yinew) - yi_total[-1] @@ -2475,13 +2221,10 @@ def calc_vapor_composition( yi2 = yinew / np.sum(yinew) tmp = np.abs(yi2[ind_tmp] - yi_tmp[ind_tmp]) / yi_tmp[ind_tmp] logger.warning( - " More than {} iterations needed.".format(maxiter) - + " Error in Smallest Fraction: {}%".format(tmp * 100) + " More than {} iterations needed.".format(maxiter) + " Error in Smallest Fraction: {}%".format(tmp * 100) ) if tmp > 0.1: # If difference is greater than 10% - yinew = find_new_yi( - P, T, phil, xi, Eos, density_opts=density_opts, **kwargs - ) + yinew = find_new_yi(P, T, phil, xi, Eos, density_opts=density_opts, **kwargs) yi2 = yinew / np.sum(yinew) y1 = spo.least_squares( objective_find_yi, @@ -2491,13 +2234,9 @@ def calc_vapor_composition( ) yi = y1.x[0] yi2 = np.array([yi, 1 - yi]) - phiv2, _, flagv2 = calc_vapor_fugacity_coefficient( - P, T, yi2, Eos, density_opts=density_opts - ) + phiv2, _, flagv2 = calc_vapor_fugacity_coefficient(P, T, yi2, Eos, density_opts=density_opts) obj = objective_find_yi(yi2, P, T, phil, xi, Eos, density_opts=density_opts) - logger.warning( - " Find yi with root algorithm, yi {}, obj {}".format(yi2, obj) - ) + logger.warning(" Find yi with root algorithm, yi {}, obj {}".format(yi2, obj)) if obj > tol: logger.error("Could not converge mole fraction") phiv2 = np.full(len(yi_tmp), np.nan) @@ -2506,19 +2245,7 @@ def calc_vapor_composition( return yi2, phiv2, flagv2 -def calc_liquid_composition( - xi, - yi, - phiv, - P, - T, - Eos, - density_opts={}, - maxiter=20, - tol=1e-6, - tol_trivial=0.05, - **kwargs -): +def calc_liquid_composition(xi, yi, phiv, P, T, Eos, density_opts={}, maxiter=20, tol=1e-6, tol_trivial=0.05, **kwargs): r""" Find liquid mole fraction given pressure, vapor mole fraction, and temperature. 
@@ -2573,16 +2300,12 @@ def calc_liquid_composition( global _xi_global if np.any(np.isnan(phiv)): - raise ValueError( - "Cannot obtain liquid mole fraction with fugacity coefficients of NaN" - ) + raise ValueError("Cannot obtain liquid mole fraction with fugacity coefficients of NaN") xi /= np.sum(xi) xi_total = [np.sum(xi)] flag_check_liquid = True # Make sure we only search for liquid compositions once - flag_trivial_sol = ( - True # Make sure we only try to find alternative to trivial solution once - ) + flag_trivial_sol = True # Make sure we only try to find alternative to trivial solution once logger.info(" Solve xi: P {}, T {}, yi {}, phiv {}".format(P, T, yi, phiv)) for z in range(maxiter): @@ -2590,28 +2313,20 @@ def calc_liquid_composition( xi_tmp = xi / np.sum(xi) # Try xi - phil, rhol, flagl = calc_liquid_fugacity_coefficient( - P, T, xi_tmp, Eos, density_opts=density_opts - ) + phil, rhol, flagl = calc_liquid_fugacity_coefficient(P, T, xi_tmp, Eos, density_opts=density_opts) if (any(np.isnan(phil)) or flagl in [0, 4]) and flag_check_liquid: flag_check_liquid = False if all(xi_tmp != 0.0) and len(xi_tmp) == 2: - logger.debug( - " Composition doesn't produce a liquid, let's find one!" - ) - xi_tmp = find_new_xi( - P, T, phiv, yi, Eos, density_opts=density_opts, **kwargs - ) + logger.debug(" Composition doesn't produce a liquid, let's find one!") + xi_tmp = find_new_xi(P, T, phiv, yi, Eos, density_opts=density_opts, **kwargs) flag_trivial_sol = False if np.any(np.isnan(xi_tmp)): phil, rhol, flagl = [np.nan, np.nan, 3] xinew = xi_tmp break else: - phil, rhol, flagl = calc_liquid_fugacity_coefficient( - P, T, xi_tmp, Eos, density_opts=density_opts - ) + phil, rhol, flagl = calc_liquid_fugacity_coefficient(P, T, xi_tmp, Eos, density_opts=density_opts) xinew = calc_new_mole_fractions(yi, phil, phiv, phase="liquid") else: logger.debug( @@ -2622,19 +2337,11 @@ def calc_liquid_composition( elif np.sum(np.abs(yi - xi_tmp) / yi) < tol_trivial and flag_trivial_sol: flag_trivial_sol = False if all(xi_tmp != 0.0) and len(xi_tmp) == 2: - logger.debug( - " Composition produces trivial solution, let's find a" - + " different one!" 
- ) - xi_tmp = find_new_xi( - P, T, phiv, yi, Eos, density_opts=density_opts, **kwargs - ) + logger.debug(" Composition produces trivial solution, let's find a" + " different one!") + xi_tmp = find_new_xi(P, T, phiv, yi, Eos, density_opts=density_opts, **kwargs) flag_check_liquid = False else: - logger.debug( - " Composition produces trivial solution, using random guess" - + " to reset" - ) + logger.debug(" Composition produces trivial solution, using random guess" + " to reset") xi_tmp = np.random.rand(len(xi_tmp)) xi_tmp /= np.sum(xi_tmp) @@ -2643,19 +2350,13 @@ def calc_liquid_composition( xinew = xi_tmp break else: - phil, rhol, flagl = calc_liquid_fugacity_coefficient( - P, T, xi_tmp, Eos, density_opts=density_opts - ) + phil, rhol, flagl = calc_liquid_fugacity_coefficient(P, T, xi_tmp, Eos, density_opts=density_opts) xinew = calc_new_mole_fractions(yi, phil, phiv, phase="liquid") else: xinew = calc_new_mole_fractions(yi, phil, phiv, phase="liquid") xinew[np.isnan(xinew)] = 0.0 - logger.debug( - " xi guess {}, xi calc {}, phil {}".format( - xi_tmp, xinew / np.sum(xinew), phil - ) - ) + logger.debug(" xi guess {}, xi calc {}, phil {}".format(xi_tmp, xinew / np.sum(xinew), phil)) logger.debug( " Old xi_total: {}, New xi_total: {}, Change: {}".format( xi_total[-1], np.sum(xinew), np.sum(xinew) - xi_total[-1] @@ -2690,9 +2391,7 @@ def calc_liquid_composition( + "Error in Smallest Fraction: {} %%".format(tmp * 100) ) if tmp > 0.1: # If difference is greater than 10% - xinew = find_new_xi( - P, T, phiv, yi, Eos, density_opts=density_opts, **kwargs - ) + xinew = find_new_xi(P, T, phiv, yi, Eos, density_opts=density_opts, **kwargs) xinew = spo.least_squares( objective_find_xi, xinew[0], @@ -2702,16 +2401,12 @@ def calc_liquid_composition( xi = xinew.x[0] xi_tmp = np.array([xi, 1 - xi]) obj = objective_find_xi(xi_tmp, P, T, phiv, yi, Eos, density_opts=density_opts) - logger.warning( - " Find xi with root algorithm, xi {}, obj {}".format(xi_tmp, obj) - ) + logger.warning(" Find xi with root algorithm, xi {}, obj {}".format(xi_tmp, obj)) return xi_tmp, phil, flagl -def find_new_yi( - P, T, phil, xi, Eos, bounds=(0.01, 0.99), npoints=30, density_opts={}, **kwargs -): +def find_new_yi(P, T, phil, xi, Eos, bounds=(0.01, 0.99), npoints=30, density_opts={}, **kwargs): r""" Search vapor mole fraction combinations for a new estimate that produces a vapor density. 
@@ -2756,9 +2451,7 @@ def find_new_yi( for i, yi in enumerate(yi_ext): yi = np.array([yi, 1 - yi]) - obj, flagv = objective_find_yi( - yi, P, T, phil, xi, Eos, density_opts=density_opts, return_flag=True - ) + obj, flagv = objective_find_yi(yi, P, T, phil, xi, Eos, density_opts=density_opts, return_flag=True) flag_ext[i] = flagv obj_ext[i] = obj @@ -2793,11 +2486,7 @@ def find_new_yi( # Remove trivial solution obj_trivial = np.abs(yi_min - xi[0]) / xi[0] ind = np.where(obj_trivial == min(obj_trivial))[0][0] - logger.debug( - " Found multiple minima: {}, discard {} as trivial solution".format( - yi_min, yi_min[ind] - ) - ) + logger.debug(" Found multiple minima: {}, discard {} as trivial solution".format(yi_min, yi_min[ind])) # Remove liquid roots yi_min = np.array([yi_min[ii] for ii in range(len(yi_min)) if ii != ind]) @@ -2816,15 +2505,9 @@ def find_new_yi( density_opts=density_opts, return_flag=True, ) - yi_tmp2 = [ - yi_min[ii] for ii in range(len(yi_min)) if flagv_tmp2[ii] != 1 - ] + yi_tmp2 = [yi_min[ii] for ii in range(len(yi_min)) if flagv_tmp2[ii] != 1] if len(yi_tmp2): - obj_tmp2 = [ - obj_tmp2[ii] - for ii in range(len(obj_tmp2)) - if flagv_tmp2[ii] != 1 - ] + obj_tmp2 = [obj_tmp2[ii] for ii in range(len(obj_tmp2)) if flagv_tmp2[ii] != 1] yi_min = [yi_tmp2[np.where(obj_tmp2 == min(obj_tmp2))[0][0]]] else: yi_min = [yi_min[np.where(obj_tmp2 == min(obj_tmp2))[0][0]]] @@ -2845,18 +2528,7 @@ def find_new_yi( return yi_final -def bracket_bounding_yi( - P, - T, - phil, - xi, - Eos, - bounds=(0.01, 0.99), - maxiter=50, - tol=1e-7, - density_opts={}, - **kwargs -): +def bracket_bounding_yi(P, T, phil, xi, Eos, bounds=(0.01, 0.99), maxiter=50, tol=1e-7, density_opts={}, **kwargs): r""" Search binary vapor mole fraction combinations for a new estimate that produces a vapor density. 
@@ -2913,10 +2585,7 @@ def bracket_bounding_yi( ) if flag_bounds[0] == flag_bounds[1]: - logger.error( - " Both mole fractions have flag, " - + "{}, continue seeking convergence".format(flag_bounds[0]) - ) + logger.error(" Both mole fractions have flag, " + "{}, continue seeking convergence".format(flag_bounds[0])) y1 = bounds[1] flagv = flag_bounds[1] @@ -2925,9 +2594,7 @@ def bracket_bounding_yi( for i in np.arange(maxiter): y1 = np.mean(bounds) - obj, flagv = objective_find_yi( - y1, P, T, phil, xi, Eos, density_opts=density_opts, return_flag=True - ) + obj, flagv = objective_find_yi(y1, P, T, phil, xi, Eos, density_opts=density_opts, return_flag=True) if not flag_high_vapor: ind = np.where(flag_bounds == flagv)[0][0] @@ -2947,9 +2614,7 @@ def bracket_bounding_yi( bounds[ind], obj_bounds[ind], flag_bounds[ind] = y1, obj, flagv logger.debug( - " Bouncing mole fraction new bounds: {}, obj: {}, flag: {}".format( - bounds, obj_bounds, flag_bounds - ) + " Bouncing mole fraction new bounds: {}, obj: {}, flag: {}".format(bounds, obj_bounds, flag_bounds) ) # Check convergence @@ -2965,15 +2630,10 @@ def bracket_bounding_yi( y1, flagv = bounds[ind], flag_bounds[ind] if i == maxiter - 1: logger.debug( - " Bouncing mole fraction, max iterations ended with, " - + "y1={}, flagv={}".format(y1, flagv) + " Bouncing mole fraction, max iterations ended with, " + "y1={}, flagv={}".format(y1, flagv) ) else: - logger.debug( - " Bouncing mole fractions converged to y1={}, flagv={}".format( - y1, flagv - ) - ) + logger.debug(" Bouncing mole fractions converged to y1={}, flagv={}".format(y1, flagv)) return np.array([y1, 1 - y1]), flagv @@ -3022,9 +2682,7 @@ def objective_find_yi(yi, P, T, phil, xi, Eos, density_opts={}, return_flag=Fals yi = np.array(yi) yi /= np.sum(yi) - phiv, _, flagv = calc_vapor_fugacity_coefficient( - P, T, yi, Eos, density_opts=density_opts - ) + phiv, _, flagv = calc_vapor_fugacity_coefficient(P, T, yi, Eos, density_opts=density_opts) yinew = calc_new_mole_fractions(xi, phil, phiv, phase="vapor") yi2 = yinew / np.sum(yinew) @@ -3032,14 +2690,10 @@ def objective_find_yi(yi, P, T, phil, xi, Eos, density_opts={}, return_flag=Fals if np.any(np.isnan(yi2)): obj = np.nan else: - phiv2, _, flagv2 = calc_vapor_fugacity_coefficient( - P, T, yi2, Eos, density_opts=density_opts - ) + phiv2, _, flagv2 = calc_vapor_fugacity_coefficient(P, T, yi2, Eos, density_opts=density_opts) obj = np.sum(np.abs(yinew - xi * phil / phiv2)) - logger.debug( - " Guess yi: {}, calc yi: {}, diff={}, flagv {}".format(yi, yi2, obj, flagv) - ) + logger.debug(" Guess yi: {}, calc yi: {}, diff={}, flagv {}".format(yi, yi2, obj, flagv)) if return_flag: return obj, flagv @@ -3047,9 +2701,7 @@ def objective_find_yi(yi, P, T, phil, xi, Eos, density_opts={}, return_flag=Fals return obj -def find_new_xi( - P, T, phiv, yi, Eos, density_opts={}, bounds=(0.001, 0.999), npoints=30, **kwargs -): +def find_new_xi(P, T, phiv, yi, Eos, density_opts={}, bounds=(0.001, 0.999), npoints=30, **kwargs): r""" Search liquid mole fraction combinations for a new estimate that produces a liquid density. 
@@ -3083,9 +2735,7 @@ def find_new_xi( if len(kwargs) > 0: logger.debug( - " 'find_new_xi' does not use the following keyword arguments: {}".format( - ", ".join(list(kwargs.keys())) - ) + " 'find_new_xi' does not use the following keyword arguments: {}".format(", ".join(list(kwargs.keys()))) ) xi_ext = np.linspace(bounds[0], bounds[1], npoints) # Guess for yi @@ -3094,9 +2744,7 @@ def find_new_xi( for i, xi in enumerate(xi_ext): xi = np.array([xi, 1 - xi]) - obj, flagl = objective_find_xi( - xi, P, T, phiv, yi, Eos, density_opts=density_opts, return_flag=True - ) + obj, flagl = objective_find_xi(xi, P, T, phiv, yi, Eos, density_opts=density_opts, return_flag=True) flag_ext[i] = flagl obj_ext[i] = obj @@ -3128,11 +2776,7 @@ def find_new_xi( # Remove trivial solution obj_trivial = np.abs(xi_min - yi[0]) / yi[0] ind = np.where(obj_trivial == min(obj_trivial))[0][0] - logger.debug( - " Found multiple minima: {}, discard {} as trivial solution".format( - xi_min, xi_min[ind] - ) - ) + logger.debug(" Found multiple minima: {}, discard {} as trivial solution".format(xi_min, xi_min[ind])) xi_min = np.array([xi_min[ii] for ii in range(len(xi_min)) if ii != ind]) if not len(xi_min): @@ -3195,9 +2839,7 @@ def objective_find_xi(xi, P, T, phiv, yi, Eos, density_opts={}, return_flag=Fals xi = np.array(xi) xi /= np.sum(xi) - phil, _, flagl = calc_liquid_fugacity_coefficient( - P, T, xi, Eos, density_opts=density_opts - ) + phil, _, flagl = calc_liquid_fugacity_coefficient(P, T, xi, Eos, density_opts=density_opts) xinew = calc_new_mole_fractions(yi, phil, phiv, phase="liquid") xi2 = xinew / np.sum(xinew) @@ -3205,14 +2847,10 @@ def objective_find_xi(xi, P, T, phiv, yi, Eos, density_opts={}, return_flag=Fals if np.any(np.isnan(xi2)): obj = np.nan else: - phil2, _, flagl2 = calc_liquid_fugacity_coefficient( - P, T, xi2, Eos, density_opts=density_opts - ) + phil2, _, flagl2 = calc_liquid_fugacity_coefficient(P, T, xi2, Eos, density_opts=density_opts) obj = np.sum(np.abs(xinew - xi * phiv / phil2)) - logger.debug( - " Guess xi: {}, calc xi: {}, diff={}, flagl {}".format(xi, xi2, obj, flagl) - ) + logger.debug(" Guess xi: {}, calc xi: {}, diff={}, flagl {}".format(xi, xi2, obj, flagl)) if return_flag: return obj, flagl @@ -3220,9 +2858,7 @@ def objective_find_xi(xi, P, T, phiv, yi, Eos, density_opts={}, return_flag=Fals return obj -def objective_bubble_pressure( - P, xi, T, Eos, density_opts={}, mole_fraction_options={}, **kwargs -): +def objective_bubble_pressure(P, xi, T, Eos, density_opts={}, mole_fraction_options={}, **kwargs): r""" Objective function used to search pressure values and solve outer loop of constant temperature bubble point calculations. 
@@ -3263,26 +2899,15 @@ def objective_bubble_pressure( logger.info("P Guess: {} Pa".format(P)) # find liquid density - phil, rhol, flagl = calc_liquid_fugacity_coefficient( - P, T, xi, Eos, density_opts=density_opts - ) + phil, rhol, flagl = calc_liquid_fugacity_coefficient(P, T, xi, Eos, density_opts=density_opts) yinew, phiv, flagv = calc_vapor_composition( - _yi_global, - xi, - phil, - P, - T, - Eos, - density_opts=density_opts, - **mole_fraction_options + _yi_global, xi, phil, P, T, Eos, density_opts=density_opts, **mole_fraction_options ) _yi_global = yinew / np.sum(yinew) # given final yi recompute - phiv, rhov, flagv = calc_vapor_fugacity_coefficient( - P, T, _yi_global, Eos, density_opts=density_opts - ) + phiv, rhov, flagv = calc_vapor_fugacity_coefficient(P, T, _yi_global, Eos, density_opts=density_opts) Pv_test = Eos.pressure(rhov, T, _yi_global) obj_value = equilibrium_objective(xi, phil, phiv, phase="vapor") @@ -3291,9 +2916,7 @@ def objective_bubble_pressure( return obj_value -def objective_dew_pressure( - P, yi, T, Eos, density_opts={}, mole_fraction_options={}, **kwargs -): +def objective_dew_pressure(P, yi, T, Eos, density_opts={}, mole_fraction_options={}, **kwargs): r""" Objective function used to search pressure values and solve outer loop of constant temperature dew point calculations. @@ -3334,26 +2957,15 @@ def objective_dew_pressure( logger.info("P Guess: {} Pa".format(P)) # find liquid density - phiv, rhov, flagv = calc_vapor_fugacity_coefficient( - P, T, yi, Eos, density_opts=density_opts - ) + phiv, rhov, flagv = calc_vapor_fugacity_coefficient(P, T, yi, Eos, density_opts=density_opts) xinew, phil, flagl = calc_liquid_composition( - _xi_global, - yi, - phiv, - P, - T, - Eos, - density_opts=density_opts, - **mole_fraction_options + _xi_global, yi, phiv, P, T, Eos, density_opts=density_opts, **mole_fraction_options ) _xi_global = xinew / np.sum(xinew) # given final yi recompute - phil, rhol, flagl = calc_liquid_fugacity_coefficient( - P, T, _xi_global, Eos, density_opts=density_opts - ) + phil, rhol, flagl = calc_liquid_fugacity_coefficient(P, T, _xi_global, Eos, density_opts=density_opts) Pv_test = Eos.pressure(rhol, T, _xi_global) obj_value = equilibrium_objective(yi, phil, phiv, phase="liquid") @@ -3372,7 +2984,7 @@ def calc_dew_pressure( method="bisect", pressure_options={}, Psat_set=1e7, - **kwargs + **kwargs, ): r""" Calculate dew point mole fraction and pressure given system vapor mole fraction @@ -3437,9 +3049,7 @@ def calc_dew_pressure( for i in range(np.size(yi)): yi_tmp = np.zeros_like(yi) yi_tmp[i] = 1.0 - Psat[i], _, _ = calc_saturation_properties( - T, yi_tmp, Eos, density_opts=density_opts, **kwargs - ) + Psat[i], _, _ = calc_saturation_properties(T, yi_tmp, Eos, density_opts=density_opts, **kwargs) if np.isnan(Psat[i]): Psat[i] = Psat_set logger.warning( @@ -3462,19 +3072,10 @@ def calc_dew_pressure( xi = _xi_global Prange, Pestimate = calc_Prange_yi( - T, - xi, - yi, - Eos, - density_opts=density_opts, - mole_fraction_options=mole_fraction_options, - **kwargs + T, xi, yi, Eos, density_opts=density_opts, mole_fraction_options=mole_fraction_options, **kwargs ) if np.any(np.isnan(Prange)): - raise ValueError( - "Neither a suitable pressure range, or guess in pressure could be found " - "nor was given." 
- ) + raise ValueError("Neither a suitable pressure range, or guess in pressure could be found " "nor was given.") else: if Pguess is not None: if Pguess > Prange[1] or Pguess < Prange[0]: @@ -3486,8 +3087,7 @@ def calc_dew_pressure( P = Pestimate else: logger.warning( - "Using given guess in pressure, {},".format(Pguess) - + " that is inside identified pressure range." + "Using given guess in pressure, {},".format(Pguess) + " that is inside identified pressure range." ) P = Pguess else: @@ -3502,12 +3102,8 @@ def calc_dew_pressure( ) # find vapor density and fugacity - phiv, rhov, flagv = calc_vapor_fugacity_coefficient( - P, T, yi, Eos, density_opts=density_opts - ) - phil, rhol, flagl = calc_liquid_fugacity_coefficient( - P, T, xi, Eos, density_opts=density_opts - ) + phiv, rhov, flagv = calc_vapor_fugacity_coefficient(P, T, yi, Eos, density_opts=density_opts) + phil, rhol, flagl = calc_liquid_fugacity_coefficient(P, T, xi, Eos, density_opts=density_opts) if "tol" in mole_fraction_options: if mole_fraction_options["tol"] > 1e-10: mole_fraction_options["tol"] = 1e-10 @@ -3521,11 +3117,7 @@ def calc_dew_pressure( mole_fraction_options=mole_fraction_options, ) - logger.info( - "Final Output: Obj {}, P {} Pa, flagl {}, xi {}".format( - obj, P, flagl, _xi_global - ) - ) + logger.info("Final Output: Obj {}, P {} Pa, flagl {}, xi {}".format(obj, P, flagl, _xi_global)) return P, xi, flagl, flagv, obj @@ -3540,7 +3132,7 @@ def calc_bubble_pressure( Psat_set=1e7, method="bisect", pressure_options={}, - **kwargs + **kwargs, ): r""" Calculate bubble point mole fraction and pressure given system liquid mole @@ -3605,9 +3197,7 @@ def calc_bubble_pressure( for i in range(np.size(xi)): xi_tmp = np.zeros_like(xi) xi_tmp[i] = 1.0 - Psat[i], _, _ = calc_saturation_properties( - T, xi_tmp, Eos, density_opts=density_opts, **kwargs - ) + Psat[i], _, _ = calc_saturation_properties(T, xi_tmp, Eos, density_opts=density_opts, **kwargs) if np.isnan(Psat[i]): Psat[i] = Psat_set logger.warning( @@ -3625,25 +3215,14 @@ def calc_bubble_pressure( _yi_global = xi * Psat / P _yi_global /= np.nansum(_yi_global) _yi_global = copy.deepcopy(_yi_global) - logger.info( - "Guess yi in calc_bubble_pressure with Psat: " "{}".format(_yi_global) - ) + logger.info("Guess yi in calc_bubble_pressure with Psat: " "{}".format(_yi_global)) yi = _yi_global Prange, Pestimate = calc_Prange_xi( - T, - xi, - yi, - Eos, - density_opts=density_opts, - mole_fraction_options=mole_fraction_options, - **kwargs + T, xi, yi, Eos, density_opts=density_opts, mole_fraction_options=mole_fraction_options, **kwargs ) if np.any(np.isnan(Prange)): - raise ValueError( - "Neither a suitable pressure range, or guess in pressure could be " - + "found nor was given." - ) + raise ValueError("Neither a suitable pressure range, or guess in pressure could be " + "found nor was given.") else: if Pguess is not None: if Pguess > Prange[1] or Pguess < Prange[0]: @@ -3655,8 +3234,7 @@ def calc_bubble_pressure( P = Pestimate else: logger.warning( - "Using given guess in pressure, {}, that".format(Pguess) - + " is inside identified pressure range." + "Using given guess in pressure, {}, that".format(Pguess) + " is inside identified pressure range." 
) P = Pguess else: @@ -3671,12 +3249,8 @@ def calc_bubble_pressure( ) # find liquid density and fugacity - phil, rhol, flagl = calc_liquid_fugacity_coefficient( - P, T, xi, Eos, density_opts=density_opts - ) - phiv, rhov, flagv = calc_vapor_fugacity_coefficient( - P, T, yi, Eos, density_opts=density_opts - ) + phil, rhol, flagl = calc_liquid_fugacity_coefficient(P, T, xi, Eos, density_opts=density_opts) + phiv, rhov, flagv = calc_vapor_fugacity_coefficient(P, T, yi, Eos, density_opts=density_opts) if "tol" in mole_fraction_options: if mole_fraction_options["tol"] > 1e-10: mole_fraction_options["tol"] = 1e-10 @@ -3690,18 +3264,12 @@ def calc_bubble_pressure( mole_fraction_options=mole_fraction_options, ) - logger.info( - "Final Output: Obj {}, P {} Pa, flagv {}, yi {}".format( - obj, P, flagv, _yi_global - ) - ) + logger.info("Final Output: Obj {}, P {} Pa, flagv {}, yi {}".format(obj, P, flagv, _yi_global)) return P, _yi_global, flagv, flagl, obj -def hildebrand_solubility( - rhol, xi, T, Eos, dT=0.1, tol=1e-4, density_opts={}, **kwargs -): +def hildebrand_solubility(rhol, xi, T, Eos, dT=0.1, tol=1e-4, density_opts={}, **kwargs): r""" Calculate the solubility parameter based on temperature and composition. @@ -3752,15 +3320,9 @@ def hildebrand_solubility( logger.info("rhol should be a float, not {}".format(rhol)) # Find dZdT - vlist, Plist1 = pressure_vs_volume_arrays( - T - dT, xi, Eos, **density_opts, max_density=rhol - ) - vlist2, Plist2 = pressure_vs_volume_arrays( - T + dT, xi, Eos, **density_opts, max_density=rhol - ) - vlist, Plist = pressure_vs_volume_arrays( - T, xi, Eos, **density_opts, max_density=rhol - ) + vlist, Plist1 = pressure_vs_volume_arrays(T - dT, xi, Eos, **density_opts, max_density=rhol) + vlist2, Plist2 = pressure_vs_volume_arrays(T + dT, xi, Eos, **density_opts, max_density=rhol) + vlist, Plist = pressure_vs_volume_arrays(T, xi, Eos, **density_opts, max_density=rhol) if any(vlist != vlist2): logger.error("Dependant variable vectors must be identical.") @@ -3768,9 +3330,7 @@ def hildebrand_solubility( integrand_list = gaussian_filter1d(int_tmp, sigma=0.1) # Calculate U_res - integrand_spline = interpolate.InterpolatedUnivariateSpline( - vlist, integrand_list, ext=1 - ) + integrand_spline = interpolate.InterpolatedUnivariateSpline(vlist, integrand_list, ext=1) U_res = -RT * integrand_spline.integral(1 / rhol, vlist[-1]) # Check if function converged before taking integral, if not, correct area @@ -3798,7 +3358,7 @@ def calc_flash( max_mole_fraction0=1.0, min_mole_fraction0=0.0, Psat_set=1e7, - **kwargs + **kwargs, ): r""" Binary flash calculation of vapor and liquid mole fractions. 
@@ -3853,11 +3413,7 @@ def calc_flash( """ if len(kwargs) > 0: - logger.debug( - "'kwargs' does not use the following keyword arguments: {}".format( - ", ".join(list(kwargs.keys())) - ) - ) + logger.debug("'kwargs' does not use the following keyword arguments: {}".format(", ".join(list(kwargs.keys())))) # Initialize Variables if Eos.number_of_components != 2: @@ -3866,17 +3422,13 @@ def calc_flash( + "{} were given.".format(Eos.number_of_components) ) - Psat, Ki0, xi, yi, phil, phiv = [ - np.zeros(Eos.number_of_components) for _ in np.arange(6) - ] + Psat, Ki0, xi, yi, phil, phiv = [np.zeros(Eos.number_of_components) for _ in np.arange(6)] # Calculate Psat and Ki for i in range(np.size(xi)): xi_tmp = np.zeros_like(xi) xi_tmp[i] = 1.0 - Psat[i], _, _ = calc_saturation_properties( - T, xi_tmp, Eos, density_opts=density_opts, **kwargs - ) + Psat[i], _, _ = calc_saturation_properties(T, xi_tmp, Eos, density_opts=density_opts, **kwargs) if np.isnan(Psat[i]): Psat[i] = Psat_set logger.warning( @@ -3917,20 +3469,14 @@ def calc_flash( yi /= np.sum(yi) # Fugacity Coefficients and New Ki values - phil, rhol, flagl = calc_liquid_fugacity_coefficient( - P, T, xi, Eos, density_opts=density_opts - ) - phiv, rhov, flagv = calc_vapor_fugacity_coefficient( - P, T, yi, Eos, density_opts=density_opts - ) + phil, rhol, flagl = calc_liquid_fugacity_coefficient(P, T, xi, Eos, density_opts=density_opts) + phiv, rhov, flagv = calc_vapor_fugacity_coefficient(P, T, yi, Eos, density_opts=density_opts) logger.info(" xi: {}, phil: {}".format(xi, phil)) logger.info(" yi: {}, phiv: {}".format(yi, phiv)) Kinew = phil / phiv err = np.sum(np.abs(Kinew - Ki)) - logger.info( - " Guess {} Ki: {}, New Ki: {}, Error: {}".format(i, Ki, Kinew, err) - ) + logger.info(" Guess {} Ki: {}, New Ki: {}, Error: {}".format(i, Ki, Kinew, err)) # Check Objective function Kiprev = Ki @@ -3944,9 +3490,7 @@ def calc_flash( if not (Kinew == Ki_tmp).all(): logger.info( " Reset Ki values, {}, according to mole".format(Kinew) - + " fraction constraint, {} to {}, to produce {}".format( - min_mole_fraction0, max_mole_fraction0, Ki_tmp - ) + + " fraction constraint, {} to {}, to produce {}".format(min_mole_fraction0, max_mole_fraction0, Ki_tmp) ) Ki = Ki_tmp if count_reset == 10: @@ -3971,10 +3515,7 @@ def calc_flash( Ki[ind] = 1 / eps Ki[flag_critical] = eps flag_critical += 1 - logger.info( - " Liquid and vapor mole fractions are equal, let search from Ki =" - + " {}".format(Ki) - ) + logger.info(" Liquid and vapor mole fractions are equal, let search from Ki =" + " {}".format(Ki)) elif err < tol: ind = np.where(Ki == min(Ki[Ki > 0]))[0][0] err = np.abs(Kinew[ind] - Ki[ind]) / Ki[ind] @@ -3989,11 +3530,7 @@ def calc_flash( if i == maxiter - 1: ind = np.where(Kiprev == min(Kiprev[Kiprev > 0]))[0][0] err = np.abs(Ki[ind] - Kiprev[ind]) / Kiprev[ind] - logger.warning( - " More than {} iterations needed. Remaining error, {}.".format( - maxiter, err - ) - ) + logger.warning(" More than {} iterations needed. 
Remaining error, {}.".format(maxiter, err)) # If needed, switch liquid and vapor mole fractions flag_switch = False @@ -4009,11 +3546,7 @@ def calc_flash( xi, flagl = yi, flagv yi, flagv = zi, flag - logger.info( - "Final Output: Obj {}, xi {} flagl {}, yi {} flagv {}".format( - err, xi, flagl, yi, flagv - ) - ) + logger.info("Final Output: Obj {}, xi {} flagl {}, yi {} flagv {}".format(err, xi, flagl, yi, flagv)) return xi, flagl, yi, flagv, err @@ -4048,9 +3581,7 @@ def constrain_Ki(Ki0, min_mole_fraction0=0, max_mole_fraction0=1, **kwargs): if len(kwargs) > 0: logger.debug( - "'constrain_Ki' does not use the following keyword arguments: {}".format( - ", ".join(list(kwargs.keys())) - ) + "'constrain_Ki' does not use the following keyword arguments: {}".format(", ".join(list(kwargs.keys()))) ) Ki = Ki0.copy() @@ -4095,9 +3626,7 @@ def constrain_Ki(Ki0, min_mole_fraction0=0, max_mole_fraction0=1, **kwargs): flag[1] = True elif min_mole_fraction0 < 0.0 or min_mole_fraction0 > 1.0: - raise ValueError( - "Mole fractions can only be constrained to a value between 0 and 1" - ) + raise ValueError("Mole fractions can only be constrained to a value between 0 and 1") if 0.0 <= max_mole_fraction0 <= 1.0: bound_max_x0 = (1 - max_mole_fraction0 * Ki[0]) / (1 - max_mole_fraction0) @@ -4118,9 +3647,7 @@ def constrain_Ki(Ki0, min_mole_fraction0=0, max_mole_fraction0=1, **kwargs): flag[3] = True elif max_mole_fraction0 < 0.0 or max_mole_fraction0 > 1.0: - raise ValueError( - "Mole fractions can only be constrained to a value between 0 and 1" - ) + raise ValueError("Mole fractions can only be constrained to a value between 0 and 1") max0 = min(max_list) min0 = max(min_list) @@ -4143,14 +3670,10 @@ def constrain_Ki(Ki0, min_mole_fraction0=0, max_mole_fraction0=1, **kwargs): # if x0 < min_mole_fraction0 or y0 < min_mole_fraction0: - raise ValueError( - "x0: {}, y0 {}, breach lower limit {}".format(x0, y0, max_mole_fraction0) - ) + raise ValueError("x0: {}, y0 {}, breach lower limit {}".format(x0, y0, max_mole_fraction0)) if x0 > max_mole_fraction0 or y0 > max_mole_fraction0: - raise ValueError( - "x0: {}, y0 {}, breach upper limit {}".format(x0, y0, max_mole_fraction0) - ) + raise ValueError("x0: {}, y0 {}, breach upper limit {}".format(x0, y0, max_mole_fraction0)) return Ki, flag_reset @@ -4180,34 +3703,24 @@ def mixture_fugacity_coefficient(P, T, xi, rho, Eos): tmp_test = [gtb.isiterable(x) for x in [P, T, xi[0], rho]] if sum(tmp_test) > 1: - raise ValueError( - "Only one input may be an array representing different system conditions." 
- ) + raise ValueError("Only one input may be an array representing different system conditions.") coefficient = [] if tmp_test[0]: for p in P: - coefficient.append( - np.sum(xi * np.log(Eos.fugacity_coefficient(p, rho, xi, T))) - ) + coefficient.append(np.sum(xi * np.log(Eos.fugacity_coefficient(p, rho, xi, T)))) coefficient = np.array(coefficient) elif tmp_test[1]: for t in T: - coefficient.append( - np.sum(xi * np.log(Eos.fugacity_coefficient(P, rho, xi, t))) - ) + coefficient.append(np.sum(xi * np.log(Eos.fugacity_coefficient(P, rho, xi, t)))) coefficient = np.array(coefficient) elif tmp_test[2]: for xi_tmp in xi: - coefficient.append( - np.sum(xi * np.log(Eos.fugacity_coefficient(P, rho, xi_tmp, T))) - ) + coefficient.append(np.sum(xi * np.log(Eos.fugacity_coefficient(P, rho, xi_tmp, T)))) coefficient = np.array(coefficient) elif tmp_test[3]: for rho_tmp in rho: - coefficient.append( - np.sum(xi * np.log(Eos.fugacity_coefficient(P, rho_tmp, xi, T))) - ) + coefficient.append(np.sum(xi * np.log(Eos.fugacity_coefficient(P, rho_tmp, xi, T)))) coefficient = np.array(coefficient) else: coefficient = np.sum(xi * np.log(Eos.fugacity_coefficient(P, rho, xi, T))) @@ -4252,9 +3765,7 @@ def fugacity_test_1(P, T, xi, rho, Eos, step_size=1e-5, **kwargs): ) Z = P / (rho * T * constants.R) - dlnPhidP = gtb.central_difference( - P, mixture_fugacity_coefficient, step_size=step_size, args=(T, xi, rho, Eos) - ) + dlnPhidP = gtb.central_difference(P, mixture_fugacity_coefficient, step_size=step_size, args=(T, xi, rho, Eos)) residual = dlnPhidP - (Z - 1) / P return residual @@ -4294,8 +3805,7 @@ def fugacity_test_2(P, T, xi, rho, Eos, step_size=1e-3, n0=1, **kwargs): if step_size >= n0: raise ValueError( - "Central difference of n0: {}, cannot be".format(n0) - + " comparable to step_size: {}".format(step_size) + "Central difference of n0: {}, cannot be".format(n0) + " comparable to step_size: {}".format(step_size) ) tmp_test = [gtb.isiterable(x) for x in [P, T, xi[0], rho]] @@ -4314,14 +3824,10 @@ def fugacity_test_2(P, T, xi, rho, Eos, step_size=1e-3, n0=1, **kwargs): logger.error("fugacity_test_2 is for multicomponent systems.") elif len(ind) != ncomp: logger.info( - "There is not a significant amount of components {} in solution".format( - np.setdiff1d(range(ncomp), ind) - ) + "There is not a significant amount of components {} in solution".format(np.setdiff1d(range(ncomp), ind)) ) - dlnPhidrho = gtb.central_difference( - n0, _fugacity_test_2, args=(n0, P, rho, xi, T, Eos), step_size=step_size - ) + dlnPhidrho = gtb.central_difference(n0, _fugacity_test_2, args=(n0, P, rho, xi, T, Eos), step_size=step_size) return np.sum(xi * dlnPhidrho) * 2 * step_size diff --git a/despasito/thermodynamics/calculation_types.py b/despasito/thermodynamics/calculation_types.py index c76a7d3..392f73d 100644 --- a/despasito/thermodynamics/calculation_types.py +++ b/despasito/thermodynamics/calculation_types.py @@ -90,17 +90,13 @@ def bubble_pressure(Eos, **sys_dict): npoints = len(thermo_dict[list(thermo_dict.keys())[0]]) thermo_defaults = [constants.standard_temperature] - thermo_dict.update( - gtb.set_defaults(thermo_dict, "Tlist", thermo_defaults, lx=npoints) - ) + thermo_dict.update(gtb.set_defaults(thermo_dict, "Tlist", thermo_defaults, lx=npoints)) # Optional values optional_keys = ["Pguess", "Pmin", "Pmax"] opts = gtb.check_length_dict(sys_dict, optional_keys, lx=npoints) if opts: - logger.info( - "Accepted user defined variables: {}".format(", ".join(list(opts.keys()))) - ) + logger.info("Accepted user defined 
variables: {}".format(", ".join(list(opts.keys())))) # Delete processed keys for key in thermo_keys + optional_keys: @@ -124,17 +120,15 @@ def bubble_pressure(Eos, **sys_dict): for key in per_job_var: if key in opts_tmp: opts_tmp[key] = opts_tmp[key][i] - inputs.append( - (thermo_dict["Tlist"][i], thermo_dict["xilist"][i], Eos, opts_tmp) - ) + inputs.append((thermo_dict["Tlist"][i], thermo_dict["xilist"][i], Eos, opts_tmp)) if flag_use_mp_object: - P_list, yi_list, flagv_list, flagl_list, obj_list = ( - MultiprocessingObject.pool_job(_bubble_pressure_wrapper, inputs) + P_list, yi_list, flagv_list, flagl_list, obj_list = MultiprocessingObject.pool_job( + _bubble_pressure_wrapper, inputs ) else: - P_list, yi_list, flagv_list, flagl_list, obj_list = ( - MultiprocessingJob.serial_job(_bubble_pressure_wrapper, inputs) + P_list, yi_list, flagv_list, flagl_list, obj_list = MultiprocessingJob.serial_job( + _bubble_pressure_wrapper, inputs ) logger.info("--- Calculation bubble_pressure Complete ---") @@ -166,11 +160,7 @@ def _bubble_pressure_wrapper(args): del opts["pressure_options"]["method"] P, yi, flagv, flagl, obj = calc.calc_bubble_pressure(xi, T, Eos, **opts) except Exception: - logger.warning( - "T (K), xi: {} {}, calculation did not produce a valid result.".format( - T, xi - ) - ) + logger.warning("T (K), xi: {} {}, calculation did not produce a valid result.".format(T, xi)) logger.debug("Calculation Failed:", exc_info=True) P, yi, flagl, flagv, obj = [np.nan, np.nan * np.ones(len(xi)), 3, 3, np.nan] @@ -248,17 +238,13 @@ def dew_pressure(Eos, **sys_dict): npoints = len(thermo_dict[list(thermo_dict.keys())[0]]) thermo_defaults = [constants.standard_temperature] - thermo_dict.update( - gtb.set_defaults(thermo_dict, "Tlist", thermo_defaults, lx=npoints) - ) + thermo_dict.update(gtb.set_defaults(thermo_dict, "Tlist", thermo_defaults, lx=npoints)) # Optional values optional_keys = ["Pguess", "Pmin", "Pmax"] opts = gtb.check_length_dict(sys_dict, optional_keys, lx=npoints) if opts: - logger.info( - "Accepted user defined variables: {}".format(", ".join(list(opts.keys()))) - ) + logger.info("Accepted user defined variables: {}".format(", ".join(list(opts.keys())))) # Delete processed keys for key in thermo_keys + optional_keys: @@ -282,18 +268,14 @@ def dew_pressure(Eos, **sys_dict): for key in per_job_var: if key in opts_tmp: opts_tmp[key] = opts_tmp[key][i] - inputs.append( - (thermo_dict["Tlist"][i], thermo_dict["yilist"][i], Eos, opts_tmp) - ) + inputs.append((thermo_dict["Tlist"][i], thermo_dict["yilist"][i], Eos, opts_tmp)) if flag_use_mp_object: - P_list, xi_list, flagv_list, flagl_list, obj_list = ( - MultiprocessingObject.pool_job(_dew_pressure_wrapper, inputs) + P_list, xi_list, flagv_list, flagl_list, obj_list = MultiprocessingObject.pool_job( + _dew_pressure_wrapper, inputs ) else: - P_list, xi_list, flagv_list, flagl_list, obj_list = ( - MultiprocessingJob.serial_job(_dew_pressure_wrapper, inputs) - ) + P_list, xi_list, flagv_list, flagl_list, obj_list = MultiprocessingJob.serial_job(_dew_pressure_wrapper, inputs) logger.info("--- Calculation dew_pressure Complete ---") @@ -324,11 +306,7 @@ def _dew_pressure_wrapper(args): del opts["pressure_options"]["method"] P, xi, flagl, flagv, obj = calc.calc_dew_pressure(yi, T, Eos, **opts) except Exception: - logger.warning( - "T (K), yi: {} {}, calculation did not produce a valid result.".format( - T, yi - ) - ) + logger.warning("T (K), yi: {} {}, calculation did not produce a valid result.".format(T, yi)) logger.debug("Calculation Failed:", 
exc_info=True) P, xi, flagl, flagv, obj = [np.nan, np.nan * np.ones(len(yi)), 3, 3, np.nan] @@ -447,10 +425,7 @@ def activity_coefficient(Eos, **sys_dict): mode = "standard" if mode is None: - raise ValueError( - "Two of the following system properties must be provided: Tlist, Plist," - " xilist, or yilist" - ) + raise ValueError("Two of the following system properties must be provided: Tlist, Plist," " xilist, or yilist") else: logger.info("Activity coefficient being calculated in {} mode.".format(mode)) @@ -461,17 +436,13 @@ def activity_coefficient(Eos, **sys_dict): np.nan * np.ones(Eos.number_of_components), np.nan, ] - thermo_dict.update( - gtb.set_defaults(thermo_dict, thermo_keys, thermo_defaults, lx=npoints) - ) + thermo_dict.update(gtb.set_defaults(thermo_dict, thermo_keys, thermo_defaults, lx=npoints)) # Optional values optional_keys = ["Pguess", "Pmin", "Pmax"] opts = gtb.check_length_dict(sys_dict, optional_keys, lx=npoints) if opts: - logger.info( - "Accepted user defined variables: {}".format(", ".join(list(opts.keys()))) - ) + logger.info("Accepted user defined variables: {}".format(", ".join(list(opts.keys())))) # Delete processed keys for key in thermo_keys + optional_keys: @@ -655,9 +626,7 @@ def flash(Eos, **sys_dict): npoints = len(thermo_dict[list(thermo_dict.keys())[0]]) thermo_defaults = [constants.standard_temperature, constants.standard_pressure] - thermo_dict.update( - gtb.set_defaults(thermo_dict, thermo_keys, thermo_defaults, lx=npoints) - ) + thermo_dict.update(gtb.set_defaults(thermo_dict, thermo_keys, thermo_defaults, lx=npoints)) # Delete processed keys for key in thermo_keys: @@ -679,19 +648,12 @@ def flash(Eos, **sys_dict): "{} were given.".format(Eos.number_of_components) ) - inputs = [ - (thermo_dict["Tlist"][i], thermo_dict["Plist"][i], Eos, opts) - for i in range(npoints) - ] + inputs = [(thermo_dict["Tlist"][i], thermo_dict["Plist"][i], Eos, opts) for i in range(npoints)] if flag_use_mp_object: - xi_list, yi_list, flagv_list, flagl_list, obj_list = ( - MultiprocessingObject.pool_job(_flash_wrapper, inputs) - ) + xi_list, yi_list, flagv_list, flagl_list, obj_list = MultiprocessingObject.pool_job(_flash_wrapper, inputs) else: - xi_list, yi_list, flagv_list, flagl_list, obj_list = ( - MultiprocessingJob.serial_job(_flash_wrapper, inputs) - ) + xi_list, yi_list, flagv_list, flagl_list, obj_list = MultiprocessingJob.serial_job(_flash_wrapper, inputs) logger.info("--- Calculation flash Complete ---") @@ -714,10 +676,7 @@ def _flash_wrapper(args): try: xi, flagl, yi, flagv, obj = calc.calc_flash(P, T, Eos, **opts) except Exception: - logger.warning( - "T (K), P (Pa): {} {}, calculation".format(T, P) - + " did not produce a valid result." 
- ) + logger.warning("T (K), P (Pa): {} {}, calculation".format(T, P) + " did not produce a valid result.") logger.debug("Calculation Failed:", exc_info=True) xi, yi, flagl, flagv, obj = [ np.nan * np.ones(Eos.number_of_components), @@ -775,18 +734,14 @@ def saturation_properties(Eos, **sys_dict): thermo_dict = gtb.check_length_dict(sys_dict, thermo_keys) if "Tlist" not in thermo_dict: thermo_dict["Tlist"] = np.array([constants.standard_temperature]) - logger.info( - "Assuming standard temperature, {}".format(constants.standard_temperature) - ) + logger.info("Assuming standard temperature, {}".format(constants.standard_temperature)) npoints = len(thermo_dict[list(thermo_dict.keys())[0]]) thermo_defaults = [ constants.standard_temperature, np.array([[1.0] for x in range(npoints)]), ] - thermo_dict.update( - gtb.set_defaults(thermo_dict, thermo_keys, thermo_defaults, lx=npoints) - ) + thermo_dict.update(gtb.set_defaults(thermo_dict, thermo_keys, thermo_defaults, lx=npoints)) # Delete processed keys for key in thermo_keys: @@ -802,18 +757,11 @@ def saturation_properties(Eos, **sys_dict): opts = sys_dict.copy() - inputs = [ - (thermo_dict["Tlist"][i], thermo_dict["xilist"][i], Eos, opts) - for i in range(npoints) - ] + inputs = [(thermo_dict["Tlist"][i], thermo_dict["xilist"][i], Eos, opts) for i in range(npoints)] if flag_use_mp_object: - Psat, rholsat, rhovsat = MultiprocessingObject.pool_job( - _saturation_properties_wrapper, inputs - ) + Psat, rholsat, rhovsat = MultiprocessingObject.pool_job(_saturation_properties_wrapper, inputs) else: - Psat, rholsat, rhovsat = MultiprocessingJob.serial_job( - _saturation_properties_wrapper, inputs - ) + Psat, rholsat, rhovsat = MultiprocessingJob.serial_job(_saturation_properties_wrapper, inputs) logger.info("--- Calculation saturation_properties Complete ---") @@ -830,21 +778,13 @@ def _saturation_properties_wrapper(args): try: Psat, rholsat, rhovsat = calc.calc_saturation_properties(T, xi, Eos, **opts) if np.isnan(Psat): - logger.warning( - "T (K), xi: {} {}, calculation did not produce a valid result.".format( - T, xi - ) - ) + logger.warning("T (K), xi: {} {}, calculation did not produce a valid result.".format(T, xi)) logger.debug("Calculation Failed:", exc_info=True) Psat, rholsat, rhovsat = [np.nan, np.nan, np.nan] else: logger.info("Psat {} Pa, rhol {}, rhov {}".format(Psat, rholsat, rhovsat)) except Exception: - logger.warning( - "T (K), xi: {} {}, calculation did not produce a valid result.".format( - T, xi - ) - ) + logger.warning("T (K), xi: {} {}, calculation did not produce a valid result.".format(T, xi)) logger.debug("Calculation Failed:", exc_info=True) Psat, rholsat, rhovsat = [np.nan, np.nan, np.nan] @@ -913,9 +853,7 @@ def liquid_properties(Eos, **sys_dict): np.array([[1.0] for x in range(npoints)]), constants.standard_pressure, ] - thermo_dict.update( - gtb.set_defaults(thermo_dict, thermo_keys, thermo_defaults, lx=npoints) - ) + thermo_dict.update(gtb.set_defaults(thermo_dict, thermo_keys, thermo_defaults, lx=npoints)) # Delete processed keys for key in thermo_keys: @@ -942,13 +880,9 @@ def liquid_properties(Eos, **sys_dict): for i in range(npoints) ] if flag_use_mp_object: - rhol, phil, flagl = MultiprocessingObject.pool_job( - _liquid_properties_wrapper, inputs - ) + rhol, phil, flagl = MultiprocessingObject.pool_job(_liquid_properties_wrapper, inputs) else: - rhol, phil, flagl = MultiprocessingJob.serial_job( - _liquid_properties_wrapper, inputs - ) + rhol, phil, flagl = MultiprocessingJob.serial_job(_liquid_properties_wrapper, 
inputs) logger.info("--- Calculation liquid_properties Complete ---") @@ -971,11 +905,7 @@ def _liquid_properties_wrapper(args): try: phil, rhol, flagl = calc.calc_liquid_fugacity_coefficient(P, T, xi, Eos, **opts) - logger.info( - "P {} Pa, T {} K, xi {}, rhol {}, phil {}, flagl {}".format( - P, T, xi, rhol, phil, flagl - ) - ) + logger.info("P {} Pa, T {} K, xi {}, rhol {}, phil {}, flagl {}".format(P, T, xi, rhol, phil, flagl)) except Exception: logger.warning("Failed to calculate rhol at {} K and {} Pa".format(T, P)) rhol, flagl = np.nan, 3 @@ -1047,9 +977,7 @@ def vapor_properties(Eos, **sys_dict): np.array([[1.0] for x in range(npoints)]), constants.standard_pressure, ] - thermo_dict.update( - gtb.set_defaults(thermo_dict, thermo_keys, thermo_defaults, lx=npoints) - ) + thermo_dict.update(gtb.set_defaults(thermo_dict, thermo_keys, thermo_defaults, lx=npoints)) # Delete processed keys for key in thermo_keys: @@ -1076,13 +1004,9 @@ def vapor_properties(Eos, **sys_dict): for i in range(npoints) ] if flag_use_mp_object: - rhov, phiv, flagv = MultiprocessingObject.pool_job( - _vapor_properties_wrapper, inputs - ) + rhov, phiv, flagv = MultiprocessingObject.pool_job(_vapor_properties_wrapper, inputs) else: - rhov, phiv, flagv = MultiprocessingJob.serial_job( - _vapor_properties_wrapper, inputs - ) + rhov, phiv, flagv = MultiprocessingJob.serial_job(_vapor_properties_wrapper, inputs) logger.info("--- Calculation vapor_properties Complete ---") @@ -1105,11 +1029,7 @@ def _vapor_properties_wrapper(args): try: phiv, rhov, flagv = calc.calc_vapor_fugacity_coefficient(P, T, yi, Eos, **opts) - logger.info( - "P {} Pa, T {} K, yi {}, rhov {}, phiv {}, flagv {}".format( - P, T, yi, rhov, phiv, flagv - ) - ) + logger.info("P {} Pa, T {} K, yi {}, rhov {}, phiv {}, flagv {}".format(P, T, yi, rhov, phiv, flagv)) except Exception: logger.warning("Failed to calculate rhov at {} K and {} Pa".format(T, P)) rhov, flagv = np.nan, 3 @@ -1186,9 +1106,7 @@ def solubility_parameter(Eos, **sys_dict): np.array([[1.0] for x in range(npoints)]), constants.standard_pressure, ] - thermo_dict.update( - gtb.set_defaults(thermo_dict, thermo_keys, thermo_defaults, lx=npoints) - ) + thermo_dict.update(gtb.set_defaults(thermo_dict, thermo_keys, thermo_defaults, lx=npoints)) # Delete processed keys for key in thermo_keys: @@ -1216,13 +1134,9 @@ def solubility_parameter(Eos, **sys_dict): for i in range(npoints) ] if flag_use_mp_object: - rhol, flagl, delta = MultiprocessingObject.pool_job( - _solubility_parameter_wrapper, inputs - ) + rhol, flagl, delta = MultiprocessingObject.pool_job(_solubility_parameter_wrapper, inputs) else: - rhol, flagl, delta = MultiprocessingJob.serial_job( - _solubility_parameter_wrapper, inputs - ) + rhol, flagl, delta = MultiprocessingJob.serial_job(_solubility_parameter_wrapper, inputs) logger.info("--- Calculation solubility_parameter Complete ---") @@ -1245,11 +1159,7 @@ def _solubility_parameter_wrapper(args): try: rhol, flagl = calc.calc_liquid_density(P, T, xi, Eos, **opts) delta = calc.hildebrand_solubility(rhol, xi, T, Eos, **opts) - logger.info( - "P {} Pa, T {} K, xi {}, rhol {}, flagl {}, delta {}".format( - P, T, xi, rhol, flagl, delta - ) - ) + logger.info("P {} Pa, T {} K, xi {}, rhol {}, flagl {}, delta {}".format(P, T, xi, rhol, flagl, delta)) except Exception: logger.warning("Failed to calculate rhov at {} K and {} Pa".format(T, P)) rhol, flagl, delta = np.nan, 3, np.nan @@ -1333,9 +1243,7 @@ def verify_eos(Eos, **sys_dict): npoints = len(thermo_dict[list(thermo_dict.keys())[0]]) 
thermo_defaults = [constants.standard_temperature, constants.standard_pressure] - thermo_dict.update( - gtb.set_defaults(thermo_dict, ["Tlist", "Plist"], thermo_defaults, lx=npoints) - ) + thermo_dict.update(gtb.set_defaults(thermo_dict, ["Tlist", "Plist"], thermo_defaults, lx=npoints)) # Delete processed keys for key in thermo_keys: diff --git a/despasito/utils/general_toolbox.py b/despasito/utils/general_toolbox.py index 1fb985e..df01163 100644 --- a/despasito/utils/general_toolbox.py +++ b/despasito/utils/general_toolbox.py @@ -82,19 +82,11 @@ def solve_root(func, args=(), method="bisect", x0=None, bounds=None, options={}) " not provided, so problem cannot be solved.".format(method) ) else: - logger.error( - "Optimization method, {}, requires x0, using bisect instead".format( - method - ) - ) + logger.error("Optimization method, {}, requires x0, using bisect instead".format(method)) method = "bisect" if np.size(x0) > 1 and method in ["brentq", "bisect"]: - logger.error( - "Optimization method, {}, is for scalar functions, using {}".format( - method, "least_squares" - ) - ) + logger.error("Optimization method, {}, is for scalar functions, using {}".format(method, "least_squares")) method = "least_squares" if not isiterable(bounds[0]): @@ -107,13 +99,11 @@ def solve_root(func, args=(), method="bisect", x0=None, bounds=None, options={}) + "Because x0 was not provided, so problem cannot be solved." ) else: - logger.error( - "Optimization method, {}, requires bounds, using hybr".format(method) - ) + logger.error("Optimization method, {}, requires bounds, using hybr".format(method)) method = "hybr" if np.any(bounds is not None): - for i,bnd in enumerate(bounds): + for i, bnd in enumerate(bounds): if len(bnd) != 2: raise ValueError("bounds are not of length two") else: @@ -128,21 +118,13 @@ def solve_root(func, args=(), method="bisect", x0=None, bounds=None, options={}) } for key, value in options.items(): outer_dict[key] = value - logger.debug( - "Using the method, {}, with the following options:\n{}".format( - method, outer_dict - ) - ) + logger.debug("Using the method, {}, with the following options:\n{}".format(method, outer_dict)) sol = spo.root(func, x0, args=args, method=method, options=outer_dict) elif method == "anderson": outer_dict = {"fatol": 1e-5, "maxiter": 25} for key, value in options.items(): outer_dict[key] = value - logger.debug( - "Using the method, {}, with the following options:\n{}".format( - method, outer_dict - ) - ) + logger.debug("Using the method, {}, with the following options:\n{}".format(method, outer_dict)) sol = spo.root(func, x0, args=args, method=method, options=outer_dict) elif method in [ "hybr", @@ -156,11 +138,7 @@ def solve_root(func, args=(), method="bisect", x0=None, bounds=None, options={}) outer_dict = {} for key, value in options.items(): outer_dict[key] = value - logger.debug( - "Using the method, {}, with the following options:\n{}".format( - method, outer_dict - ) - ) + logger.debug("Using the method, {}, with the following options:\n{}".format(method, outer_dict)) sol = spo.root(func, x0, args=args, method=method, options=outer_dict) # ################### Minimization Methods with Boundaries ################### @@ -171,11 +149,7 @@ def solve_root(func, args=(), method="bisect", x0=None, bounds=None, options={}) } for key, value in options.items(): outer_dict[key] = value - logger.debug( - "Using the method, {}, with the following options:\n{}".format( - method, outer_dict - ) - ) + logger.debug("Using the method, {}, with the following 
options:\n{}".format(method, outer_dict)) if len(bounds) == 2: sol = spo.minimize( func, @@ -191,11 +165,7 @@ def solve_root(func, args=(), method="bisect", x0=None, bounds=None, options={}) outer_dict = {} for key, value in options.items(): outer_dict[key] = value - logger.debug( - "Using the method, {}, with the following options:\n{}".format( - method, outer_dict - ) - ) + logger.debug("Using the method, {}, with the following options:\n{}".format(method, outer_dict)) if len(bounds) == 2: sol = spo.minimize( func, @@ -214,48 +184,30 @@ def solve_root(func, args=(), method="bisect", x0=None, bounds=None, options={}) for key, value in options.items(): if key in ["xtol", "rtol", "maxiter", "full_output", "disp"]: outer_dict[key] = value - logger.debug( - "Using the method, {}, with the following options:\n{}".format( - method, outer_dict - ) - ) + logger.debug("Using the method, {}, with the following options:\n{}".format(method, outer_dict)) sol = spo.brentq(func, bounds[0][0], bounds[0][1], args=args, **outer_dict) elif method == "least_squares": outer_dict = {} for key, value in options.items(): outer_dict[key] = value - logger.debug( - "Using the method, {}, with the following options:\n{}".format( - method, outer_dict - ) - ) + logger.debug("Using the method, {}, with the following options:\n{}".format(method, outer_dict)) bnd_tmp = [[], []] for bnd in bounds: bnd_tmp[0].append(bnd[0]) bnd_tmp[1].append(bnd[1]) - sol = spo.least_squares( - func, x0, bounds=tuple(bnd_tmp), args=args, **outer_dict - ) + sol = spo.least_squares(func, x0, bounds=tuple(bnd_tmp), args=args, **outer_dict) elif method == "bisect": outer_dict = {"maxiter": 100, "rtol": 1e-12} for key, value in options.items(): if key in ["xtol", "rtol", "maxiter", "full_output", "disp"]: outer_dict[key] = value - logger.debug( - "Using the method, {}, with the following options:\n{}".format( - method, outer_dict - ) - ) + logger.debug("Using the method, {}, with the following options:\n{}".format(method, outer_dict)) sol = spo.bisect(func, bounds[0][0], bounds[0][1], args=args, **outer_dict) # Given final P estimate if method not in ["brentq", "bisect"]: solution = sol.x - logger.info( - "Optimization terminated successfully: {} {}".format( - sol.success, sol.message - ) - ) + logger.info("Optimization terminated successfully: {} {}".format(sol.success, sol.message)) else: logger.info("Optimization terminated successfully: {}".format(sol)) solution = sol @@ -303,9 +255,7 @@ def central_difference(x, func, step_size=1e-5, relative=False, args=()): step = x * step_size if not isiterable(step): step = np.array([step]) - step = np.array( - [2 * np.finfo(float).eps if xx < np.finfo(float).eps else xx for xx in step] - ) + step = np.array([2 * np.finfo(float).eps if xx < np.finfo(float).eps else xx for xx in step]) else: step = step_size @@ -380,9 +330,7 @@ def check_length_dict(dictionary, keys, lx=None): else: lx_array.append(1) if not len(lx_array): - raise ValueError( - "None of the provided keys are found in the given dictionary" - ) + raise ValueError("None of the provided keys are found in the given dictionary") lx = max(lx_array) new_dictionary = {} @@ -396,9 +344,7 @@ def check_length_dict(dictionary, keys, lx=None): elif l_tmp == lx: new_dictionary[key] = np.array(tmp, float) else: - raise ValueError( - "Entry, {}, should be length {}, not {}".format(key, lx, l_tmp) - ) + raise ValueError("Entry, {}, should be length {}, not {}".format(key, lx, l_tmp)) else: new_dictionary[key] = np.array([tmp for x in range(lx)], float) 
@@ -443,11 +389,7 @@ def set_defaults(dictionary, keys, values, lx=None): raise ValueError("Length of given keys and values must be equivalent.") elif not key_iterable: if len(values) != 1: - raise ValueError( - "Multiple default values for given key, {}, is ambiguous".format( - keys - ) - ) + raise ValueError("Multiple default values for given key, {}, is ambiguous".format(keys)) else: keys = [keys] diff --git a/despasito/utils/parallelization.py b/despasito/utils/parallelization.py index 39375cf..3e136cf 100644 --- a/despasito/utils/parallelization.py +++ b/despasito/utils/parallelization.py @@ -41,9 +41,7 @@ def __init__(self, ncores=-1): logger.info("Number of cores set to {}".format(ncores)) elif ncores == 1: self.flag_use_mp = False - logger.info( - "Number of cores set to 1, bypassing mp and using serial methods" - ) + logger.info("Number of cores set to 1, bypassing mp and using serial methods") else: raise ValueError("Number of cores cannot be zero or negative.") @@ -248,9 +246,7 @@ def batch_jobs(func, inputs, ncores=1, logger=None): level = handler.level logging.root.handlers = [] - pool = multiprocessing.Pool( - ncores, initializer=initialize_mp_handler, initargs=(level, logformat) - ) + pool = multiprocessing.Pool(ncores, initializer=initialize_mp_handler, initargs=(level, logformat)) output = zip(*pool.map(func, inputs)) diff --git a/setup.cfg b/setup.cfg index 7dd8caf..08f07d5 100644 --- a/setup.cfg +++ b/setup.cfg @@ -10,24 +10,14 @@ omit = [yapf] # YAPF, in .style.yapf files this shows up as "[style]" header -COLUMN_LIMIT = 119 +COLUMN_LIMIT = 120 INDENT_WIDTH = 4 USE_TABS = False [flake8] # Flake8, PyFlakes, etc -max-line-length = 119 - -[versioneer] -# Automatic version numbering scheme -VCS = git -style = pep440 -versionfile_source = despasito/_version.py -versionfile_build = despasito/_version.py -tag_prefix = '' - -[aliases] -test = pytest +max-line-length = 120 +per-file-ignores = __init__.py:F401 [tool:pytest] testpaths =