diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 80e0783bd..e1d9c7ef2 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -5,7 +5,7 @@
 variables:
-  pythonVersion: 3.9
+  pythonVersion: 3.11
   srcDirectory: src

 trigger:
diff --git a/requirements.txt b/requirements.txt
index c5ee4d7e4..74c615c22 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,17 +1,17 @@
-numpy == 1.26.4
-scipy == 1.6.0
-matplotlib == 3.8.4
-scikit-learn == 1.4.2
-fire == 0.6.0
-pytest == 8.2.0
-coverage == 7.5.0
-pytest-cov == 5.0.0
-pylint == 3.1.0
-wheel == 0.43.0
-pytest-azurepipelines == 1.0.5
-twine == 5.0.0
-pathlib~=1.0.1
-beartype == 0.18.5
-setuptools~=65.5.1
-torch ~= 2.2.2
-torchinfo ~= 1.8.0
+numpy >= 1.26.4
+scipy >= 1.6.0
+matplotlib >= 3.8.4
+scikit-learn >= 1.4.2
+fire >= 0.6.0
+pytest >= 8.2.0
+coverage >= 7.5.0
+pytest-cov >= 5.0.0
+pylint >= 3.1.0
+wheel >= 0.43.0
+pytest-azurepipelines >= 1.0.5
+twine >= 5.0.0
+pathlib >= 1.0.1
+beartype >= 0.18.5
+setuptools >= 65.5.1
+torch >= 2.2.2
+torchinfo >= 1.8.0
diff --git a/setup.py b/setup.py
index f719718a8..4e82123f5 100755
--- a/setup.py
+++ b/setup.py
@@ -1,52 +1,60 @@
 #!/usr/bin/env python
 import sys
+
 version = sys.argv[1]
 del sys.argv[1]

 from setuptools import setup, find_packages
 from pathlib import Path
+
 this_directory = Path(__file__).parent
 long_description = (this_directory / "README.rst").read_text()

 setup(
-    name='UQpy',
+    name="UQpy",
     version=version,
-    url='https://github.com/SURGroup/UQpy',
+    url="https://github.com/SURGroup/UQpy",
     description="UQpy is a general purpose toolbox for Uncertainty Quantification",
     long_description=long_description,
     author="Michael D. Shields, Dimitris G. Giovanis, Audrey Olivier, Aakash Bangalore-Satish, Mohit Chauhan, "
-           "Lohit Vandanapu, Ketson R.M. dos Santos",
-    license='MIT',
+    "Lohit Vandanapu, Ketson R.M. dos Santos",
+    license="MIT",
     platforms=["OSX", "Windows", "Linux"],
     packages=find_packages("src"),
     package_dir={"": "src"},
     package_data={"": ["*.pdf"]},
-    python_requires='>3.9.0',
+    python_requires=">3.9.0",
     install_requires=[
-        "numpy==1.26.4", "scipy>=1.6.0", "matplotlib==3.8.4", "scikit-learn==1.4.2", 'fire==0.6.0',
-        "beartype==0.18.5", "torch ~= 2.2.2", "torchinfo ~= 1.8.0"
+        "numpy >= 1.26.4",
+        "scipy >= 1.6.0",
+        "matplotlib >= 3.8.4",
+        "scikit-learn >= 1.4.2",
+        "fire >= 0.6.0",
+        "beartype >= 0.18.5",
+        "torch >= 2.2.2",
+        "torchinfo >= 1.8.0",
     ],
     extras_require={
-        'dev': [
-            'pytest == 8.2.0',
-            'pytest-cov == 5.0.0',
-            'pylint == 3.1.0',
-            'pytest-azurepipelines == 1.0.5',
-            'pytest-cov == 5.0.0',
-            'wheel == 0.43.0',
-            'twine == 5.0.0',
-            'sphinx_autodoc_typehints == 1.23.0',
-            'sphinx_rtd_theme == 1.2.0',
-            'sphinx_gallery == 0.13.0',
-            'sphinxcontrib_bibtex == 2.5.0',
-            'Sphinx==6.1.3',
+        "dev": [
+            "pytest == 8.2.0",
+            "pytest-cov == 5.0.0",
+            "pylint == 3.1.0",
+            "pytest-azurepipelines == 1.0.5",
+            "wheel == 0.43.0",
+            "twine == 5.0.0",
+            "sphinx_autodoc_typehints == 1.23.0",
+            "sphinx_rtd_theme == 1.2.0",
+            "sphinx_gallery == 0.13.0",
+            "sphinxcontrib_bibtex == 2.5.0",
+            "Sphinx == 6.1.3",
+            "hypothesis ~= 6.131.19"
         ]
     },
     classifiers=[
-        'Programming Language :: Python :: 3',
-        'Intended Audience :: Science/Research',
-        'Topic :: Scientific/Engineering :: Mathematics',
-        'License :: OSI Approved :: MIT License',
-        'Natural Language :: English',
+        "Programming Language :: Python :: 3",
+        "Intended Audience :: Science/Research",
+        "Topic :: Scientific/Engineering :: Mathematics",
+        "License :: OSI Approved :: MIT License",
+        "Natural Language :: English",
     ],
 )
diff --git a/src/UQpy/run_model/model_execution/ThirdPartyModel.py b/src/UQpy/run_model/model_execution/ThirdPartyModel.py
index 0d09334fc..c833c3769 100644
--- a/src/UQpy/run_model/model_execution/ThirdPartyModel.py
+++ b/src/UQpy/run_model/model_execution/ThirdPartyModel.py
@@ -227,6 +227,11 @@ def execute_single_sample(self, index, sample_to_send):
     def postprocess_single_file(self, index, model_output):
         if self.output_script is not None:
             output = self._output_serial(index)
+        else:
+            raise RuntimeError(
+                "UQpy: To postprocess a run using postprocess_single_file within RunModel, output_script cannot be None."
+                " See documentation for usage details."
+ ) work_dir = os.path.join(self.model_dir, "run_" + str(index)) # Remove the copied files and folders diff --git a/src/UQpy/scientific_machine_learning/functional/generalized_jensen_shannon_divergence.py b/src/UQpy/scientific_machine_learning/functional/generalized_jensen_shannon_divergence.py index 2efa7fa9e..f8593d376 100644 --- a/src/UQpy/scientific_machine_learning/functional/generalized_jensen_shannon_divergence.py +++ b/src/UQpy/scientific_machine_learning/functional/generalized_jensen_shannon_divergence.py @@ -78,7 +78,7 @@ def generalized_jensen_shannon_divergence( alpha * kl_divergence_p_m ) js_divergence /= n_samples - js_divergence = torch.tensor(js_divergence, device=device) + js_divergence = torch.tensor(js_divergence, device=device, dtype=torch.float32) if reduction == "none": return js_divergence diff --git a/src/UQpy/sensitivity/PostProcess.py b/src/UQpy/sensitivity/PostProcess.py index ade3edb39..0688057f7 100644 --- a/src/UQpy/sensitivity/PostProcess.py +++ b/src/UQpy/sensitivity/PostProcess.py @@ -67,6 +67,7 @@ def plot_sensitivity_index( error = confidence_interval[:, 1] - indices else: conf_int_flag = False + error = None # x and y data _idx = np.arange(num_vars) @@ -171,12 +172,14 @@ def plot_index_comparison( error_1 = confidence_interval_1[:, 1] - indices_1 else: conf_int_flag_1 = False + error_1 = None if confidence_interval_2 is not None: conf_int_flag_2 = True error_2 = confidence_interval_2[:, 1] - indices_2 else: conf_int_flag_2 = False + error_2 = None # x and y data _idx = np.arange(num_vars) @@ -289,6 +292,7 @@ def plot_second_order_indices( error = confidence_interval[:, 1] - indices else: conf_int_flag = False + error = None # x and y data _idx = np.arange(num_second_order_terms) diff --git a/src/UQpy/surrogates/polynomial_chaos/PolynomialChaosExpansion.py b/src/UQpy/surrogates/polynomial_chaos/PolynomialChaosExpansion.py index c61d6d627..56c02413e 100644 --- a/src/UQpy/surrogates/polynomial_chaos/PolynomialChaosExpansion.py +++ b/src/UQpy/surrogates/polynomial_chaos/PolynomialChaosExpansion.py @@ -1,12 +1,12 @@ import logging - import numpy as np from beartype import beartype - from UQpy.utilities.ValidationTypes import NumpyFloatArray from UQpy.surrogates.baseclass.Surrogate import Surrogate from UQpy.surrogates.polynomial_chaos.regressions.baseclass.Regression import Regression -from UQpy.surrogates.polynomial_chaos.polynomials.TotalDegreeBasis import PolynomialBasis +from UQpy.surrogates.polynomial_chaos.polynomials.TotalDegreeBasis import ( + PolynomialBasis, +) from UQpy.distributions import Uniform, Normal from UQpy.surrogates.polynomial_chaos.polynomials import Legendre, Hermite @@ -14,7 +14,9 @@ class PolynomialChaosExpansion(Surrogate): @beartype - def __init__(self, polynomial_basis: PolynomialBasis, regression_method: Regression): + def __init__( + self, polynomial_basis: PolynomialBasis, regression_method: Regression + ): """ Constructs a surrogate model based on the Polynomial Chaos Expansion (polynomial_chaos) method. @@ -76,7 +78,7 @@ def fit(self, x: np.ndarray, y: np.ndarray): The :meth:`fit` method has no returns and it creates an :class:`numpy.ndarray` with the polynomial_chaos coefficients. 
""" - self.set_data(x,y) + self.set_data(x, y) self.logger.info("UQpy: Running polynomial_chaos.fit") self.coefficients, self.bias, self.outputs_number = self.regression_method.run(x, y, self.design_matrix) self.logger.info("UQpy: polynomial_chaos fit complete.") @@ -101,32 +103,33 @@ def leaveoneout_error(self): The :meth:`.PolynomialChaosExpansion.leaveoneout_error` method can be used to estimate the accuracy of the PCE predictor without additional simulations. Leave-one-out error :math:`Q^2` is calculated from differences between the original mathematical model and approximation :math:`\Delta_i= \mathcal{M} (x^{(i)}) - \mathcal{M}^{PCE}(x^{(i)})` as follows: - + .. math:: Q^2 = \mathbb{E}[(\Delta_i/(1-h_i))^2]/\sigma_Y^2 - + where :math:`\sigma_Y^2` is a variance of model response and :math:`h_i` represents the :math:`i` th diagonal term of matrix :math:`\mathbf{H}= \Psi ( \Psi^T \Psi )^{-1} \Psi^T` obtained from design matrix :math:`\Psi` without additional simulations :cite:`BLATMANLARS`. :return: Cross validation error of experimental design. """ - x = self.experimental_design_input y = self.experimental_design_output - if y.ndim == 1 or y.shape[1] == 1: y = y.reshape(-1, 1) n_samples = x.shape[0] mu_yval = (1 / n_samples) * np.sum(y, axis=0) - y_val = self.predict(x, ) + y_val = self.predict(x) polynomialbasis = self.design_matrix H = np.dot(polynomialbasis, np.linalg.pinv(np.dot(polynomialbasis.T, polynomialbasis))) H *= polynomialbasis Hdiag = np.sum(H, axis=1).reshape(-1, 1) - eps_val = ((n_samples - 1) / n_samples * np.sum(((y - y_val) / (1 - Hdiag)) ** 2, axis=0)) / ( - np.sum((y - mu_yval) ** 2, axis=0)) + eps_val = ( + (n_samples - 1) + / n_samples + * np.sum(((y - y_val) / (1 - Hdiag)) ** 2, axis=0) + ) / (np.sum((y - mu_yval) ** 2, axis=0)) if y.ndim == 1 or y.shape[1] == 1: eps_val = float(eps_val) @@ -151,18 +154,20 @@ def validation_error(self, x: np.ndarray, y: np.ndarray): :param y: :class:`numpy.ndarray` containing model evaluations for the validation dataset. :return: Validation error. """ - if y.ndim == 1 or y.shape[1] == 1: y = y.reshape(-1, 1) - y_val = self.predict(x, ) - + y_val = self.predict(x) n_samples = x.shape[0] mu_yval = (1 / n_samples) * np.sum(y, axis=0) - eps_val = ((n_samples - 1) / n_samples - * ((np.sum((y - y_val) ** 2, axis=0)) - / (np.sum((y - mu_yval) ** 2, axis=0)))) - + eps_val = ( + (n_samples - 1) + / n_samples + * ( + (np.sum((y - y_val) ** 2, axis=0)) + / (np.sum((y - mu_yval) ** 2, axis=0)) + ) + ) if y.ndim == 1 or y.shape[1] == 1: eps_val = float(eps_val) @@ -186,18 +191,16 @@ def get_moments(self, higher: bool = False): .. math:: \sigma^{2}_{PCE} = \mathbb{E} [( \mathcal{M}^{PCE}(x) - \mu_{PCE} )^{2} ] = \sum_{i=1}^{p} y_{i} where :math:`p` is the number of polynomials (first PCE coefficient is excluded). - + The third moment (skewness) and the fourth moment (kurtosis) are generally obtained from third and fourth order products obtained by integration, which are extremely computationally demanding. Therefore here we use analytical solution of standard linearization problem for Hermite (based on normalized Feldheim's formula ) and Legendre polynomials (based on normalized Neumann-Adams formula), though it might be still computationally demanding for high number of input random variables. - + :param higher: True corresponds to calculation of skewness and kurtosis (computationaly expensive for large basis set). :return: Returns the mean and variance. 
""" - if self.bias is not None: mean = self.coefficients[0, :] + np.squeeze(self.bias) else: mean = self.coefficients[0, :] - variance = np.sum(self.coefficients[1:] ** 2, axis=0) if self.coefficients.ndim == 1 or self.coefficients.shape[1] == 1: @@ -206,79 +209,90 @@ def get_moments(self, higher: bool = False): if not higher: return mean, variance - else: multindex = self.multi_index_set P, inputs_number = multindex.shape - if inputs_number == 1: marginals = [self.polynomial_basis.distributions] - else: marginals = self.polynomial_basis.distributions.marginals - skewness = np.zeros(self.outputs_number) kurtosis = np.zeros(self.outputs_number) for ii in range(0, self.outputs_number): - Beta = self.coefficients[:, ii] third_moment = 0 fourth_moment = 0 - - indices = np.array(np.meshgrid(range(1, P), range(1, P), range(1, P), range(1, P))).T.reshape(-1, 4) + indices = np.array( + np.meshgrid(range(1, P), range(1, P), range(1, P), range(1, P)) + ).T.reshape(-1, 4) i = 0 for index in indices: tripleproduct_ND = 1 quadproduct_ND = 1 - for m in range(0, inputs_number): - if i < (P - 1) ** 3: - if type(marginals[m]) == Normal: - tripleproduct_1D = Hermite.hermite_triple_product(multindex[index[0], m], - multindex[index[1], m], - multindex[index[2], m]) - - if type(marginals[m]) == Uniform: - tripleproduct_1D = Legendre.legendre_triple_product(multindex[index[0], m], - multindex[index[1], m], - multindex[index[2], m]) - + tripleproduct_1D = Hermite.hermite_triple_product( + multindex[index[0], m], + multindex[index[1], m], + multindex[index[2], m], + ) + elif type(marginals[m]) == Uniform: + tripleproduct_1D = Legendre.legendre_triple_product( + multindex[index[0], m], + multindex[index[1], m], + multindex[index[2], m], + ) + else: + raise ValueError(f"UQpy: Expected marginals of polynomial_basis to be either Normal or Uniform. Got {marginals[m]}") tripleproduct_ND = tripleproduct_ND * tripleproduct_1D - else: tripleproduct_ND = 0 quadproduct_1D = 0 - for n in range(0, multindex[index[0], m] + multindex[index[1], m] + 1): - + for n in range( + 0, multindex[index[0], m] + multindex[index[1], m] + 1 + ): if type(marginals[m]) == Normal: - tripproduct1 = Hermite.hermite_triple_product(multindex[index[0], m], - multindex[index[1], m], n) - tripproduct2 = Hermite.hermite_triple_product(multindex[index[2], m], - multindex[index[3], m], n) - - if type(marginals[m]) == Uniform: - tripproduct1 = Legendre.legendre_triple_product(multindex[index[0], m], - multindex[index[1], m], n) - tripproduct2 = Legendre.legendre_triple_product(multindex[index[2], m], - multindex[index[3], m], n) - - quadproduct_1D = quadproduct_1D + tripproduct1 * tripproduct2 - + tripproduct1 = Hermite.hermite_triple_product( + multindex[index[0], m], multindex[index[1], m], n + ) + tripproduct2 = Hermite.hermite_triple_product( + multindex[index[2], m], multindex[index[3], m], n + ) + elif type(marginals[m]) == Uniform: + tripproduct1 = Legendre.legendre_triple_product( + multindex[index[0], m], multindex[index[1], m], n + ) + tripproduct2 = Legendre.legendre_triple_product( + multindex[index[2], m], multindex[index[3], m], n + ) + else: + raise ValueError(f"UQpy: Expected marginals of polynomial_basis to be either Normal or Uniform. 
Got {marginals[m]}") + quadproduct_1D = ( + quadproduct_1D + tripproduct1 * tripproduct2 + ) quadproduct_ND = quadproduct_ND * quadproduct_1D - third_moment += tripleproduct_ND * Beta[index[0]] * Beta[index[1]] * Beta[index[2]] - fourth_moment += quadproduct_ND * Beta[index[0]] * Beta[index[1]] * Beta[index[2]] * Beta[index[3]] - + third_moment += ( + tripleproduct_ND + * Beta[index[0]] + * Beta[index[1]] + * Beta[index[2]] + ) + fourth_moment += ( + quadproduct_ND + * Beta[index[0]] + * Beta[index[1]] + * Beta[index[2]] + * Beta[index[3]] + ) i += 1 skewness[ii] = 1 / (np.sqrt(variance) ** 3) * third_moment - kurtosis[ii] = 1 / (variance ** 2) * fourth_moment - + kurtosis[ii] = 1 / (variance**2) * fourth_moment if self.coefficients.ndim == 1 or self.coefficients.shape[1] == 1: skewness = float(skewness[0]) kurtosis = float(kurtosis[0]) diff --git a/src/UQpy/surrogates/polynomial_chaos/physics_informed/ConstrainedPCE.py b/src/UQpy/surrogates/polynomial_chaos/physics_informed/ConstrainedPCE.py index 6414bd248..755724ff8 100644 --- a/src/UQpy/surrogates/polynomial_chaos/physics_informed/ConstrainedPCE.py +++ b/src/UQpy/surrogates/polynomial_chaos/physics_informed/ConstrainedPCE.py @@ -106,6 +106,8 @@ def lar(self, raise Exception('LAR identified constant function! Check your data.') best_error = np.inf + best_basis = None + best_index = None lar_basis = [] lar_index = [] lar_error = [] diff --git a/src/UQpy/surrogates/polynomial_chaos/physics_informed/Utilities.py b/src/UQpy/surrogates/polynomial_chaos/physics_informed/Utilities.py index adbd4ad8f..03c65dc91 100644 --- a/src/UQpy/surrogates/polynomial_chaos/physics_informed/Utilities.py +++ b/src/UQpy/surrogates/polynomial_chaos/physics_informed/Utilities.py @@ -20,9 +20,10 @@ def transformation_multiplier(data_object: PdeData, leading_variable, derivation :return: multiplier reflecting a different sizes of physical and standardized spaces """ - size = np.abs(data_object.xmax[leading_variable] - data_object.xmin[leading_variable]) + size = np.abs( + data_object.xmax[leading_variable] - data_object.xmin[leading_variable] + ) multiplier = (2 / size) ** derivation_order - return multiplier @@ -36,9 +37,8 @@ def ortho_grid(n_samples: int, nvar: int, x_min: float = -1, x_max: float = 1): :param x_max: upper bound of hypercube :return: generated grid of samples """ - xrange = (x_max - x_min) / 2 - nsim = n_samples ** nvar + nsim = n_samples**nvar x = np.linspace(x_min + xrange / n_samples, x_max - xrange / n_samples, n_samples) x_list = [x] * nvar X = np.meshgrid(*x_list) @@ -47,8 +47,12 @@ def ortho_grid(n_samples: int, nvar: int, x_min: float = -1, x_max: float = 1): @beartype -def derivative_basis(standardized_sample: np.ndarray, pce: PolynomialChaosExpansion, derivative_order: int, - leading_variable: int): +def derivative_basis( + standardized_sample: np.ndarray, + pce: PolynomialChaosExpansion, + derivative_order: int, + leading_variable: int, +): """ Evaluate derivative basis of given pce object. 
:param standardized_sample: samples in standardized space for an evaluation of derived basis @@ -62,31 +66,40 @@ def derivative_basis(standardized_sample: np.ndarray, pce: PolynomialChaosExpans multindex = pce.multi_index_set joint_distribution = pce.polynomial_basis.distributions - multivariate_basis = construct_basis(standardized_sample, multindex, joint_distribution, derivative_order, - leading_variable) + multivariate_basis = construct_basis( + standardized_sample, + multindex, + joint_distribution, + derivative_order, + leading_variable, + ) else: - raise Exception('derivative_basis function is defined only for positive derivative_order!') + raise Exception( + "derivative_basis function is defined only for positive derivative_order!" + ) return multivariate_basis @beartype -def construct_basis(standardized_sample: np.ndarray, multindex: np.ndarray, - joint_distribution: Distribution, - derivative_order: int = 0, leading_variable: int = 0): +def construct_basis( + standardized_sample: np.ndarray, + multindex: np.ndarray, + joint_distribution: Distribution, + derivative_order: int = 0, + leading_variable: int = 0, +): + """ + Construct and evaluate derivative basis. + :param standardized_sample: samples in standardized space for an evaluation of derived basis + :param multindex: set of multi-indices corresponding to polynomial orders in basis set + :param joint_distribution: joint probability distribution of input variables, + an object of the :py:meth:`UQpy` :class:`Distribution` class + :param derivative_order: order of derivative + :param leading_variable: leading variable of derivatives + :return: evaluated derived basis """ - Construct and evaluate derivative basis. - :param standardized_sample: samples in standardized space for an evaluation of derived basis - :param multindex: set of multi-indices corresponding to polynomial orders in basis set - :param joint_distribution: joint probability distribution of input variables, - an object of the :py:meth:`UQpy` :class:`Distribution` class - :param derivative_order: order of derivative - :param leading_variable: leading variable of derivatives - :return: evaluated derived basis - """ - card_basis, nvar = multindex.shape - if nvar == 1: marginals = [joint_distribution] else: @@ -95,28 +108,40 @@ def construct_basis(standardized_sample: np.ndarray, multindex: np.ndarray, mask_herm = [type(marg) == Normal for marg in marginals] mask_lege = [type(marg) == Uniform for marg in marginals] if derivative_order >= 0: - ns = multindex[:, leading_variable] polysd = [] - if mask_lege[leading_variable]: - for n in ns: polysd.append(legendre(n).deriv(derivative_order)) - - prep_l_deriv = np.sqrt((2 * multindex[:, leading_variable] + 1)).reshape(-1, 1) - + prep_l_deriv = np.sqrt((2 * multindex[:, leading_variable] + 1)).reshape( + -1, 1 + ) prep_deriv = [] for poly in polysd: - prep_deriv.append(np.polyval(poly, standardized_sample[:, leading_variable]).reshape(-1, 1)) - + prep_deriv.append( + np.polyval(poly, standardized_sample[:, leading_variable]).reshape( + -1, 1 + ) + ) prep_deriv = np.array(prep_deriv) - + else: + raise ValueError( + "UQpy: Currently only supports Uniform marginals and Legendre Polynomials for the leading variable." + " Check the marginals of the distribution used in your PolynomialBasis and the leading_variable index." 
+ ) mask_herm[leading_variable] = False mask_lege[leading_variable] = False + else: + raise ValueError( + f"UQpy: Expected derivative_order >= 0, got derivative_order={derivative_order}" + ) - prep_hermite = sp.eval_hermitenorm(multindex[:, mask_herm][:, np.newaxis, :], standardized_sample[:, mask_herm]) - prep_legendre = sp.eval_legendre(multindex[:, mask_lege][:, np.newaxis, :], standardized_sample[:, mask_lege]) + prep_hermite = sp.eval_hermitenorm( + multindex[:, mask_herm][:, np.newaxis, :], standardized_sample[:, mask_herm] + ) + prep_legendre = sp.eval_legendre( + multindex[:, mask_lege][:, np.newaxis, :], standardized_sample[:, mask_lege] + ) prep_fact = np.sqrt(sp.factorial(multindex[:, mask_herm])) prep = np.sqrt((2 * multindex[:, mask_lege] + 1)) @@ -125,5 +150,7 @@ def construct_basis(standardized_sample: np.ndarray, multindex: np.ndarray, multivariate_basis *= np.prod(prep_legendre * prep[:, np.newaxis, :], axis=2).T if leading_variable is not None: - multivariate_basis *= np.prod(prep_deriv * prep_l_deriv[:, np.newaxis, :], axis=2).T + multivariate_basis *= np.prod( + prep_deriv * prep_l_deriv[:, np.newaxis, :], axis=2 + ).T return multivariate_basis diff --git a/src/UQpy/surrogates/polynomial_chaos/polynomials/baseclass/PolynomialBasis.py b/src/UQpy/surrogates/polynomial_chaos/polynomials/baseclass/PolynomialBasis.py index 649e0f28c..b28586e1a 100644 --- a/src/UQpy/surrogates/polynomial_chaos/polynomials/baseclass/PolynomialBasis.py +++ b/src/UQpy/surrogates/polynomial_chaos/polynomials/baseclass/PolynomialBasis.py @@ -15,11 +15,14 @@ class PolynomialBasis(ABC): - def __init__(self, inputs_number: int, - polynomials_number: int, - multi_index_set: np.ndarray, - polynomials: Polynomials, - distributions: Union[Distribution, list[Distribution]]): + def __init__( + self, + inputs_number: int, + polynomials_number: int, + multi_index_set: np.ndarray, + polynomials: Polynomials, + distributions: Union[Distribution, list[Distribution]], + ): """ Create polynomial basis for a given multi index set. """ @@ -34,7 +37,6 @@ def evaluate_basis(self, samples: np.ndarray): eval_matrix = np.empty([samples_number, self.polynomials_number]) for ii in range(self.polynomials_number): eval_matrix[:, ii] = self.polynomials[ii].evaluate(samples) - return eval_matrix @staticmethod @@ -57,8 +59,9 @@ def calculate_total_degree_set(inputs_number: int, degree: int): row_end = rows + row_start - 1 # recursive call - midx_set[row_start:row_end + 1, :] = PolynomialBasis. 
\ - calculate_total_degree_recursive(inputs_number, i, rows) + midx_set[row_start : row_end + 1, :] = ( + PolynomialBasis.calculate_total_degree_recursive(inputs_number, i, rows) + ) # update starting row row_start = row_end + 1 @@ -94,57 +97,62 @@ def calculate_total_degree_recursive(N, w, rows): row_end = row_start + sub_rows - 1 # first column - subset[row_start:row_end + 1, 0] = k * np.ones(sub_rows) + subset[row_start : row_end + 1, 0] = k * np.ones(sub_rows) # subset update --> recursive call - subset[row_start:row_end + 1, 1:] = \ - PolynomialBasis.calculate_total_degree_recursive(N - 1, w - k, sub_rows) + subset[row_start : row_end + 1, 1:] = ( + PolynomialBasis.calculate_total_degree_recursive( + N - 1, w - k, sub_rows + ) + ) # update row indices row_start = row_end + 1 return subset - - @staticmethod - def calculate_hyperbolic_set(inputs_number, degree,q): - xmono=np.zeros(inputs_number) - X=[] + @staticmethod + def calculate_hyperbolic_set(inputs_number, degree, q): + xmono = np.zeros(inputs_number) + X = [] X.append(xmono) - - while np.sum(xmono)<=degree: + while np.sum(xmono) <= degree: # generate multi-indices one by one - x=np.array(xmono) + x = np.array(xmono) i = 0 - for j in range ( inputs_number, 0, -1 ): - if ( 0 < x[j-1] ): + for j in range(inputs_number, 0, -1): + if 0 < x[j - 1]: i = j break - if ( i == 0 ): - x[inputs_number-1] = 1 - xmono=x + if i == 0: + x[inputs_number - 1] = 1 + xmono = x else: - if ( i == 1 ): + if i == 1: t = x[0] + 1 im1 = inputs_number - if ( 1 < i ): - t = x[i-1] + elif 1 < i: + t = x[i - 1] im1 = i - 1 + else: + raise ValueError( + "UQpy: Unexpected indexing error in calculate_hyperbolic_set." + " May be caused by improper indexing due to negative inputs_number." + ) + + x[i - 1] = 0 + x[im1 - 1] = x[im1 - 1] + 1 + x[inputs_number - 1] = x[inputs_number - 1] + t - 1 - x[i-1] = 0 - x[im1-1] = x[im1-1] + 1 - x[inputs_number-1] = x[inputs_number-1] + t - 1 + xmono = x - xmono=x - - # check the hyperbolic criterion - if (np.round(np.sum(xmono**q)**(1/q), 4) <= degree): + # check the hyperbolic criterion + if np.round(np.sum(xmono**q) ** (1 / q), 4) <= degree: X.append(xmono) + return np.array(X).astype(int) - return(np.array(X).astype(int)) - @staticmethod def calculate_tensor_product_set(inputs_number, degree): orders = np.arange(0, degree + 1, 1).tolist() @@ -154,8 +162,7 @@ def calculate_tensor_product_set(inputs_number, degree): midx = list(itertools.product(orders, repeat=inputs_number)) midx = [list(elem) for elem in midx] midx_sums = [int(math.fsum(midx[i])) for i in range(len(midx))] - midx_sorted = sorted(range(len(midx_sums)), - key=lambda k: midx_sums[k]) + midx_sorted = sorted(range(len(midx_sums)), key=lambda k: midx_sums[k]) midx_set = np.array([midx[midx_sorted[i]] for i in range(len(midx))]) return midx_set.astype(int) @@ -166,6 +173,9 @@ def construct_arbitrary_basis(inputs_number, distributions, multi_index_set): if inputs_number == 1: return [ Polynomials.distribution_to_polynomial[type(distributions)]( - distributions=distributions, degree=int(idx[0])) for idx in multi_index_set] + distributions=distributions, degree=int(idx[0]) + ) + for idx in multi_index_set + ] else: return [PolynomialsND(distributions, idx) for idx in multi_index_set] diff --git a/src/UQpy/surrogates/polynomial_chaos/regressions/LeastAngleRegression.py b/src/UQpy/surrogates/polynomial_chaos/regressions/LeastAngleRegression.py index d2d732aae..bfab9ed78 100644 --- a/src/UQpy/surrogates/polynomial_chaos/regressions/LeastAngleRegression.py +++ 
b/src/UQpy/surrogates/polynomial_chaos/regressions/LeastAngleRegression.py @@ -2,21 +2,24 @@ import numpy as np from beartype import beartype import copy - from UQpy.surrogates.polynomial_chaos.PolynomialChaosExpansion import PolynomialChaosExpansion from UQpy.surrogates.polynomial_chaos.polynomials.TotalDegreeBasis import PolynomialBasis from UQpy.surrogates.polynomial_chaos.regressions.baseclass.Regression import Regression from UQpy.surrogates.polynomial_chaos.regressions import LeastSquareRegression - from sklearn import linear_model as regresion class LeastAngleRegression(Regression): @beartype - def __init__(self, fit_intercept: bool = False, verbose: bool = False, n_nonzero_coefs: int = 1000, - normalize: bool = False): + def __init__( + self, + fit_intercept: bool = False, + verbose: bool = False, + n_nonzero_coefs: int = 1000, + normalize: bool = False, + ): """ - Class to select the best model approximation and calculate the polynomial_chaos coefficients with the Least Angle + Class to select the best model approximation and calculate the polynomial_chaos coefficients with the Least Angle Regression method combined with ordinary least squares. :param n_nonzero_coefs: Maximum number of non-zero coefficients. @@ -32,7 +35,7 @@ def __init__(self, fit_intercept: bool = False, verbose: bool = False, n_nonzero def run(self, x: np.ndarray, y: np.ndarray, design_matrix: np.ndarray): """ - Implements the LAR method to compute the polynomial_chaos coefficients. + Implements the LAR method to compute the polynomial_chaos coefficients. Recommended only for model_selection algorithm. :param x: :class:`numpy.ndarray` containing the training points (samples). @@ -44,8 +47,11 @@ def run(self, x: np.ndarray, y: np.ndarray, design_matrix: np.ndarray): P = polynomialbasis.shape[1] n_samples, inputs_number = x.shape - reg = regresion.Lars(fit_intercept=self.fit_intercept, verbose=self.verbose, - n_nonzero_coefs=self.n_nonzero_coefs) + reg = regresion.Lars( + fit_intercept=self.fit_intercept, + verbose=self.verbose, + n_nonzero_coefs=self.n_nonzero_coefs, + ) reg.fit(design_matrix, y) # LarsBeta = reg.coef_path_ @@ -62,13 +68,13 @@ def run(self, x: np.ndarray, y: np.ndarray, design_matrix: np.ndarray): def model_selection(pce_object: PolynomialChaosExpansion, target_error=1, check_overfitting=True): """ LARS model selection algorithm for given TargetError of approximation - measured by Cross validation: Leave-one-out error (1 is perfect approximation). Option to check overfitting by + measured by Cross validation: Leave-one-out error (1 is perfect approximation). Option to check overfitting by empirical rule: if three steps in a row have a decreasing accuracy, stop the algorithm. :param pce_object: existing target PCE for model_selection :param target_error: Target error of an approximation (stoping criterion). :param check_overfitting: Whether to check over-fitting by empirical rule. - :return: copy of input PolynomialChaosExpansion containing the best possible model for given data identified by LARs + :return: copy of input PolynomialChaosExpansion containing the best possible model for given data identified by LARs """ pce = copy.deepcopy(pce_object) @@ -94,18 +100,24 @@ def model_selection(pce_object: PolynomialChaosExpansion, target_error=1, check_ error = 0 overfitting = False BestLarsError = 0 + BestLarsBasis = None + BestLarsMultindex = None step = 0 - - if steps<3: - raise Exception('LAR identified constant function! 
Check your data.') - while BestLarsError < target_error and step < steps - 2 and overfitting == False: + if steps < 3: + raise Exception("LAR identified constant function! Check your data.") + + while ( + BestLarsError < target_error and step < steps - 2 and overfitting == False + ): mask = LarsBeta[:, step + 2] != 0 mask[0] = True larsindex.append(multindex[mask, :]) - larsbasis.append(list(np.array(pce_object.polynomial_basis.polynomials)[mask])) + larsbasis.append( + list(np.array(pce_object.polynomial_basis.polynomials)[mask]) + ) pce.polynomial_basis.polynomials_number = len(larsbasis[step]) pce.polynomial_basis.polynomials = larsbasis[step] @@ -122,7 +134,6 @@ def model_selection(pce_object: PolynomialChaosExpansion, target_error=1, check_ BestLarsMultindex = larsindex[step] BestLarsBasis = larsbasis[step] BestLarsError = LarsError[step] - else: if error > BestLarsError: BestLarsMultindex = larsindex[step] @@ -130,8 +141,12 @@ def model_selection(pce_object: PolynomialChaosExpansion, target_error=1, check_ BestLarsError = LarsError[step] if (step > 3) and (check_overfitting == True): - if (BestLarsError > 0.6) and (error < LarsError[step - 1]) and (error < LarsError[step - 2]) and ( - error < LarsError[step - 3]): + if ( + (BestLarsError > 0.6) + and (error < LarsError[step - 1]) + and (error < LarsError[step - 2]) + and (error < LarsError[step - 3]) + ): overfitting = True step += 1 diff --git a/tests/unit_tests/inference/test_bayes_parameter_estimation.py b/tests/unit_tests/inference/test_bayes_parameter_estimation.py index 357b1f0e6..7fbf4c333 100644 --- a/tests/unit_tests/inference/test_bayes_parameter_estimation.py +++ b/tests/unit_tests/inference/test_bayes_parameter_estimation.py @@ -66,8 +66,8 @@ def test_probability_model_mcmc(): nsamples=5) s = bayes_estimator.sampler.samples - assert s[0, 1] == 3.5196936384257835 - assert s[1, 0] == 11.143811671048994 - assert s[2, 0] == 10.162512455643435 - assert s[3, 1] == 0.8541521389437781 - assert s[4, 1] == 1.0095454025762525 + assert np.isclose(s[0, 1], 3.5196936384257835) + assert np.isclose(s[1, 0], 11.143811671048994) + assert np.isclose(s[2, 0], 10.162512455643435) + assert np.isclose(s[3, 1], 0.8541521389437781) + assert np.isclose(s[4, 1], 1.0095454025762525) diff --git a/tests/unit_tests/sampling/test_adaptive_kriging.py b/tests/unit_tests/sampling/test_adaptive_kriging.py index e191feb32..fa07dc920 100644 --- a/tests/unit_tests/sampling/test_adaptive_kriging.py +++ b/tests/unit_tests/sampling/test_adaptive_kriging.py @@ -76,8 +76,8 @@ def test_akmcs_expected_feasibility(): random_state=2) a.run(nsamples=25, samples=x.samples) - assert a.samples[23, 0] == 5.423754197908594 - assert a.samples[20, 1] == 2.0355505295053384 + assert np.isclose(a.samples[23, 0], 5.423754197908594) + assert np.isclose(a.samples[20, 1], 2.0355505295053384) def test_akmcs_expected_improvement(): @@ -99,8 +99,8 @@ def test_akmcs_expected_improvement(): random_state=2) a.run(nsamples=25, samples=x.samples) - assert a.samples[21, 0] == 6.878734574049913 - assert a.samples[20, 1] == -6.3410533857909215 + assert np.isclose(a.samples[21, 0], 6.878734574049913) + assert np.isclose(a.samples[20, 1], -6.3410533857909215) def test_akmcs_expected_improvement_global_fit(): diff --git a/tests/unit_tests/scientific_machine_learning/functional/test_functional_generalized_js_divergence.py b/tests/unit_tests/scientific_machine_learning/functional/test_functional_generalized_js_divergence.py index 7152b8b93..5efa2fbb3 100644 --- 
a/tests/unit_tests/scientific_machine_learning/functional/test_functional_generalized_js_divergence.py +++ b/tests/unit_tests/scientific_machine_learning/functional/test_functional_generalized_js_divergence.py @@ -8,10 +8,11 @@ def test_gaussian_js_equals_gaussian_kl(): Because JS is estimated from Monte Carlo, this tests for within 5% accuracy. """ # initialize distributions - posterior_mu = torch.tensor(1.0) - posterior_sigma = torch.tensor(1.0) - prior_mu = torch.tensor(0.0) - prior_sigma = torch.tensor(1.0) + dtype = torch.float32 + posterior_mu = torch.tensor(1.0, dtype=dtype) + posterior_sigma = torch.tensor(1.0, dtype=dtype) + prior_mu = torch.tensor(0.0, dtype=dtype) + prior_sigma = torch.tensor(1.0, dtype=dtype) posterior_distribution = [uq.Normal(1, 1)] prior_distribution = [uq.Normal(0, 1)] # compute divergences diff --git a/tests/unit_tests/scientific_machine_learning/functional/test_functional_geometric_js_divergence.py b/tests/unit_tests/scientific_machine_learning/functional/test_functional_geometric_js_divergence.py index 0738dd284..d575b25ea 100644 --- a/tests/unit_tests/scientific_machine_learning/functional/test_functional_geometric_js_divergence.py +++ b/tests/unit_tests/scientific_machine_learning/functional/test_functional_geometric_js_divergence.py @@ -48,10 +48,11 @@ def test_kl_equal( shape, ): """JSG divergence is equal to KL divergence when alpha = 0""" - post_mu = torch.full(shape, posterior_param_1) - post_sigma = torch.full(shape, posterior_param_2) - prior_mu = torch.full(shape, prior_param_1) - prior_sigma = torch.full(shape, prior_param_2) + dtype = torch.float64 + post_mu = torch.full(shape, posterior_param_1, dtype=dtype) + post_sigma = torch.full(shape, posterior_param_2, dtype=dtype) + prior_mu = torch.full(shape, prior_param_1, dtype=dtype) + prior_sigma = torch.full(shape, prior_param_2, dtype=dtype) jsg = func.geometric_jensen_shannon_divergence( post_mu, post_sigma, prior_mu, prior_sigma, alpha=0.0 ) diff --git a/tests/unit_tests/scientific_machine_learning/layers/test_bayesian_fourier3d.py b/tests/unit_tests/scientific_machine_learning/layers/test_bayesian_fourier3d.py index db78df8f4..43333c62f 100644 --- a/tests/unit_tests/scientific_machine_learning/layers/test_bayesian_fourier3d.py +++ b/tests/unit_tests/scientific_machine_learning/layers/test_bayesian_fourier3d.py @@ -7,15 +7,15 @@ @settings(deadline=1_000) @given( batch_size=integers(min_value=1, max_value=2), - width=integers(min_value=1, max_value=8), - d=integers(min_value=64, max_value=128), - w=integers(min_value=64, max_value=128), - h=integers(min_value=64, max_value=128), + width=integers(min_value=1, max_value=3), + d=integers(min_value=16, max_value=32), + w=integers(min_value=16, max_value=32), + h=integers(min_value=16, max_value=32), ) def test_output_shape(batch_size, width, d, w, h): """Fourier layers do not change the shape of the input""" x = torch.ones((batch_size, width, d, w, h)) - layer = sml.BayesianFourier3d(width, (8, 16, 32)) + layer = sml.BayesianFourier3d(width, (2, 4, 8)) y = layer(x) assert x.shape == y.shape diff --git a/tests/unit_tests/scientific_machine_learning/losses/test_geometric_js_divergence.py b/tests/unit_tests/scientific_machine_learning/losses/test_geometric_js_divergence.py index b02c00b77..0f9c4d8c3 100644 --- a/tests/unit_tests/scientific_machine_learning/losses/test_geometric_js_divergence.py +++ b/tests/unit_tests/scientific_machine_learning/losses/test_geometric_js_divergence.py @@ -35,7 +35,7 @@ def test_device(): else: device = 
torch.device("cpu") - model = sml.FeedForwardNeuralNetwork(sml.BayesianLinear(1, 1)) + model = sml.FeedForwardNeuralNetwork(sml.BayesianLinear(1, 1, dtype=torch.float32)) model.to(device) divergence_function = sml.GeometricJensenShannonDivergence(device=device) divergence = divergence_function(model) diff --git a/tests/unit_tests/transformations/test_nataf.py b/tests/unit_tests/transformations/test_nataf.py index 4dd5f4c69..61752e1ef 100644 --- a/tests/unit_tests/transformations/test_nataf.py +++ b/tests/unit_tests/transformations/test_nataf.py @@ -18,13 +18,13 @@ def test_wrong_distribution_in_list(): dist1 = Normal(loc=0.0, scale=1.0) rx = np.array([[1.0, 0.0], [0.0, 1.0]]) with pytest.raises(Exception): - assert Nataf(distributions=[dist1, 'Beta'], corr_x=rx) + assert Nataf(distributions=[dist1, "Beta"], corr_x=rx) def test_wrong_distribution(): rx = np.array([[1.0, 0.0], [0.0, 1.0]]) with pytest.raises(Exception): - assert Nataf(distributions='Normal', corr_x=rx) + assert Nataf(distributions="Normal", corr_x=rx) def test_identity_correlation_x_normal(): @@ -70,7 +70,8 @@ def test_non_identity_correlation_uniform_z(): dist2 = Uniform(loc=0.0, scale=1.0) rx = np.array([[1.0, 0.8], [0.8, 1.0]]) ntf_obj = Nataf(distributions=[dist1, dist2], corr_x=rx) - np.testing.assert_allclose(ntf_obj.corr_z, [[1., 0.8134732861515996], [0.8134732861515996, 1.]], rtol=1e-09) + expected_corr_z = np.array([[1.0, 0.8134732861515996], [0.8134732861515996, 1.0]]) + assert np.allclose(ntf_obj.corr_z, expected_corr_z) def test_non_identity_correlation_uniform_x(): @@ -78,7 +79,8 @@ def test_non_identity_correlation_uniform_x(): dist2 = Uniform(loc=0.0, scale=1.0) rz = np.array([[1.0, 0.8], [0.8, 1.0]]) ntf_obj = Nataf(distributions=[dist1, dist2], corr_z=rz) - assert (ntf_obj.corr_x == [[1., 0.7859392826067285], [0.7859392826067285, 1.]]).all() + expected_corr_x = np.array([[1.0, 0.7859392826067285], [0.7859392826067285, 1.0]]) + assert np.allclose(ntf_obj.corr_x, expected_corr_x) def test_attribute_h(): @@ -86,7 +88,8 @@ def test_attribute_h(): dist2 = Normal(loc=0.0, scale=1.0) rz = np.array([[1.0, 0.8], [0.8, 1.0]]) ntf_obj = Nataf(distributions=[dist1, dist2], corr_z=rz) - np.testing.assert_allclose(ntf_obj.H, [[1., 0.], [0.8, 0.6]], rtol=1e-09) + expected_H = np.array([[1.0, 0.0], [0.8, 0.6]]) + assert np.allclose(ntf_obj.H, expected_H) def test_samples_x(): @@ -131,15 +134,12 @@ def test_samples_x_jxz2(): ntf_obj = Nataf(distributions=[dist1, dist2]) samples_x = np.array([[0.3, 1.2, 3.5], [0.2, 2.4, 0.9]]).T ntf_obj.run(samples_x=samples_x, jacobian=True) - g = [] - for i in range(3): - if i == 0: - g.append((ntf_obj.jxz[i] == np.array([[1.6789373877365803, 0.0], [0.0, 2.577850090371836]])).all()) - elif i == 1: - g.append((ntf_obj.jxz[i] == np.array([[0.6433491348614259, 0.0], [0.0, 1.1906381155257868]])).all()) - else: - g.append((ntf_obj.jxz[i] == np.array([[0.5752207318528584, 0.0], [0.0, 0.958701219754764]])).all()) - assert np.all(g) + expected_jxz = [ + np.array([[1.6789373877365803, 0.0], [0.0, 2.577850090371836]]), + np.array([[0.6433491348614259, 0.0], [0.0, 1.1906381155257868]]), + np.array([[0.5752207318528584, 0.0], [0.0, 0.958701219754764]]), + ] + assert np.allclose(ntf_obj.jxz, expected_jxz) def test_samples_x1(): @@ -148,15 +148,14 @@ def test_samples_x1(): ntf_obj = Nataf(distributions=[dist1, dist2]) samples_x = np.array([[0.3, 1.2, 3.5], [0.2, 2.4, 0.9]]).T ntf_obj.run(samples_x=samples_x, jacobian=True) - g = [] - for i in range(3): - if i == 0: - g.append((ntf_obj.samples_z[i] == 
np.array([-1.5547735945968535, -1.501085946044025])).all()) - elif i == 1: - g.append((ntf_obj.samples_z[i] == np.array([-0.7063025628400874, 0.841621233572914])).all()) - else: - g.append((ntf_obj.samples_z[i] == np.array([0.5244005127080407, -0.5244005127080409])).all()) - assert np.all(g) + expected_samples_z = np.array( + [ + [-1.5547735945968535, -1.501085946044025], + [-0.7063025628400874, 0.841621233572914], + [0.5244005127080407, -0.5244005127080409], + ] + ) + assert np.allclose(ntf_obj.samples_z, expected_samples_z) def test_samples_z_jzx1(): @@ -173,13 +172,11 @@ def test_samples_z_jzx2(): ntf_obj = Nataf(distributions=[dist1, dist2]) samples_z = np.array([[0.3, 1.2], [0.2, 2.4]]).T ntf_obj.run(samples_z=samples_z, jacobian=True) - g = [] - for i in range(2): - if i == 0: - g.append((ntf_obj.jzx[i] == np.array([[0.524400601939789, 0.0], [0.0, 0.8524218415758338]])).all()) - else: - g.append((ntf_obj.jzx[i] == np.array([[1.0299400748281828, 0.0], [0.0, 14.884586948005541]])).all()) - assert np.all(g) + expected_jzx = [ + np.array([[0.524400601939789, 0.0], [0.0, 0.8524218415758338]]), + np.array([[1.0299400748281828, 0.0], [0.0, 14.884586948005541]]), + ] + assert np.allclose(ntf_obj.jzx, expected_jzx) def test_samples_z2(): @@ -188,13 +185,13 @@ def test_samples_z2(): ntf_obj = Nataf(distributions=[dist1, dist2]) samples_z = np.array([[0.3, 1.2], [0.2, 2.4]]).T ntf_obj.run(samples_z=samples_z, jacobian=True) - g = [] - for i in range(2): - if i == 0: - g.append((ntf_obj.samples_x[i] == np.array([3.089557110944763, 1.737779128317309])).all()) - elif i == 1: - g.append((ntf_obj.samples_x[i] == np.array([4.424651648891459, 2.9754073922262116])).all()) - assert np.all(g) + expected_samples_x = np.array( + [ + [3.089557110944763, 1.737779128317309], + [4.424651648891459, 2.9754073922262116], + ] + ) + assert np.allclose(ntf_obj.samples_x, expected_samples_x) def test_itam_beta(): @@ -272,4 +269,4 @@ def distortion_z2x_dist_object(): dist1 = Lognormal(s=0.0, loc=0.0, scale=1.0) rz = np.array([[1.0, 0.8], [0.8, 1.0]]) with pytest.raises(Exception): - assert Nataf.distortion_z2x(distributions=[dist1, 'Beta'], corr_z=rz) + assert Nataf.distortion_z2x(distributions=[dist1, "Beta"], corr_z=rz)
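Note on the test changes: the asserts in test_bayes_parameter_estimation.py, test_adaptive_kriging.py, and test_nataf.py now use tolerance-based comparisons instead of exact floating-point equality, which is fragile across platforms and BLAS builds. A minimal standalone sketch of the pattern, using only numpy's public API (the tolerances quoted are numpy's documented defaults):

    import numpy as np

    # np.isclose(a, b) checks |a - b| <= atol + rtol * |b| elementwise,
    # with defaults rtol=1e-05 and atol=1e-08.
    assert np.isclose(3.5196936384257836, 3.5196936384257835)

    # For arrays, np.testing.assert_allclose raises an AssertionError with
    # a readable report of the mismatching elements, which is why it is
    # preferred over (a == b).all() in test suites.
    expected = np.array([[1.0, 0.8134732861515996],
                         [0.8134732861515996, 1.0]])
    np.testing.assert_allclose(expected, expected.T, rtol=1e-9)

One caveat: np.isclose is asymmetric (the relative tolerance scales with the second argument), so passing the expected value second, as these tests do, is the intended usage.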
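Note on the dtype changes: generalized_jensen_shannon_divergence now builds its return value with an explicit dtype, and the divergence tests pin torch.float32 or torch.float64 when constructing tensors. A minimal sketch of why that matters, assuming only stock PyTorch (no UQpy imports):

    import torch

    # torch.tensor infers float32 for Python floats under the default
    # global dtype ...
    assert torch.tensor(1.0).dtype == torch.float32

    # ... but that default is process-global state, so unpinned tests
    # become order-dependent if anything calls set_default_dtype.
    torch.set_default_dtype(torch.float64)
    assert torch.tensor(1.0).dtype == torch.float64
    torch.set_default_dtype(torch.float32)  # restore for later tests

    # Pinning the dtype at construction keeps comparisons deterministic
    # regardless of the ambient default.
    mu = torch.full((2, 3), 0.5, dtype=torch.float64)
    assert mu.dtype == torch.float64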
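Note on the None initializations: the PostProcess.py and ConstrainedPCE.py hunks bind names (error, best_basis, best_index) on the branch that previously left them unbound, so downstream code fails on an explicit value check rather than an UnboundLocalError. A minimal standalone sketch of the pattern (the plot_errors helper is hypothetical, not UQpy's API):

    import numpy as np

    def plot_errors(indices, confidence_interval=None):
        # Bind the name on every branch so later code never hits an
        # UnboundLocalError when no confidence interval is supplied.
        if confidence_interval is not None:
            error = confidence_interval[:, 1] - indices
        else:
            error = None
        # Downstream logic branches on the value, not on whether the
        # variable happens to exist.
        if error is not None:
            print("error bars:", error)

    plot_errors(np.array([0.2, 0.5]))  # no interval: runs cleanly
    plot_errors(np.array([0.2, 0.5]), np.array([[0.1, 0.3], [0.4, 0.6]]))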