diff --git a/.github/workflows/unit_test.yml b/.github/workflows/unit_test.yml
index 32e9ed30..62921bc5 100644
--- a/.github/workflows/unit_test.yml
+++ b/.github/workflows/unit_test.yml
@@ -33,4 +33,5 @@ jobs:
     - name: Test with pytest
       run: |
         pip install pytest pytest-cov
-        python -m pytest --doctest-modules --junitxml=junit/test-results.xml --cov=. --cov-report=xml --cov-report=html
+        python -m pytest --doctest-modules --junitxml=junit/test-results.xml --cov=. --cov-report=xml --cov-report=html -r w
+
diff --git a/.gitignore b/.gitignore
index 759e9c7a..c25ee689 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,7 @@ __pycache__/
 *.nii.gz
 *.nii
+*.npy
 *.dcm
 *.npy
 *.mat
diff --git a/conftest.py b/conftest.py
index 8d58d18b..8527f401 100644
--- a/conftest.py
+++ b/conftest.py
@@ -3,7 +3,8 @@ import json
 import csv
 # import datetime
-
+import numpy as np
+from phantoms.MR_XCAT_qMRI.sim_ivim_sig import phantom
 
 def pytest_addoption(parser):
     parser.addoption(
@@ -263,7 +264,8 @@ def algorithmlist(algorithms):
    for algorithm in algorithms["algorithms"]:
        algorithm_dict = algorithms.get(algorithm, {})
        requires_matlab = algorithm_dict.get("requires_matlab", False)
-       yield algorithm, requires_matlab, algorithm_dict.get('deep_learning', False)
+       if not algorithm_dict.get('deep_learning', False):
+           yield algorithm, requires_matlab
 
 
 def bound_input(datafile, algorithms):
@@ -302,3 +304,16 @@ def deep_learning_algorithms(datafile, algorithms):
        requires_matlab = algorithm_dict.get("requires_matlab", False)
        tolerances = algorithm_dict.get("tolerances", {"atol":{"f": 2e-1, "D": 8e-4, "Dp": 8e-2},"rtol":{"f": 0.2, "D": 0.3, "Dp": 0.4}})
        yield algorithm, all_data, bvals, kwargs, requires_matlab, tolerances
+
+
+@pytest.fixture(scope="session")
+def threeddata(request):
+    current_folder = pathlib.Path.cwd()
+    datafile = request.config.getoption("dataFile")
+    generic = current_folder / datafile
+    with generic.open() as f:
+        all_data = json.load(f)
+    bvals = all_data.pop('config')
+    bvals = np.array(bvals['bvalues'])
+    sig, _, Dim, fim, Dpim, _ = phantom(bvals, 1/1000, TR=3000, TE=40, motion=False, rician=False, interleaved=False, T1T2=True)
+    return sig[::16,::8,::6,:], Dim[::16,::8,::6], fim[::16,::8,::6], Dpim[::16,::8,::6], bvals
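Note: the new `threeddata` fixture builds a full XCAT phantom and then strides it down (`[::16, ::8, ::6]`) so the volume and parallel tests below run on only a few hundred voxels. A minimal standalone sketch of the same idea, assuming the `phantom` signature shown in this patch (note `phantom` calls `download_data()` internally, so this requires the XCAT download):

    import numpy as np
    from phantoms.MR_XCAT_qMRI.sim_ivim_sig import phantom

    bvals = np.array([0, 50, 100, 200, 400, 800])
    sig, _, Dim, fim, Dpim, _ = phantom(bvals, 1/1000, TR=3000, TE=40,
                                        motion=False, rician=False,
                                        interleaved=False, T1T2=True)
    small = sig[::16, ::8, ::6, :]  # downsampled 4D signal (x, y, z, b)
    print(small.shape, Dim[::16, ::8, ::6].shape)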
diff --git a/phantoms/MR_XCAT_qMRI/sim_ivim_sig.py b/phantoms/MR_XCAT_qMRI/sim_ivim_sig.py
index e95ee7a3..6934e014 100644
--- a/phantoms/MR_XCAT_qMRI/sim_ivim_sig.py
+++ b/phantoms/MR_XCAT_qMRI/sim_ivim_sig.py
@@ -5,6 +5,7 @@ import argparse
 import os
 
 from utilities.data_simulation.Download_data import download_data
+import pathlib
 
 ##########
 # code written by Oliver J Gurney-Champion
@@ -12,6 +13,7 @@
 # This code generates a 4D IVIM phantom as nifti file
 
 def phantom(bvalue, noise, TR=3000, TE=40, motion=False, rician=False, interleaved=False,T1T2=True):
+    download_data()
     np.random.seed(42)
     if motion:
         states = range(1,21)
@@ -19,14 +21,17 @@
         states = [1]
     for state in states:
         # Load the .mat file
-        mat_data = loadmat('../../download/Phantoms/XCAT_MAT_RESP/XCAT5D_RP_' + str(state) + '_CP_1.mat')
+        project_root = pathlib.Path(__file__).resolve().parent.parent
+        filename = f'XCAT5D_RP_{state}_CP_1.mat'
+        mat_path = project_root / '..' / 'download' / 'Phantoms' / 'XCAT_MAT_RESP' / filename
+        mat_data = loadmat(mat_path)
 
         # Access the variables in the loaded .mat file
         XCAT = mat_data['IMG']
         XCAT = XCAT[-1:0:-2,-1:0:-2,10:160:4]
 
         D, f, Ds = contrast_curve_calc()
-        S, Dim, fim, Dpim, legend = XCAT_to_MR_DCE(XCAT, TR, TE, bvalue, D, f, Ds,T1T2=T1T2)
+        S, Dim, fim, Dpim, legend = XCAT_to_MR_IVIM(XCAT, TR, TE, bvalue, D, f, Ds,T1T2=T1T2)
         if state == 1:
             Dim_out = Dim
             fim_out = fim
@@ -187,7 +192,7 @@ def contrast_curve_calc():
     return D, f, Ds
 
 
-def XCAT_to_MR_DCE(XCAT, TR, TE, bvalue, D, f, Ds, b0=3, ivim_cont = True, T1T2=True):
+def XCAT_to_MR_IVIM(XCAT, TR, TE, bvalue, D, f, Ds, b0=3, ivim_cont = True, T1T2=True):
     ###########################################################################################
     # This script converts XCAT tissue values to MR contrast based on the SSFP signal equation.
     # Christopher W. Roy 2018-12-04 # fetal.xcmr@gmail.com
@@ -436,7 +441,6 @@ def parse_bvalues_file(file_path):
     motion = args.motion
     interleaved = args.interleaved
     T1T2 = args.T1T2
-    download_data()
     for key, bvalue in bvalues.items():
         bvalue = np.array(bvalue)
         sig, XCAT, Dim, fim, Dpim, legend = phantom(bvalue, noise, motion=motion, interleaved=interleaved,T1T2=T1T2)
diff --git a/pytest.ini b/pytest.ini
index ed02be7a..995a9f13 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -3,4 +3,7 @@ markers =
     slow: marks tests as slow (deselect with '-m "not slow"')
 addopts = -m 'not slow'
-testpaths = tests
\ No newline at end of file
+testpaths = tests
+filterwarnings =
+    ignore::Warning
+    always::tests.IVIMmodels.unit_tests.test_ivim_fit.PerformanceWarning
\ No newline at end of file
diff --git a/src/original/ETP_SRI/LinearFitting.py b/src/original/ETP_SRI/LinearFitting.py
index d404cc38..41c03839 100644
--- a/src/original/ETP_SRI/LinearFitting.py
+++ b/src/original/ETP_SRI/LinearFitting.py
@@ -1,5 +1,6 @@
 import numpy as np
 import numpy.polynomial.polynomial as poly
+import warnings
 
 from utilities.data_simulation.GenerateData import GenerateData
 
@@ -82,11 +83,17 @@ def ivim_fit(self, bvalues, signal):
             # print(Dp_prime)
             if np.any(np.asarray(Dp_prime) < 0) or not np.all(np.isfinite(Dp_prime)):
-                print('Perfusion fit failed')
+                warnings.warn('Perfusion fit failed',
+                              category=UserWarning,
+                              stacklevel=2  # Ensures correct file/line info in the warning
+                              )
                 Dp_prime = [0, 0]
             f = signal[0] - D[0]
         else:
-            print("This doesn't seem to be an IVIM set of b-values")
+            warnings.warn("This doesn't seem to be an IVIM set of b-values",
+                          category=UserWarning,
+                          stacklevel=2  # Ensures correct file/line info in the warning
+                          )
             f = 1
             Dp_prime = [0, 0]
         D = D[1]
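Note: these `print` → `warnings.warn(..., stacklevel=2)` conversions are what make failed fits visible in the pytest warning summary enabled by the `-r w` flag added to the workflow above. A self-contained illustration of what `stacklevel=2` buys, using hypothetical function names:

    import warnings

    def fit_helper():
        # stacklevel=2 attributes the warning to fit_helper()'s caller,
        # so the report points at the code that invoked the fit.
        warnings.warn('Perfusion fit failed', category=UserWarning, stacklevel=2)

    def user_code():
        fit_helper()

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        user_code()
    print(caught[0].filename, caught[0].lineno)  # call site inside user_code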
diff --git a/src/original/PvH_KB_NKI/DWI_functions_standalone.py b/src/original/PvH_KB_NKI/DWI_functions_standalone.py
index 5d677149..31b45dc7 100644
--- a/src/original/PvH_KB_NKI/DWI_functions_standalone.py
+++ b/src/original/PvH_KB_NKI/DWI_functions_standalone.py
@@ -10,6 +10,8 @@
 """
 import numpy as np
+import warnings
+
 
 def generate_ADC_standalone(DWIdata, bvalues, bmin: int = 150, bmax: int = 1000, specificBvals: list = []):
     """
@@ -116,9 +118,11 @@ def generate_IVIMmaps_standalone(DWIdata, bvalues, bminADC=150, bmaxADC=1000, bm
             raise Exception('b=0 was not acquired')
 
         if len(bvalues) > 2:
-            print('More than two b-values were detected for D* calculation. ' +
-                  'Note that only the first two will be used for D*.')
-
+            warnings.warn(
+                'More than two b-values were detected for D* calculation. '
+                'Note that only the first two will be used for D*.',
+                category=UserWarning,
+                stacklevel=2  # Ensures correct file/line info in the warning
+            )
     except Exception as e:
         print("Could not calculate D* due to an error: " + str(e))
         return
diff --git a/src/original/TF_reference/segmented_IVIMfit.py b/src/original/TF_reference/segmented_IVIMfit.py
index 05974883..f65778e0 100644
--- a/src/original/TF_reference/segmented_IVIMfit.py
+++ b/src/original/TF_reference/segmented_IVIMfit.py
@@ -24,6 +24,7 @@ def segmented_IVIM_fit(bvalues, dw_data, b_cutoff = 200, bounds=([0.0001, 0.0, 0
     """
     bvalues_D = bvalues[bvalues >= b_cutoff]
     dw_data_D = dw_data[bvalues >= b_cutoff]
+    dw_data_D = np.clip(dw_data_D, 1e-6, None)  # ensure we do not get 0 or negative values that cause nans/infs
     log_data_D = np.log(dw_data_D)
 
     D, b0_intercept = d_fit_iterative_wls(bvalues_D, log_data_D)
diff --git a/src/standardized/ETP_SRI_LinearFitting.py b/src/standardized/ETP_SRI_LinearFitting.py
index 2ab311c0..a1a29780 100644
--- a/src/standardized/ETP_SRI_LinearFitting.py
+++ b/src/standardized/ETP_SRI_LinearFitting.py
@@ -1,6 +1,8 @@
 import numpy as np
 from src.wrappers.OsipiBase import OsipiBase
 from src.original.ETP_SRI.LinearFitting import LinearFit
+import warnings
+warnings.simplefilter('once', UserWarning)
 
 
 class ETP_SRI_LinearFitting(OsipiBase):
diff --git a/src/standardized/IAR_LU_biexp.py b/src/standardized/IAR_LU_biexp.py
index added0f8..6b4b31ae 100644
--- a/src/standardized/IAR_LU_biexp.py
+++ b/src/standardized/IAR_LU_biexp.py
@@ -114,8 +114,9 @@ def ivim_fit_full_volume(self, signals, bvalues, **kwargs):
             gtab = gradient_table(bvalues, bvec, b0_threshold=0)
 
             self.IAR_algorithm = IvimModelBiExp(gtab, bounds=self.bounds, initial_guess=self.initial_guess)
-
-        fit_results = self.IAR_algorithm.fit(signals)
+        b0_index = np.where(bvalues == 0)[0][0]
+        mask = signals[..., b0_index] > 0
+        fit_results = self.IAR_algorithm.fit(signals, mask=mask)
 
         results = {}
         results["f"] = fit_results.model_params[..., 1]
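Note: the `np.clip` guard added in `segmented_IVIMfit.py` prevents `log` of zero or negative signals from poisoning the weighted least-squares D fit. In isolation, with made-up values:

    import numpy as np

    dw = np.array([0.9, 0.4, 0.0, -0.01])   # decayed signals; last two are noise artefacts
    # without the guard: log(0) -> -inf and log(negative) -> nan
    print(np.log(np.clip(dw, 1e-6, None)))  # all finite, fit stays well-posed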
diff --git a/src/standardized/IAR_LU_modified_mix.py b/src/standardized/IAR_LU_modified_mix.py
index b546b4d2..1100fde8 100644
--- a/src/standardized/IAR_LU_modified_mix.py
+++ b/src/standardized/IAR_LU_modified_mix.py
@@ -34,6 +34,7 @@ class IAR_LU_modified_mix(OsipiBase):
     supported_dimensions = 1
     supported_priors = False
 
+
     def __init__(self, bvalues=None, thresholds=None, bounds=None, initial_guess=None, weighting=None, stats=False):
         """
             Everything this algorithm requires should be implemented here.
@@ -47,6 +48,10 @@ def __init__(self, bvalues=None, thresholds=None, bounds=None, initial_guess=Non
             print('warning, bounds from wrapper are not (yet) used in this algorithm')
         self.use_bounds = False
         self.use_initial_guess = False
+
+        # Additional options
+        self.stochastic = True
+
         # Check the inputs
 
         # Initialize the algorithm
diff --git a/src/standardized/IVIM_NEToptim.py b/src/standardized/IVIM_NEToptim.py
index f8860feb..49e9d03a 100644
--- a/src/standardized/IVIM_NEToptim.py
+++ b/src/standardized/IVIM_NEToptim.py
@@ -56,6 +56,9 @@ def initialize(self, bounds, initial_guess, fitS0, traindata, SNR):
         self.fitS0=fitS0
         self.deep_learning = True
         self.supervised = False
+        # Additional options
+        self.stochastic = True
+
         if traindata is None:
             warnings.warn('no training data provided (traindata = None). Training data will be simulated')
         if SNR is None:
diff --git a/src/standardized/OGC_AmsterdamUMC_Bayesian_biexp.py b/src/standardized/OGC_AmsterdamUMC_Bayesian_biexp.py
index efe4d510..0a94dd27 100644
--- a/src/standardized/OGC_AmsterdamUMC_Bayesian_biexp.py
+++ b/src/standardized/OGC_AmsterdamUMC_Bayesian_biexp.py
@@ -80,7 +80,7 @@ def initialize(self, bounds=None, initial_guess=None, fitS0=True, prior_in=None,
             self.neg_log_prior=flat_neg_log_prior([self.bounds[0][0],self.bounds[1][0]],[self.bounds[0][1],self.bounds[1][1]],[self.bounds[0][2],self.bounds[1][2]],[self.bounds[0][3],self.bounds[1][3]])
         else:
             print('warning, bounds are not used, as a prior is used instead')
-            if len(prior_in) is 4:
+            if len(prior_in) == 4:
                 self.neg_log_prior = empirical_neg_log_prior(prior_in[0], prior_in[1], prior_in[2],prior_in[3])
             else:
                 self.neg_log_prior = empirical_neg_log_prior(prior_in[0], prior_in[1], prior_in[2])
@@ -113,37 +113,61 @@ def ivim_fit(self, signals, bvalues, initial_guess=None, **kwargs):
 
         return results
 
-    def ivim_fit_full_volume(self, signals, bvalues, initial_guess=None, **kwargs):
+    def ivim_fit_full_volume(self, signals, bvalues, njobs=4, **kwargs):
         """Perform the IVIM fit
-
         Args:
             signals (array-like)
             bvalues (array-like, optional): b-values for the signals. If None, self.bvalues will be used. Default is None.
-
         Returns:
             _type_: _description_
         """
-        signals = self.reshape_to_voxelwise(signals)
+        # normalize signals
+        # Get index of b=0
+        shape = np.shape(signals)
+
+        b0_index = np.where(bvalues == 0)[0][0]
+        # Mask of voxels with non-negative signal at b=0
+        valid_mask = signals[..., b0_index] >= 0
+        # Select only valid voxels for fitting
+        signals = signals[valid_mask]
+
+        minimum_bvalue = np.min(bvalues)  # We normalize the signal to the minimum bvalue. Should be 0 or very close to 0.
+        b0_indices = np.where(bvalues == minimum_bvalue)[0]
+        normalization_factor = np.mean(signals[..., b0_indices], axis=-1)
+        signals = signals / np.repeat(normalization_factor[..., np.newaxis], np.shape(signals)[-1], -1)
 
         bvalues=np.array(bvalues)
         epsilon = 0.000001
-        fit_results = fit_segmented_array(bvalues, signals, bounds=self.bounds, cutoff=self.thresholds, p0=self.initial_guess)
-        fit_results=np.array(fit_results+(1,))
-        for i in range(4):
-            if fit_results[i] < self.bounds[0][i] : fit_results[0] = self.bounds[0][i]+epsilon
-            if fit_results[i] > self.bounds[1][i] : fit_results[0] = self.bounds[1][i]-epsilon
-        fit_results = self.OGC_algorithm_array(bvalues, signals, self.neg_log_prior, x0=fit_results, fitS0=self.fitS0, bounds=self.bounds)
+        fit_results = np.array(fit_segmented_array(bvalues, signals, bounds=self.bounds, cutoff=self.thresholds, p0=self.initial_guess))
+        #fit_results=np.array(fit_results+(1,))
+        # Loop over parameters (rows)
+        for i in range(4):
+            if i == 3:
+                fit_results[i] = np.random.normal(1, 0.2, np.shape(fit_results[i]))
+            else:
+                below = fit_results[i] < self.bounds[0][i]
+                above = fit_results[i] > self.bounds[1][i]
+
+                fit_results[i, below] = self.bounds[0][i] + epsilon
+                fit_results[i, above] = self.bounds[1][i] - epsilon
+        self.jobs = njobs
+        fit_results = self.OGC_algorithm_array(bvalues, signals, fit_results, self)
+
+        D = np.zeros(shape[0:-1])
+        D[valid_mask] = fit_results[0]
+        f = np.zeros(shape[0:-1])
+        f[valid_mask] = fit_results[1]
+        Dp = np.zeros(shape[0:-1])
+        Dp[valid_mask] = fit_results[2]
 
         results = {}
-        results["D"] = fit_results[0]
-        results["f"] = fit_results[1]
-        results["Dp"] = fit_results[2]
-
+        results["D"] = D
+        results["f"] = f
+        results["Dp"] = Dp
         return results
 
-
     def reshape_to_voxelwise(self, data):
         """
         reshapes multi-D input (spatial dims, bvvalue) data to 2D voxel-wise array
@@ -154,4 +178,4 @@ def reshape_to_voxelwise(self, data):
         """
         B = data.shape[-1]
         voxels = int(np.prod(data.shape[:-1]))  # e.g., X*Y*Z
-        return data.reshape(voxels, B)
\ No newline at end of file
+        return data.reshape(voxels, B)
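Note: the Bayesian full-volume rewrite fits only masked voxels and then scatters the estimates back into full-size parameter maps. That bookkeeping pattern on its own, with toy shapes and a stand-in for the fitter:

    import numpy as np

    signals = np.random.rand(4, 4, 3, 8)      # toy (x, y, z, b) volume
    valid_mask = signals[..., 0] >= 0.5       # keep sufficiently bright voxels
    flat = signals[valid_mask]                # (n_valid, b) array for the fitter
    d_fitted = np.full(flat.shape[0], 1e-3)   # stand-in for per-voxel D estimates

    D = np.zeros(signals.shape[:-1])          # full-size parameter map
    D[valid_mask] = d_fitted                  # masked-out voxels stay 0
    print(D.shape, int(valid_mask.sum()))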
diff --git a/src/standardized/OGC_AmsterdamUMC_biexp.py b/src/standardized/OGC_AmsterdamUMC_biexp.py
index 7af0b601..58f892de 100644
--- a/src/standardized/OGC_AmsterdamUMC_biexp.py
+++ b/src/standardized/OGC_AmsterdamUMC_biexp.py
@@ -84,25 +84,4 @@ def ivim_fit(self, signals, bvalues, **kwargs):
         results["f"] = fit_results[1]
         results["Dp"] = fit_results[2]
 
-        return results
-
-    def ivim_fit_full_volume(self, signals, bvalues, **kwargs):
-        """Perform the IVIM fit
-
-        Args:
-            signals (array-like)
-            bvalues (array-like, optional): b-values for the signals. If None, self.bvalues will be used. Default is None.
-
-        Returns:
-            _type_: _description_
-        """
-
-        bvalues=np.array(bvalues)
-        fit_results = self.OGC_algorithm_array(bvalues, signals, p0=self.initial_guess, bounds=self.bounds, fitS0=self.fitS0)
-
-        results = {}
-        results["D"] = fit_results[0]
-        results["f"] = fit_results[1]
-        results["Dp"] = fit_results[2]
-
         return results
\ No newline at end of file
diff --git a/src/standardized/OGC_AmsterdamUMC_biexp_segmented.py b/src/standardized/OGC_AmsterdamUMC_biexp_segmented.py
index 27d7e49a..aebf8224 100644
--- a/src/standardized/OGC_AmsterdamUMC_biexp_segmented.py
+++ b/src/standardized/OGC_AmsterdamUMC_biexp_segmented.py
@@ -86,4 +86,4 @@ def ivim_fit(self, signals, bvalues, **kwargs):
         results["f"] = fit_results[1]
         results["Dp"] = fit_results[2]
 
-        return results
+        return results
\ No newline at end of file
diff --git a/src/standardized/PvH_KB_NKI_IVIMfit.py b/src/standardized/PvH_KB_NKI_IVIMfit.py
index b6fe98a8..338e7952 100644
--- a/src/standardized/PvH_KB_NKI_IVIMfit.py
+++ b/src/standardized/PvH_KB_NKI_IVIMfit.py
@@ -1,6 +1,8 @@
 from src.wrappers.OsipiBase import OsipiBase
 from src.original.PvH_KB_NKI.DWI_functions_standalone import generate_IVIMmaps_standalone, generate_ADC_standalone
 import numpy as np
+import warnings
+warnings.simplefilter('once', UserWarning)
 
 class PvH_KB_NKI_IVIMfit(OsipiBase):
     """
diff --git a/src/standardized/Super_IVIM_DC.py b/src/standardized/Super_IVIM_DC.py
index aa4e02af..e5419646 100644
--- a/src/standardized/Super_IVIM_DC.py
+++ b/src/standardized/Super_IVIM_DC.py
@@ -62,6 +62,10 @@ def initialize(self, bounds, initial_guess, fitS0, SNR, working_dir=os.getcwd(),
         self.use_bounds = False
         self.deep_learning = True
         self.supervised = True
+
+        # Additional options
+        self.stochastic = True
+
         modeldir = Path(working_dir)  # Ensure it's a Path object
         modeldir = modeldir / "models"
         modeldir.mkdir(parents=True, exist_ok=True)
diff --git a/src/standardized/TCML_TechnionIIT_SLS.py b/src/standardized/TCML_TechnionIIT_SLS.py
index 55b6206c..7e9cc590 100644
--- a/src/standardized/TCML_TechnionIIT_SLS.py
+++ b/src/standardized/TCML_TechnionIIT_SLS.py
@@ -4,7 +4,7 @@
 
 class TCML_TechnionIIT_SLS(OsipiBase):
     """
-        TCML_TechnionIIT_lsqBOBYQA fitting algorithm by Moti Freiman and Noam Korngut, TechnionIIT
+        TCML_TechnionIIT_SLS fitting algorithm by Angeleene Ang, Moti Freiman and Noam Korngut, TechnionIIT
     """
 
@@ -12,24 +12,24 @@
     # the user inputs fulfil the requirements
 
     # Some basic stuff that identifies the algorithm
-    id_author = "Moti Freiman and Noam Korngut, TechnIIT"
+    id_author = "Angeleene Ang, Moti Freiman and Noam Korngut, TechnionIIT"
    id_algorithm_type = "Bi-exponential fit, segmented fitting"
    id_return_parameters = "f, D*, D, S0"
    id_units = "seconds per milli metre squared or milliseconds per micro metre squared"
 
    # Algorithm requirements
    required_bvalues = 4
-   required_thresholds = [0,0]  # Interval from "at least" to "at most", in case submissions allow a custom number of thresholds
+   required_thresholds = [0,1]  # Interval from "at least" to "at most", in case submissions allow a custom number of thresholds
    required_bounds = False
-   required_bounds_optional = False  # Bounds may not be required but are optional
+   required_bounds_optional = True  # Bounds may not be required but are optional
    required_initial_guess = False
-   required_initial_guess_optional = True
+   required_initial_guess_optional = False
    accepted_dimensions = 1  # Not sure how to define this for the number of accepted dimensions. Perhaps like the thresholds, at least and at most?
 
    # Supported inputs in the standardized class
-   supported_bounds = False
-   supported_initial_guess = True
+   supported_bounds = True
+   supported_initial_guess = False
    supported_thresholds = True
 
    def __init__(self, bvalues=None, thresholds=None, bounds=None, initial_guess=None, fitS0=True):
        """
@@ -40,12 +40,12 @@ def __init__(self, bvalues=None, thresholds=None, bounds=None, initial_guess=Non
            Our OsipiBase object could contain functions that compare the inputs with
            the requirements.
        """
-       super(TCML_TechnionIIT_SLS, self).__init__(bvalues=bvalues, bounds=bounds, initial_guess=initial_guess)
+       super(TCML_TechnionIIT_SLS, self).__init__(bvalues=bvalues, bounds=bounds)
        self.fit_least_squares = IVIM_fit_sls
        self.fitS0=fitS0
-       self.initialize(bounds, initial_guess, fitS0, thresholds)
+       self.initialize(bounds, fitS0, thresholds)
 
-   def initialize(self, bounds, initial_guess, fitS0, thresholds):
+   def initialize(self, bounds, fitS0, thresholds):
        if bounds is None:
            print('warning, only D* is bounded between 0.001 and 0.5)')
            self.bounds = ([0.0003, 0.001, 0.009, 0],[0.008, 0.5,0.04, 3])
@@ -53,20 +53,14 @@
            print('warning, although bounds are given, only D* is bounded)')
            bounds=bounds
            self.bounds = bounds
-       if initial_guess is None:
-           print('warning, no initial guesses were defined, so default bounds are used of [0.001, 0.1, 0.01, 1]')
-           self.initial_guess = [0.001, 0.1, 0.01, 1]  # D, Dp, f, S0
-       else:
-           self.initial_guess = initial_guess
-           self.use_initial_guess = True
        self.fitS0=fitS0
-       self.use_initial_guess = True
-       self.use_bounds = True
+       self.use_bounds = False
        if thresholds is None:
            self.thresholds = 150
            print('warning, no thresholds were defined, so default bounds are used of 150')
        else:
            self.thresholds = thresholds
+       self.use_initial_guess = False
 
    def ivim_fit(self, signals, bvalues, **kwargs):
        """Perform the IVIM fit
@@ -82,11 +76,9 @@
        bvalues=np.array(bvalues)
        bounds=np.array(self.bounds)
        bounds=[bounds[0][[0, 2, 1, 3]], bounds[1][[0, 2, 1, 3]]]
-       initial_guess = np.array(self.initial_guess)
-       initial_guess = initial_guess[[0, 2, 1, 3]]
-
+       signals[signals<0]=0
-       fit_results = self.fit_least_squares(np.array(signals)[:,np.newaxis],bvalues, bounds,initial_guess,self.thresholds)
+       fit_results = self.fit_least_squares(np.array(signals)[:,np.newaxis],bvalues, bounds, min_bval_high=self.thresholds)
 
        results = {}
        results["D"] = fit_results[0]
id_return_parameters = "f, D*, D, S0" + id_units = "seconds per milli metre squared or milliseconds per micro metre squared" + + # Algorithm requirements + required_bvalues = 4 + required_thresholds = [0, + 1] # Interval from "at least" to "at most", in case submissions allow a custom number of thresholds + required_bounds = False + required_bounds_optional = True # Bounds may not be required but are optional + required_initial_guess = False + required_initial_guess_optional = False + accepted_dimensions = 1 # Not sure how to define this for the number of accepted dimensions. Perhaps like the thresholds, at least and at most? + + + # Supported inputs in the standardized class + supported_bounds = True + supported_initial_guess = False + supported_thresholds = True + + def __init__(self, bvalues=None, thresholds=None, bounds=None, initial_guess=None, fitS0=True): + """ + Everything this algorithm requires should be implemented here. + Number of segmentation thresholds, bounds, etc. + + Our OsipiBase object could contain functions that compare the inputs with + the requirements. + """ + super(TCML_TechnionIIT_lsq_sls_lm, self).__init__(bvalues=bvalues, bounds=bounds) + self.fit_least_squares = IVIM_fit_sls_lm + self.fitS0=fitS0 + self.initialize(bounds, fitS0,thresholds) + + def initialize(self, bounds, fitS0,thresholds): + if bounds is None: + print( + 'warning, no bounds were defined, so default bounds are used of ([0.0003, 0.001, 0.009, 0],[0.008, 0.5,0.04, 3])') + self.bounds = ([0.0003, 0.001, 0.009, 0], [0.008, 0.5, 0.04, 3]) + else: + bounds = bounds + self.bounds = bounds + if thresholds is None: + self.thresholds = 150 + print('warning, no thresholds were defined, so default bounds are used of 150') + else: + self.thresholds = thresholds + self.fitS0=fitS0 + self.use_bounds = False + self.use_initial_guess = False + + def ivim_fit(self, signals, bvalues, **kwargs): + """Perform the IVIM fit + + Args: + signals (array-like) + bvalues (array-like, optional): b-values for the signals. If None, self.bvalues will be used. Default is None. 
+    def ivim_fit(self, signals, bvalues, **kwargs):
+        """Perform the IVIM fit
+
+        Args:
+            signals (array-like)
+            bvalues (array-like, optional): b-values for the signals. If None, self.bvalues will be used. Default is None.
+
+        Returns:
+            _type_: _description_
+        """
+        signals[signals<0]=0
+        bvalues=np.array(bvalues)
+        fit_results = self.fit_least_squares(np.array(signals)[:,np.newaxis],bvalues, self.bounds, min_bval_high=self.thresholds)
+
+        results = {}
+        if fit_results[0].size > 0:
+            results["D"] = fit_results[0]
+            results["f"] = fit_results[2]
+            results["Dp"] = fit_results[1]
+        else:
+            results["D"] = 0
+            results["f"] = 0
+            results["Dp"] = 0
+
+        return results
\ No newline at end of file
diff --git a/src/standardized/TCML_TechnionIIT_lsq_sls_trf.py b/src/standardized/TCML_TechnionIIT_lsq_sls_trf.py
new file mode 100644
index 00000000..db605a22
--- /dev/null
+++ b/src/standardized/TCML_TechnionIIT_lsq_sls_trf.py
@@ -0,0 +1,86 @@
+from src.wrappers.OsipiBase import OsipiBase
+from super_ivim_dc.source.Classsic_ivim_fit import IVIM_fit_sls_trf
+import numpy as np
+
+class TCML_TechnionIIT_lsq_sls_trf(OsipiBase):
+    """
+        TCML_TechnionIIT_lsq_sls_trf fitting algorithm by Angeleene Ang, Moti Freiman and Noam Korngut, TechnionIIT
+    """
+
+    # I'm thinking that we define default attributes for each submission like this
+    # And in __init__, we can call the OsipiBase control functions to check whether
+    # the user inputs fulfil the requirements
+
+    # Some basic stuff that identifies the algorithm
+    id_author = "Angeleene Ang, Moti Freiman and Noam Korngut, TechnionIIT"
+    id_algorithm_type = "Bi-exponential fit, SLS fit followed by Trust Region Reflective algorithm"
+    id_return_parameters = "f, D*, D, S0"
+    id_units = "seconds per milli metre squared or milliseconds per micro metre squared"
+
+    # Algorithm requirements
+    required_bvalues = 4
+    required_thresholds = [0,
+                           1]  # Interval from "at least" to "at most", in case submissions allow a custom number of thresholds
+    required_bounds = False
+    required_bounds_optional = True  # Bounds may not be required but are optional
+    required_initial_guess = False
+    required_initial_guess_optional = False
+    accepted_dimensions = 1  # Not sure how to define this for the number of accepted dimensions. Perhaps like the thresholds, at least and at most?
+
+
+    # Supported inputs in the standardized class
+    supported_bounds = True
+    supported_initial_guess = False
+    supported_thresholds = True
+
+    def __init__(self, bvalues=None, thresholds=None, bounds=None, initial_guess=None, fitS0=True):
+        """
+            Everything this algorithm requires should be implemented here.
+            Number of segmentation thresholds, bounds, etc.
+
+            Our OsipiBase object could contain functions that compare the inputs with
+            the requirements.
+        """
+        super(TCML_TechnionIIT_lsq_sls_trf, self).__init__(bvalues=bvalues, bounds=bounds)
+        self.fit_least_squares = IVIM_fit_sls_trf
+        self.fitS0=fitS0
+        self.initialize(bounds, fitS0, thresholds)
+
+    def initialize(self, bounds, fitS0, thresholds):
+        if bounds is None:
+            print('warning, no bounds were defined, so default bounds are used of ([0.0003, 0.001, 0.009, 0],[0.008, 0.5,0.04, 3])')
+            self.bounds = ([0.0003, 0.001, 0.009, 0],[0.008, 0.5,0.04, 3])
+        else:
+            bounds=bounds
+            self.bounds = bounds
+        if thresholds is None:
+            self.thresholds = 150
+            print('warning, no thresholds were defined, so the default threshold of 150 is used')
+        else:
+            self.thresholds = thresholds
+        self.fitS0=fitS0
+        self.use_bounds = False
+        self.use_initial_guess = False
+
+    def ivim_fit(self, signals, bvalues, **kwargs):
+        """Perform the IVIM fit
+
+        Args:
+            signals (array-like)
+            bvalues (array-like, optional): b-values for the signals. If None, self.bvalues will be used. Default is None.
+
+        Returns:
+            _type_: _description_
+        """
+        signals[signals<0]=0
+        bvalues=np.array(bvalues)
+        bounds=np.array(self.bounds)
+        bounds=[bounds[0][[0, 2, 1, 3]], bounds[1][[0, 2, 1, 3]]]
+        fit_results = self.fit_least_squares(np.array(signals)[:,np.newaxis],bvalues, bounds, min_bval_high=self.thresholds)
+
+        results = {}
+        results["D"] = fit_results[0]
+        results["f"] = fit_results[2]
+        results["Dp"] = fit_results[1]
+
+        return results
\ No newline at end of file
diff --git a/src/standardized/TCML_TechnionIIT_lsqlm.py b/src/standardized/TCML_TechnionIIT_lsqlm.py
index fd7ef610..a7198eb1 100644
--- a/src/standardized/TCML_TechnionIIT_lsqlm.py
+++ b/src/standardized/TCML_TechnionIIT_lsqlm.py
@@ -4,7 +4,7 @@
 
 class TCML_TechnionIIT_lsqlm(OsipiBase):
     """
-        TCML_TechnionIIT_lsqlm fitting algorithm by Moti Freiman and Noam Korngut, TechnionIIT
+        TCML_TechnionIIT_lsqlm fitting algorithm by Angeleene Ang, Moti Freiman and Noam Korngut, TechnionIIT
     """
 
@@ -12,8 +12,8 @@
    # the user inputs fulfil the requirements
 
    # Some basic stuff that identifies the algorithm
-   id_author = "Moti Freiman and Noam Korngut, TechnIIT"
-   id_algorithm_type = "Bi-exponential fit"
+   id_author = "Angeleene Ang, Moti Freiman and Noam Korngut, TechnionIIT"
+   id_algorithm_type = "Bi-exponential fit with Levenberg-Marquardt algorithm"
    id_return_parameters = "f, D*, D, S0"
    id_units = "seconds per milli metre squared or milliseconds per micro metre squared"
@@ -22,14 +22,14 @@
    required_thresholds = [0,
                           0]  # Interval from "at least" to "at most", in case submissions allow a custom number of thresholds
    required_bounds = False
-   required_bounds_optional = False  # Bounds may not be required but are optional
+   required_bounds_optional = True  # Bounds may not be required but are optional
    required_initial_guess = False
    required_initial_guess_optional = True
    accepted_dimensions = 1  # Not sure how to define this for the number of accepted dimensions. Perhaps like the thresholds, at least and at most?
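Note: several TCML wrappers reorder the bounds with `[[0, 2, 1, 3]]` before calling the fitter, translating the wrapper's (D, f, Dp, S0) parameter order into the fitter's (D, Dp, f, S0). The fancy-indexing trick in isolation:

    import numpy as np

    lower = np.array([0.0003, 0.001, 0.009, 0])  # (D, f, Dp, S0)
    upper = np.array([0.008, 0.5, 0.04, 3])
    order = [0, 2, 1, 3]                         # swap f and Dp
    bounds = [lower[order], upper[order]]        # now (D, Dp, f, S0)
    print(bounds)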
    # Supported inputs in the standardized class
-   supported_bounds = False
+   supported_bounds = True
    supported_initial_guess = True
    supported_thresholds = False
@@ -74,8 +74,13 @@ def ivim_fit(self, signals, bvalues, **kwargs):
        fit_results = self.fit_least_squares(bvalues, np.array(signals)[:,np.newaxis], initial_guess)
 
        results = {}
-       results["D"] = fit_results[0]
-       results["f"] = fit_results[2]
-       results["Dp"] = fit_results[1]
+       if fit_results[0].size > 0:
+           results["D"] = fit_results[0]
+           results["f"] = fit_results[2]
+           results["Dp"] = fit_results[1]
+       else:
+           results["D"] = 0
+           results["f"] = 0
+           results["Dp"] = 0
 
        return results
\ No newline at end of file
diff --git a/src/standardized/TCML_TechnionIIT_lsqtrf.py b/src/standardized/TCML_TechnionIIT_lsqtrf.py
index 977c3e5c..a91dded9 100644
--- a/src/standardized/TCML_TechnionIIT_lsqtrf.py
+++ b/src/standardized/TCML_TechnionIIT_lsqtrf.py
@@ -4,7 +4,7 @@
 
 class TCML_TechnionIIT_lsqtrf(OsipiBase):
     """
-        TCML_TechnionIIT_lsqlm fitting algorithm by Moti Freiman and Noam Korngut, TechnionIIT
+        TCML_TechnionIIT_lsqtrf fitting algorithm by Angeleene Ang, Moti Freiman and Noam Korngut, TechnionIIT
     """
 
@@ -12,7 +12,7 @@
    # the user inputs fulfil the requirements
 
    # Some basic stuff that identifies the algorithm
-   id_author = "Moti Freiman and Noam Korngut, TechnIIT"
+   id_author = "Angeleene Ang, Moti Freiman and Noam Korngut, TechnionIIT"
    id_algorithm_type = "Bi-exponential fit, Trust Region Reflective algorithm"
    id_return_parameters = "f, D*, D, S0"
    id_units = "seconds per milli metre squared or milliseconds per micro metre squared"
diff --git a/src/wrappers/OsipiBase.py b/src/wrappers/OsipiBase.py
index 9bf3f656..5c4cd542 100644
--- a/src/wrappers/OsipiBase.py
+++ b/src/wrappers/OsipiBase.py
@@ -4,10 +4,93 @@
 import pathlib
 import sys
 from tqdm import tqdm
+from joblib import Parallel, delayed
 
 
 class OsipiBase:
-    """The base class for OSIPI IVIM fitting"""
+    """
+    Comprehensive base class for OSIPI IVIM fitting algorithms.
+
+    The ``OsipiBase`` class defines a standardized framework for fitting
+    intravoxel incoherent motion (IVIM) models to diffusion-weighted MRI data.
+    It manages common attributes such as b-values, parameter bounds,
+    thresholds, and initial guesses; provides requirement-checking utilities;
+    and offers convenience methods for voxel-wise or full-volume fitting.
+    Subclasses can implement algorithm-specific behavior while inheriting
+    these shared tools.
+
+    Parameters
+    ----------
+    bvalues : array-like, optional
+        Diffusion b-values (s/mm²) matching the last dimension of the input data.
+    thresholds : array-like, optional
+        Thresholds used by specific algorithms (e.g., signal cutoffs).
+    bounds : array-like, optional
+        Parameter bounds for constrained optimization.
+    initial_guess : array-like, optional
+        Initial parameter estimates for the IVIM fit.
+    algorithm : str, optional
+        Name of an algorithm module in ``src/standardized`` to load dynamically.
+        If supplied, the instance is immediately converted to that algorithm's
+        subclass via :meth:`osipi_initiate_algorithm`.
+    **kwargs
+        Additional keyword arguments forwarded to the selected algorithm's
+        initializer if ``algorithm`` is provided.
+
+    Attributes
+    ----------
+    bvalues, thresholds, bounds, initial_guess : np.ndarray or None
+        Core fitting inputs stored as arrays when provided.
+    use_bounds, use_initial_guess : bool
+        Flags controlling whether bounds and initial guesses are applied.
+    deep_learning, supervised, stochastic : bool
+        Indicators for the algorithm type; subclasses may set these.
+    result_keys : list of str, optional
+        Names of the output parameters (e.g., ["f", "Dp", "D"]).
+    required_bvalues, required_thresholds, required_bounds,
+    required_bounds_optional, required_initial_guess,
+    required_initial_guess_optional : various, optional
+        Optional attributes used by requirement-checking helpers.
+
+    Key Methods
+    -----------
+    osipi_initiate_algorithm(algorithm, **kwargs)
+        Dynamically replace the current instance with the specified
+        algorithm subclass.
+    osipi_fit(data, bvalues=None, njobs=1, **kwargs)
+        Voxel-wise IVIM fitting with optional parallel processing and
+        automatic signal normalization.
+    osipi_fit_full_volume(data, bvalues=None, **kwargs)
+        Full-volume fitting for algorithms that support it.
+    osipi_print_requirements()
+        Display algorithm requirements such as needed b-values or bounds.
+    osipi_accepted_dimensions(), osipi_accepts_dimension(dim)
+        Query acceptable input dimensionalities.
+    osipi_check_required_*()
+        Validate that provided inputs meet algorithm requirements.
+    osipi_simple_bias_and_RMSE_test(SNR, bvalues, f, Dstar, D, noise_realizations)
+        Monte-Carlo bias/RMSE evaluation of the current fitting method.
+    D_and_Ds_swap(results)
+        Ensure consistency of D and D* estimates by swapping if necessary.
+
+    Notes
+    -----
+    * This class is typically used as a base or as a dynamic loader for
+      a specific algorithm implementation.
+    * Parallel voxel-wise fitting uses :mod:`joblib`.
+    * Subclasses must implement algorithm-specific methods such as
+      :meth:`ivim_fit` or :meth:`ivim_fit_full_volume`.
+
+    Examples
+    --------
+    # Dynamic algorithm loading
+    model = OsipiBase(bvalues=[0, 50, 200, 800],
+                      algorithm="MyAlgorithm")
+
+    # Voxel-wise fitting
+    results = model.osipi_fit(dwi_data, njobs=4)
+    f_map = results["f"]
+    """
 
     def __init__(self, bvalues=None, thresholds=None, bounds=None, initial_guess=None, algorithm=None, **kwargs):
         # Define the attributes as numpy arrays only if they are not None
@@ -19,6 +102,7 @@
         self.use_initial_guess = True
         self.deep_learning = False
         self.supervised = False
+        self.stochastic = False
         # If the user inputs an algorithm to OsipiBase, it is intereprete as initiating
         # an algorithm object with that name.
         if algorithm:
@@ -52,9 +136,48 @@ def initialize(**kwargs):
         pass
 
     #def osipi_fit(self, data=None, bvalues=None, thresholds=None, bounds=None, initial_guess=None, **kwargs):
-    def osipi_fit(self, data, bvalues=None, **kwargs):
-        """Fits the data with the bvalues
-        Returns [S0, f, Dstar, D]
+    def osipi_fit(self, data, bvalues=None, njobs=1, **kwargs):
+        """
+        Fit multi-b-value diffusion MRI data using the IVIM model.
+
+        This method normalizes the input signal to the minimum b-value and fits each voxel's signal
+        to the IVIM model to estimate parameters such as the perfusion fraction (f), the
+        pseudo-diffusion coefficient (D*), and the tissue diffusion coefficient (D).
+        Fits can be computed in parallel across voxels using multiple jobs.
+
+        Parameters
+        ----------
+        data : np.ndarray
+            Multi-dimensional array containing the signal intensities. The last dimension must correspond
+            to the b-values (diffusion weightings).
+        bvalues : array-like, optional
+            Array of b-values corresponding to the last dimension of `data`. If not provided, the method
+            uses `self.bvalues` set during object initialization.
+        njobs : int, optional, default=1
+            Number of parallel jobs to use for voxel-wise fitting. If `njobs` > 1, the fitting will be
+            distributed across multiple processes. -1 will use all available CPUs.
+        **kwargs : dict, optional
+            Additional keyword arguments to be passed to the underlying `ivim_fit` function.
+
+        Returns
+        -------
+        results : dict of np.ndarray
+            Dictionary containing voxel-wise parameter maps. Keys are parameter names ("f", "D", "Dp"),
+            and values are arrays with the same shape as `data` excluding the last dimension.
+
+        Notes
+        -----
+        - The signal is normalized to the minimum b-value before fitting.
+        - Handles NaN values by returning zeros for all parameters in those voxels.
+        - Parallelization is handled using joblib's `Parallel` and `delayed`.
+        - If `self.result_keys` is defined, it determines the output parameter names; otherwise, the default
+          keys are ["f", "Dp", "D"].
+        - The method swaps D and D* values after fitting using `self.D_and_Ds_swap` to maintain consistency.
+
+        Example
+        -------
+        >>> results = instance.osipi_fit(data, bvalues=[0, 50, 200, 800], njobs=4)
+        >>> f_map = results['f']
+        >>> D_map = results['D']
         """
 
         # We should first check whether the attributes in the __init__ are not None
@@ -64,7 +187,6 @@
         #use_bounds = bounds if self.bounds is None else self.bounds
         #use_initial_guess = initial_guess if self.initial_guess is None else self.initial_guess
 
-        # Make sure we don't make arrays of None's
         if use_bvalues is not None: use_bvalues = np.asarray(use_bvalues)
         #if use_thresholds is not None: use_thresholds = np.asarray(use_thresholds)
@@ -101,39 +223,103 @@
             results[key] = np.empty(list(data.shape[:-1]))
 
         # Assuming the last dimension of the data is the signal values of each b-value
-        #results = np.empty(list(data.shape[:-1])+[3]) # Create an array with the voxel dimensions + the ones required for the fit
+        # results = np.empty(list(data.shape[:-1])+[3]) # Create an array with the voxel dimensions + the ones required for the fit
         #for ijk in tqdm(np.ndindex(data.shape[:-1]), total=np.prod(data.shape[:-1])):
             #args = [data[ijk], use_bvalues]
            #fit = list(self.ivim_fit(*args, **kwargs))
            #results[ijk] = fit
 
        minimum_bvalue = np.min(use_bvalues)  # We normalize the signal to the minimum bvalue. Should be 0 or very close to 0.
        b0_indices = np.where(use_bvalues == minimum_bvalue)[0]
+       normalization_factor = np.mean(data[..., b0_indices], axis=-1)
+       data = data / np.repeat(normalization_factor[..., np.newaxis], np.shape(data)[-1], -1)
+       if np.shape(data.shape)[0] == 1:
+           njobs = 1
+       if data.shape[0] < njobs:
+           njobs = 1
+       if njobs > 1:
+           # Flatten the indices first
+           all_indices = list(np.ndindex(data.shape[:-1]))
+           #np.save('data.npy', data)
+           #data = np.load('data.npy', mmap_mode='r')
+           # Define parallel function
+           def parfun(ijk):
+               single_voxel_data = np.array(data[ijk], copy=True)
+               if not np.isnan(single_voxel_data[0]):
+                   fit = self.ivim_fit(single_voxel_data, use_bvalues, **kwargs)
+                   fit = self.D_and_Ds_swap(fit)
+               else:
+                   fit = {'D': 0, 'f': 0, 'Dp': 0}
+               return ijk, fit
 
-       for ijk in tqdm(np.ndindex(data.shape[:-1]), total=np.prod(data.shape[:-1])):
-           # Normalize array
-           single_voxel_data = data[ijk]
-           single_voxel_data_normalization_factor = np.mean(single_voxel_data[b0_indices])
-           single_voxel_data_normalized = single_voxel_data/single_voxel_data_normalization_factor
-
-           args = [single_voxel_data_normalized, use_bvalues]
-           fit = self.ivim_fit(*args, **kwargs) # For single voxel fits, we assume this is a dict with a float value per key.
-           fit = self.D_and_Ds_swap(self.ivim_fit(*args, **kwargs)) # For single voxel fits, we assume this is a dict with a float value per key.
-           for key in list(fit.keys()):
-               results[key][ijk] = fit[key]
+           # Run in parallel
+           results_list = Parallel(n_jobs=njobs)(
+               delayed(parfun)(ijk) for ijk in tqdm(all_indices, total=len(all_indices), mininterval=60)  # updates every minute
+           )
+           # Initialize result arrays if not already done
+           # Example: results = {key: np.zeros(data.shape[:-1]) for key in expected_keys}
+
+           # Populate results after parallel loop
+           for ijk, fit in results_list:
+               for key in fit:
+                   results[key][ijk] = fit[key]
+       else:
+           for ijk in tqdm(np.ndindex(data.shape[:-1]), total=np.prod(data.shape[:-1]), mininterval=60):
+               # Normalize array
+               single_voxel_data = data[ijk]
+               if not np.isnan(single_voxel_data[0]):
+                   args = [single_voxel_data, use_bvalues]
+                   fit = self.D_and_Ds_swap(self.ivim_fit(*args, **kwargs))  # For single voxel fits, we assume this is a dict with a float value per key.
+               else:
+                   fit = {'D': 0, 'f': 0, 'Dp': 0}
+               for key in list(fit.keys()):
+                   results[key][ijk] = fit[key]
 
        #self.parameter_estimates = self.ivim_fit(data, bvalues)
        return results
 
    def osipi_fit_full_volume(self, data, bvalues=None, **kwargs):
-       """Sends a full volume in one go to the fitting algorithm. The osipi_fit method only sends one voxel at a time.
+       """
+       Fit an entire volume of multi-b-value diffusion MRI data in a single call using the IVIM model.
 
-       Args:
-           data (array): 2D (data x b-values), 3D (single slice) or 4D (multi slice) DWI data, with last dimension the b-value dimension.
-           bvalues (array, optional): The b-values of the DWI data. Defaults to None.
+       Unlike `osipi_fit`, which processes one voxel at a time, this method sends the full volume
+       to the fitting algorithm. This can be more efficient for algorithms that support full-volume fitting.
 
-       Returns:
-           results (dict): Dict with key each containing an array which is a parametric map.
+       Parameters
+       ----------
+       data : np.ndarray
+           2D (data x b-values), 3D (single slice), or 4D (multi-slice) diffusion-weighted imaging (DWI) data.
+           The last dimension must correspond to the b-values.
+       bvalues : array-like, optional
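Note: the parallel path in `osipi_fit` returns (index, fit-dict) pairs from the workers and scatters them into the result maps afterwards. Stripped to its essentials, with a dummy fit function standing in for `ivim_fit`:

    import numpy as np
    from joblib import Parallel, delayed

    data = np.random.rand(8, 8, 10)  # toy (x, y, b) stack

    def fake_fit(voxel):             # stand-in for self.ivim_fit
        return {'D': voxel.mean(), 'f': 0.1, 'Dp': 0.01}

    def task(ijk):
        return ijk, fake_fit(data[ijk])

    indices = list(np.ndindex(data.shape[:-1]))
    pairs = Parallel(n_jobs=2)(delayed(task)(ijk) for ijk in indices)

    maps = {k: np.zeros(data.shape[:-1]) for k in ('D', 'f', 'Dp')}
    for ijk, fit in pairs:           # scatter per-voxel dicts into maps
        for k in fit:
            maps[k][ijk] = fit[k]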
+           Array of b-values corresponding to the last dimension of `data`. If not provided, the method
+           uses `self.bvalues` set during object initialization.
+       **kwargs : dict, optional
+           Additional keyword arguments to be passed to `ivim_fit_full_volume`.
+
+       Returns
+       -------
+       results : dict of np.ndarray or bool
+           Dictionary containing parametric maps for each IVIM parameter (keys from `self.result_keys`,
+           or the defaults ["f", "Dp", "D"]). Each value is an array matching the spatial dimensions of `data`.
+           Returns `False` if full-volume fitting is not supported by the algorithm.
+
+       Notes
+       -----
+       - This method does not normalize the input signal to the minimum b-value, unlike `osipi_fit`.
+
+       Example
+       -------
+       # Standard usage:
+       results = instance.osipi_fit_full_volume(data, bvalues=[0, 50, 200, 800])
+       f_map = results['f']
+       D_map = results['D']
+       Dp_map = results['Dp']
+
+       # If the algorithm does not support full-volume fitting:
+       results = instance.osipi_fit_full_volume(data)
+       if results is False:
+           print("Full-volume fitting not supported.")
        """
 
        try:
@@ -152,16 +338,8 @@
            results = {}
            for key in self.result_keys:
                results[key] = np.empty(list(data.shape[:-1]))
-
-           minimum_bvalue = np.min(use_bvalues)  # We normalize the signal to the minimum bvalue. Should be 0 or very close to 0.
-           b0_indices = np.where(use_bvalues == minimum_bvalue)[0]
-           b0_mean = np.mean(data[..., b0_indices], axis=-1)
-
-           normalization_factors = np.array([b0_mean for i in range(data.shape[-1])])
-           normalization_factors = np.moveaxis(normalization_factors, 0, -1)
-           data_normalized = data/normalization_factors
-
-           args = [data_normalized, use_bvalues]
+           # no normalisation as volume algorithms may not want normalized signals...
+           args = [data, use_bvalues]
            fit = self.ivim_fit_full_volume(*args, **kwargs)  # Assume this is a dict with an array per key representing the parametric maps
            for key in list(fit.keys()):
                results[key] = fit[key]
diff --git a/tests/IVIMmodels/unit_tests/algorithms.json b/tests/IVIMmodels/unit_tests/algorithms.json
index e38310f9..68422bc6 100644
--- a/tests/IVIMmodels/unit_tests/algorithms.json
+++ b/tests/IVIMmodels/unit_tests/algorithms.json
@@ -1,5 +1,10 @@
 {
     "algorithms": [
+        "IVIM_NEToptim",
+        "TCML_TechnionIIT_lsqlm",
+        "TCML_TechnionIIT_lsqtrf",
+        "TCML_TechnionIIT_lsq_sls_lm",
+        "TCML_TechnionIIT_SLS",
         "Super_IVIM_DC",
         "ASD_MemorialSloanKettering_QAMPER_IVIM",
         "ETP_SRI_LinearFitting",
@@ -15,8 +20,7 @@
         "PV_MUMC_biexp",
         "PvH_KB_NKI_IVIMfit",
         "OJ_GU_seg",
-        "TF_reference_IVIMfit",
-        "IVIM_NEToptim"
+        "TF_reference_IVIMfit"
     ],
     "IVIM_NEToptim": {
         "deep_learning": true
diff --git a/tests/IVIMmodels/unit_tests/test_ivim_fit.py b/tests/IVIMmodels/unit_tests/test_ivim_fit.py
index bd58a6aa..86c128c4 100644
--- a/tests/IVIMmodels/unit_tests/test_ivim_fit.py
+++ b/tests/IVIMmodels/unit_tests/test_ivim_fit.py
@@ -3,6 +3,8 @@
 import pytest
 import time
 from src.wrappers.OsipiBase import OsipiBase
+from joblib import Parallel, delayed
+import warnings
 
 #run using python -m pytest from the root folder
 
@@ -28,6 +30,10 @@ def tolerances_helper(tolerances, data):
         tolerances["atol"] = tolerances.get("atol", {"f": 2e-1, "D": 5e-4, "Dp": 4e-2})
     return tolerances
 
+
+class PerformanceWarning(UserWarning):
+    pass
+
 def test_ivim_fit_saved(data_ivim_fit_saved, eng, request, record_property):
     name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime, requires_matlab = data_ivim_fit_saved
     max_time = 0.5
@@ -75,7 +81,7 @@ def to_list_if_needed(value):
     assert elapsed_time < max_time, f"Algorithm {name} took
{elapsed_time} seconds, which is longer than 2 second to fit per voxel"  #less than 0.5 seconds per voxel
 
 def test_default_bounds_and_initial_guesses(algorithmlist,eng):
-    algorithm, requires_matlab, deep_learning = algorithmlist
+    algorithm, requires_matlab = algorithmlist
     if requires_matlab:
         if eng is None:
             pytest.skip(reason="Running without matlab; if Matlab is available please run pytest --withmatlab")
@@ -83,8 +89,6 @@
             kwargs = {'eng': eng}
     else:
         kwargs={}
-    if deep_learning:
-        pytest.skip(reason="deep learning algorithms do not take initial guesses. Bound testing for deep learning algorithms not yet implemented")
     fit = OsipiBase(algorithm=algorithm,**kwargs)
     #assert fit.bounds is not None, f"For {algorithm}, there is no default fit boundary"
     #assert fit.initial_guess is not None, f"For {algorithm}, there is no default fit initial guess"
@@ -152,6 +156,83 @@
     assert passf, f"Fit still passes when initial guess f is out of fit bounds; potentially initial guesses not respected for: {name}"
     assert passDp, f"Fit still passes when initial guess Ds is out of fit bounds; potentially initial guesses not respected for: {name}"
 '''
+def test_volume(algorithmlist, eng, threeddata):
+    algorithm, requires_matlab = algorithmlist
+    data, Dim, fim, Dpim, bvals = threeddata
+    # Get index of b=0
+    b0_index = np.where(bvals == 0.)[0][0]
+    data[data < 0] = 0
+    # Mask out voxels whose b=0 signal is below 0.01
+    invalid_mask = data[:, :, :, b0_index] < 0.01
+    data[invalid_mask, :] = np.nan
+
+    if requires_matlab:
+        if eng is None:
+            pytest.skip(reason="Running without matlab; if Matlab is available please run pytest --withmatlab")
+        else:
+            kwargs = {'eng': eng}
+    else:
+        kwargs = {}
+    fit = OsipiBase(algorithm=algorithm, **kwargs)
+    if hasattr(fit, 'ivim_fit_full_volume') and callable(getattr(fit, 'ivim_fit_full_volume')):
+        start_time = time.time()  # Record the start time
+        fit_result = fit.osipi_fit_full_volume(data, bvals)
+        elapsed_time2 = time.time() - start_time  # Calculate elapsed time
+        print('time elapsed is ' + str(elapsed_time2))
+        assert np.shape(fit_result['D'])[0] == np.shape(data)[0]
+        assert np.shape(fit_result['D'])[1] == np.shape(data)[1]
+        assert np.shape(fit_result['D'])[2] == np.shape(data)[2]
+        # check if the right variable is in the right place
+        assert np.nanmean(fit_result['D']) < np.nanmean(fit_result['Dp'])
+        assert np.nanmean(fit_result['Dp']) < np.nanmean(fit_result['f'])
+    else:
+        pytest.skip(reason="Wrapper has no ivim_fit_full_volume option")
+
+
+def test_parallel(algorithmlist, eng, threeddata):
+    algorithm, requires_matlab = algorithmlist
+    data, Dim, fim, Dpim, bvals = threeddata
+    # Get index of b=0
+    b0_index = np.where(bvals == 0)[0][0]
+    data[data < 0] = 0
+    # Mask out voxels whose b=0 signal is below 0.01
+    invalid_mask = data[:, :, :, b0_index] < 0.01
+    data[invalid_mask, :] = np.nan
+    print('testing ' + str(np.sum(~invalid_mask)) + ' voxels of a matrix size ' + str(np.shape(data)))
+    if requires_matlab:
+        if eng is None:
+            pytest.skip(reason="Running without matlab; if Matlab is available please run pytest --withmatlab")
+        else:
+            kwargs = {'eng': eng}
+    else:
+        kwargs = {}
+    fit = OsipiBase(algorithm=algorithm, **kwargs)
+
+    def dummy_task(x):
+        return x
+
+    start_time = time.time()  # Record the start time
+    fit_result = fit.osipi_fit(data, bvals, njobs=1)
+    time_serial = time.time() - start_time  # Calculate elapsed time
+    Parallel(n_jobs=2)(delayed(dummy_task)(i) for i in range(8))  # warm up the pool; GitHub
Actions only supports 2 cores
+    start_time = time.time()  # Record the start time
+    fit_result2 = fit.osipi_fit(data, bvals, njobs=2)
+    time_parallel = time.time() - start_time  # Calculate elapsed time
+
+    print('serial took ' + str(time_serial) + ' seconds, and parallel took ' + str(time_parallel) + ' seconds')
+    assert np.shape(fit_result['D'])[0] == np.shape(data)[0]
+    assert np.shape(fit_result['D'])[1] == np.shape(data)[1]
+    assert np.shape(fit_result['D'])[2] == np.shape(data)[2]
+    if not fit.stochastic:
+        assert np.allclose(fit_result['D'], fit_result2['D'], atol=1e-4), "Results differ between parallel and serial"
+        assert np.allclose(fit_result['f'], fit_result2['f'], atol=1e-2), "Results differ between parallel and serial"
+        assert np.allclose(fit_result['Dp'], fit_result2['Dp'], atol=1e-2), "Results differ between parallel and serial"
+    if time_parallel * 1.3 > time_serial:
+        warnings.warn(
+            f"[PERFORMANCE WARNING] Parallel code is not significantly faster than serial: "
+            f"{time_parallel:.3f}s vs {time_serial:.3f}s", PerformanceWarning
+        )
+
 
 def test_deep_learning_algorithms(deep_learning_algorithms, record_property):
     algorithm, data, bvals, kwargs, requires_matlab, tolerances = deep_learning_algorithms
@@ -207,6 +288,3 @@ def to_list_if_needed(value):
     if errors:
         all_errors = "\n".join(errors)
         raise AssertionError(f"Some tests failed:\n{all_errors}")
-
-
-
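Note: the pytest.ini filters and the `-r w` workflow flag are what make the new `PerformanceWarning` visible in CI while other warnings stay quiet. A sketch of the equivalent filter logic in plain `warnings` (later filters take precedence, matching the ini ordering):

    import warnings

    class PerformanceWarning(UserWarning):
        pass

    warnings.filterwarnings('ignore', category=Warning)             # ignore::Warning
    warnings.filterwarnings('always', category=PerformanceWarning)  # later rule wins
    warnings.warn('parallel not faster than serial', PerformanceWarning)  # shown
    warnings.warn('anything else', UserWarning)                           # suppressed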