From 2546b4290fe4530575462fb343182e29bba069a3 Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Fri, 3 Nov 2023 12:20:13 +0100 Subject: [PATCH 01/29] Push patch into V1.1 (#467) * Increase version number and update changelog (#455) * Bugfixes 4th sep (#460) * Fix normalisation in rice model Closes #459 * Remove three spin anaysis Closes #427 * Improved installation instructions * Caution about difference in definition of cost function Closes #450 * Bump Version * Bug for non linearly constrained problems When a problem is not linearly constrained and not non-negative, the linear solver outputs a result class not the solution. * Keeping changelog up-to date * Fixing Sophgrid bug (#464) * Fixing Sophgrid bug * Add unit test for sophgrid * Update changelog.rst for new release (#466) --- VERSION | 2 +- deerlab/dd_models.py | 2 +- deerlab/solvers.py | 8 +- deerlab/utils.py | 8 +- docsrc/source/changelog.rst | 12 +++ docsrc/source/installation.rst | 23 +++-- .../advanced/ex_long_threespin_analysis.py | 95 ------------------- test/test_utils.py | 16 ++++ 8 files changed, 55 insertions(+), 111 deletions(-) delete mode 100644 examples/advanced/ex_long_threespin_analysis.py create mode 100644 test/test_utils.py diff --git a/VERSION b/VERSION index 795460fce..0f1acbd56 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v1.1.0 +v1.1.2 diff --git a/deerlab/dd_models.py b/deerlab/dd_models.py index afcd381d9..dc32cbaab 100644 --- a/deerlab/dd_models.py +++ b/deerlab/dd_models.py @@ -125,7 +125,7 @@ def _multirice3dfun(r,nu,sig): r = r.T s2 = sig**2 I_scaled = spc.ive(n/2-1, nu*r/s2) - P = nu**(n/2-1)/s2*r**(n/2)*np.exp(-(r**2+nu**2)/(2*s2)+nu*r/s2)*I_scaled + P = nu**(1-n/2)/s2*r**(n/2)*np.exp(-(r**2+nu**2)/(2*s2)+nu*r/s2)*I_scaled P[P<0] = 0 # Normalization diff --git a/deerlab/solvers.py b/deerlab/solvers.py index 8c1c6a298..1052d7b39 100644 --- a/deerlab/solvers.py +++ b/deerlab/solvers.py @@ -426,6 +426,11 @@ def snlls(y, Amodel, par0=None, lb=None, ub=None, lbl=None, ubl=None, 
nnlsSolver * ``0`` : Work silently (default). * ``1`` : Display progress including the non-linear least-squares' solver termination report. * ``2`` : Display progress including the non-linear least-squares' solver iteration details. + + .. caution:: + + The verbose output from the non-linear least-squares solver uses a different definition of the cost function than DeerLab. + DeerLab uses the sum of squares of the residuals divided by the number of data points, whereas the non-linear least-squares solver uses the sum of squares of the residuals divided by 2. Returns ------- @@ -594,7 +599,8 @@ def linear_problem(y,A,optimize_alpha,alpha): if optimize_alpha: - output = dl.selregparam((y-yfrozen)[mask], Ared[mask,:], linSolver, regparam, + linsolver_result = lambda AtA, Aty: parseResult(linSolver(AtA, Aty)) + output = dl.selregparam((y-yfrozen)[mask], Ared[mask,:], linsolver_result, regparam, weights=weights[mask], regop=L, candidates=regparamrange, noiselvl=noiselvl,searchrange=regparamrange,full_output=True) alpha = output[0] diff --git a/deerlab/utils.py b/deerlab/utils.py index 5079bd5b6..3b7f69d13 100644 --- a/deerlab/utils.py +++ b/deerlab/utils.py @@ -833,7 +833,7 @@ def sophegrid(octants,maxphi,size): weights = np.zeros(nOrientations) sindth2 = np.sin(dtheta/2) - w1 = 0.5 + w1 = 1.0 # North pole (z orientation) phi[0] = 0 @@ -841,7 +841,7 @@ def sophegrid(octants,maxphi,size): weights[0] = maxphi*(1 - np.cos(dtheta/2)) # All but equatorial slice - Start = 2 + Start = 1 for iSlice in np.arange(2,size): nPhi = nOct*(iSlice-1) + 1 dPhi = maxphi/(nPhi - 1) @@ -854,13 +854,13 @@ def sophegrid(octants,maxphi,size): # Equatorial slice nPhi = nOct*(size - 1) + 1 dPhi = maxphi/(nPhi - 1) - idx = Start + (np.arange(0,nPhi) - 1) + idx = Start + np.arange(0,nPhi) phi[idx] = np.linspace(0,maxphi,nPhi) theta[idx] = np.pi/2 weights[idx] = sindth2*dPhi*np.concatenate([[w1], np.ones(nPhi-2), [0.5]]) # Border removal - rmv = np.cumsum(nOct*np.arange(1,size-1)+1)+1 + rmv = 
np.cumsum(nOct*np.arange(1,size)+1) phi = np.delete(phi,rmv) theta = np.delete(theta,rmv) weights = np.delete(weights,rmv) diff --git a/docsrc/source/changelog.rst b/docsrc/source/changelog.rst index 9da8425ee..894c269fb 100644 --- a/docsrc/source/changelog.rst +++ b/docsrc/source/changelog.rst @@ -24,6 +24,18 @@ Release Notes - |fix| : Something which was not working as expected or leading to errors has been fixed. - |api| : This will require changes in your scripts or code. +Release ``v1.1.2`` - November 2023 +------------------------------------------ +- |fix| : Fixes an issue with sophgrid (:pr:`463`). +- |fix| : Fixes an error in the normalisation of the rice models (:pr:`459`). +- |fix| : Removes the broken three spin example (:pr:`427`). +- |fix| : Fixes an error in the linear solver for linearly constrained not non-negative problems. + +Release ``v1.1.1`` - August 2023 +------------------------------------------ +- |fix| : Fixes an error in the `FitResult.evaluate` function. (:pr:`454`) + + Release ``v1.1.0`` - August 2023 ------------------------------------------ - |api| : The definition of the dipolar time axis for RIDME has changed to match the one for 4-pulse DEER (:pr:`436`). diff --git a/docsrc/source/installation.rst b/docsrc/source/installation.rst index dd974a469..95257bc3f 100644 --- a/docsrc/source/installation.rst +++ b/docsrc/source/installation.rst @@ -45,7 +45,7 @@ DeerLab installs the following packages: * `joblib `_ - Library lightweight pipelining and parallelization. * `tqdm `_ - A lightweight package for smart progress meters. * `dill `_ - An extension of Python's pickle module for serializing and de-serializing python objects. 
-* `quadprog `_ - A quadratic programming solver +* `quadprog `_ - A quadratic programming solver (Only for Python versions < 3.11) Importing DeerLab ------------------ @@ -74,9 +74,6 @@ Any DeerLab version released after v0.10.0 can be installed via ``pip`` using th python -m pip install deerlab==x.y.z -DeerLab version prior to 0.10 are written in MATLAB and are still available from an `archived repository `_. -Download and installation instruction for the MATLAB environment are provided in the released documentation. MATLAB releases have been deprecated and no further support is provided. - Installing from source ***************************** @@ -130,10 +127,18 @@ On a **Windows** computer, if you are trying to run a DeerLab function, you migh ImportError: DLL load failed: The specified module could not be found. -This happens when the MKL libraries have not been properly linked in ``numpy``, ``scipy`` or ``cvxopt`` -installations (typically ``numpy``). This can happen due to firewall restrictions, -user rights, or internet connection issues during the DeerLab installation. To solve this, the -best solution is to manually install as follows. +This issue can occur when a specific python package is not installled properly. This typicaly happens when installing the MKL linked libaries but can +occur when installing packages normaly from PyPI. This can happen due to firewall restrictions, +user rights, or internet connection issues during the DeerLab installation. + +If your where installing packages from PyPI (i.e. via pip) then please uninstall the package and reinstall it. + +.. code-block:: text + + python -m pip uninstall + python -m pip install + +If you were trying to install the MKL linked libraries then please follow the instructions below. 1) Go to https://www.lfd.uci.edu/~gohlke/pythonlibs/#numpy @@ -167,7 +172,7 @@ During installation on certain systems (e.g. 
some computation clusters) using on the following error might be raised during the installation: .. code-block:: text - + Error while finding module specification for 'setup.py' (ModuleNotFoundError: __path__ attribute not found on 'setup' while trying to find 'setup.py') diff --git a/examples/advanced/ex_long_threespin_analysis.py b/examples/advanced/ex_long_threespin_analysis.py deleted file mode 100644 index cdbde8261..000000000 --- a/examples/advanced/ex_long_threespin_analysis.py +++ /dev/null @@ -1,95 +0,0 @@ -#%% -""" -Analyzing 4-pulse DEER data acquired on three-spin systems -============================================================================ - -As in the publication referenced below, this example will take two 4-pulse DEER signals acquired -on the same protein sample conatining three nitroxide spins with different attenuation levels of the pump -pulse power. - -For the original model and more information on these systems please refer to: -L. Fábregas Ibáñez, M. H. Tessmer, G. Jeschke, and S. Stoll. -Dipolar pathways in multi-spin and multi-dimensional dipolar EPR spectroscopy -Phys. Chem. Chem. 
Phys., 24 2022, 22645-22660 -""" -#%% -import numpy as np -import deerlab as dl - -# Load experimental data -files = [f'../data/triradical_protein_deer_{dB}dB.DTA' for dB in [0,6,9]] - -# Experiment information -t0 = 0.280 # Start time, μs -tau1 = 0.40 # First interpulse delay, μs -tau2 = 9.00 # Second interpulse delay, μs - -# Construct 4-pulse DEER experiment model -my4pdeer = dl.ex_4pdeer(tau1,tau2,pathways=[1]) - -# Loop over the different datasets -Vmodels,Vexps,ts,Vexps_sub,ts_sub = [],[],[],[],[] -for n,file in enumerate(files): - # Load the dataset - t,Vexp, descriptor = dl.deerload(file,full_output=True) - t = t[:-80] - Vexp = Vexp[:-80] - # Adjust the Start time - t = t - t[0] + t0 - - # Pre-processing - Vexp = dl.correctphase(Vexp) - Vexp /= np.max(Vexp) - - # Store the pre-processed datasets in a list - Vexps.append(Vexp) - ts.append(t) - - # Subsampling - # (required for efficient analysis in densely sampled datasets) - sampling = np.arange(0,len(t),4) # Take every 4th point - t_sub = t[sampling] - Vexp_sub = Vexp[sampling] - - # Store the subsampled datasets in a list - Vexps_sub.append(Vexp_sub) - ts_sub.append(t_sub) - - # Construct the three-spin dipolar model - Vmodel = dl.dipolarmodel(t_sub,spins=3,experiment=my4pdeer, minamp=0.01) - - # Add dipolar model to list of models - Vmodels.append(Vmodel) - -# Construct a global dipolar model describing all datasets -Vglobal = dl.merge(*Vmodels) -Vglobal = dl.link(Vglobal, - rmean1=[f'rmean1_{n+1}' for n in range(len(Vmodels))], - rmean2=[f'rmean2_{n+1}' for n in range(len(Vmodels))], - rmean3=[f'rmean3_{n+1}' for n in range(len(Vmodels))], - chol11=[f'chol11_{n+1}' for n in range(len(Vmodels))], - chol22=[f'chol22_{n+1}' for n in range(len(Vmodels))], - chol33=[f'chol33_{n+1}' for n in range(len(Vmodels))], - chol21=[f'chol21_{n+1}' for n in range(len(Vmodels))], - chol31=[f'chol31_{n+1}' for n in range(len(Vmodels))], - chol32=[f'chol32_{n+1}' for n in range(len(Vmodels))], - conc=[f'conc_{n+1}' for n 
in range(len(Vmodels))], - reftime1=[f'reftime1_{n+1}' for n in range(len(Vmodels))], -) -# Freeze the Cholesky-factors accounting for the correlation coefficients -# to zero (not always applicable) -Vglobal.chol21.freeze(0) -Vglobal.chol31.freeze(0) -Vglobal.chol32.freeze(0) - -# Fit the model to the data -results = dl.fit(Vglobal, Vexps_sub, reg=False, ftol=1e-5) - -# %% -# Plot the fitted datasets -results.plot(axis=ts_sub,xlabel='time (μs)') - -# Print the fit summary -print(results) - -# %% diff --git a/test/test_utils.py b/test/test_utils.py new file mode 100644 index 000000000..2df9a899f --- /dev/null +++ b/test/test_utils.py @@ -0,0 +1,16 @@ +import numpy as np +from deerlab import sophegrid + + +# ====================================================================== +# Test sophegrid function + +def test_sophegrid(): + # Test that the function returns the values and weights summing to 1 + # Comparison taken from EasySpin 6.0 + phi, theta, weights = sophegrid(4,np.pi*2,3) + + assert np.allclose(weights.sum(),1) + assert np.allclose(phi, np.array([0,0,1.57079632679490,3.14159265358979,4.71238898038469,0,0.785398163397448,1.57079632679490,2.35619449019235,3.14159265358979,3.92699081698724,4.71238898038469,5.49778714378214])) + assert np.allclose(theta, np.array([0,0.785398163397448,0.785398163397448,0.785398163397448,0.785398163397448,1.57079632679490,1.57079632679490,1.57079632679490,1.57079632679490,1.57079632679490,1.57079632679490,1.57079632679490,1.57079632679490])) + assert np.allclose(weights*4*np.pi, np.array([0.956558005801449,1.70021769237074,1.70021769237074,1.70021769237074,1.70021769237074,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346])) \ No newline at end of file From 59a6584c8345084fb4ffda00a619ab3917f97ac0 Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Sun, 14 Apr 2024 02:15:34 +0100 Subject: [PATCH 02/29] Removes katex and others (#471) * 
Fixing Sophgrid bug * Add unit test for sophgrid * Minor doc update * Bump version Number * Remove unnecessary doc files * Update changelog --- VERSION | 2 +- deerlab/fitresult.py | 2 ++ deerlab/solvers.py | 1 + deerlab/utils.py | 1 + docsrc/package-lock.json | 19 ------------------- docsrc/source/changelog.rst | 4 ++++ 6 files changed, 9 insertions(+), 20 deletions(-) delete mode 100644 docsrc/package-lock.json diff --git a/VERSION b/VERSION index 0f1acbd56..99a4aef0c 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v1.1.2 +v1.1.3 diff --git a/deerlab/fitresult.py b/deerlab/fitresult.py index 80c99bcc5..4841f9e67 100644 --- a/deerlab/fitresult.py +++ b/deerlab/fitresult.py @@ -42,6 +42,8 @@ class FitResult(dict): * ``stats['aic']`` - Akaike information criterion * ``stats['aicc']`` - Corrected Akaike information criterion * ``stats['bic']`` - Bayesian information criterion + * ``stats['autocorr']`` - Autocorrelation based on Durbin–Watson statistic + nonlin : ndarray Fitted non-linear parameters. [:ref:`snlls` specific attribute] diff --git a/deerlab/solvers.py b/deerlab/solvers.py index 1052d7b39..523afbe15 100644 --- a/deerlab/solvers.py +++ b/deerlab/solvers.py @@ -462,6 +462,7 @@ def snlls(y, Amodel, par0=None, lb=None, ub=None, lbl=None, ubl=None, nnlsSolver * ``stats['aic']`` - Akaike information criterion * ``stats['aicc']`` - Corrected Akaike information criterion * ``stats['bic']`` - Bayesian information criterion + * ``stats['autocorr']`` - Autocorrelation based on Durbin–Watson statistic success : bool Whether or not the optimizer exited successfully. 
cost : float diff --git a/deerlab/utils.py b/deerlab/utils.py index 3b7f69d13..eb06134dd 100644 --- a/deerlab/utils.py +++ b/deerlab/utils.py @@ -282,6 +282,7 @@ def goodness_of_fit(x,xfit,Ndof,noiselvl): stats['aic'] - Akaike information criterion stats['aicc'] - Corrected Akaike information criterion stats['bic'] - Bayesian information criterion + stats['autocorr'] - Autocorrelation based on Durbin–Watson statistic """ sigma = noiselvl Ndof = np.maximum(Ndof,1) diff --git a/docsrc/package-lock.json b/docsrc/package-lock.json deleted file mode 100644 index 02c9db5b6..000000000 --- a/docsrc/package-lock.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "requires": true, - "lockfileVersion": 1, - "dependencies": { - "commander": { - "version": "2.20.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.0.tgz", - "integrity": "sha512-7j2y+40w61zy6YC2iRNpUe/NwhNyoXrYpHMrSunaMG64nRnaf96zO/KMQR4OyN/UnE5KLyEBnKHd4aG3rskjpQ==" - }, - "katex": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/katex/-/katex-0.11.0.tgz", - "integrity": "sha512-RQsU3HSMjLW9AdPpi2zaBwM123goCbUcUbBJfmjcAdA982RgtEUNMmrf+3y8anGjgJLantcNLa/VSK73xztQBg==", - "requires": { - "commander": "^2.19.0" - } - } - } -} diff --git a/docsrc/source/changelog.rst b/docsrc/source/changelog.rst index 894c269fb..252ef0fcf 100644 --- a/docsrc/source/changelog.rst +++ b/docsrc/source/changelog.rst @@ -24,6 +24,10 @@ Release Notes - |fix| : Something which was not working as expected or leading to errors has been fixed. - |api| : This will require changes in your scripts or code. +Release ``v1.1.3`` - Ongoing +------------------------------------------ +- |fix| : Removes unnecessary files from the docs + Release ``v1.1.2`` - November 2023 ------------------------------------------ - |fix| : Fixes an issue with sophgrid (:pr:`463`). 
From e5cb1e2a3eb64e7be2d255f78c507cb4261f70cb Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Wed, 12 Jun 2024 19:31:44 +0200 Subject: [PATCH 03/29] Dipolarkernal speedup (#473) * Seperate Kinterpolator into its own function Interpolation in Scipy is very slow, currently the same interpolation is being rerun for every calculation of the dipolarkernal. This is now cached to speed it up. * Only run orientation selection on grid and integral based kernals This hunk of code is not needed when fresnel integrals are used so does not need to be evaluated * Update changelog --- deerlab/dipolarkernel.py | 29 ++++++++++++++++++----------- docsrc/source/changelog.rst | 1 + 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/deerlab/dipolarkernel.py b/deerlab/dipolarkernel.py index 33d815f1b..8ee3d3873 100644 --- a/deerlab/dipolarkernel.py +++ b/deerlab/dipolarkernel.py @@ -367,7 +367,7 @@ def dipolarkernel(t, r, *, pathways=None, mod=None, bg=None, method='fresnel', e if tinterp is not None: # Construct interpolator, this way elementarykernel_twospin is executed only once independently of how many pathways there are - Kinterpolator = [interp1d(tinterp,elementarykernel_twospin(tinterp,r_q,method,excbandwidth,gridsize,g,orisel,complex),axis=0,kind='cubic') for r_q in r] + Kinterpolator = _elementarykernel_twospin_interp(tinterp,r,method,excbandwidth,gridsize,g,orisel,complex) withinInterpolation = lambda tdip: np.all((np.max(tinterp) >= np.max(tdip)) & (np.min(tinterp) <= np.min(tdip))) # Define kernel matrix auxiliary functions @@ -435,8 +435,14 @@ def K0_3spin(tdip): return K #============================================================================== - - +@cached(max_size=100) +def _elementarykernel_twospin_interp(tinterp,r,method,excbandwidth,gridsize,g,orisel,complex): + """ + Construct interpolator, this way elementarykernel_twospin is executed only once independently of how many pathways there are + Cached for performance reasons, interpolation is slow. 
+ """ + Kinterpolator = [interp1d(tinterp,elementarykernel_twospin(tinterp,r_q,method,excbandwidth,gridsize,g,orisel,complex),axis=0,kind='cubic') for r_q in r] + return Kinterpolator #============================================================================== # TWO-SPIN ELEMENTARY DIPOLAR KERNEL @@ -468,14 +474,15 @@ def elementarykernel_twospin(tdip,r,method,ωex,gridsize,g,Pθ,complex): ωr = (μ0/2)*μB**2*g[0]*g[1]/h*1e21/(r**3) # rad μs^-1 # Orientation selection - orientationselection = Pθ is not None - if orientationselection: - # Ensure zero-derivatives at [0,π/2] - θ = np.linspace(0,π/2,50) # rad - Pθ_ = make_interp_spline(θ, Pθ(θ),bc_type="clamped") - # Ensure normalization of probability density function (∫P(cosθ)dcosθ=1) - Pθnorm,_ = quad(lambda cosθ: Pθ_(np.arccos(cosθ)),0,1,limit=1000) - Pθ = lambda θ: Pθ_(θ)/Pθnorm + if method != 'fresnel': + orientationselection = Pθ is not None + if orientationselection: + # Ensure zero-derivatives at [0,π/2] + θ = np.linspace(0,π/2,50) # rad + Pθ_ = make_interp_spline(θ, Pθ(θ),bc_type="clamped") + # Ensure normalization of probability density function (∫P(cosθ)dcosθ=1) + Pθnorm,_ = quad(lambda cosθ: Pθ_(np.arccos(cosθ)),0,1,limit=1000) + Pθ = lambda θ: Pθ_(θ)/Pθnorm def elementarykernel_fresnel(tdip): #------------------------------------------------------------------------ diff --git a/docsrc/source/changelog.rst b/docsrc/source/changelog.rst index 252ef0fcf..3aa922321 100644 --- a/docsrc/source/changelog.rst +++ b/docsrc/source/changelog.rst @@ -27,6 +27,7 @@ Release Notes Release ``v1.1.3`` - Ongoing ------------------------------------------ - |fix| : Removes unnecessary files from the docs +- |efficiency| : Improves the performance of the ``dipolarkernel`` function by 10-30% (:pr:`473`), by caching the interpolation of he effective dipolar evolution time vector. 
Release ``v1.1.2`` - November 2023 ------------------------------------------ From 56689462e484504ce5561e19659ba58fcd499d3c Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Mon, 1 Jul 2024 17:21:18 +0200 Subject: [PATCH 04/29] Adding support for scipy 1.14 and tests for Python 3.12 (#474) * Update for 3.12 * Updated workflows and changelog * Upload to 3.12 --- .github/workflows/ci_PR.yml | 2 +- .github/workflows/ci_scheduled.yml | 2 +- .github/workflows/package_upload.yml | 4 ++-- deerlab/diststats.py | 2 +- docsrc/source/changelog.rst | 1 + 5 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci_PR.yml b/.github/workflows/ci_PR.yml index 63de2eec9..88374ad6f 100644 --- a/.github/workflows/ci_PR.yml +++ b/.github/workflows/ci_PR.yml @@ -12,7 +12,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-latest, windows-latest] - python-version: ['3.10','3.11'] + python-version: ['3.11','3.12'] steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/ci_scheduled.yml b/.github/workflows/ci_scheduled.yml index 0659c65fc..b101659cf 100644 --- a/.github/workflows/ci_scheduled.yml +++ b/.github/workflows/ci_scheduled.yml @@ -12,7 +12,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-latest, windows-latest] - python-version: [3.8, 3.9, "3.10", "3.11", "3.12"] + python-version: [3.8, 3.9, "3.10", "3.11", "3.12","3.12"] steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/package_upload.yml b/.github/workflows/package_upload.yml index 69be3f252..4a3b99777 100644 --- a/.github/workflows/package_upload.yml +++ b/.github/workflows/package_upload.yml @@ -11,10 +11,10 @@ jobs: steps: - name: Checkout uses: actions/checkout@v1 - - name: Set up Python 3.10 + - name: Set up Python 3.11 uses: actions/setup-python@v1 with: - python-version: "3.10" + python-version: "3.11" - name: Install pypa/build run: >- python -m pip install build --user diff --git a/deerlab/diststats.py b/deerlab/diststats.py index 
9368de5bc..3497e7b5b 100644 --- a/deerlab/diststats.py +++ b/deerlab/diststats.py @@ -7,7 +7,7 @@ import warnings import copy from scipy.signal import find_peaks -from scipy.integrate import cumtrapz +from scipy.integrate import cumulative_trapezoid as cumtrapz def diststats(r, P, Puq=None, verbose=False, threshold=None): r""" diff --git a/docsrc/source/changelog.rst b/docsrc/source/changelog.rst index 3aa922321..d8edd8c8b 100644 --- a/docsrc/source/changelog.rst +++ b/docsrc/source/changelog.rst @@ -28,6 +28,7 @@ Release ``v1.1.3`` - Ongoing ------------------------------------------ - |fix| : Removes unnecessary files from the docs - |efficiency| : Improves the performance of the ``dipolarkernel`` function by 10-30% (:pr:`473`), by caching the interpolation of he effective dipolar evolution time vector. +- |fix| : add support for Python 3.12 Release ``v1.1.2`` - November 2023 ------------------------------------------ From 6a0b992516dd53b6a8af79300f2520b3d5db14aa Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Tue, 2 Jul 2024 07:57:54 +0200 Subject: [PATCH 05/29] Numpy2 support (#479) * Update for 3.12 * Updated workflows and changelog * Upload to 3.12 * Numpy 2.0 compatibility updates * Update Changelog --- deerlab/classes.py | 2 +- deerlab/dd_models.py | 2 +- deerlab/diststats.py | 2 +- docsrc/source/changelog.rst | 4 +++- test/test_dipolarkernel.py | 2 +- 5 files changed, 7 insertions(+), 5 deletions(-) diff --git a/deerlab/classes.py b/deerlab/classes.py index 7e9b3e149..39063cbf4 100644 --- a/deerlab/classes.py +++ b/deerlab/classes.py @@ -381,7 +381,7 @@ def percentile(self,p): cdf = np.cumsum(pdf) cdf /= max(cdf) # Eliminate duplicates - cdf, index = np.lib.arraysetops.unique(cdf,return_index=True) + cdf, index = np.unique(cdf,return_index=True) # Interpolate requested percentile x[n] = np.interp(p/100,cdf,values[index]) diff --git a/deerlab/dd_models.py b/deerlab/dd_models.py index dc32cbaab..1c4d6d3ff 100644 --- a/deerlab/dd_models.py +++ 
b/deerlab/dd_models.py @@ -110,7 +110,7 @@ def _multigaussfun(r,r0,sig): P = np.sqrt(1/(2*np.pi))*1/sig*np.exp(-0.5*((r.T-r0)/sig)**2) if not np.all(P==0): # Normalization - P = np.squeeze(P)/np.sum([np.trapz(c,r) for c in P.T]) + P = np.squeeze(P)/np.sum([np.trapezoid(c,r) for c in P.T]) else: P = np.squeeze(P) return P diff --git a/deerlab/diststats.py b/deerlab/diststats.py index 3497e7b5b..22d5d0229 100644 --- a/deerlab/diststats.py +++ b/deerlab/diststats.py @@ -109,7 +109,7 @@ def normalize(P): # Percentile function def pctile(r,P,p): cdf = cumtrapz(normalize(P),r,initial=0) - cdf, index = np.lib.arraysetops.unique(cdf,return_index=True) + cdf, index = np.unique(cdf,return_index=True) rpctile = np.interp(p/100,cdf,r[index]) return rpctile # Expectation operator function diff --git a/docsrc/source/changelog.rst b/docsrc/source/changelog.rst index d8edd8c8b..c1aa84ca7 100644 --- a/docsrc/source/changelog.rst +++ b/docsrc/source/changelog.rst @@ -28,7 +28,9 @@ Release ``v1.1.3`` - Ongoing ------------------------------------------ - |fix| : Removes unnecessary files from the docs - |efficiency| : Improves the performance of the ``dipolarkernel`` function by 10-30% (:pr:`473`), by caching the interpolation of he effective dipolar evolution time vector. 
-- |fix| : add support for Python 3.12 +- |fix| : Add support for Python 3.12 +- |fix| : Adds support for Numpy 2.0 + Release ``v1.1.2`` - November 2023 ------------------------------------------ diff --git a/test/test_dipolarkernel.py b/test/test_dipolarkernel.py index 8f7c505af..2d198541c 100644 --- a/test/test_dipolarkernel.py +++ b/test/test_dipolarkernel.py @@ -1,6 +1,6 @@ import numpy as np -from numpy import pi, inf, NaN +from numpy import pi, inf from deerlab.bg_models import bg_hom3d,bg_exp from deerlab.dd_models import dd_gauss from deerlab.dipolarkernel import dipolarkernel,elementarykernel_twospin From 19a491fbe0783d61faeda5a37e0c6820074737c4 Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Mon, 15 Jul 2024 13:50:49 +0200 Subject: [PATCH 06/29] Regparam grid bug fix (#477) * Update for 3.12 * Updated workflows and changelog * Upload to 3.12 * Fix bug in regparam grid search Regparam would never build the grid correctly. Now using grid or Brent is automatically determined from number of elements in the regparamrange. * Add extra error messages * Update changelog * Updated Example * Updated test The test has been updated. The previous convergence criteria was unreliable and only worked based on a coincidence. 
* Prepare For Release * Remove duplicate python version --- .github/workflows/ci_scheduled.yml | 2 +- LICENSE | 2 +- deerlab/selregparam.py | 41 ++++++++++++------- deerlab/solvers.py | 4 +- docsrc/source/changelog.rst | 5 ++- examples/intermediate/ex_selregparam.py | 54 ++++++++++++++++++++++++- test/test_selregparam.py | 9 +++-- 7 files changed, 91 insertions(+), 26 deletions(-) diff --git a/.github/workflows/ci_scheduled.yml b/.github/workflows/ci_scheduled.yml index b101659cf..0659c65fc 100644 --- a/.github/workflows/ci_scheduled.yml +++ b/.github/workflows/ci_scheduled.yml @@ -12,7 +12,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-latest, windows-latest] - python-version: [3.8, 3.9, "3.10", "3.11", "3.12","3.12"] + python-version: [3.8, 3.9, "3.10", "3.11", "3.12"] steps: - uses: actions/checkout@v3 diff --git a/LICENSE b/LICENSE index a2fb8b56d..1c241f0ed 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2019-2023 Luis Fabregas, Stefan Stoll, Gunnar Jeschke, and other contributors +Copyright (c) 2019-2024 Luis Fabregas, Stefan Stoll, Gunnar Jeschke, and other contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/deerlab/selregparam.py b/deerlab/selregparam.py index 27a800cfd..fffe61f99 100644 --- a/deerlab/selregparam.py +++ b/deerlab/selregparam.py @@ -8,8 +8,8 @@ import math as m import deerlab as dl -def selregparam(y, A, solver, method='aic', algorithm='brent', noiselvl=None, - searchrange=[1e-8,1e2],regop=None, weights=None, full_output=False, candidates=None): +def selregparam(y, A, solver, method='aic', algorithm='auto', noiselvl=None, + searchrange=[1e-8,1e2],regop=None, weights=None, full_output=False): r""" Selection of optimal regularization parameter based on a selection criterion. 
@@ -52,6 +52,8 @@ def selregparam(y, A, solver, method='aic', algorithm='brent', noiselvl=None, * ``'ncp'`` - Normalized Cumulative Periodogram (NCP) * ``'gml'`` - Generalized Maximum Likelihood (GML) * ``'mcl'`` - Mallows' C_L (MCL) + + If ``'lr'`` or ``'lc'`` is specified, the search algorithm is automatically set to ``'grid'``. weights : array_like, optional Array of weighting coefficients for the individual datasets in global fitting. @@ -60,18 +62,16 @@ def selregparam(y, A, solver, method='aic', algorithm='brent', noiselvl=None, algorithm : string, optional Search algorithm: + * ``'auto'`` - Automatically, selects algrothium based on the searchrange. If the searchrange has two elements its set to ``'brent'`` otherwise to ``'grid'``. * ``'grid'`` - Grid-search, slow. * ``'brent'`` - Brent-algorithm, fast. - The default is ``'brent'``. - - searchrange : two-element list, optional - Search range for the optimization of the regularization parameter with the ``'brent'`` algorithm. - If not specified the default search range defaults to ``[1e-8,1e2]``. + The default is ``'auto'``. - candidates : list, optional - List or array of candidate regularization parameter values to be evaluated with the ``'grid'`` algorithm. - If not specified, these are automatically computed from a grid within ``searchrange``. + searchrange : list, optional + Either the search range for the optimization of the regularization parameter with the ``'brent'`` algorithm. + Or if more than two values are specified, then it is interpreted as candidates for the ``'grid'`` algorithm. + If not specified the default search range defaults to ``[1e-8,1e2]`` and the ``'brent'`` algorithm. regop : 2D array_like, optional Regularization operator matrix, the default is the second-order differential operator. 
@@ -108,6 +108,13 @@ def selregparam(y, A, solver, method='aic', algorithm='brent', noiselvl=None, # If multiple datasets are passed, concatenate the datasets and kernels y, A, weights,_,__, noiselvl = dl.utils.parse_multidatasets(y, A, weights, noiselvl) + if algorithm == 'auto' and len(searchrange) == 2: + algorithm = 'brent' + elif algorithm == 'auto' and len(searchrange) > 2: + algorithm = 'grid' + elif algorithm == 'auto' and len(searchrange) < 2: + raise ValueError("`searchrange` must have at least two elements if `algorithm` is set to `'auto'`") + # The L-curve criteria require a grid-evaluation if method == 'lr' or method == 'lc': algorithm = 'grid' @@ -121,9 +128,11 @@ def selregparam(y, A, solver, method='aic', algorithm='brent', noiselvl=None, evalalpha = lambda alpha: _evalalpha(alpha, y, A, L, solver, method, noiselvl, weights) # Evaluate functional over search range, using specified search method - if algorithm == 'brent': - + if algorithm == 'brent': + # Search boundaries + if len(searchrange) != 2: + raise ValueError("Search range must have two elements for the 'brent' algorithm.") lga_min = m.log10(searchrange[0]) lga_max = m.log10(searchrange[1]) @@ -148,10 +157,10 @@ def register_ouputs(optout): elif algorithm=='grid': # Get range of potential alpha values candidates - if candidates is None: + if len(searchrange) == 2: alphaCandidates = 10**np.linspace(np.log10(searchrange[0]),np.log10(searchrange[1]),60) else: - alphaCandidates = np.atleast_1d(candidates) + alphaCandidates = np.atleast_1d(searchrange) # Evaluate the full grid of alpha-candidates functional,residuals,penalties,alphas_evaled = tuple(zip(*[evalalpha(alpha) for alpha in alphaCandidates])) @@ -176,6 +185,10 @@ def register_ouputs(optout): # Find minimum of the selection functional alphaOpt = alphaCandidates[np.argmin(functional)] + functional = np.array(functional) + residuals = np.array(residuals) + penalties = np.array(penalties) + alphas_evaled = np.array(alphas_evaled) else: raise 
KeyError("Search method not found. Must be either 'brent' or 'grid'.") diff --git a/deerlab/solvers.py b/deerlab/solvers.py index 523afbe15..5fa940135 100644 --- a/deerlab/solvers.py +++ b/deerlab/solvers.py @@ -602,8 +602,8 @@ def linear_problem(y,A,optimize_alpha,alpha): if optimize_alpha: linsolver_result = lambda AtA, Aty: parseResult(linSolver(AtA, Aty)) output = dl.selregparam((y-yfrozen)[mask], Ared[mask,:], linsolver_result, regparam, - weights=weights[mask], regop=L, candidates=regparamrange, - noiselvl=noiselvl,searchrange=regparamrange,full_output=True) + weights=weights[mask], regop=L, noiselvl=noiselvl, + searchrange=regparamrange,full_output=True) alpha = output[0] alpha_stats['alphas_evaled'] = output[1] alpha_stats['functional'] = output[2] diff --git a/docsrc/source/changelog.rst b/docsrc/source/changelog.rst index c1aa84ca7..cbd4db7c6 100644 --- a/docsrc/source/changelog.rst +++ b/docsrc/source/changelog.rst @@ -24,12 +24,13 @@ Release Notes - |fix| : Something which was not working as expected or leading to errors has been fixed. - |api| : This will require changes in your scripts or code. -Release ``v1.1.3`` - Ongoing +Release ``v1.1.3`` - July 2024 ------------------------------------------ - |fix| : Removes unnecessary files from the docs - |efficiency| : Improves the performance of the ``dipolarkernel`` function by 10-30% (:pr:`473`), by caching the interpolation of he effective dipolar evolution time vector. -- |fix| : Add support for Python 3.12 +- |api| : Removes the `candidates` input from `selregparam` and integrates its function into `regparamrange`. 
- |fix| : Adds support for Numpy 2.0 +- |fix| : Add support for Python 3.12 Release ``v1.1.2`` - November 2023 diff --git a/examples/intermediate/ex_selregparam.py b/examples/intermediate/ex_selregparam.py index 417c284d1..929d851ca 100644 --- a/examples/intermediate/ex_selregparam.py +++ b/examples/intermediate/ex_selregparam.py @@ -33,7 +33,7 @@ t = t + tmin # Distance vector -r = np.linspace(1.5,7,50) # nm +r = np.linspace(1.5,7,100) # nm # Construct the model Vmodel = dl.dipolarmodel(t,r, experiment=dl.ex_4pdeer(tau1,tau2, pathways=[1])) @@ -101,7 +101,7 @@ # %% # Over and Under selection of the regularisation parameter -# -------------------------------------------------------- +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # Here we will demonstrate the effect of selecting a regularisation parameter # that is either too small or too large. @@ -145,8 +145,58 @@ # As we can see when the regularisation parameter is too small we still get a high # quality fit in the time domain, however, our distance domain data is now way too # spikey and non-physical. + # In contrast when the regularisation parameter is too large we struggle to get # a good fit, however, we get a much smoother distance distribution. # This could have been seen from the selection functional above. The effect of # lower regularisation parameter had a smaller effect on the functional than the # effect of going to a larger one. + +# Plotting the full L-Curve +# ++++++++++++++++++++++++++ +# Normally the selection of the regularisation parameter is done through a Brent optimisation +# algorithm. This results in the L-Curve being sparsely sampled. However, the full L-Curve can be +# generated by evaluating the functional at specified regularisation paramters. + +# This has the disadvantage of being computationally expensive, however, it can be useful. 
+ +regparamrange = 10**np.linspace(np.log10(1e-6),np.log10(1e1),60) # Builds the range of regularisation parameters +results_grid= dl.fit(Vmodel,Vexp,regparam='bic',regparamrange=regparamrange) +print(results_grid) + +fig, axs =plt.subplots(1,3, figsize=(9,4),width_ratios=(1,1,0.1)) +alphas = results_grid.regparam_stats['alphas_evaled'][:] +funcs = results_grid.regparam_stats['functional'][:] + + +idx = np.argsort(alphas) + +axs[0].semilogx(alphas[idx], funcs[idx],marker='.',ls='-') +axs[0].set_title(r"$\alpha$ selection functional"); +axs[0].set_xlabel("Regularisation Parameter") +axs[0].set_ylabel("Functional Value ") + +# Just the final L-Curve +x = results_grid.regparam_stats['residuals'] +y = results_grid.regparam_stats['penalties'] +idx = np.argsort(x) + + +axs[1].loglog(x[idx],y[idx]) + +n_points = results_grid.regparam_stats['alphas_evaled'].shape[-1] +lams = results_grid.regparam_stats['alphas_evaled'] +norm = mpl.colors.LogNorm(vmin=lams[:].min(), vmax=lams.max()) +for i in range(n_points): + axs[1].plot(x[i], y[i],marker = '.', ms=8, color=cmap(norm(lams[i]))) + +i_optimal = np.argmin(np.abs(lams - results_grid.regparam)) +axs[1].annotate(fr"$\alpha =$ {results_grid.regparam:.2g}", xy = (x[i_optimal],y[i_optimal]),arrowprops=dict(facecolor='black', shrink=0.05, width=5), xytext=(20, 20),textcoords='offset pixels') +fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),cax=axs[2]) +axs[1].set_ylabel("Penalties") +axs[2].set_ylabel("Regularisation Parameter") +axs[1].set_xlabel("Residuals") +axs[1].set_title("L-Curve"); +fig.tight_layout() + +# %% diff --git a/test/test_selregparam.py b/test/test_selregparam.py index 30a64ee9d..74ad7fd25 100644 --- a/test/test_selregparam.py +++ b/test/test_selregparam.py @@ -102,10 +102,11 @@ def test_unconstrained(dataset,design_matrix,regularization_matrix): #======================================================================= def test_manual_candidates(dataset,design_matrix,regularization_matrix): "Check that the 
alpha-search range can be manually passed" - alphas = np.linspace(-8,2,60) - alpha_manual = np.log10(selregparam(dataset,design_matrix,cvxnnls,method='aic',candidates=alphas,regop=regularization_matrix)) - alpha_auto = np.log10(selregparam(dataset,design_matrix,cvxnnls,method='aic',regop=regularization_matrix)) - assert abs(alpha_manual-alpha_auto)<1e-4 + alphas = np.logspace(-8,3,60,base=10) + alpha_manual = np.log10(selregparam(dataset,design_matrix,cvxnnls,method='aic',searchrange=alphas,regop=regularization_matrix)) + alpha_auto = np.log10(selregparam(dataset,design_matrix,cvxnnls,method='aic',searchrange=(1e-8,1e-3), regop=regularization_matrix)) + + assert abs(alpha_manual-alpha_auto)<1 #======================================================================= #======================================================================= From adf7cf456a1150f138531d58e82b4558da3dcdf0 Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Tue, 3 Sep 2024 02:19:14 +0200 Subject: [PATCH 07/29] Sophegrid expansion (#482) --- VERSION | 2 +- deerlab/utils.py | 19 +++++++++++++------ docsrc/source/changelog.rst | 4 ++++ test/test_utils.py | 9 ++++++++- 4 files changed, 26 insertions(+), 8 deletions(-) diff --git a/VERSION b/VERSION index 99a4aef0c..c64122024 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v1.1.3 +v1.1.4 diff --git a/deerlab/utils.py b/deerlab/utils.py index eb06134dd..b52a6cb12 100644 --- a/deerlab/utils.py +++ b/deerlab/utils.py @@ -785,7 +785,7 @@ def read_pickle(filename): # -------------------------------------------------------------------------------------- -def sophegrid(octants,maxphi,size): +def sophegrid(octants,maxphi,size,closed_phi=False): """ Construct spherical grid over spherical angles based on input parameters. The grid implemented in this function is often called the SOPHE grid [1]_. @@ -799,6 +799,9 @@ def sophegrid(octants,maxphi,size): Largest value of angle phi (radians). size : integer Number of orientations between theta=0 and theta=pi/2. 
+ closed_phi : bool + Set to true if grid point at maxPhi should be included, false otherwise. Default is false. + Returns ------- @@ -834,7 +837,10 @@ def sophegrid(octants,maxphi,size): weights = np.zeros(nOrientations) sindth2 = np.sin(dtheta/2) - w1 = 1.0 + if closed_phi: + w1=0.5 + else: + w1 = 1.0 # North pole (z orientation) phi[0] = 0 @@ -861,10 +867,11 @@ def sophegrid(octants,maxphi,size): weights[idx] = sindth2*dPhi*np.concatenate([[w1], np.ones(nPhi-2), [0.5]]) # Border removal - rmv = np.cumsum(nOct*np.arange(1,size)+1) - phi = np.delete(phi,rmv) - theta = np.delete(theta,rmv) - weights = np.delete(weights,rmv) + if not closed_phi: + rmv = np.cumsum(nOct*np.arange(1,size)+1) + phi = np.delete(phi,rmv) + theta = np.delete(theta,rmv) + weights = np.delete(weights,rmv) # For C1, add lower hemisphere if octants==8: diff --git a/docsrc/source/changelog.rst b/docsrc/source/changelog.rst index cbd4db7c6..3b4bed891 100644 --- a/docsrc/source/changelog.rst +++ b/docsrc/source/changelog.rst @@ -24,6 +24,10 @@ Release Notes - |fix| : Something which was not working as expected or leading to errors has been fixed. - |api| : This will require changes in your scripts or code. +Release ``v1.1.4`` - tba +------------------------------------------ +- |enhancement| : Expanded sophgrid to allow for closed phi integral. 
(:pr:`482`) + Release ``v1.1.3`` - July 2024 ------------------------------------------ - |fix| : Removes unnecessary files from the docs diff --git a/test/test_utils.py b/test/test_utils.py index 2df9a899f..aee4960a0 100644 --- a/test/test_utils.py +++ b/test/test_utils.py @@ -13,4 +13,11 @@ def test_sophegrid(): assert np.allclose(weights.sum(),1) assert np.allclose(phi, np.array([0,0,1.57079632679490,3.14159265358979,4.71238898038469,0,0.785398163397448,1.57079632679490,2.35619449019235,3.14159265358979,3.92699081698724,4.71238898038469,5.49778714378214])) assert np.allclose(theta, np.array([0,0.785398163397448,0.785398163397448,0.785398163397448,0.785398163397448,1.57079632679490,1.57079632679490,1.57079632679490,1.57079632679490,1.57079632679490,1.57079632679490,1.57079632679490,1.57079632679490])) - assert np.allclose(weights*4*np.pi, np.array([0.956558005801449,1.70021769237074,1.70021769237074,1.70021769237074,1.70021769237074,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346])) \ No newline at end of file + assert np.allclose(weights*4*np.pi, np.array([0.956558005801449,1.70021769237074,1.70021769237074,1.70021769237074,1.70021769237074,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346])) + + phi, theta, weights = sophegrid(1,np.pi/2,5,closed_phi=True) + + assert np.allclose(weights.sum(),1) + assert np.allclose(phi, np.array([0, 0, 1.5708, 0, 0.7854, 1.5708, 0, 0.5236, 1.0472, 1.5708, 0, 0.3927, 0.7854, 1.1781, 1.5708]),rtol=1e-4) + assert np.allclose(theta, np.array([0, 0.3927, 0.3927, 0.7854, 0.7854, 0.7854, 1.1781, 1.1781, 1.1781, 1.1781, 1.5708, 1.5708, 1.5708, 1.5708, 1.5708]),rtol=1e-4) + assert np.allclose(weights*4*np.pi, np.array([0.24146, 0.93818, 0.93818, 0.86676, 1.7335, 0.86676, 0.75499, 1.51, 1.51, 0.75499, 0.30645, 0.61289, 0.61289, 0.61289, 
0.30645]),rtol=1e-4) From b796f04068336cc26aa29a80684444615838a9e7 Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Tue, 3 Sep 2024 17:36:26 +0200 Subject: [PATCH 08/29] Remove 3.8, Require Numpy 2.0 (#483) * Remove 3.8 Require Numpy 2.0 Co-authored-by: Stefan Stoll --- .github/workflows/ci_scheduled.yml | 2 +- README.md | 4 ++-- setup.py | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci_scheduled.yml b/.github/workflows/ci_scheduled.yml index 0659c65fc..655421332 100644 --- a/.github/workflows/ci_scheduled.yml +++ b/.github/workflows/ci_scheduled.yml @@ -12,7 +12,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-latest, windows-latest] - python-version: [3.8, 3.9, "3.10", "3.11", "3.12"] + python-version: [3.9, "3.10", "3.11", "3.12"] steps: - uses: actions/checkout@v3 diff --git a/README.md b/README.md index 3ade50f14..2442dea3b 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ The early versions of DeerLab (up to version 0.9.2) are written in MATLAB. The o ## Requirements -DeerLab is available for Windows, Mac and Linux systems and requires **Python 3.8**, **3.9**, **3.10**, or **3.11**. +DeerLab is available for Windows, Mac and Linux systems and requires **Python 3.9**, **3.10**, **3.11**, or **3.12**. All additional dependencies are automatically downloaded and installed during the setup. @@ -58,4 +58,4 @@ Here is the citation in bibtex format: DeerLab is licensed under the [MIT License](LICENSE). -Copyright © 2019-2023: Luis Fábregas Ibáñez, Stefan Stoll, Gunnar Jeschke, and [other contributors](https://github.com/JeschkeLab/DeerLab/contributors). +Copyright © 2019-2024: Luis Fábregas Ibáñez, Stefan Stoll, Gunnar Jeschke, and [other contributors](https://github.com/JeschkeLab/DeerLab/contributors). 
diff --git a/setup.py b/setup.py index ad8befbe5..a07479675 100644 --- a/setup.py +++ b/setup.py @@ -11,7 +11,7 @@ 'Documentation': 'https://jeschkelab.github.io/DeerLab/', 'Source': 'https://github.com/JeschkeLab/DeerLab', }, - python_requires='>=3.8', + python_requires='>=3.9', license='LICENSE.txt', include_package_data = True, keywords='data analysis modeling least-squares EPR spectroscopy DEER PELDOR'.split(), @@ -19,9 +19,9 @@ long_description=open('README.md', encoding='utf-8').read(), long_description_content_type="text/markdown", install_requires = [ - 'numpy>=1.22.4', + 'numpy>=2.0', 'cvxopt>=1.0.0', - 'scipy>=1.6.3', + 'scipy>=1.11', 'joblib>=1.0.0', 'dill>=0.3.0', 'tqdm>=4.51.0', @@ -39,10 +39,10 @@ 'Operating System :: Microsoft :: Windows', 'Operating System :: MacOS', 'Operating System :: POSIX :: Linux', - 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', + 'Programming Language :: Python :: 3.12', 'Topic :: Scientific/Engineering :: Chemistry', ] ) From e0a6ddba348e2e039ed06649197d1e4854df1dee Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Tue, 3 Sep 2024 19:55:23 +0200 Subject: [PATCH 09/29] Update changelog (#484) --- docsrc/source/changelog.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docsrc/source/changelog.rst b/docsrc/source/changelog.rst index 3b4bed891..08714a20b 100644 --- a/docsrc/source/changelog.rst +++ b/docsrc/source/changelog.rst @@ -24,9 +24,10 @@ Release Notes - |fix| : Something which was not working as expected or leading to errors has been fixed. - |api| : This will require changes in your scripts or code. -Release ``v1.1.4`` - tba +Release ``v1.1.4`` - September 2024 ------------------------------------------ - |enhancement| : Expanded sophgrid to allow for closed phi integral. 
(:pr:`482`) +- |api| : Removed Python 3.8 support and requires Numpy 2.0 Release ``v1.1.3`` - July 2024 ------------------------------------------ From ff6385d9173df8559051e7355d438a93f5a729b0 Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Thu, 9 Jan 2025 16:18:50 +0100 Subject: [PATCH 10/29] Fixing an issue with time domain bootstrapped uncertanties (#487) * Added modelUncert for bootstrapped * Remove unused input * Always create modelUncert quantification * Allow bootstrap resampling to be variable in propagation * Update examples * Add test for modelUncert output * Minor docstring update * Bootrstap Uncertainty sampling reduction Reduces uncertainty calculation for bootstrapped uncertainties. Previously it was always 1000 samples. This saves around 4s, when using <100 samples. * Add model copying * General improvemements * Fixes issues with functions being deep copyied * Test update -`test_fit_model_confidence_intervals` increased the number of bootstraps so it passes -`test_fit_modelUncert` expanded to test the bootstrapped * Correct number of bootstrap samples * Fix gaussian normalisation issues * Bug fix in test * multi-guass_background fix removal * Fix example for latest matplotlib --- deerlab/classes.py | 4 +- deerlab/dd_models.py | 4 +- deerlab/fit.py | 19 ++++++--- deerlab/fitresult.py | 41 +++++++++++-------- deerlab/model.py | 16 +++++++- .../advanced/ex_dipolarpathways_selection.py | 5 +-- examples/advanced/ex_profileanalysis.py | 2 +- examples/basic/ex_bootstrapping.py | 3 +- examples/basic/ex_fitting_5pdeer.py | 2 +- .../ex_fitting_5pdeer_pathways.py | 2 +- setup.py | 2 +- test/test_model_class.py | 32 ++++++++++++++- 12 files changed, 93 insertions(+), 39 deletions(-) diff --git a/deerlab/classes.py b/deerlab/classes.py index 39063cbf4..49a6ec467 100644 --- a/deerlab/classes.py +++ b/deerlab/classes.py @@ -522,7 +522,7 @@ def propagate(self,model,lb=None,ub=None,samples=None): ubm : ndarray Upper bounds of the values returned by ``model``, by 
default assumed unconstrained. samples : int, optional - Number of samples to use when propagating uncertainty. If not provided, default value is 1000. + Number of samples to use when propagating a bootstrapped uncertainty. If not provided, default value is 1000. Returns ------- @@ -583,7 +583,7 @@ def propagate(self,model,lb=None,ub=None,samples=None): # Get the parameter uncertainty distribution values,pdf = self.pardist(n) # Random sampling form the uncertainty distribution - sampled_parameters[n] = [np.random.choice(values, p=pdf/sum(pdf)) for _ in range(Nsamples)] + sampled_parameters[n] = np.random.choice(values, p=pdf/sum(pdf),size=Nsamples) # Convert to matrix sampled_parameters = np.atleast_2d(sampled_parameters) diff --git a/deerlab/dd_models.py b/deerlab/dd_models.py index 1c4d6d3ff..e590f7c23 100644 --- a/deerlab/dd_models.py +++ b/deerlab/dd_models.py @@ -99,7 +99,7 @@ def docstr_example(fcnstr): # ================================================================= def _normalize(r,P): if not all(P==0): - P = P/np.trapz(P,r) + P = P/np.trapezoid(P,r) return P # ================================================================= @@ -110,7 +110,7 @@ def _multigaussfun(r,r0,sig): P = np.sqrt(1/(2*np.pi))*1/sig*np.exp(-0.5*((r.T-r0)/sig)**2) if not np.all(P==0): # Normalization - P = np.squeeze(P)/np.sum([np.trapezoid(c,r) for c in P.T]) + P = np.squeeze(P)/np.array([np.trapezoid(c,r) for c in P.T]).sum() else: P = np.squeeze(P) return P diff --git a/deerlab/fit.py b/deerlab/fit.py index 7c757649b..71d262310 100644 --- a/deerlab/fit.py +++ b/deerlab/fit.py @@ -361,7 +361,9 @@ def fit(model_, y, *constants, par0=None, penalties=None, bootstrap=0, noiselvl= paramlist : list List of the fitted parameter names ordered according to the model parameter indices. model : ndarray - Fitted model response.
regparam : scalar Regularization parameter value used for the regularization of the linear parameters. penweights : scalar or list thereof @@ -470,7 +472,7 @@ def fit(model_, y, *constants, par0=None, penalties=None, bootstrap=0, noiselvl= fitfcn = lambda y,penweights: snlls(y, Amodel_fcn, par0, lb=lb, ub=ub, lbl=lbl, ubl=ubl, mask=mask, weights=weights, subsets=ysubsets, lin_frozen=linfrozen, nonlin_frozen=nonlinfrozen, regparam=regparam, reg=reg, regparamrange=regparamrange, noiselvl=noiselvl, - extrapenalty=extrapenalties(penweights), **kwargs) + extrapenalty=extrapenalties(penweights), modeluq=True, **kwargs) # Prepare outer optimization of the penalty weights, if necessary fitfcn = _outerOptimization(fitfcn,penalties,sigmas) @@ -491,14 +493,19 @@ def bootstrap_fcn(ysim): else: bootstrap_verbose = False - param_uq = bootstrap_analysis(bootstrap_fcn,ysplit,fitresults.model,samples=bootstrap,noiselvl=noiselvl,cores=bootcores, verbose=bootstrap_verbose) + param_uq = bootstrap_analysis(bootstrap_fcn,ysplit,fitresults.model,samples=bootstrap-1,noiselvl=noiselvl,cores=bootcores, verbose=bootstrap_verbose) # Include information on the boundaries for better uncertainty estimates paramlb = model._vecsort(model._getvector('lb'))[np.concatenate(param_idx)] paramub = model._vecsort(model._getvector('ub'))[np.concatenate(param_idx)] fitresults.paramUncert = UQResult('bootstrap',data=param_uq[0].samples,lb=paramlb,ub=paramub) fitresults.param = fitresults.paramUncert.median + # Get the uncertainty estimates for the model response + modellb = np.min(param_uq[1].samples,axis=0) + modelub = np.max(param_uq[1].samples,axis=0) + fitresults.model = [param_uq[n].median for n in range(1,len(param_uq))] + fitresults.modelUncert = UQResult('bootstrap',data=param_uq[1].samples,lb=modellb,ub=modelub) if len(fitresults.model)==1: fitresults.model = fitresults.model[0] # Get some basic information on the parameter vector @@ -509,7 +516,7 @@ def bootstrap_fcn(ysim): # Dictionary of 
parameter names and fit uncertainties FitResult_paramuq = {f'{key}Uncert': model._getparamuq(fitresults.paramUncert,idx) for key,idx in zip(keys,param_idx)} # Dictionary of other fit quantities of interest - FitResult_dict = {key: getattr(fitresults,key) for key in ['y','mask','param','paramUncert','model','cost','plot','residuals','stats','regparam','regparam_stats','__plot_inputs']} + FitResult_dict = {key: getattr(fitresults,key) for key in ['y','mask','param','paramUncert','model','modelUncert','cost','plot','residuals','stats','regparam','regparam_stats','__plot_inputs']} _paramlist = model._parameter_list('vector') param_idx = [[] for _ in _paramlist] @@ -536,8 +543,8 @@ def _scale(x): FitResult_param_[f'{key}_scale'] = _scale(FitResult_param_[key]) # Normalization factor FitResult_param_[key] = param.normalization(FitResult_param_[key]) # Normalized value - FitResult_paramuq_[f'{key}_scaleUncert'] = FitResult_paramuq_[f'{key}Uncert'].propagate(_scale) - FitResult_paramuq_[f'{key}Uncert'] = FitResult_paramuq_[f'{key}Uncert'].propagate(lambda x: x/FitResult_param_[f'{key}_scale'], lb=param.lb, ub=param.ub) # Normalization of the uncertainty + FitResult_paramuq_[f'{key}_scaleUncert'] = FitResult_paramuq_[f'{key}Uncert'].propagate(_scale,samples=bootstrap) + FitResult_paramuq_[f'{key}Uncert'] = FitResult_paramuq_[f'{key}Uncert'].propagate(lambda x: x/FitResult_param_[f'{key}_scale'], lb=param.lb, ub=param.ub,samples=bootstrap) # Normalization of the uncertainty if len(noiselvl)==1: noiselvl = noiselvl[0] diff --git a/deerlab/fitresult.py b/deerlab/fitresult.py index 4841f9e67..27d33aa93 100644 --- a/deerlab/fitresult.py +++ b/deerlab/fitresult.py @@ -97,20 +97,21 @@ def _extarct_params_from_model(self, model): if not hasattr(self,'param'): raise ValueError('The fit object does not contain any fitted parameters.') - # # Enforce model normalization - # normfactor_keys = [] - # for key in modelparam: - # param = getattr(model,key) - # if np.all(param.linear): - # 
if param.normalization is not None: - # normfactor_key = f'{key}_scale' - # normfactor_keys.append(normfactor_key) - # try: - # model.addnonlinear(normfactor_key,lb=-np.inf,ub=np.inf,par0=1,description=f'Normalization factor of {key}') - # getattr(model,normfactor_key).freeze(1) - # except KeyError: - # pass - + # Enforce model normalization + normfactor_keys = [] + for key in modelparam: + param = getattr(model,key) + if np.all(param.linear): + if param.normalization is not None: + normfactor_key = f'{key}_scale' + normfactor_keys.append(normfactor_key) + try: + model.addnonlinear(normfactor_key,lb=-np.inf,ub=np.inf,par0=1,description=f'Normalization factor of {key}') + getattr(model,normfactor_key).freeze(1) + except KeyError: + pass + modelparam += normfactor_keys + # # Get some basic information on the parameter vector # modelparam = model._parameter_list(order='vector') @@ -187,6 +188,7 @@ def evaluate(self, model, *constants): Model response at the fitted parameter values. """ try: + model = model.copy() modelparam = model._parameter_list('vector') modelparam, fitparams, fitparam_idx = self._extarct_params_from_model(model) except AttributeError: @@ -199,7 +201,7 @@ def evaluate(self, model, *constants): response = model(*constants,**parameters) return response - def propagate(self, model, *constants, lb=None, ub=None): + def propagate(self, model, *constants, lb=None, ub=None,samples=None): """ Propagate the uncertainty in the fit results to a model's response. @@ -223,7 +225,10 @@ def propagate(self, model, *constants, lb=None, ub=None): lb : array_like, optional Lower bounds of the model response. ub : array_like, optional - Upper bounds of the model response. + Upper bounds of the model response. + samples : int, optional + Number of samples to use when propagating a bootstraped uncertainty. If not provided, default value is 1000. 
+ Returns ------- @@ -231,8 +236,10 @@ def propagate(self, model, *constants, lb=None, ub=None): responseUncert : :ref:`UQResult` Uncertainty quantification of the model's response. """ + try: + model = model.copy() modelparam = model._parameter_list('vector') modelparam, fitparams, fitparam_idx = self._extarct_params_from_model(model) @@ -241,7 +248,7 @@ def propagate(self, model, *constants, lb=None, ub=None): # Propagate the uncertainty from that subset to the model - modeluq = self.paramUncert.propagate(lambda param: model(*constants,*[param[s] for s in fitparam_idx]),lb,ub) + modeluq = self.paramUncert.propagate(lambda param: model(*constants,*[param[s] for s in fitparam_idx]),lb,ub,samples) return modeluq diff --git a/deerlab/model.py b/deerlab/model.py index 7ba709dd7..5e4866a3d 100644 --- a/deerlab/model.py +++ b/deerlab/model.py @@ -218,6 +218,13 @@ def unfreeze(self): self.frozen = False self.value = None #--------------------------------------------------------------------------------------- + def copy(self): + """ + Return a deep copy of the parameter + """ + + return deepcopy(self) + #=================================================================================== @@ -867,7 +874,7 @@ def __call__(self,*args,**kargs): # Check that all parameters have been passed if len(θ)!=self.Nparam: - raise SyntaxError(f'The model requires {self.Nparam} parameters, but {len(args_list)} were specified.') + raise SyntaxError(f'The model requires {self.Nparam} parameters, but {len(θ)} were specified.') # Determine which parameters are linear and which nonlinear θlin, θnonlin = self._split_linear(θ) @@ -973,6 +980,13 @@ def __str__(self): """ return self._parameter_table() #--------------------------------------------------------------------------------------- + def copy(self): + """ + Return a deep copy of the model + """ + + return deepcopy(self) + #=================================================================================== 
#============================================================================== diff --git a/examples/advanced/ex_dipolarpathways_selection.py b/examples/advanced/ex_dipolarpathways_selection.py index 57fb0e189..bf2551744 100644 --- a/examples/advanced/ex_dipolarpathways_selection.py +++ b/examples/advanced/ex_dipolarpathways_selection.py @@ -11,7 +11,6 @@ import numpy as np import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpec -from mpl_toolkits.axes_grid1.inset_locator import inset_axes, InsetPosition import deerlab as dl @@ -79,9 +78,7 @@ Pfit = fits[n].P Pci = fits[n].PUncert.ci(95) # Setup the inset plot - axins = inset_axes(ax1,width="30%", height="30%", loc='upper left') - ip = InsetPosition(ax1,[0.35, 0.17+0.24*n, 0.6, 0.1]) - axins.set_axes_locator(ip) + axins = ax1.inset_axes([0.35, 0.17+0.24*n, 0.6, 0.1]) axins.yaxis.set_ticklabels([]) axins.yaxis.set_visible(False) # Plot the distance distributions and their confidence bands diff --git a/examples/advanced/ex_profileanalysis.py b/examples/advanced/ex_profileanalysis.py index 82d5fc424..6a195bd50 100644 --- a/examples/advanced/ex_profileanalysis.py +++ b/examples/advanced/ex_profileanalysis.py @@ -48,7 +48,7 @@ #%% # Extract fitted dipolar signal Vfit = results.model -Vci = results.propagate(Vmodel).ci(95) +Vci = results.modelUncert.ci(95) # Extract fitted distance distribution Pfit = results.P diff --git a/examples/basic/ex_bootstrapping.py b/examples/basic/ex_bootstrapping.py index de9a558fa..196bd772e 100644 --- a/examples/basic/ex_bootstrapping.py +++ b/examples/basic/ex_bootstrapping.py @@ -59,7 +59,7 @@ # Extract fitted dipolar signal Vfit = results.model -Vci = results.propagate(Vmodel).ci(95) +Vci = results.modelUncert.ci(95) # Extract fitted distance distribution Pfit = results.P @@ -78,6 +78,7 @@ plt.plot(t,Vexp,'.',color='grey',label='Data') # Plot the fitted signal plt.plot(t,Vfit,linewidth=3,label='Bootstrap median',color=violet) 
+plt.fill_between(t,Vci[:,0],Vci[:,1],linewidth=0.1,label='Bootstrap median',color=violet,alpha=0.3) plt.plot(t,Bfit,'--',linewidth=3,color=violet,label='Unmodulated contribution') plt.legend(frameon=False,loc='best') plt.xlabel('Time $t$ (μs)') diff --git a/examples/basic/ex_fitting_5pdeer.py b/examples/basic/ex_fitting_5pdeer.py index 83d7519db..432d31292 100644 --- a/examples/basic/ex_fitting_5pdeer.py +++ b/examples/basic/ex_fitting_5pdeer.py @@ -58,7 +58,7 @@ # Extract fitted dipolar signal Vfit = results.model -Vci = results.propagate(Vmodel).ci(95) +Vci = results.modelUncert.ci(95) # Extract fitted distance distribution Pfit = results.P diff --git a/examples/intermediate/ex_fitting_5pdeer_pathways.py b/examples/intermediate/ex_fitting_5pdeer_pathways.py index 370a7d937..516e50dce 100644 --- a/examples/intermediate/ex_fitting_5pdeer_pathways.py +++ b/examples/intermediate/ex_fitting_5pdeer_pathways.py @@ -47,7 +47,7 @@ # Extract fitted dipolar signal Vfit = results.model -Vci = results.propagate(Vmodel).ci(95) +Vci = results.modelUncert.ci(95) # Extract fitted distance distribution Pfit = results.P diff --git a/setup.py b/setup.py index a07479675..5d237fc5b 100644 --- a/setup.py +++ b/setup.py @@ -25,7 +25,7 @@ 'joblib>=1.0.0', 'dill>=0.3.0', 'tqdm>=4.51.0', - 'matplotlib>=3.3.4', + 'matplotlib>=3.6.0', 'memoization>=0.3.1', 'pytest>=6.2.2', 'setuptools>=53.0.0', diff --git a/test/test_model_class.py b/test/test_model_class.py index b8f6b8648..1a4c8a982 100644 --- a/test/test_model_class.py +++ b/test/test_model_class.py @@ -42,7 +42,14 @@ def mock_x(): @pytest.fixture(scope='module') def mock_data(mock_x): - return bigauss(mock_x,mean1=3,mean2=4,std1=0.5,std2=0.2,amp1=0.5,amp2=0.6) + data = bigauss(mock_x,mean1=3,mean2=4,std1=0.5,std2=0.2,amp1=0.5,amp2=0.6) + return data + +@pytest.fixture(scope='module') +def mock_data_noise(mock_x): + data = bigauss(mock_x,mean1=3,mean2=4,std1=0.5,std2=0.2,amp1=0.5,amp2=0.6) + data += whitegaussnoise(mock_x,0.01,seed=1) + 
return data model_types = ['parametric','semiparametric','semiparametric_vec', 'nonparametric','nonparametric_vec','nonparametric_vec_normalized'] @@ -511,7 +518,7 @@ def test_fit_model_confidence_intervals(mock_data,model_type,method): model = _generate_model(model_type, fixed_axis=True) if method=='bootstrap': - results = fit(model,mock_data + whitegaussnoise(x,0.01,seed=1), bootstrap=3) + results = fit(model,mock_data + whitegaussnoise(x,0.01,seed=1), bootstrap=100) else: results = fit(model,mock_data + whitegaussnoise(x,0.01,seed=1)) @@ -546,6 +553,27 @@ def test_fit_evaluate_model(mock_data,mock_x,model_type): response *= results.scale assert np.allclose(response,mock_data) + +# ================================================================ +@pytest.mark.parametrize('method', ['bootstrap','moment']) +@pytest.mark.parametrize('model_type', model_types) +def test_fit_modelUncert(mock_data_noise,mock_x,model_type,method): + "Check that the uncertainty of fit results can be calculated and is the uncertainty of the model is non zero for all but nonparametric models" + model = _generate_model(model_type, fixed_axis=False) + + if method=='bootstrap': + results = fit(model,mock_data_noise,mock_x, bootstrap=3) + else: + results = fit(model,mock_data_noise,mock_x) + + assert hasattr(results,'modelUncert') + ci_lower = results.modelUncert.ci(95)[:,0] + ci_upper = results.modelUncert.ci(95)[:,1] + assert np.less_equal(ci_lower,ci_upper).all() + if model_type != 'nonparametric' and model_type != 'nonparametric_vec' and model_type != 'nonparametric_vec_normalized': + assert np.all(np.round(ci_lower) <= np.round(results.model)) and np.less(ci_lower.sum(),results.model.sum()) + assert np.all(np.round(ci_upper,5) >= np.round(results.model,5)) and np.greater(ci_upper.sum(),results.model.sum()) + # ================================================================ # ================================================================ From 01ba8013b95c6e4a6a4ba5f5d9588d2a79befdd4 Mon 
Sep 17 00:00:00 2001 From: Hugo Karas Date: Thu, 20 Mar 2025 16:08:02 +0100 Subject: [PATCH 11/29] Sophegrid Bug Fix (#495) - Fixes nOctants =0 and -1 - Adds some tests --- deerlab/utils.py | 8 ++++---- test/test_utils.py | 16 +++++++++++++++- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/deerlab/utils.py b/deerlab/utils.py index b52a6cb12..8e1219088 100644 --- a/deerlab/utils.py +++ b/deerlab/utils.py @@ -885,15 +885,15 @@ def sophegrid(octants,maxphi,size,closed_phi=False): elif octants==0: # Dinfh symmetry (quarter of meridian in xz plane) - phi = np.zeros(1,size) + phi = np.zeros(size) theta = np.linspace(0,np.pi/2,size) weights = -2*(2*np.pi)*np.diff(np.cos(np.concatenate([[0], np.arange(dtheta/2,np.pi/2,dtheta), [np.pi/2]]))); # sum = 4*pi elif octants==-1: # O3 symmetry (z orientation only) - phi = 0 - theta = 0 - weights = 4*np.pi + phi = np.array([0]) + theta = np.array([0]) + weights = np.array([4*np.pi]) else: raise ValueError('Unsupported value #d for octants.',octants) diff --git a/test/test_utils.py b/test/test_utils.py index aee4960a0..f111e0093 100644 --- a/test/test_utils.py +++ b/test/test_utils.py @@ -16,8 +16,22 @@ def test_sophegrid(): assert np.allclose(weights*4*np.pi, np.array([0.956558005801449,1.70021769237074,1.70021769237074,1.70021769237074,1.70021769237074,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346])) phi, theta, weights = sophegrid(1,np.pi/2,5,closed_phi=True) - + assert np.allclose(weights.sum(),1) assert np.allclose(phi, np.array([0, 0, 1.5708, 0, 0.7854, 1.5708, 0, 0.5236, 1.0472, 1.5708, 0, 0.3927, 0.7854, 1.1781, 1.5708]),rtol=1e-4) assert np.allclose(theta, np.array([0, 0.3927, 0.3927, 0.7854, 0.7854, 0.7854, 1.1781, 1.1781, 1.1781, 1.1781, 1.5708, 1.5708, 1.5708, 1.5708, 1.5708]),rtol=1e-4) assert np.allclose(weights*4*np.pi, np.array([0.24146, 0.93818, 0.93818, 0.86676, 1.7335, 0.86676, 0.75499, 1.51, 1.51, 
0.75499, 0.30645, 0.61289, 0.61289, 0.61289, 0.30645]),rtol=1e-4) + + + phi, theta, weights = sophegrid(0,0,6) + assert np.allclose(weights.sum(),1) + assert np.allclose(phi, np.array([0, 0, 0, 0, 0, 0]),rtol=1e-3) + assert np.allclose(theta, np.array([0, 0.3142, 0.6283, 0.9425, 1.2566, 1.5708]),rtol=1e-3) + assert np.allclose(weights*4*np.pi, np.array([0.1547, 1.2149, 2.3110, 3.1808, 3.7392, 1.9658]),rtol=1e-3) + + phi, theta, weights = sophegrid(-1,0,6) + + assert np.allclose(weights.sum(),1) + assert np.allclose(phi, np.array([0]),rtol=1e-4) + assert np.allclose(theta, np.array([0]),rtol=1e-4) + assert np.allclose(weights*4*np.pi, np.array([12.5664]),rtol=1e-4) From 0a1e69ca1a3eed02693d49aeb1d1516098a8fc2e Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Fri, 3 Nov 2023 12:20:13 +0100 Subject: [PATCH 12/29] Push patch into V1.1 (#467) * Increase version number and update changelog (#455) * Bugfixes 4th sep (#460) * Fix normalisation in rice model Closes #459 * Remove three spin anaysis Closes #427 * Improved installation instructions * Caution about difference in definition of cost function Closes #450 * Bump Version * Bug for non linearly constrained problems When a problem is not linearly constrained and not non-negative, the linear solver outputs a result class not the solution. 
* Keeping changelog up-to date * Fixing Sophgrid bug (#464) * Fixing Sophgrid bug * Add unit test for sophgrid * Update changelog.rst for new release (#466) --- VERSION | 2 +- deerlab/solvers.py | 7 +++---- deerlab/utils.py | 28 ++++++++++------------------ docsrc/source/changelog.rst | 18 ------------------ test/test_utils.py | 23 +---------------------- 5 files changed, 15 insertions(+), 63 deletions(-) diff --git a/VERSION b/VERSION index 3e0c29c66..0f1acbd56 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v1.1.5 +v1.1.2 diff --git a/deerlab/solvers.py b/deerlab/solvers.py index da46fd0cc..1052d7b39 100644 --- a/deerlab/solvers.py +++ b/deerlab/solvers.py @@ -462,7 +462,6 @@ def snlls(y, Amodel, par0=None, lb=None, ub=None, lbl=None, ubl=None, nnlsSolver * ``stats['aic']`` - Akaike information criterion * ``stats['aicc']`` - Corrected Akaike information criterion * ``stats['bic']`` - Bayesian information criterion - * ``stats['autocorr']`` - Autocorrelation based on Durbin–Watson statistic success : bool Whether or not the optimizer exited successfully. 
cost : float @@ -602,8 +601,8 @@ def linear_problem(y,A,optimize_alpha,alpha): if optimize_alpha: linsolver_result = lambda AtA, Aty: parseResult(linSolver(AtA, Aty)) output = dl.selregparam((y-yfrozen)[mask], Ared[mask,:], linsolver_result, regparam, - weights=weights[mask], regop=L, noiselvl=noiselvl, - searchrange=regparamrange,full_output=True) + weights=weights[mask], regop=L, candidates=regparamrange, + noiselvl=noiselvl,searchrange=regparamrange,full_output=True) alpha = output[0] alpha_stats['alphas_evaled'] = output[1] alpha_stats['functional'] = output[2] @@ -794,7 +793,7 @@ def uq_subset(uq_full,subset,subset_lb,subset_ub): # Jacobian (non-linear part) Jnonlin = Jacobian(_ResidualsFcn,nonlinfit,lb,ub) # Jacobian (linear part) - scale = np.trapezoid(linfit,np.arange(Nlin)) + scale = np.trapz(linfit,np.arange(Nlin)) Jlin = (weights[:,np.newaxis]*Amodel(nonlinfit))[mask,:] if includeExtrapenalty: for penalty in extrapenalty: diff --git a/deerlab/utils.py b/deerlab/utils.py index 8e1219088..3b7f69d13 100644 --- a/deerlab/utils.py +++ b/deerlab/utils.py @@ -282,7 +282,6 @@ def goodness_of_fit(x,xfit,Ndof,noiselvl): stats['aic'] - Akaike information criterion stats['aicc'] - Corrected Akaike information criterion stats['bic'] - Bayesian information criterion - stats['autocorr'] - Autocorrelation based on Durbin–Watson statistic """ sigma = noiselvl Ndof = np.maximum(Ndof,1) @@ -785,7 +784,7 @@ def read_pickle(filename): # -------------------------------------------------------------------------------------- -def sophegrid(octants,maxphi,size,closed_phi=False): +def sophegrid(octants,maxphi,size): """ Construct spherical grid over spherical angles based on input parameters. The grid implemented in this function is often called the SOPHE grid [1]_. @@ -799,9 +798,6 @@ def sophegrid(octants,maxphi,size,closed_phi=False): Largest value of angle phi (radians). size : integer Number of orientations between theta=0 and theta=pi/2. 
- closed_phi : bool - Set to true if grid point at maxPhi should be included, false otherwise. Default is false. - Returns ------- @@ -837,10 +833,7 @@ def sophegrid(octants,maxphi,size,closed_phi=False): weights = np.zeros(nOrientations) sindth2 = np.sin(dtheta/2) - if closed_phi: - w1=0.5 - else: - w1 = 1.0 + w1 = 1.0 # North pole (z orientation) phi[0] = 0 @@ -867,11 +860,10 @@ def sophegrid(octants,maxphi,size,closed_phi=False): weights[idx] = sindth2*dPhi*np.concatenate([[w1], np.ones(nPhi-2), [0.5]]) # Border removal - if not closed_phi: - rmv = np.cumsum(nOct*np.arange(1,size)+1) - phi = np.delete(phi,rmv) - theta = np.delete(theta,rmv) - weights = np.delete(weights,rmv) + rmv = np.cumsum(nOct*np.arange(1,size)+1) + phi = np.delete(phi,rmv) + theta = np.delete(theta,rmv) + weights = np.delete(weights,rmv) # For C1, add lower hemisphere if octants==8: @@ -885,15 +877,15 @@ def sophegrid(octants,maxphi,size,closed_phi=False): elif octants==0: # Dinfh symmetry (quarter of meridian in xz plane) - phi = np.zeros(size) + phi = np.zeros(1,size) theta = np.linspace(0,np.pi/2,size) weights = -2*(2*np.pi)*np.diff(np.cos(np.concatenate([[0], np.arange(dtheta/2,np.pi/2,dtheta), [np.pi/2]]))); # sum = 4*pi elif octants==-1: # O3 symmetry (z orientation only) - phi = np.array([0]) - theta = np.array([0]) - weights = np.array([4*np.pi]) + phi = 0 + theta = 0 + weights = 4*np.pi else: raise ValueError('Unsupported value #d for octants.',octants) diff --git a/docsrc/source/changelog.rst b/docsrc/source/changelog.rst index 0655c6ead..894c269fb 100644 --- a/docsrc/source/changelog.rst +++ b/docsrc/source/changelog.rst @@ -24,24 +24,6 @@ Release Notes - |fix| : Something which was not working as expected or leading to errors has been fixed. - |api| : This will require changes in your scripts or code. - -Release ``v1.1.5`` - January 2025 -- |fix|: Moves to numpy 2.0 as a mimum requirement, and removes all `np.trapz` calls to `np.trapezoid`. 
- -Release ``v1.1.4`` - September 2024 ------------------------------------------- -- |enhancement| : Expanded sophgrid to allow for closed phi integral. (:pr:`482`) -- |api| : Removed Python 3.8 support and requires Numpy 2.0 - -Release ``v1.1.3`` - July 2024 ------------------------------------------- -- |fix| : Removes unnecessary files from the docs -- |efficiency| : Improves the performance of the ``dipolarkernel`` function by 10-30% (:pr:`473`), by caching the interpolation of he effective dipolar evolution time vector. -- |api| : Removes the `candidates` input from `selregparam` and integrates its function into `regparamrange`. -- |fix| : Adds support for Numpy 2.0 -- |fix| : Add support for Python 3.12 - - Release ``v1.1.2`` - November 2023 ------------------------------------------ - |fix| : Fixes an issue with sophgrid (:pr:`463`). diff --git a/test/test_utils.py b/test/test_utils.py index f111e0093..2df9a899f 100644 --- a/test/test_utils.py +++ b/test/test_utils.py @@ -13,25 +13,4 @@ def test_sophegrid(): assert np.allclose(weights.sum(),1) assert np.allclose(phi, np.array([0,0,1.57079632679490,3.14159265358979,4.71238898038469,0,0.785398163397448,1.57079632679490,2.35619449019235,3.14159265358979,3.92699081698724,4.71238898038469,5.49778714378214])) assert np.allclose(theta, np.array([0,0.785398163397448,0.785398163397448,0.785398163397448,0.785398163397448,1.57079632679490,1.57079632679490,1.57079632679490,1.57079632679490,1.57079632679490,1.57079632679490,1.57079632679490,1.57079632679490])) - assert np.allclose(weights*4*np.pi, np.array([0.956558005801449,1.70021769237074,1.70021769237074,1.70021769237074,1.70021769237074,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346])) - - phi, theta, weights = sophegrid(1,np.pi/2,5,closed_phi=True) - - assert np.allclose(weights.sum(),1) - assert np.allclose(phi, np.array([0, 0, 1.5708, 0, 0.7854, 1.5708, 0, 0.5236, 
1.0472, 1.5708, 0, 0.3927, 0.7854, 1.1781, 1.5708]),rtol=1e-4) - assert np.allclose(theta, np.array([0, 0.3927, 0.3927, 0.7854, 0.7854, 0.7854, 1.1781, 1.1781, 1.1781, 1.1781, 1.5708, 1.5708, 1.5708, 1.5708, 1.5708]),rtol=1e-4) - assert np.allclose(weights*4*np.pi, np.array([0.24146, 0.93818, 0.93818, 0.86676, 1.7335, 0.86676, 0.75499, 1.51, 1.51, 0.75499, 0.30645, 0.61289, 0.61289, 0.61289, 0.30645]),rtol=1e-4) - - - phi, theta, weights = sophegrid(0,0,6) - assert np.allclose(weights.sum(),1) - assert np.allclose(phi, np.array([0, 0, 0, 0, 0, 0]),rtol=1e-3) - assert np.allclose(theta, np.array([0, 0.3142, 0.6283, 0.9425, 1.2566, 1.5708]),rtol=1e-3) - assert np.allclose(weights*4*np.pi, np.array([0.1547, 1.2149, 2.3110, 3.1808, 3.7392, 1.9658]),rtol=1e-3) - - phi, theta, weights = sophegrid(-1,0,6) - - assert np.allclose(weights.sum(),1) - assert np.allclose(phi, np.array([0]),rtol=1e-4) - assert np.allclose(theta, np.array([0]),rtol=1e-4) - assert np.allclose(weights*4*np.pi, np.array([12.5664]),rtol=1e-4) + assert np.allclose(weights*4*np.pi, np.array([0.956558005801449,1.70021769237074,1.70021769237074,1.70021769237074,1.70021769237074,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346,0.601117729884346])) \ No newline at end of file From b1732b3b70c2ccc20322f7811db4ca0f5ba3e3a3 Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Sun, 14 Apr 2024 02:15:34 +0100 Subject: [PATCH 13/29] Removes katex and others (#471) * Fixing Sophgrid bug * Add unit test for sophgrid * Minor doc update * Bump version Number * Remove unnecessary doc files * Update changelog --- VERSION | 2 +- deerlab/solvers.py | 1 + deerlab/utils.py | 1 + docsrc/source/changelog.rst | 4 ++++ 4 files changed, 7 insertions(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 0f1acbd56..99a4aef0c 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v1.1.2 +v1.1.3 diff --git a/deerlab/solvers.py b/deerlab/solvers.py index 
1052d7b39..523afbe15 100644 --- a/deerlab/solvers.py +++ b/deerlab/solvers.py @@ -462,6 +462,7 @@ def snlls(y, Amodel, par0=None, lb=None, ub=None, lbl=None, ubl=None, nnlsSolver * ``stats['aic']`` - Akaike information criterion * ``stats['aicc']`` - Corrected Akaike information criterion * ``stats['bic']`` - Bayesian information criterion + * ``stats['autocorr']`` - Autocorrelation based on Durbin–Watson statistic success : bool Whether or not the optimizer exited successfully. cost : float diff --git a/deerlab/utils.py b/deerlab/utils.py index 3b7f69d13..eb06134dd 100644 --- a/deerlab/utils.py +++ b/deerlab/utils.py @@ -282,6 +282,7 @@ def goodness_of_fit(x,xfit,Ndof,noiselvl): stats['aic'] - Akaike information criterion stats['aicc'] - Corrected Akaike information criterion stats['bic'] - Bayesian information criterion + stats['autocorr'] - Autocorrelation based on Durbin–Watson statistic """ sigma = noiselvl Ndof = np.maximum(Ndof,1) diff --git a/docsrc/source/changelog.rst b/docsrc/source/changelog.rst index 894c269fb..252ef0fcf 100644 --- a/docsrc/source/changelog.rst +++ b/docsrc/source/changelog.rst @@ -24,6 +24,10 @@ Release Notes - |fix| : Something which was not working as expected or leading to errors has been fixed. - |api| : This will require changes in your scripts or code. +Release ``v1.1.3`` - Ongoing +------------------------------------------ +- |fix| : Removes unnecessary files from the docs + Release ``v1.1.2`` - November 2023 ------------------------------------------ - |fix| : Fixes an issue with sophgrid (:pr:`463`). From aca56fcecc7061119d1d0a9def6cc67a3f9fb707 Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Wed, 12 Jun 2024 19:31:44 +0200 Subject: [PATCH 14/29] Dipolarkernal speedup (#473) * Seperate Kinterpolator into its own function Interpolation in Scipy is very slow, currently the same interpolation is being rerun for every calculation of the dipolarkernal. This is now cached to speed it up. 
* Only run orientation selection on grid and integral based kernals This hunk of code is not needed when fresnel integrals are used so does not need to be evaluated * Update changelog --- docsrc/source/changelog.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docsrc/source/changelog.rst b/docsrc/source/changelog.rst index 252ef0fcf..3aa922321 100644 --- a/docsrc/source/changelog.rst +++ b/docsrc/source/changelog.rst @@ -27,6 +27,7 @@ Release Notes Release ``v1.1.3`` - Ongoing ------------------------------------------ - |fix| : Removes unnecessary files from the docs +- |efficiency| : Improves the performance of the ``dipolarkernel`` function by 10-30% (:pr:`473`), by caching the interpolation of he effective dipolar evolution time vector. Release ``v1.1.2`` - November 2023 ------------------------------------------ From a136a5d3703da779ba8a661bfb39da48f11189ae Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Mon, 1 Jul 2024 17:21:18 +0200 Subject: [PATCH 15/29] Adding support for scipy 1.14 and tests for Python 3.12 (#474) * Update for 3.12 * Updated workflows and changelog * Upload to 3.12 --- .github/workflows/ci_scheduled.yml | 2 +- docsrc/source/changelog.rst | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci_scheduled.yml b/.github/workflows/ci_scheduled.yml index 655421332..b101659cf 100644 --- a/.github/workflows/ci_scheduled.yml +++ b/.github/workflows/ci_scheduled.yml @@ -12,7 +12,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-latest, windows-latest] - python-version: [3.9, "3.10", "3.11", "3.12"] + python-version: [3.8, 3.9, "3.10", "3.11", "3.12","3.12"] steps: - uses: actions/checkout@v3 diff --git a/docsrc/source/changelog.rst b/docsrc/source/changelog.rst index 3aa922321..d8edd8c8b 100644 --- a/docsrc/source/changelog.rst +++ b/docsrc/source/changelog.rst @@ -28,6 +28,7 @@ Release ``v1.1.3`` - Ongoing ------------------------------------------ - |fix| : Removes unnecessary files from the docs - 
|efficiency| : Improves the performance of the ``dipolarkernel`` function by 10-30% (:pr:`473`), by caching the interpolation of he effective dipolar evolution time vector. +- |fix| : add support for Python 3.12 Release ``v1.1.2`` - November 2023 ------------------------------------------ From 1728b020e31e30f1c92173da38465a6eb653d044 Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Tue, 2 Jul 2024 07:57:54 +0200 Subject: [PATCH 16/29] Numpy2 support (#479) * Update for 3.12 * Updated workflows and changelog * Upload to 3.12 * Numpy 2.0 compatibility updates * Update Changelog --- deerlab/dd_models.py | 6 +++--- docsrc/source/changelog.rst | 4 +++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/deerlab/dd_models.py b/deerlab/dd_models.py index cab6ee244..1c4d6d3ff 100644 --- a/deerlab/dd_models.py +++ b/deerlab/dd_models.py @@ -99,7 +99,7 @@ def docstr_example(fcnstr): # ================================================================= def _normalize(r,P): if not all(P==0): - P = P/np.trapezoid(P,r) + P = P/np.trapz(P,r) return P # ================================================================= @@ -110,7 +110,7 @@ def _multigaussfun(r,r0,sig): P = np.sqrt(1/(2*np.pi))*1/sig*np.exp(-0.5*((r.T-r0)/sig)**2) if not np.all(P==0): # Normalization - P = np.squeeze(P)/np.array([np.trapezoid(c,r) for c in P.T]).sum() + P = np.squeeze(P)/np.sum([np.trapezoid(c,r) for c in P.T]) else: P = np.squeeze(P) return P @@ -129,7 +129,7 @@ def _multirice3dfun(r,nu,sig): P[P<0] = 0 # Normalization - P = np.squeeze(P)/np.sum([np.trapezoid(c,np.squeeze(r)) for c in P.T]) + P = np.squeeze(P)/np.sum([np.trapz(c,np.squeeze(r)) for c in P.T]) return P # ================================================================= diff --git a/docsrc/source/changelog.rst b/docsrc/source/changelog.rst index d8edd8c8b..c1aa84ca7 100644 --- a/docsrc/source/changelog.rst +++ b/docsrc/source/changelog.rst @@ -28,7 +28,9 @@ Release ``v1.1.3`` - Ongoing ------------------------------------------ 
- |fix| : Removes unnecessary files from the docs - |efficiency| : Improves the performance of the ``dipolarkernel`` function by 10-30% (:pr:`473`), by caching the interpolation of he effective dipolar evolution time vector. -- |fix| : add support for Python 3.12 +- |fix| : Add support for Python 3.12 +- |fix| : Adds support for Numpy 2.0 + Release ``v1.1.2`` - November 2023 ------------------------------------------ From 660e706b121ee0ec54ec9e7fd4eccc76aa30aa42 Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Mon, 15 Jul 2024 13:50:49 +0200 Subject: [PATCH 17/29] Regparam grid bug fix (#477) * Update for 3.12 * Updated workflows and changelog * Upload to 3.12 * Fix bug in regparam grid search Regparam would never build the grid correctly. Now using grid or Brent is automatically determined from number of elements in the regparamrange. * Add extra error messages * Update changelog * Updated Example * Updated test The test has been updated. The previous convergence criteria was unreliable and only worked based on a coincidence. 
* Prepare For Release * Remove duplicate python version --- .github/workflows/ci_scheduled.yml | 2 +- deerlab/solvers.py | 4 ++-- docsrc/source/changelog.rst | 5 +++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci_scheduled.yml b/.github/workflows/ci_scheduled.yml index b101659cf..0659c65fc 100644 --- a/.github/workflows/ci_scheduled.yml +++ b/.github/workflows/ci_scheduled.yml @@ -12,7 +12,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-latest, windows-latest] - python-version: [3.8, 3.9, "3.10", "3.11", "3.12","3.12"] + python-version: [3.8, 3.9, "3.10", "3.11", "3.12"] steps: - uses: actions/checkout@v3 diff --git a/deerlab/solvers.py b/deerlab/solvers.py index 523afbe15..5fa940135 100644 --- a/deerlab/solvers.py +++ b/deerlab/solvers.py @@ -602,8 +602,8 @@ def linear_problem(y,A,optimize_alpha,alpha): if optimize_alpha: linsolver_result = lambda AtA, Aty: parseResult(linSolver(AtA, Aty)) output = dl.selregparam((y-yfrozen)[mask], Ared[mask,:], linsolver_result, regparam, - weights=weights[mask], regop=L, candidates=regparamrange, - noiselvl=noiselvl,searchrange=regparamrange,full_output=True) + weights=weights[mask], regop=L, noiselvl=noiselvl, + searchrange=regparamrange,full_output=True) alpha = output[0] alpha_stats['alphas_evaled'] = output[1] alpha_stats['functional'] = output[2] diff --git a/docsrc/source/changelog.rst b/docsrc/source/changelog.rst index c1aa84ca7..cbd4db7c6 100644 --- a/docsrc/source/changelog.rst +++ b/docsrc/source/changelog.rst @@ -24,12 +24,13 @@ Release Notes - |fix| : Something which was not working as expected or leading to errors has been fixed. - |api| : This will require changes in your scripts or code. 
-Release ``v1.1.3`` - Ongoing +Release ``v1.1.3`` - July 2024 ------------------------------------------ - |fix| : Removes unnecessary files from the docs - |efficiency| : Improves the performance of the ``dipolarkernel`` function by 10-30% (:pr:`473`), by caching the interpolation of he effective dipolar evolution time vector. -- |fix| : Add support for Python 3.12 +- |api| : Removes the `candidates` input from `selregparam` and integrates its function into `regparamrange`. - |fix| : Adds support for Numpy 2.0 +- |fix| : Add support for Python 3.12 Release ``v1.1.2`` - November 2023 From 96d25e6a91eac8659fd79d9c54af8502f5d3d5e8 Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Tue, 17 Feb 2026 20:33:08 +0100 Subject: [PATCH 18/29] Replaced required callable input for `UQresult` - Changed `UQresult`, when used for profile analysis, so that threshold can be supplied with parameters not with a function. - Changed the creation of `UQresult` in `profile_analysis.py` so that it supplies arguments not functions. --- deerlab/classes.py | 29 ++++++++++++++++++++++------- deerlab/profile_analysis.py | 5 ++--- test/test_uqresult.py | 6 +++--- 3 files changed, 27 insertions(+), 13 deletions(-) diff --git a/deerlab/classes.py b/deerlab/classes.py index 16f0579e9..6f137784e 100644 --- a/deerlab/classes.py +++ b/deerlab/classes.py @@ -3,7 +3,7 @@ import numpy as np from deerlab.utils import Jacobian, nearest_psd -from scipy.stats import norm +from scipy.stats import norm, chi2 from scipy.signal import fftconvolve from scipy.linalg import block_diag from scipy.optimize import brentq @@ -51,14 +51,15 @@ class UQResult: profile : ndarray Likelihood profile of the parameters. Only available for ``type='profile'``. - threshold : scalar - Treshold value used for the profile method. Only available for ``type='profile'``. + threshold : function + Treshold function used for the profile method. Only available for ``type='profile'``. 
""" - def __init__(self,uqtype,data=None,covmat=None,lb=None,ub=None,threshold=None,profiles=None,noiselvl=None): + def __init__(self,uqtype,data=None,covmat=None,lb=None,ub=None, + threshold_inputs=None,threshold=None,profiles=None,noiselvl=None): r""" Initializes the UQResult object. @@ -85,9 +86,15 @@ def __init__(self,uqtype,data=None,covmat=None,lb=None,ub=None,threshold=None,pr ub : ndarray Upper bounds of the parameter estimates. Only applicable if ``uqtype='moment'``. - threshold : float - Threshold value used for the likelihood profile method. Only applicable if ``uqtype='profile'``. + threshold_inputs : dict, optional + Dictionary containing the inputs to compute the threshold value for the likelihood profile method. Required for ``uqtype='profile'``, unless ``threshold`` supplied. The dictionary should contain the following keys and values: + - 'Npoints': Number of points in the fit + - 'cost': The cost of the fit + + threshold : function + Function that takes the coverage (confidence level) as input and returns the corresponding threshold value for the likelihood profile method. Only applicable if ``uqtype='profile'``. If not provided, the threshold value is computed based on the inputs provided in ``threshold_inputs``. + profiles : list List of likelihood profiles for each parameter. Each element of the list should be a tuple of arrays ``(x, y)``, where ``x`` represents the values of the parameter and ``y`` represents the corresponding likelihoods. 
@@ -111,7 +118,15 @@ def __init__(self,uqtype,data=None,covmat=None,lb=None,ub=None,threshold=None,pr self.__parfit = data self.__noiselvl = noiselvl self.profile = profiles - self.threshold = threshold + if callable(threshold): + self.__threshold_inputs=None + self.threshold = threshold + elif isinstance(threshold_inputs,dict): + self.__threshold_inputs = threshold_inputs + self.threshold = lambda coverage: noiselvl**2*chi2.ppf(coverage, df=1)/threshold_inputs['Npoints'] + threshold_inputs['cost'] + else: + raise ValueError('For uqtype ''profile'', either a threshold function or a dictionary with the inputs to compute the threshold must be provided.') + nParam = len(np.atleast_1d(data)) elif uqtype == 'bootstrap': diff --git a/deerlab/profile_analysis.py b/deerlab/profile_analysis.py index c67d6febb..06bbe1704 100644 --- a/deerlab/profile_analysis.py +++ b/deerlab/profile_analysis.py @@ -70,8 +70,7 @@ def profile_analysis(model,y, *args, parameters='all', grids=None, samples=50, n fitresult = fit(model, y, *args, **kargs) # Prepare the statistical threshold function - threshold = lambda coverage: noiselvl**2*chi2.ppf(coverage, df=1)/len(fitresult.residuals) + fitresult.cost - + threshold_inputs = {'Npoints': len(fitresult.residuals), 'cost': fitresult.cost} if parameters=='all': parameters = model._parameter_list() @@ -124,7 +123,7 @@ def profile_analysis(model,y, *args, parameters='all', grids=None, samples=50, n getattr(model, parameter).unfreeze() profile = {'x':np.squeeze(grid),'y':profile} - uqresults[parameter] = UQResult('profile', data=getattr(fitresult,parameter), profiles=profile, threshold=threshold, noiselvl=noiselvl) + uqresults[parameter] = UQResult('profile', data=getattr(fitresult,parameter), profiles=profile, threshold=threshold_inputs, noiselvl=noiselvl) uqresults[parameter].profile = uqresults[parameter].profile[0] return uqresults diff --git a/test/test_uqresult.py b/test/test_uqresult.py index 56ba00674..2eb468790 100644 --- 
a/test/test_uqresult.py +++ b/test/test_uqresult.py @@ -41,17 +41,17 @@ def uncertainty_quantification_simulation(): pdf2 = dd_gauss(x,means[1],std[1]) pdf1 /= max(pdf1) pdf2 /= max(pdf2) - σ = 0.01 + σ = 0.01 # Noise Level obj2likelihood = lambda f: 1/np.sqrt(σ*2*np.pi)*np.exp(-1/2*f/σ**2) likelihood2obj = lambda L: -2*np.log(L*np.sqrt(σ*2*np.pi))*σ**2 - threshold = lambda coverage: σ**2*chi2.ppf(coverage, df=1) + likelihood2obj(max(pdf1)) + threshold_inputs = {'Npoints': 1, 'cost': likelihood2obj(max(pdf1))} profile1 = {'y': likelihood2obj(pdf1), 'x':x} profile2 = {'y': likelihood2obj(pdf2), 'x':x} # Construct uncertainty quantification objects uq_moment = UQResult('moment',data=np.array(means),covmat=covmat) uq_bootstrap = UQResult('bootstrap',data=np.vstack(samples).T) - uq_profile = UQResult('profile',data=np.array(means),profiles=[profile1,profile2],threshold=threshold,noiselvl=σ) + uq_profile = UQResult('profile',data=np.array(means),profiles=[profile1,profile2],threshold_inputs=threshold_inputs,noiselvl=σ) mock_objects = {'moment': uq_moment, 'bootstrap': uq_bootstrap, 'profile': uq_profile} references = {'mean': means, 'std': std, 'median': p50, 'ci':[ci50,ci90,ci95], 'percentile': [p5,p50,p95]} From 05bcfd872a73064cbf35ded236dd483df40cc56f Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Tue, 17 Feb 2026 20:33:42 +0100 Subject: [PATCH 19/29] Add to_dict and from_dict methods to UQresult --- deerlab/classes.py | 105 ++++++++++++++++++++++++++++++++++++++++++ test/test_uqresult.py | 18 ++++++++ 2 files changed, 123 insertions(+) diff --git a/deerlab/classes.py b/deerlab/classes.py index 6f137784e..81aa2f5d3 100644 --- a/deerlab/classes.py +++ b/deerlab/classes.py @@ -615,4 +615,109 @@ def propagate(self,model,lb=None,ub=None,samples=None): #-------------------------------------------------------------------------------- + def to_dict(self): + """ + Convert the UQResult object to a dictionary. 
+ + This method converts the UQResult object to a dictionary format, which can be useful for serialization or for easier access to the attributes of the object. + The keys of the dictionary correspond to the attributes of the UQResult object, and the values are the corresponding attribute values. + + Returns + ------- + uq_dict : dict + A dictionary representation of the UQResult object, containing all its attributes and their corresponding values. + """ + if self.type == 'moment': + uq_dict = { + 'type': self.type, + 'mean': self.mean, + 'median': self.median, + 'std': self.std, + 'covmat': self.covmat, + 'nparam': self.nparam, + 'lb': self.__lb, + 'ub': self.__ub + } + elif self.type == 'profile': + if self.__threshold_inputs is None: + raise ValueError('The threshold function was supplied directly, so the inputs to compute the threshold are not available. The to_dict method cannot be used in this case.') + uq_dict = { + 'type': self.type, + 'mean': self.mean, + 'median': self.median, + 'std': self.std, + 'covmat': self.covmat, + 'nparam': self.nparam, + 'profile': self.profile, + 'threshold_inputs': self.__threshold_inputs, + 'data': self.__parfit, + 'noiselvl': self.__noiselvl + } + elif self.type == 'bootstrap': + uq_dict = { + 'type': self.type, + 'mean': self.mean, + 'median': self.median, + 'std': self.std, + 'covmat': self.covmat, + 'nparam': self.nparam, + 'samples': getattr(self, 'samples', None), + 'data': getattr(self, '__parfit', None), + 'noiselvl': getattr(self, '__noiselvl', None) + } + return uq_dict + + @classmethod + def from_dict(self, uq_dict): + """ + Create a UQResult object from a dictionary. + + This method creates a UQResult object from a dictionary representation, which can be useful for deserialization or for creating a UQResult object from a set of attributes stored in a dictionary. + The keys of the input dictionary should correspond to the attributes of the UQResult object, and the values should be the corresponding attribute values. 
+ + Parameters + ---------- + uq_dict : dict + A dictionary containing the attributes and their corresponding values to create a UQResult object. + + Returns + ------- + uq_result : :ref:`UQResult` + A UQResult object created from the input dictionary, with its attributes set according to the values in the dictionary. + """ + + if 'type' not in uq_dict: + raise KeyError("The input dictionary must contain the key 'type' to specify the type of UQResult to be created.") + + elif uq_dict['type'] == 'profile': + return UQResult( + uqtype='profile', + data=uq_dict.get('data', None), + covmat=None, + lb=None, + ub=None, + threshold_inputs=uq_dict.get('threshold_inputs'), + profiles=uq_dict.get('profile'), + noiselvl=uq_dict.get('noiselvl') + ) + elif uq_dict['type'] == 'bootstrap': + return UQResult( + uqtype=uq_dict.get('type', 'void'), + data=uq_dict.get('samples', None), + covmat=None, + lb=None, + ub=None + ) + elif uq_dict['type'] == 'moment': + + return UQResult( + uqtype=uq_dict.get('type', 'void'), + data=uq_dict.get('mean', None) , + covmat=uq_dict.get('covmat', None), + lb=None, + ub=None, + threshold=uq_dict.get('threshold', None), + profiles=uq_dict.get('profile', None) + ) + # ========================================================================= diff --git a/test/test_uqresult.py b/test/test_uqresult.py index 2eb468790..7a612668c 100644 --- a/test/test_uqresult.py +++ b/test/test_uqresult.py @@ -97,4 +97,22 @@ def test_uncertainty_quantitification_attributes(uncertainty_quantification_simu pdfs_ref = [dd_gauss(x,mean,sigma) for x,mean,sigma in zip(xs,references['mean'],references['std'])] assert ovl(pdfs[0],pdfs_ref[0])>0.99 assert ovl(pdfs[1],pdfs_ref[1])>0.99 + +@pytest.mark.parametrize('method', ['moment', 'bootstrap', 'profile']) +def test_to_and_from_dict(uncertainty_quantification_simulation, method): + """Test the to_dict and from_dict methods of the UQResult class""" + + # Retrieve the results of the mock simulation + uq_objects, _ = 
uncertainty_quantification_simulation + + uq = uq_objects[method] + dct = uq.to_dict() + uq_reconstructed = UQResult.from_dict(dct) + assert type(uq_reconstructed) == UQResult + assert np.allclose(uq.mean,uq_reconstructed.mean,rtol=1e-2) + assert np.allclose(uq.std,uq_reconstructed.std,rtol=1e-2) + assert np.allclose(uq.median,uq_reconstructed.median,rtol=1e-2) + assert np.allclose(uq.ci(95),uq_reconstructed.ci(95),rtol=1e-2) + assert np.allclose(uq.ci(90),uq_reconstructed.ci(90),rtol=1e-2) + assert np.allclose(uq.ci(50),uq_reconstructed.ci(50),rtol=1e-2) # ================================================================================================= From a9cbc6b498671b2a1818accde5676847a4bad7e3 Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Tue, 24 Feb 2026 21:24:32 +0100 Subject: [PATCH 20/29] Fixed paths in some doc images --- deerlab/dd_models.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/deerlab/dd_models.py b/deerlab/dd_models.py index e590f7c23..541e4ca2e 100644 --- a/deerlab/dd_models.py +++ b/deerlab/dd_models.py @@ -232,7 +232,7 @@ def _gauss3(r,mean1,std1,mean2,std2,mean3,std3): .. raw:: html - +


:math:`P(r) = \frac{\beta}{2\sigma\Gamma(1/\beta)}\exp\left(-\left(\frac{|r-\left<r\right>|}{\sigma}\right)^\beta \right)` @@ -262,7 +262,7 @@ def _gengauss(r,mean,std,beta): .. raw:: html - +


:math:`P(r) = \frac{1}{\sqrt{2\pi}}\exp\left(-\frac{(r-\left<r\right>)^2}{2\sigma^2}\right)\left(1 + \mathrm{erf}\left(\alpha\frac{(r-\left<r\right>)}{\sqrt{2}\sigma}\right) \right)` @@ -379,7 +379,7 @@ def _rice3(r,location1,spread1,location2,spread2,location3,spread3): .. raw:: html - +


:math:`P(r) = \frac{3}{(2\pi\nu_0)^{3/2}}4\pi r^2\exp(-\frac{3 r^2}{\nu_0})` @@ -502,7 +502,7 @@ def _pbs(r,R1,R2): .. raw:: html - +


:math:`P(r) = \left(R_2^6 P_\mathrm{B}(r|R_2) - R_1^6 P_\mathrm{B}(r|R_1) - 2(r_2^3 - r_1^3)P_\mathrm{BS}(r|R_1,R_2)\right)/(R_2^3 - R_1^3)^2` @@ -555,7 +555,7 @@ def _shell(r,radius,thickness): .. raw:: html - +


:math:`P(r) = \begin{cases} \frac{3r(R^2-(d-r)^2)}{4dR^3} \quad \text{for} \quad d-R \leq r < d+R \\ 0 \quad \text{for} \quad \text{otherwise} \end{cases}` @@ -595,7 +595,7 @@ def _spherepoint(r,radius,dist): .. raw:: html - +


:math:`P(r) = \begin{cases} \frac{r}{2R^2} \quad \text{for} \quad 0 \leq r < 2R \\ 0 \quad \text{for} \quad \text{otherwise} \end{cases}` @@ -632,7 +632,7 @@ def _spheresurf(r,radius): .. raw:: html - +


:math:`P(r) = (R_1^3(R_2^3 - R_1^3)P_\mathrm{BS}(r|R_1,R_2) - R_1^3(R_3^3 - R_1^3)P_\mathrm{BS}(r|R_1,R_3) - R_2^3(R_3^3 - R_2^3)P_\mathrm{BS}(r|R_2,R_3))/((R_3^3 - R_2^3)(R_2^3 - R_1^3))` @@ -691,7 +691,7 @@ def _shellshell(r,radius,thickness1,thickness2): .. raw:: html - +


:math:`P(r) = \frac{3}{16R_1^3(R_2^3 - R_1^3)}\begin{cases} 12r^3R_1^2 - r^5 \quad \text{for} \quad 0\leq r < \min(2R_1,R_2 - R_1) \\ 8r^2(R_2^3 - R_1^3) - 3r(R_2^2 - R_1^2)^2 - 6r^3(R_2 - R_1)(R_2 + R_1) \quad \text{for} \quad R_2-R_1 \leq r < 2R_1 \\ 16r^2R_1^3 \quad \text{for} \quad 2R_1\leq r < R_2 - R_1 \\ r^5 - 6r^3(R_2^2 + R_1^2) + 8r^2(R_2^3 + R_1^3) - 3r(R_2^2 - R_1^2)^2 \quad \text{for} \quad \max(R_2-R_1,2R_1) \leq r < R_1+R_2 \\ 0 \quad \text{for} \quad \text{otherwise} \end{cases}` @@ -735,7 +735,7 @@ def _shellsphere(r,radius,thickness): .. raw:: html - +


:math:`P(r) = \left(R_1^3((R_3^3 - R_1^3)P_\mathrm{BS}(r|R_1,R_3) - (R_4^3 - R_1^3)P_\mathrm{BS}(r|R_1,R_4)) + R_2^3((R_4^3 - R_2^3)P_\mathrm{BS}(r|R_2,R_4) - (R_3^3 - R_2^3)P_\mathrm{BS}(r|R_2,R_3)) \right)/((R_4^3 - R_3^3)(R_2^3 - R_1^3))` @@ -806,7 +806,7 @@ def _shellvoidshell(r,radius,thickness1,thickness2,separation): .. raw:: html - +


@@ -871,7 +871,7 @@ def _shellvoidsphere(r,radius,thickness,separation): .. raw:: html - +


:math:`P(r) = \begin{cases} \frac{3r^5}{16R^6} - \frac{9r^3}{4R^4} + \frac{3r^2}{R^3} \quad \text{for} \quad 0 \leq r < 2R \\ 0 \quad \text{for} \quad \text{otherwise} \end{cases}` From 192714e40ad263d75c1e2d3e0469962b84ebdcd4 Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Wed, 18 Mar 2026 18:18:49 +0100 Subject: [PATCH 21/29] Added names to models --- deerlab/bg_models.py | 13 ++++++ deerlab/dd_models.py | 25 +++++++++- deerlab/fitresult.py | 67 ++++++++++++++++++++++++++- deerlab/model.py | 103 ++++++++++++++++++++++++++++++++++++++++++ deerlab/solvers.py | 2 +- test/test_bgmodels.py | 1 + 6 files changed, 208 insertions(+), 3 deletions(-) diff --git a/deerlab/bg_models.py b/deerlab/bg_models.py index c0a11c8f4..6361b2760 100644 --- a/deerlab/bg_models.py +++ b/deerlab/bg_models.py @@ -110,6 +110,7 @@ def _hom3d(t,conc,lam): return B # Create model bg_hom3d = Model(_hom3d,constants='t') +bg_hom3d.name = 'bg_hom3d' bg_hom3d.description = 'Background from a homogeneous distribution of spins in a 3D medium' # Add parameters bg_hom3d.conc.set(description='Spin concentration', lb=0.01, ub=5000, par0=50, unit='μM') @@ -147,6 +148,7 @@ def _hom3dphase(t,conc,lam): return B # Create model bg_hom3d_phase = Model(_hom3dphase,constants='t') +bg_hom3d_phase.name = 'bg_hom3d_phase' bg_hom3d_phase.description = 'Phase shift from a homogeneous distribution of spins in a 3D medium' # Add parameters bg_hom3d_phase.conc.set(description='Spin concentration', lb=0.01, ub=5000, par0=50, unit='μM') @@ -197,6 +199,7 @@ def _hom3dex(t,conc,rex,lam): return B # Create model bg_hom3dex = Model(_hom3dex,constants='t') +bg_hom3dex.name = 'bg_hom3dex' bg_hom3dex.description = 'Background from a homogeneous distribution of spins with excluded volume' # Add parameters bg_hom3dex.conc.set(description='Spin concentration', lb=0.01, ub=5000, par0=50, unit='μM') @@ -251,6 +254,7 @@ def _hom3dex_phase(t,conc,rex,lam): return B # Create model bg_hom3dex_phase = Model(_hom3dex_phase,constants='t') 
+bg_hom3dex_phase.name = 'bg_hom3dex_phase' bg_hom3dex_phase.description = 'Phase shift from a homogeneous distribution of spins with excluded volume' # Add parameters bg_hom3dex_phase.conc.set(description='Spin concentration', lb=0.01, ub=5000, par0=50, unit='μM') @@ -296,6 +300,7 @@ def _homfractal(t,fconc,fdim,lam): # ====================================================================== # Create model bg_homfractal = Model(_homfractal, constants='t') +bg_homfractal.name = 'bg_homfractal' bg_homfractal.description = 'Background from homogeneous spin distribution in a space of fractal dimension' # Add parameters bg_homfractal.fconc.set(description='Fractal concentration of spins', lb=1e-20, ub=1e20, par0=1.0e-6, unit='μmol/dmᵈ') @@ -341,6 +346,7 @@ def _homfractal_phase(t,fconc,fdim,lam): # ====================================================================== # Create model bg_homfractal_phase = Model(_homfractal_phase,constants='t') +bg_homfractal_phase.name = 'bg_homfractal_phase' bg_homfractal_phase.description = 'Phase shift from a homogeneous distribution of spins in a fractal medium' # Add parameters bg_homfractal_phase.fconc.set(description='Fractal concentration of spins', lb=1e-20, ub=1e20, par0=1.0e-6, unit='μmol/dmᵈ') @@ -367,6 +373,7 @@ def _exp(t,decay): return np.exp(-decay*np.abs(t)) # Create model bg_exp = Model(_exp,constants='t') +bg_exp.name = 'bg_exp' bg_exp.description = 'Exponential background model' # Add parameters bg_exp.decay.set(description='Decay rate', lb=0, ub=200, par0=0.35, unit='μs⁻¹') @@ -392,6 +399,7 @@ def _strexp(t,decay,stretch): return np.exp(-decay*abs(t)**stretch) # Create model bg_strexp = Model(_strexp,constants='t') +bg_strexp.name = 'bg_strexp' bg_strexp.description = 'Stretched exponential background model' # Add parameters bg_strexp.decay.set(description='Decay rate', lb=0, ub=200, par0=0.25, unit='μs⁻¹') @@ -415,6 +423,7 @@ def _prodstrexp(t,decay1,stretch1,decay2,stretch2): return strexp1*strexp2 # Create model 
bg_prodstrexp = Model(_prodstrexp,constants='t') +bg_prodstrexp.name = 'bg_prodstrexp' bg_prodstrexp.description = 'Product of two stretched exponentials background model' # Add parameters bg_prodstrexp.decay1.set(description='Decay rate of 1st component', lb=0, ub=200, par0=0.25, unit='μs⁻¹') @@ -440,6 +449,7 @@ def _sumstrexp(t,decay1,stretch1,weight1,decay2,stretch2): return weight1*strexp1 + (1-weight1)*strexp2 # Create model bg_sumstrexp = Model(_sumstrexp,constants='t') +bg_sumstrexp.name = 'bg_sumstrexp' bg_sumstrexp.description = 'Sum of two stretched exponentials background model' # Add parameters bg_sumstrexp.decay1.set(description='Decay rate of 1st component', lb=0, ub=200, par0=0.25, unit='μs⁻¹') @@ -463,6 +473,7 @@ def _poly1(t,p0,p1): return np.polyval([p1,p0],abs(t)) # Create model bg_poly1 = Model(_poly1,constants='t') +bg_poly1.name = 'bg_poly1' bg_poly1.description = 'Polynomial 1st-order background model' # Add parameters bg_poly1.p0.set(description='Intercept', lb=0, ub=200, par0=1, unit='') @@ -484,6 +495,7 @@ def _poly2(t,p0,p1,p2): return np.polyval([p2,p1,p0],abs(t)) # Create model bg_poly2 = Model(_poly2,constants='t') +bg_poly2.name = 'bg_poly2' bg_poly2.description = 'Polynomial 2nd-order background model' # Add parameters bg_poly2.p0.set(description='Intercept', lb=0, ub=200, par0=1, unit='') @@ -505,6 +517,7 @@ def _poly3(t,p0,p1,p2,p3): return np.polyval([p3,p2,p1,p0],abs(t)) # Create model bg_poly3 = Model(_poly3,constants='t') +bg_poly3.name = 'bg_poly3' bg_poly3.description = 'Polynomial 3rd-order background model' # Add parameters bg_poly3.p0.set(description='Intercept', lb=0, ub=200, par0=1, unit='') diff --git a/deerlab/dd_models.py b/deerlab/dd_models.py index 541e4ca2e..aed9b5427 100644 --- a/deerlab/dd_models.py +++ b/deerlab/dd_models.py @@ -129,7 +129,7 @@ def _multirice3dfun(r,nu,sig): P[P<0] = 0 # Normalization - P = np.squeeze(P)/np.sum([np.trapz(c,np.squeeze(r)) for c in P.T]) + P = 
np.squeeze(P)/np.sum([np.trapezoid(c,np.squeeze(r)) for c in P.T]) return P # ================================================================= @@ -158,6 +158,7 @@ def _gauss(r,mean,std): return _multigaussfun(r,mean,std) # Create model dd_gauss = Model(_gauss,constants='r') +dd_gauss.name = 'dd_gauss' dd_gauss.description = 'Gaussian distribution model' # Parameters dd_gauss.mean.set(description='Mean', lb=1.0, ub=20, par0=3.5, unit='nm') @@ -181,6 +182,7 @@ def _gauss2(r,mean1,std1,mean2,std2): return _multigaussfun(r,[mean1,mean2],[std1,std2]) # Create model dd_gauss2 = Model(_gauss2,constants='r') +dd_gauss2.name = 'dd_gauss2' dd_gauss2.description = 'Sum of two Gaussian distributions model' # Parameters dd_gauss2.mean1.set(description='1st Gaussian mean', lb=1.0, ub=20, par0=2.5, unit='nm') @@ -208,6 +210,7 @@ def _gauss3(r,mean1,std1,mean2,std2,mean3,std3): return _multigaussfun(r,[mean1,mean2,mean3],[std1,std2,std3]) # Create model dd_gauss3 = Model(_gauss3,constants='r') +dd_gauss3.name = 'dd_gauss3' dd_gauss3.description = 'Sum of three Gaussian distributions model' # Parameters dd_gauss3.mean1.set(description='1st Gaussian mean', lb=1.0, ub=20, par0=2.5, unit='nm') @@ -244,6 +247,7 @@ def _gengauss(r,mean,std,beta): return _normalize(r,P) # Create model dd_gengauss = Model(_gengauss,constants='r') +dd_gengauss.name = 'dd_gengauss' dd_gengauss.description = 'Generalized Gaussian distribution model' # Parameters dd_gengauss.mean.set(description='Mean', lb=1.0, ub=20, par0=3.5, unit='nm') @@ -275,6 +279,7 @@ def _skewgauss(r,center,std,skew): return _normalize(r,P) # Create model dd_skewgauss = Model(_skewgauss,constants='r') +dd_skewgauss.name = 'dd_skewgauss' dd_skewgauss.description = 'Skew Gaussian distribution model' # Parameters dd_skewgauss.center.set(description='Center', lb=1.0, ub=20, par0=3.5, unit='nm') @@ -299,6 +304,7 @@ def _rice(r,location,spread): return _multirice3dfun(r,[location],[spread]) # Create model dd_rice = 
Model(_rice,constants='r') +dd_rice.name = 'dd_rice' dd_rice.description = '3D-Rice distribution model' # Parameters dd_rice.location.set(description='Location', lb=1.0, ub=20, par0=3.5, unit='nm') @@ -325,6 +331,7 @@ def _rice2(r,location1,spread1,location2,spread2): return _multirice3dfun(r,[location1,location2],[spread1,spread2]) # Create model dd_rice2 = Model(_rice2,constants='r') +dd_rice2.name = 'dd_rice2' dd_rice2.description = 'Sum of two 3D-Rice distributions model' # Parameters dd_rice2.location1.set(description='1st Rician location', lb=1.0, ub=20, par0=2.5, unit='nm') @@ -355,6 +362,7 @@ def _rice3(r,location1,spread1,location2,spread2,location3,spread3): return _multirice3dfun(r,[location1,location2,location3],[spread1,spread2,spread3]) # Create model dd_rice3 = Model(_rice3,constants='r') +dd_rice3.name = 'dd_rice3' dd_rice3.description = 'Sum of two 3D-Rice distributions model' # Parameters dd_rice3.location1.set(description='1st Rician location', lb=1.0, ub=20, par0=2.5, unit='nm') @@ -397,6 +405,7 @@ def _randcoil(r,Nres,scaling,length): return P # Create model dd_randcoil = Model(_randcoil,constants='r') +dd_randcoil.name = 'dd_randcoil' dd_randcoil.description = 'Random-coil model for an unfolded peptide/protein' # Parameters dd_randcoil.Nres.set(description='Number of residues', lb=2.0, ub=1000, par0=50, unit='') @@ -427,6 +436,7 @@ def _circle(r,center,radius): return P # Create model dd_circle = Model(_circle,constants='r') +dd_circle.name = 'dd_circle' dd_circle.description = 'Semicircle distribution model' # Parameters dd_circle.center.set(description='Center', lb=1, ub=20, par0=3, unit='nm') @@ -455,6 +465,7 @@ def _rcos(r,center,fwhm): return P # Create model dd_cos = Model(_rcos,constants='r') +dd_cos.name = 'dd_cos' dd_cos.description = 'Raised-cosine parametric model' # Parameters dd_cos.center.set(description='Center', lb=1, ub=20, par0=3, unit='nm') @@ -537,6 +548,7 @@ def _shell(r,radius,thickness): return P # Create model dd_shell 
= Model(_shell,constants='r') +dd_shell.name = 'dd_shell' dd_shell.description = 'Uniform distribution of particles on a spherical shell' # Parameters dd_shell.radius.set(description='Inner shell radius', lb=0.1, ub=20, par0=1.5, unit='nm') @@ -577,6 +589,7 @@ def _spherepoint(r,radius,dist): return P # Create model dd_spherepoint = Model(_spherepoint,constants='r') +dd_spherepoint.name = 'dd_spherepoint' dd_spherepoint.description = 'One particle distanced from particles uniformly distributed on a sphere' # Parameters dd_spherepoint.radius.set(description='Sphere radius', lb=0.1, ub=20, par0=1.5, unit='nm') @@ -616,6 +629,7 @@ def _spheresurf(r,radius): return P # Create model dd_spheresurf = Model(_spheresurf,constants='r') +dd_spheresurf.name = 'dd_spheresurf' dd_spheresurf.description = "Particles uniformly distributed on a sphere's surface." # Parameters dd_spheresurf.radius.set(description='Sphere radius', lb=0.1, ub=20, par0=2.5, unit='nm') @@ -674,6 +688,7 @@ def _shellshell(r,radius,thickness1,thickness2): return P # Create model dd_shellshell = Model(_shellshell,constants='r') +dd_shellshell.name = 'dd_shellshell' dd_shellshell.description = 'Particles uniformly distributed on a spherical shell and on another concentric spherical shell.' # Parameters dd_shellshell.radius.set(description='Inner shell radius', lb=0.1, ub=20, par0=1.5, unit='nm') @@ -718,6 +733,7 @@ def _shellsphere(r,radius,thickness): return P # Create model dd_shellsphere = Model(_shellsphere,constants='r') +dd_shellsphere.name = 'dd_shellsphere' dd_shellsphere.description = 'Particles uniformly distributed on a sphere and on an outer spherical shell.' 
# Parameters dd_shellsphere.radius.set(description='Inner shell radius', lb=0.1, ub=20, par0=1.5, unit='nm') @@ -786,6 +802,7 @@ def _shellvoidshell(r,radius,thickness1,thickness2,separation): return P # Create model dd_shellvoidshell = Model(_shellvoidshell,constants='r') +dd_shellvoidshell.name = 'dd_shellvoidshell' dd_shellvoidshell.description = 'Particles uniformly distributed on a spherical shell and on another concentric spherical shell separated by a void.' # Parameters dd_shellvoidshell.radius.set(description='Inner shell radius', lb=0.1, ub=20, par0=0.75, unit='nm') @@ -851,6 +868,7 @@ def _shellvoidsphere(r,radius,thickness,separation): return P # Create model dd_shellvoidsphere = Model(_shellvoidsphere,constants='r') +dd_shellvoidsphere.name = 'dd_shellvoidsphere' dd_shellvoidsphere.description = 'Particles uniformly distributed on a sphere and on a concentric outer spherical shell separated by a void.' # Parameters dd_shellvoidsphere.radius.set(description='Sphere radius', lb=0.1, ub=20, par0=1.5, unit='nm') @@ -889,6 +907,7 @@ def _sphere(r,radius): return P # Create model dd_sphere = Model(_sphere,constants='r') +dd_sphere.name = 'dd_sphere' dd_sphere.description = 'Particles uniformly distributed on a sphere.' # Parameters dd_sphere.radius.set(description='Sphere radius', lb=0.1, ub=20, par0=2.5, unit='nm') @@ -924,6 +943,7 @@ def _triangle(r,mode,left,right): return P # Create model dd_triangle = Model(_triangle,constants='r') +dd_triangle.name = 'dd_triangle' dd_triangle.description = 'Triangular distribution model.' # Parameters dd_triangle.mode.set(description='Mode', lb=1, ub=20, par0=3.5, unit='nm') @@ -952,6 +972,7 @@ def _uniform(r,left,right): return P # Create model dd_uniform = Model(_uniform,constants='r') +dd_uniform.name = 'dd_uniform' dd_uniform.description = 'Uniform distribution model.' 
# Parameters dd_uniform.left.set(description='Left edge', lb=0.1, ub=6, par0=2.5, unit='nm') @@ -999,6 +1020,7 @@ def _wormchain(r,contour,persistence): return P # Create model dd_wormchain = Model(_wormchain,constants='r') +dd_wormchain.name = 'dd_wormchain' dd_wormchain.description = 'Worm-like chain model near the rigid limit.' # Parameters dd_wormchain.contour.set(description='Contour length', lb=1.5, ub=10, par0=3.7, unit='nm') @@ -1035,6 +1057,7 @@ def _wormgauss(r,contour,persistence,std): return P # Create model dd_wormgauss = Model(_wormgauss,constants='r') +dd_wormgauss.name = 'dd_wormgauss' dd_wormgauss.description = 'Worm-like chain model near the rigid limit with Gaussian convolution.' # Parameters dd_wormgauss.contour.set(description='Contour length', lb=1.5, ub=10, par0=3.7, unit='nm') diff --git a/deerlab/fitresult.py b/deerlab/fitresult.py index 27d33aa93..4133d737e 100644 --- a/deerlab/fitresult.py +++ b/deerlab/fitresult.py @@ -3,6 +3,7 @@ import inspect import matplotlib.pyplot as plt import difflib +from deerlab.classes import UQResult @@ -16,7 +17,7 @@ class FitResult(dict): ---------- model : ndarray The fitted model response. - modelUncert : + modelUncert : :ref:`UQResult` Uncertainty quantification of the fitted model response. param : ndarray Fitted parameter vector ordered according to the model parameter indices. @@ -397,4 +398,68 @@ def plot(self,axis=None,xlabel=None,gof=False,fontsize=13): label.set_fontsize(fontsize) return fig + + def to_dict(self): + """ + Converts the FitResult object to a dictionary. + This is used internally when saving the results to a file, but it can also be used by the user to convert the results to a dictionary format. + + Returns + ------- + fit_dict : dict + Dictionary containing the results of the fit. The keys of the dictionary are the same as the attributes of the FitResult object. 
+ """ + + fit_dict = { + 'model': self.model, + 'modelUncert': self.modelUncert.to_dict() if self.modelUncert is not None else None, + 'param': self.param, + 'paramUncert': self.paramUncert.to_dict() if self.paramUncert is not None else None, + 'regparam': getattr(self,'regparam', None), + 'noiselvl': getattr(self,'noiselvl', None), + 'success': getattr(self,'success', None), + 'cost': getattr(self,'cost', None), + 'residuals': getattr(self,'residuals', None), + 'stats': getattr(self,'stats', None), + 'nonlin': getattr(self,'nonlin', None), + 'nonlinUncert': self.nonlinUncert.to_dict() if self.nonlinUncert is not None else None, + 'lin': getattr(self,'lin', None), + 'linUncert': self.linUncert.to_dict() if self.linUncert is not None else None + } + return fit_dict + + @classmethod + def from_dict(cls, fit_dict): + """ + Creates a FitResult object from a dictionary. This is used internally when loading the results from a file, but it can also be used by the user to create a FitResult object from a dictionary format. + + Parameters + ---------- + fit_dict : dict + Dictionary containing the results of the fit. The keys of the dictionary are the same as the attributes of the FitResult object. + + Returns + ------- + fit_result : FitResult + FitResult object created from the input dictionary. 
+ + """ + + fit_result = cls() + fit_result.model = fit_dict.get('model', None) + fit_result.modelUncert = UQResult.from_dict(fit_dict['modelUncert']) if fit_dict.get('modelUncert', None) is not None else None + fit_result.param = fit_dict.get('param', None) + fit_result.paramUncert = UQResult.from_dict(fit_dict['paramUncert']) if fit_dict.get('paramUncert', None) is not None else None + fit_result.regparam = fit_dict.get('regparam', None) + fit_result.noiselvl = fit_dict.get('noiselvl', None) + fit_result.success = fit_dict.get('success', None) + fit_result.cost = fit_dict.get('cost', None) + fit_result.residuals = fit_dict.get('residuals', None) + fit_result.stats = fit_dict.get('stats', None) + fit_result.nonlin = fit_dict.get('nonlin', None) + fit_result.nonlinUncert = UQResult.from_dict(fit_dict['nonlinUncert']) if fit_dict.get('nonlinUncert', None) is not None else None + fit_result.lin = fit_dict.get('lin', None) + fit_result.linUncert = UQResult.from_dict(fit_dict['linUncert']) if fit_dict.get('linUncert', None) is not None else None + + return fit_result # =========================================================================================== diff --git a/deerlab/model.py b/deerlab/model.py index 7538009ec..b92b247f3 100644 --- a/deerlab/model.py +++ b/deerlab/model.py @@ -224,6 +224,42 @@ def copy(self): """ return deepcopy(self) + + #--------------------------------------------------------------------------------------- + + def todict(self): + """ + Return a dictionary with the parameter attributes + """ + return { + 'name': self.name, + 'description': self.description, + 'unit': self.unit, + 'par0': self.par0, + 'lb': self.lb, + 'ub': self.ub, + 'frozen': self.frozen, + 'value': self.value, + 'linear': self.linear + } + #--------------------------------------------------------------------------------------- + + @classmethod + def fromdict(cls, param_dict): + """ + Create a Parameter object from a dictionary of attributes + + Parameters + ---------- 
+ param_dict : dict + Dictionary containing the parameter attributes. The keys of the dictionary must be the same as the attributes of the Parameter class. + + Returns + ------- + parameter : Parameter object + Parameter object created from the input dictionary. + """ + return cls(**param_dict) #=================================================================================== @@ -243,6 +279,8 @@ class Model(): : :ref:`Parameter` Model parameter. One :ref:`Parameter` instance is assigned for each parameter (with name ````) in the model. + name : string + Name of the model, useful for rebuilding the model from a dictionary. description : string Description of the model. signature : string @@ -343,6 +381,7 @@ def __init__(self,nonlinfcn,constants=None,signature=None): nonlinfcn = lambda *_: Amatrix self.nonlinmodel = nonlinfcn self.description = None + self.name = None self._constantsInfo = [] self.parents = None @@ -987,6 +1026,70 @@ def copy(self): return deepcopy(self) + #-------------------------------------------------------------------------------- + + def to_dict(self): + """ + Convert the model to a dictionary representation. + + Returns + ------- + model_dict : dict + Dictionary representation of the model, containing all the information about the model's parameters and their values. 
+ """ + # Get the model's metadata in vector form + metadata = self.getmetadata() + # Create a dictionary with the model's metadata + model_dict = { + 'description': self.description, + 'signature': self.signature, + 'constants': [entry['argkey'] for entry in self._constantsInfo], + 'parameters': [] + } + # Add each parameter's information to the dictionary + for name, lb, par0, ub, linear, frozen, unit in zip(metadata['names'], metadata['lb'], metadata['par0'], metadata['ub'], metadata['linear'], metadata['frozen'], metadata['units']): + param_dict = { + 'name': name, + 'lb': lb, + 'par0': par0, + 'ub': ub, + 'linear': linear, + 'frozen': frozen, + 'unit': unit + } + model_dict['parameters'].append(param_dict) + return model_dict + + @classmethod + def from_dict(self, model_dict): + """ + Update the model's parameters and their values from a dictionary representation. + + Parameters + ---------- + model_dict : dict + Dictionary representation of the model, containing all the information about the model's parameters and their values. 
+ """ + # Update the model's description and signature + self.description = model_dict['description'] + self.signature = model_dict['signature'] + # Update the model's constants + self._constantsInfo = [{'argkey': argkey, 'argidx': idx} for idx, argkey in enumerate(model_dict['constants'])] + # Update the model's parameters + for param_dict in model_dict['parameters']: + name = param_dict['name'] + lb = param_dict['lb'] + par0 = param_dict['par0'] + ub = param_dict['ub'] + linear = param_dict['linear'] + frozen = param_dict['frozen'] + unit = param_dict['unit'] + if linear: + self.addlinear(name=name, lb=lb, ub=ub, par0=par0, unit=unit) + else: + self.addnonlinear(key=name, lb=lb, ub=ub, par0=par0, unit=unit) + if frozen: + getattr(self,name).freeze(par0) #=================================================================================== #============================================================================== diff --git a/deerlab/solvers.py b/deerlab/solvers.py index 5fa940135..da46fd0cc 100644 --- a/deerlab/solvers.py +++ b/deerlab/solvers.py @@ -794,7 +794,7 @@ def uq_subset(uq_full,subset,subset_lb,subset_ub): # Jacobian (non-linear part) Jnonlin = Jacobian(_ResidualsFcn,nonlinfit,lb,ub) # Jacobian (linear part) - scale = np.trapz(linfit,np.arange(Nlin)) + scale = np.trapezoid(linfit,np.arange(Nlin)) Jlin = (weights[:,np.newaxis]*Amodel(nonlinfit))[mask,:] if includeExtrapenalty: for penalty in extrapenalty: diff --git a/test/test_bgmodels.py b/test/test_bgmodels.py index ef6a690fa..5b8821d26 100644 --- a/test/test_bgmodels.py +++ b/test/test_bgmodels.py @@ -30,6 +30,7 @@ def assert_bgmodel_behavior(model): B_ub = model(t, *ub) # Assert + assert model.name is not None assert all(B_par0 == B_par0_T) assert all(~np.isnan(B_par0)) assert all(~np.isnan(B_lb)) From c0ecd4479885c15a4ece27d4aec2d88897b91fa4 Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Thu, 26 Mar 2026 12:38:29 +0100 Subject: [PATCH 22/29] bump version --- VERSION | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index c64122024..3e0c29c66 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v1.1.4 +v1.1.5 From 27373bf15ff4668a40847d4491e755ff2c9d3eab Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Mon, 13 Apr 2026 14:14:37 +0200 Subject: [PATCH 23/29] Added automatic version detection --- deerlab/__init__.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/deerlab/__init__.py b/deerlab/__init__.py index 8a8d26e77..6c2ca1ee7 100644 --- a/deerlab/__init__.py +++ b/deerlab/__init__.py @@ -1,4 +1,11 @@ # __init__.py +from importlib.metadata import version as get_version + +try: + __VERSION__ = get_version('DeerLab') +except Exception: + __VERSION__ = 'unknown' + from .dd_models import * from .bg_models import * from .model import Model, Penalty, Parameter, link, lincombine, merge, relate @@ -22,3 +29,4 @@ from .diststats import diststats from .profile_analysis import profile_analysis from .utils import * +from .io import save, load, json_dumps, json_loads \ No newline at end of file From c1ca94f753600d290c4795779d454d2ee4982dc4 Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Mon, 13 Apr 2026 14:16:28 +0200 Subject: [PATCH 24/29] Added `save` and `load` functions Including: - Function for dumping and reading from JSON strings. 
`dump_jsons` and `load_jsons` - Support for saving to HDF5, JSON, TOML - Example for saving and loading files --- deerlab/classes.py | 4 +- deerlab/fitresult.py | 86 +++--- deerlab/io.py | 411 ++++++++++++++++++++++++++++ docsrc/source/reference.rst | 10 +- examples/basic/ex_saving_loading.py | 56 ++++ test/test_io.py | 136 +++++++++ 6 files changed, 667 insertions(+), 36 deletions(-) create mode 100644 deerlab/io.py create mode 100644 examples/basic/ex_saving_loading.py create mode 100644 test/test_io.py diff --git a/deerlab/classes.py b/deerlab/classes.py index 81aa2f5d3..927d84513 100644 --- a/deerlab/classes.py +++ b/deerlab/classes.py @@ -714,8 +714,8 @@ def from_dict(self, uq_dict): uqtype=uq_dict.get('type', 'void'), data=uq_dict.get('mean', None) , covmat=uq_dict.get('covmat', None), - lb=None, - ub=None, + lb=uq_dict.get('lb', None), + ub=uq_dict.get('ub', None), threshold=uq_dict.get('threshold', None), profiles=uq_dict.get('profile', None) ) diff --git a/deerlab/fitresult.py b/deerlab/fitresult.py index 4133d737e..3e301822c 100644 --- a/deerlab/fitresult.py +++ b/deerlab/fitresult.py @@ -4,6 +4,7 @@ import matplotlib.pyplot as plt import difflib from deerlab.classes import UQResult +from deerlab.model import Model @@ -409,24 +410,47 @@ def to_dict(self): fit_dict : dict Dictionary containing the results of the fit. The keys of the dictionary are the same as the attributes of the FitResult object. 
""" - - fit_dict = { - 'model': self.model, - 'modelUncert': self.modelUncert.to_dict() if self.modelUncert is not None else None, - 'param': self.param, - 'paramUncert': self.paramUncert.to_dict() if self.paramUncert is not None else None, - 'regparam': getattr(self,'regparam', None), - 'noiselvl': getattr(self,'noiselvl', None), - 'success': getattr(self,'success', None), - 'cost': getattr(self,'cost', None), - 'residuals': getattr(self,'residuals', None), - 'stats': getattr(self,'stats', None), - 'nonlin': getattr(self,'nonlin', None), - 'nonlinUncert': self.nonlinUncert.to_dict() if self.nonlinUncert is not None else None, - 'lin': getattr(self,'lin', None), - 'linUncert': self.linUncert.to_dict() if self.linUncert is not None else None - } - return fit_dict + def _prepare_value(obj): + if isinstance(obj, UQResult): + d = obj.to_dict() + d['__type__'] = 'UQResult' + return d + elif isinstance(obj, list): + return [_prepare_value(item) for item in obj] + elif isinstance(obj, dict): + return {str(k): _prepare_value(v) for k, v in obj.items()} + return obj + + output_dict = {} + + for key in self.keys(): + obj = self[key] + if isinstance(obj, (int, float, np.ndarray, list, str)): + output_dict[str(key)] = _prepare_value(obj) + elif isinstance(obj, dict): + output_dict[str(key)] = _prepare_value(obj) + elif isinstance(obj, FitResult): + print('Warning: Skipping fitresult object for key:', key) + elif isinstance(obj, Model): + print('Warning: Skipping model object for key:', key) + elif isinstance(obj, UQResult): + output_dict[str(key)] = _prepare_value(obj) + elif obj is None: + output_dict[str(key)] = None + elif callable(obj): + continue + else: + print(f"Skipping key '{key}' of type {type(obj)}") + + # conver paramlist to list of str + if 'paramlist' in output_dict: + output_dict['paramlist'] = [str(item) for item in output_dict['paramlist']] + + # conert _param_idx to list of list of int + if '_param_idx' in output_dict: + output_dict['_param_idx'] = 
[list(item) for item in output_dict['_param_idx']] + + return output_dict @classmethod def from_dict(cls, fit_dict): @@ -445,21 +469,19 @@ def from_dict(cls, fit_dict): """ + def _resolve_deerlab_types(obj): + if isinstance(obj, dict): + if obj.get('__type__') == 'UQResult': + resolved = {k: _resolve_deerlab_types(v) for k, v in obj.items() if k != '__type__'} + return UQResult.from_dict(resolved) + return {k: _resolve_deerlab_types(v) for k, v in obj.items()} + elif isinstance(obj, list): + return [_resolve_deerlab_types(item) for item in obj] + return obj + + fit_result = cls() - fit_result.model = fit_dict.get('model', None) - fit_result.modelUncert = UQResult.from_dict(fit_dict['modelUncert']) if fit_dict.get('modelUncert', None) is not None else None - fit_result.param = fit_dict.get('param', None) - fit_result.paramUncert = UQResult.from_dict(fit_dict['paramUncert']) if fit_dict.get('paramUncert', None) is not None else None - fit_result.regparam = fit_dict.get('regparam', None) - fit_result.noiselvl = fit_dict.get('noiselvl', None) - fit_result.success = fit_dict.get('success', None) - fit_result.cost = fit_dict.get('cost', None) - fit_result.residuals = fit_dict.get('residuals', None) - fit_result.stats = fit_dict.get('stats', None) - fit_result.nonlin = fit_dict.get('nonlin', None) - fit_result.nonlinUncert = UQResult.from_dict(fit_dict['nonlinUncert']) if fit_dict.get('nonlinUncert', None) is not None else None - fit_result.lin = fit_dict.get('lin', None) - fit_result.linUncert = UQResult.from_dict(fit_dict['linUncert']) if fit_dict.get('linUncert', None) is not None else None + fit_result.update(_resolve_deerlab_types(fit_dict)) return fit_result # =========================================================================================== diff --git a/deerlab/io.py b/deerlab/io.py new file mode 100644 index 000000000..8c2215869 --- /dev/null +++ b/deerlab/io.py @@ -0,0 +1,411 @@ +import h5py +from deerlab import FitResult, UQResult, Model, __VERSION__ 
+import os +import pathlib +import numpy as np +import json +import tomllib +import re + +supported_formats = ['hdf5', 'json', 'toml'] +supported_types = ['FitResult', 'UQResult', 'Model'] + + +#======================================================================================= +# Serialization helpers (JSON / TOML) +#======================================================================================= + +def _to_serializable(obj): + """Recursively convert Python/numpy objects to JSON/TOML-safe primitives.""" + if isinstance(obj, np.ndarray): + if np.issubdtype(obj.dtype, np.complexfloating): + return {'__ndarray__': True, 'real': obj.real.tolist(), 'imag': obj.imag.tolist(), 'dtype': str(obj.dtype)} + return {'__ndarray__': True, 'data': obj.tolist(), 'dtype': str(obj.dtype)} + elif isinstance(obj, complex): + return {'__complex__': True, 'real': obj.real, 'imag': obj.imag} + elif isinstance(obj, np.integer): + return int(obj) + elif isinstance(obj, np.floating): + return float(obj) + elif isinstance(obj, np.bool_): + return bool(obj) + elif isinstance(obj, dict): + return {str(k): _to_serializable(v) for k, v in obj.items()} + elif isinstance(obj, (list, tuple)): + return [_to_serializable(i) for i in obj] + elif obj is None: + return {'__none__': True} + return obj + + +def _from_serializable(obj): + """Recursively restore numpy objects from serialized form.""" + if isinstance(obj, dict): + if obj.get('__ndarray__') is True: + if 'imag' in obj: + return np.array(obj['real'], dtype=float) + 1j * np.array(obj['imag'], dtype=float) + return np.array(obj['data'], dtype=obj.get('dtype', 'float64')) + elif obj.get('__complex__') is True: + return complex(obj['real'], obj['imag']) + elif obj.get('__none__') is True: + return None + return {k: _from_serializable(v) for k, v in obj.items()} + elif isinstance(obj, list): + return [_from_serializable(i) for i in obj] + return obj + + 
+#======================================================================================= +# HDF5 +#======================================================================================= + +def _create_h5_element(group: h5py.Group, key, value): + if isinstance(value, (Model, FitResult, UQResult)): + return + elif value is None: + group.create_dataset(key, data='None') + elif isinstance(value, list) and len(value) > 0 and isinstance(value[0], list): + subgroup = group.create_group(key) + subgroup.attrs['__type__'] = 'list' + for i, sublist in enumerate(value): + if isinstance(sublist, list): + subgroup2 = subgroup.create_group(f'{key}_{i}') + subgroup2.attrs['__type__'] = 'list' + for j, item in enumerate(sublist): + _create_h5_element(subgroup2, f'{key}_{i}_{j}', item) + else: + subgroup.create_dataset(f'{key}_{i}', data=sublist) + elif isinstance(value, dict): + subgroup = group.create_group(key) + for subkey, subvalue in value.items(): + _create_h5_element(subgroup, subkey, subvalue) + else: + group.create_dataset(key, data=value) + + +def _read_hdf5(filename): + def _decode_h5_value(x): + if isinstance(x, (bytes, np.bytes_)): + s = x.decode('utf-8') + return None if s == 'None' else s + if isinstance(x, np.ndarray): + if x.dtype.kind == 'S': + return np.char.decode(x, 'utf-8') + if x.dtype.kind == 'O': + return np.array( + [i.decode('utf-8') if isinstance(i, (bytes, np.bytes_)) else i for i in x], + dtype=object, + ) + return x + + def _h5_to_python(item): + if isinstance(item, h5py.Group): + if item.attrs.get('__type__') == 'list': + return [_h5_to_python(item[k]) for k in item.keys()] + return {subkey: _h5_to_python(item[subkey]) for subkey in item.keys()} + else: + return _decode_h5_value(item[()]) + + with h5py.File(filename, 'r') as file: + if file.attrs.get('format') != 'deerlab': + raise ValueError(f"Unsupported format '{file.attrs.get('format')}' in file. 
Only 'deerlab' format is supported.") + object_class = file.attrs['object_class'] + if object_class not in supported_types: + raise ValueError(f"Unsupported object type '{object_class}' in file. Supported object types are: {supported_types}") + + raw_dict = {key: _h5_to_python(file[key]) for key in file.keys()} + raw_dict['object_class'] = object_class + + return raw_dict + + +#======================================================================================= +# JSON +#======================================================================================= + +class _NumpyEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, np.ndarray): + if np.issubdtype(obj.dtype, np.complexfloating): + return {'__ndarray__': True, 'real': obj.real.tolist(), 'imag': obj.imag.tolist(), 'dtype': str(obj.dtype)} + return {'__ndarray__': True, 'data': obj.tolist(), 'dtype': str(obj.dtype)} + elif isinstance(obj, complex): + return {'__complex__': True, 'real': obj.real, 'imag': obj.imag} + elif isinstance(obj, np.integer): + return int(obj) + elif isinstance(obj, np.floating): + return float(obj) + elif isinstance(obj, np.bool_): + return bool(obj) + return super().default(obj) + + +def _json_object_hook(d): + if d.get('__ndarray__') is True: + if 'imag' in d: + return np.array(d['real'], dtype=float) + 1j * np.array(d['imag'], dtype=float) + return np.array(d['data'], dtype=d.get('dtype', 'float64')) + elif d.get('__complex__') is True: + return complex(d['real'], d['imag']) + elif d.get('__none__') is True: + return None + return d + + +def _save_json(filename, data_dict, object_class): + payload = {'__format__': 'deerlab', '__version__': __VERSION__, '__object_class__': object_class} + payload.update(data_dict) + if isinstance(filename, (str, os.PathLike)): + with open(filename, 'w') as f: + json.dump(payload, f, cls=_NumpyEncoder, indent=2) + else: + json.dump(payload, filename, cls=_NumpyEncoder, indent=2) + + +def _load_json(filename): + if 
isinstance(filename, (str, os.PathLike)): + with open(filename, 'r') as f: + payload = json.load(f, object_hook=_json_object_hook) + else: + payload = json.load(filename, object_hook=_json_object_hook) + if payload.get('__format__') != 'deerlab': + raise ValueError("File does not appear to be a deerlab JSON file.") + object_class = payload.pop('__object_class__') + payload.pop('__format__', None) + payload.pop('__version__', None) + if object_class not in supported_types: + raise ValueError(f"Unsupported object type '{object_class}' in file.") + payload['object_class'] = object_class + return payload + + +#======================================================================================= +# TOML +#======================================================================================= + +def _toml_key(key): + """Return a valid TOML key, quoting if necessary.""" + if re.match(r'^[A-Za-z0-9_-]+$', key): + return key + escaped = key.replace('\\', '\\\\').replace('"', '\\"') + return f'"{escaped}"' + + +def _toml_value(val): + """Serialize a value to an inline TOML value string.""" + if isinstance(val, bool): + return 'true' if val else 'false' + elif isinstance(val, int): + return str(val) + elif isinstance(val, float): + if val != val: + return 'nan' + elif val == float('inf'): + return 'inf' + elif val == float('-inf'): + return '-inf' + return repr(val) + elif isinstance(val, str): + result = [] + for ch in val: + if ch == '\\': + result.append('\\\\') + elif ch == '"': + result.append('\\"') + elif ch == '\n': + result.append('\\n') + elif ch == '\r': + result.append('\\r') + elif ch == '\t': + result.append('\\t') + elif ord(ch) < 0x20 or ord(ch) == 0x7f: + result.append(f'\\u{ord(ch):04x}') + else: + result.append(ch) + return '"' + ''.join(result) + '"' + elif isinstance(val, (list, tuple)): + return '[' + ', '.join(_toml_value(i) for i in val) + ']' + elif isinstance(val, dict): + items = ', '.join(f'{_toml_key(k)} = {_toml_value(v)}' for k, v in 
val.items()) + return '{' + items + '}' + else: + raise TypeError(f"Cannot serialize type {type(val).__name__} to TOML") + + +def _dict_to_toml(d): + """Serialize a dict to a TOML string. Top-level dicts become [sections].""" + scalars = [] + sections = {} + for key, val in d.items(): + if isinstance(val, dict): + sections[key] = val + else: + scalars.append(f'{_toml_key(key)} = {_toml_value(val)}') + + lines = scalars[:] + for key, val in sections.items(): + lines.append(f'\n[{_toml_key(key)}]') + for subkey, subval in val.items(): + lines.append(f'{_toml_key(subkey)} = {_toml_value(subval)}') + + return '\n'.join(lines) + '\n' + + +def _save_toml(filename, data_dict, object_class): + meta = {'__format__': 'deerlab', '__version__': __VERSION__, '__object_class__': object_class} + serializable = _to_serializable(data_dict) + payload = {**meta, **serializable} + content = _dict_to_toml(payload) + if isinstance(filename, (str, os.PathLike)): + with open(filename, 'w', encoding='utf-8') as f: + f.write(content) + else: + filename.write(content.encode('utf-8')) + + +def _load_toml(filename): + if isinstance(filename, (str, os.PathLike)): + with open(filename, 'rb') as f: + payload = tomllib.load(f) + else: + content = filename.read() + if isinstance(content, str): + content = content.encode('utf-8') + payload = tomllib.loads(content.decode('utf-8')) + if payload.get('__format__') != 'deerlab': + raise ValueError("File does not appear to be a deerlab TOML file.") + object_class = payload.pop('__object_class__') + payload.pop('__format__', None) + payload.pop('__version__', None) + if object_class not in supported_types: + raise ValueError(f"Unsupported object type '{object_class}' in file.") + result = _from_serializable(payload) + result['object_class'] = object_class + return result + + +#======================================================================================= +# Saving 
+#=======================================================================================
+
+def save(filename, object, format=None):
+    """
+    Saves a deerlab object (FitResult, UQResult, Model) to file.
+
+    Parameters
+    ----------
+    filename : str, bytes, os.PathLike, or file-like object
+        The name of the file to which the data is saved, or a path-like object.
+        If the file already exists, it will be overwritten. If the file does not exist, a new file will be created.
+        A file-like buffer (e.g. BytesIO) can be used for HDF5 and TOML; a text buffer for JSON.
+    object : dl.FitResult, dl.UQResult, dl.Model
+        The deerlab object to be saved.
+    format : str, optional
+        The format in which to save the data. One of 'hdf5', 'json', 'toml'.
+        If not provided, the format is inferred from the file extension.
+    """
+    if format is None and isinstance(filename, (str, os.PathLike)):
+        ext = pathlib.Path(filename).suffix.lower()
+        if ext in ['.hdf5', '.h5']:
+            format = 'hdf5'
+        elif ext == '.json':
+            format = 'json'
+        elif ext == '.toml':
+            format = 'toml'
+
+    if format not in supported_formats:
+        raise ValueError(f"Unsupported format '{format}'. Supported formats are: {supported_formats}")
+
+    object_class = type(object).__name__
+    if object_class not in supported_types:
+        raise ValueError(f"Unsupported object type '{object_class}'. Supported object types are: {supported_types}")
+
+    if format in ('hdf5', 'h5'):
+        with h5py.File(filename, 'w') as file:
+            file.attrs['format'] = 'deerlab'
+            file.attrs['version'] = __VERSION__
+            file.attrs['object_class'] = object_class
+            for key, value in object.to_dict().items():
+                _create_h5_element(file, key, value)
+    elif format == 'json':
+        _save_json(filename, object.to_dict(), object_class)
+    elif format == 'toml':
+        _save_toml(filename, object.to_dict(), object_class)
+
+
+def json_dumps(object):
+    """Convert a deerlab object to a JSON string."""
+    if not isinstance(object, (FitResult, UQResult, Model)):
+        raise ValueError("Only FitResult, UQResult, and Model objects can be serialized to JSON.")
+    return json.dumps({'__format__': 'deerlab', '__version__': __VERSION__, '__object_class__': type(object).__name__, **object.to_dict()}, cls=_NumpyEncoder, indent=2)
+
+def json_loads(json_string):
+    """Convert a JSON string back to a deerlab object."""
+    payload = json.loads(json_string, object_hook=_json_object_hook)
+    if payload.get('__format__') != 'deerlab':
+        raise ValueError("String does not appear to be a deerlab JSON string.")
+    object_class = payload.pop('__object_class__')
+    payload.pop('__format__', None)
+    payload.pop('__version__', None)
+    if object_class == 'FitResult':
+        return FitResult.from_dict(payload)
+    elif object_class == 'UQResult':
+        return UQResult.from_dict(payload)
+    elif object_class == 'Model':
+        return Model.from_dict(payload)
+    else:
+        raise ValueError(f"Unsupported object type '{object_class}' in JSON string.")
+#=======================================================================================
+# Loading
+#=======================================================================================
+
+def load(filename, format=None):
+    """
+    Loads a deerlab object (FitResult, UQResult, Model) from file. 
+ + Parameters + ---------- + filename : str, bytes, os.PathLike, or file-like object + The name of the file from which the data is loaded, or a path-like object. The file must exist. + format : str, optional + The format of the file. One of 'hdf5', 'json', 'toml'. + If not provided, the format is inferred from the file extension. + + Returns + ------- + object : dl.FitResult, dl.UQResult, dl.Model + The deerlab object that was loaded from file. + """ + if format is None and isinstance(filename, (str, os.PathLike)): + ext = pathlib.Path(filename).suffix.lower() + if ext in ['.hdf5', '.h5']: + file_format = 'hdf5' + elif ext == '.json': + file_format = 'json' + elif ext == '.toml': + file_format = 'toml' + else: + raise ValueError("Could not identify the file format. Please specify the format explicitly.") + elif format is not None: + file_format = format.lower() + if file_format not in supported_formats: + raise ValueError(f"Unsupported format '{file_format}'. Supported formats are: {supported_formats}") + else: + raise ValueError("Could not identify the file format. Please specify the format explicitly.") + + if file_format in ('hdf5', 'h5'): + dict_output = _read_hdf5(filename) + elif file_format == 'json': + dict_output = _load_json(filename) + elif file_format == 'toml': + dict_output = _load_toml(filename) + + if dict_output['object_class'] == 'FitResult': + dict_output.pop('object_class') + return FitResult.from_dict(dict_output) + elif dict_output['object_class'] == 'UQResult': + dict_output.pop('object_class') + return UQResult.from_dict(dict_output) + elif dict_output['object_class'] == 'Model': + dict_output.pop('object_class') + return Model.from_dict(dict_output) \ No newline at end of file diff --git a/docsrc/source/reference.rst b/docsrc/source/reference.rst index afda906d5..271507bf4 100644 --- a/docsrc/source/reference.rst +++ b/docsrc/source/reference.rst @@ -47,6 +47,9 @@ Reference Index fnnls cvxnnls goodness_of_fit + save + load + .. 
rubric:: Dipolar EPR functions @@ -72,8 +75,6 @@ Reference Index :template: custom_function_template.rst :nosignatures: - store_pickle - read_pickle sophegrid choleskycovmat hccm @@ -83,3 +84,8 @@ Reference Index ovl der_snr formatted_table + store_pickle + read_pickle + dump_jsons + load_jsons + diff --git a/examples/basic/ex_saving_loading.py b/examples/basic/ex_saving_loading.py new file mode 100644 index 000000000..f756ed549 --- /dev/null +++ b/examples/basic/ex_saving_loading.py @@ -0,0 +1,56 @@ +#%% [markdown] +""" +Saving and loading DeerLab fit results +------------------------------------------------------------------------- + +DeerLab FitResult objects can be saved and loaded to file, with all relevant information (fitted model, uncertainties, regularization parameter, etc.) preserved. +This is useful for archiving results, sharing them with collaborators, or importing fits from DeerAnalysis 2026 into Python for plotting. + +""" + + +# %% +# First we will fit a basic model to generate a result and then save it to file. + +import numpy as np +import matplotlib.pyplot as plt +import deerlab as dl +path = '../data/' +file = 'example_4pdeer_1.DTA' + +tau1 = 0.3; tau2 = 4.0; tmin = 0.1; + +t,Vexp = dl.deerload(path + file) + +# Pre-processing +Vexp = dl.correctphase(Vexp) # Phase correction +Vexp = Vexp/np.max(Vexp) # Rescaling (aesthetic) +t = t - t[0] # Account for zerotime +t = t + tmin +# Distance vector +r = np.arange(2.5,5,0.01) # nm + +# Construct the model +Vmodel = dl.dipolarmodel(t,r, experiment = dl.ex_4pdeer(tau1,tau2, pathways=[1])) + +# Fit the model to the data +results = dl.fit(Vmodel,Vexp) + + +# %% +# We will then save the results to an HDF5 file. +# The file can also be saved as a JSON or TOML file, if needed. A HDF5 file is easy to read using MATLAB, Origin or similar software. + +dl.save('fit_result.hdf5', results) + +#%% +# The file can then be loaded back into Python, with all information preserved. 
+results2 = dl.load('fit_result.hdf5') + +# We can check that the loaded results are the same as the original results by comparison +print('Model result is the same:', np.allclose(results.model, results2.model)) + +# Built in methods of the FitResult object can be used as normal, e.g. plotting the results +results2.plot(); + +# %% diff --git a/test/test_io.py b/test/test_io.py new file mode 100644 index 000000000..e52be8a05 --- /dev/null +++ b/test/test_io.py @@ -0,0 +1,136 @@ +import numpy as np +import pytest +import os +import tempfile +import deerlab as dl +from deerlab import dd_gauss, bg_hom3d, FitResult +from deerlab.dipolarmodel import dipolarmodel +from deerlab.fit import fit +from deerlab.io import save, load, json_dumps, json_loads +from matplotlib.figure import Figure +import h5py +import io + +# Shared test data +t = np.linspace(-0.5, 5, 100) +r = np.linspace(2, 5, 50) + +@pytest.fixture(scope='module') +def fitresult(): + Bfcn = lambda t, lam: bg_hom3d(t, 50, lam) + Pr = dd_gauss(r, 3, 0.2) + V = 1e5 * dl.dipolarkernel(t, r, mod=0.3, bg=Bfcn) @ Pr + model = dipolarmodel(t, r, dd_gauss, bg_hom3d, npathways=1) + return fit(model, V, ftol=1e-4) + +# ====================================================================== +def test_fitresult_to_dict(fitresult): + "Check that FitResult can be converted to a dictionary" + + d = fitresult.to_dict() + + assert isinstance(d, dict) + assert 'model' in d +# ====================================================================== + +def test_fitresult_save_filebuffer(fitresult): + "Check that FitResult can be saved to an HDF5 fileIO buffer" + + + buf = io.BytesIO() + print(buf.getbuffer().nbytes) + save(buf, fitresult, format='hdf5') + assert buf.getbuffer().nbytes > 0 + + +# ====================================================================== + +def test_fitresult_save_hdf5(fitresult): + "Check that FitResult can be saved to an HDF5 file" + + with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=False) as f: + 
fname = f.name + try: + save(fname, fitresult) + assert os.path.exists(fname) + # Check that the file can be opened and has the expected attributes + with h5py.File(fname, 'r') as file: + assert file.attrs['format'] == 'deerlab' + assert file.attrs['version'] == dl.__VERSION__ + assert file.attrs['object_class'] == 'FitResult' + assert 'model' in file.keys() + finally: + os.remove(fname) + + +# ====================================================================== +def test_fitresult_save_load_hdf5(fitresult): + "Check that FitResult can be saved and loaded from an HDF5 file" + + with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=False) as f: + fname = f.name + try: + save(fname, fitresult) + loaded = load(fname) + assert isinstance(loaded, FitResult) + assert isinstance(loaded.modelUncert,dl.UQResult) + assert np.allclose(loaded.model, fitresult.model) + assert np.allclose(loaded.regparam, fitresult.regparam) + assert isinstance(loaded.plot(), Figure) + + finally: + os.remove(fname) + +# ====================================================================== +def test_fitresult_to_from_json_string(fitresult): + "Check that FitResult can be converted to a JSON string" + + json_str = json_dumps(fitresult) + assert isinstance(json_str, str) + assert len(json_str) > 0 + fitresult_loaded = json_loads(json_str) + assert isinstance(fitresult_loaded, FitResult) + assert np.allclose(fitresult_loaded.model, fitresult.model) + assert isinstance(fitresult_loaded.modelUncert,dl.UQResult) + assert np.allclose(fitresult_loaded.regparam, fitresult.regparam) + +# ====================================================================== +def test_fitresult_save_load_json(fitresult): + "Check that FitResult can be saved and loaded from a JSON file" + + with tempfile.NamedTemporaryFile(suffix='.json', delete=False) as f: + fname = f.name + try: + save(fname, fitresult) + loaded = load(fname) + assert isinstance(loaded, FitResult) + assert np.allclose(loaded.model, fitresult.model) + 
assert isinstance(loaded.modelUncert,dl.UQResult) + assert np.allclose(loaded.regparam, fitresult.regparam) + assert isinstance(loaded.plot(), Figure) + + + finally: + os.remove(fname) + + +# ====================================================================== +def test_fitresult_save_load_toml(fitresult): + "Check that FitResult can be saved and loaded from a TOML file" + + with tempfile.NamedTemporaryFile(suffix='.toml', delete=False) as f: + fname = f.name + try: + save(fname, fitresult) + loaded = load(fname) + assert isinstance(loaded, FitResult) + assert np.allclose(loaded.model, fitresult.model) + assert isinstance(loaded.modelUncert,dl.UQResult) + assert np.allclose(loaded.regparam, fitresult.regparam) + assert isinstance(loaded.plot(), Figure) + + finally: + os.remove(fname) + + +# ====================================================================== From 3c4ecf663f9d29fad0a1708937e06e38c4e261df Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Mon, 13 Apr 2026 14:31:08 +0200 Subject: [PATCH 25/29] Added `h5py` as a dependency --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 5d237fc5b..5a81d4407 100644 --- a/setup.py +++ b/setup.py @@ -31,6 +31,7 @@ 'setuptools>=53.0.0', 'numexpr>=2.7.3', 'quadprog>=0.1.11; python_version <= "3.10"', + 'h5py>=3.16' ], classifiers=[ 'Development Status :: 5 - Production/Stable', From 8829acb972bfcf03acf1e6c06601ae4c3988a74a Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Mon, 13 Apr 2026 15:56:57 +0200 Subject: [PATCH 26/29] Fixed loading of lb and ub with UQResult --- deerlab/classes.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/deerlab/classes.py b/deerlab/classes.py index 927d84513..2267fdcbb 100644 --- a/deerlab/classes.py +++ b/deerlab/classes.py @@ -694,8 +694,8 @@ def from_dict(self, uq_dict): uqtype='profile', data=uq_dict.get('data', None), covmat=None, - lb=None, - ub=None, + lb=uq_dict.get('lb', None), + ub=uq_dict.get('ub', None), 
threshold_inputs=uq_dict.get('threshold_inputs'), profiles=uq_dict.get('profile'), noiselvl=uq_dict.get('noiselvl') @@ -705,8 +705,8 @@ def from_dict(self, uq_dict): uqtype=uq_dict.get('type', 'void'), data=uq_dict.get('samples', None), covmat=None, - lb=None, - ub=None + lb=uq_dict.get('lb', None), + ub=uq_dict.get('ub', None), ) elif uq_dict['type'] == 'moment': From a4bc1d2b5f817e3915289ea3e00f5ef1fa3159a5 Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Mon, 13 Apr 2026 16:14:50 +0200 Subject: [PATCH 27/29] Better handling of lb and ub in UQResult --- deerlab/classes.py | 36 +++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/deerlab/classes.py b/deerlab/classes.py index 2267fdcbb..4dd1e3be1 100644 --- a/deerlab/classes.py +++ b/deerlab/classes.py @@ -54,6 +54,12 @@ class UQResult: threshold : function Treshold function used for the profile method. Only available for ``type='profile'``. + lb : ndarray + Lower bounds of the parameters. + + ub : ndarray + Upper bounds of the parameters. + """ @@ -144,15 +150,19 @@ def __init__(self,uqtype,data=None,covmat=None,lb=None,ub=None, else: raise NameError('uqtype not found. 
Must be: ''moment'', ''bootstrap'' or ''void''.') + # Set default bounds if not provided if lb is None: lb = np.full(nParam, -np.inf) if ub is None: ub = np.full(nParam, np.inf) + + lb = np.array([(-np.inf if v is None else v) for v in lb]) + ub = np.array([(np.inf if v is None else v) for v in ub]) # Set private variables - self.__lb = lb - self.__ub = ub + self.lb = lb + self.ub = ub self.nparam = nParam # Create confidence intervals structure @@ -234,8 +244,8 @@ def join(self,*args): # Original metadata newargs.append(self.mean) newargs.append(self.covmat) - newargs.append(self.__lb) - newargs.append(self.__ub) + newargs.append(self.lb) + newargs.append(self.ub) elif self.type=='bootstrap': newargs.append(self.samples) @@ -349,8 +359,8 @@ def pardist(self,n=0): pdf = fftconvolve(pdf, kernel, mode='same') # Clip the distributions outside the boundaries - pdf[x < self.__lb[n]] = 0 - pdf[x > self.__ub[n]] = 0 + pdf[x < self.lb[n]] = 0 + pdf[x > self.ub[n]] = 0 # Enforce non-negativity (takes care of negative round-off errors) pdf = np.maximum(pdf,0) @@ -469,11 +479,11 @@ def ci(self,coverage): # Compute moment-based confidence intervals # Clip at specified box boundaries standardError = norm.ppf(p)*np.sqrt(np.diag(self.covmat)) - confint[:,0] = np.maximum(self.__lb, self.mean.real - standardError) - confint[:,1] = np.minimum(self.__ub, self.mean.real + standardError) + confint[:,0] = np.maximum(self.lb, self.mean.real - standardError) + confint[:,1] = np.minimum(self.ub, self.mean.real + standardError) if iscomplex: - confint[:,0] = confint[:,0] + 1j*np.maximum(self.__lb, self.mean.imag - standardError) - confint[:,1] = confint[:,1] + 1j*np.minimum(self.__ub, self.mean.imag + standardError) + confint[:,0] = confint[:,0] + 1j*np.maximum(self.lb, self.mean.imag - standardError) + confint[:,1] = confint[:,1] + 1j*np.minimum(self.ub, self.mean.imag + standardError) elif self.type=='bootstrap': # Compute bootstrap-based confidence intervals @@ -573,7 +583,7 @@ def 
propagate(self,model,lb=None,ub=None,samples=None): model = lambda p: np.concatenate([model_(p).real,model_(p).imag]) # Get jacobian of model to be propagated with respect to parameters - J = Jacobian(model,parfit,self.__lb,self.__ub) + J = Jacobian(model,parfit,self.lb,self.ub) # Clip at boundaries modelfit = np.maximum(modelfit,lb) @@ -635,8 +645,8 @@ def to_dict(self): 'std': self.std, 'covmat': self.covmat, 'nparam': self.nparam, - 'lb': self.__lb, - 'ub': self.__ub + 'lb': self.lb, + 'ub': self.ub } elif self.type == 'profile': if self.__threshold_inputs is None: From c64ab8b083e0351da2b910fe4ae789bfac6a127b Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Mon, 13 Apr 2026 16:28:00 +0200 Subject: [PATCH 28/29] Test saving of UQResult --- deerlab/io.py | 2 +- test/test_io.py | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/deerlab/io.py b/deerlab/io.py index 8c2215869..056f15a95 100644 --- a/deerlab/io.py +++ b/deerlab/io.py @@ -8,7 +8,7 @@ import re supported_formats = ['hdf5', 'json', 'toml'] -supported_types = ['FitResult', 'UQResult', 'Model'] +supported_types = ['FitResult', 'UQResult', ] #======================================================================================= diff --git a/test/test_io.py b/test/test_io.py index e52be8a05..de257bb5d 100644 --- a/test/test_io.py +++ b/test/test_io.py @@ -11,6 +11,8 @@ import h5py import io +from test_uqresult import uncertainty_quantification_simulation + # Shared test data t = np.linspace(-0.5, 5, 100) r = np.linspace(2, 5, 50) @@ -134,3 +136,22 @@ def test_fitresult_save_load_toml(fitresult): # ====================================================================== + +@pytest.mark.parametrize('method', ['moment', 'bootstrap', 'profile']) +def test_UQResult_saving(uncertainty_quantification_simulation, method): + "Check that UQResult can be saved and loaded from a JSON string" + + # Retrieve the results of the mock simulation + uq_objects, references = 
uncertainty_quantification_simulation + uq = uq_objects[method] + + json_str = json_dumps(uq) + assert isinstance(json_str, str) + assert len(json_str) > 0 + UQresult_loaded = json_loads(json_str) + assert isinstance(UQresult_loaded, dl.UQResult) + assert np.allclose(UQresult_loaded.mean, uq.mean) + assert UQresult_loaded.type == uq.type + assert np.allclose(UQresult_loaded.std, uq.std) + assert np.allclose(UQresult_loaded.lb, uq.lb) + assert np.allclose(UQresult_loaded.ub, uq.ub) \ No newline at end of file From d1b183d0947c813c33d960b3ba07ef9a0cd20471 Mon Sep 17 00:00:00 2001 From: Hugo Karas Date: Mon, 13 Apr 2026 16:30:49 +0200 Subject: [PATCH 29/29] Fixed Bug in profile analysis --- deerlab/profile_analysis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deerlab/profile_analysis.py b/deerlab/profile_analysis.py index 06bbe1704..7ad6f2c5c 100644 --- a/deerlab/profile_analysis.py +++ b/deerlab/profile_analysis.py @@ -123,7 +123,7 @@ def profile_analysis(model,y, *args, parameters='all', grids=None, samples=50, n getattr(model, parameter).unfreeze() profile = {'x':np.squeeze(grid),'y':profile} - uqresults[parameter] = UQResult('profile', data=getattr(fitresult,parameter), profiles=profile, threshold=threshold_inputs, noiselvl=noiselvl) + uqresults[parameter] = UQResult('profile', data=getattr(fitresult,parameter), profiles=profile, threshold_inputs=threshold_inputs, noiselvl=noiselvl) uqresults[parameter].profile = uqresults[parameter].profile[0] return uqresults