diff --git a/deerlab/__init__.py b/deerlab/__init__.py
index 43cd8a84d..d11e13484 100644
--- a/deerlab/__init__.py
+++ b/deerlab/__init__.py
@@ -1,4 +1,13 @@
# __init__.py
+from importlib.metadata import version as get_version
+
+try:
+ __VERSION__ = get_version('DeerLab')
+except Exception:
+ __VERSION__ = 'unknown'
+
+from .dd_models import *
+from .bg_models import *
from . import dd_models as _dd_models_mod
from . import bg_models as _bg_models_mod
@@ -32,3 +41,4 @@ def __getattr__(name):
from .diststats import diststats
from .profile_analysis import profile_analysis
from .utils import *
+from .io import save, load, json_dumps, json_loads
\ No newline at end of file
diff --git a/deerlab/bg_models.py b/deerlab/bg_models.py
index 0345b0a89..982d213ff 100644
--- a/deerlab/bg_models.py
+++ b/deerlab/bg_models.py
@@ -111,6 +111,7 @@ def _hom3d(t,conc,lam):
return B
# Create model
bg_hom3d = Model(_hom3d,constants='t')
+bg_hom3d.name = 'bg_hom3d'
bg_hom3d.description = 'Background from a homogeneous distribution of spins in a 3D medium'
# Add parameters
bg_hom3d.conc.set(description='Spin concentration', lb=0.01, ub=5000, par0=50, unit='μM')
@@ -148,6 +149,7 @@ def _hom3dphase(t,conc,lam):
return B
# Create model
bg_hom3d_phase = Model(_hom3dphase,constants='t')
+bg_hom3d_phase.name = 'bg_hom3d_phase'
bg_hom3d_phase.description = 'Phase shift from a homogeneous distribution of spins in a 3D medium'
# Add parameters
bg_hom3d_phase.conc.set(description='Spin concentration', lb=0.01, ub=5000, par0=50, unit='μM')
@@ -198,6 +200,7 @@ def _hom3dex(t,conc,rex,lam):
return B
# Create model
bg_hom3dex = Model(_hom3dex,constants='t')
+bg_hom3dex.name = 'bg_hom3dex'
bg_hom3dex.description = 'Background from a homogeneous distribution of spins with excluded volume'
# Add parameters
bg_hom3dex.conc.set(description='Spin concentration', lb=0.01, ub=5000, par0=50, unit='μM')
@@ -252,6 +255,7 @@ def _hom3dex_phase(t,conc,rex,lam):
return B
# Create model
bg_hom3dex_phase = Model(_hom3dex_phase,constants='t')
+bg_hom3dex_phase.name = 'bg_hom3dex_phase'
bg_hom3dex_phase.description = 'Phase shift from a homogeneous distribution of spins with excluded volume'
# Add parameters
bg_hom3dex_phase.conc.set(description='Spin concentration', lb=0.01, ub=5000, par0=50, unit='μM')
@@ -297,6 +301,7 @@ def _homfractal(t,fconc,fdim,lam):
# ======================================================================
# Create model
bg_homfractal = Model(_homfractal, constants='t')
+bg_homfractal.name = 'bg_homfractal'
bg_homfractal.description = 'Background from homogeneous spin distribution in a space of fractal dimension'
# Add parameters
bg_homfractal.fconc.set(description='Fractal concentration of spins', lb=1e-20, ub=1e20, par0=1.0e-6, unit='μmol/dmᵈ')
@@ -342,6 +347,7 @@ def _homfractal_phase(t,fconc,fdim,lam):
# ======================================================================
# Create model
bg_homfractal_phase = Model(_homfractal_phase,constants='t')
+bg_homfractal_phase.name = 'bg_homfractal_phase'
bg_homfractal_phase.description = 'Phase shift from a homogeneous distribution of spins in a fractal medium'
# Add parameters
bg_homfractal_phase.fconc.set(description='Fractal concentration of spins', lb=1e-20, ub=1e20, par0=1.0e-6, unit='μmol/dmᵈ')
@@ -368,6 +374,7 @@ def _exp(t,decay):
return np.exp(-decay*np.abs(t))
# Create model
bg_exp = Model(_exp,constants='t')
+bg_exp.name = 'bg_exp'
bg_exp.description = 'Exponential background model'
# Add parameters
bg_exp.decay.set(description='Decay rate', lb=0, ub=200, par0=0.35, unit='μs⁻¹')
@@ -393,6 +400,7 @@ def _strexp(t,decay,stretch):
return np.exp(-decay*abs(t)**stretch)
# Create model
bg_strexp = Model(_strexp,constants='t')
+bg_strexp.name = 'bg_strexp'
bg_strexp.description = 'Stretched exponential background model'
# Add parameters
bg_strexp.decay.set(description='Decay rate', lb=0, ub=200, par0=0.25, unit='μs⁻¹')
@@ -416,6 +424,7 @@ def _prodstrexp(t,decay1,stretch1,decay2,stretch2):
return strexp1*strexp2
# Create model
bg_prodstrexp = Model(_prodstrexp,constants='t')
+bg_prodstrexp.name = 'bg_prodstrexp'
bg_prodstrexp.description = 'Product of two stretched exponentials background model'
# Add parameters
bg_prodstrexp.decay1.set(description='Decay rate of 1st component', lb=0, ub=200, par0=0.25, unit='μs⁻¹')
@@ -441,6 +450,7 @@ def _sumstrexp(t,decay1,stretch1,weight1,decay2,stretch2):
return weight1*strexp1 + (1-weight1)*strexp2
# Create model
bg_sumstrexp = Model(_sumstrexp,constants='t')
+bg_sumstrexp.name = 'bg_sumstrexp'
bg_sumstrexp.description = 'Sum of two stretched exponentials background model'
# Add parameters
bg_sumstrexp.decay1.set(description='Decay rate of 1st component', lb=0, ub=200, par0=0.25, unit='μs⁻¹')
@@ -464,6 +474,7 @@ def _poly1(t,p0,p1):
return np.polyval([p1,p0],abs(t))
# Create model
bg_poly1 = Model(_poly1,constants='t')
+bg_poly1.name = 'bg_poly1'
bg_poly1.description = 'Polynomial 1st-order background model'
# Add parameters
bg_poly1.p0.set(description='Intercept', lb=0, ub=200, par0=1, unit='')
@@ -485,6 +496,7 @@ def _poly2(t,p0,p1,p2):
return np.polyval([p2,p1,p0],abs(t))
# Create model
bg_poly2 = Model(_poly2,constants='t')
+bg_poly2.name = 'bg_poly2'
bg_poly2.description = 'Polynomial 2nd-order background model'
# Add parameters
bg_poly2.p0.set(description='Intercept', lb=0, ub=200, par0=1, unit='')
@@ -506,6 +518,7 @@ def _poly3(t,p0,p1,p2,p3):
return np.polyval([p3,p2,p1,p0],abs(t))
# Create model
bg_poly3 = Model(_poly3,constants='t')
+bg_poly3.name = 'bg_poly3'
bg_poly3.description = 'Polynomial 3rd-order background model'
# Add parameters
bg_poly3.p0.set(description='Intercept', lb=0, ub=200, par0=1, unit='')
diff --git a/deerlab/classes.py b/deerlab/classes.py
index 16f0579e9..4dd1e3be1 100644
--- a/deerlab/classes.py
+++ b/deerlab/classes.py
@@ -3,7 +3,7 @@
import numpy as np
from deerlab.utils import Jacobian, nearest_psd
-from scipy.stats import norm
+from scipy.stats import norm, chi2
from scipy.signal import fftconvolve
from scipy.linalg import block_diag
from scipy.optimize import brentq
@@ -51,14 +51,21 @@ class UQResult:
profile : ndarray
Likelihood profile of the parameters. Only available for ``type='profile'``.
- threshold : scalar
- Treshold value used for the profile method. Only available for ``type='profile'``.
+ threshold : function
+        Threshold function used for the profile method. Only available for ``type='profile'``.
+
+ lb : ndarray
+ Lower bounds of the parameters.
+
+ ub : ndarray
+ Upper bounds of the parameters.
"""
- def __init__(self,uqtype,data=None,covmat=None,lb=None,ub=None,threshold=None,profiles=None,noiselvl=None):
+ def __init__(self,uqtype,data=None,covmat=None,lb=None,ub=None,
+ threshold_inputs=None,threshold=None,profiles=None,noiselvl=None):
r"""
Initializes the UQResult object.
@@ -85,9 +92,15 @@ def __init__(self,uqtype,data=None,covmat=None,lb=None,ub=None,threshold=None,pr
ub : ndarray
Upper bounds of the parameter estimates. Only applicable if ``uqtype='moment'``.
- threshold : float
- Threshold value used for the likelihood profile method. Only applicable if ``uqtype='profile'``.
+ threshold_inputs : dict, optional
+ Dictionary containing the inputs to compute the threshold value for the likelihood profile method. Required for ``uqtype='profile'``, unless ``threshold`` supplied. The dictionary should contain the following keys and values:
+ - 'Npoints': Number of points in the fit
+ - 'cost': The cost of the fit
+
+ threshold : function
+ Function that takes the coverage (confidence level) as input and returns the corresponding threshold value for the likelihood profile method. Only applicable if ``uqtype='profile'``. If not provided, the threshold value is computed based on the inputs provided in ``threshold_inputs``.
+
profiles : list
List of likelihood profiles for each parameter. Each element of the list should be a tuple of arrays
``(x, y)``, where ``x`` represents the values of the parameter and ``y`` represents the corresponding likelihoods.
@@ -111,7 +124,15 @@ def __init__(self,uqtype,data=None,covmat=None,lb=None,ub=None,threshold=None,pr
self.__parfit = data
self.__noiselvl = noiselvl
self.profile = profiles
- self.threshold = threshold
+ if callable(threshold):
+ self.__threshold_inputs=None
+ self.threshold = threshold
+ elif isinstance(threshold_inputs,dict):
+ self.__threshold_inputs = threshold_inputs
+ self.threshold = lambda coverage: noiselvl**2*chi2.ppf(coverage, df=1)/threshold_inputs['Npoints'] + threshold_inputs['cost']
+ else:
+ raise ValueError('For uqtype ''profile'', either a threshold function or a dictionary with the inputs to compute the threshold must be provided.')
+
nParam = len(np.atleast_1d(data))
elif uqtype == 'bootstrap':
@@ -129,15 +150,19 @@ def __init__(self,uqtype,data=None,covmat=None,lb=None,ub=None,threshold=None,pr
else:
raise NameError('uqtype not found. Must be: ''moment'', ''bootstrap'' or ''void''.')
+ # Set default bounds if not provided
if lb is None:
lb = np.full(nParam, -np.inf)
if ub is None:
ub = np.full(nParam, np.inf)
+
+ lb = np.array([(-np.inf if v is None else v) for v in lb])
+ ub = np.array([(np.inf if v is None else v) for v in ub])
# Set private variables
- self.__lb = lb
- self.__ub = ub
+ self.lb = lb
+ self.ub = ub
self.nparam = nParam
# Create confidence intervals structure
@@ -219,8 +244,8 @@ def join(self,*args):
# Original metadata
newargs.append(self.mean)
newargs.append(self.covmat)
- newargs.append(self.__lb)
- newargs.append(self.__ub)
+ newargs.append(self.lb)
+ newargs.append(self.ub)
elif self.type=='bootstrap':
newargs.append(self.samples)
@@ -334,8 +359,8 @@ def pardist(self,n=0):
pdf = fftconvolve(pdf, kernel, mode='same')
# Clip the distributions outside the boundaries
- pdf[x < self.__lb[n]] = 0
- pdf[x > self.__ub[n]] = 0
+ pdf[x < self.lb[n]] = 0
+ pdf[x > self.ub[n]] = 0
# Enforce non-negativity (takes care of negative round-off errors)
pdf = np.maximum(pdf,0)
@@ -454,11 +479,11 @@ def ci(self,coverage):
# Compute moment-based confidence intervals
# Clip at specified box boundaries
standardError = norm.ppf(p)*np.sqrt(np.diag(self.covmat))
- confint[:,0] = np.maximum(self.__lb, self.mean.real - standardError)
- confint[:,1] = np.minimum(self.__ub, self.mean.real + standardError)
+ confint[:,0] = np.maximum(self.lb, self.mean.real - standardError)
+ confint[:,1] = np.minimum(self.ub, self.mean.real + standardError)
if iscomplex:
- confint[:,0] = confint[:,0] + 1j*np.maximum(self.__lb, self.mean.imag - standardError)
- confint[:,1] = confint[:,1] + 1j*np.minimum(self.__ub, self.mean.imag + standardError)
+ confint[:,0] = confint[:,0] + 1j*np.maximum(self.lb, self.mean.imag - standardError)
+ confint[:,1] = confint[:,1] + 1j*np.minimum(self.ub, self.mean.imag + standardError)
elif self.type=='bootstrap':
# Compute bootstrap-based confidence intervals
@@ -558,7 +583,7 @@ def propagate(self,model,lb=None,ub=None,samples=None):
model = lambda p: np.concatenate([model_(p).real,model_(p).imag])
# Get jacobian of model to be propagated with respect to parameters
- J = Jacobian(model,parfit,self.__lb,self.__ub)
+ J = Jacobian(model,parfit,self.lb,self.ub)
# Clip at boundaries
modelfit = np.maximum(modelfit,lb)
@@ -600,4 +625,109 @@ def propagate(self,model,lb=None,ub=None,samples=None):
#--------------------------------------------------------------------------------
+    def to_dict(self):
+        """
+        Convert the UQResult object to a dictionary.
+
+        This method converts the UQResult object to a dictionary format, which can be useful for serialization or for easier access to the attributes of the object.
+        The keys of the dictionary correspond to the attributes of the UQResult object, and the values are the corresponding attribute values.
+
+        Returns
+        -------
+        uq_dict : dict
+            A dictionary representation of the UQResult object, containing all its attributes and their corresponding values.
+        """
+        if self.type == 'moment':
+            uq_dict = {
+                'type': self.type,
+                'mean': self.mean,
+                'median': self.median,
+                'std': self.std,
+                'covmat': self.covmat,
+                'nparam': self.nparam,
+                'lb': self.lb,
+                'ub': self.ub
+            }
+        elif self.type == 'profile':
+            if self.__threshold_inputs is None:
+                raise ValueError('The threshold function was supplied directly, so the inputs to compute the threshold are not available. The to_dict method cannot be used in this case.')
+            uq_dict = {
+                'type': self.type,
+                'mean': self.mean,
+                'median': self.median,
+                'std': self.std,
+                'covmat': self.covmat,
+                'nparam': self.nparam,
+                'profile': self.profile,
+                'threshold_inputs': self.__threshold_inputs,
+                'data': self.__parfit,
+                'noiselvl': self.__noiselvl
+            }
+        elif self.type == 'bootstrap':
+            uq_dict = {
+                'type': self.type,
+                'mean': self.mean,
+                'median': self.median,
+                'std': self.std,
+                'covmat': self.covmat,
+                'nparam': self.nparam,
+                'samples': getattr(self, 'samples', None),
+                'data': getattr(self, '_UQResult__parfit', None),       # double-underscore attrs are name-mangled; plain '__parfit' never matches
+                'noiselvl': getattr(self, '_UQResult__noiselvl', None)  # same mangling applies to '__noiselvl'
+            }
+        return uq_dict
+
+    @classmethod
+    def from_dict(cls, uq_dict):
+        """
+        Create a UQResult object from a dictionary.
+
+        This method creates a UQResult object from a dictionary representation, which can be useful for deserialization or for creating a UQResult object from a set of attributes stored in a dictionary.
+        The keys of the input dictionary should correspond to the attributes of the UQResult object, and the values should be the corresponding attribute values.
+
+        Parameters
+        ----------
+        uq_dict : dict
+            A dictionary containing the attributes and their corresponding values to create a UQResult object.
+
+        Returns
+        -------
+        uq_result : :ref:`UQResult`
+            A UQResult object created from the input dictionary, with its attributes set according to the values in the dictionary.
+        """
+
+        if 'type' not in uq_dict:
+            raise KeyError("The input dictionary must contain the key 'type' to specify the type of UQResult to be created.")
+
+        elif uq_dict['type'] == 'profile':
+            return cls(
+                uqtype='profile',
+                data=uq_dict.get('data', None),
+                covmat=None,
+                lb=uq_dict.get('lb', None),
+                ub=uq_dict.get('ub', None),
+                threshold_inputs=uq_dict.get('threshold_inputs'),
+                profiles=uq_dict.get('profile'),
+                noiselvl=uq_dict.get('noiselvl')
+            )
+        elif uq_dict['type'] == 'bootstrap':
+            return cls(
+                uqtype=uq_dict.get('type', 'void'),
+                data=uq_dict.get('samples', None),
+                covmat=None,
+                lb=uq_dict.get('lb', None),
+                ub=uq_dict.get('ub', None),
+            )
+        elif uq_dict['type'] == 'moment':
+            # Using cls(...) instead of a hard-coded UQResult(...) keeps subclasses round-trippable.
+            return cls(
+                uqtype=uq_dict.get('type', 'void'),
+                data=uq_dict.get('mean', None),
+                covmat=uq_dict.get('covmat', None),
+                lb=uq_dict.get('lb', None),
+                ub=uq_dict.get('ub', None),
+                threshold=uq_dict.get('threshold', None),
+                profiles=uq_dict.get('profile', None)
+            )
+
# =========================================================================
diff --git a/deerlab/dd_models.py b/deerlab/dd_models.py
index 2bca33785..e0668b253 100644
--- a/deerlab/dd_models.py
+++ b/deerlab/dd_models.py
@@ -159,6 +159,7 @@ def _gauss(r,mean,std):
return _multigaussfun(r,mean,std)
# Create model
dd_gauss = Model(_gauss,constants='r')
+dd_gauss.name = 'dd_gauss'
dd_gauss.description = 'Gaussian distribution model'
# Parameters
dd_gauss.mean.set(description='Mean', lb=1.0, ub=20, par0=3.5, unit='nm')
@@ -182,6 +183,7 @@ def _gauss2(r,mean1,std1,mean2,std2):
return _multigaussfun(r,[mean1,mean2],[std1,std2])
# Create model
dd_gauss2 = Model(_gauss2,constants='r')
+dd_gauss2.name = 'dd_gauss2'
dd_gauss2.description = 'Sum of two Gaussian distributions model'
# Parameters
dd_gauss2.mean1.set(description='1st Gaussian mean', lb=1.0, ub=20, par0=2.5, unit='nm')
@@ -209,6 +211,7 @@ def _gauss3(r,mean1,std1,mean2,std2,mean3,std3):
return _multigaussfun(r,[mean1,mean2,mean3],[std1,std2,std3])
# Create model
dd_gauss3 = Model(_gauss3,constants='r')
+dd_gauss3.name = 'dd_gauss3'
dd_gauss3.description = 'Sum of three Gaussian distributions model'
# Parameters
dd_gauss3.mean1.set(description='1st Gaussian mean', lb=1.0, ub=20, par0=2.5, unit='nm')
@@ -233,7 +236,7 @@ def _gauss3(r,mean1,std1,mean2,std2,mean3,std3):
.. raw:: html
-
+
:math:`P(r) = \frac{\beta}{2\sigma\Gamma(1/\beta)}\exp\left(-\left(\frac{|r-\left|}{\sigma}\right)^\beta \right)`
@@ -245,6 +248,7 @@ def _gengauss(r,mean,std,beta):
return _normalize(r,P)
# Create model
dd_gengauss = Model(_gengauss,constants='r')
+dd_gengauss.name = 'dd_gengauss'
dd_gengauss.description = 'Generalized Gaussian distribution model'
# Parameters
dd_gengauss.mean.set(description='Mean', lb=1.0, ub=20, par0=3.5, unit='nm')
@@ -263,7 +267,7 @@ def _gengauss(r,mean,std,beta):
.. raw:: html
-
+
:math:`P(r) = \frac{1}{\sqrt{2\pi}}\exp\left(-\frac{(r-\left)^2}{2\sigma^2}\right)\left(1 + \mathrm{erf}\left(\alpha\frac{(r-\left)}{\sqrt{2}\sigma}\right) \right)`
@@ -276,6 +280,7 @@ def _skewgauss(r,center,std,skew):
return _normalize(r,P)
# Create model
dd_skewgauss = Model(_skewgauss,constants='r')
+dd_skewgauss.name = 'dd_skewgauss'
dd_skewgauss.description = 'Skew Gaussian distribution model'
# Parameters
dd_skewgauss.center.set(description='Center', lb=1.0, ub=20, par0=3.5, unit='nm')
@@ -300,6 +305,7 @@ def _rice(r,location,spread):
return _multirice3dfun(r,[location],[spread])
# Create model
dd_rice = Model(_rice,constants='r')
+dd_rice.name = 'dd_rice'
dd_rice.description = '3D-Rice distribution model'
# Parameters
dd_rice.location.set(description='Location', lb=1.0, ub=20, par0=3.5, unit='nm')
@@ -326,6 +332,7 @@ def _rice2(r,location1,spread1,location2,spread2):
return _multirice3dfun(r,[location1,location2],[spread1,spread2])
# Create model
dd_rice2 = Model(_rice2,constants='r')
+dd_rice2.name = 'dd_rice2'
dd_rice2.description = 'Sum of two 3D-Rice distributions model'
# Parameters
dd_rice2.location1.set(description='1st Rician location', lb=1.0, ub=20, par0=2.5, unit='nm')
@@ -356,6 +363,7 @@ def _rice3(r,location1,spread1,location2,spread2,location3,spread3):
return _multirice3dfun(r,[location1,location2,location3],[spread1,spread2,spread3])
# Create model
dd_rice3 = Model(_rice3,constants='r')
+dd_rice3.name = 'dd_rice3'
dd_rice3.description = 'Sum of two 3D-Rice distributions model'
# Parameters
dd_rice3.location1.set(description='1st Rician location', lb=1.0, ub=20, par0=2.5, unit='nm')
@@ -380,7 +388,7 @@ def _rice3(r,location1,spread1,location2,spread2,location3,spread3):
.. raw:: html
-
+
:math:`P(r) = \frac{3}{(2\pi\nu_0)^{3/2}}4\pi r^2\exp(-\frac{3 r^2}{\nu_0})`
@@ -398,6 +406,7 @@ def _randcoil(r,Nres,scaling,length):
return P
# Create model
dd_randcoil = Model(_randcoil,constants='r')
+dd_randcoil.name = 'dd_randcoil'
dd_randcoil.description = 'Random-coil model for an unfolded peptide/protein'
# Parameters
dd_randcoil.Nres.set(description='Number of residues', lb=2.0, ub=1000, par0=50, unit='')
@@ -428,6 +437,7 @@ def _circle(r,center,radius):
return P
# Create model
dd_circle = Model(_circle,constants='r')
+dd_circle.name = 'dd_circle'
dd_circle.description = 'Semicircle distribution model'
# Parameters
dd_circle.center.set(description='Center', lb=1, ub=20, par0=3, unit='nm')
@@ -456,6 +466,7 @@ def _rcos(r,center,fwhm):
return P
# Create model
dd_cos = Model(_rcos,constants='r')
+dd_cos.name = 'dd_cos'
dd_cos.description = 'Raised-cosine parametric model'
# Parameters
dd_cos.center.set(description='Center', lb=1, ub=20, par0=3, unit='nm')
@@ -503,7 +514,7 @@ def _pbs(r,R1,R2):
.. raw:: html
-
+
:math:`P(r) = \left(R_2^6 P_\mathrm{B}(r|R_2) - R_1^6 P_\mathrm{B}(r|R_1) - 2(r_2^3 - r_1^3)P_\mathrm{BS}(r|R_1,R_2)\right)/(R_2^3 - R_1^3)^2`
@@ -538,6 +549,7 @@ def _shell(r,radius,thickness):
return P
# Create model
dd_shell = Model(_shell,constants='r')
+dd_shell.name = 'dd_shell'
dd_shell.description = 'Uniform distribution of particles on a spherical shell'
# Parameters
dd_shell.radius.set(description='Inner shell radius', lb=0.1, ub=20, par0=1.5, unit='nm')
@@ -556,7 +568,7 @@ def _shell(r,radius,thickness):
.. raw:: html
-
+
:math:`P(r) = \begin{cases} \frac{3r(R^2-(d-r)^2)}{4dR^3} \quad \text{for} \quad d-R \leq r < d+R \\ 0 \quad \text{for} \quad \text{otherwise} \end{cases}`
@@ -578,6 +590,7 @@ def _spherepoint(r,radius,dist):
return P
# Create model
dd_spherepoint = Model(_spherepoint,constants='r')
+dd_spherepoint.name = 'dd_spherepoint'
dd_spherepoint.description = 'One particle distanced from particles uniformly distributed on a sphere'
# Parameters
dd_spherepoint.radius.set(description='Sphere radius', lb=0.1, ub=20, par0=1.5, unit='nm')
@@ -596,7 +609,7 @@ def _spherepoint(r,radius,dist):
.. raw:: html
-
+
:math:`P(r) = \begin{cases} \frac{r}{2R^2} \quad \text{for} \quad 0 \leq r < 2R \\ 0 \quad \text{for} \quad \text{otherwise} \end{cases}`
@@ -617,6 +630,7 @@ def _spheresurf(r,radius):
return P
# Create model
dd_spheresurf = Model(_spheresurf,constants='r')
+dd_spheresurf.name = 'dd_spheresurf'
dd_spheresurf.description = "Particles uniformly distributed on a sphere's surface."
# Parameters
dd_spheresurf.radius.set(description='Sphere radius', lb=0.1, ub=20, par0=2.5, unit='nm')
@@ -633,7 +647,7 @@ def _spheresurf(r,radius):
.. raw:: html
-
+
:math:`P(r) = (R_1^3(R_2^3 - R_1^3)P_\mathrm{BS}(r|R_1,R_2) - R_1^3(R_3^3 - R_1^3)P_\mathrm{BS}(r|R_1,R_3) - R_2^3(R_3^3 - R_2^3)P_\mathrm{BS}(r|R_2,R_3))/((R_3^3 - R_2^3)(R_2^3 - R_1^3))`
@@ -675,6 +689,7 @@ def _shellshell(r,radius,thickness1,thickness2):
return P
# Create model
dd_shellshell = Model(_shellshell,constants='r')
+dd_shellshell.name = 'dd_shellshell'
dd_shellshell.description = 'Particles uniformly distributed on a spherical shell and on another concentric spherical shell.'
# Parameters
dd_shellshell.radius.set(description='Inner shell radius', lb=0.1, ub=20, par0=1.5, unit='nm')
@@ -692,7 +707,7 @@ def _shellshell(r,radius,thickness1,thickness2):
.. raw:: html
-
+
:math:`P(r) = \frac{3}{16R_1^3(R_2^3 - R_1^3)}\begin{cases} 12r^3R_1^2 - r^5 \quad \text{for} \quad 0\leq r < \min(2R_1,R_2 - R_1) \\ 8r^2(R_2^3 - R_1^3) - 3r(R_2^2 - R_1^2)^2 - 6r^3(R_2 - R_1)(R_2 + R_1) \quad \text{for} \quad R_2-R_1 \leq r < 2R_1 \\ 16r^2R_1^3 \quad \text{for} \quad 2R_1\leq r < R_2 - R_1 \\ r^5 - 6r^3(R_2^2 + R_1^2) + 8r^2(R_2^3 + R_1^3) - 3r(R_2^2 - R1_2)^2 \quad \text{for} \quad \max(R_2-R_1,2R_1) \leq r < R_1+R_2 \\ 0 \quad \text{for} \quad \text{otherwise} \end{cases}`
@@ -719,6 +734,7 @@ def _shellsphere(r,radius,thickness):
return P
# Create model
dd_shellsphere = Model(_shellsphere,constants='r')
+dd_shellsphere.name = 'dd_shellsphere'
dd_shellsphere.description = 'Particles uniformly distributed on a sphere and on an outer spherical shell.'
# Parameters
dd_shellsphere.radius.set(description='Inner shell radius', lb=0.1, ub=20, par0=1.5, unit='nm')
@@ -736,7 +752,7 @@ def _shellsphere(r,radius,thickness):
.. raw:: html
-
+
:math:`P(r) = \left(R_1^3((R_3^3 - R_1^3)P_\mathrm{BS}(r|R_1,R_3) - (R_4^3 - R_1^3)P_\mathrm{BS}(r|R_1,R_4)) + R_2^3((R_4^3 - R_2^3)P_\mathrm{BS}(r|R_2,R_4) - (R_3^3 - R_2^3)P_\mathrm{BS}(r|R_2,R_3)) \right)/((R_4^3 - R_3^3)(R_2^3 - R_1^3))`
@@ -787,6 +803,7 @@ def _shellvoidshell(r,radius,thickness1,thickness2,separation):
return P
# Create model
dd_shellvoidshell = Model(_shellvoidshell,constants='r')
+dd_shellvoidshell.name = 'dd_shellvoidshell'
dd_shellvoidshell.description = 'Particles uniformly distributed on a spherical shell and on another concentric spherical shell separated by a void.'
# Parameters
dd_shellvoidshell.radius.set(description='Inner shell radius', lb=0.1, ub=20, par0=0.75, unit='nm')
@@ -807,7 +824,7 @@ def _shellvoidshell(r,radius,thickness1,thickness2,separation):
.. raw:: html
-
+
@@ -852,6 +869,7 @@ def _shellvoidsphere(r,radius,thickness,separation):
return P
# Create model
dd_shellvoidsphere = Model(_shellvoidsphere,constants='r')
+dd_shellvoidsphere.name = 'dd_shellvoidsphere'
dd_shellvoidsphere.description = 'Particles uniformly distributed on a sphere and on a concentric outer spherical shell separated by a void.'
# Parameters
dd_shellvoidsphere.radius.set(description='Sphere radius', lb=0.1, ub=20, par0=1.5, unit='nm')
@@ -872,7 +890,7 @@ def _shellvoidsphere(r,radius,thickness,separation):
.. raw:: html
-
+
:math:`P(r) = \begin{cases} \frac{3r^5}{16R^6} - \frac{9r^3}{4R^4} + \frac{3r^2}{R^3} \quad \text{for} \quad 0 \leq r < 2R \\ 0 \quad \text{for} \quad \text{otherwise} \end{cases}`
@@ -890,6 +908,7 @@ def _sphere(r,radius):
return P
# Create model
dd_sphere = Model(_sphere,constants='r')
+dd_sphere.name = 'dd_sphere'
dd_sphere.description = 'Particles uniformly distributed on a sphere.'
# Parameters
dd_sphere.radius.set(description='Sphere radius', lb=0.1, ub=20, par0=2.5, unit='nm')
@@ -925,6 +944,7 @@ def _triangle(r,mode,left,right):
return P
# Create model
dd_triangle = Model(_triangle,constants='r')
+dd_triangle.name = 'dd_triangle'
dd_triangle.description = 'Triangular distribution model.'
# Parameters
dd_triangle.mode.set(description='Mode', lb=1, ub=20, par0=3.5, unit='nm')
@@ -953,6 +973,7 @@ def _uniform(r,left,right):
return P
# Create model
dd_uniform = Model(_uniform,constants='r')
+dd_uniform.name = 'dd_uniform'
dd_uniform.description = 'Uniform distribution model.'
# Parameters
dd_uniform.left.set(description='Left edge', lb=0.1, ub=6, par0=2.5, unit='nm')
@@ -1000,6 +1021,7 @@ def _wormchain(r,contour,persistence):
return P
# Create model
dd_wormchain = Model(_wormchain,constants='r')
+dd_wormchain.name = 'dd_wormchain'
dd_wormchain.description = 'Worm-like chain model near the rigid limit.'
# Parameters
dd_wormchain.contour.set(description='Contour length', lb=1.5, ub=10, par0=3.7, unit='nm')
@@ -1036,6 +1058,7 @@ def _wormgauss(r,contour,persistence,std):
return P
# Create model
dd_wormgauss = Model(_wormgauss,constants='r')
+dd_wormgauss.name = 'dd_wormgauss'
dd_wormgauss.description = 'Worm-like chain model near the rigid limit with Gaussian convolution.'
# Parameters
dd_wormgauss.contour.set(description='Contour length', lb=1.5, ub=10, par0=3.7, unit='nm')
diff --git a/deerlab/fitresult.py b/deerlab/fitresult.py
index 27d33aa93..3e301822c 100644
--- a/deerlab/fitresult.py
+++ b/deerlab/fitresult.py
@@ -3,6 +3,8 @@
import inspect
import matplotlib.pyplot as plt
import difflib
+from deerlab.classes import UQResult
+from deerlab.model import Model
@@ -16,7 +18,7 @@ class FitResult(dict):
----------
model : ndarray
The fitted model response.
- modelUncert :
+ modelUncert : :ref:`UQResult`
Uncertainty quantification of the fitted model response.
param : ndarray
Fitted parameter vector ordered according to the model parameter indices.
@@ -397,4 +399,89 @@ def plot(self,axis=None,xlabel=None,gof=False,fontsize=13):
label.set_fontsize(fontsize)
return fig
+
+    def to_dict(self):
+        """
+        Converts the FitResult object to a dictionary.
+        This is used internally when saving the results to a file, but it can also be used by the user to convert the results to a dictionary format.
+
+        Returns
+        -------
+        fit_dict : dict
+            Dictionary containing the results of the fit. The keys of the dictionary are the same as the attributes of the FitResult object.
+        """
+        def _prepare_value(obj):  # tag UQResult objects for later reconstruction; recurse into containers
+            if isinstance(obj, UQResult):
+                d = obj.to_dict()
+                d['__type__'] = 'UQResult'
+                return d
+            elif isinstance(obj, list):
+                return [_prepare_value(item) for item in obj]
+            elif isinstance(obj, dict):
+                return {str(k): _prepare_value(v) for k, v in obj.items()}
+            return obj
+
+        output_dict = {}
+
+        for key in self.keys():
+            obj = self[key]
+            if isinstance(obj, (int, float, np.ndarray, list, str)):
+                output_dict[str(key)] = _prepare_value(obj)
+            elif isinstance(obj, dict):
+                output_dict[str(key)] = _prepare_value(obj)
+            elif isinstance(obj, FitResult):  # nested FitResults are not serialized
+                print('Warning: Skipping fitresult object for key:', key)
+            elif isinstance(obj, Model):  # Model objects are not serializable
+                print('Warning: Skipping model object for key:', key)
+            elif isinstance(obj, UQResult):
+                output_dict[str(key)] = _prepare_value(obj)
+            elif obj is None:
+                output_dict[str(key)] = None
+            elif callable(obj):  # functions/lambdas cannot be serialized
+                continue
+            else:
+                print(f"Skipping key '{key}' of type {type(obj)}")
+
+        # convert paramlist entries (Parameter objects) to plain strings
+        if 'paramlist' in output_dict:
+            output_dict['paramlist'] = [str(item) for item in output_dict['paramlist']]
+
+        # convert _param_idx to a list of lists of int
+        if '_param_idx' in output_dict:
+            output_dict['_param_idx'] = [list(item) for item in output_dict['_param_idx']]
+
+        return output_dict
+
+ @classmethod
+ def from_dict(cls, fit_dict):
+ """
+ Creates a FitResult object from a dictionary. This is used internally when loading the results from a file, but it can also be used by the user to create a FitResult object from a dictionary format.
+
+ Parameters
+ ----------
+ fit_dict : dict
+ Dictionary containing the results of the fit. The keys of the dictionary are the same as the attributes of the FitResult object.
+
+ Returns
+ -------
+ fit_result : FitResult
+ FitResult object created from the input dictionary.
+
+ """
+
+ def _resolve_deerlab_types(obj):
+ if isinstance(obj, dict):
+ if obj.get('__type__') == 'UQResult':
+ resolved = {k: _resolve_deerlab_types(v) for k, v in obj.items() if k != '__type__'}
+ return UQResult.from_dict(resolved)
+ return {k: _resolve_deerlab_types(v) for k, v in obj.items()}
+ elif isinstance(obj, list):
+ return [_resolve_deerlab_types(item) for item in obj]
+ return obj
+
+
+ fit_result = cls()
+ fit_result.update(_resolve_deerlab_types(fit_dict))
+
+ return fit_result
# ===========================================================================================
diff --git a/deerlab/io.py b/deerlab/io.py
new file mode 100644
index 000000000..056f15a95
--- /dev/null
+++ b/deerlab/io.py
@@ -0,0 +1,411 @@
+import h5py
+from deerlab import FitResult, UQResult, Model, __VERSION__
+import os
+import pathlib
+import numpy as np
+import json
+import tomllib
+import re
+
+supported_formats = ['hdf5', 'json', 'toml']
+supported_types = ['FitResult', 'UQResult', ]
+
+
+#=======================================================================================
+# Serialization helpers (JSON / TOML)
+#=======================================================================================
+
+def _to_serializable(obj):
+ """Recursively convert Python/numpy objects to JSON/TOML-safe primitives."""
+ if isinstance(obj, np.ndarray):
+ if np.issubdtype(obj.dtype, np.complexfloating):
+ return {'__ndarray__': True, 'real': obj.real.tolist(), 'imag': obj.imag.tolist(), 'dtype': str(obj.dtype)}
+ return {'__ndarray__': True, 'data': obj.tolist(), 'dtype': str(obj.dtype)}
+ elif isinstance(obj, complex):
+ return {'__complex__': True, 'real': obj.real, 'imag': obj.imag}
+ elif isinstance(obj, np.integer):
+ return int(obj)
+ elif isinstance(obj, np.floating):
+ return float(obj)
+ elif isinstance(obj, np.bool_):
+ return bool(obj)
+ elif isinstance(obj, dict):
+ return {str(k): _to_serializable(v) for k, v in obj.items()}
+ elif isinstance(obj, (list, tuple)):
+ return [_to_serializable(i) for i in obj]
+ elif obj is None:
+ return {'__none__': True}
+ return obj
+
+
+def _from_serializable(obj):
+ """Recursively restore numpy objects from serialized form."""
+ if isinstance(obj, dict):
+ if obj.get('__ndarray__') is True:
+ if 'imag' in obj:
+ return np.array(obj['real'], dtype=float) + 1j * np.array(obj['imag'], dtype=float)
+ return np.array(obj['data'], dtype=obj.get('dtype', 'float64'))
+ elif obj.get('__complex__') is True:
+ return complex(obj['real'], obj['imag'])
+ elif obj.get('__none__') is True:
+ return None
+ return {k: _from_serializable(v) for k, v in obj.items()}
+ elif isinstance(obj, list):
+ return [_from_serializable(i) for i in obj]
+ return obj
+
+
+#=======================================================================================
+# HDF5
+#=======================================================================================
+
+def _create_h5_element(group: h5py.Group, key, value):
+ if isinstance(value, (Model, FitResult, UQResult)):
+ return
+ elif value is None:
+ group.create_dataset(key, data='None')
+ elif isinstance(value, list) and len(value) > 0 and isinstance(value[0], list):
+ subgroup = group.create_group(key)
+ subgroup.attrs['__type__'] = 'list'
+ for i, sublist in enumerate(value):
+ if isinstance(sublist, list):
+ subgroup2 = subgroup.create_group(f'{key}_{i}')
+ subgroup2.attrs['__type__'] = 'list'
+ for j, item in enumerate(sublist):
+ _create_h5_element(subgroup2, f'{key}_{i}_{j}', item)
+ else:
+ subgroup.create_dataset(f'{key}_{i}', data=sublist)
+ elif isinstance(value, dict):
+ subgroup = group.create_group(key)
+ for subkey, subvalue in value.items():
+ _create_h5_element(subgroup, subkey, subvalue)
+ else:
+ group.create_dataset(key, data=value)
+
+
+def _read_hdf5(filename):
+ def _decode_h5_value(x):
+ if isinstance(x, (bytes, np.bytes_)):
+ s = x.decode('utf-8')
+ return None if s == 'None' else s
+ if isinstance(x, np.ndarray):
+ if x.dtype.kind == 'S':
+ return np.char.decode(x, 'utf-8')
+ if x.dtype.kind == 'O':
+ return np.array(
+ [i.decode('utf-8') if isinstance(i, (bytes, np.bytes_)) else i for i in x],
+ dtype=object,
+ )
+ return x
+
+ def _h5_to_python(item):
+ if isinstance(item, h5py.Group):
+ if item.attrs.get('__type__') == 'list':
+ return [_h5_to_python(item[k]) for k in item.keys()]
+ return {subkey: _h5_to_python(item[subkey]) for subkey in item.keys()}
+ else:
+ return _decode_h5_value(item[()])
+
+ with h5py.File(filename, 'r') as file:
+ if file.attrs.get('format') != 'deerlab':
+ raise ValueError(f"Unsupported format '{file.attrs.get('format')}' in file. Only 'deerlab' format is supported.")
+ object_class = file.attrs['object_class']
+ if object_class not in supported_types:
+ raise ValueError(f"Unsupported object type '{object_class}' in file. Supported object types are: {supported_types}")
+
+ raw_dict = {key: _h5_to_python(file[key]) for key in file.keys()}
+ raw_dict['object_class'] = object_class
+
+ return raw_dict
+
+
+#=======================================================================================
+# JSON
+#=======================================================================================
+
+class _NumpyEncoder(json.JSONEncoder):
+ def default(self, obj):
+ if isinstance(obj, np.ndarray):
+ if np.issubdtype(obj.dtype, np.complexfloating):
+ return {'__ndarray__': True, 'real': obj.real.tolist(), 'imag': obj.imag.tolist(), 'dtype': str(obj.dtype)}
+ return {'__ndarray__': True, 'data': obj.tolist(), 'dtype': str(obj.dtype)}
+ elif isinstance(obj, complex):
+ return {'__complex__': True, 'real': obj.real, 'imag': obj.imag}
+ elif isinstance(obj, np.integer):
+ return int(obj)
+ elif isinstance(obj, np.floating):
+ return float(obj)
+ elif isinstance(obj, np.bool_):
+ return bool(obj)
+ return super().default(obj)
+
+
+def _json_object_hook(d):
+ if d.get('__ndarray__') is True:
+ if 'imag' in d:
+ return np.array(d['real'], dtype=float) + 1j * np.array(d['imag'], dtype=float)
+ return np.array(d['data'], dtype=d.get('dtype', 'float64'))
+ elif d.get('__complex__') is True:
+ return complex(d['real'], d['imag'])
+ elif d.get('__none__') is True:
+ return None
+ return d
+
+
+def _save_json(filename, data_dict, object_class):
+ payload = {'__format__': 'deerlab', '__version__': __VERSION__, '__object_class__': object_class}
+ payload.update(data_dict)
+ if isinstance(filename, (str, os.PathLike)):
+ with open(filename, 'w') as f:
+ json.dump(payload, f, cls=_NumpyEncoder, indent=2)
+ else:
+ json.dump(payload, filename, cls=_NumpyEncoder, indent=2)
+
+
+def _load_json(filename):
+ if isinstance(filename, (str, os.PathLike)):
+ with open(filename, 'r') as f:
+ payload = json.load(f, object_hook=_json_object_hook)
+ else:
+ payload = json.load(filename, object_hook=_json_object_hook)
+ if payload.get('__format__') != 'deerlab':
+ raise ValueError("File does not appear to be a deerlab JSON file.")
+ object_class = payload.pop('__object_class__')
+ payload.pop('__format__', None)
+ payload.pop('__version__', None)
+ if object_class not in supported_types:
+ raise ValueError(f"Unsupported object type '{object_class}' in file.")
+ payload['object_class'] = object_class
+ return payload
+
+
+#=======================================================================================
+# TOML
+#=======================================================================================
+
+def _toml_key(key):
+ """Return a valid TOML key, quoting if necessary."""
+ if re.match(r'^[A-Za-z0-9_-]+$', key):
+ return key
+ escaped = key.replace('\\', '\\\\').replace('"', '\\"')
+ return f'"{escaped}"'
+
+
+def _toml_value(val):
+ """Serialize a value to an inline TOML value string."""
+ if isinstance(val, bool):
+ return 'true' if val else 'false'
+ elif isinstance(val, int):
+ return str(val)
+ elif isinstance(val, float):
+ if val != val:
+ return 'nan'
+ elif val == float('inf'):
+ return 'inf'
+ elif val == float('-inf'):
+ return '-inf'
+ return repr(val)
+ elif isinstance(val, str):
+ result = []
+ for ch in val:
+ if ch == '\\':
+ result.append('\\\\')
+ elif ch == '"':
+ result.append('\\"')
+ elif ch == '\n':
+ result.append('\\n')
+ elif ch == '\r':
+ result.append('\\r')
+ elif ch == '\t':
+ result.append('\\t')
+ elif ord(ch) < 0x20 or ord(ch) == 0x7f:
+ result.append(f'\\u{ord(ch):04x}')
+ else:
+ result.append(ch)
+ return '"' + ''.join(result) + '"'
+ elif isinstance(val, (list, tuple)):
+ return '[' + ', '.join(_toml_value(i) for i in val) + ']'
+ elif isinstance(val, dict):
+ items = ', '.join(f'{_toml_key(k)} = {_toml_value(v)}' for k, v in val.items())
+ return '{' + items + '}'
+ else:
+ raise TypeError(f"Cannot serialize type {type(val).__name__} to TOML")
+
+
+def _dict_to_toml(d):
+ """Serialize a dict to a TOML string. Top-level dicts become [sections]."""
+ scalars = []
+ sections = {}
+ for key, val in d.items():
+ if isinstance(val, dict):
+ sections[key] = val
+ else:
+ scalars.append(f'{_toml_key(key)} = {_toml_value(val)}')
+
+ lines = scalars[:]
+ for key, val in sections.items():
+ lines.append(f'\n[{_toml_key(key)}]')
+ for subkey, subval in val.items():
+ lines.append(f'{_toml_key(subkey)} = {_toml_value(subval)}')
+
+ return '\n'.join(lines) + '\n'
+
+
+def _save_toml(filename, data_dict, object_class):
+ meta = {'__format__': 'deerlab', '__version__': __VERSION__, '__object_class__': object_class}
+ serializable = _to_serializable(data_dict)
+ payload = {**meta, **serializable}
+ content = _dict_to_toml(payload)
+ if isinstance(filename, (str, os.PathLike)):
+ with open(filename, 'w', encoding='utf-8') as f:
+ f.write(content)
+ else:
+ filename.write(content.encode('utf-8'))
+
+
+def _load_toml(filename):
+ if isinstance(filename, (str, os.PathLike)):
+ with open(filename, 'rb') as f:
+ payload = tomllib.load(f)
+ else:
+ content = filename.read()
+ if isinstance(content, str):
+ content = content.encode('utf-8')
+ payload = tomllib.loads(content.decode('utf-8'))
+ if payload.get('__format__') != 'deerlab':
+ raise ValueError("File does not appear to be a deerlab TOML file.")
+ object_class = payload.pop('__object_class__')
+ payload.pop('__format__', None)
+ payload.pop('__version__', None)
+ if object_class not in supported_types:
+ raise ValueError(f"Unsupported object type '{object_class}' in file.")
+ result = _from_serializable(payload)
+ result['object_class'] = object_class
+ return result
+
+
+#=======================================================================================
+# Saving
+#=======================================================================================
+
+def save(filename, object, format=None):
+ """
+ Saves a deerlab object (FitResult, UQResult, Model) to file.
+
+ Parameters
+ ----------
+ filename : str, bytes, os.PathLike, or file-like object
+ The name of the file to which the data is saved, or a path-like object.
+ If the file already exists, it will be overwritten. If the file does not exist, a new file will be created.
+ A file-like buffer (e.g. BytesIO) can be used for HDF5 and TOML; a text buffer for JSON.
+ object : dl.FitResult, dl.UQResult, dl.Model
+ The deerlab object to be saved.
+ format : str, optional
+ The format in which to save the data. One of 'hdf5', 'json', 'toml'.
+ If not provided, the format is inferred from the file extension.
+ """
+ if format is None and isinstance(filename, (str, os.PathLike)):
+ ext = pathlib.Path(filename).suffix.lower()
+ if ext in ['.hdf5', '.h5']:
+ format = 'hdf5'
+ elif ext == '.json':
+ format = 'json'
+ elif ext == '.toml':
+ format = 'toml'
+
+ if format not in supported_formats:
+ raise ValueError(f"Unsupported format '{format}'. Supported formats are: {supported_formats}")
+
+ object_class = type(object).__name__
+ if object_class not in supported_types:
+ raise ValueError(f"Unsupported object type '{object_class}'. Supported object types are: {supported_types}")
+
+ if format in ('hdf5', 'h5'):
+ with h5py.File(filename, 'w') as file:
+ file.attrs['format'] = 'deerlab'
+ file.attrs['version'] = __VERSION__
+ file.attrs['object_class'] = object_class
+ for key, value in object.to_dict().items():
+ _create_h5_element(file, key, value)
+ elif format == 'json':
+ _save_json(filename, object.to_dict(), object_class)
+ elif format == 'toml':
+ _save_toml(filename, object.to_dict(), object_class)
+
+
+def json_dumps(object):
+ """Convert a deerlab object to a JSON string."""
+ if not isinstance(object, (FitResult, UQResult, Model)):
+ raise ValueError("Only FitResult, UQResult, and Model objects can be serialized to JSON.")
+ return json.dumps({'__format__': 'deerlab', '__version__': __VERSION__, '__object_class__': type(object).__name__, **object.to_dict()}, cls=_NumpyEncoder, indent=2)
+
+def json_loads(json_string):
+ """Convert a JSON string back to a deerlab object."""
+ payload = json.loads(json_string, object_hook=_json_object_hook)
+ if payload.get('__format__') != 'deerlab':
+ raise ValueError("String does not appear to be a deerlab JSON string.")
+ object_class = payload.pop('__object_class__')
+ payload.pop('__format__', None)
+ payload.pop('__version__', None)
+ if object_class == 'FitResult':
+ return FitResult.from_dict(payload)
+ elif object_class == 'UQResult':
+ return UQResult.from_dict(payload)
+ elif object_class == 'Model':
+ return Model.from_dict(payload)
+ else:
+ raise ValueError(f"Unsupported object type '{object_class}' in JSON string.")
+#=======================================================================================
+# Loading
+#=======================================================================================
+
+def load(filename, format=None):
+ """
+ Loads a deerlab object (FitResult, UQResult, Model) from file.
+
+ Parameters
+ ----------
+ filename : str, bytes, os.PathLike, or file-like object
+ The name of the file from which the data is loaded, or a path-like object. The file must exist.
+ format : str, optional
+ The format of the file. One of 'hdf5', 'json', 'toml'.
+ If not provided, the format is inferred from the file extension.
+
+ Returns
+ -------
+ object : dl.FitResult, dl.UQResult, dl.Model
+ The deerlab object that was loaded from file.
+ """
+ if format is None and isinstance(filename, (str, os.PathLike)):
+ ext = pathlib.Path(filename).suffix.lower()
+ if ext in ['.hdf5', '.h5']:
+ file_format = 'hdf5'
+ elif ext == '.json':
+ file_format = 'json'
+ elif ext == '.toml':
+ file_format = 'toml'
+ else:
+ raise ValueError("Could not identify the file format. Please specify the format explicitly.")
+ elif format is not None:
+ file_format = format.lower()
+ if file_format not in supported_formats:
+ raise ValueError(f"Unsupported format '{file_format}'. Supported formats are: {supported_formats}")
+ else:
+ raise ValueError("Could not identify the file format. Please specify the format explicitly.")
+
+ if file_format in ('hdf5', 'h5'):
+ dict_output = _read_hdf5(filename)
+ elif file_format == 'json':
+ dict_output = _load_json(filename)
+ elif file_format == 'toml':
+ dict_output = _load_toml(filename)
+
+ if dict_output['object_class'] == 'FitResult':
+ dict_output.pop('object_class')
+ return FitResult.from_dict(dict_output)
+ elif dict_output['object_class'] == 'UQResult':
+ dict_output.pop('object_class')
+ return UQResult.from_dict(dict_output)
+ elif dict_output['object_class'] == 'Model':
+ dict_output.pop('object_class')
+ return Model.from_dict(dict_output)
\ No newline at end of file
diff --git a/deerlab/model.py b/deerlab/model.py
index 7538009ec..b92b247f3 100644
--- a/deerlab/model.py
+++ b/deerlab/model.py
@@ -224,6 +224,42 @@ def copy(self):
"""
return deepcopy(self)
+
+ #---------------------------------------------------------------------------------------
+
+ def todict(self):
+ """
+ Return a dictionary with the parameter attributes
+ """
+ return {
+ 'name': self.name,
+ 'description': self.description,
+ 'unit': self.unit,
+ 'par0': self.par0,
+ 'lb': self.lb,
+ 'ub': self.ub,
+ 'frozen': self.frozen,
+ 'value': self.value,
+ 'linear': self.linear
+ }
+ #---------------------------------------------------------------------------------------
+
+ @classmethod
+ def fromdict(cls, param_dict):
+ """
+ Create a Parameter object from a dictionary of attributes
+
+ Parameters
+ ----------
+ param_dict : dict
+ Dictionary containing the parameter attributes. The keys of the dictionary must be the same as the attributes of the Parameter class.
+
+ Returns
+ -------
+ parameter : Parameter object
+ Parameter object created from the input dictionary.
+ """
+ return cls(**param_dict)
#===================================================================================
@@ -243,6 +279,8 @@ class Model():
: :ref:`Parameter`
Model parameter. One :ref:`Parameter` instance is assigned for each
parameter (with name ````) in the model.
+ name : string
+ Name of the model, useful for rebuilding the model from a dictionary.
description : string
Description of the model.
signature : string
@@ -343,6 +381,7 @@ def __init__(self,nonlinfcn,constants=None,signature=None):
nonlinfcn = lambda *_: Amatrix
self.nonlinmodel = nonlinfcn
self.description = None
+ self.name = None
self._constantsInfo = []
self.parents = None
@@ -987,6 +1026,70 @@ def copy(self):
return deepcopy(self)
+ #--------------------------------------------------------------------------------
+
+ def to_dict(self):
+ """
+ Convert the model to a dictionary representation.
+
+ Returns
+ -------
+ model_dict : dict
+ Dictionary representation of the model, containing all the information about the model's parameters and their values.
+ """
+ # Get the model's metadata in vector form
+ metadata = self.getmetadata()
+ # Create a dictionary with the model's metadata
+ model_dict = {
+ 'description': self.description,
+ 'signature': self.signature,
+ 'constants': [entry['argkey'] for entry in self._constantsInfo],
+ 'parameters': []
+ }
+ # Add each parameter's information to the dictionary
+ for name, lb, par0, ub, linear, frozen, unit in zip(metadata['names'], metadata['lb'], metadata['par0'], metadata['ub'], metadata['linear'], metadata['frozen'], metadata['units']):
+ param_dict = {
+ 'name': name,
+ 'lb': lb,
+ 'par0': par0,
+ 'ub': ub,
+ 'linear': linear,
+ 'frozen': frozen,
+ 'unit': unit
+ }
+ model_dict['parameters'].append(param_dict)
+ return model_dict
+
+ @classmethod
+ def from_dict(self, model_dict):
+ """
+ Update the model's parameters and their values from a dictionary representation.
+
+ Parameters
+ ----------
+ model_dict : dict
+ Dictionary representation of the model, containing all the information about the model's parameters and their values.
+ """
+ # Update the model's description and signature
+ self.description = model_dict['description']
+ self.signature = model_dict['signature']
+ # Update the model's constants
+ self._constantsInfo = [{'argkey': argkey, 'argidx': idx} for idx, argkey in enumerate(model_dict['constants'])]
+ # Update the model's parameters
+ for param_dict in model_dict['parameters']:
+ name = param_dict['name']
+ lb = param_dict['lb']
+ par0 = param_dict['par0']
+ ub = param_dict['ub']
+ linear = param_dict['linear']
+ frozen = param_dict['frozen']
+ unit = param_dict['unit']
+ if linear:
+ self.addlinear(name=name, lb=lb, ub=ub, par0=par0, unit=unit)
+ else:
+ self.addnonlinear(key=name, lb=lb, ub=ub, par0=par0, unit=unit)
+ if frozen:
+ getattr(self,name).freeze(par0)
#===================================================================================
#==============================================================================
diff --git a/deerlab/profile_analysis.py b/deerlab/profile_analysis.py
index 8de93e2a4..76bc97d0a 100644
--- a/deerlab/profile_analysis.py
+++ b/deerlab/profile_analysis.py
@@ -77,8 +77,7 @@ def profile_analysis(model,y, *args, parameters='all', grids=None, samples=50, n
fitresult = fit(model, y, *args, **kargs)
# Prepare the statistical threshold function
- threshold = lambda coverage: noiselvl**2*chi2.ppf(coverage, df=1)/len(fitresult.residuals) + fitresult.cost
-
+ threshold_inputs = {'Npoints': len(fitresult.residuals), 'cost': fitresult.cost}
if parameters=='all':
parameters = model._parameter_list()
@@ -129,7 +128,7 @@ def profile_sample(value):
profile = _ProgressParallel(n_jobs=cores,total=len(grid),use_tqdm=verbose)(delayed(profile_sample)(value) for value in grid)
profile = {'x':np.squeeze(grid),'y':profile}
- uqresults[parameter] = UQResult('profile', data=getattr(fitresult,parameter), profiles=profile, threshold=threshold, noiselvl=noiselvl)
+ uqresults[parameter] = UQResult('profile', data=getattr(fitresult,parameter), profiles=profile, threshold_inputs=threshold_inputs, noiselvl=noiselvl)
uqresults[parameter].profile = uqresults[parameter].profile[0]
return uqresults
diff --git a/docsrc/source/changelog.rst b/docsrc/source/changelog.rst
index 0655c6ead..08714a20b 100644
--- a/docsrc/source/changelog.rst
+++ b/docsrc/source/changelog.rst
@@ -24,10 +24,6 @@ Release Notes
- |fix| : Something which was not working as expected or leading to errors has been fixed.
- |api| : This will require changes in your scripts or code.
-
-Release ``v1.1.5`` - January 2025
-- |fix|: Moves to numpy 2.0 as a mimum requirement, and removes all `np.trapz` calls to `np.trapezoid`.
-
Release ``v1.1.4`` - September 2024
------------------------------------------
- |enhancement| : Expanded sophgrid to allow for closed phi integral. (:pr:`482`)
diff --git a/docsrc/source/reference.rst b/docsrc/source/reference.rst
index 652289e6a..569552966 100644
--- a/docsrc/source/reference.rst
+++ b/docsrc/source/reference.rst
@@ -47,6 +47,9 @@ Reference Index
fnnls
cvxnnls
goodness_of_fit
+ save
+ load
+
.. rubric:: Dipolar EPR functions
@@ -72,8 +75,6 @@ Reference Index
:template: custom_function_template.rst
:nosignatures:
- store_pickle
- read_pickle
sophegrid
choleskycovmat
hccm
@@ -83,4 +84,8 @@ Reference Index
ovl
der_snr
formatted_table
+ store_pickle
+ read_pickle
+ json_dumps
+ json_loads
show_config
diff --git a/examples/basic/ex_saving_loading.py b/examples/basic/ex_saving_loading.py
new file mode 100644
index 000000000..f756ed549
--- /dev/null
+++ b/examples/basic/ex_saving_loading.py
@@ -0,0 +1,56 @@
+#%% [markdown]
+"""
+Saving and loading DeerLab fit results
+-------------------------------------------------------------------------
+
+DeerLab FitResult objects can be saved and loaded to file, with all relevant information (fitted model, uncertainties, regularization parameter, etc.) preserved.
+This is useful for archiving results, sharing them with collaborators, or importing fits from DeerAnalysis 2026 into Python for plotting.
+
+"""
+
+
+# %%
+# First we will fit a basic model to generate a result and then save it to file.
+
+import numpy as np
+import matplotlib.pyplot as plt
+import deerlab as dl
+path = '../data/'
+file = 'example_4pdeer_1.DTA'
+
+tau1 = 0.3; tau2 = 4.0; tmin = 0.1;
+
+t,Vexp = dl.deerload(path + file)
+
+# Pre-processing
+Vexp = dl.correctphase(Vexp) # Phase correction
+Vexp = Vexp/np.max(Vexp) # Rescaling (aesthetic)
+t = t - t[0] # Account for zerotime
+t = t + tmin
+# Distance vector
+r = np.arange(2.5,5,0.01) # nm
+
+# Construct the model
+Vmodel = dl.dipolarmodel(t,r, experiment = dl.ex_4pdeer(tau1,tau2, pathways=[1]))
+
+# Fit the model to the data
+results = dl.fit(Vmodel,Vexp)
+
+
+# %%
+# We will then save the results to an HDF5 file.
+# The file can also be saved as a JSON or TOML file, if needed. An HDF5 file is easy to read using MATLAB, Origin or similar software.
+
+dl.save('fit_result.hdf5', results)
+
+#%%
+# The file can then be loaded back into Python, with all information preserved.
+results2 = dl.load('fit_result.hdf5')
+
+# We can check that the loaded results are the same as the original results by comparison
+print('Model result is the same:', np.allclose(results.model, results2.model))
+
+# Built in methods of the FitResult object can be used as normal, e.g. plotting the results
+results2.plot();
+
+# %%
diff --git a/setup.py b/setup.py
index 5d237fc5b..5a81d4407 100644
--- a/setup.py
+++ b/setup.py
@@ -31,6 +31,7 @@
'setuptools>=53.0.0',
'numexpr>=2.7.3',
'quadprog>=0.1.11; python_version <= "3.10"',
+ 'h5py>=3.16'
],
classifiers=[
'Development Status :: 5 - Production/Stable',
diff --git a/test/test_bgmodels.py b/test/test_bgmodels.py
index ef6a690fa..5b8821d26 100644
--- a/test/test_bgmodels.py
+++ b/test/test_bgmodels.py
@@ -30,6 +30,7 @@ def assert_bgmodel_behavior(model):
B_ub = model(t, *ub)
# Assert
+ assert model.name is not None
assert all(B_par0 == B_par0_T)
assert all(~np.isnan(B_par0))
assert all(~np.isnan(B_lb))
diff --git a/test/test_io.py b/test/test_io.py
new file mode 100644
index 000000000..de257bb5d
--- /dev/null
+++ b/test/test_io.py
@@ -0,0 +1,157 @@
+import numpy as np
+import pytest
+import os
+import tempfile
+import deerlab as dl
+from deerlab import dd_gauss, bg_hom3d, FitResult
+from deerlab.dipolarmodel import dipolarmodel
+from deerlab.fit import fit
+from deerlab.io import save, load, json_dumps, json_loads
+from matplotlib.figure import Figure
+import h5py
+import io
+
+from test_uqresult import uncertainty_quantification_simulation
+
+# Shared test data
+t = np.linspace(-0.5, 5, 100)
+r = np.linspace(2, 5, 50)
+
+@pytest.fixture(scope='module')
+def fitresult():
+ Bfcn = lambda t, lam: bg_hom3d(t, 50, lam)
+ Pr = dd_gauss(r, 3, 0.2)
+ V = 1e5 * dl.dipolarkernel(t, r, mod=0.3, bg=Bfcn) @ Pr
+ model = dipolarmodel(t, r, dd_gauss, bg_hom3d, npathways=1)
+ return fit(model, V, ftol=1e-4)
+
+# ======================================================================
+def test_fitresult_to_dict(fitresult):
+ "Check that FitResult can be converted to a dictionary"
+
+ d = fitresult.to_dict()
+
+ assert isinstance(d, dict)
+ assert 'model' in d
+# ======================================================================
+
+def test_fitresult_save_filebuffer(fitresult):
+ "Check that FitResult can be saved to an HDF5 fileIO buffer"
+
+
+ buf = io.BytesIO()
+ print(buf.getbuffer().nbytes)
+ save(buf, fitresult, format='hdf5')
+ assert buf.getbuffer().nbytes > 0
+
+
+# ======================================================================
+
+def test_fitresult_save_hdf5(fitresult):
+ "Check that FitResult can be saved to an HDF5 file"
+
+ with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=False) as f:
+ fname = f.name
+ try:
+ save(fname, fitresult)
+ assert os.path.exists(fname)
+ # Check that the file can be opened and has the expected attributes
+ with h5py.File(fname, 'r') as file:
+ assert file.attrs['format'] == 'deerlab'
+ assert file.attrs['version'] == dl.__VERSION__
+ assert file.attrs['object_class'] == 'FitResult'
+ assert 'model' in file.keys()
+ finally:
+ os.remove(fname)
+
+
+# ======================================================================
+def test_fitresult_save_load_hdf5(fitresult):
+ "Check that FitResult can be saved and loaded from an HDF5 file"
+
+ with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=False) as f:
+ fname = f.name
+ try:
+ save(fname, fitresult)
+ loaded = load(fname)
+ assert isinstance(loaded, FitResult)
+ assert isinstance(loaded.modelUncert,dl.UQResult)
+ assert np.allclose(loaded.model, fitresult.model)
+ assert np.allclose(loaded.regparam, fitresult.regparam)
+ assert isinstance(loaded.plot(), Figure)
+
+ finally:
+ os.remove(fname)
+
+# ======================================================================
+def test_fitresult_to_from_json_string(fitresult):
+ "Check that FitResult can be converted to a JSON string"
+
+ json_str = json_dumps(fitresult)
+ assert isinstance(json_str, str)
+ assert len(json_str) > 0
+ fitresult_loaded = json_loads(json_str)
+ assert isinstance(fitresult_loaded, FitResult)
+ assert np.allclose(fitresult_loaded.model, fitresult.model)
+ assert isinstance(fitresult_loaded.modelUncert,dl.UQResult)
+ assert np.allclose(fitresult_loaded.regparam, fitresult.regparam)
+
+# ======================================================================
+def test_fitresult_save_load_json(fitresult):
+ "Check that FitResult can be saved and loaded from a JSON file"
+
+ with tempfile.NamedTemporaryFile(suffix='.json', delete=False) as f:
+ fname = f.name
+ try:
+ save(fname, fitresult)
+ loaded = load(fname)
+ assert isinstance(loaded, FitResult)
+ assert np.allclose(loaded.model, fitresult.model)
+ assert isinstance(loaded.modelUncert,dl.UQResult)
+ assert np.allclose(loaded.regparam, fitresult.regparam)
+ assert isinstance(loaded.plot(), Figure)
+
+
+ finally:
+ os.remove(fname)
+
+
+# ======================================================================
+def test_fitresult_save_load_toml(fitresult):
+ "Check that FitResult can be saved and loaded from a TOML file"
+
+ with tempfile.NamedTemporaryFile(suffix='.toml', delete=False) as f:
+ fname = f.name
+ try:
+ save(fname, fitresult)
+ loaded = load(fname)
+ assert isinstance(loaded, FitResult)
+ assert np.allclose(loaded.model, fitresult.model)
+ assert isinstance(loaded.modelUncert,dl.UQResult)
+ assert np.allclose(loaded.regparam, fitresult.regparam)
+ assert isinstance(loaded.plot(), Figure)
+
+ finally:
+ os.remove(fname)
+
+
+# ======================================================================
+
+@pytest.mark.parametrize('method', ['moment', 'bootstrap', 'profile'])
+def test_UQResult_saving(uncertainty_quantification_simulation, method):
+ "Check that UQResult can be saved and loaded from a JSON string"
+
+ # Retrieve the results of the mock simulation
+ uq_objects, references = uncertainty_quantification_simulation
+ uq = uq_objects[method]
+
+ json_str = json_dumps(uq)
+ assert isinstance(json_str, str)
+ assert len(json_str) > 0
+ UQresult_loaded = json_loads(json_str)
+ assert isinstance(UQresult_loaded, dl.UQResult)
+ assert np.allclose(UQresult_loaded.mean, uq.mean)
+ assert UQresult_loaded.type == uq.type
+ assert np.allclose(UQresult_loaded.std, uq.std)
+ assert np.allclose(UQresult_loaded.lb, uq.lb)
+ assert np.allclose(UQresult_loaded.ub, uq.ub)
\ No newline at end of file
diff --git a/test/test_uqresult.py b/test/test_uqresult.py
index 56ba00674..7a612668c 100644
--- a/test/test_uqresult.py
+++ b/test/test_uqresult.py
@@ -41,17 +41,17 @@ def uncertainty_quantification_simulation():
pdf2 = dd_gauss(x,means[1],std[1])
pdf1 /= max(pdf1)
pdf2 /= max(pdf2)
- σ = 0.01
+ σ = 0.01 # Noise Level
obj2likelihood = lambda f: 1/np.sqrt(σ*2*np.pi)*np.exp(-1/2*f/σ**2)
likelihood2obj = lambda L: -2*np.log(L*np.sqrt(σ*2*np.pi))*σ**2
- threshold = lambda coverage: σ**2*chi2.ppf(coverage, df=1) + likelihood2obj(max(pdf1))
+ threshold_inputs = {'Npoints': 1, 'cost': likelihood2obj(max(pdf1))}
profile1 = {'y': likelihood2obj(pdf1), 'x':x}
profile2 = {'y': likelihood2obj(pdf2), 'x':x}
# Construct uncertainty quantification objects
uq_moment = UQResult('moment',data=np.array(means),covmat=covmat)
uq_bootstrap = UQResult('bootstrap',data=np.vstack(samples).T)
- uq_profile = UQResult('profile',data=np.array(means),profiles=[profile1,profile2],threshold=threshold,noiselvl=σ)
+ uq_profile = UQResult('profile',data=np.array(means),profiles=[profile1,profile2],threshold_inputs=threshold_inputs,noiselvl=σ)
mock_objects = {'moment': uq_moment, 'bootstrap': uq_bootstrap, 'profile': uq_profile}
references = {'mean': means, 'std': std, 'median': p50, 'ci':[ci50,ci90,ci95], 'percentile': [p5,p50,p95]}
@@ -97,4 +97,22 @@ def test_uncertainty_quantitification_attributes(uncertainty_quantification_simu
pdfs_ref = [dd_gauss(x,mean,sigma) for x,mean,sigma in zip(xs,references['mean'],references['std'])]
assert ovl(pdfs[0],pdfs_ref[0])>0.99
assert ovl(pdfs[1],pdfs_ref[1])>0.99
+
+@pytest.mark.parametrize('method', ['moment', 'bootstrap', 'profile'])
+def test_to_and_from_dict(uncertainty_quantification_simulation, method):
+ """Test the to_dict and from_dict methods of the UQResult class"""
+
+ # Retrieve the results of the mock simulation
+ uq_objects, _ = uncertainty_quantification_simulation
+
+ uq = uq_objects[method]
+ dct = uq.to_dict()
+ uq_reconstructed = UQResult.from_dict(dct)
+ assert type(uq_reconstructed) == UQResult
+ assert np.allclose(uq.mean,uq_reconstructed.mean,rtol=1e-2)
+ assert np.allclose(uq.std,uq_reconstructed.std,rtol=1e-2)
+ assert np.allclose(uq.median,uq_reconstructed.median,rtol=1e-2)
+ assert np.allclose(uq.ci(95),uq_reconstructed.ci(95),rtol=1e-2)
+ assert np.allclose(uq.ci(90),uq_reconstructed.ci(90),rtol=1e-2)
+ assert np.allclose(uq.ci(50),uq_reconstructed.ci(50),rtol=1e-2)
# =================================================================================================