From 0e516967f8569903e0d1c2bdee8909ff3dbdc2d2 Mon Sep 17 00:00:00 2001 From: Ninjahh83 Date: Thu, 16 Apr 2026 10:26:26 +0200 Subject: [PATCH 1/2] new version of subspace methods --- src/pipt/update_schemes/enrml.py | 46 ++++++- src/pipt/update_schemes/esmda.py | 24 +++- .../update_methods_ns/margIS_update.py | 128 +++++++++++++----- .../update_methods_ns/subspace2_update.py | 56 ++++++++ .../update_methods_ns/subspace_update.py | 29 ++-- 5 files changed, 228 insertions(+), 55 deletions(-) create mode 100644 src/pipt/update_schemes/update_methods_ns/subspace2_update.py diff --git a/src/pipt/update_schemes/enrml.py b/src/pipt/update_schemes/enrml.py index 0f79d588..7b0c5450 100644 --- a/src/pipt/update_schemes/enrml.py +++ b/src/pipt/update_schemes/enrml.py @@ -7,11 +7,15 @@ import pipt.misc_tools.ensemble_tools as entools import pipt.misc_tools.data_tools as dtools + from geostat.decomp import Cholesky from pipt.loop.ensemble import Ensemble from pipt.update_schemes.update_methods_ns.subspace_update import subspace_update +from pipt.update_schemes.update_methods_ns.subspace2_update import subspace2_update from pipt.update_schemes.update_methods_ns.full_update import full_update from pipt.update_schemes.update_methods_ns.approx_update import approx_update +from pipt.update_schemes.update_methods_ns.margIS_update import margIS_update + import sys import pkgutil import inspect @@ -35,11 +39,11 @@ # import standard libraries # Check and import (if present) from other namespace packages -if 'margIS_update' in [el[0] for el in tot_ns_pkg]: # only compare package name - from pipt.update_schemes.update_methods_ns.margIS_update import margIS_update -else: - class margIS_update: - pass +#if 'margIS_update' in [el[0] for el in tot_ns_pkg]: # only compare package name +# from pipt.update_schemes.update_methods_ns.margIS_update import margIS_update +#else: +# class margIS_update: +# pass # Internal imports from pipt.misc_tools.analysis_tools import aug_state @@ -168,9 +172,26 @@ 
def calc_analysis(self): # Update the state ensemble and weights if hasattr(self, 'step'): self.enX_temp = self.enX + self.step + # This is the vector update following e.g. Evensen et al 2019 update for subspace if hasattr(self, 'w_step'): self.W = self.current_W + self.w_step - self.enX_temp = np.dot(self.prior_enX, (np.eye(self.ne) + self.W/np.sqrt(self.ne - 1))) + self.enX_temp = np.dot(self.prior_enX, (np.eye(self.ne) + self.W / np.sqrt(self.ne - 1))) + #This is the matrix update following e.g. Raanes et al 2019 update for subspace + if hasattr(self, 'W_step'): + self.W = self.current_W + self.W_step + X_p = self.prior_enX @ self.proj * np.sqrt(self.ne - 1) + self.enX_temp = np.mean(self.prior_enX, axis=1, keepdims=True) + np.dot(X_p, self.W) + + if hasattr(self, 'sqrt_w_step'): + self.w = self.current_w + self.sqrt_w_step + Us, Ss, VsT = np.linalg.svd(self.S, full_matrices=False) + eps = 1e-8 * Ss[0] # e.g., 1e-8 * largest + s_inv = 1.0 / np.sqrt(np.maximum(Ss, eps)) + S_inv = np.diag(s_inv) + self.W = Us @ S_inv @ Us.T + X_p = self.prior_enX @ self.proj * np.sqrt(self.ne - 1) + x = np.mean(self.prior_enX, axis=1) + X_p @ self.w + self.enX_temp = np.repeat(x[:, None], self.ne, axis=1) + np.dot(X_p, self.W) # Ensure limits are respected @@ -275,6 +296,8 @@ def check_convergence(self): # Update ensemble weights if hasattr(self, 'W'): self.current_W = cp.deepcopy(self.W) + if hasattr(self, 'w'): + self.current_w = cp.deepcopy(self.w) elif self.data_misfit < self.prev_data_misfit and self.data_misfit_std >= self.prev_data_misfit_std: @@ -290,6 +313,8 @@ def check_convergence(self): # Update ensemble weights if hasattr(self, 'W'): self.current_W = cp.deepcopy(self.W) + if hasattr(self, 'w'): + self.current_w = cp.deepcopy(self.w) else: # Reject iteration, and increase lam success = False @@ -337,6 +362,12 @@ class lmenrml_full(lmenrmlMixIn, full_update): class lmenrml_subspace(lmenrmlMixIn, subspace_update): pass +class lmenrml_subspace2(lmenrmlMixIn, 
subspace2_update): + pass + +class lmenrml_margIS(lmenrmlMixIn, margIS_update): + pass + class gnenrmlMixIn(Ensemble): """ @@ -662,6 +693,9 @@ class gnenrml_full(gnenrmlMixIn, full_update): class gnenrml_subspace(gnenrmlMixIn, subspace_update): pass +class gnenrml_subspace2(gnenrmlMixIn, subspace2_update): + pass + class gnenrml_margis(gnenrmlMixIn, margIS_update): ''' diff --git a/src/pipt/update_schemes/esmda.py b/src/pipt/update_schemes/esmda.py index c2948fc5..609d5930 100644 --- a/src/pipt/update_schemes/esmda.py +++ b/src/pipt/update_schemes/esmda.py @@ -18,7 +18,7 @@ from pipt.update_schemes.update_methods_ns.approx_update import approx_update from pipt.update_schemes.update_methods_ns.full_update import full_update from pipt.update_schemes.update_methods_ns.subspace_update import subspace_update - +from pipt.update_schemes.update_methods_ns.subspace2_update import subspace2_update class esmdaMixIn(Ensemble): """ @@ -180,9 +180,26 @@ def calc_analysis(self): # Update the state ensemble and weights if hasattr(self, 'step'): self.enX_temp = self.enX + self.step + # This is the vector update following e.g. Evensen et al 2019 update for subspace if hasattr(self, 'w_step'): self.W = self.current_W + self.w_step - self.enX_temp = np.dot(self.prior_enX, (np.eye(self.ne) + self.W/np.sqrt(self.ne - 1))) + self.enX_temp = np.dot(self.prior_enX, (np.eye(self.ne) + self.W / np.sqrt(self.ne - 1))) + # This is the matrix update following e.g. 
Raanes et al 2019 update for subspace + if hasattr(self, 'W_step'): + self.W = self.current_W + self.W_step + X_p = self.prior_enX @ self.proj * np.sqrt(self.ne - 1) + self.enX_temp = np.mean(self.prior_enX, axis=1, keepdims=True) + np.dot(X_p, self.W) + + if hasattr(self, 'sqrt_w_step'): + self.w = self.current_w + self.sqrt_w_step + Us, Ss, VsT = np.linalg.svd(self.S, full_matrices=False) + eps = 1e-8 * Ss[0] # e.g., 1e-8 * largest + s_inv = 1.0 / np.sqrt(np.maximum(Ss, eps)) + S_inv = np.diag(s_inv) + self.W = Us @ S_inv @ Us.T + X_p = self.prior_enX @ self.proj * np.sqrt(self.ne - 1) + x = np.mean(self.prior_enX, axis=1) + X_p @ self.w + self.enX_temp = np.repeat(x[:, None], self.ne, axis=1) + np.dot(X_p, self.W) # Ensure limits are respected @@ -367,6 +384,9 @@ class esmda_full(esmdaMixIn, full_update): class esmda_subspace(esmdaMixIn, subspace_update): pass +class esmda_subspace2(esmdaMixIn, subspace2_update): + pass + class esmda_geo(esmda_approx): """ diff --git a/src/pipt/update_schemes/update_methods_ns/margIS_update.py b/src/pipt/update_schemes/update_methods_ns/margIS_update.py index 33e729f6..f7aa1eb8 100644 --- a/src/pipt/update_schemes/update_methods_ns/margIS_update.py +++ b/src/pipt/update_schemes/update_methods_ns/margIS_update.py @@ -1,45 +1,101 @@ +"""Stochastic iterative ensemble smoother (IES, i.e. EnRML) with *subspace* implementation.""" + import numpy as np -from scipy.linalg import solve -import copy as cp -from pipt.misc_tools import analysis_tools as at +from scipy.linalg import solve, lu_solve, lu_factor, cho_solve + +import pipt.misc_tools.analysis_tools as at -class margIS_update(): +class margIS_update(): """ - Placeholder for private margIS method + MargIES update from Stordal et al. + This is now implemented with perturbed observations, which means that we set a prior belief on the data uncertainty. + Thus, the prior is an inverse chi-squared distribution and after scaling the mean variance is 1.
""" - def update(self): - if self.iteration == 1: # method requires some initiallization - self.aug_prior = cp.deepcopy(at.aug_state(self.prior_state, self.list_states)) - self.mean_prior = self.aug_prior.mean(axis=1) - self.X = (self.aug_prior - np.dot(np.resize(self.mean_prior, (len(self.mean_prior), 1)), - np.ones((1, self.ne)))) - self.W = np.eye(self.ne) - self.current_w = np.zeros((self.ne,)) - self.E = np.dot(self.real_obs_data, self.proj) - - M = len(self.real_obs_data) - Ytmp = solve(self.W, self.proj) - if len(self.scale_data.shape) == 1: - Y = np.dot(np.expand_dims(self.scale_data ** (-1), axis=1), np.ones((1, self.ne))) * \ - np.dot(self.aug_pred_data, Ytmp) - else: - Y = solve(self.scale_data, np.dot(self.aug_pred_data, Ytmp)) - pred_data_mean = np.mean(self.aug_pred_data, 1) - delta_d = (self.obs_data_vector - pred_data_mean) + def update(self, enX, enY, enE, **kwargs): + + if self.iteration == 1: # method requires some initiallization + self.current_W = np.eye(self.ne) + self.current_w = np.zeros(self.ne) + self.D = self.scale(enE, self.scale_data) + # Scale everything so that data uncertainty is I + + sY = self.scale(enY, self.scale_data) #Scaling is same as with 'known' uncertainty, hence makes sense to set s = 1 + self.S = 0 + + deltaD = 0 + deltaD_sqrt = 0 + + Y = np.linalg.solve(self.current_W.T, sY.T).T + Y = Y @ self.proj * np.sqrt(self.ne - 1) + index = np.arange(0, 70, 70) # Has to be specified via data types (or select each data)... + M = 1 #Numbers of data per type. 
Computed from index + s = 1 #should be default option with possibility to change in setup + nu = self.ne-1 #should be default option with possibility to change in setup + for j in range(70): + + delta = self.D[index,:]-sY[index,:] + Chi = np.sum(delta * delta, axis = 0) + Chi = np.mean(Chi) + Ratio = (M + nu) / (Chi + nu*s*s) + #Ratio = 1 + #Gradient + deltaD = deltaD + (Y[index,:] * Ratio).T @ delta + deltaD_sqrt = deltaD_sqrt + np.mean((Y[index, :] * Ratio).T @ delta ,axis=1) + # Hessian + self.S = self.S + (Y[index,:] * Ratio).T @ Y[index,:] + index += 1 + + deltaM = (self.ne-1)*(np.eye(self.ne)-self.current_W) + deltaM_sqrt = (self.ne-1)*self.current_w + self.S = self.S + np.eye(self.ne) * (self.ne - 1) + Delta = deltaM + deltaD + Delta_sqrt = deltaM_sqrt + deltaD_sqrt - if len(self.cov_data.shape) == 1: - S = np.dot(delta_d, (self.cov_data**(-1)) * delta_d) - Ratio = M / S - grad_lklhd = np.dot(Y.T * Ratio, (self.cov_data**(-1)) * delta_d) - grad_prior = (self.ne - 1) * self.current_w - self.C_w = (np.dot(Ratio * Y.T, np.dot(np.diag(self.cov_data ** (-1)), Y)) + (self.ne - 1) * np.eye(self.ne)) + + self.W_step = np.linalg.solve(self.S, Delta) / (1 + self.lam) + # self.sqrt_w_step = np.linalg.solve(self.S, Delta_sqrt) / (1 + self.lam) + + def scale(self, data, scaling): + """ + Scale the data perturbations by the data error standard deviation. 
+ + Args: + data (np.ndarray): data perturbations + scaling (np.ndarray): data error standard deviation + + Returns: + np.ndarray: scaled data perturbations + """ + + if len(scaling.shape) == 1: + return (scaling ** (-1))[:, None] * data else: - S = np.dot(delta_d, solve(self.cov_data, delta_d)) - Ratio = M / S - grad_lklhd = np.dot(Y.T * Ratio, solve(self.cov_data, delta_d)) - grad_prior = (self.ne - 1) * self.current_w - self.C_w = (np.dot(Ratio * Y.T, solve(self.cov_data, Y)) + (self.ne - 1) * np.eye(self.ne)) + return solve(scaling, data) + + + + + + + + + + + + + + + + + + + + + + + + + - self.sqrt_w_step = solve(self.C_w, grad_prior + grad_lklhd) diff --git a/src/pipt/update_schemes/update_methods_ns/subspace2_update.py b/src/pipt/update_schemes/update_methods_ns/subspace2_update.py new file mode 100644 index 00000000..52d54f34 --- /dev/null +++ b/src/pipt/update_schemes/update_methods_ns/subspace2_update.py @@ -0,0 +1,56 @@ +"""Stochastic iterative ensemble smoother (IES, i.e. EnRML) with *subspace* implementation.""" + +import numpy as np +from scipy.linalg import solve, lu_solve, lu_factor +import pipt.misc_tools.analysis_tools as at + + + + + +class subspace2_update(): + """ + Ensemble subspace update, as described in Raanes, P. N., Stordal, A. S., & + Evensen, G. (2019). Revising the stochastic iterative ensemble smoother. + Nonlinear Processes in Geophysics, 26(3), 325–338. 
https://doi.org/10.5194/npg-26-325-2019 + + """ + + def update(self, enX, enY, enE, **kwargs): + + if self.iteration == 1: # method requires some initiallization + self.current_W = np.eye(self.ne) + self.D = self.scale(enE, self.scale_data) + # Scale everything so that data uncertainty is I + sY = self.scale(enY, self.scale_data) + Y = np.linalg.solve(self.current_W.T,sY.T).T #Raanes + Y = np.dot(Y, self.proj) * np.sqrt(self.ne - 1) #Raanes + + + #Gradients + + deltaD = Y.T @ (self.D - sY) + deltaM = (self.ne-1)*(np.eye(self.ne)-self.current_W) + + #Hessian + S = Y.T @ Y + np.eye(self.ne)*(self.ne-1) + + self.W_step = np.linalg.solve(S , (deltaM + deltaD))/(1 + self.lam) + + + def scale(self, data, scaling): + """ + Scale the data perturbations by the data error standard deviation. + + Args: + data (np.ndarray): data perturbations + scaling (np.ndarray): data error standard deviation + + Returns: + np.ndarray: scaled data perturbations + """ + + if len(scaling.shape) == 1: + return (scaling ** (-1))[:, None] * data + else: + return solve(scaling, data) \ No newline at end of file diff --git a/src/pipt/update_schemes/update_methods_ns/subspace_update.py b/src/pipt/update_schemes/update_methods_ns/subspace_update.py index 54ccff37..1d72f566 100644 --- a/src/pipt/update_schemes/update_methods_ns/subspace_update.py +++ b/src/pipt/update_schemes/update_methods_ns/subspace_update.py @@ -3,13 +3,11 @@ import numpy as np from scipy.linalg import solve, lu_solve, lu_factor import pipt.misc_tools.analysis_tools as at - +from sklearn.linear_model import Lasso class subspace_update(): """ - Ensemble subspace update, as described in Raanes, P. N., Stordal, A. S., & - Evensen, G. (2019). Revising the stochastic iterative ensemble smoother. - Nonlinear Processes in Geophysics, 26(3), 325–338. https://doi.org/10.5194/npg-26-325-2019 + More information about the method is found in Evensen, G., Raanes, P. N., Stordal, A. S., & Hove, J. (2019). 
Efficient Implementation of an Iterative Ensemble Smoother for Data Assimilation and Reservoir History Matching. Frontiers in Applied Mathematics and Statistics, 5(October), 114. https://doi.org/10.3389/fams.2019.00047 @@ -19,23 +17,32 @@ def update(self, enX, enY, enE, **kwargs): if self.iteration == 1: # method requires some initiallization self.current_W = np.zeros((self.ne, self.ne)) - self.E = np.dot(enE, self.proj) + self.E = np.dot(enE, self.proj) #original # Center ensemble matrices + Y = np.dot(enY, self.proj) - omega = np.eye(self.ne) + np.dot(self.current_W, self.proj) - S = lu_solve(lu_factor(omega.T), Y.T).T - # Compute scaled misfit (residual between predicted and observed data) - enRes = self.scale(enY - enE, self.scale_data) + omega = np.eye(self.ne) + np.dot(self.current_W, self.proj) #original + S = lu_solve(lu_factor(omega.T), Y.T).T #original + # Compute scaled misfit (residual between predicted and observed data) + enRes = self.scale(enY - enE, self.scale_data) #orginal + # enRes = self.scale(S @ self.current_W + enE - enY, self.scale_data) + #enRes = self.scale(np.dot(S,self.current_W)+enE-enY,self.scale_data) # Truncate SVD of S - Us, Ss, VsT = at.truncSVD(S, energy=self.trunc_energy) - Sinv = np.diag(1/Ss) + #Us, Ss, VsT = at.truncSVD(S, energy=self.trunc_energy) + Us, Ss, VsT = np.linalg.svd(S, full_matrices = False) + eps = 1e-8 * Ss[0] # e.g., 1e-8 * largest + s_inv = 1.0 / np.maximum(Ss, eps) + Sinv = np.diag(s_inv) + + #Sinv = np.diag(1/Ss) # Compute update step X = Sinv @ Us.T @ self.scale(self.E, self.scale_data) + #X = Sinv @ Us.T @ enRes eigval, eigvec = np.linalg.eig(X @ X.T) X2 = Us @ Sinv.T @ eigvec X3 = S.T @ X2 From 447228c6a9b982a056555d4f5104d11ed2f3bd91 Mon Sep 17 00:00:00 2001 From: Ninjahh83 Date: Thu, 16 Apr 2026 10:40:39 +0200 Subject: [PATCH 2/2] removed sklearn dependence --- src/pipt/update_schemes/update_methods_ns/subspace_update.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/src/pipt/update_schemes/update_methods_ns/subspace_update.py b/src/pipt/update_schemes/update_methods_ns/subspace_update.py index 1d72f566..987505a7 100644 --- a/src/pipt/update_schemes/update_methods_ns/subspace_update.py +++ b/src/pipt/update_schemes/update_methods_ns/subspace_update.py @@ -3,7 +3,7 @@ import numpy as np from scipy.linalg import solve, lu_solve, lu_factor import pipt.misc_tools.analysis_tools as at -from sklearn.linear_model import Lasso +#from sklearn.linear_model import Lasso class subspace_update(): """