Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
46 changes: 40 additions & 6 deletions src/pipt/update_schemes/enrml.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,11 +7,15 @@
import pipt.misc_tools.ensemble_tools as entools
import pipt.misc_tools.data_tools as dtools


from geostat.decomp import Cholesky
from pipt.loop.ensemble import Ensemble
from pipt.update_schemes.update_methods_ns.subspace_update import subspace_update
from pipt.update_schemes.update_methods_ns.subspace2_update import subspace2_update
from pipt.update_schemes.update_methods_ns.full_update import full_update
from pipt.update_schemes.update_methods_ns.approx_update import approx_update
from pipt.update_schemes.update_methods_ns.margIS_update import margIS_update

import sys
import pkgutil
import inspect
Expand All @@ -35,11 +39,11 @@
# import standard libraries

# Check and import (if present) from other namespace packages
if 'margIS_update' in [el[0] for el in tot_ns_pkg]: # only compare package name
from pipt.update_schemes.update_methods_ns.margIS_update import margIS_update
else:
class margIS_update:
pass
#if 'margIS_update' in [el[0] for el in tot_ns_pkg]: # only compare package name
# from pipt.update_schemes.update_methods_ns.margIS_update import margIS_update
#else:
# class margIS_update:
# pass

# Internal imports
from pipt.misc_tools.analysis_tools import aug_state
Expand Down Expand Up @@ -168,9 +172,26 @@ def calc_analysis(self):
# Update the state ensemble and weights
if hasattr(self, 'step'):
self.enX_temp = self.enX + self.step
# This is the vector update following e.g. Evensen et al 2019 update for subspace
if hasattr(self, 'w_step'):
self.W = self.current_W + self.w_step
self.enX_temp = np.dot(self.prior_enX, (np.eye(self.ne) + self.W/np.sqrt(self.ne - 1)))
self.enX_temp = np.dot(self.prior_enX, (np.eye(self.ne) + self.W / np.sqrt(self.ne - 1)))
#This is the matrix update following e.g. Raanes et al 2019 update for subspace
if hasattr(self, 'W_step'):
self.W = self.current_W + self.W_step
X_p = self.prior_enX @ self.proj * np.sqrt(self.ne - 1)
self.enX_temp = np.mean(self.prior_enX, axis=1, keepdims=True) + np.dot(X_p, self.W)

if hasattr(self, 'sqrt_w_step'):
self.w = self.current_w + self.sqrt_w_step
Us, Ss, VsT = np.linalg.svd(self.S, full_matrices=False)
eps = 1e-8 * Ss[0] # e.g., 1e-8 * largest
s_inv = 1.0 / np.sqrt(np.maximum(Ss, eps))
S_inv = np.diag(s_inv)
self.W = Us @ S_inv @ Us.T
X_p = self.prior_enX @ self.proj * np.sqrt(self.ne - 1)
x = np.mean(self.prior_enX, axis=1) + X_p @ self.w
self.enX_temp = np.repeat(x[:, None], self.ne, axis=1) + np.dot(X_p, self.W)


# Ensure limits are respected
Expand Down Expand Up @@ -275,6 +296,8 @@ def check_convergence(self):
# Update ensemble weights
if hasattr(self, 'W'):
self.current_W = cp.deepcopy(self.W)
if hasattr(self, 'w'):
self.current_w = cp.deepcopy(self.w)


elif self.data_misfit < self.prev_data_misfit and self.data_misfit_std >= self.prev_data_misfit_std:
Expand All @@ -290,6 +313,8 @@ def check_convergence(self):
# Update ensemble weights
if hasattr(self, 'W'):
self.current_W = cp.deepcopy(self.W)
if hasattr(self, 'w'):
self.current_w = cp.deepcopy(self.w)

else: # Reject iteration, and increase lam
success = False
Expand Down Expand Up @@ -337,6 +362,12 @@ class lmenrml_full(lmenrmlMixIn, full_update):
class lmenrml_subspace(lmenrmlMixIn, subspace_update):
pass

class lmenrml_subspace2(lmenrmlMixIn, subspace2_update):
    """EnRML variant combining `lmenrmlMixIn` with the `subspace2_update`
    scheme (the matrix-form ensemble-subspace update of Raanes et al., 2019)."""
    pass

class lmenrml_margIS(lmenrmlMixIn, margIS_update):
    """EnRML variant combining `lmenrmlMixIn` with the `margIS_update`
    scheme (marginal IES update, Stordal et al.)."""
    pass


class gnenrmlMixIn(Ensemble):
"""
Expand Down Expand Up @@ -662,6 +693,9 @@ class gnenrml_full(gnenrmlMixIn, full_update):
class gnenrml_subspace(gnenrmlMixIn, subspace_update):
pass

class gnenrml_subspace2(gnenrmlMixIn, subspace2_update):
    """EnRML variant combining `gnenrmlMixIn` with the `subspace2_update`
    scheme (the matrix-form ensemble-subspace update of Raanes et al., 2019)."""
    pass


class gnenrml_margis(gnenrmlMixIn, margIS_update):
'''
Expand Down
24 changes: 22 additions & 2 deletions src/pipt/update_schemes/esmda.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
from pipt.update_schemes.update_methods_ns.approx_update import approx_update
from pipt.update_schemes.update_methods_ns.full_update import full_update
from pipt.update_schemes.update_methods_ns.subspace_update import subspace_update

from pipt.update_schemes.update_methods_ns.subspace2_update import subspace2_update

class esmdaMixIn(Ensemble):
"""
Expand Down Expand Up @@ -180,9 +180,26 @@ def calc_analysis(self):
# Update the state ensemble and weights
if hasattr(self, 'step'):
self.enX_temp = self.enX + self.step
# This is the vector update following e.g. Evensen et al 2019 update for subspace
if hasattr(self, 'w_step'):
self.W = self.current_W + self.w_step
self.enX_temp = np.dot(self.prior_enX, (np.eye(self.ne) + self.W/np.sqrt(self.ne - 1)))
self.enX_temp = np.dot(self.prior_enX, (np.eye(self.ne) + self.W / np.sqrt(self.ne - 1)))
# This is the matrix update following e.g. Raanes et al 2019 update for subspace
if hasattr(self, 'W_step'):
self.W = self.current_W + self.W_step
X_p = self.prior_enX @ self.proj * np.sqrt(self.ne - 1)
self.enX_temp = np.mean(self.prior_enX, axis=1, keepdims=True) + np.dot(X_p, self.W)

if hasattr(self, 'sqrt_w_step'):
self.w = self.current_w + self.sqrt_w_step
Us, Ss, VsT = np.linalg.svd(self.S, full_matrices=False)
eps = 1e-8 * Ss[0] # e.g., 1e-8 * largest
s_inv = 1.0 / np.sqrt(np.maximum(Ss, eps))
S_inv = np.diag(s_inv)
self.W = Us @ S_inv @ Us.T
X_p = self.prior_enX @ self.proj * np.sqrt(self.ne - 1)
x = np.mean(self.prior_enX, axis=1) + X_p @ self.w
self.enX_temp = np.repeat(x[:, None], self.ne, axis=1) + np.dot(X_p, self.W)


# Ensure limits are respected
Expand Down Expand Up @@ -367,6 +384,9 @@ class esmda_full(esmdaMixIn, full_update):
class esmda_subspace(esmdaMixIn, subspace_update):
pass

class esmda_subspace2(esmdaMixIn, subspace2_update):
    """ES-MDA variant combining `esmdaMixIn` with the `subspace2_update`
    scheme (the matrix-form ensemble-subspace update of Raanes et al., 2019)."""
    pass


class esmda_geo(esmda_approx):
"""
Expand Down
128 changes: 92 additions & 36 deletions src/pipt/update_schemes/update_methods_ns/margIS_update.py
Original file line number Diff line number Diff line change
@@ -1,45 +1,101 @@
"""Stochastic iterative ensemble smoother (IES, i.e. EnRML) with *subspace* implementation."""

import numpy as np
from scipy.linalg import solve
import copy as cp
from pipt.misc_tools import analysis_tools as at
from scipy.linalg import solve, lu_solve, lu_factor, cho_solve

import pipt.misc_tools.analysis_tools as at

class margIS_update():

class margIS_update():
"""
Placeholder for private margIS method
MargIES update from Stordal et al.
This is now implemented with perturbed observations, which means that we set a prior belief on the data uncertainty.
Thus, the prior is an inverse chi-squared distribution, and after scaling the mean variance is 1.
"""
def update(self):
if self.iteration == 1: # method requires some initiallization
self.aug_prior = cp.deepcopy(at.aug_state(self.prior_state, self.list_states))
self.mean_prior = self.aug_prior.mean(axis=1)
self.X = (self.aug_prior - np.dot(np.resize(self.mean_prior, (len(self.mean_prior), 1)),
np.ones((1, self.ne))))
self.W = np.eye(self.ne)
self.current_w = np.zeros((self.ne,))
self.E = np.dot(self.real_obs_data, self.proj)

M = len(self.real_obs_data)
Ytmp = solve(self.W, self.proj)
if len(self.scale_data.shape) == 1:
Y = np.dot(np.expand_dims(self.scale_data ** (-1), axis=1), np.ones((1, self.ne))) * \
np.dot(self.aug_pred_data, Ytmp)
else:
Y = solve(self.scale_data, np.dot(self.aug_pred_data, Ytmp))

pred_data_mean = np.mean(self.aug_pred_data, 1)
delta_d = (self.obs_data_vector - pred_data_mean)
def update(self, enX, enY, enE, **kwargs):

if self.iteration == 1: # method requires some initiallization
self.current_W = np.eye(self.ne)
self.current_w = np.zeros(self.ne)
self.D = self.scale(enE, self.scale_data)
# Scale everything so that data uncertainty is I

sY = self.scale(enY, self.scale_data) #Scaling is same as with 'known' uncertainty, hence makes sense to set s = 1
self.S = 0

deltaD = 0
deltaD_sqrt = 0

Y = np.linalg.solve(self.current_W.T, sY.T).T
Y = Y @ self.proj * np.sqrt(self.ne - 1)
index = np.arange(0, 70, 70) # Has to be specified via data types (or select each data)...
M = 1 #Numbers of data per type. Computed from index
s = 1 #should be default option with possibility to change in setup
nu = self.ne-1 #should be default option with possibility to change in setup
for j in range(70):

delta = self.D[index,:]-sY[index,:]
Chi = np.sum(delta * delta, axis = 0)
Chi = np.mean(Chi)
Ratio = (M + nu) / (Chi + nu*s*s)
#Ratio = 1
#Gradient
deltaD = deltaD + (Y[index,:] * Ratio).T @ delta
deltaD_sqrt = deltaD_sqrt + np.mean((Y[index, :] * Ratio).T @ delta ,axis=1)
# Hessian
self.S = self.S + (Y[index,:] * Ratio).T @ Y[index,:]
index += 1

deltaM = (self.ne-1)*(np.eye(self.ne)-self.current_W)
deltaM_sqrt = (self.ne-1)*self.current_w
self.S = self.S + np.eye(self.ne) * (self.ne - 1)
Delta = deltaM + deltaD
Delta_sqrt = deltaM_sqrt + deltaD_sqrt

if len(self.cov_data.shape) == 1:
S = np.dot(delta_d, (self.cov_data**(-1)) * delta_d)
Ratio = M / S
grad_lklhd = np.dot(Y.T * Ratio, (self.cov_data**(-1)) * delta_d)
grad_prior = (self.ne - 1) * self.current_w
self.C_w = (np.dot(Ratio * Y.T, np.dot(np.diag(self.cov_data ** (-1)), Y)) + (self.ne - 1) * np.eye(self.ne))

self.W_step = np.linalg.solve(self.S, Delta) / (1 + self.lam)
# self.sqrt_w_step = np.linalg.solve(self.S, Delta_sqrt) / (1 + self.lam)

def scale(self, data, scaling):
"""
Scale the data perturbations by the data error standard deviation.

Args:
data (np.ndarray): data perturbations
scaling (np.ndarray): data error standard deviation

Returns:
np.ndarray: scaled data perturbations
"""

if len(scaling.shape) == 1:
return (scaling ** (-1))[:, None] * data
else:
S = np.dot(delta_d, solve(self.cov_data, delta_d))
Ratio = M / S
grad_lklhd = np.dot(Y.T * Ratio, solve(self.cov_data, delta_d))
grad_prior = (self.ne - 1) * self.current_w
self.C_w = (np.dot(Ratio * Y.T, solve(self.cov_data, Y)) + (self.ne - 1) * np.eye(self.ne))
return solve(scaling, data)


























self.sqrt_w_step = solve(self.C_w, grad_prior + grad_lklhd)
56 changes: 56 additions & 0 deletions src/pipt/update_schemes/update_methods_ns/subspace2_update.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
"""Stochastic iterative ensemble smoother (IES, i.e. EnRML) with *subspace* implementation."""

import numpy as np
from scipy.linalg import solve, lu_solve, lu_factor
import pipt.misc_tools.analysis_tools as at





class subspace2_update():
"""
Ensemble subspace update, as described in Raanes, P. N., Stordal, A. S., &
Evensen, G. (2019). Revising the stochastic iterative ensemble smoother.
Nonlinear Processes in Geophysics, 26(3), 325–338. https://doi.org/10.5194/npg-26-325-2019

"""

def update(self, enX, enY, enE, **kwargs):

if self.iteration == 1: # method requires some initiallization
self.current_W = np.eye(self.ne)
self.D = self.scale(enE, self.scale_data)
# Scale everything so that data uncertainty is I
sY = self.scale(enY, self.scale_data)
Y = np.linalg.solve(self.current_W.T,sY.T).T #Raanes
Y = np.dot(Y, self.proj) * np.sqrt(self.ne - 1) #Raanes


#Gradients

deltaD = Y.T @ (self.D - sY)
deltaM = (self.ne-1)*(np.eye(self.ne)-self.current_W)

#Hessian
S = Y.T @ Y + np.eye(self.ne)*(self.ne-1)

self.W_step = np.linalg.solve(S , (deltaM + deltaD))/(1 + self.lam)


def scale(self, data, scaling):
"""
Scale the data perturbations by the data error standard deviation.

Args:
data (np.ndarray): data perturbations
scaling (np.ndarray): data error standard deviation

Returns:
np.ndarray: scaled data perturbations
"""

if len(scaling.shape) == 1:
return (scaling ** (-1))[:, None] * data
else:
return solve(scaling, data)
Loading
Loading