Skip to content
Snippets Groups Projects
Commit 006b6c0d authored by Jan Caron's avatar Jan Caron
Browse files

reconstruction: Now accepts starting distribution mag_0 and starting ramp_0.

parent 0026773d
No related branches found
No related tags found
No related merge requests found
......@@ -108,7 +108,8 @@ class Costfunction(object):
Jacobi vector which represents the cost derivative of all voxels of the magnetization.
"""
assert len(x) == self.n
print(self.fwd_model.ramp.n)
assert len(x) == self.n, 'Length of input {} does not match n={}'.format(len(x), self.n)
return (2 * self.fwd_model.jac_T_dot(x, self.Se_inv.dot(self.fwd_model(x) - self.y))
+ self.regularisator.jac(x))
......
......@@ -22,7 +22,7 @@ __all__ = ['optimize_linear', 'optimize_nonlin', 'optimize_splitbregman']
_log = logging.getLogger(__name__)
def optimize_linear(costfunction, mag_0=None, ramp_0=None, max_iter=None, verbose=False):
    """Reconstruct a three-dimensional magnetic distribution from given phase maps via the
    conjugate gradient optimizaion method :func:`~.scipy.sparse.linalg.cg`.
    Blazingly fast for l2-based cost functions.

    Parameters
    ----------
    costfunction : :class:`~.Costfunction`
        A :class:`~.Costfunction` object which implements a specified forward model and
        regularisator which is minimized in the optimization process.
    mag_0 : :class:`~.VectorData`, optional
        The starting magnetisation distribution used for the reconstruction. A zero vector will
        be used if no VectorData object is specified.
    ramp_0 : :class:`~.Ramp`, optional
        The starting ramp for the reconstruction. A zero vector will be used if no Ramp object
        is specified.
    max_iter : int, optional
        The maximum number of iterations for the opimization.
    verbose : bool, optional
        If set to True, the output of the optimization is shown.

    Returns
    -------
    mag_opt : :class:`~.VectorData`
        The reconstructed magnetisation distribution.

    """
    import jutil.cg as jcg
    _log.debug('Calling optimize_linear')
    data_set = costfunction.fwd_model.data_set
    # Assemble the starting vector x_0 = [magnetisation part | ramp part].
    # Use zeros (NOT np.empty!) so any part that is not explicitly provided below is a
    # well-defined zero start instead of uninitialized memory:
    x_0 = np.zeros(costfunction.n)
    if mag_0 is not None:
        costfunction.fwd_model.magdata = mag_0
        x_0[:data_set.n] = costfunction.fwd_model.magdata.get_vector(mask=data_set.mask)
    if ramp_0 is not None:
        # Use the cached ramp parameters as the starting ramp vector (flattened to 1D):
        x_0[data_set.n:] = ramp_0.param_cache.ravel()
    # Log the cost at the actual starting point (not at the zero vector):
    _log.info('Cost before optimization: {:.3e}'.format(costfunction(x_0)))
    # Minimize:
    x_opt = jcg.conj_grad_minimize(costfunction, x_0=x_0, max_iter=max_iter, verbose=verbose).x
    _log.info('Cost after optimization: {:.3e}'.format(costfunction(x_opt)))
    # Cut ramp parameters if necessary (this also saves the final parameters in the ramp class!):
    x_opt = costfunction.fwd_model.ramp.extract_ramp_params(x_opt)
    # Create and return fitting VectorData object:
    mag_opt = VectorData(data_set.a, np.zeros((3,) + data_set.dim))
    mag_opt.set_vector(x_opt, data_set.mask)
    return mag_opt
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment