# -*- coding: utf-8 -*-
# Copyright 2016 by Forschungszentrum Juelich GmbH
# Author: J. Caron
#
"""Package for the creation and reconstruction of magnetic distributions and resulting phase maps.
Modules
-------
magcreator
Create simple magnetic distributions.
magdata
Class for the storage of magnetization data.
projector
Class for projecting given magnetization distributions.
kernel
Class for the kernel matrix representing one magnetized pixel.
phasemapper
Create magnetic and electric phase maps from magnetization data.
phasemap
Class for the storage of phase data.
analytic
Create phase maps for magnetic distributions with analytic solutions.
dataset
Class for collecting pairs of phase maps and corresponding projectors.
forwardmodel
Class which represents a phase mapping strategy.
costfunction
Class for the evaluation of the cost of a magnetization distribution.
reconstruction
Reconstruct magnetic distributions from given phasemaps.
regularisator
Class to instantiate different regularisation strategies.
ramp
Class which is used to add polynomial ramps to phasemaps.
diagnostics
Class to calculate diagnostics of a costfunction at a fixed magnetization distribution.
quaternion
Class which is used for easy rotations in the Projector classes.
colormap
Class which implements a custom direction encoding colormap.
"""
from . import analytic
from . import reconstruction
from . import fieldconverter
from . import magcreator
from . import colors
from . import plottools # TODO: colors and plottools into "plots" package (maybe with examples?)
from . import utils
from .costfunction import *
from .dataset import *
from .diagnostics import *
from .fielddata import *
from .forwardmodel import *
from .kernel import *
from .phasemap import *
from .phasemapper import *
from .projector import *
from .regularisator import *
from .ramp import *
from .quaternion import *
from .file_io import *
from .version import version as __version__
from .version import hg_revision as __hg_revision__
import logging
_log = logging.getLogger(__name__)
_log.info("Starting Pyramid V{} HG{}".format(__version__, __hg_revision__))
del logging
__all__ = ['analytic', 'magcreator', 'reconstruction', 'fieldconverter',
'load_phasemap', 'load_vectordata', 'load_scalardata', 'load_projector', 'load_dataset',
'colors', 'utils']
__all__.extend(costfunction.__all__)
__all__.extend(dataset.__all__)
__all__.extend(diagnostics.__all__)
__all__.extend(forwardmodel.__all__)
__all__.extend(kernel.__all__)
__all__.extend(fielddata.__all__)
__all__.extend(phasemap.__all__)
__all__.extend(phasemapper.__all__)
__all__.extend(projector.__all__)
__all__.extend(regularisator.__all__)
__all__.extend(ramp.__all__)
__all__.extend(quaternion.__all__)
__all__.extend(file_io.__all__)
# TODO: Test for different systems!
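# Usage sketch (minimal and illustrative; the file name and the load_phasemap call
# signature are assumptions, see pyramid.file_io for the actual interface):
#
#     import pyramid as pr
#     print(pr.__version__)
#     phasemap = pr.load_phasemap('my_phasemap.hdf5')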
# -*- coding: utf-8 -*-
# Copyright 2016 by Forschungszentrum Juelich GmbH
# Author: J. Caron
#
"""Create phase maps for magnetic distributions with analytic solutions.
This module provides methods for the calculation of the magnetic phase for simple geometries for
which the analytic solutions are known. These can be used for comparison with the phase
calculated by the functions from the :mod:`~pyramid.phasemapper` module.
"""
import logging
import numpy as np
from numpy import pi
from pyramid.phasemap import PhaseMap
__all__ = ['phase_mag_slab', 'phase_mag_disc', 'phase_mag_sphere', 'phase_mag_vortex']
_log = logging.getLogger(__name__)
PHI_0 = 2067.83 # magnetic flux quantum in T*nm²
def phase_mag_slab(dim, a, phi, center, width, b_0=1):
"""Calculate the analytic magnetic phase for a homogeneously magnetized slab.
Parameters
----------
dim : tuple (N=3)
The dimensions of the grid `(z, y, x)`.
a : float
The grid spacing in nm.
phi : float
The azimuthal angle, describing the direction of the magnetization.
center : tuple (N=3)
The center of the slab in pixel coordinates `(z, y, x)`.
width : tuple (N=3)
The width of the slab in pixel coordinates `(z, y, x)`.
b_0 : float, optional
The magnetic induction corresponding to a magnetization `M`\ :sub:`0` in T.
The default is 1.
Returns
-------
phasemap : :class:`~numpy.ndarray` (N=2)
The phase as a 2-dimensional array.
"""
_log.debug('Calling phase_mag_slab')
# Function for the phase:
def _phi_mag(x, y):
def _F_0(x, y):
A = np.log(x ** 2 + y ** 2 + 1E-30)
B = np.arctan(x / (y + 1E-30))
return x * A - 2 * x + 2 * y * B
return coeff * Lz * (- np.cos(phi) * (_F_0(x - x0 - Lx / 2, y - y0 - Ly / 2) -
_F_0(x - x0 + Lx / 2, y - y0 - Ly / 2) -
_F_0(x - x0 - Lx / 2, y - y0 + Ly / 2) +
_F_0(x - x0 + Lx / 2, y - y0 + Ly / 2))
+ np.sin(phi) * (_F_0(y - y0 - Ly / 2, x - x0 - Lx / 2) -
_F_0(y - y0 + Ly / 2, x - x0 - Lx / 2) -
_F_0(y - y0 - Ly / 2, x - x0 + Lx / 2) +
_F_0(y - y0 + Ly / 2, x - x0 + Lx / 2)))
# Process input parameters:
z_dim, y_dim, x_dim = dim
y0 = a * center[1] # y0, x0 define the center of a pixel,
x0 = a * center[2] # hence: (cellindex + 0.5) * grid spacing
Lz, Ly, Lx = a * width[0], a * width[1], a * width[2]
coeff = - b_0 / (4 * PHI_0) # Minus because of negative z-direction
# Create grid:
x = np.linspace(a / 2, x_dim * a - a / 2, num=x_dim)
y = np.linspace(a / 2, y_dim * a - a / 2, num=y_dim)
xx, yy = np.meshgrid(x, y)
# Return phase:
return PhaseMap(a, _phi_mag(xx, yy))
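# Usage sketch (illustrative geometry; a thin slab in a single-slice grid). The other
# analytic functions below (disc, sphere, vortex) follow the same call pattern:
#
#     phasemap = phase_mag_slab(dim=(1, 64, 64), a=1.0, phi=0.0,
#                               center=(0, 32, 32), width=(1, 16, 16))
#     phasemap.plot_phase()  # PhaseMap provides plotting (see pyramid.phasemap)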
def phase_mag_disc(dim, a, phi, center, radius, height, b_0=1):
"""Calculate the analytic magnetic phase for a homogeneously magnetized disc.
Parameters
----------
dim : tuple (N=3)
The dimensions of the grid `(z, y, x)`.
a : float
The grid spacing in nm.
phi : float
The azimuthal angle, describing the direction of the magnetization.
center : tuple (N=3)
The center of the disc in pixel coordinates `(z, y, x)`.
radius : float
The radius of the disc in pixel coordinates.
height : float
The height of the disc in pixel coordinates.
b_0 : float, optional
The magnetic induction corresponding to a magnetization `M`\ :sub:`0` in T.
The default is 1.
Returns
-------
phasemap : :class:`~numpy.ndarray` (N=2)
The phase as a 2-dimensional array.
"""
_log.debug('Calling phase_mag_disc')
# Function for the phase:
def _phi_mag(x, y):
r = np.hypot(x - x0, y - y0)
result = coeff * Lz * ((y - y0) * np.cos(phi) - (x - x0) * np.sin(phi))
result *= np.where(r <= R, 1, (R / (r + 1E-30)) ** 2)
return result
# Process input parameters:
z_dim, y_dim, x_dim = dim
y0 = a * center[1]
x0 = a * center[2]
Lz = a * height
R = a * radius
coeff = pi * b_0 / (2 * PHI_0) # Minus is gone because of negative z-direction
# Create grid:
x = np.linspace(a / 2, x_dim * a - a / 2, num=x_dim)
y = np.linspace(a / 2, y_dim * a - a / 2, num=y_dim)
xx, yy = np.meshgrid(x, y)
# Return phase:
return PhaseMap(a, _phi_mag(xx, yy))
def phase_mag_sphere(dim, a, phi, center, radius, b_0=1):
"""Calculate the analytic magnetic phase for a homogeneously magnetized sphere.
Parameters
----------
dim : tuple (N=3)
The dimensions of the grid `(z, y, x)`.
a : float
The grid spacing in nm.
phi : float
The azimuthal angle, describing the direction of the magnetization.
center : tuple (N=3)
The center of the sphere in pixel coordinates `(z, y, x)`.
radius : float
The radius of the sphere in pixel coordinates.
b_0 : float, optional
The magnetic induction corresponding to a magnetization `M`\ :sub:`0` in T.
The default is 1.
Returns
-------
phasemap : :class:`~numpy.ndarray` (N=2)
The phase as a 2-dimensional array.
"""
_log.debug('Calling phase_mag_sphere')
# Function for the phase:
def _phi_mag(x, y):
r = np.hypot(x - x0, y - y0)
result = coeff * R ** 3 / (r + 1E-30) ** 2 * (
(y - y0) * np.cos(phi) - (x - x0) * np.sin(phi))
# TODO: During testing: "RuntimeWarning: invalid value encountered in power":
result *= np.where(r > R, 1, (1 - (1 - (r / R) ** 2) ** (3. / 2.)))
return result
# Process input parameters:
z_dim, y_dim, x_dim = dim
y0 = a * center[1]
x0 = a * center[2]
R = a * radius
coeff = 2. / 3. * pi * b_0 / PHI_0 # Minus is gone because of negative z-direction
# Create grid:
x = np.linspace(a / 2, x_dim * a - a / 2, num=x_dim)
y = np.linspace(a / 2, y_dim * a - a / 2, num=y_dim)
xx, yy = np.meshgrid(x, y)
# Return phase:
return PhaseMap(a, _phi_mag(xx, yy))
def phase_mag_vortex(dim, a, center, radius, height, b_0=1):
"""Calculate the analytic magnetic phase for a vortex state disc.
Parameters
----------
dim : tuple (N=3)
The dimensions of the grid `(z, y, x)`.
a : float
The grid spacing in nm.
center : tuple (N=3)
The center of the disc in pixel coordinates `(z, y, x)`, which is also the vortex center.
radius : float
The radius of the disc in pixel coordinates.
height : float
The height of the disc in pixel coordinates.
b_0 : float, optional
The magnetic induction corresponding to a magnetization `M`\ :sub:`0` in T.
The default is 1.
Returns
-------
phasemap : :class:`~numpy.ndarray` (N=2)
The phase as a 2-dimensional array.
"""
_log.debug('Calling phase_mag_vortex')
# Function for the phase:
def _phi_mag(x, y):
r = np.hypot(x - x0, y - y0)
result = coeff * np.where(r <= R, r - R, 0)
return result
# Process input parameters:
z_dim, y_dim, x_dim = dim
y0 = a * center[1]
x0 = a * center[2]
Lz = a * height
R = a * radius
coeff = - pi * b_0 * Lz / PHI_0 # Minus because of negative z-direction
# Create grid:
x = np.linspace(a / 2, x_dim * a - a / 2, num=x_dim)
y = np.linspace(a / 2, y_dim * a - a / 2, num=y_dim)
xx, yy = np.meshgrid(x, y)
# Return phase:
return PhaseMap(a, _phi_mag(xx, yy))
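# Usage sketch (illustrative values): the vortex variant takes no azimuthal angle phi,
# only the disc geometry around the vortex core:
#
#     phasemap = phase_mag_vortex(dim=(1, 64, 64), a=1.0,
#                                 center=(0, 32, 32), radius=24, height=1)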
# -*- coding: utf-8 -*-
# Copyright 2014 by Forschungszentrum Juelich GmbH
# Author: J. Caron
#
"""This module provides the :class:`~.Costfunction` class which represents a strategy to calculate
the so called `cost` of a threedimensional magnetization distribution."""
import logging
import numpy as np
from pyramid.regularisator import NoneRegularisator
__all__ = ['Costfunction']
class Costfunction(object):
"""Class for calculating the cost of a 3D magnetic distributions in relation to 2D phase maps.
Represents a strategy for the calculation of the `cost` of a 3D magnetic distribution in
relation to two-dimensional phase maps. The `cost` is a measure for the difference of the
simulated phase maps from the magnetic distributions to the given set of phase maps and from
a priori knowledge represented by a :class:`~.Regularisator` object. Furthermore this class
provides convenient methods for the calculation of the derivative :func:`~.jac` or the product
with the Hessian matrix :func:`~.hess_dot` of the costfunction, which can be used by
optimizers. All required data should be given in a :class:`~DataSet` object.
Attributes
----------
fwd_model : :class:`~.ForwardModel`
The Forward model instance which should be used for the simulation of the phase maps which
will be compared to `y`.
regularisator : :class:`~.Regularisator`, optional
Regularisator class that is responsible for the regularisation term. If `None` is given
(or the argument is omitted), no regularisation will be used.
y : :class:`~numpy.ndarray` (N=1)
Vector which lists all pixel values of all phase maps one after another.
m: int
Size of the image space.
n: int
Size of the input space.
Se_inv : :class:`~numpy.ndarray` (N=2), optional
Inverted covariance matrix of the measurement errors. The matrix has size `m x m` with m
being the length of the target vector y.
"""
_log = logging.getLogger(__name__ + '.Costfunction')
def __init__(self, fwd_model, regularisator=None):
self._log.debug('Calling __init__')
self.fwd_model = fwd_model
if regularisator is None:
self.regularisator = NoneRegularisator()
else:
self.regularisator = regularisator
# Extract information from fwd_model:
self.y = self.fwd_model.y
self.n = self.fwd_model.n
self.m = self.fwd_model.m
self.Se_inv = self.fwd_model.Se_inv
self._log.debug('Created ' + str(self))
def __repr__(self):
self._log.debug('Calling __repr__')
return '%s(fwd_model=%r, regularisator=%r)' % \
(self.__class__, self.fwd_model, self.regularisator)
def __str__(self):
self._log.debug('Calling __str__')
return 'Costfunction(fwd_model=%s, regularisator=%s)' % \
(self.fwd_model, self.regularisator)
def __call__(self, x):
delta_y = self.fwd_model(x) - self.y
self.chisq_m = delta_y.dot(self.Se_inv.dot(delta_y))
self.chisq_a = self.regularisator(x)
self.chisq = self.chisq_m + self.chisq_a
return self.chisq
def init(self, x):
"""Initialise the costfunction by calculating the different cost terms.
Parameters
----------
x : :class:`~numpy.ndarray` (N=1)
Vectorized magnetization distribution, for which the cost is calculated.
Returns
-------
None
"""
self._log.debug('Calling init')
self(x)
def jac(self, x):
"""Calculate the derivative of the costfunction for a given magnetization distribution.
Parameters
----------
x : :class:`~numpy.ndarray` (N=1)
Vectorized magnetization distribution, for which the Jacobi vector is calculated.
Returns
-------
result : :class:`~numpy.ndarray` (N=1)
Jacobi vector which represents the cost derivative of all voxels of the magnetization.
"""
assert len(x) == self.n, 'Length of input {} does not match n={}'.format(len(x), self.n)
return (2 * self.fwd_model.jac_T_dot(x, self.Se_inv.dot(self.fwd_model(x) - self.y))
+ self.regularisator.jac(x))
def hess_dot(self, x, vector):
"""Calculate the product of a `vector` with the Hessian matrix of the costfunction.
Parameters
----------
x : :class:`~numpy.ndarray` (N=1)
Vectorized magnetization distribution at which the Hessian is calculated. The Hessian
is constant in this case, thus `x` can be set to None (it is not used in the
computation). It is implemented for the case that in the future nonlinear problems
have to be solved.
vector : :class:`~numpy.ndarray` (N=1)
Vectorized magnetization distribution which is multiplied by the Hessian.
Returns
-------
result : :class:`~numpy.ndarray` (N=1)
Product of the input `vector` with the Hessian matrix of the costfunction.
"""
return (2 * self.fwd_model.jac_T_dot(x, self.Se_inv.dot(self.fwd_model.jac_dot(x, vector)))
+ self.regularisator.hess_dot(x, vector))
def hess_diag(self, _):
""" Return the diagonal of the Hessian.
Parameters
----------
_ : undefined
Unused input
"""
return np.ones(self.n)
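# Usage sketch (minimal, assuming `fwd_model` is an already prepared ForwardModel and
# `x`/`vector` are vectorized magnetization distributions of length cost.n):
#
#     cost = Costfunction(fwd_model)      # NoneRegularisator is used if none is given
#     chisq = cost(x)                     # total cost: chisq_m + chisq_a
#     grad = cost.jac(x)                  # gradient with respect to x
#     hess_v = cost.hess_dot(x, vector)   # Hessian-vector product for optimizers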
# -*- coding: utf-8 -*-
# Copyright 2014 by Forschungszentrum Juelich GmbH
# Author: J. Caron
#
"""This module provides the :class:`~.DataSet` class for the collection of phase maps
and additional data like corresponding projectors."""
import logging
from numbers import Number
from pyramid.kernel import Kernel
from pyramid.phasemap import PhaseMap
from pyramid.phasemapper import PhaseMapperRDFC
from pyramid.projector import Projector
from pyramid.fielddata import ScalarData
from pyramid.ramp import Ramp
import matplotlib.pyplot as plt
import numpy as np
from scipy import sparse
__all__ = ['DataSet']
class DataSet(object):
"""Class for collecting phase maps and corresponding projectors.
Represents a collection of (e.g. experimentally derived) phase maps, stored as
:class:`~.PhaseMap` objects and corresponding projectors stored as :class:`~.Projector`
objects. At creation, the grid spacing `a` and the dimension `dim` of the magnetization
distribution have to be given. Data can be added via the :func:`~.append` method, where
a :class:`~.PhaseMap`, a :class:`~.Projector` and additional info have to be given.
Attributes
----------
a: float
The grid spacing in nm.
dim: tuple (N=3)
Dimensions of the 3D magnetization distribution.
b_0: double
The saturation induction in `T`.
mask: :class:`~numpy.ndarray` (N=3), optional
A boolean mask which defines the magnetized volume in 3D.
Se_inv : :class:`~numpy.ndarray` (N=2), optional
Inverted covariance matrix of the measurement errors. The matrix has size `NxN` with N
being the length of the target vector y (vectorized phase map information).
projectors: list of :class:`~.Projector`
A list of all stored :class:`~.Projector` objects.
phasemaps: list of :class:`~.PhaseMap`
A list of all stored :class:`~.PhaseMap` objects.
phase_vec: :class:`~numpy.ndarray` (N=1)
The concatenated, vectorized phase of all :class:`~.PhaseMap` objects.
count(self): int
Number of phase maps and projectors in the dataset.
hook_points(self): :class:`~numpy.ndarray` (N=1)
Hook points which determine the start of values of a phase map in the `phase_vec`.
The length is `count + 1`.
"""
_log = logging.getLogger(__name__ + '.DataSet')
@property
def a(self):
"""The grid spacing in nm."""
return self._a
@a.setter
def a(self, a):
assert isinstance(a, Number), 'Grid spacing has to be a number!'
assert a >= 0, 'Grid spacing has to be a positive number!'
self._a = float(a)
@property
def mask(self):
"""A boolean mask which defines the magnetized volume in 3D."""
return self._mask
@mask.setter
def mask(self, mask):
if mask is not None:
assert mask.shape == self.dim, 'Mask dimensions must match!'
else:
mask = np.ones(self.dim, dtype=bool)
self._mask = mask.astype(np.bool)
@property
def m(self):
"""Size of the image space."""
return np.sum([len(p.phase_vec) for p in self.phasemaps])
@property
def n(self):
"""Size of the input space."""
return 3 * np.sum(self.mask)
@property
def count(self):
"""Number of phase maps and projectors in the dataset."""
return len(self.projectors)
@property
def phase_vec(self):
"""The concatenaded, vectorized phase of all ;class:`~.PhaseMap` objects."""
return np.concatenate([p.phase_vec for p in self.phasemaps])
@property
def hook_points(self):
"""Hook points which determine the start of values of a phase map in the `phase_vec`."""
result = [0]
for i, phasemap in enumerate(self.phasemaps):
result.append(result[i] + np.prod(phasemap.dim_uv))
return result
@property
def phasemaps(self):
"""List of all PhaseMaps in the DataSet."""
return self._phasemaps
@property
def projectors(self):
"""List of all Projectors in the DataSet."""
return self._projectors
@property
def phasemappers(self):
# TODO: get rid, only use phasemapper_dict!!
"""List of all PhaseMappers in the DataSet."""
return self._phasemappers
@property
def phasemapper_dict(self):
"""Dictionary of all PhaseMappers in the DataSet."""
return self._phasemapper_dict
def __init__(self, a, dim, b_0=1, mask=None, Se_inv=None):
dim = tuple(dim)
self._log.debug('Calling __init__')
assert isinstance(dim, tuple) and len(dim) == 3, \
'Dimension has to be a tuple of length 3!'
self.a = a
self.dim = dim
self.b_0 = b_0
self.mask = mask
self.Se_inv = Se_inv
self._phasemaps = []
self._projectors = []
self._phasemappers = []
self._phasemapper_dict = {}
self._log.debug('Created: ' + str(self))
def __repr__(self):
self._log.debug('Calling __repr__')
return '%s(a=%r, dim=%r, b_0=%r, mask=%r, Se_inv=%r)' % (self.__class__, self.a, self.dim,
self.b_0, self.mask, self.Se_inv)
def __str__(self):
self._log.debug('Calling __str__')
return 'DataSet(a=%s, dim=%s, b_0=%s)' % (self.a, self.dim, self.b_0)
def _append_single(self, phasemap, projector, phasemapper=None):
self._log.debug('Calling _append')
assert isinstance(phasemap, PhaseMap) and isinstance(projector, Projector), \
'Argument has to be a tuple of a PhaseMap and a Projector object!'
dim_uv = projector.dim_uv
assert projector.dim == self.dim, '3D dimensions must match!'
assert phasemap.dim_uv == dim_uv, 'Projection dimensions (dim_uv) must match!'
assert phasemap.a == self.a, 'Grid spacing must match!'
# Create lookup key:
# TODO: Think again if phasemappers should be given as attribute (seems to be faulty
# TODO: currently... Also not very expensive, so keep outside?
if phasemapper is None:
key = dim_uv # Standard phasemapper, dim_uv is enough for identification!
else:
key = (dim_uv, str(phasemapper)) # Include string representation for identification!
# Retrieve existing, use given or create new phasemapper:
if key in self.phasemapper_dict: # Retrieve existing phasemapper:
phasemapper = self.phasemapper_dict[key]
elif phasemapper is not None: # Use given one (do nothing):
pass
else: # Create new standard (RDFC) phasemapper:
phasemapper = PhaseMapperRDFC(Kernel(self.a, dim_uv, self.b_0))
self._phasemapper_dict[key] = phasemapper
# Append everything to the lists (just contain pointers to objects!):
self._phasemaps.append(phasemap)
self._projectors.append(projector)
self._phasemappers.append(phasemapper)
def append(self, phasemap, projector, phasemapper=None):
"""Appends a data pair of phase map and projection infos to the data collection.`
Parameters
----------
phasemap: :class:`~.PhaseMap`
A :class:`~.PhaseMap` object which should be added to the data collection.
projector: :class:`~.Projector`
A :class:`~.Projector` object which should be added to the data collection.
phasemapper: :class:`~.PhaseMapper`, optional
An optional :class:`~.PhaseMapper` object which should be added.
Returns
-------
None
"""
self._log.debug('Calling append')
if type(phasemap) is not list:
phasemap = [phasemap]
if type(projector) is not list:
projector = [projector]
if type(phasemapper) is not list:
phasemapper = [phasemapper] * len(phasemap)
assert len(phasemap) == len(projector), \
('Phasemaps and projectors must have the same ' +
'length (phasemaps: {}, projectors: {})!'.format(len(phasemap), len(projector)))
for i in range(len(phasemap)):
self._append_single(phasemap[i], projector[i], phasemapper[i])
# Reset the Se_inv matrix from phasemaps confidence matrices:
self.set_Se_inv_diag_with_conf()
def create_phasemaps(self, magdata, difference=False, ramp=None):
"""Create a list of phasemaps with the projectors in the dataset for a given
:class:`~.VectorData` object.
Parameters
----------
magdata : :class:`~.VectorData`
Magnetic distribution to which the projectors of the dataset should be applied.
difference : bool, optional
If `True`, the phasemaps of the dataset are subtracted from the created ones to view
difference images. Default is False.
ramp : :class:`~.Ramp`
A ramp object, which can be specified to add a ramp to the generated phasemaps.
If `difference` is `True`, this can be interpreted as ramp correcting the phasemaps
saved in the dataset.
Returns
-------
phasemaps : list of :class:`~.phasemap.PhaseMap`
A list of the phase maps resulting from the projections specified in the dataset.
"""
self._log.debug('Calling create_phasemaps')
phasemaps = []
for i, projector in enumerate(self.projectors):
mag_proj = projector(magdata)
phasemap = self.phasemappers[i](mag_proj)
if difference:
phasemap -= self.phasemaps[i]
if ramp is not None:
assert type(ramp) == Ramp, 'ramp has to be a Ramp object!'
phasemap += ramp(index=i) # Full formula: phasemap -= phasemap_dataset - ramp
phasemap.mask = mag_proj.get_mask()[0, ...]
phasemaps.append(phasemap)
return phasemaps
def set_Se_inv_block_diag(self, cov_list):
"""Set the Se_inv matrix as a block diagonal matrix
Parameters
----------
cov_list: list of :class:`~numpy.ndarray`
List of inverted covariance matrices (one for each projection).
Returns
-------
None
"""
self._log.debug('Calling set_Se_inv_block_diag')
assert len(cov_list) == len(self.phasemaps), 'Needs one covariance matrix per phase map!'
self.Se_inv = sparse.block_diag(cov_list).tocsr()
def set_Se_inv_diag_with_conf(self, conf_list=None):
"""Set the Se_inv matrix as a block diagonal matrix from a list of confidence matrizes.
Parameters
----------
conf_list: list of :class:`~numpy.ndarray` (optional)
List of 2D confidence matrices (one for each projection) which define trust regions.
If not given, this uses the confidence matrices of the phase maps.
Returns
-------
None
"""
self._log.debug('Calling set_Se_inv_diag_with_conf')
if conf_list is None: # if no confidence matrices are given, extract from the phase maps!
conf_list = [phasemap.confidence for phasemap in self.phasemaps]
cov_list = [sparse.diags(c.ravel().astype(np.float32), 0) for c in conf_list]
self.set_Se_inv_block_diag(cov_list)
def set_3d_mask(self, mask_list=None, threshold=0.9):
# TODO: This function should be in a separate module and not here (maybe?)!
"""Set the 3D mask from a list of 2D masks.
Parameters
----------
mask_list: list of :class:`~numpy.ndarray` (optional)
List of 2D masks, which represent the projections of the 3D mask. If not given, this
uses the masks of the phase maps. If just one phase map is present, its mask is simply
expanded to 3D and used directly.
threshold: float, optional
The threshold describing the minimal fraction of 2D mask extrusions which have to
contain a 3D point for it to be considered as containing magnetisation. `threshold` is
a relative number in the range of [0, 1]. The default is 0.9. Choosing a value of 1 is
the strictest possible setting (every 2D mask extrusion has to contain the point).
Returns
-------
None
"""
self._log.debug('Calling set_3d_mask')
if mask_list is None: # if no masks are given, extract from phase maps:
mask_list = [phasemap.mask for phasemap in self.phasemaps]
if len(mask_list) == 1: # just one phasemap --> 3D mask equals 2D mask
self.mask = np.expand_dims(mask_list[0], axis=0) # z-dim is set to 1!
else: # 3D mask has to be constructed from 2D masks:
mask_3d = np.zeros(self.dim)
for i, projector in enumerate(self.projectors):
mask_2d = self.phasemaps[i].mask.reshape(-1) # 2D mask
# Add extrusion of 2D mask:
mask_3d += projector.weight.T.dot(mask_2d).reshape(self.dim)
self.mask = np.where(mask_3d >= threshold * self.count, True, False)
def save(self, filename, overwrite=True):
"""Saves the dataset as a collection of HDF5 files.
Parameters
----------
filename: str
Base name of the files which the dataset is saved into. HDF5 files are supported.
overwrite: bool, optional
If True (default), an existing file will be overwritten; if False, this
(silently!) does nothing.
"""
from .file_io.io_dataset import save_dataset
save_dataset(self, filename, overwrite)
def plot_mask(self):
"""If it exists, display the 3D mask of the magnetization distribution.
Returns
-------
None
"""
self._log.debug('Calling plot_mask')
if self.mask is not None:
return ScalarData(self.a, self.mask).plot_mask()
def plot_phasemaps(self, magdata=None, title='Phase Map', difference=False, ramp=None,
**kwargs):
"""Display all phasemaps saved in the :class:`~.DataSet` as a colormesh.
Parameters
----------
magdata : :class:`~.VectorData`, optional
Magnetic distribution to which the projectors of the dataset should be applied. If not
given, the phasemaps in the dataset are used.
title : string, optional
The main part of the title of the plots. The default is 'Phase Map'. Additional
projector info is appended to this.
difference : bool, optional
If `True`, the phasemaps of the dataset are subtracted from the created ones to view
difference images. Default is False.
ramp : :class:`~.Ramp`
A ramp object, which can be specified to add a ramp to the generated phasemaps.
If `magdata` is not given, this will instead just ramp correct the phasemaps saved
in the dataset.
Returns
-------
None
"""
self._log.debug('Calling plot_phasemaps')
if magdata is not None: # Plot phasemaps of the given magnetisation distribution:
phasemaps = self.create_phasemaps(magdata, difference=difference, ramp=ramp)
else: # Plot phasemaps saved in the DataSet (default):
phasemaps = self.phasemaps
if ramp is not None:
for i, phasemap in enumerate(phasemaps):
assert type(ramp) == Ramp, 'ramp has to be a Ramp object!'
phasemap -= ramp(index=i) # Ramp correction
for (i, phasemap) in enumerate(phasemaps):
phasemap.plot_phase(note='{} ({})'.format(title, self.projectors[i].get_info()),
**kwargs)
def plot_phasemaps_combined(self, magdata=None, title='Combined Plot', difference=False,
ramp=None, **kwargs):
"""Display all phasemaps and the resulting color coded holography images.
Parameters
----------
magdata : :class:`~.VectorData`, optional
Magnetic distribution to which the projectors of the dataset should be applied. If not
given, the phasemaps in the dataset are used.
title : string, optional
The title of the plot. The default is 'Combined Plot'.
difference : bool, optional
If `True`, the phasemaps of the dataset are subtracted from the created ones to view
difference images. Default is False.
ramp : :class:`~.Ramp`
A ramp object, which can be specified to add a ramp to the generated phasemaps.
If `magdata` is not given, this will instead just ramp correct the phasemaps saved
in the dataset.
Returns
-------
None
"""
self._log.debug('Calling plot_phasemaps_combined')
if magdata is not None:
phasemaps = self.create_phasemaps(magdata, difference=difference, ramp=ramp)
else:
phasemaps = self.phasemaps
if ramp is not None:
for i, phasemap in enumerate(phasemaps):
assert type(ramp) == Ramp, 'ramp has to be a Ramp object!'
phasemap -= ramp(index=i) # Ramp correction
for (i, phasemap) in enumerate(phasemaps):
phasemap.plot_combined(note='{} ({})'.format(title, self.projectors[i].get_info()),
**kwargs)
plt.show()
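# Usage sketch (minimal, assuming matching PhaseMap and Projector objects exist and the
# phase maps carry masks; dimensions are illustrative):
#
#     data = DataSet(a=1.0, dim=(32, 64, 64), b_0=1)
#     data.append(phasemap, projector)    # a default PhaseMapperRDFC is created
#     data.set_3d_mask()                  # build the 3D mask from the phase map masks
#     data.plot_phasemaps()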
# -*- coding: utf-8 -*-
# Copyright 2014 by Forschungszentrum Juelich GmbH
# Author: J. Caron
#
"""This module provides the :class:`~.Diagnostics` class for the calculation of diagnostics of a
specified costfunction for a fixed magnetization distribution."""
import logging
from pyramid.fielddata import VectorData
from pyramid.phasemap import PhaseMap
import matplotlib.pyplot as plt
from matplotlib import patches
from matplotlib import patheffects
from matplotlib.ticker import FuncFormatter
import numpy as np
import jutil
__all__ = ['Diagnostics', 'get_vector_field_errors']
class Diagnostics(object):
"""Class for calculating diagnostic properties of a specified costfunction.
For the calculation of diagnostic properties, a costfunction and a magnetization distribution
are specified at construction. With the :func:`~.set_position`, a position in 3D space can be
set at which all properties will be calculated. Properties are saved via boolean flags and
thus, calculation is only done if the position has changed in between. The standard deviation
and the measurement contribution require the execution of a conjugate gradient solver and can
take a while for larger problems.
Attributes
----------
x_rec: :class:`~numpy.ndarray`
Vectorized magnetization distribution at which the costfunction is evaluated.
cost: :class:`~.pyramid.costfunction.Costfunction`
Costfunction for which the diagnostics are calculated.
max_iter: int, optional
Maximum number of iterations. Default is 1000.
fwd_model: :class:`~pyramid.forwardmodel.ForwardModel`
Forward model used in the costfunction.
Se_inv : :class:`~numpy.ndarray` (N=2), optional
Inverted covariance matrix of the measurement errors. The matrix has size `NxN` with N
being the length of the target vector y (vectorized phase map information).
dim: tuple (N=3)
Dimensions of the 3D magnetic distribution.
row_idx: int
Row index of the system matrix corresponding to the current position in 3D space.
Notes
-----
Some properties depend on others, which may require recalculations of these prior
properties if necessary. The dependencies are ('-->' == 'requires'):
avrg_kern_row --> gain_row --> std --> m_inv_row
measure_contribution is independant
"""
_log = logging.getLogger(__name__ + '.Diagnostics')
@property
def cov_row(self):
"""Row of the covariance matrix (``S_a^-1+F'(x_f)^T S_e^-1 F'(x_f)``) which is needed for
the calculation of the gain and averaging kernel matrices and which ideally contains the
variance at position `row_idx` for the current component and position in 3D.
Note that the covariance matrix of the solution is symmetric (like all covariance
matrices) and thus this property could also be called cov_col for column."""
if not self._updated_cov_row:
e_i = np.zeros(self.cost.n, dtype=self.x_rec.dtype)
e_i[self.row_idx] = 1
row = 2 * jutil.cg.conj_grad_solve(self._A, e_i, P=self._P, max_iter=self.max_iter,
verbose=self.verbose)
self._std_row = np.asarray(row)
self._updated_cov_row = True
return self._std_row
@property
def std(self):
"""Standard deviation of the chosen component at the current position (calculated when
needed)."""
return np.sqrt(self.cov_row[self.row_idx])
@property
def gain_row(self):
"""Row of the gain matrix, which maps differences of phase measurements onto differences in
the retrieval result of the magnetization distribution(calculated when needed)."""
if not self._updated_gain_row:
self._gain_row = self.Se_inv.dot(self.fwd_model.jac_dot(self.x_rec, self.cov_row))
self._updated_gain_row = True
return self._gain_row
@property
def avrg_kern_row(self):
"""Row of the averaging kernel matrix (which is ideally the identity matrix), which
describes the smoothing introduced by the regularization (calculated when needed)."""
if not self._updated_avrg_kern_row:
self._avrg_kern_row = self.fwd_model.jac_T_dot(self.x_rec, self.gain_row)
self._updated_avrg_kern_row = True
return self._avrg_kern_row
@property
def measure_contribution(self):
"""The sum over an averaging kernel matrix row, which is an indicator for wheter a point of
the solution is determined by the measurement (close to `1`) or by a priori information
(close to `0`)."""
if not self._updated_measure_contribution:
cache = self.fwd_model.jac_dot(self.x_rec, np.ones(self.cost.n, self.x_rec.dtype))
cache = self.fwd_model.jac_T_dot(self.x_rec, self.Se_inv.dot(cache))
mc = 2 * jutil.cg.conj_grad_solve(self._A, cache, P=self._P, max_iter=self.max_iter)
self._measure_contribution = mc
self._updated_measure_contribution = True
return self._measure_contribution
@property
def pos(self):
"""The current solution position, which specifies the 3D-point (and the component) of the
magnetization, for which diagnostics are calculated."""
return self._pos
@pos.setter
def pos(self, pos):
c, z, y, x = pos
assert self.mask[z, y, x], 'Position is outside of the provided mask!'
mask_vec = self.mask.ravel()
idx_3d = z * self.dim[1] * self.dim[2] + y * self.dim[2] + x
row_idx = c * np.prod(mask_vec.sum()) + mask_vec[:idx_3d].sum()
if row_idx != self.row_idx:
self._pos = pos
self.row_idx = row_idx
self._updated_cov_row = False
self._updated_gain_row = False
self._updated_avrg_kern_row = False
self._updated_measure_contribution = False
def __init__(self, magdata, cost, max_iter=1000, verbose=False):
self._log.debug('Calling __init__')
self.magdata = magdata
self.cost = cost
self.max_iter = max_iter
self.verbose = verbose
self.fwd_model = cost.fwd_model
self.Se_inv = cost.Se_inv
self.dim = cost.fwd_model.data_set.dim
self.mask = cost.fwd_model.data_set.mask
self.x_rec = np.empty(cost.n)
self.x_rec[:self.fwd_model.data_set.n] = self.magdata.get_vector(mask=self.mask)
self.x_rec[self.fwd_model.data_set.n:] = self.fwd_model.ramp.param_cache.ravel()
self.row_idx = None
self.pos = (0,) + tuple(np.array(np.where(self.mask))[:, 0]) # first True mask entry
self._updated_cov_row = False
self._updated_gain_row = False
self._updated_avrg_kern_row = False
self._updated_measure_contribution = False
self._A = jutil.operator.CostFunctionOperator(self.cost, self.x_rec)
self._P = jutil.preconditioner.CostFunctionPreconditioner(self.cost, self.x_rec)
self._log.debug('Creating ' + str(self))
def get_avrg_kern_field(self, pos=None):
"""Get the averaging kernel matrix row represented as a 3D magnetization distribution.
Parameters
----------
pos: tuple (N=4)
Position in 3D plus component `(c, z, y, x)`
Returns
-------
magdata_avrg_kern: :class:`~pyramid.fielddata.VectorData`
Averaging kernel matrix row represented as a 3D magnetization distribution
"""
self._log.debug('Calling get_avrg_kern_field')
if pos is not None:
self.pos = pos
magdata_avrg_kern = VectorData(self.cost.fwd_model.data_set.a, np.zeros((3,) + self.dim))
vector = self.avrg_kern_row[:-self.fwd_model.ramp.n] # Only take vector field, not ramp!
magdata_avrg_kern.set_vector(vector, mask=self.mask)
return magdata_avrg_kern
def calculate_fwhm(self, pos=None, plot=False):
"""Calculate and plot the averaging pixel number at a specified position for x, y or z.
Parameters
----------
pos: tuple (N=4)
Position in 3D plus component `(c, z, y, x)`
plot : bool, optional
If True, a FWHM linescan plot is shown. Default is False.
Returns
-------
fwhm : float
The FWHM in x, y and z direction. The inverse corresponds to the number of pixels over
which the result is approximately averaged.
lr : 3 tuples of 2 floats
The left and right borders in x, y and z direction from which the FWHM is calculated.
Given in pixel coordinates and relative to the current position!
cxyz_dat : 4 lists of floats
The slices through the current position in the 4D volume (including the component),
which were used for FWHM calculations. Denotes information content in %!
Notes
-----
Uses the :func:`~.get_avrg_kern_field` function
"""
self._log.debug('Calling calculate_fwhm')
a = self.magdata.a
magdata_avrg_kern = self.get_avrg_kern_field(pos)
x = np.arange(0, self.dim[2]) - self.pos[3]
y = np.arange(0, self.dim[1]) - self.pos[2]
z = np.arange(0, self.dim[0]) - self.pos[1]
c_dat = magdata_avrg_kern.field[:, self.pos[1], self.pos[2], self.pos[3]]
x_dat = magdata_avrg_kern.field[self.pos[0], self.pos[1], self.pos[2], :]
y_dat = magdata_avrg_kern.field[self.pos[0], self.pos[1], :, self.pos[3]]
z_dat = magdata_avrg_kern.field[self.pos[0], :, self.pos[2], self.pos[3]]
c_dat = np.asarray(c_dat * 100) # in %
x_dat = np.asarray(x_dat * 100) # in %
y_dat = np.asarray(y_dat * 100) # in %
z_dat = np.asarray(z_dat * 100) # in %
def _calc_lr(c):
data = [x_dat, y_dat, z_dat][c]
i_m = np.argmax(data) # Index of the maximum
# Left side:
l = i_m
for i in np.arange(i_m - 1, -1, -1):
if data[i] < data[i_m] / 2:
# Linear interpolation between i and i + 1 to find left fractional index pos:
l = (data[i_m] / 2 - data[i]) / (data[i + 1] - data[i]) + i
break
# Right side:
r = i_m
for i in np.arange(i_m + 1, data.size):
if data[i] < data[i_m] / 2:
# Linear interpolation between i and i - 1 to find right fractional index pos:
r = (data[i_m] / 2 - data[i - 1]) / (data[i] - data[i - 1]) + i - 1
break
# Transform from index to coordinates:
l = (l - self.pos[3-c])
r = (r - self.pos[3-c])
return l, r
# Calculate FWHM:
lx, rx = _calc_lr(0)
ly, ry = _calc_lr(1)
lz, rz = _calc_lr(2)
# TODO: Test if FWHM is really calculated with a in mind... didn't seem so...
fwhm_x = (rx - lx) * a
fwhm_y = (ry - ly) * a
fwhm_z = (rz - lz) * a
# Plot helpful stuff:
if plot:
fig, axis = plt.subplots(1, 1)
axis.axvline(x=0, ls='-', color='k', linewidth=2)
axis.axhline(y=0, ls='-', color='k', linewidth=2)
axis.axhline(y=x_dat.max(), ls='-', color='k', linewidth=2)
axis.axhline(y=x_dat.max() / 2, ls='--', color='k', linewidth=2)
axis.vlines(x=[lx, rx], ymin=0, ymax=x_dat.max() / 2, linestyles='--',
color='r', linewidth=2, alpha=0.5)
axis.vlines(x=[ly, ry], ymin=0, ymax=y_dat.max() / 2, linestyles='--',
color='g', linewidth=2, alpha=0.5)
axis.vlines(x=[lz, rz], ymin=0, ymax=z_dat.max() / 2, linestyles='--',
color='b', linewidth=2, alpha=0.5)
l = []
l.extend(axis.plot(x, x_dat, label='x-dim.', color='r', marker='o', linewidth=2))
l.extend(axis.plot(y, y_dat, label='y-dim.', color='g', marker='o', linewidth=2))
l.extend(axis.plot(z, z_dat, label='z-dim.', color='b', marker='o', linewidth=2))
cx = axis.scatter(0, c_dat[0], marker='o', s=200, edgecolor='r', label='x-comp.',
facecolor='r', alpha=0.75)
cy = axis.scatter(0, c_dat[1], marker='d', s=200, edgecolor='g', label='y-comp.',
facecolor='g', alpha=0.75)
cz = axis.scatter(0, c_dat[2], marker='*', s=200, edgecolor='b', label='z-comp.',
facecolor='b', alpha=0.75)
lim_min = np.min(np.concatenate((x, y, z))) - 0.5
lim_max = np.max(np.concatenate((x, y, z))) + 0.5
axis.set_xlim(lim_min, lim_max)
axis.set_title('Avrg. kern. FWHM', fontsize=18)
axis.set_xlabel('x/y/z-slice [nm]', fontsize=15)
axis.set_ylabel('information content [%]', fontsize=15)
axis.tick_params(axis='both', which='major', labelsize=14)
axis.xaxis.set_major_formatter(FuncFormatter(lambda x, pos: '{:.3g}'.format(x * a)))
comp_legend = axis.legend([cx, cy, cz], [c.get_label() for c in [cx, cy, cz]], loc=2,
scatterpoints=1, prop={'size': 14})
axis.legend(l, [i.get_label() for i in l], loc=1, numpoints=1, prop={'size': 14})
axis.add_artist(comp_legend)
fwhm = fwhm_x, fwhm_y, fwhm_z
lr = (lx, rx), (ly, ry), (lz, rz)
cxyz_dat = c_dat, x_dat, y_dat, z_dat
return fwhm, lr, cxyz_dat
def get_gain_maps(self, pos=None):
"""Get the gain matrix row represented as a list of 2D (inverse) phase maps.
Parameters
----------
pos: tuple (N=4)
Position in 3D plus component `(c, z, y, x)`
Returns
-------
gain_map_list: list of :class:`~pyramid.phasemap.PhaseMap`
Gain matrix row represented as a list of 2D phase maps
Notes
-----
Note that the produced gain maps define the magnetization change at the current position
in 3D per phase change at the respective phase map pixel. Take this into account when
plotting the maps (1/rad instead of rad).
"""
self._log.debug('Calling get_gain_maps')
if pos is not None:
self.pos = pos
hp = self.cost.fwd_model.data_set.hook_points
gain_map_list = []
for i, projector in enumerate(self.cost.fwd_model.data_set.projectors):
gain = self.gain_row[hp[i]:hp[i + 1]].reshape(projector.dim_uv)
gain_map_list.append(PhaseMap(self.cost.fwd_model.data_set.a, gain))
return gain_map_list
def plot_position(self, **kwargs):
proj_axis = kwargs.get('proj_axis', 'z')
if proj_axis == 'z': # Slice of the xy-plane with z = ax_slice
pos_2d = (self.pos[2], self.pos[3])
ax_slice = self.pos[1]
elif proj_axis == 'y': # Slice of the xz-plane with y = ax_slice
pos_2d = (self.pos[1], self.pos[3])
ax_slice = self.pos[2]
elif proj_axis == 'x': # Slice of the zy-plane with x = ax_slice
pos_2d = (self.pos[2], self.pos[1])
ax_slice = self.pos[3]
else:
raise ValueError('{} is not a valid argument (use x, y or z)'.format(proj_axis))
note = kwargs.pop('note', None)
if note is None:
comp = {0: 'x', 1: 'y', 2: 'z'}[self.pos[0]]
note = '{}-comp., pos.: {}'.format(comp, self.pos[1:])
# Plots:
axis = self.magdata.plot_quiver_field(note=note, ax_slice=ax_slice, **kwargs)
rect = axis.add_patch(patches.Rectangle((pos_2d[1], pos_2d[0]), 1, 1, fill=False,
edgecolor='w', linewidth=2, alpha=0.5))
rect.set_path_effects([patheffects.withStroke(linewidth=4, foreground='k', alpha=0.5)])
def plot_position3d(self, **kwargs):
pass
def plot_avrg_kern_field(self, pos=None, **kwargs):
a = self.magdata.a
avrg_kern_field = self.get_avrg_kern_field(pos)
fwhms, lr = self.calculate_fwhm(pos)[:2]
proj_axis = kwargs.get('proj_axis', 'z')
if proj_axis == 'z': # Slice of the xy-plane with z = ax_slice
pos_2d = (self.pos[2], self.pos[3])
ax_slice = self.pos[1]
width, height = fwhms[0] / a, fwhms[1] / a
elif proj_axis == 'y': # Slice of the xz-plane with y = ax_slice
pos_2d = (self.pos[1], self.pos[3])
ax_slice = self.pos[2]
width, height = fwhms[0] / a, fwhms[2] / a
elif proj_axis == 'x': # Slice of the zy-plane with x = ax_slice
pos_2d = (self.pos[2], self.pos[1])
ax_slice = self.pos[3]
width, height = fwhms[2] / a, fwhms[1] / a
else:
raise ValueError('{} is not a valid argument (use x, y or z)'.format(proj_axis))
note = kwargs.pop('note', None)
if note is None:
comp = {0: 'x', 1: 'y', 2: 'z'}[self.pos[0]]
note = '{}-comp., pos.: {}'.format(comp, self.pos[1:])
# Plots:
axis = avrg_kern_field.plot_quiver_field(note=note, ax_slice=ax_slice, **kwargs)
xy = (pos_2d[1], pos_2d[0])
rect = axis.add_patch(patches.Rectangle(xy, 1, 1, fill=False, edgecolor='w',
linewidth=2, alpha=0.5))
rect.set_path_effects([patheffects.withStroke(linewidth=4, foreground='k', alpha=0.5)])
xy = (xy[0] + 0.5, xy[1] + 0.5)
artist = axis.add_patch(patches.Ellipse(xy, width, height, fill=False, edgecolor='w',
linewidth=2, alpha=0.5))
artist.set_path_effects([patheffects.withStroke(linewidth=4, foreground='k', alpha=0.5)])
def plot_avrg_kern_field3d(self, pos=None, mask=True, ellipsoid=True, **kwargs):
avrg_kern_field = self.get_avrg_kern_field(pos)
avrg_kern_field.plot_mask(color=(1, 1, 1), opacity=0.15, labels=False, grid=False,
orientation=False)
avrg_kern_field.plot_quiver3d(**kwargs, new_fig=False)
fwhm = self.calculate_fwhm()[0]
from mayavi.sources.api import ParametricSurface
from mayavi.modules.api import Surface
from mayavi import mlab
engine = mlab.get_engine()
scene = engine.scenes[0]
scene.scene.disable_render = True # for speed # TODO: EVERYWHERE WITH MAYAVI!
# TODO: from enthought.mayavi import mlab
# TODO: f = mlab.figure() # returns the current scene
# TODO: engine = mlab.get_engine() # returns the running mayavi engine
source = ParametricSurface()
source.function = 'ellipsoid'
engine.add_source(source)
surface = Surface()
source.add_module(surface)
actor = surface.actor # mayavi actor, actor.actor is tvtk actor
# actor.property.ambient = 1 # defaults to 0 for some reason, ah don't need it, turn off scalar visibility instead
actor.property.opacity = 0.5
actor.property.color = (0, 0, 0)
actor.mapper.scalar_visibility = False # don't colour ellipses by their scalar indices into colour map
actor.property.backface_culling = True # gets rid of rendering artifact when opacity is < 1
# actor.property.frontface_culling = True
actor.actor.orientation = [0, 0, 0] # in degrees
actor.actor.origin = (0, 0, 0)
actor.actor.position = (self.pos[1]+0.5, self.pos[2]+0.5, self.pos[3]+0.5)
a = self.magdata.a
actor.actor.scale = [0.5*fwhm[0]/a, 0.5*fwhm[1]/a, 0.5*fwhm[2]/a]
#surface.append(surface)
scene.scene.disable_render = False # now turn it on # TODO: EVERYWHERE WITH MAYAVI!
def plot_avrg_kern_field_3d_to_2d(self, dim_uv=None, axis=None, figsize=None, high_res=False,
**kwargs):
# TODO: 3d_to_2d into plottools and make available for all 3D plots if possible!
import tempfile
from PIL import Image
import os
from . import plottools
from mayavi import mlab
if figsize is None:
figsize = plottools.FIGSIZE_DEFAULT
if axis is None:
self._log.debug('axis is None')
fig = plt.figure(figsize=figsize)
axis = fig.add_subplot(1, 1, 1)
axis.set_axis_bgcolor('gray')
kwargs.setdefault('labels', False)
#avrg_kern_field = self.get_avrg_kern_field()
#avrg_kern_field.plot_quiver3d(**kwargs)
self.plot_avrg_kern_field3d(**kwargs)
if high_res: # Use temp files:
tmpdir = tempfile.mkdtemp()
temp_path = os.path.join(tmpdir, 'temp.png')
try:
mlab.savefig(temp_path, size=(2000, 2000))
imgmap = np.asarray(Image.open(temp_path))
except Exception as e:
raise e
finally:
os.remove(temp_path)
os.rmdir(tmpdir)
else: # Use screenshot (returns array WITH alpha!):
imgmap = mlab.screenshot(mode='rgba', antialiased=True)
mlab.close(mlab.gcf())
if dim_uv is None:
dim_uv = self.dim[1:]
axis.imshow(imgmap, extent=[0, dim_uv[0], 0, dim_uv[1]], origin='upper')
kwargs.setdefault('scalebar', False)
kwargs.setdefault('hideaxes', True)
return plottools.format_axis(axis, hideaxes=True, scalebar=False)
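# Usage sketch (minimal, assuming `magdata` is the reconstructed VectorData and `cost`
# the Costfunction used for the reconstruction; the position is illustrative and has to
# lie inside the mask):
#
#     diag = Diagnostics(magdata, cost, max_iter=1000)
#     diag.pos = (0, 5, 10, 10)           # (component, z, y, x)
#     print(diag.std)                     # standard deviation at that position
#     fwhm, lr, cxyz_dat = diag.calculate_fwhm(plot=True)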
def get_vector_field_errors(vector_data, vector_data_ref):
"""After Kemp et. al.: Analysis of noise-induced errors in vector-field electron tomography"""
v, vr = vector_data.field, vector_data_ref.field
va, vra = vector_data.field_amp, vector_data_ref.field_amp
volume = np.prod(vector_data.dim)
# Total error:
amp_sum_sqr = np.nansum((v - vr)**2)
rms_tot = np.sqrt(amp_sum_sqr / np.nansum(vra**2))
# Directional error:
scal_prod = np.clip(np.nansum(vr * v, axis=0) / (vra * va), -1, 1) # clip to avoid arccos errors from floating point inaccuracies!
rms_dir = np.sqrt(np.nansum(np.arccos(scal_prod)**2) / volume) / np.pi
# Magnitude error:
rms_mag = np.sqrt(np.nansum((va - vra)**2) / np.nansum(vra**2))
# Return results as tuple:
return rms_tot, rms_dir, rms_mag
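# Usage sketch: compare a reconstructed distribution with a known reference (both
# :class:`~.VectorData` objects of identical dimensions):
#
#     rms_tot, rms_dir, rms_mag = get_vector_field_errors(magdata_rec, magdata_ref)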
# -*- coding: utf-8 -*-
# Copyright 2014 by Forschungszentrum Juelich GmbH
# Author: J. Caron
#
"""Custom FFT module with numpy and FFTW support.
This module provides custom methods for FFTs including inverse, adjoint and real variants. The
FFTW library is supported and is used as a default if the import succeeds. Otherwise the numpy.fft
package will be used. FFTW objects are saved in a cache after creation, which speeds up further similar
FFT operations.
"""
# TODO: is this still in use or deprecated by jutil???
import pickle
import logging
import os
import numpy as np
_log = logging.getLogger(__name__)
try:
import pyfftw
BACKEND = 'fftw'
except ImportError:
pyfftw = None
BACKEND = 'numpy'
_log.info('pyFFTW module not found. Using numpy implementation.')
try:
import multiprocessing
NTHREADS = multiprocessing.cpu_count()
del multiprocessing
except ImportError:
NTHREADS = 1
_log.info('multiprocessing module not found. Using single core.')
__all__ = ['plans', 'FLOAT', 'COMPLEX', 'dump_wisdom', 'load_wisdom',
'zeros', 'empty', 'ones', 'configure_backend',
'fftn', 'ifftn', 'rfftn', 'irfftn', 'rfftn_adj', 'irfftn_adj']
class FFTWCache(object):
"""Class for adding FFTW Plans and on-demand lookups.
This class is instantiated in this module to store FFTW plans and for the lookup of the former.
Attributes
----------
cache: dict
Cache for storing the FFTW plans.
Notes
-----
This class is used internally and is normally not intended to be used directly by the user.
"""
_log = logging.getLogger(__name__ + '.FFTWCache')
def __init__(self):
self._log.debug('Calling __init__')
self.cache = dict()
self._log.debug('Created ' + str(self))
def add_fftw(self, fft_type, fftw_obj, s, axes, nthreads):
"""Add an FFTW object to the cache.
Parameters
----------
fft_type: basestring
Identifier string for the FFT type ('fftn', 'ifftn', 'rfftn', 'irfftn').
fftw_obj: :class:`~pyfftw.FFTW` object
The FFTW object which should be added to the cache.
s: tuple of ints
Shape of the output array.
axes: tuple of ints
The axes along which the FFTW should be executed.
nthreads: int
Number of threads which should be used.
"""
self._log.debug('Calling add_fftw')
in_arr = fftw_obj.get_input_array()
key = (fft_type, in_arr.shape, in_arr.dtype, s, axes, nthreads)
self.cache[key] = fftw_obj
def lookup_fftw(self, fft_type, in_arr, s, axes, nthreads):
"""
Parameters
----------
fft_type: basestring
Identifier string for the FFT type ('fftn', 'ifftn', 'rfftn', 'irfftn').
in_arr:
Input array, internally, just the `dtype` and the `shape` are used to identify the FFT.
s: tuple of ints
Shape of the output array.
axes: tuple of ints
The axes along which the FFTW should be executed.
nthreads: int
Number of threads which should be used.
Returns
-------
fftw_obj: :class:`~pyfftw.FFTW` object
The requested FFTW object.
"""
self._log.debug('Calling lookup_fftw')
key = (fft_type, in_arr.shape, in_arr.dtype, s, axes, nthreads)
return self.cache.get(key, None)
def clear_cache(self):
"""Clear the cache."""
self._log.debug('Calling clear_cache')
self.cache = dict()
plans = FFTWCache()
FLOAT = np.float32 # One convenient place to
COMPLEX = np.complex64 # change from 32 to 64 bit
# Numpy functions:
def _fftn_numpy(a, s=None, axes=None):
return np.fft.fftn(a, s, axes)
def _ifftn_numpy(a, s=None, axes=None):
return np.fft.ifftn(a, s, axes)
def _rfftn_numpy(a, s=None, axes=None):
return np.fft.rfftn(a, s, axes)
def _irfftn_numpy(a, s=None, axes=None):
return np.fft.irfftn(a, s, axes)
def _rfftn_adj_numpy(a):
n = 2 * (a.shape[-1] - 1)
out_shape = a.shape[:-1] + (n,)
out_arr = zeros(out_shape, dtype=a.dtype)
out_arr[:, :a.shape[1]] = a
return _ifftn_numpy(out_arr).real * np.prod(out_shape)
def _irfftn_adj_numpy(a):
n = a.shape[-1] // 2 + 1
out_arr = _fftn_numpy(a, axes=(-1,)) / a.shape[-1]
if a.shape[-1] % 2 == 0: # even
out_arr[:, 1:n - 1] += np.conj(out_arr[:, :n - 1:-1])
else: # odd
out_arr[:, 1:n] += np.conj(out_arr[:, :n - 1:-1])
axes = tuple(range(len(out_arr.shape[:-1])))
return _fftn_numpy(out_arr[:, :n], axes=axes) / np.prod(out_arr.shape[:-1])
# FFTW functions:
def _fftn_fftw(a, s=None, axes=None):
fftw = plans.lookup_fftw('fftn', a, s, axes, NTHREADS)
if fftw is None:
fftw = pyfftw.builders.fftn(a, s, axes, threads=NTHREADS)
plans.add_fftw('fftn', fftw, s, axes, NTHREADS)
return fftw(a).copy()
def _ifftn_fftw(a, s=None, axes=None):
fftw = plans.lookup_fftw('ifftn', a, s, axes, NTHREADS)
if fftw is None:
fftw = pyfftw.builders.ifftn(a, s, axes, threads=NTHREADS)
plans.add_fftw('ifftn', fftw, s, axes, NTHREADS)
return fftw(a).copy()
def _rfftn_fftw(a, s=None, axes=None):
fftw = plans.lookup_fftw('rfftn', a, s, axes, NTHREADS)
if fftw is None:
fftw = pyfftw.builders.rfftn(a, s, axes, threads=NTHREADS)
plans.add_fftw('rfftn', fftw, s, axes, NTHREADS)
return fftw(a).copy()
def _irfftn_fftw(a, s=None, axes=None):
fftw = plans.lookup_fftw('irfftn', a, s, axes, NTHREADS)
if fftw is None:
fftw = pyfftw.builders.irfftn(a, s, axes, threads=NTHREADS)
plans.add_fftw('irfftn', fftw, s, axes, NTHREADS)
return fftw(a).copy()
def _rfftn_adj_fftw(a):
# Careful: just works for even a (which is guaranteed by the kernel!)
n = 2 * (a.shape[-1] - 1)
out_shape = a.shape[:-1] + (n,)
out_arr = zeros(out_shape, dtype=a.dtype)
out_arr[:, :a.shape[-1]] = a
return _ifftn_fftw(out_arr).real * np.prod(out_shape)
def _irfftn_adj_fftw(a):
out_arr = _fftn_fftw(a, axes=(-1,)) / a.shape[-1] # FFT of last axis
n = a.shape[-1] // 2 + 1
if a.shape[-1] % 2 == 0: # even
out_arr[:, 1:n - 1] += np.conj(out_arr[:, :n - 1:-1])
else: # odd
out_arr[:, 1:n] += np.conj(out_arr[:, :n - 1:-1])
axes = tuple(range(len(out_arr.shape[:-1])))
return _fftn_fftw(out_arr[:, :n], axes=axes) / np.prod(out_arr.shape[:-1])
# These wisdom functions do nothing if pyFFTW is not available:
def dump_wisdom(fname):
"""Wrapper function for the pyfftw.export_wisdom(), which uses a pickle dump.
Parameters
----------
fname: string
Name of the file in which the wisdom is saved.
Returns
-------
None
"""
_log.debug('Calling dump_wisdom')
if pyfftw is not None:
with open(fname, 'wb') as fp:
pickle.dump(pyfftw.export_wisdom(), fp, pickle.HIGHEST_PROTOCOL)
def load_wisdom(fname):
"""Wrapper function for the pyfftw.import_wisdom(), which uses a pickle to load a file.
Parameters
----------
fname: string
Name of the file from which the wisdom is loaded.
Returns
-------
None
"""
_log.debug('Calling load_wisdom')
if pyfftw is not None:
if not os.path.exists(fname):
print("Warning: Wisdom file does not exist. First time use?")
else:
with open(fname, 'rb') as fp:
pyfftw.import_wisdom(pickle.load(fp))
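# Usage sketch (the file name is illustrative): persist FFTW planning information
# between sessions; both calls are silent no-ops if pyFFTW is not available.
#
#     load_wisdom('pyramid_fftw.wisdom')  # before the first FFT of a session
#     dump_wisdom('pyramid_fftw.wisdom')  # after the FFTs of interest have been planned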
# Array setups:
def empty(shape, dtype=FLOAT):
"""Return a new array of given shape and type without initializing entries.
Parameters
----------
shape: int or tuple of int
Shape of the array.
dtype: data-type, optional
Desired output data-type.
Returns
-------
out: :class:`~numpy.ndarray`
The created array.
"""
_log.debug('Calling empty')
result = np.empty(shape, dtype)
if pyfftw is not None:
result = pyfftw.n_byte_align(result, pyfftw.simd_alignment)
return result
def zeros(shape, dtype=FLOAT):
"""Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape: int or tuple of int
Shape of the array.
dtype: data-type, optional
Desired output data-type.
Returns
-------
out: :class:`~numpy.ndarray`
The created array.
"""
_log.debug('Calling zeros')
result = np.zeros(shape, dtype)
if pyfftw is not None:
result = pyfftw.n_byte_align(result, pyfftw.simd_alignment)
return result
def ones(shape, dtype=FLOAT):
"""Return a new array of given shape and type, filled with ones.
Parameters
----------
shape: int or tuple of int
Shape of the array.
dtype: data-type, optional
Desired output data-type.
Returns
-------
out: :class:`~numpy.ndarray`
The created array.
"""
_log.debug('Calling ones')
result = np.ones(shape, dtype)
if pyfftw is not None:
result = pyfftw.n_byte_align(result, pyfftw.simd_alignment)
return result
# Configure backend:
def configure_backend(backend):
"""Change FFT backend.
Parameters
----------
backend: string
Backend to use. Supported values are "numpy" and "fftw".
Returns
-------
None
"""
_log.debug('Calling configure_backend')
global fftn
global ifftn
global rfftn
global irfftn
global rfftn_adj
global irfftn_adj
global BACKEND
if backend == 'numpy':
fftn = _fftn_numpy
ifftn = _ifftn_numpy
rfftn = _rfftn_numpy
irfftn = _irfftn_numpy
rfftn_adj = _rfftn_adj_numpy
irfftn_adj = _irfftn_adj_numpy
BACKEND = 'numpy'
elif backend == 'fftw':
if pyfftw is not None:
fftn = _fftn_fftw
ifftn = _ifftn_fftw
rfftn = _rfftn_fftw
irfftn = _irfftn_fftw
rfftn_adj = _rfftn_adj_fftw
irfftn_adj = _irfftn_adj_fftw
BACKEND = 'fftw'
else:
print('Error: FFTW requested but not available')
# On import:
ifftn = None
fftn = None
rfftn = None
irfftn = None
rfftn_adj = None
irfftn_adj = None
if pyfftw is not None:
configure_backend('fftw')
else:
configure_backend('numpy')
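# Usage sketch (assuming this module is importable as pyramid.fft): switch to the numpy
# backend explicitly, e.g. for comparison runs without FFTW:
#
#     from pyramid import fft
#     fft.configure_backend('numpy')
#     spectrum = fft.rfftn(fft.zeros((32, 32)))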
# coding=utf-8
"""Convert vector fields.
The :mod:`~.fieldconverter` provides methods for converting a magnetization distribution `M` into
a vector potential `A` and convert this in turn into a magnetic field `B`. The direct way is also
possible.
"""
import logging
import numpy as np
from jutil import fft
from pyramid.fielddata import VectorData
__all__ = ['convert_M_to_A', 'convert_A_to_B', 'convert_M_to_B']
_log = logging.getLogger(__name__)
def convert_M_to_A(magdata, b_0=1.0):
"""Convert a magnetic vector distribution into a vector potential `A`.
Parameters
----------
magdata: :class:`~pyramid.magdata.VectorData` object
The magnetic vector field from which the A-field is calculated.
b_0: float, optional
The saturation magnetization which is used in the calculation.
Returns
-------
a_data: :class:`~pyramid.magdata.VectorData` object
The calculated vector potential `A`.
"""
_log.debug('Calling convert_M_to_A')
# Preparations of variables:
assert isinstance(magdata, VectorData), 'Only VectorData objects can be mapped!'
dim = magdata.dim
dim_kern = tuple(2 * np.array(dim) - 1) # Dimensions of the kernel
if fft.HAVE_FFTW:
dim_pad = tuple(2 * np.array(dim)) # is at least even (not necessarily a power of 2)
else:
dim_pad = tuple(2 ** np.ceil(np.log2(2 * np.array(dim))).astype(int)) # pow(2)
slice_B = (slice(dim[0] - 1, dim_kern[0]), # Shift because kernel center
slice(dim[1] - 1, dim_kern[1]), # is not at (0, 0, 0)!
slice(dim[2] - 1, dim_kern[2]))
slice_M = (slice(0, dim[0]), # Magnetization is padded on the far end!
slice(0, dim[1]), # B-field cutout is shifted as listed above
slice(0, dim[2])) # because of the kernel center!
# Set up kernels
coeff = magdata.a * b_0 / (4 * np.pi)
zzz, yyy, xxx = np.indices(dim_kern)
xxx -= dim[2] - 1
yyy -= dim[1] - 1
zzz -= dim[0] - 1
k_x = np.empty(dim_kern, dtype=magdata.field.dtype)
k_y = np.empty(dim_kern, dtype=magdata.field.dtype)
k_z = np.empty(dim_kern, dtype=magdata.field.dtype)
# The kernel is r / |r|**3 (Biot-Savart-like); the exponent 1.5 acts on the squared radius:
k_x[...] = coeff * xxx / np.abs(xxx ** 2 + yyy ** 2 + zzz ** 2 + 1E-30) ** 1.5
k_y[...] = coeff * yyy / np.abs(xxx ** 2 + yyy ** 2 + zzz ** 2 + 1E-30) ** 1.5
k_z[...] = coeff * zzz / np.abs(xxx ** 2 + yyy ** 2 + zzz ** 2 + 1E-30) ** 1.5
# Calculate Fourier trafo of kernel components:
k_x_fft = fft.rfftn(k_x, dim_pad)
k_y_fft = fft.rfftn(k_y, dim_pad)
k_z_fft = fft.rfftn(k_z, dim_pad)
# Prepare magnetization:
x_mag = np.zeros(dim_pad, dtype=magdata.field.dtype)
y_mag = np.zeros(dim_pad, dtype=magdata.field.dtype)
z_mag = np.zeros(dim_pad, dtype=magdata.field.dtype)
x_mag[slice_M] = magdata.field[0, ...]
y_mag[slice_M] = magdata.field[1, ...]
z_mag[slice_M] = magdata.field[2, ...]
# Calculate Fourier trafo of magnetization components:
x_mag_fft = fft.rfftn(x_mag)
y_mag_fft = fft.rfftn(y_mag)
z_mag_fft = fft.rfftn(z_mag)
# Convolve:
a_x_fft = y_mag_fft * k_z_fft - z_mag_fft * k_y_fft
a_y_fft = z_mag_fft * k_x_fft - x_mag_fft * k_z_fft
a_z_fft = x_mag_fft * k_y_fft - y_mag_fft * k_x_fft
a_x = fft.irfftn(a_x_fft)[slice_B]
a_y = fft.irfftn(a_y_fft)[slice_B]
a_z = fft.irfftn(a_z_fft)[slice_B]
# Return A-field:
return VectorData(magdata.a, np.asarray((a_x, a_y, a_z)))
def convert_A_to_B(a_data):
"""Convert a vector potential `A` into a B-field distribution.
Parameters
----------
a_data: :class:`~pyramid.magdata.VectorData` object
The vector potential field from which the B-field is calculated.
Returns
-------
b_data: :class:`~pyramid.magdata.VectorData` object
The calculated B-field.
"""
_log.debug('Calling convert_A_to_B')
assert isinstance(a_data, VectorData), 'Only VectorData objects can be mapped!'
#
axis = tuple([i for i in range(3) if a_data.dim[i] > 1])
#
x_grads = np.gradient(a_data.field[0, ...], axis=axis) #/ a_data.a
y_grads = np.gradient(a_data.field[1, ...], axis=axis) #/ a_data.a
z_grads = np.gradient(a_data.field[2, ...], axis=axis) #/ a_data.a
#
x_gradii = np.zeros(a_data.shape)
y_gradii = np.zeros(a_data.shape)
z_gradii = np.zeros(a_data.shape)
#
for i, axis in enumerate(axis):
x_gradii[axis] = x_grads[i]
y_gradii[axis] = y_grads[i]
z_gradii[axis] = z_grads[i]
#
x_grad_z, x_grad_y, x_grad_x = x_gradii
y_grad_z, y_grad_y, y_grad_x = y_gradii
z_grad_z, z_grad_y, z_grad_x = z_gradii
# Calculate cross product:
b_x = (z_grad_y - y_grad_z)
b_y = (x_grad_z - z_grad_x)
b_z = (y_grad_x - x_grad_y)
# Return B-field:
return VectorData(a_data.a, np.asarray((b_x, b_y, b_z)))
def convert_M_to_B(magdata, b_0=1.0):
"""Convert a magnetic vector distribution into a B-field distribution.
Parameters
----------
magdata: :class:`~pyramid.magdata.VectorData` object
The magnetic vector field from which the B-field is calculated.
b_0: float, optional
The saturation magnetization which is used in the calculation.
Returns
-------
b_data: :class:`~pyramid.magdata.VectorData` object
The calculated B-field.
"""
_log.debug('Calling convert_M_to_B')
assert isinstance(magdata, VectorData), 'Only VectorData objects can be mapped!'
return convert_A_to_B(convert_M_to_A(magdata, b_0=b_0))
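# Usage sketch (assumes a small, uniformly magnetized test cube; not part of the original
# module): the converters chain as M -> A -> B, or go directly from M to B:
#     >>> import numpy as np
#     >>> from pyramid.fielddata import VectorData
#     >>> field = np.zeros((3, 8, 8, 8))
#     >>> field[2, 2:6, 2:6, 2:6] = 1           # cube magnetized along z
#     >>> magdata = VectorData(1.0, field)
#     >>> a_data = convert_M_to_A(magdata, b_0=1.0)
#     >>> b_data = convert_A_to_B(a_data)
#     >>> b_direct = convert_M_to_B(magdata, b_0=1.0)  # equivalent one-step conversion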
# -*- coding: utf-8 -*-
# Copyright 2016 by Forschungszentrum Juelich GmbH
# Author: J. Caron
#
"""This module provides classes for storing vector and scalar 3D-field."""
import logging
import os
import tempfile
import abc
from numbers import Number
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from matplotlib import patheffects
from PIL import Image
from scipy.ndimage.interpolation import zoom
from . import colors
from . import plottools
__all__ = ['VectorData', 'ScalarData']
class FieldData(object, metaclass=abc.ABCMeta):
"""Class for storing field data.
Abstract base class for the representation of magnetic or electric fields (see subclasses).
Fields can be accessed as 3D numpy arrays via the `field` property or as a vector via
`field_vec`. :class:`~.FieldData` objects support negation, arithmetic operators
(``+``, ``-``, ``*``) and their augmented counterparts (``+=``, ``-=``, ``*=``), with numbers
and other :class:`~.FieldData` objects of the same subclass, if their dimensions and grid
spacings match. It is possible to load data from HDF5 or LLG (.txt) files or to save the data
in these formats. Specialised plotting methods are also provided.
Attributes
----------
a: float
The grid spacing in nm.
field: :class:`~numpy.ndarray` (N=4)
The field distribution for every 3D-gridpoint.
"""
_log = logging.getLogger(__name__ + '.FieldData')
@property
def a(self):
"""The grid spacing in nm."""
return self._a
@a.setter
def a(self, a):
assert isinstance(a, Number), 'Grid spacing has to be a number!'
assert a >= 0, 'Grid spacing has to be a positive number!'
self._a = float(a)
@property
def shape(self):
"""The shape of the `field` (3D for scalar, 4D vor vector field)."""
return self.field.shape
@property
def dim(self):
"""Dimensions (z, y, x) of the grid, only 3D coordinates, without components if present."""
return self.shape[-3:]
@property
def field(self):
"""The field strength for every 3D-gridpoint (scalar: 3D, vector: 4D)."""
return self._field
@field.setter
def field(self, field):
assert isinstance(field, np.ndarray), 'Field has to be a numpy array!'
assert 3 <= len(field.shape) <= 4, 'Field has to be 3- or 4-dimensional (scalar / vector)!'
if len(field.shape) == 4:
assert field.shape[0] == 3, 'A vector field has to have exactly 3 components!'
self._field = field
@property
def field_amp(self):
"""The field amplitude (returns the field itself for scalar and the vector amplitude
calculated via a square sum for a vector field."""
if len(self.shape) == 4:
return np.sqrt(np.sum(self.field ** 2, axis=0))
else:
return self.field
@property
def field_vec(self):
"""Vector containing the vector field distribution."""
return np.reshape(self.field, -1)
@field_vec.setter
def field_vec(self, mag_vec):
assert np.size(mag_vec) == np.prod(self.shape), \
'Vector has to match field shape! {} {}'.format(mag_vec.shape, np.prod(self.shape))
self.field = mag_vec.reshape(self.shape)  # works for scalar (3D) and vector (4D) fields
def __init__(self, a, field):
self._log.debug('Calling __init__')
self.a = a
self.field = field
self._log.debug('Created ' + str(self))
def __repr__(self):
self._log.debug('Calling __repr__')
return '%s(a=%r, field=%r)' % (self.__class__, self.a, self.field)
def __str__(self):
self._log.debug('Calling __str__')
return '%s(a=%s, dim=%s)' % (self.__class__, self.a, self.dim)
def __neg__(self): # -self
self._log.debug('Calling __neg__')
return self.__class__(self.a, -self.field)
def __add__(self, other): # self + other
self._log.debug('Calling __add__')
assert isinstance(other, (FieldData, Number)), \
'Only FieldData objects and scalar numbers (as offsets) can be added/subtracted!'
if isinstance(other, Number): # other is a Number
self._log.debug('Adding an offset')
return self.__class__(self.a, self.field + other)
elif isinstance(other, FieldData):
self._log.debug('Adding two FieldData objects')
assert other.a == self.a, 'Added field has to have the same grid spacing!'
assert other.shape == self.shape, 'Added field has to have the same dimensions!'
return self.__class__(self.a, self.field + other.field)
def __sub__(self, other): # self - other
self._log.debug('Calling __sub__')
return self.__add__(-other)
def __mul__(self, other): # self * other
self._log.debug('Calling __mul__')
assert isinstance(other, Number), 'FieldData objects can only be multiplied by numbers!'
return self.__class__(self.a, self.field * other)
def __truediv__(self, other): # self / other
self._log.debug('Calling __truediv__')
assert isinstance(other, Number), 'FieldData objects can only be divided by numbers!'
return self.__class__(self.a, self.field / other)
def __floordiv__(self, other): # self // other
self._log.debug('Calling __floordiv__')
assert isinstance(other, Number), 'FieldData objects can only be divided by numbers!'
return self.__class__(self.a, self.field // other)
def __radd__(self, other): # other + self
self._log.debug('Calling __radd__')
return self.__add__(other)
def __rsub__(self, other): # other - self
self._log.debug('Calling __rsub__')
return -self.__sub__(other)
def __rmul__(self, other): # other * self
self._log.debug('Calling __rmul__')
return self.__mul__(other)
def __iadd__(self, other): # self += other
self._log.debug('Calling __iadd__')
return self.__add__(other)
def __isub__(self, other): # self -= other
self._log.debug('Calling __isub__')
return self.__sub__(other)
def __imul__(self, other): # self *= other
self._log.debug('Calling __imul__')
return self.__mul__(other)
def __itruediv__(self, other): # self /= other
self._log.debug('Calling __itruediv__')
return self.__truediv__(other)
def __ifloordiv__(self, other): # self //= other
self._log.debug('Calling __ifloordiv__')
return self.__floordiv__(other)
def __getitem__(self, item):
return self.__class__(self.a, self.field[item])
def __array__(self, dtype=None): # Used for numpy ufuncs, together with __array_wrap__!
if dtype:
return self.field.astype(dtype)
else:
return self.field
def __array_wrap__(self, array, _=None): # _ catches the context, which is not used.
return type(self)(self.a, array)
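# Usage sketch (assumes an existing `magdata` VectorData instance): thanks to the operator
# overloads and __array__/__array_wrap__ above, FieldData objects combine with numbers,
# with each other and with numpy ufuncs while keeping the grid spacing:
#     >>> doubled = magdata + magdata        # same grid spacing, fields added
#     >>> scaled = 0.5 * magdata             # multiplication with a number
#     >>> absolute = np.abs(magdata)         # ufunc result is wrapped back via __array_wrap__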
def copy(self):
"""Returns a copy of the :class:`~.FieldData` object
Returns
-------
field_data: :class:`~.FieldData`
A copy of the :class:`~.FieldData`.
"""
self._log.debug('Calling copy')
return self.__class__(self.a, self.field.copy())
def get_mask(self, threshold=0):
"""Mask all pixels where the amplitude of the field lies above `threshold`.
Parameters
----------
threshold : float, optional
A pixel only gets masked if it lies above this threshold. The default is 0.
Returns
-------
mask : :class:`~numpy.ndarray` (N=3, boolean)
Mask of the pixels where the amplitude of the field lies above `threshold`.
"""
self._log.debug('Calling get_mask')
return np.where(self.field_amp > threshold, True, False)
def plot_mask(self, title='Mask', threshold=0, grid=True, labels=True,
orientation=True, figsize=None, new_fig=True, **kwargs):
"""Plot the mask as a 3D-contour plot.
Parameters
----------
title: string, optional
The title for the plot.
threshold : float, optional
A pixel only gets masked if it lies above this threshold. The default is 0.
Returns
-------
plot : mayavi contour plot
The plot object.
"""
self._log.debug('Calling plot_mask')
from mayavi import mlab
if figsize is None:
figsize = (750, 700)
if new_fig:
mlab.figure(size=figsize, bgcolor=(0.5, 0.5, 0.5), fgcolor=(0., 0., 0.))
zzz, yyy, xxx = (np.indices(self.dim) + self.a / 2)
zzz, yyy, xxx = zzz.T, yyy.T, xxx.T
mask = self.get_mask(threshold=threshold).astype(int).T # Transpose because of VTK order!
extent = np.ravel(list(zip((0, 0, 0), mask.shape)))
cont = mlab.contour3d(xxx, yyy, zzz, mask, contours=[1], **kwargs)
if grid:
mlab.outline(cont, extent=extent)
if labels:
mlab.axes(cont, extent=extent)
mlab.title(title, height=0.95, size=0.35)
if orientation:
oa = mlab.orientation_axes()
oa.marker.set_viewport(0, 0, 0.4, 0.4)
mlab.draw()
engine = mlab.get_engine()
scene = engine.scenes[0]
scene.scene.isometric_view()
return cont
def plot_contour3d(self, title='Field Distribution', contours=10, opacity=0.25,
new_fig=True, **kwargs): # TODO: new_fig or hold in every mayavi plot!
"""Plot the field as a 3D-contour plot.
Parameters
----------
title: string, optional
The title for the plot.
contours: int, optional
Number of contours which should be plotted.
opacity: float, optional
Defines the opacity of the contours. Default is 0.25.
Returns
-------
plot : mayavi contour plot
The plot object.
"""
self._log.debug('Calling plot_contour3d')
from mayavi import mlab
if new_fig:
mlab.figure(size=(750, 700))
zzz, yyy, xxx = (np.indices(self.dim) + self.a / 2)
zzz, yyy, xxx = zzz.T, yyy.T, xxx.T
field_amp = self.field_amp.T # Transpose because of VTK order!
if not isinstance(contours, (list, tuple, np.ndarray)): # Calculate the contours:
contours = list(np.linspace(field_amp.min(), field_amp.max(), contours))
extent = np.ravel(list(zip((0, 0, 0), field_amp.shape)))
cont = mlab.contour3d(xxx, yyy, zzz, field_amp, contours=contours,
opacity=opacity, **kwargs)
mlab.outline(cont, extent=extent)
mlab.axes(cont, extent=extent)
mlab.title(title, height=0.95, size=0.35)
mlab.orientation_axes()
cont.scene.isometric_view()
return cont
@abc.abstractmethod
def scale_down(self, n):
"""Scale down the field distribution by averaging over two pixels along each axis.
Parameters
----------
n : int, optional
Number of times the field distribution is scaled down. The default is 1.
Returns
-------
None
Notes
-----
Acts in place and changes dimensions and grid spacing accordingly.
Only possible, if each axis length is a power of 2!
"""
pass
@abc.abstractmethod
def scale_up(self, n, order):
"""Scale up the field distribution using spline interpolation of the requested order.
Parameters
----------
n : int, optional
Power of 2 with which the grid is scaled. Default is 1, which means every axis is
increased by a factor of ``2**1 = 2``.
order : int, optional
The order of the spline interpolation, which has to be in the range between 0 and 5
and defaults to 0.
Returns
-------
None
Notes
-----
Acts in place and changes dimensions and grid spacing accordingly.
"""
pass
@abc.abstractmethod
def get_vector(self, mask):
"""Returns the field as a vector, specified by a mask.
Parameters
----------
mask : :class:`~numpy.ndarray` (N=3, boolean)
Masks the pixels from which the entries should be taken.
Returns
-------
vector : :class:`~numpy.ndarray` (N=1)
The vector containing the field of the specified pixels.
"""
pass
@abc.abstractmethod
def set_vector(self, vector, mask):
"""Set the field of the masked pixels to the values specified by `vector`.
Parameters
----------
mask : :class:`~numpy.ndarray` (N=3, boolean), optional
Masks the pixels from which the field should be taken.
vector : :class:`~numpy.ndarray` (N=1)
The vector containing the field of the specified pixels.
Returns
-------
None
"""
pass
@classmethod
def from_signal(cls, signal):
"""Convert a :class:`~hyperspy.signals.Signal` object to a :class:`~.FieldData` object.
Parameters
----------
signal: :class:`~hyperspy.signals.Signal`
The :class:`~hyperspy.signals.Signal` object which should be converted to FieldData.
Returns
-------
magdata: :class:`~.FieldData`
A :class:`~.FieldData` object containing the loaded data.
Notes
-----
This method requires the hyperspy package!
"""
cls._log.debug('Calling from_signal')
return cls(signal.axes_manager[0].scale, signal.data)
@abc.abstractmethod
def to_signal(self):
"""Convert :class:`~.FieldData` data into a HyperSpy signal.
Returns
-------
signal: :class:`~hyperspy.signals.Signal`
Representation of the :class:`~.FieldData` object as a HyperSpy Signal.
Notes
-----
This method requires the hyperspy package!
"""
self._log.debug('Calling to_signal')
try: # Try importing HyperSpy:
# noinspection PyUnresolvedReferences
import hyperspy.api as hs
except ImportError:
self._log.error('This method requires the hyperspy package!')
return
# Create signal:
signal = hs.signals.BaseSignal(self.field) # All axes are signal axes!
# Set axes:
signal.axes_manager[0].name = 'x-axis'
signal.axes_manager[0].units = 'nm'
signal.axes_manager[0].scale = self.a
signal.axes_manager[1].name = 'y-axis'
signal.axes_manager[1].units = 'nm'
signal.axes_manager[1].scale = self.a
signal.axes_manager[2].name = 'z-axis'
signal.axes_manager[2].units = 'nm'
signal.axes_manager[2].scale = self.a
return signal
class VectorData(FieldData):
"""Class for storing vector ield data.
Represents 3-dimensional vector field distributions with 3 components which are stored as a
3-dimensional numpy array in `field`, but which can also be accessed as a vector via
`field_vec`. :class:`~.VectorData` objects support negation, arithmetic operators
(``+``, ``-``, ``*``) and their augmented counterparts (``+=``, ``-=``, ``*=``), with numbers
and other :class:`~.VectorData` objects, if their dimensions and grid spacings match. It is
possible to load data from HDF5 or LLG (.txt) files or to save the data in these formats.
Plotting methods are also provided.
Attributes
----------
a: float
The grid spacing in nm.
field: :class:`~numpy.ndarray` (N=4)
The `x`-, `y`- and `z`-component of the vector field for every 3D-gridpoint
as a 4-dimensional numpy array (first dimension has to be 3, because of the 3 components).
"""
_log = logging.getLogger(__name__ + '.VectorData')
def __getitem__(self, item):
return self.__class__(self.a, self.field[item])
def scale_down(self, n=1):
"""Scale down the field distribution by averaging over two pixels along each axis.
Parameters
----------
n : int, optional
Number of times the field distribution is scaled down. The default is 1.
Returns
-------
None
Notes
-----
Acts in place and changes dimensions and grid spacing accordingly.
Only possible, if each axis length is a power of 2!
"""
self._log.debug('Calling scale_down')
assert n > 0 and isinstance(n, int), 'n must be a positive integer!'
self.a *= 2 ** n
for t in range(n):
# Pad if necessary:
pz, py, px = self.dim[0] % 2, self.dim[1] % 2, self.dim[2] % 2
if pz != 0 or py != 0 or px != 0:
self.field = np.pad(self.field, ((0, 0), (0, pz), (0, py), (0, px)),
mode='constant')
# Create coarser grid for the vector field:
shape_4d = (3, self.dim[0] // 2, 2, self.dim[1] // 2, 2, self.dim[2] // 2, 2)
self.field = self.field.reshape(shape_4d).mean(axis=(6, 4, 2))
def scale_up(self, n=1, order=0):
"""Scale up the field distribution using spline interpolation of the requested order.
Parameters
----------
n : int, optional
Power of 2 with which the grid is scaled. Default is 1, which means every axis is
increased by a factor of ``2**1 = 2``.
order : int, optional
The order of the spline interpolation, which has to be in the range between 0 and 5
and defaults to 0.
Returns
-------
None
Notes
-----
Acts in place and changes dimensions and grid spacing accordingly.
"""
self._log.debug('Calling scale_up')
assert n > 0 and isinstance(n, int), 'n must be a positive integer!'
assert 5 > order >= 0 and isinstance(order, int), \
'order must be a positive integer between 0 and 5!'
self.a /= 2 ** n
self.field = np.array((zoom(self.field[0], zoom=2 ** n, order=order),
zoom(self.field[1], zoom=2 ** n, order=order),
zoom(self.field[2], zoom=2 ** n, order=order)))
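# Usage sketch (assumes `magdata` with dim (8, 8, 8) and a = 1 nm): both methods act in
# place and keep the physical extent by adjusting the grid spacing:
#     >>> magdata.scale_down(n=1)         # dim -> (4, 4, 4), a -> 2 nm
#     >>> magdata.scale_up(n=1, order=0)  # dim -> (8, 8, 8), a -> 1 nm again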
def pad(self, pad_values):
"""Pad the current field distribution with zeros for each individual axis.
Parameters
----------
pad_values : tuple of int
Number of zeros which should be padded. Provided as a tuple where each entry
corresponds to an axis. An entry can be one int (same padding for both sides) or again
a tuple which specifies the pad values for both sides of the corresponding axis.
Returns
-------
None
Notes
-----
Acts in place and changes dimensions accordingly.
"""
self._log.debug('Calling pad')
assert len(pad_values) == 3, 'Pad values for each dimension have to be provided!'
pv = np.zeros(6, dtype=int)  # np.int is deprecated in newer numpy versions
for i, values in enumerate(pad_values):
assert np.shape(values) in [(), (2,)], 'Only one or two values per axis can be given!'
pv[2 * i:2 * (i + 1)] = values
self.field = np.pad(self.field, ((0, 0), (pv[0], pv[1]), (pv[2], pv[3]), (pv[4], pv[5])),
mode='constant')
def crop(self, crop_values):
"""Crop the current field distribution with zeros for each individual axis.
Parameters
----------
crop_values : tuple of int
Number of pixels which should be cropped. Provided as a tuple where each entry
corresponds to an axis. An entry can be one int (same cropping for both sides) or again
a tuple which specifies the crop values for both sides of the corresponding axis.
Returns
-------
None
Notes
-----
Acts in place and changes dimensions accordingly.
"""
self._log.debug('Calling crop')
assert len(crop_values) == 3, 'Crop values for each dimension have to be provided!'
cv = np.zeros(6, dtype=int)  # np.int is deprecated in newer numpy versions
for i, values in enumerate(crop_values):
assert np.shape(values) in [(), (2,)], 'Only one or two values per axis can be given!'
cv[2 * i:2 * (i + 1)] = values
cv *= np.resize([1, -1], len(cv))
cv = np.where(cv == 0, None, cv)
self.field = self.field[:, cv[0]:cv[1], cv[2]:cv[3], cv[4]:cv[5]]
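# Usage sketch (assumes an existing `magdata` instance): pad and crop take one entry per
# (z, y, x) axis, either a single int or a (front, back) tuple, and undo each other:
#     >>> magdata.pad((0, (1, 1), 2))    # +2 pixels in y, +4 pixels in x
#     >>> magdata.crop((0, (1, 1), 2))   # back to the original dimensions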
def get_vector(self, mask):
"""Returns the vector field components arranged in a vector, specified by a mask.
Parameters
----------
mask : :class:`~numpy.ndarray` (N=3, boolean)
Masks the pixels from which the components should be taken.
Returns
-------
vector : :class:`~numpy.ndarray` (N=1)
The vector containing vector field components of the specified pixels.
Order is: first all `x`-, then all `y`-, then all `z`-components.
"""
self._log.debug('Calling get_vector')
if mask is not None:
return np.reshape([self.field[0][mask],
self.field[1][mask],
self.field[2][mask]], -1)
else:
return self.field_vec
def set_vector(self, vector, mask=None):
"""Set the field components of the masked pixels to the values specified by `vector`.
Parameters
----------
mask : :class:`~numpy.ndarray` (N=3, boolean), optional
Masks the pixels from which the components should be taken.
vector : :class:`~numpy.ndarray` (N=1)
The vector containing vector field components of the specified pixels.
Order is: first all `x`-, then all `y`-, then all `z`-components.
Returns
-------
None
"""
self._log.debug('Calling set_vector')
assert np.size(vector) % 3 == 0, 'Vector has to contain all 3 components for every pixel!'
count = np.size(vector) // 3
if mask is not None:
self.field[0][mask] = vector[:count] # x-component
self.field[1][mask] = vector[count:2 * count] # y-component
self.field[2][mask] = vector[2 * count:] # z-component
else:
self.field_vec = vector
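# Usage sketch (assumes an existing `magdata` instance): get_vector/set_vector give masked
# access to the components, ordered as all x-, then all y-, then all z-values:
#     >>> mask = magdata.get_mask()
#     >>> vec = magdata.get_vector(mask)
#     >>> magdata.set_vector(2 * vec, mask)  # double the field inside the mask only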
def flip(self, axis='x'):
"""Flip/mirror the vector field around the specified axis.
Parameters
----------
axis: {'x', 'y', 'z'}, optional
The axis around which the vector field is flipped.
Returns
-------
magdata_flip: :class:`~.VectorData`
A flipped copy of the :class:`~.VectorData` object.
"""
self._log.debug('Calling flip')
if axis == 'x':
mag_x, mag_y, mag_z = self.field[:, :, :, ::-1]
field_flip = np.array((-mag_x, mag_y, mag_z))
elif axis == 'y':
mag_x, mag_y, mag_z = self.field[:, :, ::-1, :]
field_flip = np.array((mag_x, -mag_y, mag_z))
elif axis == 'z':
mag_x, mag_y, mag_z = self.field[:, ::-1, :, :]
field_flip = np.array((mag_x, mag_y, -mag_z))
else:
raise ValueError("Wrong input! 'x', 'y', 'z' allowed!")
return VectorData(self.a, field_flip)
def rot90(self, axis='x'):
"""Rotate the vector field 90° around the specified axis (right hand rotation).
Parameters
----------
axis: {'x', 'y', 'z'}, optional
The axis around which the vector field is rotated.
Returns
-------
magdata_rot: :class:`~.VectorData`
A rotated copy of the :class:`~.VectorData` object.
"""
self._log.debug('Calling rot90')
if axis == 'x':
field_rot = np.zeros((3, self.dim[1], self.dim[0], self.dim[2]))
for i in range(self.dim[2]):
mag_x, mag_y, mag_z = self.field[:, :, :, i]
mag_xrot, mag_yrot, mag_zrot = np.rot90(mag_x), np.rot90(mag_y), np.rot90(mag_z)
field_rot[:, :, :, i] = np.array((mag_xrot, mag_zrot, -mag_yrot))
elif axis == 'y':
field_rot = np.zeros((3, self.dim[2], self.dim[1], self.dim[0]))
for i in range(self.dim[1]):
mag_x, mag_y, mag_z = self.field[:, :, i, :]
mag_xrot, mag_yrot, mag_zrot = np.rot90(mag_x), np.rot90(mag_y), np.rot90(mag_z)
field_rot[:, :, i, :] = np.array((mag_zrot, mag_yrot, -mag_xrot))
elif axis == 'z':
field_rot = np.zeros((3, self.dim[0], self.dim[2], self.dim[1]))
for i in range(self.dim[0]):
mag_x, mag_y, mag_z = self.field[:, i, :, :]
mag_xrot, mag_yrot, mag_zrot = np.rot90(mag_x), np.rot90(mag_y), np.rot90(mag_z)
field_rot[:, i, :, :] = np.array((mag_yrot, -mag_xrot, mag_zrot))
else:
raise ValueError("Wrong input! 'x', 'y', 'z' allowed!")
return VectorData(self.a, field_rot)
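# Usage sketch (assumes an existing `magdata` instance): flip and rot90 return transformed
# copies and also adjust the vector components, so the field stays physically consistent:
#     >>> magdata_flip = magdata.flip(axis='x')  # mirrored, x-component negated
#     >>> magdata_rot = magdata.rot90(axis='z')  # rotated 90° around the z-axis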
def get_slice(self, ax_slice=None, proj_axis='z'):
# TODO: return x y z instead of u v w (to color fields consistent with xyz!)
"""Extract a slice from the :class:`~.VectorData` object.
Parameters
----------
proj_axis : {'z', 'y', 'x'}, optional
The axis, from which the slice is taken. The default is 'z'.
ax_slice : None or int, optional
The slice-index of the axis specified in `proj_axis`. Defaults to the center slice.
Returns
-------
u_mag, v_mag, w_mag, submask : :class:`~numpy.ndarray` (N=2)
The extracted vector field components in plane perpendicular to the `proj_axis` and
the perpendicular component.
"""
self._log.debug('Calling get_slice')
# Find slice:
assert proj_axis == 'z' or proj_axis == 'y' or proj_axis == 'x', \
'Axis has to be x, y or z (as string).'
if ax_slice is None:
ax_slice = self.dim[{'z': 0, 'y': 1, 'x': 2}[proj_axis]] // 2
if proj_axis == 'z': # Slice of the xy-plane with z = ax_slice
self._log.debug('proj_axis == z')
u_mag = np.copy(self.field[0][ax_slice, ...]) # x-component
v_mag = np.copy(self.field[1][ax_slice, ...]) # y-component
w_mag = np.copy(self.field[2][ax_slice, ...]) # z-component
elif proj_axis == 'y': # Slice of the xz-plane with y = ax_slice
self._log.debug('proj_axis == y')
u_mag = np.copy(self.field[0][:, ax_slice, :]) # x-component
v_mag = np.copy(self.field[2][:, ax_slice, :]) # z-component
w_mag = np.copy(self.field[1][:, ax_slice, :]) # y-component
elif proj_axis == 'x': # Slice of the zy-plane with x = ax_slice
self._log.debug('proj_axis == x')
# TODO: Strange swapaxes, really necessary? Get rid EVERYWHERE if possible!
#u_mag = np.swapaxes(np.copy(self.field[2][..., ax_slice]), 0, 1) # z-component
#v_mag = np.swapaxes(np.copy(self.field[1][..., ax_slice]), 0, 1) # y-component
#w_mag = np.swapaxes(np.copy(self.field[0][..., ax_slice]), 0, 1) # x-component
# TODO: z should be special and always along y in 2D if possible!!
u_mag = np.copy(self.field[1][..., ax_slice]) # y-component
v_mag = np.copy(self.field[2][..., ax_slice]) # z-component
w_mag = np.copy(self.field[0][..., ax_slice]) # x-component
else:
raise ValueError('{} is not a valid argument (use x, y or z)'.format(proj_axis))
return u_mag, v_mag, w_mag
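# Usage sketch (assumes an existing `magdata` instance): extract the central slice
# perpendicular to z; the returned in-plane components have shape (dim_y, dim_x):
#     >>> u_mag, v_mag, w_mag = magdata.get_slice(proj_axis='z')
#     >>> u_mag.shape == magdata.dim[1:]
#     True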
def to_signal(self):
"""Convert :class:`~.VectorData` data into a HyperSpy signal.
Returns
-------
signal: :class:`~hyperspy.signals.Signal`
Representation of the :class:`~.VectorData` object as a HyperSpy Signal.
Notes
-----
This method requires the hyperspy package!
"""
self._log.debug('Calling to_signal')
signal = super().to_signal()
# Set component axis:
signal.axes_manager[3].name = 'x/y/z-component'
signal.axes_manager[3].units = ''
# Set metadata:
signal.metadata.Signal.title = 'VectorData'
# Return signal:
return signal
def save(self, filename, **kwargs):
"""Saves the VectorData in the specified format.
The function gets the format from the extension:
- hdf5 for HDF5.
- EMD Electron Microscopy Dataset format (also HDF5).
- llg format.
- ovf format.
- npy or npz for numpy formats.
If no extension is provided, 'hdf5' is used. Most formats are
saved with the HyperSpy package (internally the fielddata is first
converted to a HyperSpy Signal).
Each format accepts a different set of parameters. For details
see the specific format documentation.
Parameters
----------
filename : str, optional
Name of the file which the VectorData is saved into. The extension
determines the saving procedure.
"""
from .file_io.io_vectordata import save_vectordata
save_vectordata(self, filename, **kwargs)
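# Usage sketch: the file extension selects the writer; loading goes through the file_io
# subpackage (re-exported as pyramid.load_vectordata):
#     >>> magdata.save('magdata_test.hdf5')
#     >>> # from pyramid import load_vectordata
#     >>> # magdata_reloaded = load_vectordata('magdata_test.hdf5')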
def plot_quiver(self, ar_dens=1, log=False, scaled=True, scale=1., b_0=None, qkey_unit='T',
coloring='angle', cmap=None, # Used here and plot_streamlines!
proj_axis='z', ax_slice=None, show_mask=True, bgcolor=None, axis=None,
figsize=None, **kwargs):
"""Plot a slice of the vector field as a quiver plot.
Parameters
----------
ar_dens: int, optional
Number defining the arrow density which is plotted. A higher ar_dens number skips more
arrows (a number of 2 plots every second arrow). Default is 1.
log : boolean, optional
The logarithm of the arrow length is plotted instead. This is helpful if only the
direction of the arrows is important and the amplitude varies a lot. Default is False.
scaled : boolean, optional
Normalizes the plotted arrows in respect to the highest one. Default is True.
scale: float, optional
Additional multiplicative factor scaling the arrow length. Default is 1
(no further scaling).
b_0 : float, optional
Saturation induction (saturation magnetisation times the vacuum permeability).
If this is specified, a quiverkey is used to indicate the length of the longest arrow.
coloring : {'angle', 'amplitude', 'uniform', matplotlib color}
Color coding mode of the arrows. Use 'angle' (default), 'amplitude', 'uniform'
(black or white, depending on `bgcolor`), or a matplotlib color keyword.
cmap : string, optional
The :class:`~matplotlib.colors.Colormap` which is used for the plot as a string.
If not set, an appropriate one is used. Note that a subclass of
:class:`~.colors.Colormap3D` should be used for angle encoding.
proj_axis : {'z', 'y', 'x'}, optional
The axis, from which a slice is plotted. The default is 'z'.
ax_slice : int, optional
The slice-index of the axis specified in `proj_axis`. Is set to the center of
`proj_axis` if not specified.
show_mask: boolean
Default is True. Shows the outlines of the mask slice if available.
bgcolor: {'white', 'black'}, optional
Determines the background color of the plot.
axis : :class:`~matplotlib.axes.AxesSubplot`, optional
Axis on which the graph is plotted. Creates a new figure if none is specified.
figsize : tuple of floats (N=2)
Size of the plot figure.
Returns
-------
axis: :class:`~matplotlib.axes.AxesSubplot`
The axis on which the graph is plotted.
Notes
-----
Uses :func:`~.plottools.format_axis` at the end. According keywords can also be given here.
"""
self._log.debug('Calling plot_quiver')
a = self.a
if figsize is None:
figsize = plottools.FIGSIZE_DEFAULT
assert proj_axis == 'z' or proj_axis == 'y' or proj_axis == 'x', \
'Axis has to be x, y or z (as string).'
if ax_slice is None:
ax_slice = self.dim[{'z': 0, 'y': 1, 'x': 2}[proj_axis]] // 2
# Extract slice and mask:
u_mag, v_mag = self.get_slice(ax_slice, proj_axis)[:2]
submask = np.where(np.hypot(u_mag, v_mag) > 0, True, False)
# Prepare quiver (select only used arrows if ar_dens is specified):
dim_uv = u_mag.shape
vv, uu = np.indices(dim_uv) + 0.5 # shift to center of pixel
uu = uu[::ar_dens, ::ar_dens]
vv = vv[::ar_dens, ::ar_dens]
u_mag = u_mag[::ar_dens, ::ar_dens]
v_mag = v_mag[::ar_dens, ::ar_dens]
amplitudes = np.hypot(u_mag, v_mag)
# TODO: Delete if only used in log:
angles = np.angle(u_mag + 1j * v_mag, deg=True).tolist()
# Calculate the arrow colors:
if bgcolor is None:
bgcolor = 'white' # Default!
cmap_overwrite = cmap
if coloring == 'angle':
self._log.debug('Encoding angles')
hue = np.asarray(np.arctan2(v_mag, u_mag) / (2 * np.pi))
hue[hue < 0] += 1
cmap = colors.CMAP_CIRCULAR_DEFAULT
elif coloring == 'amplitude':
self._log.debug('Encoding amplitude')
hue = amplitudes / amplitudes.max()
if bgcolor == 'white':
cmap = colors.cmaps['cubehelix_reverse']
else:
cmap = colors.cmaps['cubehelix_standard']
elif coloring == 'uniform':
self._log.debug('Automatic uniform color encoding')
hue = amplitudes / amplitudes.max()
if bgcolor == 'white':
cmap = colors.cmaps['transparent_black']
else:
cmap = colors.cmaps['transparent_white']
else:
self._log.debug('Specified uniform color encoding')
hue = np.zeros_like(u_mag)
cmap = ListedColormap([coloring])
if cmap_overwrite is not None:
cmap = cmap_overwrite
# If no axis is specified, a new figure is created:
if axis is None:
self._log.debug('axis is None')
fig = plt.figure(figsize=figsize)
axis = fig.add_subplot(1, 1, 1)
tight = True
else:
tight = False
axis.set_aspect('equal')
# Take the logarithm of the arrows to clearly show directions (if specified):
# TODO: get rid of log!! (only problems...)
if log and np.any(amplitudes): # If the slice is empty, skip!
cutoff = 10
amp = np.round(amplitudes, decimals=cutoff)
min_value = amp[np.nonzero(amp)].min()
u_mag = np.round(u_mag, decimals=cutoff) / min_value
u_mag = np.log10(np.abs(u_mag) + 1) * np.sign(u_mag)
v_mag = np.round(v_mag, decimals=cutoff) / min_value
v_mag = np.log10(np.abs(v_mag) + 1) * np.sign(v_mag)
amplitudes = np.hypot(u_mag, v_mag) # Recalculate (used if scaled)!
# Scale the amplitude of the arrows to the highest one (if specified):
if scaled:
u_mag /= amplitudes.max() + 1E-30
v_mag /= amplitudes.max() + 1E-30
# Plot quiver:
# TODO: quiver does not work with matplotlib 2.0! FIX!
quiv = axis.quiver(uu, vv, u_mag, v_mag, hue, cmap=cmap, clim=(0, 1), #angles=angles,
pivot='middle', units='xy', scale_units='xy', scale=scale / ar_dens,
minlength=0.05, width=1*ar_dens, headlength=2, headaxislength=2,
headwidth=2, minshaft=2)
axis.set_xlim(0, dim_uv[1])
axis.set_ylim(0, dim_uv[0])
# Determine colormap if necessary:
if coloring == 'amplitude':
cbar_mappable, cbar_label = quiv, 'amplitude'
else:
cbar_mappable, cbar_label = None, None
# Change background color:
axis.set_facecolor(bgcolor)
# Show mask:
if show_mask and not np.all(submask): # Plot mask if desired and not trivial!
vv, uu = np.indices(dim_uv) + 0.5 # shift to center of pixel
mask_color = 'white' if bgcolor == 'black' else 'black'
axis.contour(uu, vv, submask, levels=[0.5], colors=mask_color,
linestyles='dotted', linewidths=2)
# Plot quiverkey if B_0 is specified):
if b_0 and not log: # The angles needed for log would break the quiverkey!
label = '{:.3g} {}'.format(amplitudes.max() * b_0, qkey_unit)
quiv.angles = 'uv' # With a list of angles, the quiverkey would break!
stroke = plottools.STROKE_DEFAULT
txtcolor = 'w' if stroke == 'k' else 'k'
edgecolor = stroke if stroke is not None else 'none'
fontsize = kwargs.get('fontsize', None)
if fontsize is None:
fontsize = plottools.FONTSIZE_DEFAULT
qk = plt.quiverkey(Q=quiv, X=0.88, Y=0.065, U=1, label=label, labelpos='W',
coordinates='axes', facecolor=txtcolor, edgecolor=edgecolor,
labelcolor=txtcolor, linewidth=0.5,
clip_box=axis.bbox, clip_on=True,
fontproperties={'size': kwargs.get('fontsize', fontsize)})
if stroke is not None:
qk.text.set_path_effects(
[patheffects.withStroke(linewidth=2, foreground=stroke)])
# Return formatted axis:
return plottools.format_axis(axis, sampling=a, cbar_mappable=cbar_mappable,
cbar_label=cbar_label, tight_layout=tight, **kwargs)
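# Usage sketch (assumes an existing `magdata` instance and an interactive matplotlib
# backend): plot the central z-slice with every second arrow and a quiverkey for b_0 = 1 T:
#     >>> axis = magdata.plot_quiver(ar_dens=2, coloring='angle', b_0=1.0)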
def plot_field(self, proj_axis='z', ax_slice=None, show_mask=True, bgcolor=None, axis=None,
figsize=None, **kwargs):
"""Plot a slice of the vector field as a color field imshow plot.
Parameters
----------
proj_axis : {'z', 'y', 'x'}, optional
The axis, from which a slice is plotted. The default is 'z'.
ax_slice : int, optional
The slice-index of the axis specified in `proj_axis`. Is set to the center of
`proj_axis` if not specified.
show_mask: boolean
Default is True. Shows the outlines of the mask slice if available.
bgcolor: {'white', 'black'}, optional
Determines the background color of the plot.
axis : :class:`~matplotlib.axes.AxesSubplot`, optional
Axis on which the graph is plotted. Creates a new figure if none is specified.
figsize : tuple of floats (N=2)
Size of the plot figure.
Returns
-------
axis: :class:`~matplotlib.axes.AxesSubplot`
The axis on which the graph is plotted.
Notes
-----
Uses :func:`~.plottools.format_axis` at the end. According keywords can also be given here.
"""
self._log.debug('Calling plot_field')
a = self.a
if figsize is None:
figsize = plottools.FIGSIZE_DEFAULT
assert proj_axis == 'z' or proj_axis == 'y' or proj_axis == 'x', \
'Axis has to be x, y or z (as string).'
if ax_slice is None:
ax_slice = self.dim[{'z': 0, 'y': 1, 'x': 2}[proj_axis]] // 2
# Extract slice and mask:
u_mag, v_mag, w_mag = self.get_slice(ax_slice, proj_axis)
submask = np.where(np.hypot(u_mag, v_mag) > 0, True, False)
# TODO: Before you fix this, fix get_slice!!! (should be easy...)
# TODO: return x y z instead of u v w (to color fields consistent with xyz!)
# TODO: maybe best to get colors of slice separately from u and v components!!!
# TODO: vector_to_rgb does already exist!!
if proj_axis == 'z': # Slice of the xy-plane with z = ax_slice
u_mag, v_mag, w_mag = u_mag, v_mag, w_mag
elif proj_axis == 'y': # Slice of the xz-plane with y = ax_slice
u_mag, v_mag, w_mag = u_mag, w_mag, v_mag
elif proj_axis == 'x': # Slice of the zy-plane with x = ax_slice
u_mag, v_mag, w_mag = w_mag, u_mag, v_mag
# TODO: END!
# If no axis is specified, a new figure is created:
if axis is None:
self._log.debug('axis is None')
fig = plt.figure(figsize=figsize)
axis = fig.add_subplot(1, 1, 1)
tight = True
else:
tight = False
axis.set_aspect('equal')
# Determine 'z'-component for luminance (keep as gray if None):
z_mag = w_mag
if bgcolor == 'white':
z_mag = np.where(submask, z_mag, np.max(np.hypot(u_mag, v_mag)))
if bgcolor == 'black':
z_mag = np.where(submask, z_mag, -np.max(np.hypot(u_mag, v_mag)))
# Plot the field:
dim_uv = u_mag.shape
rgb = colors.CMAP_CIRCULAR_DEFAULT.rgb_from_vector(np.asarray((u_mag, v_mag, z_mag)))
axis.imshow(Image.fromarray(rgb), origin='lower', interpolation='none',
extent=(0, dim_uv[1], 0, dim_uv[0]))
# Change background color:
if bgcolor is not None:
axis.set_facecolor(bgcolor)
# Show mask:
if show_mask and not np.all(submask): # Plot mask if desired and not trivial!
vv, uu = np.indices(dim_uv) + 0.5 # shift to center of pixel
mask_color = 'white' if bgcolor == 'black' else 'black'
axis.contour(uu, vv, submask, levels=[0.5], colors=mask_color,
linestyles='dotted', linewidths=2)
# Return formatted axis:
return plottools.format_axis(axis, sampling=a, tight_layout=tight, **kwargs)
def plot_quiver_field(self, **kwargs):
"""Plot the vector field as a field plot with uniformly colored arrows overlayed.
Parameters
----------
See :func:`~.plot_field` and :func:`~.plot_quiver` for parameters!
Returns
-------
axis: :class:`~matplotlib.axes.AxesSubplot`
The axis on which the graph is plotted.
"""
# Extract parameters:
show_mask = kwargs.pop('show_mask', True) # Only needed once!
axis = kwargs.pop('axis', None)
# Set default bgcolor to white (only for combined plot), only if bgcolor was not specified:
kwargs.setdefault('bgcolor', 'white')
# Plot field first (with mask and axis formatting), then quiver:
axis = self.plot_field(axis=axis, show_mask=show_mask, **kwargs)
self.plot_quiver(coloring='uniform', show_mask=False, axis=axis,
format_axis=False, **kwargs)
# Return plotting axis:
return axis
def plot_streamline(self, density=2, linewidth=2, coloring='angle', cmap=None,
proj_axis='z', ax_slice=None, show_mask=True, bgcolor=None, axis=None,
figsize=None, **kwargs):
"""Plot a slice of the vector field as a quiver plot.
Parameters
----------
density : float or 2-tuple, optional
Controls the closeness of streamlines. When density = 1, the domain is divided into a
30x30 grid; density linearly scales this grid. Each cell in the grid can have, at most,
one traversing streamline. For different densities in each direction, use
[density_x, density_y].
linewidth : numeric or 2d array, optional
Vary linewidth when given a 2d array with the same shape as velocities.
coloring : {'angle', 'amplitude', 'uniform'}
Color coding mode of the streamlines. Use 'angle' (default), 'amplitude' or
'uniform'.
cmap : string, optional
The :class:`~matplotlib.colors.Colormap` which is used for the plot as a string.
If not set, an appropriate one is used. Note that a subclass of
:class:`~.colors.Colormap3D` should be used for angle encoding.
proj_axis : {'z', 'y', 'x'}, optional
The axis, from which a slice is plotted. The default is 'z'.
ax_slice : int, optional
The slice-index of the axis specified in `proj_axis`. Is set to the center of
`proj_axis` if not specified.
show_mask: boolean
Default is True. Shows the outlines of the mask slice if available.
bgcolor: {'white', 'black'}, optional
Determines the background color of the plot.
axis : :class:`~matplotlib.axes.AxesSubplot`, optional
Axis on which the graph is plotted. Creates a new figure if none is specified.
figsize : tuple of floats (N=2)
Size of the plot figure.
Returns
-------
axis: :class:`~matplotlib.axes.AxesSubplot`
The axis on which the graph is plotted.
Notes
-----
Uses :func:`~.plottools.format_axis` at the end. According keywords can also be given here.
"""
self._log.debug('Calling plot_streamline')
a = self.a
if figsize is None:
figsize = plottools.FIGSIZE_DEFAULT
assert proj_axis == 'z' or proj_axis == 'y' or proj_axis == 'x', \
'Axis has to be x, y or z (as string).'
if ax_slice is None:
ax_slice = self.dim[{'z': 0, 'y': 1, 'x': 2}[proj_axis]] // 2
u_mag, v_mag = self.get_slice(ax_slice, proj_axis)[:2]
submask = np.where(np.hypot(u_mag, v_mag) > 0, True, False)
# Prepare streamlines:
dim_uv = u_mag.shape
uu = np.arange(dim_uv[1]) + 0.5 # shift to center of pixel
vv = np.arange(dim_uv[0]) + 0.5 # shift to center of pixel
u_mag, v_mag = self.get_slice(ax_slice, proj_axis)[:2]
# v_mag = np.ma.array(v_mag, mask=submask)
amplitudes = np.hypot(u_mag, v_mag)
# Calculate the arrow colors:
if bgcolor is None:
bgcolor = 'white' # Default!
cmap_overwrite = cmap
if coloring == 'angle':
self._log.debug('Encoding angles')
hue = np.asarray(np.arctan2(v_mag, u_mag) / (2 * np.pi))
hue[hue < 0] += 1
cmap = colors.CMAP_CIRCULAR_DEFAULT
elif coloring == 'amplitude':
self._log.debug('Encoding amplitude')
hue = amplitudes / amplitudes.max()
if bgcolor == 'white':
cmap = colors.cmaps['cubehelix_reverse']
else:
cmap = colors.cmaps['cubehelix_standard']
elif coloring == 'uniform':
self._log.debug('Automatic uniform color encoding')
hue = amplitudes / amplitudes.max()
if bgcolor == 'white':
cmap = colors.cmaps['transparent_black']
else:
cmap = colors.cmaps['transparent_white']
else:
self._log.debug('Specified uniform color encoding')
hue = np.zeros_like(u_mag)
cmap = ListedColormap([coloring])
if cmap_overwrite is not None:
cmap = cmap_overwrite
# If no axis is specified, a new figure is created:
if axis is None:
self._log.debug('axis is None')
fig = plt.figure(figsize=figsize)
axis = fig.add_subplot(1, 1, 1)
tight = True
else:
tight = False
axis.set_aspect('equal')
# Plot the streamlines:
im = plt.streamplot(uu, vv, u_mag, v_mag, density=density, linewidth=linewidth,
color=hue, cmap=cmap)
# Determine colormap if necessary:
if coloring == 'amplitude':
cbar_mappable, cbar_label = im, 'amplitude'
else:
cbar_mappable, cbar_label = None, None
# Change background color:
axis.set_facecolor(bgcolor)  # set_axis_bgcolor is deprecated/removed in newer matplotlib
# Show mask:
if show_mask and not np.all(submask): # Plot mask if desired and not trivial!
vv, uu = np.indices(dim_uv) + 0.5 # shift to center of pixel
mask_color = 'white' if bgcolor == 'black' else 'black'
axis.contour(uu, vv, submask, levels=[0.5], colors=mask_color,
linestyles='dotted', linewidths=2)
# Return formatted axis:
return plottools.format_axis(axis, sampling=a, cbar_mappable=cbar_mappable,
cbar_label=cbar_label, tight_layout=tight, **kwargs)
def plot_quiver3d(self, title='Vector Field', limit=None, cmap='jet', mode='2darrow',
coloring='angle', ar_dens=1, opacity=1.0, grid=True, labels=True,
orientation=True, figsize=None, new_fig=True, view='isometric',
position=None, bgcolor=(0.5, 0.5, 0.5)):
"""Plot the vector field as 3D-vectors in a quiverplot.
Parameters
----------
title : string, optional
The title for the plot.
limit : float, optional
Plotlimit for the vector field arrow length used to scale the colormap.
cmap : string, optional
String describing the colormap which is used for amplitude encoding (default is 'jet').
ar_dens: int, optional
Number defining the arrow density which is plotted. A higher ar_dens number skips more
arrows (a number of 2 plots every second arrow). Default is 1.
mode: string, optional
Mode, determining the glyphs used in the 3D plot. Default is '2darrow', which
corresponds to 2D arrows. For smaller amounts of arrows, 'arrow' (3D) is prettier.
coloring : {'angle', 'amplitude'}, optional
Color coding mode of the arrows. Use 'angle' (default) or 'amplitude'.
opacity: float, optional
Defines the opacity of the arrows. Default is 1.0 (completely opaque).
Returns
-------
plot : :class:`mayavi.modules.vectors.Vectors`
The plot object.
"""
self._log.debug('Calling quiver_plot3D')
from mayavi import mlab
if limit is None:
limit = np.max(np.nan_to_num(self.field_amp))
ad = ar_dens
# Create points and vector components as lists:
zzz, yyy, xxx = (np.indices(self.dim) + 1 / 2)
zzz = zzz[::ad, ::ad, ::ad].ravel()
yyy = yyy[::ad, ::ad, ::ad].ravel()
xxx = xxx[::ad, ::ad, ::ad].ravel()
x_mag = self.field[0][::ad, ::ad, ::ad].ravel()
y_mag = self.field[1][::ad, ::ad, ::ad].ravel()
z_mag = self.field[2][::ad, ::ad, ::ad].ravel()
# Plot them as vectors:
if figsize is None:
figsize = (750, 700)
if new_fig:
mlab.figure(size=figsize, bgcolor=bgcolor, fgcolor=(0., 0., 0.))
extent = np.ravel(list(zip((0, 0, 0), (self.dim[2], self.dim[1], self.dim[0]))))
if coloring == 'angle': # Encodes the full angle via colorwheel and saturation:
self._log.debug('Encoding full 3D angles')
vecs = mlab.quiver3d(xxx, yyy, zzz, x_mag, y_mag, z_mag, mode=mode, opacity=opacity,
scalars=np.arange(len(xxx)), line_width=2)
vector = np.asarray((x_mag.ravel(), y_mag.ravel(), z_mag.ravel()))
rgb = colors.CMAP_CIRCULAR_DEFAULT.rgb_from_vector(vector)
rgba = np.hstack((rgb, 255 * np.ones((len(xxx), 1), dtype=np.uint8)))
vecs.glyph.color_mode = 'color_by_scalar'
vecs.module_manager.scalar_lut_manager.lut.table = rgba
mlab.draw()
elif coloring == 'amplitude': # Encodes the amplitude of the arrows with the jet colormap:
self._log.debug('Encoding amplitude')
vecs = mlab.quiver3d(xxx, yyy, zzz, x_mag, y_mag, z_mag,
mode=mode, colormap=cmap, opacity=opacity, line_width=2)
mlab.colorbar(label_fmt='%.2f')
mlab.colorbar(orientation='vertical')
else:
raise AttributeError('Coloring mode not supported!')
vecs.glyph.glyph_source.glyph_position = 'center'
vecs.module_manager.vector_lut_manager.data_range = np.array([0, limit])
if grid:
mlab.outline(vecs, extent=extent)
if labels:
mlab.axes(vecs, extent=extent)
mlab.title(title, height=0.95, size=0.35)
if orientation:
oa = mlab.orientation_axes()
oa.marker.set_viewport(0, 0, 0.4, 0.4)
mlab.draw()
engine = mlab.get_engine()
scene = engine.scenes[0]
if view == 'isometric':
scene.scene.isometric_view()
elif view == 'x_plus_view':
scene.scene.x_plus_view()
elif view == 'y_plus_view':
scene.scene.y_plus_view()
if position:
scene.scene.camera.position = position
return vecs
def plot_quiver3d_to_2d(self, dim_uv=None, axis=None, figsize=None, high_res=False, **kwargs):
# TODO: into plottools and make available for all 3D plots if possible!
kwargs.setdefault('labels', False)
kwargs.setdefault('orientation', False)
kwargs.setdefault('bgcolor', (0.7, 0.7, 0.7))
from mayavi import mlab
if figsize is None:
figsize = plottools.FIGSIZE_DEFAULT
if axis is None:
self._log.debug('axis is None')
fig = plt.figure(figsize=figsize)
axis = fig.add_subplot(1, 1, 1)
axis.set_facecolor(kwargs['bgcolor'])  # set_axis_bgcolor is deprecated/removed in newer matplotlib
self.plot_quiver3d(figsize=(800, 800), **kwargs)
if high_res: # Use temp files:
tmpdir = tempfile.mkdtemp()
temp_path = os.path.join(tmpdir, 'temp.png')
try:
mlab.savefig(temp_path, size=(2000, 2000))
imgmap = np.asarray(Image.open(temp_path))
except Exception as e:
raise e
finally:
os.remove(temp_path)
os.rmdir(tmpdir)
else: # Use screenshot (returns array WITH alpha!):
imgmap = mlab.screenshot(mode='rgba', antialiased=True)
mlab.close(mlab.gcf())
if dim_uv is None:
dim_uv = self.dim[1:]
axis.imshow(imgmap, extent=[0, dim_uv[0], 0, dim_uv[1]], origin='upper')
kwargs.setdefault('scalebar', False)
kwargs.setdefault('hideaxes', True)
return plottools.format_axis(axis, hideaxes=True, scalebar=False)
class ScalarData(FieldData):
"""Class for storing scalar field data.
Represents 3-dimensional scalar field distributions which is stored as a 3-dimensional
numpy array in `field`, but which can also be accessed as a vector via `field_vec`.
:class:`~.ScalarData` objects support negation, arithmetic operators (``+``, ``-``, ``*``)
and their augmented counterparts (``+=``, ``-=``, ``*=``), with numbers and other
:class:`~.ScalarData` objects, if their dimensions and grid spacings match. It is possible
to load data from HDF5 or LLG (.txt) files or to save the data in these formats.
Plotting methods are also provided.
Attributes
----------
a: float
The grid spacing in nm.
field: :class:`~numpy.ndarray` (N=3)
The scalar field.
"""
_log = logging.getLogger(__name__ + '.ScalarData')
def scale_down(self, n=1):
"""Scale down the field distribution by averaging over two pixels along each axis.
Parameters
----------
n : int, optional
Number of times the field distribution is scaled down. The default is 1.
Returns
-------
None
Notes
-----
Acts in place and changes dimensions and grid spacing accordingly.
Only possible, if each axis length is a power of 2!
"""
self._log.debug('Calling scale_down')
assert n > 0 and isinstance(n, int), 'n must be a positive integer!'
self.a *= 2 ** n
for t in range(n):
# Pad if necessary:
pz, py, px = self.dim[0] % 2, self.dim[1] % 2, self.dim[2] % 2
if pz != 0 or py != 0 or px != 0:
self.field = np.pad(self.field, ((0, pz), (0, py), (0, px)), mode='constant')
# Create coarser grid for the field:
shape_4d = (self.dim[0] // 2, 2, self.dim[1] // 2, 2, self.dim[2] // 2, 2)
self.field = self.field.reshape(shape_4d).mean(axis=(5, 3, 1))
def scale_up(self, n=1, order=0):
"""Scale up the field distribution using spline interpolation of the requested order.
Parameters
----------
n : int, optional
Power of 2 with which the grid is scaled. Default is 1, which means every axis is
increased by a factor of ``2**1 = 2``.
order : int, optional
The order of the spline interpolation, which has to be in the range between 0 and 5
and defaults to 0.
Returns
-------
None
Notes
-----
Acts in place and changes dimensions and grid spacing accordingly.
"""
self._log.debug('Calling scale_up')
assert n > 0 and isinstance(n, int), 'n must be a positive integer!'
assert 5 > order >= 0 and isinstance(order, int), \
'order must be a positive integer between 0 and 5!'
self.a /= 2 ** n
self.field = zoom(self.field, zoom=2 ** n, order=order)
def get_vector(self, mask):
"""Returns the field as a vector, specified by a mask.
Parameters
----------
mask : :class:`~numpy.ndarray` (N=3, boolean)
Masks the pixels from which the components should be taken.
Returns
-------
vector : :class:`~numpy.ndarray` (N=1)
The vector containing the field of the specified pixels.
"""
self._log.debug('Calling get_vector')
if mask is not None:
return np.reshape(self.field[mask], -1)
else:
return self.field_vec
def set_vector(self, vector, mask=None):
"""Set the field components of the masked pixels to the values specified by `vector`.
Parameters
----------
mask : :class:`~numpy.ndarray` (N=3, boolean), optional
Masks the pixels from which the components should be taken.
vector : :class:`~numpy.ndarray` (N=1)
The vector containing the field of the specified pixels.
Returns
-------
None
"""
self._log.debug('Calling set_vector')
if mask is not None:
self.field[mask] = vector
else:
self.field_vec = vector
def rot90(self, axis='x'):
"""Rotate the scalar field 90° around the specified axis (right hand rotation).
Parameters
----------
axis: {'x', 'y', 'z'}, optional
The axis around which the vector field is rotated.
Returns
-------
scalardata_rot: :class:`~.ScalarData`
A rotated copy of the :class:`~.ScalarData` object.
"""
self._log.debug('Calling rot90')
if axis == 'x':
field_rot = np.zeros((self.dim[1], self.dim[0], self.dim[2]))
for i in range(self.dim[2]):
field_rot[:, :, i] = np.rot90(self.field[:, :, i])
elif axis == 'y':
field_rot = np.zeros((self.dim[2], self.dim[1], self.dim[0]))
for i in range(self.dim[1]):
field_rot[:, i, :] = np.rot90(self.field[:, i, :])
elif axis == 'z':
field_rot = np.zeros((self.dim[0], self.dim[2], self.dim[1]))
for i in range(self.dim[0]):
field_rot[i, :, :] = np.rot90(self.field[i, :, :])
else:
raise ValueError("Wrong input! 'x', 'y', 'z' allowed!")
return ScalarData(self.a, field_rot)
def to_signal(self):
"""Convert :class:`~.ScalarData` data into a HyperSpy signal.
Returns
-------
signal: :class:`~hyperspy.signals.Signal`
Representation of the :class:`~.ScalarData` object as a HyperSpy Signal.
Notes
-----
This method requires the hyperspy package!
"""
self._log.debug('Calling to_signal')
signal = super().to_signal()
# Set metadata:
signal.metadata.Signal.title = 'ScalarData'
# Return signal:
return signal
def save(self, filename, **kwargs):
"""Saves the ScalarData in the specified format.
The function gets the format from the extension:
- hdf5 for HDF5.
- EMD Electron Microscopy Dataset format (also HDF5).
- npy or npz for numpy formats.
If no extension is provided, 'hdf5' is used. Most formats are
saved with the HyperSpy package (internally the fielddata is first
converted to a HyperSpy Signal).
Each format accepts a different set of parameters. For details
see the specific format documentation.
Parameters
----------
filename : str, optional
Name of the file which the ScalarData is saved into. The extension
determines the saving procedure.
"""
from .file_io.io_scalardata import save_scalardata
save_scalardata(self, filename, **kwargs)
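# Usage sketch (not part of the original module): ScalarData mirrors the VectorData
# interface for plain 3D scalar fields:
#     >>> import numpy as np
#     >>> scalardata = ScalarData(1.0, np.ones((8, 8, 8)))
#     >>> scalardata.scale_down(n=1)             # dim -> (4, 4, 4), a -> 2 nm
#     >>> scalardata.save('scalardata_test.hdf5')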
# TODO: Histogram plots for magnetisation (see thesis!)
# -*- coding: utf-8 -*-
# Copyright 2016 by Forschungszentrum Juelich GmbH
# Author: J. Caron
#
"""Subpackage containing Pyramid IO functionality."""
from .io_phasemap import load_phasemap
from .io_vectordata import load_vectordata
from .io_scalardata import load_scalardata
from .io_projector import load_projector
from .io_dataset import load_dataset
__all__ = ['load_phasemap', 'load_vectordata', 'load_scalardata', 'load_projector', 'load_dataset']
# -*- coding: utf-8 -*-
# Copyright 2016 by Forschungszentrum Juelich GmbH
# Author: J. Caron
#
"""IO functionality for DataSet objects."""
import logging
import os
import h5py
import numpy as np
import scipy as sp
import scipy.sparse  # explicit import, so that sp.sparse is guaranteed to be available
from ..dataset import DataSet
from ..file_io.io_projector import load_projector
from ..file_io.io_phasemap import load_phasemap
__all__ = ['load_dataset']
_log = logging.getLogger(__name__)
def save_dataset(dataset, filename, overwrite=True):
"""%s"""
_log.debug('Calling save_dataset')
path, filename = os.path.split(filename)
name, extension = os.path.splitext(filename)
assert extension in ['.hdf5', ''], 'For now only HDF5 format is supported!'
if name.startswith('dataset_'):
name = name.split('dataset_')[1]
# Header file:
header_name = os.path.join(path, 'dataset_{}.hdf5'.format(name))
if not os.path.isfile(header_name) or overwrite: # Write if file does not exist or if forced:
with h5py.File(header_name, 'w') as f:
f.attrs['a'] = dataset.a
f.attrs['dim'] = dataset.dim
f.attrs['b_0'] = dataset.b_0
if dataset.mask is not None:
f.create_dataset('mask', data=dataset.mask)
if dataset.Se_inv is not None:
f.create_dataset('Se_inv', data=dataset.Se_inv.diagonal()) # Save only diagonal!
# PhaseMaps and Projectors:
for i, projector in enumerate(dataset.projectors):
projector_name = 'projector_{}_{}_{}{}'.format(name, i, projector.get_info(), extension)
projector.save(os.path.join(path, projector_name), overwrite=overwrite)
phasemap_name = 'phasemap_{}_{}_{}{}'.format(name, i, projector.get_info(), extension)
dataset.phasemaps[i].save(os.path.join(path, phasemap_name), overwrite=overwrite)
save_dataset.__doc__ %= DataSet.save.__doc__
def load_dataset(filename):
"""Load HDF5 file into a :class:`~pyramid.dataset.DataSet` instance.
Parameters
----------
filename: str
The filename to be loaded.
Returns
-------
dataset : :class:`~.DataSet`
A :class:`~.DataSet` object containing the loaded data.
Notes
-----
This loads a header file and all matching HDF5 files which can be found. The filename
conventions have to be strictly followed for the process to be successful!
"""
_log.debug('Calling load_dataset')
path, filename = os.path.split(filename)
if path == '':
path = '.' # Make sure this can be used later!
name, extension = os.path.splitext(filename)
assert extension in ['.hdf5', ''], 'For now only HDF5 format is supported!'
if name.startswith('dataset_'):
name = name.split('dataset_')[1]
# Header file:
header_name = os.path.join(path, 'dataset_{}.hdf5'.format(name))
with h5py.File(header_name, 'r') as f:
a = f.attrs.get('a')
dim = f.attrs.get('dim')
b_0 = f.attrs.get('b_0')
mask = np.copy(f.get('mask', None))
Se_inv_diag = np.copy(f.get('Se_inv', None))
Se_inv = sp.sparse.diags(Se_inv_diag).tocsr()
dataset = DataSet(a, dim, b_0, mask, Se_inv)
# Projectors:
projectors = []
for f in os.listdir(path):
if f.startswith('projector') and f.endswith('.hdf5'):
projector_name, i = f.split('_')[1:3]
if projector_name == name:
projector = load_projector(os.path.join(path, f))
projectors.append((int(i), projector))
projectors = [p[1] for p in sorted(projectors, key=lambda x: x[0])]
# PhaseMaps:
phasemaps = []
for f in os.listdir(path):
if f.startswith('phasemap') and f.endswith('.hdf5'):
phasemap_name, i = f.split('_')[1:3]
if phasemap_name == name:
phasemap = load_phasemap(os.path.join(path, f))
phasemaps.append((int(i), phasemap))
phasemaps = [p[1] for p in sorted(phasemaps, key=lambda x: x[0])]
dataset.append(phasemaps, projectors)
# Return DataSet:
return dataset
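# Round-trip sketch for the naming convention used above; 'mydata' is a placeholder name
# and `dataset` is assumed to be a readily populated DataSet instance.
def _example_dataset_roundtrip(dataset):
    """Sketch: save a DataSet (header, projectors, phasemaps) and load it back."""
    save_dataset(dataset, 'dataset_mydata.hdf5')  # writes dataset_/projector_/phasemap_ files
    return load_dataset('dataset_mydata.hdf5')    # collects all files matching the name again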
# -*- coding: utf-8 -*-
# Copyright 2016 by Forschungszentrum Juelich GmbH
# Author: J. Caron
#
"""IO functionality for FieldData objects."""
import logging
import os
import numpy as np
from PIL import Image
from ..phasemap import PhaseMap
__all__ = ['load_phasemap']
_log = logging.getLogger(__name__)
def load_phasemap(filename, mask=None, confidence=None, a=None, threshold=0,
print_mask_limits=False, **kwargs):
"""Load supported file into a :class:`~pyramid.phasemap.PhaseMap` instance.
The function loads the file according to the extension:
- hdf5 for HDF5.
- rpl for Ripple (useful to export to Digital Micrograph).
- dm3 and dm4 for Digital Micrograph files.
- unf for SEMPER unf binary format.
- txt format.
- npy or npz for numpy formats.
- Many image formats such as png, tiff, jpeg...
Any extra keyword is passed to the corresponding reader. For
available options see their individual documentation.
Parameters
----------
filename: str
The filename to be loaded.
mask: str or tuple of str and dict, optional
If this is a filename, a mask will be loaded from an appropriate file. If additional
keywords should be provided, use a tuple of the filename and an according dictionary.
If this is `None` (the default), no mask will be loaded.
confidence: str or tuple of str and dict, optional
If this is a filename, a confidence matrix will be loaded from an appropriate file. If
additional keywords should be provided, use a tuple of the filename and an according
dictionary. If this is `None` (the default), no confidence matrix will be loaded.
a: float or None, optional
The grid spacing in nm. If not `None`, it overrides a value that may be loaded from the file.
threshold: float, optional
Threshold above which the loaded mask data is interpreted as `True`. Default is 0.
print_mask_limits: boolean, optional
If `True`, the minimum and maximum of the loaded mask data and the used threshold are
printed to help with choosing `threshold`. Default is `False`.
Returns
-------
phasemap : :class:`~.PhaseMap`
A :class:`~.PhaseMap` object containing the loaded data.
"""
_log.debug('Calling load_phasemap')
phasemap = _load(filename, as_phasemap=True, a=a, **kwargs)
if mask is not None:
filemask, kwargs_mask = _parse_add_param(mask)
mask_raw = _load(filemask, **kwargs_mask)
if print_mask_limits:
print('[Mask] min:', mask_raw.min(), 'max:', mask_raw.max(), 'threshold:', threshold)
mask = np.where(mask_raw > threshold, True, False)
phasemap.mask = mask
if confidence is not None:
fileconf, kwargs_conf = _parse_add_param(confidence)
phasemap.confidence = _load(fileconf, **kwargs_conf)
return phasemap
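# Usage sketch for the loader above; 'phase.unf' and 'mask.png' are placeholder filenames.
def _example_load_phasemap_with_mask():
    """Sketch: load a phase map and attach a mask thresholded from an image file."""
    phasemap = load_phasemap('phase.unf', mask='mask.png', threshold=0.5,
                             print_mask_limits=True)
    # A confidence matrix can be attached analogously, e.g. confidence=('conf.hdf5', {}).
    return phasemap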
def _load(filename, as_phasemap=False, a=1., **kwargs):
_log.debug('Calling _load')
extension = os.path.splitext(filename)[1]
# Load from txt-files:
if extension == '.txt':
return _load_from_txt(filename, as_phasemap, a, **kwargs)
# Load from npy-files:
elif extension in ['.npy', '.npz']:
return _load_from_npy(filename, as_phasemap, a, **kwargs)
elif extension in ['.jpeg', '.jpg', '.png', '.bmp', '.tif']:
return _load_from_img(filename, as_phasemap, a, **kwargs)
# Load with HyperSpy:
else:
if extension == '':
filename = '{}.hdf5'.format(filename) # Default!
return _load_from_hs(filename, as_phasemap, a, **kwargs)
def _parse_add_param(param):
if param is None:
return None, {}
elif isinstance(param, str):
return param, {}
elif isinstance(param, (list, tuple)) and len(param) == 2:
return param
else:
raise ValueError('Parameter can be a string or a tuple/list of a string and a dict!')
def _load_from_txt(filename, as_phasemap, a, **kwargs):
def _load_arr(filename, **kwargs):
with open(filename, 'r') as phase_file:
if phase_file.readline().startswith('PYRAMID'): # File has pyramid structure:
return np.loadtxt(filename, delimiter='\t', skiprows=2)
else: # Try default args:
return np.loadtxt(filename, **kwargs)
result = _load_arr(filename, **kwargs)
if as_phasemap:
if a is None:
a = 1. # Default!
with open(filename, 'r') as phase_file:
header = phase_file.readline()
if header.startswith('PYRAMID'): # File has pyramid structure:
a = float(phase_file.readline()[15:-4])
return PhaseMap(a, result)
else:
return result
def _load_from_npy(filename, as_phasemap, a, **kwargs):
result = np.load(filename, **kwargs)
if as_phasemap:
if a is None:
a = 1. # Use default!
return PhaseMap(a, result)
else:
return result
def _load_from_img(filename, as_phasemap, a, **kwargs):
result = np.asarray(Image.open(filename, **kwargs).convert('L'))
if as_phasemap:
if a is None:
a = 1. # Use default!
return PhaseMap(a, result)
else:
return result
def _load_from_hs(filename, as_phasemap, a, **kwargs):
try:
import hyperspy.api as hs
except ImportError:
_log.error('This method requires the hyperspy package!')
return
phasemap = PhaseMap.from_signal(hs.load(filename, **kwargs))
if as_phasemap:
if a is not None:
phasemap.a = a
return phasemap
else:
return phasemap.phase
def save_phasemap(phasemap, filename, save_mask, save_conf, pyramid_format, **kwargs):
"""%s"""
_log.debug('Calling save_phasemap')
extension = os.path.splitext(filename)[1]
if extension == '.txt': # Save to txt-files:
_save_to_txt(phasemap, filename, pyramid_format, save_mask, save_conf, **kwargs)
elif extension in ['.npy', '.npz']: # Save to npy-files:
_save_to_npy(phasemap, filename, save_mask, save_conf, **kwargs)
else: # Try HyperSpy:
_save_to_hs(phasemap, filename, save_mask, save_conf, **kwargs)
save_phasemap.__doc__ %= PhaseMap.save.__doc__
def _save_to_txt(phasemap, filename, pyramid_format, save_mask, save_conf, **kwargs):
def _save_arr(filename, array, tag, **kwargs):
if pyramid_format:
with open(filename, 'w') as phase_file:
name = os.path.splitext(os.path.split(filename)[1])[0]
phase_file.write('PYRAMID-{}: {}\n'.format(tag, name))
phase_file.write('grid spacing = {} nm\n'.format(phasemap.a))
save_kwargs = {'fmt': '%7.6e', 'delimiter': '\t'}
else:
save_kwargs = kwargs
with open(filename, 'ba') as phase_file:
np.savetxt(phase_file, array, **save_kwargs)
name, extension = os.path.splitext(filename)
_save_arr('{}{}'.format(name, extension), phasemap.phase, 'PHASEMAP', **kwargs)
if save_mask:
_save_arr('{}_mask{}'.format(name, extension), phasemap.mask, 'MASK', **kwargs)
if save_conf:
_save_arr('{}_conf{}'.format(name, extension), phasemap.confidence, 'CONFIDENCE', **kwargs)
def _save_to_npy(phasemap, filename, save_mask, save_conf, **kwargs):
name, extension = os.path.splitext(filename)
np.save('{}{}'.format(name, extension), phasemap.phase, **kwargs)
if save_mask:
np.save('{}_mask{}'.format(name, extension), phasemap.mask, **kwargs)
if save_conf:
np.save('{}_conf{}'.format(name, extension), phasemap.confidence, **kwargs)
def _save_to_hs(phasemap, filename, save_mask, save_conf, **kwargs):
name, extension = os.path.splitext(filename)
phasemap.to_signal().save(filename, **kwargs)
if extension not in ['.hdf5', '.HDF5', '']: # mask and confidence are saved separately:
import hyperspy.api as hs
if save_mask:
mask_name = '{}_mask{}'.format(name, extension)
hs.signals.Signal2D(phasemap.mask, **kwargs).save(mask_name)
if save_conf:
conf_name = '{}_conf{}'.format(name, extension)
hs.signals.Signal2D(phasemap.confidence, **kwargs).save(conf_name)
# -*- coding: utf-8 -*-
# Copyright 2016 by Forschungszentrum Juelich GmbH
# Author: J. Caron
#
"""IO functionality for Projector objects."""
import logging
import os
from scipy.sparse import csr_matrix
import numpy as np
import h5py
from .. import projector
__all__ = ['load_projector']
_log = logging.getLogger(__name__)
def save_projector(projector, filename, overwrite=True):
"""%s"""
_log.debug('Calling save_projector')
name, extension = os.path.splitext(filename)
assert extension in ['.hdf5', ''], 'For now only HDF5 format is supported!'
filename = name + '.hdf5' # In case no extension is provided, set to HDF5!
if not os.path.isfile(filename) or overwrite: # Write if file does not exist or if forced:
with h5py.File(filename, 'w') as f:
class_name = projector.__class__.__name__
f.attrs['class'] = class_name
if class_name == 'SimpleProjector':
f.attrs['axis'] = projector.axis
else:
f.attrs['tilt'] = projector.tilt
if class_name == 'RotTiltProjector':
f.attrs['rotation'] = projector.rotation
f.attrs['dim'] = projector.dim
f.attrs['dim_uv'] = projector.dim_uv
f.create_dataset('data', data=projector.weight.data)
f.create_dataset('indptr', data=projector.weight.indptr)
f.create_dataset('indices', data=projector.weight.indices)
f.create_dataset('coeff', data=projector.coeff)
save_projector.__doc__ %= projector.Projector.save.__doc__
def load_projector(filename):
"""Load HDF5 file into a :class:`~pyramid.projector.Projector` instance (or a subclass).
Parameters
----------
filename: str
The filename to be loaded.
Returns
-------
projector : :class:`~.Projector`
A :class:`~.Projector` object containing the loaded data.
"""
_log.debug('Calling load_projector')
name, extension = os.path.splitext(filename)
assert extension in ['.hdf5', ''], 'For now only HDF5 format is supported!'
filename = name + '.hdf5' # In case no extension is provided, set to HDF5!
with h5py.File(filename, 'r') as f:
# Retrieve dimensions:
dim = f.attrs.get('dim')
dim_uv = f.attrs.get('dim_uv')
size_2d, size_3d = np.prod(dim_uv), np.prod(dim)
# Retrieve weight matrix:
data = f.get('data')
indptr = f.get('indptr')
indices = f.get('indices')
weight = csr_matrix((data, indices, indptr), shape=(size_2d, size_3d))
# Retrieve coefficients:
coeff = np.copy(f.get('coeff'))
# Construct projector:
result = projector.Projector(dim, dim_uv, weight, coeff)
# Specify projector type:
class_name = f.attrs.get('class')
result.__class__ = getattr(projector, class_name)
if class_name == 'SimpleProjector':
result.axis = f.attrs.get('axis')
else:
result.tilt = f.attrs.get('tilt')
if class_name == 'RotTiltProjector':
result.rotation = f.attrs.get('rotation')
# Return projector object:
return result
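# Round-trip sketch for the two functions above; 'proj.hdf5' is a placeholder filename and
# `proj` is assumed to be an instance of any Projector subclass.
def _example_projector_roundtrip(proj):
    """Sketch: save a projector to HDF5 and restore it, including its subclass type."""
    save_projector(proj, 'proj.hdf5')
    return load_projector('proj.hdf5')  # the class is restored from the 'class' attribute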
# -*- coding: utf-8 -*-
# Copyright 2016 by Forschungszentrum Juelich GmbH
# Author: J. Caron
#
"""IO functionality for ScalarData objects."""
import logging
import os
import numpy as np
from ..fielddata import ScalarData
__all__ = ['load_scalardata']
_log = logging.getLogger(__name__)
def load_scalardata(filename, a=None, **kwargs):
"""Load supported file into a :class:`~pyramid.fielddata.ScalarData` instance.
The function loads the file according to the extension:
- hdf5 for HDF5.
- EMD Electron Microscopy Dataset format (also HDF5).
- npy or npz for numpy formats.
Any extra keyword is passed to the corresponding reader. For
available options see their individual documentation.
Parameters
----------
filename: str
The filename to be loaded.
a: float or None, optional
The grid spacing in nm. If not `None`, it overrides a value that may be loaded from the file.
Returns
-------
scalardata : :class:`~.ScalarData`
A :class:`~.ScalarData` object containing the loaded data.
"""
_log.debug('Calling load_scalardata')
extension = os.path.splitext(filename)[1]
# Load from npy-files:
if extension in ['.npy', '.npz']:
return _load_from_npy(filename, a, **kwargs)
# Load with HyperSpy:
else:
if extension == '':
filename = '{}.hdf5'.format(filename) # Default!
return _load_from_hs(filename, a, **kwargs)
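# Usage sketch for the loader above; 'potential.npy' is a placeholder filename.
def _example_load_scalardata_npy():
    """Sketch: load a plain numpy array as ScalarData with an explicit grid spacing."""
    return load_scalardata('potential.npy', a=2.)  # a in nm; npy files carry no spacing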
def _load_from_npy(filename, a, **kwargs):
_log.debug('Calling load_from_npy')
if a is None:
a = 1. # Use default!
return ScalarData(a, np.load(filename, **kwargs))
def _load_from_hs(filename, a, **kwargs):
_log.debug('Calling load_from_hs')
try:
import hyperspy.api as hs
except ImportError:
_log.error('This method requires the hyperspy package!')
return
scalardata = ScalarData.from_signal(hs.load(filename, **kwargs))
if a is not None:
scalardata.a = a
return scalardata
def save_scalardata(scalardata, filename, **kwargs):
"""%s"""
_log.debug('Calling save_scalardata')
extension = os.path.splitext(filename)[1]
if extension in ['.npy', '.npz']: # Save to npy-files:
_save_to_npy(scalardata, filename, **kwargs)
else: # Try HyperSpy:
_save_to_hs(scalardata, filename, **kwargs)
save_scalardata.__doc__ %= ScalarData.save.__doc__
def _save_to_npy(scalardata, filename, **kwargs):
_log.debug('Calling save_to_npy')
np.save(filename, scalardata.field, **kwargs)
def _save_to_hs(scalardata, filename, **kwargs):
_log.debug('Calling save_to_hyperspy')
scalardata.to_signal().save(filename, **kwargs)
# -*- coding: utf-8 -*-
# Copyright 2016 by Forschungszentrum Juelich GmbH
# Author: J. Caron
#
"""IO functionality for VectorData objects."""
import logging
import os
import numpy as np
from ..fielddata import VectorData
__all__ = ['load_vectordata']
_log = logging.getLogger(__name__)
def load_vectordata(filename, a=None, **kwargs):
"""Load supported file into a :class:`~pyramid.fielddata.VectorData` instance.
The function loads the file according to the extension:
- hdf5 for HDF5.
- EMD Electron Microscopy Dataset format (also HDF5).
- llg format.
- ovf format.
- npy or npz for numpy formats.
Any extra keyword is passed to the corresponding reader. For
available options see their individual documentation.
Parameters
----------
filename: str
The filename to be loaded.
a: float or None, optional
The grid spacing in nm. If not `None`, it overrides a value that may be loaded from the file.
Returns
-------
vectordata : :class:`~.VectorData`
A :class:`~.VectorData` object containing the loaded data.
"""
_log.debug('Calling load_vectordata')
extension = os.path.splitext(filename)[1]
# Load from llg-files:
if extension in ['.llg', '.txt']:
return _load_from_llg(filename, a)
# Load from ovf-files:
if extension in ['.ovf', '.omf', '.ohf', '.obf']:
return _load_from_ovf(filename, a)
# Load from npy-files:
elif extension in ['.npy', '.npz']:
return _load_from_npy(filename, a, **kwargs)
# Load with HyperSpy:
else:
if extension == '':
filename = '{}.hdf5'.format(filename) # Default!
return _load_from_hs(filename, a, **kwargs)
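# Usage sketch for the loader above; 'mag.ovf' is a placeholder filename.
def _example_load_vectordata_ovf():
    """Sketch: load an OOMMF OVF file and override the grid spacing from its header."""
    return load_vectordata('mag.ovf', a=2.5)  # a in nm, overrides xstepsize/meshunit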
def _load_from_llg(filename, a):
_log.debug('Calling load_from_llg')
SCALE = 1.0E-9 / 1.0E-2 # From cm to nm
data = np.genfromtxt(filename, skip_header=2)
dim = tuple(np.genfromtxt(filename, dtype=int, skip_header=1, skip_footer=len(data[:, 0])))
if a is None:
a = (data[1, 0] - data[0, 0]) / SCALE
field = data[:, 3:6].T.reshape((3,) + dim)
return VectorData(a, field)
def _load_from_ovf(filename, a):
_log.debug('Calling load_from_ovf')
with open(filename, 'rb') as mag_file:
# TODO: Also handle OOMF 1.0? See later TODOs...
line = mag_file.readline()
assert line.startswith(b'# OOMMF') # File has OVF format!
read_header, read_data = False, False
header = {'version': line.split()[-1].decode('utf-8')}
x_mag, y_mag, z_mag = [], [], []
data_mode = None
while True:
# Read in additional info:
if not read_header and not read_data:
line = mag_file.readline()
if line == b'':
break # End of file is reached!
if line.startswith(b'# Segment count'):
header['segment_count'] = int(line.split()[-1])
# TODO: currently new segments overwrite last ones. read more than one? return
# TODO: navigation axis (segment count > 1) if implemented in HyperSpy reader!
elif line.startswith(b'# Begin: Header'):
read_header = True
elif line.startswith(b'# Begin: Data'):
read_data = True
data_mode = ' '.join(line.decode('utf-8').split()[3:])
assert data_mode in ['text', 'Text', 'Binary 4', 'Binary 8'], \
'Data mode {} is currently not supported by this reader!'.format(data_mode)
assert header.get('meshtype') == 'rectangular', \
'Only rectangular grids can be currently read!'
# Read header line by line:
elif read_header:
line = mag_file.readline()
if line.startswith(b'# End: Header'): # Header is done:
read_header = False
continue
line = line.decode('utf-8') # Decode to use strings here!
line_list = line.split()
if '##' in line_list: # Strip trailing comments:
del line_list[line_list.index('##'):]
if len(line_list) <= 1: # Just '#' or empty line:
continue
key, value = line_list[1].strip(':'), ' '.join(line_list[2:])
if key not in header: # Add new key-value pair if it does not exist yet:
header[key] = value
elif key == 'Desc': # Can go over several lines:
header['Desc'] = ' '.join([header['Desc'], value])
# Read in data:
# TODO: Make it work for both text and binary! Put into HyperSpy?
# TODO: http://math.nist.gov/oommf/doc/userguide11b2/userguide/vectorfieldformat.html
# TODO: http://math.nist.gov/oommf/doc/userguide12a5/userguide/OVF_2.0_format.html
# TODO: 1.0 and 2.0 DIFFER (little and big endian in binary data -.-)
elif read_data: # Currently in a data block:
if data_mode in ['text', 'Text']: # Read data as text, line by line:
line = mag_file.readline()
if line.startswith(b'# End: Data'): # Header is done:
read_data = False
else:
x, y, z = [float(i) for i in line.split()]
x_mag.append(x)
y_mag.append(y)
z_mag.append(z)
elif 'Binary' in data_mode: # Read data as binary, all bytes at the same time:
count = int(data_mode.split()[-1])
if header['version'] == '1.0': # Big endian:
dtype = '>f{}'.format(count)
elif header['version'] == '2.0': # Little endian:
dtype = '<f{}'.format(count)
dim = (int(header['znodes']), int(header['ynodes']), int(header['xnodes']))
test = np.fromfile(mag_file, dtype=dtype, count=1)
if count == 4: # Binary 4:
assert test == 1234567.0, 'Wrong test bytes!'
elif count == 8: # Binary 8:
assert test == 123456789012345.0, 'Wrong test bytes!'
data = np.fromfile(mag_file, dtype=dtype, count=3*np.prod(dim))
x_mag, y_mag, z_mag = data[0::3], data[1::3], data[2::3]
read_data = False # Stop reading data and search for new Segments (if any).
# Format after reading:
dim = (int(header['znodes']), int(header['ynodes']), int(header['xnodes']))
x_mag = np.asarray(x_mag).reshape(dim)
y_mag = np.asarray(y_mag).reshape(dim)
z_mag = np.asarray(z_mag).reshape(dim)
field = np.asarray((x_mag, y_mag, z_mag)) * float(header.get('valuemultiplier', 1))
if a is None:
# TODO: If transferred to HyperSpy, this has to stay in Pyramid reader!
if not header.get('xstepsize') == header.get('ystepsize') == header.get('zstepsize'):
_log.warning('Grid spacing is not equal in x, y and z (x will be used)!')
a = float(header.get('xstepsize', 1.))
meshunit = header.get('meshunit', 'nm')
a *= {'m': 1e9, 'mm': 1e6, 'µm': 1e3, 'nm': 1}[meshunit] # Conversion to nm
return VectorData(a, field)
def _load_from_npy(filename, a, **kwargs):
_log.debug('Calling load_from_npy')
if a is None:
a = 1. # Use default!
return VectorData(a, np.load(filename, **kwargs))
def _load_from_hs(filename, a, **kwargs):
_log.debug('Calling load_from_hs')
try:
import hyperspy.api as hs
except ImportError:
_log.error('This method requires the hyperspy package!')
return
vectordata = VectorData.from_signal(hs.load(filename, **kwargs))
if a is not None:
vectordata.a = a
return vectordata
def save_vectordata(vectordata, filename, **kwargs):
"""%s"""
_log.debug('Calling save_vectordata')
extension = os.path.splitext(filename)[1]
if extension == '.llg': # Save to llg-files:
_save_to_llg(vectordata, filename)
elif extension == '.ovf': # Save to ovf-files:
_save_to_ovf(vectordata, filename, **kwargs)
elif extension in ['.npy', '.npz']: # Save to npy-files:
_save_to_npy(vectordata, filename, **kwargs)
else: # Try HyperSpy:
_save_to_hs(vectordata, filename, **kwargs)
save_vectordata.__doc__ %= VectorData.save.__doc__
def _save_to_llg(vectordata, filename):
_log.debug('Calling save_to_llg')
dim = vectordata.dim
SCALE = 1.0E-9 / 1.0E-2 # from nm to cm
# Create 3D meshgrid and reshape it and the field into a list where x varies first:
zz, yy, xx = vectordata.a * SCALE * (np.indices(dim) + 0.5).reshape(3, -1)
x_vec, y_vec, z_vec = vectordata.field.reshape(3, -1)
data = np.array([xx, yy, zz, x_vec, y_vec, z_vec]).T
# Save data to file:
with open(filename, 'w') as mag_file:
mag_file.write('LLGFileCreator: {:s}\n'.format(filename))
mag_file.write(' {:d} {:d} {:d}\n'.format(*dim))
mag_file.writelines('\n'.join(' '.join('{:7.6e}'.format(cell)
for cell in row) for row in data))
def _save_to_ovf(vectordata, filename, **kwargs):  # kwargs accepted for a consistent interface
_log.debug('Calling save_to_ovf')
with open(filename, 'w') as mag_file:
mag_file.write('# OOMMF OVF 2.0\n')
mag_file.write('# Segment count: 1\n')
mag_file.write('# Begin: Segment\n')
# Write Header:
mag_file.write('# Begin: Header\n')
name = os.path.splitext(os.path.split(filename)[1])[0]
mag_file.write('# Title: PYRAMID-VECTORDATA {}\n'.format(name))
mag_file.write('# meshtype: rectangular\n')
mag_file.write('# meshunit: nm\n')
mag_file.write('# valueunit: A/m\n')
mag_file.write('# valuemultiplier: 1.\n')
mag_file.write('# xmin: 0.\n')
mag_file.write('# ymin: 0.\n')
mag_file.write('# zmin: 0.\n')
mag_file.write('# xmax: {}\n'.format(vectordata.a * vectordata.dim[2]))
mag_file.write('# ymax: {}\n'.format(vectordata.a * vectordata.dim[1]))
mag_file.write('# zmax: {}\n'.format(vectordata.a * vectordata.dim[0]))
mag_file.write('# xbase: 0.\n')
mag_file.write('# ybase: 0.\n')
mag_file.write('# zbase: 0.\n')
mag_file.write('# xstepsize: {}\n'.format(vectordata.a))
mag_file.write('# ystepsize: {}\n'.format(vectordata.a))
mag_file.write('# zstepsize: {}\n'.format(vectordata.a))
mag_file.write('# xnodes: {}\n'.format(vectordata.dim[2]))
mag_file.write('# ynodes: {}\n'.format(vectordata.dim[1]))
mag_file.write('# znodes: {}\n'.format(vectordata.dim[0]))
mag_file.write('# End: Header\n')
# Write data:
mag_file.write('# Begin: Data Text\n')
x_mag, y_mag, z_mag = vectordata.field
x_mag = x_mag.ravel()
y_mag = y_mag.ravel()
z_mag = z_mag.ravel()
for i in range(np.prod(vectordata.dim)):
mag_file.write('{:g} {:g} {:g}\n'.format(x_mag[i], y_mag[i], z_mag[i]))
mag_file.write('# End: Data Text\n')
mag_file.write('# End: Segment\n')
def _save_to_npy(vectordata, filename, **kwargs):
_log.debug('Calling save_to_npy')
np.save(filename, vectordata.field, **kwargs)
def _save_to_hs(vectordata, filename, **kwargs):
_log.debug('Calling save_to_hyperspy')
vectordata.to_signal().save(filename, **kwargs)
# -*- coding: utf-8 -*-
# Copyright 2014 by Forschungszentrum Juelich GmbH
# Author: J. Caron
#
"""This module provides the :class:`~.ForwardModel` class which represents a strategy to map a
three-dimensional magnetization distribution onto a two-dimensional phase map."""
import logging
import multiprocessing as mp
import sys
import numpy as np
from pyramid.dataset import DataSet
from pyramid.fielddata import VectorData
from pyramid.ramp import Ramp
__all__ = ['ForwardModel', 'DistributedForwardModel']
class ForwardModel(object):
"""Class for mapping 3D magnetic distributions to 2D phase maps.
Represents a strategy for the mapping of a 3D magnetic distribution to two-dimensional
phase maps. A :class:`~.DataSet` object is given which is used as input for the model
(projectors, phasemappers, etc.). A `ramp_order` can be specified to add polynomial ramps
to the constructed phase maps (which can also be reconstructed!). A :class:`~.Ramp` class
object will be constructed accordingly, which also holds all info about the ramps after a
reconstruction.
Attributes
----------
data_set: :class:`~dataset.DataSet`
:class:`~dataset.DataSet` object, which stores all information required for the calculation.
ramp_order : int or None (default)
Polynomial order of the additional phase ramp which will be added to the phase maps.
All ramp parameters have to be at the end of the input vector and are split automatically.
Default is None (no ramps are added).
y : :class:`~numpy.ndarray` (N=1)
Vector which lists all pixel values of all phase maps one after another.
m: int
Size of the image space. Number of pixels of the 2-dimensional projected grid.
n: int
Size of the input space. Number of voxels of the 3-dimensional grid.
Se_inv : :class:`~numpy.ndarray` (N=2), optional
Inverted covariance matrix of the measurement errors. The matrix has size `m x m` with m
being the length of the target vector y (vectorized phase map information).
"""
_log = logging.getLogger(__name__ + '.ForwardModel')
def __init__(self, data_set, ramp_order=None):
self._log.debug('Calling __init__')
self.data_set = data_set
self.ramp_order = ramp_order
# Extract information from data_set:
self.phasemappers = self.data_set.phasemappers
self.y = self.data_set.phase_vec
self.n = self.data_set.n
self.m = self.data_set.m
self.shape = (self.m, self.n)
self.hook_points = self.data_set.hook_points
self.Se_inv = self.data_set.Se_inv
# Create ramp and change n accordingly:
self.ramp = Ramp(self.data_set, self.ramp_order)
self.n += self.ramp.n # ramp.n is 0 if ramp_order is None
# Create empty MagData object:
self.magdata = VectorData(self.data_set.a, np.zeros((3,) + self.data_set.dim))
self._log.debug('Creating ' + str(self))
def __repr__(self):
self._log.debug('Calling __repr__')
return '%s(data_set=%r)' % (self.__class__, self.data_set)
def __str__(self):
self._log.debug('Calling __str__')
return 'ForwardModel(data_set=%s)' % self.data_set
def __call__(self, x):
# Extract ramp parameters if necessary (x will be shortened!):
x = self.ramp.extract_ramp_params(x)
# Reset magdata and fill with vector:
self.magdata.field[...] = 0
self.magdata.set_vector(x, self.data_set.mask)
# Simulate all phase maps and create result vector:
result = np.zeros(self.m)
hp = self.hook_points
for i, projector in enumerate(self.data_set.projectors):
mapper = self.phasemappers[i]
phasemap = mapper(projector(self.magdata))
phasemap += self.ramp(i) # add ramp!
result[hp[i]:hp[i + 1]] = phasemap.phase_vec
return np.reshape(result, -1)
def jac_dot(self, x, vector):
"""Calculate the product of the Jacobi matrix with a given `vector`.
Parameters
----------
x : :class:`~numpy.ndarray` (N=1)
Evaluation point of the jacobi-matrix. The Jacobi matrix is constant for a linear
problem, thus `x` can be set to None (it is not used in the computation). It is
implemented for the case that in the future nonlinear problems have to be solved.
vector : :class:`~numpy.ndarray` (N=1)
Vectorized form of the 3D magnetization distribution. First the `x`, then the `y` and
lastly the `z` components are listed. Ramp parameters are also added at the end if
necessary.
Returns
-------
result_vector : :class:`~numpy.ndarray` (N=1)
Product of the Jacobi matrix (which is not explicitly calculated) with the input
`vector`.
"""
# Extract ramp parameters if necessary (vector will be shortened!):
vector = self.ramp.extract_ramp_params(vector)
# Reset magdata and fill with vector:
self.magdata.field[...] = 0
self.magdata.set_vector(vector, self.data_set.mask)
# Simulate all phase maps and create result vector:
result = np.zeros(self.m)
hp = self.hook_points
for i, projector in enumerate(self.data_set.projectors):
mag_vec = self.magdata.field_vec
mapper = self.phasemappers[i]
res = mapper.jac_dot(projector.jac_dot(mag_vec))
res += self.ramp.jac_dot(i) # add ramp!
result[hp[i]:hp[i + 1]] = res
return result
def jac_T_dot(self, x, vector):
"""'Calculate the product of the transposed Jacobi matrix with a given `vector`.
Parameters
----------
x : :class:`~numpy.ndarray` (N=1)
Evaluation point of the jacobi-matrix. The jacobi matrix is constant for a linear
problem, thus `x` can be set to None (it is not used in the computation). It is used
for the case that in the future nonlinear problems have to be solved.
vector : :class:`~numpy.ndarray` (N=1)
Vectorized form of all 2D phase maps one after another in one vector.
Returns
-------
result_vector : :class:`~numpy.ndarray` (N=1)
Product of the transposed Jacobi matrix (which is not explicitly calculated) with
the input `vector`. If necessary, transposed ramp parameters are concatenated.
"""
proj_T_result = np.zeros(3 * np.prod(self.data_set.dim))
hp = self.hook_points
for i, projector in enumerate(self.data_set.projectors):
sub_vec = vector[hp[i]:hp[i + 1]]
mapper = self.phasemappers[i]
proj_T_result += projector.jac_T_dot(mapper.jac_T_dot(sub_vec))
self.magdata.field_vec = proj_T_result
result = self.magdata.get_vector(self.data_set.mask)
ramp_params = self.ramp.jac_T_dot(vector) # calculate ramp_params separately!
return np.concatenate((result, ramp_params))
def finalize(self):
"""'Finalize the processes and let them join the master process (NOT USED HERE!).
Returns
-------
None
"""
pass
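# Usage sketch for the class above; `data_set` is assumed to be a populated DataSet with
# phasemaps, projectors and a mask.
def _example_forward_model(data_set):
    """Sketch: map a vectorized magnetization onto the stacked phase vector."""
    fwd_model = ForwardModel(data_set, ramp_order=1)
    x = np.zeros(fwd_model.n)  # 3 * number of mask voxels plus the ramp parameters at the end
    return fwd_model(x)        # vector of length fwd_model.m (all phase maps stacked)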
class DistributedForwardModel(ForwardModel):
"""Multiprocessing class for mapping 3D magnetic distributions to 2D phase maps.
Subclass of the :class:`~.ForwardModel` class which implements multiprocessing strategies
to speed up the calculations. The interface is the same; internally, the worker processes are
created during construction, each with its own ForwardModel operating on a subset of the DataSet.
Ramps are calculated in the main thread. The :func:`~.finalize` method can be used to force
the processes to join if the class is no longer used.
Attributes
----------
data_set: :class:`~dataset.DataSet`
:class:`~dataset.DataSet` object, which stores all information required for the calculation.
ramp_order : int or None (default)
Polynomial order of the additional phase ramp which will be added to the phase maps.
All ramp parameters have to be at the end of the input vector and are split automatically.
Default is None (no ramps are added).
nprocs: int
Number of processes which should be created. Default is 1 (not recommended).
"""
def __init__(self, data_set, ramp_order=None, nprocs=1):
# Evoke super constructor to set up the normal ForwardModel:
super().__init__(data_set, ramp_order)
# Initialize multiprocessing-specific stuff:
self.nprocs = nprocs
img_per_proc = np.ceil(self.data_set.count / self.nprocs).astype(np.int)
hp = self.data_set.hook_points
self.proc_hook_points = [0]
self.pipes = []
self.processes = []
for proc_id in range(self.nprocs):
# Create SubDataSets:
sub_data = DataSet(self.data_set.a, self.data_set.dim, self.data_set.b_0,
self.data_set.mask, self.data_set.Se_inv)
# Distribute data to SubDataSets:
start = proc_id * img_per_proc
stop = np.min(((proc_id + 1) * img_per_proc, self.data_set.count))
self.proc_hook_points.append(hp[stop])
sub_data.phasemaps = self.data_set.phasemaps[start:stop]
sub_data.projectors = self.data_set.projectors[start:stop]
# Create SubForwardModel:
sub_fwd_model = ForwardModel(sub_data, ramp_order=None) # ramps handled in master!
# Create communication pipe:
self.pipes.append(mp.Pipe())
# Create process:
p = mp.Process(name='Worker {}'.format(proc_id), target=_worker,
args=(sub_fwd_model, self.pipes[proc_id][1]))
self.processes.append(p)
# Start process:
p.start()
self._log.debug('Creating ' + str(self))
def __call__(self, x):
# Extract ramp parameters if necessary (x will be shortened!):
x = self.ramp.extract_ramp_params(x)
# Distribute input to processes and start working:
for proc_id in range(self.nprocs):
self.pipes[proc_id][0].send(('__call__', (x,)))
# Initialize result vector and shorten hook point names:
result = np.zeros(self.m)
hp = self.hook_points
php = self.proc_hook_points
# Calculate ramps (if necessary):
if self.ramp_order is not None:
for i in range(self.data_set.count):
result[hp[i]:hp[i + 1]] += self.ramp(i).phase.ravel()
# Get process results from the pipes:
for proc_id in range(self.nprocs):
result[php[proc_id]:php[proc_id + 1]] += self.pipes[proc_id][0].recv()
# Return result:
return result
def jac_dot(self, x, vector):
"""Calculate the product of the Jacobi matrix with a given `vector`.
Parameters
----------
x : :class:`~numpy.ndarray` (N=1)
Evaluation point of the jacobi-matrix. The Jacobi matrix is constant for a linear
problem, thus `x` can be set to None (it is not used in the computation). It is
implemented for the case that in the future nonlinear problems have to be solved.
vector : :class:`~numpy.ndarray` (N=1)
Vectorized form of the 3D magnetization distribution. First the `x`, then the `y` and
lastly the `z` components are listed. Ramp parameters are also added at the end if
necessary.
Returns
-------
result_vector : :class:`~numpy.ndarray` (N=1)
Product of the Jacobi matrix (which is not explicitly calculated) with the input
`vector`.
"""
# Extract ramp parameters if necessary (vector will be shortened!):
vector = self.ramp.extract_ramp_params(vector)
# Distribute input to processes and start working:
for proc_id in range(self.nprocs):
self.pipes[proc_id][0].send(('jac_dot', (None, vector)))
# Initialize result vector and shorten hook point names:
result = np.zeros(self.m)
hp = self.hook_points
php = self.proc_hook_points
# Calculate ramps (if necessary):
if self.ramp_order is not None:
for i in range(self.data_set.count):
result[hp[i]:hp[i + 1]] += self.ramp.jac_dot(i)
# Get process results from the pipes:
for proc_id in range(self.nprocs):
result[php[proc_id]:php[proc_id + 1]] += self.pipes[proc_id][0].recv()
# Return result:
return result
def jac_T_dot(self, x, vector):
"""'Calculate the product of the transposed Jacobi matrix with a given `vector`.
Parameters
----------
x : :class:`~numpy.ndarray` (N=1)
Evaluation point of the jacobi-matrix. The jacobi matrix is constant for a linear
problem, thus `x` can be set to None (it is not used in the computation). It is used
for the case that in the future nonlinear problems have to be solved.
vector : :class:`~numpy.ndarray` (N=1)
Vectorized form of all 2D phase maps one after another in one vector.
Returns
-------
result_vector : :class:`~numpy.ndarray` (N=1)
Product of the transposed Jacobi matrix (which is not explicitly calculated) with
the input `vector`. If necessary, transposed ramp parameters are concatenated.
"""
php = self.proc_hook_points
# Distribute input to processes and start working:
for proc_id in range(self.nprocs):
sub_vec = vector[php[proc_id]:php[proc_id + 1]]
self.pipes[proc_id][0].send(('jac_T_dot', (None, sub_vec)))
# Calculate ramps:
ramp_params = self.ramp.jac_T_dot(vector) # calculate ramp_params separately!
# Initialize result vector:
result = np.zeros(3 * self.data_set.mask.sum())
# Get process results from the pipes:
for proc_id in range(self.nprocs):
result += self.pipes[proc_id][0].recv()
# Return result:
return np.concatenate((result, ramp_params))
def finalize(self):
"""'Finalize the processes and let them join the master process.
Returns
-------
None
"""
# Finalize processes:
for proc_id in range(self.nprocs):
self.pipes[proc_id][0].send('STOP')
# Exit the completed processes:
for p in self.processes:
p.join()
def _worker(fwd_model, pipe):
# Has to be directly accessible in the module as a function, NOT a method of a class instance!
print('... {} starting!'.format(mp.current_process().name))
sys.stdout.flush()
for method, arguments in iter(pipe.recv, 'STOP'):
# '... {} processes method {}'.format(mp.current_process().name, method)
sys.stdout.flush()
result = getattr(fwd_model, method)(*arguments)
pipe.send(result)
print('... ', mp.current_process().name, 'exiting!')
sys.stdout.flush()
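# Usage sketch for the multiprocessing variant above; `data_set` is assumed to be a populated
# DataSet and the four worker processes are an arbitrary illustration value.
def _example_distributed_forward_model(data_set):
    """Sketch: same call interface as ForwardModel, distributed over worker processes."""
    fwd_model = DistributedForwardModel(data_set, ramp_order=None, nprocs=4)
    phase_vec = fwd_model(np.zeros(fwd_model.n))
    fwd_model.finalize()  # let the worker processes join the master process again
    return phase_vec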
# -*- coding: utf-8 -*-
# Copyright 2014 by Forschungszentrum Juelich GmbH
# Author: J. Caron
#
"""This module provides the :class:`~.Kernel` class, representing the phase contribution of one
single magnetized pixel."""
import logging
import numpy as np
from jutil import fft
__all__ = ['Kernel', 'PHI_0']
PHI_0 = 2067.83 # magnetic flux in T*nm²
class Kernel(object):
"""Class for calculating kernel matrices for the phase calculation.
Represents the phase of a single magnetized pixel for two orthogonal directions (`u` and `v`),
which can be accessed via the corresponding attributes. The default elementary geometry is
`disc`, but can also be specified as the phase of a `slab` representation of a single
magnetized pixel. During the construction, a few attributes are calculated that are used in
the convolution during phase calculation in the different :class:`~Phasemapper` classes.
An instance of the :class:`~.Kernel` class can be called as a function with a `vector`,
which represents the projected magnetization onto a 2-dimensional grid.
Attributes
----------
a : float
The grid spacing in nm.
dim_uv : tuple of int (N=2), optional
Dimensions of the 2-dimensional projected magnetization grid from which the phase should
be calculated.
dim_kern : tuple of int (N=2)
Dimensions of the kernel, which is ``2N-1`` for both axes compared to `dim_uv`.
dim_pad : tuple of int (N=2)
Dimensions of the padded FOV, which is ``2N`` (if FFTW is used) or the next highest power
of 2 (for numpy-FFT).
dim_fft : tuple of int (N=2)
Dimensions of the grid, which is used for the FFT, taking into account that a RFFT should
be used (one axis is halved in comparison to `dim_pad`).
b_0 : float, optional
Saturation magnetization in Tesla, which is used for the phase calculation. Default is 1.
geometry : {'disc', 'slab'}, optional
The elementary geometry of the single magnetized pixel.
u : :class:`~numpy.ndarray` (N=3)
The phase contribution of one pixel magnetized in u-direction.
v : :class:`~numpy.ndarray` (N=3)
The phase contribution of one pixel magnetized in v-direction.
u_fft : :class:`~numpy.ndarray` (N=3)
The real FFT of the phase contribution of one pixel magnetized in u-direction.
v_fft : :class:`~numpy.ndarray` (N=3)
The real FFT of the phase contribution of one pixel magnetized in v-direction.
slice_phase : tuple (N=2) of :class:`slice`
A tuple of :class:`slice` objects to extract the original FOV from the increased one with
size `dim_pad` for the elementary kernel phase. The kernel is shifted, thus the center is
not at (0, 0), which also shifts the slicing compared to `slice_mag`.
slice_mag : tuple (N=2) of :class:`slice`
A tuple of :class:`slice` objects to extract the original FOV from the increased one with
size `dim_pad` for the projected magnetization distribution.
prw_vec: tuple of 2 int, optional
A two-component vector describing the displacement of the reference wave to include
perturbation of this reference by the object itself (via fringing fields), (y, x).
dtype: numpy dtype, optional
Data type of the kernel. Default is np.float32.
"""
_log = logging.getLogger(__name__ + '.Kernel')
def __init__(self, a, dim_uv, b_0=1., prw_vec=None, geometry='disc', dtype=np.float32):
self._log.debug('Calling __init__')
# Set basic properties:
self.b_0 = b_0
self.prw_vec = prw_vec
self.dim_uv = dim_uv # Dimensions of the FOV
self.dim_kern = tuple(2 * np.array(dim_uv) - 1) # Dimensions of the kernel
self.a = a
self.geometry = geometry
# Set up FFT:
if fft.HAVE_FFTW:
self.dim_pad = tuple(2 * np.array(dim_uv)) # is at least even (not nec. power of 2)
else:
self.dim_pad = tuple(2 ** np.ceil(np.log2(2 * np.array(dim_uv))).astype(int)) # pow(2)
self.dim_fft = (self.dim_pad[0], self.dim_pad[1] // 2 + 1) # last axis is real
self.slice_phase = (slice(dim_uv[0] - 1, self.dim_kern[0]), # Shift because kernel center
slice(dim_uv[1] - 1, self.dim_kern[1])) # is not at (0, 0)!
self.slice_mag = (slice(0, dim_uv[0]), # Magnetization is padded on the far end!
slice(0, dim_uv[1])) # (Phase cutout is shifted as listed above)
# Calculate kernel (single pixel phase):
# [M_0] = A/m --> This is the magnetization, not the magnetic moment (A/m * m³ = Am²)!
# [PHI_0 / µ_0] = Tm² / Tm/A = Am
# [b_0] = [M_0] * [µ_0] = A/m * N/A² = N/Am = T
# [coeff] = [b_0 * a² / (2*PHI_0)] = T * m² / Tm² = 1 --> without unit (phase)!
coeff = b_0 * a ** 2 / (2 * PHI_0) # Minus is gone because of negative z-direction
v_dim, u_dim = dim_uv
u = np.linspace(-(u_dim - 1), u_dim - 1, num=2 * u_dim - 1)
v = np.linspace(-(v_dim - 1), v_dim - 1, num=2 * v_dim - 1)
uu, vv = np.meshgrid(u, v)
self.u = np.empty(self.dim_kern, dtype=dtype)
self.v = np.empty(self.dim_kern, dtype=dtype)
self.u[...] = coeff * self._get_elementary_phase(geometry, uu, vv, a)
self.v[...] = coeff * -self._get_elementary_phase(geometry, vv, uu, a)
# Include perturbed reference wave:
if prw_vec is not None:
uu += prw_vec[1]
vv += prw_vec[0]
self.u[...] -= coeff * self._get_elementary_phase(geometry, uu, vv, a)
self.v[...] -= coeff * -self._get_elementary_phase(geometry, vv, uu, a)
# Calculate Fourier trafo of kernel components:
self.u_fft = fft.rfftn(self.u, self.dim_pad)
self.v_fft = fft.rfftn(self.v, self.dim_pad)
self._log.debug('Created ' + str(self))
def __repr__(self):
self._log.debug('Calling __repr__')
return '%s(a=%r, dim_uv=%r, b_0=%r, prw_vec=%r, geometry=%r)' % \
(self.__class__, self.a, self.dim_uv, self.b_0, self.prw_vec, self.geometry)
def __str__(self):
self._log.debug('Calling __str__')
return 'Kernel(a=%s, dim_uv=%s, b_0=%s, prw_vec=%s, geometry=%s)' % \
(self.a, self.dim_uv, self.b_0, self.prw_vec, self.geometry)
def _get_elementary_phase(self, geometry, n, m, a):
self._log.debug('Calling _get_elementary_phase')
if geometry == 'disc':
in_or_out = ~ np.logical_and(n == 0, m == 0)
return m / (n ** 2 + m ** 2 + 1E-30) * in_or_out
elif geometry == 'slab':
def _F_a(n, m):
A = np.log(a ** 2 * (n ** 2 + m ** 2))
B = np.arctan(n / m)
return n * A - 2 * n + 2 * m * B
return 0.5 * (_F_a(n - 0.5, m - 0.5) - _F_a(n + 0.5, m - 0.5) -
_F_a(n - 0.5, m + 0.5) + _F_a(n + 0.5, m + 0.5))
def print_info(self):
"""Print information about the kernel.
Returns
-------
None
"""
self._log.debug('Calling print_info')
print('Shape of the FOV :', self.dim_uv)
print('Shape of the Kernel :', self.dim_kern)
print('Zero-padded shape :', self.dim_pad)
print('Shape of the FFT :', self.dim_fft)
print('Slice for the phase :', self.slice_phase)
print('Slice for the magn. :', self.slice_mag)
print('Saturation Induction:', self.b_0)
print('Grid spacing : {} nm'.format(self.a))
print('Geometry :', self.geometry)
print('PRW vector         : {}'.format(self.prw_vec))
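# Usage sketch for the class above; the 32x32 field of view and the unit grid spacing are
# arbitrary illustration values.
def _example_kernel():
    """Sketch: build a kernel for a 32x32 projected grid and inspect its shapes."""
    kernel = Kernel(a=1., dim_uv=(32, 32), b_0=1., geometry='disc')
    kernel.print_info()                            # prints FOV, kernel, padding and FFT shapes
    return kernel.u_fft.shape, kernel.v_fft.shape  # real FFTs used for the phase convolution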
# -*- coding: utf-8 -*-
# Copyright 2016 by Forschungszentrum Juelich GmbH
# Author: J. Caron
#
"""Subpackage containing functionality for creating magnetic distributions."""
from . import shapes
from . import examples
from .magcreator import *
__all__ = ['shapes', 'examples']
__all__.extend(magcreator.__all__)
# -*- coding: utf-8 -*-
# Copyright 2016 by Forschungszentrum Juelich GmbH
# Author: J. Caron
#
"""Provide simple examples for magnetic distributions."""
import logging
import numpy as np
import random as rnd
from . import magcreator as mc
from . import shapes
from ..fielddata import VectorData
__all__ = ['pyramid_logo', 'singularity', 'homog_pixel', 'homog_slab', 'homog_disc',
'homog_sphere', 'homog_filament', 'homog_alternating_filament',
'homog_array_sphere_disc_slab', 'homog_random_pixels', 'homog_random_slabs',
'vortex_slab', 'vortex_disc', 'vortex_alternating_discs', 'vortex_sphere',
'vortex_horseshoe', 'smooth_vortex_disc', 'source_disc',
'core_shell_disc', 'core_shell_sphere']
_log = logging.getLogger(__name__)
def pyramid_logo(a=1., dim=(1, 256, 256), phi=np.pi / 2, theta=np.pi / 2):
"""Create pyramid logo."""
_log.debug('Calling pyramid_logo')
mag_shape = np.zeros(dim)
x = range(dim[2])
y = range(dim[1])
xx, yy = np.meshgrid(x, y)
bottom = (yy >= 0.25 * dim[1])
left = (yy <= 0.75 / 0.5 * dim[1] / dim[2] * xx)
right = np.fliplr(left)
mag_shape[0, ...] = np.logical_and(np.logical_and(left, right), bottom)
return VectorData(a, mc.create_mag_dist_homog(mag_shape, phi, theta))
def singularity(a=1., dim=(8, 8, 8), center=None):
"""Create magnetic singularity."""
_log.debug('Calling singularity')
if center is None:
center = (dim[0] // 2, dim[1] // 2, dim[2] // 2)
x = np.linspace(-center[2], dim[2] - 1 - center[2], dim[2]) + 0.5 # pixel center!
y = np.linspace(-center[1], dim[1] - 1 - center[1], dim[1]) + 0.5 # pixel center!
z = np.linspace(-center[0], dim[0] - 1 - center[0], dim[0]) + 0.5 # pixel center!
yy, zz, xx = np.meshgrid(x, y, z) # What's up with this strange order???
magnitude = np.array((xx, yy, zz)).astype(float)
magnitude /= np.sqrt((magnitude ** 2 + 1E-30).sum(axis=0)) # Normalise!
return VectorData(a, magnitude)
def homog_pixel(a=1., dim=(1, 9, 9), pixel=None, phi=np.pi/4, theta=np.pi/2):
"""Create single magnetised slab."""
_log.debug('Calling homog_pixel')
if pixel is None:
pixel = (dim[0] // 2, dim[1] // 2, dim[2] // 2)
mag_shape = shapes.pixel(dim, pixel)
return VectorData(a, mc.create_mag_dist_homog(mag_shape, phi, theta))
def homog_slab(a=1., dim=(32, 32, 32), center=None, width=None, phi=np.pi/4, theta=np.pi/4):
"""Create homogeneous slab magnetisation distribution."""
_log.debug('Calling homog_slab')
if center is None:
center = (dim[0] // 2, dim[1] // 2, dim[2] // 2)
if width is None:
width = (np.max((dim[0] // 8, 1)), np.max((dim[1] // 2, 1)), np.max((dim[2] // 4, 1)))
mag_shape = shapes.slab(dim, center, width)
return VectorData(a, mc.create_mag_dist_homog(mag_shape, phi, theta))
def homog_disc(a=1., dim=(32, 32, 32), center=None, radius=None, height=None,
phi=np.pi / 4, theta=np.pi / 4, axis='z'):
"""Create homogeneous disc magnetisation distribution."""
_log.debug('Calling homog_disc')
if center is None:
center = (dim[0] // 2, dim[1] // 2, dim[2] // 2)
if radius is None:
radius = dim[2] // 4
if height is None:
height = np.max((dim[0] // 2, 1))
mag_shape = shapes.disc(dim, center, radius, height, axis)
return VectorData(a, mc.create_mag_dist_homog(mag_shape, phi, theta))
def homog_sphere(a=1., dim=(32, 32, 32), center=None, radius=None, phi=np.pi/4, theta=np.pi/4):
"""Create homogeneous sphere magnetisation distribution."""
_log.debug('Calling homog_sphere')
if center is None:
center = (dim[0] // 2, dim[1] // 2, dim[2] // 2)
if radius is None:
radius = dim[2] // 4
mag_shape = shapes.sphere(dim, center, radius)
return VectorData(a, mc.create_mag_dist_homog(mag_shape, phi, theta))
def homog_filament(a=1., dim=(1, 21, 21), pos=None, phi=np.pi / 2, theta=np.pi/2):
"""Create magnetisation distribution of a single magnetised filaments."""
_log.debug('Calling homog_filament')
if pos is None:
pos = (0, dim[1] // 2)
mag_shape = shapes.filament(dim, pos)
return VectorData(a, mc.create_mag_dist_homog(mag_shape, phi, theta))
def homog_alternating_filament(a=1., dim=(1, 21, 21), spacing=5, phi=np.pi/2, theta=np.pi/2):
"""Create magnetisation distribution of alternating filaments."""
_log.debug('Calling homog_alternating_filament')
count = int((dim[1] - 1) / spacing) + 1
magdata = VectorData(a, np.zeros((3,) + dim))
for i in range(count):
pos = i * spacing
mag_shape = shapes.filament(dim, (0, pos))
magdata += VectorData(a, mc.create_mag_dist_homog(mag_shape, phi, theta))
phi *= -1 # Switch the angle
return magdata
def homog_array_sphere_disc_slab(a=1., dim=(64, 128, 128), center_sp=(32, 96, 64), radius_sp=24,
center_di=(32, 32, 96), radius_di=24, height_di=24,
center_sl=(32, 32, 32), width_sl=(48, 48, 48)):
"""Create array of several magnetisation distribution (sphere, disc and slab)."""
_log.debug('Calling homog_array_sphere_disc_slab')
mag_shape_sphere = shapes.sphere(dim, center_sp, radius_sp)
mag_shape_disc = shapes.disc(dim, center_di, radius_di, height_di)
mag_shape_slab = shapes.slab(dim, center_sl, width_sl)
magdata = VectorData(a, mc.create_mag_dist_homog(mag_shape_sphere, np.pi))
magdata += VectorData(a, mc.create_mag_dist_homog(mag_shape_disc, np.pi / 2))
magdata += VectorData(a, mc.create_mag_dist_homog(mag_shape_slab, np.pi / 4))
return magdata
def homog_random_pixels(a=1., dim=(1, 64, 64), count=10, rnd_seed=24):
"""Create random magnetised pixels."""
_log.debug('Calling homog_random_pixels')
rnd.seed(rnd_seed)
magdata = VectorData(a, np.zeros((3,) + dim))
for i in range(count):
pixel = (rnd.randrange(dim[0]), rnd.randrange(dim[1]), rnd.randrange(dim[2]))
mag_shape = shapes.pixel(dim, pixel)
phi = 2 * np.pi * rnd.random()
magdata += VectorData(a, mc.create_mag_dist_homog(mag_shape, phi))
return magdata
def homog_random_slabs(a=1., dim=(1, 64, 64), count=10, width_max=5, rnd_seed=2):
"""Create random magnetised slabs."""
_log.debug('Calling homog_random_slabs')
rnd.seed(rnd_seed)
magdata = VectorData(a, np.zeros((3,) + dim))
for i in range(count):
width = (1, rnd.randint(1, width_max), rnd.randint(1, width_max))
center = (rnd.randrange(int(width[0] / 2), dim[0] - int(width[0] / 2)),
rnd.randrange(int(width[1] / 2), dim[1] - int(width[1] / 2)),
rnd.randrange(int(width[2] / 2), dim[2] - int(width[2] / 2)))
mag_shape = shapes.slab(dim, center, width)
phi = 2 * np.pi * rnd.random()
magdata += VectorData(a, mc.create_mag_dist_homog(mag_shape, phi))
return magdata
def vortex_slab(a=1., dim=(32, 32, 32), center=None, width=None, axis='z'):
"""Create vortex slab magnetisation distribution."""
_log.debug('Calling vortex_slab')
if center is None:
center = (dim[0] // 2, dim[1] // 2, dim[2] // 2)
if width is None:
width = (np.max((dim[0] // 2, 1)), np.max((dim[1] // 2, 1)), np.max((dim[2] // 2, 1)))
mag_shape = shapes.slab(dim, center, width)
magnitude = mc.create_mag_dist_vortex(mag_shape, center, axis)
return VectorData(a, magnitude)
def vortex_disc(a=1., dim=(32, 32, 32), center=None, radius=None, height=None, axis='z'):
"""Create vortex disc magnetisation distribution."""
_log.debug('Calling vortex_disc')
if center is None:
center = (dim[0] // 2, dim[1] // 2, dim[2] // 2)
if radius is None:
radius = dim[2] // 4
if height is None:
height = np.max((dim[0] // 2, 1))
mag_shape = shapes.disc(dim, center, radius, height, axis.replace('-', ''))
magnitude = mc.create_mag_dist_vortex(mag_shape, center, axis)
return VectorData(a, magnitude)
def vortex_alternating_discs(a=1., dim=(80, 32, 32), count=8):
"""Create pillar of alternating vortex disc magnetisation distributions."""
_log.debug('Calling vortex_alternating_discs')
segment_height = dim[0] // (count + 2)
magdata = VectorData(a, np.zeros((3,) + dim))
for i in range(count):
axis = 'z' if i % 2 == 0 else '-z'
center = (segment_height * (i + 1 + 0.5), dim[1] // 2, dim[2] // 2)
radius = dim[2] // 4
height = segment_height
mag_shape = shapes.disc(dim, center=center, radius=radius, height=height)
mag_amp = mc.create_mag_dist_vortex(mag_shape=mag_shape, center=center, axis=axis)
magdata += VectorData(a, mag_amp)
return magdata
def vortex_sphere(a=1., dim=(32, 32, 32), center=None, radius=None, axis='z'):
"""Create vortex sphere magnetisation distribution."""
_log.debug('Calling vortex_sphere')
if center is None:
center = (dim[0] // 2, dim[1] // 2, dim[2] // 2)
if radius is None:
radius = dim[2] // 4
mag_shape = shapes.sphere(dim, center, radius)
magnitude = mc.create_mag_dist_vortex(mag_shape, center, axis)
return VectorData(a, magnitude)
def vortex_horseshoe(a=1., dim=(16, 64, 64), center=None, radius_core=None,
radius_shell=None, height=None):
"""Create magnetic horseshoe vortex magnetisation distribution."""
_log.debug('Calling vortex_horseshoe')
if center is None:
center = (dim[0] // 2, dim[1] // 2, dim[2] // 2)
if radius_core is None:
radius_core = dim[1] // 8
if radius_shell is None:
radius_shell = dim[1] // 4
if height is None:
height = np.max((dim[0] // 2, 1))
mag_shape_core = shapes.disc(dim, center, radius_core, height)
mag_shape_outer = shapes.disc(dim, center, radius_shell, height)
mag_shape_horseshoe = np.logical_xor(mag_shape_outer, mag_shape_core)
mag_shape_horseshoe[:, dim[1] // 2:, :] = False
return VectorData(a, mc.create_mag_dist_vortex(mag_shape_horseshoe))
def smooth_vortex_disc(a=1., dim=(32, 32, 32), center=None, radius=None, height=None, axis='z',
core_r=0, vortex_radius=None):
"""Create smooth vortex disc magnetisation distribution."""
_log.debug('Calling smooth_vortex_disc')
if center is None:
center = (dim[0] // 2, dim[1] // 2, dim[2] // 2)
if radius is None:
radius = dim[2] // 4
if height is None:
height = np.max((dim[0] // 2, 1))
if vortex_radius is None:
vortex_radius = radius // 2
mag_shape = shapes.disc(dim, center, radius, height, axis.replace('-', '')) # same for +/-
magnitude = mc.create_mag_dist_smooth_vortex(mag_shape, center, vortex_radius, core_r, axis)
return VectorData(a, magnitude)
def source_disc(a=1., dim=(32, 32, 32), center=None, radius=None, height=None, axis='z'):
"""Create source disc magnetisation distribution."""
_log.debug('Calling source_disc')
if center is None:
center = (dim[0] // 2, dim[1] // 2, dim[2] // 2)
if radius is None:
radius = dim[2] // 4
if height is None:
height = np.max((dim[0] // 2, 1))
mag_shape = shapes.disc(dim, center, radius, height, axis)
magnitude = mc.create_mag_dist_source(mag_shape, center, axis)
return VectorData(a, magnitude)
def core_shell_disc(a=1., dim=(32, 32, 32), center=None, radius_core=None,
radius_shell=None, height=None, rate_core_to_shell=0.75):
"""Create magnetic core shell disc magnetisation distribution."""
_log.debug('Calling core_shell_disc')
if center is None:
center = (dim[0] // 2, dim[1] // 2, dim[2] // 2)
if radius_core is None:
radius_core = dim[1] // 8
if radius_shell is None:
radius_shell = dim[1] // 4
if height is None:
height = np.max((dim[0] // 2, 1))
mag_shape_core = shapes.disc(dim, center, radius_core, height)
mag_shape_outer = shapes.disc(dim, center, radius_shell, height)
mag_shape_shell = np.logical_xor(mag_shape_outer, mag_shape_core)
magdata = VectorData(a, mc.create_mag_dist_vortex(mag_shape_shell)) * rate_core_to_shell
magdata += VectorData(a, mc.create_mag_dist_homog(mag_shape_core, phi=0, theta=0))
return magdata
def core_shell_sphere(a=1., dim=(32, 32, 32), center=None, radius_core=None,
radius_shell=None, rate_core_to_shell=0.75):
"""Create magnetic core shell sphere magnetisation distribution."""
_log.debug('Calling core_shell_sphere')
if center is None:
center = (dim[0] // 2, dim[1] // 2, dim[2] // 2)
if radius_core is None:
radius_core = dim[1] // 8
if radius_shell is None:
radius_shell = dim[1] // 4
mag_shape_sphere = shapes.sphere(dim, center, radius_shell)
mag_shape_disc = shapes.disc(dim, center, radius_core, height=dim[0])
mag_shape_core = np.logical_and(mag_shape_sphere, mag_shape_disc)
mag_shape_shell = np.logical_and(mag_shape_sphere, np.logical_not(mag_shape_core))
magdata = VectorData(a, mc.create_mag_dist_vortex(mag_shape_shell)) * rate_core_to_shell
magdata += VectorData(a, mc.create_mag_dist_homog(mag_shape_core, phi=0, theta=0))
return magdata
# -*- coding: utf-8 -*-
# Copyright 2016 by Forschungszentrum Juelich GmbH
# Author: J. Caron
#
"""Create simple magnetic distributions.
The :mod:`~.magcreator` module is responsible for the creation of simple distributions of
magnetic moments. In the :mod:`~.shapes` module, you can find several general shapes for the
3-dimensional volume that should be magnetized (e.g. slabs, spheres, discs or single pixels).
These shapes are then used as input for the creating functions (or you could specify the
volume yourself as a 3-dimensional boolean matrix or a matrix with values in the range from 0 to 1,
which modifies the magnetization amplitude). The specified volume can either be magnetized
homogeneously with the :func:`~.create_mag_dist_homog` function by specifying the magnetization
direction, or in a vortex state with the :func:`~.create_mag_dist_vortex` function by specifying the vortex
center.
"""
import logging
import numpy as np
from numpy import pi
__all__ = ['create_mag_dist_homog', 'create_mag_dist_vortex', 'create_mag_dist_source',
'create_mag_dist_smooth_vortex']
_log = logging.getLogger(__name__)
def create_mag_dist_homog(mag_shape, phi, theta=pi / 2):
"""Create a 3-dimensional magnetic distribution of a homogeneously magnetized object.
Parameters
----------
mag_shape : :class:`~numpy.ndarray` (N=3)
The magnetic shapes (see :mod:`~.shapes` for examples).
phi : float
The azimuthal angle, describing the direction of the magnetized object.
theta : float, optional
The polar angle, describing the direction of the magnetized object.
The default is pi/2, which means, the z-component is zero.
Returns
-------
amplitude : tuple (N=3) of :class:`~numpy.ndarray` (N=3)
The magnetic distribution as a tuple of the 3 components in
`x`-, `y`- and `z`-direction on the 3-dimensional grid.
"""
_log.debug('Calling create_mag_dist_homog')
dim = np.shape(mag_shape)
assert len(dim) == 3, 'Magnetic shapes must describe 3-dimensional distributions!'
z_mag = np.ones(dim) * np.cos(theta) * mag_shape
y_mag = np.ones(dim) * np.sin(theta) * np.sin(phi) * mag_shape
x_mag = np.ones(dim) * np.sin(theta) * np.cos(phi) * mag_shape
return np.array([x_mag, y_mag, z_mag])
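# Usage sketch for the function above; the sphere parameters are arbitrary illustration
# values and `shapes` is the sibling module of this subpackage.
def _example_create_homog_sphere():
    """Sketch: magnetize a spherical volume homogeneously in the x-direction."""
    from . import shapes
    mag_shape = shapes.sphere((32, 32, 32), (16, 16, 16), 8)
    return create_mag_dist_homog(mag_shape, phi=0., theta=pi / 2)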
def create_mag_dist_vortex(mag_shape, center=None, axis='z'):
"""Create a 3-dimensional magnetic distribution of a homogeneous magnetized object.
Parameters
----------
mag_shape : :class:`~numpy.ndarray` (N=3)
The magnetic shapes (see :mod:`~.shapes` for examples).
center : tuple (N=2 or N=3), optional
The vortex center, given in 2D `(v, u)` or 3D `(z, y, x)`, where the coordinate along the
vortex axis is discarded. Defaults to the center of the field of view if not specified.
The vortex center has to lie between two pixels.
axis : {'z', '-z', 'y', '-y', 'x', '-x'}, optional
The orientation of the vortex axis. The default is 'z'. Negative values invert the vortex
orientation.
Returns
-------
amplitude : tuple (N=3) of :class:`~numpy.ndarray` (N=3)
The magnetic distribution as a tuple of the 3 components in
`x`-, `y`- and `z`-direction on the 3-dimensional grid.
Notes
-----
To avoid singularities, the vortex center should lie between the pixel centers (which
reside at coordinates with _.5 at the end), i.e. integer values should be used as center
coordinates (e.g. coordinate 1 lies between the first and the second pixel).
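Examples
--------
A minimal doctest-style sketch (a thin, fully magnetized slab is used only for illustration):
>>> import numpy as np
>>> mag_shape = np.ones((1, 8, 8), dtype=bool)
>>> magnitude = create_mag_dist_vortex(mag_shape)
>>> magnitude.shape
(3, 1, 8, 8)
>>> bool(np.allclose(magnitude[2], 0))  # in-plane vortex around z: no z-component
True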
"""
_log.debug('Calling create_mag_dist_vortex')
dim = mag_shape.shape
assert len(dim) == 3, 'Magnetic shapes must describe 3-dimensional distributions!'
assert center is None or len(center) in {2, 3}, \
'Vortex center has to be defined in 3D or 2D or not at all!'
if center is None:
center = (dim[1] / 2, dim[2] / 2)
sign = -1 if '-' in axis else 1
if axis in ('z', '-z'):
if len(center) == 3: # if a 3D-center is given, just take the x and y components
center = (center[1], center[2])
u = np.linspace(-center[1], dim[2] - 1 - center[1], dim[2]) + 0.5 # pixel center!
v = np.linspace(-center[0], dim[1] - 1 - center[0], dim[1]) + 0.5 # pixel center!
uu, vv = np.meshgrid(u, v)
phi = np.expand_dims(np.arctan2(vv, uu) - pi / 2, axis=0)
phi = np.tile(phi, (dim[0], 1, 1))
z_mag = np.zeros(dim)
y_mag = np.ones(dim) * -np.sin(phi) * mag_shape * sign
x_mag = np.ones(dim) * -np.cos(phi) * mag_shape * sign
elif axis in ('y', '-y'):
if len(center) == 3: # if a 3D-center is given, just take the x and z components
center = (center[0], center[2])
u = np.linspace(-center[1], dim[2] - 1 - center[1], dim[2]) + 0.5 # pixel center!
v = np.linspace(-center[0], dim[0] - 1 - center[0], dim[0]) + 0.5 # pixel center!
uu, vv = np.meshgrid(u, v)
phi = np.expand_dims(np.arctan2(vv, uu) - pi / 2, axis=1)
phi = np.tile(phi, (1, dim[1], 1))
z_mag = np.ones(dim) * np.sin(phi) * mag_shape * sign
y_mag = np.zeros(dim)
x_mag = np.ones(dim) * np.cos(phi) * mag_shape * sign
elif axis in ('x', '-x'):
if len(center) == 3: # if a 3D-center is given, just take the z and y components
center = (center[0], center[1])
u = np.linspace(-center[1], dim[1] - 1 - center[1], dim[1]) + 0.5 # pixel center!
v = np.linspace(-center[0], dim[0] - 1 - center[0], dim[0]) + 0.5 # pixel center!
uu, vv = np.meshgrid(u, v)
phi = np.expand_dims(np.arctan2(vv, uu) - pi / 2, axis=2)
phi = np.tile(phi, (1, 1, dim[2]))
z_mag = np.ones(dim) * -np.sin(phi) * mag_shape * sign
y_mag = np.ones(dim) * -np.cos(phi) * mag_shape * sign
x_mag = np.zeros(dim)
else:
raise ValueError('{} is not a valid argument (use x, -x, y, -y, z or -z)'.format(axis))
return np.array([x_mag, y_mag, z_mag])
def create_mag_dist_smooth_vortex(mag_shape, center=None, vort_r=None, core_r=0, axis='z'):
"""Create a 3-dimensional magnetic distribution of a homogeneous magnetized object.
Parameters
----------
mag_shape : :class:`~numpy.ndarray` (N=3)
The magnetic shapes (see :mod:`~.shapes` for examples).
center : tuple (N=2 or N=3), optional
The vortex center, given in 2D `(v, u)` or 3D `(z, y, x)`, where the coordinate along the
vortex axis is discarded. Defaults to the center of the field of view if not specified.
The vortex center has to lie between two pixels.
vort_r : float
Radius over which the out-of-plane core component decays into the in-plane vortex.
Has to be specified (no default).
core_r : float, optional
Radius of the fully out-of-plane vortex core around the center. The default is 0.
axis : {'z', '-z', 'y', '-y', 'x', '-x'}, optional
The orientation of the vortex axis. The default is 'z'. Negative values invert the vortex
orientation.
Returns
-------
amplitude : tuple (N=3) of :class:`~numpy.ndarray` (N=3)
The magnetic distribution as a tuple of the 3 components in
`x`-, `y`- and `z`-direction on the 3-dimensional grid.
Notes
-----
To avoid singularities, the vortex center should lie between the pixel centers (which
reside at coordinates with _.5 at the end), i.e. integer values should be used as center
coordinates (e.g. coordinate 1 lies between the first and the second pixel).
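Examples
--------
A brief example (grid size and `vort_r` are chosen only for illustration):
>>> import numpy as np
>>> mag_shape = np.ones((1, 16, 16), dtype=bool)
>>> magnitude = create_mag_dist_smooth_vortex(mag_shape, vort_r=4)
>>> magnitude.shape
(3, 1, 16, 16)
>>> bool(np.all(magnitude[2] >= 0))  # out-of-plane core component points along +z
True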
"""
def core(r):
"""Function describing the smooth vortex core."""
r_clip = np.clip(r - core_r, a_min=0, a_max=None)
return 1 - 2/np.pi * np.arcsin(np.tanh(np.pi*r_clip/vort_r))
_log.debug('Calling create_mag_dist_smooth_vortex')
dim = mag_shape.shape
assert len(dim) == 3, 'Magnetic shapes must describe 3-dimensional distributions!'
assert center is None or len(center) in {2, 3}, \
'Vortex center has to be defined in 3D or 2D or not at all!'
if center is None:
center = (dim[1] / 2, dim[2] / 2)
sign = -1 if '-' in axis else 1
if axis in ('z', '-z'):
if len(center) == 3: # if a 3D-center is given, just take the x and y components
center = (center[1], center[2])
u = np.linspace(-center[1], dim[2] - 1 - center[1], dim[2]) + 0.5 # pixel center!
v = np.linspace(-center[0], dim[1] - 1 - center[0], dim[1]) + 0.5 # pixel center!
uu, vv = np.meshgrid(u, v)
rr = np.hypot(uu, vv)[None, :, :]
phi = np.expand_dims(np.arctan2(vv, uu) - pi / 2, axis=0)
phi = np.tile(phi, (dim[0], 1, 1))
z_mag = np.ones(dim) * mag_shape * sign * core(rr)
y_mag = np.ones(dim) * -np.sin(phi) * mag_shape * sign * np.sqrt(1 - core(rr))
x_mag = np.ones(dim) * -np.cos(phi) * mag_shape * sign * np.sqrt(1 - core(rr))
elif axis in ('y', '-y'):
if len(center) == 3: # if a 3D-center is given, just take the x and z components
center = (center[0], center[2])
u = np.linspace(-center[1], dim[2] - 1 - center[1], dim[2]) + 0.5 # pixel center!
v = np.linspace(-center[0], dim[0] - 1 - center[0], dim[0]) + 0.5 # pixel center!
uu, vv = np.meshgrid(u, v)
rr = np.hypot(uu, vv)[:, None, :]
phi = np.expand_dims(np.arctan2(vv, uu) - pi / 2, axis=1)
phi = np.tile(phi, (1, dim[1], 1))
z_mag = np.ones(dim) * np.sin(phi) * mag_shape * sign * np.sqrt(1 - core(rr))
y_mag = np.ones(dim) * mag_shape * sign * core(rr)
x_mag = np.ones(dim) * np.cos(phi) * mag_shape * sign * np.sqrt(1 - core(rr))
elif axis in ('x', '-x'):
if len(center) == 3: # if a 3D-center is given, just take the z and y components
center = (center[0], center[1])
u = np.linspace(-center[1], dim[1] - 1 - center[1], dim[1]) + 0.5 # pixel center!
v = np.linspace(-center[0], dim[0] - 1 - center[0], dim[0]) + 0.5 # pixel center!
uu, vv = np.meshgrid(u, v)
rr = np.hypot(uu, vv)[:, :, None]
phi = np.expand_dims(np.arctan2(vv, uu) - pi / 2, axis=2)
phi = np.tile(phi, (1, 1, dim[2]))
z_mag = np.ones(dim) * -np.sin(phi) * mag_shape * sign * np.sqrt(1 - core(rr))
y_mag = np.ones(dim) * -np.cos(phi) * mag_shape * sign * np.sqrt(1 - core(rr))
x_mag = np.ones(dim) * mag_shape * sign * core(rr)
else:
raise ValueError('{} is not a valid argument (use x, -x, y, -y, z or -z)'.format(axis))
return np.array([x_mag, y_mag, z_mag])
def create_mag_dist_source(mag_shape, center=None, axis='z'):
"""Create a 3-dimensional magnetic distribution of a homogeneous magnetized object.
Parameters
----------
mag_shape : :class:`~numpy.ndarray` (N=3)
The magnetic shapes (see :mod:`~.shapes` for examples).
center : tuple (N=2 or N=3), optional
The source center, given in 2D `(v, u)` or 3D `(z, y, x)`, where the coordinate along the
source axis is discarded. Defaults to the center of the field of view if not specified.
The source center has to lie between two pixels.
axis : {'z', '-z', 'y', '-y', 'x', '-x'}, optional
The orientation of the source axis. The default is 'z'. Negative values invert the source
to a sink.
Returns
-------
amplitude : tuple (N=3) of :class:`~numpy.ndarray` (N=3)
The magnetic distribution as a tuple of the 3 components in
`x`-, `y`- and `z`-direction on the 3-dimensional grid.
Notes
-----
To avoid singularities, the source center should lie between the pixel centers (which
reside at coordinates with _.5 at the end), i.e. integer values should be used as center
coordinates (e.g. coordinate 1 lies between the first and the second pixel).
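Examples
--------
A quick sketch (a thin, fully magnetized slab is used only for illustration):
>>> import numpy as np
>>> mag_shape = np.ones((1, 8, 8), dtype=bool)
>>> magnitude = create_mag_dist_source(mag_shape)
>>> magnitude.shape
(3, 1, 8, 8)
>>> bool(np.allclose(magnitude[2], 0))  # moments lie in-plane for axis='z'
True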
"""
_log.debug('Calling create_mag_dist_source')
dim = mag_shape.shape
assert len(dim) == 3, 'Magnetic shapes must describe 3-dimensional distributions!'
assert center is None or len(center) in {2, 3}, \
'Source center has to be defined in 3D or 2D or not at all!'
if center is None:
center = (dim[1] / 2, dim[2] / 2)
sign = -1 if '-' in axis else 1
if axis in ('z', '-z'):
if len(center) == 3: # if a 3D-center is given, just take the x and y components
center = (center[1], center[2])
u = np.linspace(-center[1], dim[2] - 1 - center[1], dim[2]) + 0.5 # pixel center!
v = np.linspace(-center[0], dim[1] - 1 - center[0], dim[1]) + 0.5 # pixel center!
uu, vv = np.meshgrid(u, v)
phi = np.expand_dims(np.arctan2(vv, uu) - pi / 2, axis=0)
phi = np.tile(phi, (dim[0], 1, 1))
z_mag = np.zeros(dim)
y_mag = np.ones(dim) * np.cos(phi) * mag_shape * sign
x_mag = np.ones(dim) * -np.sin(phi) * mag_shape * sign
elif axis in ('y', '-y'):
if len(center) == 3: # if a 3D-center is given, just take the x and z components
center = (center[0], center[2])
u = np.linspace(-center[1], dim[2] - 1 - center[1], dim[2]) + 0.5 # pixel center!
v = np.linspace(-center[0], dim[0] - 1 - center[0], dim[0]) + 0.5 # pixel center!
uu, vv = np.meshgrid(u, v)
phi = np.expand_dims(np.arctan2(vv, uu) - pi / 2, axis=1)
phi = np.tile(phi, (1, dim[1], 1))
z_mag = np.ones(dim) * np.cos(phi) * mag_shape * sign
y_mag = np.zeros(dim)
x_mag = np.ones(dim) * -np.sin(phi) * mag_shape * sign
elif axis in ('x', '-x'):
if len(center) == 3: # if a 3D-center is given, just take the z and y components
center = (center[0], center[1])
u = np.linspace(-center[1], dim[1] - 1 - center[1], dim[1]) + 0.5 # pixel center!
v = np.linspace(-center[0], dim[0] - 1 - center[0], dim[0]) + 0.5 # pixel center!
uu, vv = np.meshgrid(u, v)
phi = np.expand_dims(np.arctan2(vv, uu) - pi / 2, axis=2)
phi = np.tile(phi, (1, 1, dim[2]))
z_mag = np.ones(dim) * np.cos(phi) * mag_shape * sign
y_mag = np.ones(dim) * -np.sin(phi) * mag_shape * sign
x_mag = np.zeros(dim)
else:
raise ValueError('{} is not a valid argument (use x, -x, y, -y, z or -z)'.format(axis))
return np.array([x_mag, y_mag, z_mag])
# -*- coding: utf-8 -*-
# Copyright 2016 by Forschungszentrum Juelich GmbH
# Author: J. Caron
#
"""Provide simple shapes.
This module is a collection of functions that return a 3-dimensional matrix of boolean
values, representing the field volume.
This matrix is used in the functions of the :mod:`~.magcreator` module to create
:class:`~pyramid.fielddata.VectorData` objects which store the field information.
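Examples
--------
A short sketch showing how the boolean shapes can be combined with numpy logic functions
(grid size and radii are chosen only for illustration):
>>> import numpy as np
>>> outer = disc(dim=(1, 32, 32), center=(0, 16, 16), radius=12, height=1)
>>> inner = disc(dim=(1, 32, 32), center=(0, 16, 16), radius=6, height=1)
>>> ring = np.logical_xor(outer, inner)
>>> bool(ring[0, 16, 16]), bool(ring[0, 16, 25])
(False, True)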
"""
import logging
import numpy as np
__all__ = ['slab', 'disc', 'ellipse', 'ellipsoid', 'sphere', 'filament', 'pixel']
_log = logging.getLogger(__name__)
def slab(dim, center, width):
"""Create the shape of a slab.
Parameters
----------
dim : tuple (N=3)
The dimensions of the grid `(z, y, x)`.
center : tuple (N=3)
The center of the slab in pixel coordinates `(z, y, x)`.
width : tuple (N=3)
The width of the slab in pixel coordinates `(z, y, x)`.
Returns
-------
shape : :class:`~numpy.ndarray` (N=3)
The shape as a 3D-array.
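Examples
--------
A minimal example (grid and slab sizes are chosen only for illustration):
>>> s = slab(dim=(4, 4, 4), center=(2, 2, 2), width=(2, 2, 2))
>>> s.shape, int(s.sum())
((4, 4, 4), 8)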
"""
_log.debug('Calling slab')
assert np.shape(dim) == (3,), 'Parameter dim has to be a tuple of length 3!'
assert np.shape(center) == (3,), 'Parameter center has to be a tuple of length 3!'
assert np.shape(width) == (3,), 'Parameter width has to be a tuple of length 3!'
zz, yy, xx = np.indices(dim) + 0.5
xx_shape = np.where(abs(xx - center[2]) <= width[2] / 2, True, False)
yy_shape = np.where(abs(yy - center[1]) <= width[1] / 2, True, False)
zz_shape = np.where(abs(zz - center[0]) <= width[0] / 2, True, False)
return np.logical_and(np.logical_and(xx_shape, yy_shape), zz_shape)
def disc(dim, center, radius, height, axis='z'):
"""Create the shape of a cylindrical disc in x-, y-, or z-direction.
Parameters
----------
dim : tuple (N=3)
The dimensions of the grid `(z, y, x)`.
center : tuple (N=3)
The center of the disc in pixel coordinates `(z, y, x)`.
radius : float
The radius of the disc in pixel coordinates.
height : float
The height of the disc in pixel coordinates.
axis : {'z', 'y', 'x'}, optional
The orientation of the disc axis. The default is 'z'.
Returns
-------
shape : :class:`~numpy.ndarray` (N=3)
The shape as a 3D-array.
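Examples
--------
A minimal example (grid and disc sizes are chosen only for illustration):
>>> d = disc(dim=(8, 8, 8), center=(4, 4, 4), radius=3, height=4)
>>> bool(d[4, 4, 4]), bool(d[0, 4, 4])
(True, False)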
"""
_log.debug('Calling disc')
assert np.shape(dim) == (3,), 'Parameter dim has to be a tuple of length 3!'
assert np.shape(center) == (3,), 'Parameter center has to be a tuple of length 3!'
assert radius > 0 and np.shape(radius) == (), 'Radius has to be a positive scalar value!'
assert height > 0 and np.shape(height) == (), 'Height has to be a positive scalar value!'
assert axis in {'z', 'y', 'x'}, 'Axis has to be x, y or z (as a string)!'
zz, yy, xx = np.indices(dim) + 0.5
xx -= center[2]
yy -= center[1]
zz -= center[0]
if axis == 'z':
uu, vv, ww = xx, yy, zz
elif axis == 'y':
uu, vv, ww = zz, xx, yy
elif axis == 'x':
uu, vv, ww = yy, zz, xx
else:
raise ValueError('{} is not a valid argument (use x, y or z)'.format(axis))
return np.logical_and(np.where(np.hypot(uu, vv) <= radius, True, False),
np.where(abs(ww) <= height / 2, True, False))
def ellipse(dim, center, width, height, axis='z'):
"""Create the shape of an elliptical cylinder in x-, y-, or z-direction.
Parameters
----------
dim : tuple (N=3)
The dimensions of the grid `(z, y, x)`.
center : tuple (N=3)
The center of the ellipse in pixel coordinates `(z, y, x)`.
width : tuple (N=2)
Length of the two axes of the ellipse.
height : float
The height of the ellipse in pixel coordinates.
axis : {'z', 'y', 'x'}, optional
The orientation of the ellipse axis. The default is 'z'.
Returns
-------
shape : :class:`~numpy.ndarray` (N=3)
The shape as a 3D-array.
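Examples
--------
A minimal example (grid and ellipse sizes are chosen only for illustration):
>>> e = ellipse(dim=(8, 16, 16), center=(4, 8, 8), width=(6, 12), height=4)
>>> e.shape, bool(e[4, 8, 8])
((8, 16, 16), True)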
"""
_log.debug('Calling ellipse')
assert np.shape(dim) == (3,), 'Parameter dim has to be a tuple of length 3!'
assert np.shape(center) == (3,), 'Parameter center has to be a tuple of length 3!'
assert np.shape(width) == (2,), 'Parameter width has to be a tuple of length 2!'
assert height > 0 and np.shape(height) == (), 'Height has to be a positive scalar value!'
assert axis in {'z', 'y', 'x'}, 'Axis has to be x, y or z (as a string)!'
zz, yy, xx = np.indices(dim) + 0.5
xx -= center[2]
yy -= center[1]
zz -= center[0]
if axis == 'z':
uu, vv, ww = xx, yy, zz
elif axis == 'y':
uu, vv, ww = xx, zz, yy
elif axis == 'x':
uu, vv, ww = yy, zz, xx
else:
raise ValueError('{} is not a valid argument (use x, y or z)'.format(axis))
distance = np.hypot(uu / (width[1] / 2), vv / (width[0] / 2))
return np.logical_and(np.where(distance <= 1, True, False),
np.where(abs(ww) <= height / 2, True, False))
def ellipsoid(dim, center, width):
"""Create the shape of an ellipsoid.
Parameters
----------
dim : tuple (N=3)
The dimensions of the grid `(z, y, x)`.
center : tuple (N=3)
The center of the ellipsoid in pixel coordinates `(z, y, x)`.
width : tuple (N=3)
The width of the ellipsoid `(z, y, x)`.
Returns
-------
shape : :class:`~numpy.ndarray` (N=3)
The shape as a 3D-array.
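Examples
--------
A minimal example (grid and ellipsoid sizes are chosen only for illustration):
>>> e = ellipsoid(dim=(16, 16, 16), center=(8, 8, 8), width=(6, 10, 14))
>>> bool(e[8, 8, 8]), bool(e[1, 8, 8])
(True, False)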
"""
_log.debug('Calling ellipsoid')
assert np.shape(dim) == (3,), 'Parameter dim has to be a tuple of length 3!'
assert np.shape(center) == (3,), 'Parameter center has to be a tuple of length 3!'
assert np.shape(width) == (3,), 'Parameter width has to be a tuple of length 3!'
zz, yy, xx = np.indices(dim) + 0.5
distance = np.sqrt(((xx - center[2]) / (width[2] / 2)) ** 2
+ ((yy - center[1]) / (width[1] / 2)) ** 2
+ ((zz - center[0]) / (width[0] / 2)) ** 2)
return np.where(distance <= 1, True, False)
def sphere(dim, center, radius):
"""Create the shape of a sphere.
Parameters
----------
dim : tuple (N=3)
The dimensions of the grid `(z, y, x)`.
center : tuple (N=3)
The center of the sphere in pixel coordinates `(z, y, x)`.
radius : float
The radius of the sphere in pixel coordinates.
Returns
-------
shape : :class:`~numpy.ndarray` (N=3)
The shape as a 3D-array.
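Examples
--------
A minimal example (grid size and radius are chosen only for illustration):
>>> s = sphere(dim=(8, 8, 8), center=(4, 4, 4), radius=3)
>>> bool(s[4, 4, 4]), bool(s[0, 0, 0])
(True, False)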
"""
_log.debug('Calling sphere')
assert np.shape(dim) == (3,), 'Parameter dim has to be a tuple of length 3!'
assert np.shape(center) == (3,), 'Parameter center has to be a tuple of length 3!'
assert radius > 0 and np.shape(radius) == (), 'Radius has to be a positive scalar value!'
zz, yy, xx = np.indices(dim) + 0.5
distance = np.sqrt((xx - center[2]) ** 2 + (yy - center[1]) ** 2 + (zz - center[0]) ** 2)
return np.where(distance <= radius, True, False)
def filament(dim, pos, axis='y'):
"""Create the shape of a filament.
Parameters
----------
dim : tuple (N=3)
The dimensions of the grid `(z, y, x)`.
pos : tuple (N=2)
The position of the filament in pixel coordinates `(coord1, coord2)`.
`coord1` and `coord2` stand for the two axes, which are perpendicular to `axis`. For
the default case (`axis = y`), it is `(coord1, coord2) = (z, x)`.
axis : {'y', 'x', 'z'}, optional
The orientation of the filament axis. The default is 'y'.
Returns
-------
shape : :class:`~numpy.ndarray` (N=3)
The shape as a 3D-array.
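Examples
--------
A minimal example (grid size and position are chosen only for illustration):
>>> f = filament(dim=(3, 4, 5), pos=(1, 2))
>>> f.shape, int(f.sum())  # default axis 'y': one column of True values
((3, 4, 5), 4)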
"""
_log.debug('Calling filament')
assert np.shape(dim) == (3,), 'Parameter dim has to be a tuple of length 3!'
assert np.shape(pos) == (2,), 'Parameter pos has to be a tuple of length 2!'
assert axis in {'z', 'y', 'x'}, 'Axis has to be x, y or z (as a string)!'
shape = np.zeros(dim, dtype=bool)
if axis == 'z':
shape[:, pos[0], pos[1]] = True
elif axis == 'y':
shape[pos[0], :, pos[1]] = True
elif axis == 'x':
shape[pos[0], pos[1], :] = True
return shape
def pixel(dim, pixel):
"""Create the shape of a single pixel.
Parameters
----------
dim : tuple (N=3)
The dimensions of the grid `(z, y, x)`.
pixel : tuple (N=3)
The coordinates of the pixel `(z, y, x)`.
Returns
-------
shape : :class:`~numpy.ndarray` (N=3)
The shape as a 3D-array.
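Examples
--------
A minimal example (grid size and pixel position are chosen only for illustration):
>>> p = pixel(dim=(3, 3, 3), pixel=(0, 1, 2))
>>> int(p.sum()), bool(p[0, 1, 2])
(1, True)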
"""
_log.debug('Calling pixel')
assert np.shape(dim) == (3,), 'Parameter dim has to be a tuple of length 3!'
assert np.shape(pixel) == (3,), 'Parameter pixel has to be a tuple of length 3!'
shape = np.zeros(dim, dtype=bool)
shape[pixel] = True
return shape