Commit 4b9d4ba7 authored by Jan Caron

Cleanup!

Deleted print statements, superfluous log calls and TODOs; added a few comments.
parent 69332eb1
@@ -68,7 +68,7 @@ pypi:
script:
- pip install twine
- python setup.py sdist bdist_wheel
- twine upload -u __token__ -p $PYPI_ACCESS_TOKEN dist/*
- twine upload -u __token__ -p $PYPI_ACCESS_TOKEN dist/* # -u user -p password upload_source
rules: # similar to only/except, but newer!
# Job is executed if the branch is master AND a tag is being built which matches the regular expression!
# ONLY executes if the commit to master has a tag; ^:start, $:end, valid example: "1.2.3", no "-dev" at the end!
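For reference, such a tag rule can be checked locally with a short Python snippet; the pattern below is a hypothetical example consistent with the comment (plain "1.2.3" accepted, "-dev" suffixes rejected), not the expression used in the actual CI file.

import re

TAG_PATTERN = re.compile(r'^\d+\.\d+\.\d+$')  # hypothetical: three numeric parts, nothing appended

for tag in ('1.2.3', '0.1.0.dev0', '1.2.3-dev'):
    print(tag, bool(TAG_PATTERN.match(tag)))  # only '1.2.3' matches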
@@ -13,6 +13,7 @@ Welcome to EMPyRe's documentation!
:caption: Contents:
fields
vis
Indices and tables
The vis visualization submodule
===============================
vis docu here!
\ No newline at end of file
@@ -27,7 +27,7 @@ dependencies:
- matplotlib=3.1
- Pillow=6.1
- cmocean=2.0
#- qt=5.9 # TODO: only needed for mayavi?
#- qt=5.9 # TODO: only needed for mayavi? which version?
- mayavi=4.6 # TODO: Get rid of!
# Testing:
- pytest=5.0
@@ -7,12 +7,12 @@
[metadata]
name = empyre
version = 0.1.0.dev0
version = 0.0.0
author = Jan Caron
author-email = j.caron@fz-juelich.de
description = Electron Microscopy Python Reconstruction
long-description = file: README.rst
url = https://jugit.fz-juelich.de/empyre/empyre
url = https://iffgit.fz-juelich.de/empyre/empyre
license = GPLv3
classifiers =
Development Status :: 3 - Alpha
@@ -38,12 +38,3 @@ for style_path in style_files:
# Run setup (reads metadata & options from setup.cfg):
print(R'running setup.py')
setup()
# TODO: HOW TO GET JUTIL???
# TODO: Also create conda recipe!
# TODO: Handle extras via metapackage (depends on pyramid-base that holds the core code and extras?)
# TODO: https://docs.conda.io/projects/conda-build/en/latest/user-guide/tutorials/build-pkgs.html
# TODO: MATPLOTLIB STYLE INSTALL: https://stackoverflow.com/questions/35851201/how-can-i-share-matplotlib-style
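The style_files loop in the hunk context above hints at installing matplotlib style sheets. A common approach (a hedged sketch, not necessarily what setup.py does) is to copy bundled *.mplstyle files into matplotlib's user stylelib directory; the file names other than 'empyre-save' are illustrative.

import os
import shutil
import matplotlib

style_files = ['empyre-image.mplstyle', 'empyre-plot.mplstyle', 'empyre-save.mplstyle']  # illustrative names
stylelib_dir = os.path.join(matplotlib.get_configdir(), 'stylelib')
os.makedirs(stylelib_dir, exist_ok=True)
for style_path in style_files:
    shutil.copy(style_path, stylelib_dir)  # afterwards usable via plt.style.use('empyre-save') etc.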
@@ -16,14 +16,6 @@ from scipy.ndimage import interpolation
__all__ = ['Field']
# TODO: Forward numpys ndim parameter here?
# TODO: Search for .data ! Not usable anymore!
# TODO: overwrite property ndim?
# TODO: get rid of some _log and print stuff!
class Field(NDArrayOperatorsMixin):
"""Container class for storing multidimensional scalar or vector fields.
@@ -168,26 +160,9 @@ class Field(NDArrayOperatorsMixin):
outputs = kwargs.pop('out', ())
outputs = kwargs.pop('out', (None,)*ufunc.nout) # Defaults to tuple of None (currently: nout=1 all the time)
outputs_arr = tuple([np.asarray(out) if isinstance(out, Field) else out for out in outputs])
nl = '\n '
str_inputs = [str(inp) for inp in inputs]
self._log.debug(f' ufunc: {ufunc}')
self._log.debug(f' method: {method}')
self._log.debug(f' inputs: {nl.join(str_inputs)}')
self._log.debug(f' # inp.: {len(inputs)}')
self._log.debug(f' outputs: {str(outputs)}')
self._log.debug(f' out_arr: {str(outputs_arr)}')
self._log.debug(f' kwargs: {kwargs}')
self._log.debug(f' nin: {ufunc.nin}')
self._log.debug(f' nout: {ufunc.nout}')
self._log.debug(f' self: {str(self)}')
# Cannot handle items that have __array_ufunc__ (other than our own).
print(f'inputs+outputs: {inputs+outputs}')
for item in inputs + outputs:
print(f'type(item): {type(item)}')
print(f'hasattr(item, __array_ufunc__): {hasattr(item, "__array_ufunc__")}')
print(f'isinstance(item, Field): {isinstance(item, Field)}')
if hasattr(item, '__array_ufunc__') and not isinstance(item, Field): # Something else with __array_ufunc__:
print(f'type(item).__array_ufunc__: {type(item).__array_ufunc__}')
if type(item).__array_ufunc__ is not np.ndarray.__array_ufunc__: # Can't handle other overrides:
return NotImplemented
# TODO: BIGGEST NOTE HERE: Delegate work to ndarray.__array_ufunc__!
@@ -201,13 +176,12 @@ class Field(NDArrayOperatorsMixin):
# TODO: for security, newaxis is not allowed (most other indexing works though), because scale would be unknown!
# 1 input (has to be a Field, otherwise we wouldn't be here):
if len(inputs) == 1:
self._log.debug('-inputs: 1 ------')
self._log.debug(f'__array_ufunc__ inputs: {len(inputs)}')
field = inputs[0]
scale_new = field.scale
vector_new = field.vector
# Preprocess axis keyword if it exists:
axis = kwargs.get('axis', False) # Default must not be None, because None is a possible setting!
self._log.debug(f' axis: {axis}')
full_reduction = False
if axis is not False:
ax_full = tuple(range(len(field.dim))) # All axes (minus a possible component axis for vector Fields)!
@@ -220,59 +194,39 @@ class Field(NDArrayOperatorsMixin):
if ax_full_wc[-1] in axis: # User explicitly wants component reduction (can only be true for vector):
vector_new = False # Force scalar field!
scale_new = tuple([s for i, s in enumerate(field.scale) if i not in axis]) # Drop axis from scale!
self._log.debug(f' kwargs: {kwargs}')
self._log.debug(f' full_reduction: {full_reduction}')
inputs_arr = np.asarray(field) # Convert inputs that are Fields to ndarrays to avoid recursion!
data_new = getattr(ufunc, method)(inputs_arr, out=outputs_arr, **kwargs)
if full_reduction: # Premature return because the result is no longer a Field:
return data_new
# More than 1 input (at least one has to be a Field, otherwise we wouldn't be here):
elif len(inputs) > 1:
self._log.debug(f'-inputs: {len(inputs)} ------')
is_field = [isinstance(inp, Field) for inp in inputs]
self._log.debug(f'is_field: {is_field}')
is_vector = [getattr(inp, 'vector', False) for inp in inputs]
self._log.debug(f'is_vector: {is_vector}')
# Determine scale:
if np.sum(is_field) > 1: # More than one input is a Field object:
scales = [inp.scale for i, inp in enumerate(inputs) if is_field[i]] # Only takes scales of Field obj.!
self._log.debug(f'scales: {scales}')
scale_new = scales[0]
err_msg = f'Scales of all Field objects must match! Given scales: {scales}!'
assert all(scale == scale_new for scale in scales), err_msg
else: # Only one input is a field, pick the scale of that one:
scale_new = inputs[np.argmax(is_field)].scale # argmax returns the index of first True!
self._log.debug(f'scale_new: {scale_new}')
# Determine vector:
vector_new = True if np.any(is_vector) else False # Output is vector field if any input is a vector field!
self._log.debug(f'vector_new: {vector_new}')
if np.sum(is_vector) > 1: # More than one input is a vector Field object:
ncomps = [inp.ncomp for i, inp in enumerate(inputs) if is_vector[i]] # Only takes ncomp of v.-Fields!
self._log.debug(f'ncomps: {ncomps}')
err_msg = f'# of components of all Field objects must match! Given ncomps: {ncomps}!'
assert all(ncomp == ncomps[0] for ncomp in ncomps), err_msg
# Append new axis at the end of non vector objects to broadcast to components:
if np.any(is_vector):
inputs = list(inputs)
self._log.debug(f'len(input_arr): {len(inputs)}')
for i, inp in enumerate(inputs):
self._log.debug(f'i: {i}')
self._log.debug(f'is_vector[i]: {is_vector[i]}')
self._log.debug(f'isinstance(inp, Number): {isinstance(inp, Number)}')
if not is_vector[i] and not isinstance(inp, Number): # Numbers work for broadcasting anyway:
inp_arr = np.asarray(inp) # For broadcasting, try to cast as ndarray!
self._log.debug(f'inp_arr.shape: {inp_arr.shape}')
self._log.debug(f'inp.shape), len(scale_new): {len(np.asarray(inp).shape)}, {len(scale_new)}')
if len(np.asarray(inp).shape) == len(scale_new): # No. of dimensions w/o comp., have to match!
self._log.debug(f'inputs[i]: {inputs[i]}')
inputs[i] = np.asarray(inputs[i])[..., np.newaxis]
inputs[i] = np.asarray(inputs[i])[..., np.newaxis] # Broadcasting, try to cast as ndarray!
inputs = tuple(inputs)
# Convert inputs that are Fields to ndarrays to avoid recursion and determine data_new:
inputs_arr = tuple([np.asarray(inp) if isinstance(inp, Field) else inp for inp in inputs])
self._log.debug(f'kwargs: {kwargs}')
data_new = getattr(ufunc, method)(*inputs_arr, out=outputs_arr, **kwargs)
self._log.debug(f'data_new.shape: {data_new.shape}')
# TODO: Test clip (also ufunc?? think not...) seems to work, but what about others?
# Return results:
result = Field(data_new, scale_new, vector_new)
return result
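The pattern in this hunk follows NumPy's __array_ufunc__ protocol: convert Field inputs to plain ndarrays, let the ufunc do the numerical work, and re-wrap the result. A stripped-down, self-contained illustration of that delegation (a toy class, not EMPyRe's actual Field) could look like this:

import numpy as np
from numpy.lib.mixins import NDArrayOperatorsMixin

class Wrapped(NDArrayOperatorsMixin):
    """Toy container that delegates ufuncs to ndarray and re-wraps the result."""
    def __init__(self, data):
        self.data = np.asarray(data)

    def __array__(self, dtype=None):
        return self.data if dtype is None else self.data.astype(dtype)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        # Unwrap our own instances, refuse other overriding types (as in the hunk above):
        arrays = []
        for inp in inputs:
            if isinstance(inp, Wrapped):
                arrays.append(inp.data)
            elif hasattr(inp, '__array_ufunc__') and type(inp).__array_ufunc__ is not np.ndarray.__array_ufunc__:
                return NotImplemented
            else:
                arrays.append(inp)
        result = getattr(ufunc, method)(*arrays, **kwargs)  # delegate work to ndarray!
        return Wrapped(result) if isinstance(result, np.ndarray) else result

a = Wrapped([1.0, 2.0, 3.0])
print(np.add(a, 1).data)   # [2. 3. 4.]
print((a * 2).data)        # [2. 4. 6.] (operators come from NDArrayOperatorsMixin)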
@@ -373,15 +327,11 @@ class Field(NDArrayOperatorsMixin):
The padded `Field` object.
"""
print(f'pad_width: {pad_width}')
if isinstance(pad_width, Number): # Padding is the same for each dimension (make sure it is a tuple)!
pad_width = (pad_width,) * len(self.dim)
print(f'pad_width: {pad_width}')
pad_width = [(p, p) if isinstance(p, Number) else p for p in pad_width]
print(f'pad_width: {pad_width}')
if self.vector: # Append zeros to padding, so component axis stays as is:
pad_width = pad_width + [(0, 0)]
print(f'pad_width: {pad_width}')
data_new = np.pad(self.data, pad_width, mode, **kwargs)
return Field(data_new, self.scale, self.vector)
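The pad_width normalisation above can be illustrated in isolation with plain NumPy; the shapes below are made up, and the trailing (0, 0) entry keeps a hypothetical component axis untouched, as in the vector-field branch:

import numpy as np
from numbers import Number

data = np.zeros((4, 4, 3))       # hypothetical 2D vector field: (y, x, components)
pad_width = 1                    # same padding for every spatial dimension
if isinstance(pad_width, Number):
    pad_width = (pad_width,) * 2                                         # -> (1, 1), one entry per spatial axis
pad_width = [(p, p) if isinstance(p, Number) else p for p in pad_width]  # -> [(1, 1), (1, 1)]
pad_width = pad_width + [(0, 0)]                                         # component axis stays as is
padded = np.pad(data, pad_width, mode='constant')
print(padded.shape)              # (6, 6, 3)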
@@ -411,16 +361,12 @@ class Field(NDArrayOperatorsMixin):
assert all([n_i >= 0 for n_i in n]), 'All entries of n must be positive integers!'
# Pad if necessary (use padded 'field' from here on), formula for clarity: (n - dim % n) % n
pad_width = [(0, (n[i] - self.dim[i] % n[i]) % n[i]) for i in range(len(self.dim))]
print(f'bin: pad_width: {pad_width}')
field = self.pad(pad_width, mode='edge')
# Create new shape used for binning (mean over every second axis will be taken):
bin_shape = list(np.ravel([(field.dim[i]//n[i], n[i]) for i in range(len(field.dim))]))
print(f'bin_shape: {bin_shape}')
mean_axes = np.arange(1, 2*len(field.dim), 2) # every 2nd axis!
print(f'mean_axes: {mean_axes}')
if self.vector: # Vector field:
bin_shape += [field.ncomp] # Append component axis (they stay unchanged)
print(f'bin_shape: {bin_shape}')
# Bin data and scale accordingly:
data_new = field.data.reshape(bin_shape).mean(axis=tuple(mean_axes))
scale_new = tuple([field.scale[i] * n[i] for i in range(len(field.dim))])
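The reshape-and-mean binning can be sketched standalone; a scalar 2D array and a binning factor per axis are assumed here, mirroring the (n - dim % n) % n formula for the edge padding:

import numpy as np

data = np.arange(30, dtype=float).reshape(5, 6)        # dim = (5, 6)
n = (2, 3)                                             # binning factor per axis
pad_width = [(0, (n[i] - data.shape[i] % n[i]) % n[i]) for i in range(data.ndim)]   # -> [(0, 1), (0, 0)]
padded = np.pad(data, pad_width, mode='edge')          # dim becomes (6, 6), divisible by n
bin_shape = [padded.shape[0] // n[0], n[0], padded.shape[1] // n[1], n[1]]           # (3, 2, 2, 3)
binned = padded.reshape(bin_shape).mean(axis=(1, 3))   # mean over every 2nd axis
print(binned.shape)                                    # (3, 2)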
@@ -573,6 +519,7 @@ class Field(NDArrayOperatorsMixin):
if self.vector: # Vector fields need to scale components according to masked amplitude
mask_vec = np.logical_and(mask, amp <= vmax) # Only vmax is important!
data = amp / np.where(mask_vec, 1, amp) # TODO: needs testing!
# TODO: Test np.clip(field) (also ufunc?? think not...) seems to work, but what about others?
else: # For scalar fields, just delegate to the numpy function:
data = np.clip(self.data, vmin, vmax)
return Field(data, self.scale, self.vector)
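For the vector branch, the intent (as far as can be read from the snippet, which the author marks as needing testing) appears to be rescaling vectors whose amplitude exceeds vmax while leaving the rest alone. One possible interpretation of that kind of amplitude clipping, as a standalone sketch:

import numpy as np

vectors = np.array([[3.0, 4.0], [0.3, 0.4]])           # amplitudes 5.0 and 0.5
vmax = 1.0
amp = np.sqrt(np.sum(vectors**2, axis=-1, keepdims=True))
factor = np.where(amp > vmax, vmax / amp, 1.0)         # shrink only vectors that are too long
clipped = vectors * factor
print(np.sqrt(np.sum(clipped**2, axis=-1)))            # [1.  0.5]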
@@ -17,11 +17,6 @@ __all__ = ['create_vector_homog', 'create_vector_vortex', 'create_vector_skyrmio
_log = logging.getLogger(__name__)
# TODO: TEST!!!
# TODO: ALL Docstrings!
# TODO: LOGGING!
def create_vector_homog(dim, phi=0, theta=None, scale=1):
"""Field subclass implementing a homogeneous vector field with 3 components in 2 or 3 dimensions.
@@ -72,6 +67,7 @@ def create_vector_vortex(dim, center=None, core_r=0, oop_r=None, axis=0, scale=1
only 2D. To invert chirality, multiply the resulting Field object by -1. # TODO: WRONG?
"""
_log.debug('Calling create_vector_vortex')
assert len(dim) in (2, 3), 'Vortex can only be built in 2 or 3 dimensions!'
# Find indices of the vortex plane axes:
idx_uv = [0, 1, 2]
@@ -176,6 +172,7 @@ def create_vector_skyrmion(dim, center=None, phi_0=0, skyrm_d=None, wall_d=None,
theta /= np.abs(theta).max() / np.pi
return theta
_log.debug('Calling create_vector_skyrmion')
assert len(dim) in (2, 3), 'Skyrmion can only be built in 2 or 3 dimensions!'
# Find indices of the skyrmion plane axes:
idx_uv = [0, 1, 2]
@@ -202,11 +199,10 @@ def create_vector_skyrmion(dim, center=None, phi_0=0, skyrm_d=None, wall_d=None,
rr = np.hypot(coords_uv[0], coords_uv[1])
phi = np.arctan2(coords_uv[0], coords_uv[1]) - phi_0
theta = _theta(rr)
w_comp = np.cos(theta)
v_comp = np.sin(theta) * np.sin(phi)
u_comp = np.sin(theta) * np.cos(phi)
# Expansion to 3D if necessary and component shuffling:
if len(dim) == 3: # Expand to 3D:
w_comp = np.expand_dims(w_comp, axis=axis)
v_comp = np.expand_dims(v_comp, axis=axis)
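The component construction in this hunk is a standard spherical-to-Cartesian conversion (w out-of-plane, u/v in-plane); a minimal check for a single point:

import numpy as np

theta = np.pi / 3                       # polar angle from the out-of-plane axis
phi = np.pi / 4                         # in-plane azimuth
w = np.cos(theta)                       # out-of-plane component
v = np.sin(theta) * np.sin(phi)
u = np.sin(theta) * np.cos(phi)
print(np.hypot(np.hypot(u, v), w))      # 1.0 -> unit vector, as expected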
@@ -263,25 +259,17 @@ def create_vector_singularity(dim, center=None, scale=1):
coordinates (e.g. coordinate 1 lies between the first and the second pixel).
""" # TODO: What does negating do here? Senke / Quelle (YES IT DOES!)? swell and sink? ISSUE! INVITE PEOPLE!!!
_log.debug('Calling create_vector_singularity')
# Find default values:
if center is None:
center = tuple([d / 2 for d in dim])
assert len(dim) == len(center), f"Length of dim ({len(dim)}) and center ({len(center)}) don't match!"
# Setup coordinates, shape is (c, z, y, x), if 3D, or (c, y, x), if 2D (c: components):
coords = np.indices(dim) + 0.5 # 0.5 to get to pixel/voxel center!
print(f'coords.shape: {coords.shape}')
print(f'center: {center}')
print(f'dim: {dim}')
center = np.asarray(center, dtype=float)
bc_shape = (len(dim),) + (1,)*len(dim) # Shape for broadcasting, (3,1,1,1) for 3D, (2,1,1) for 2D!
print(f'bc_shape: {bc_shape}')
coords = coords - center.reshape(bc_shape) # Shift by center (append 1s for broadcasting)!
rr = np.sqrt(np.sum([coords[i]**2 for i in range(len(dim))], axis=0))
print(f'rr.max(): {rr.max()}')
print(f'coords.shape: {coords.shape}')
print(f'rr.shape: {rr.shape}')
data = coords / (rr + 1E-30) # Normalise amplitude (keep direction), rr (z,y,x) is broadcasted to data (c,z,y,x)!
print(f'np.moveaxis(data, 0, -1).shape: {np.moveaxis(data, 0, -1).shape}')
print(f'data: {data}')
data = data.T # (c,z,y,x) -> (x,y,z,c)
return Field(data=data, scale=scale, vector=True)
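The broadcasting trick used above (reshaping the center to (len(dim), 1, 1, 1) so it subtracts from coords of shape (c, z, y, x)) can be checked in isolation; a small sketch, assuming a 3D case:

import numpy as np

dim = (2, 3, 4)                                        # (z, y, x)
coords = np.indices(dim) + 0.5                         # shape (3, 2, 3, 4): (c, z, y, x), voxel centers
center = np.asarray([d / 2 for d in dim], dtype=float)
bc_shape = (len(dim),) + (1,) * len(dim)               # (3, 1, 1, 1), broadcasts over (c, z, y, x)
coords = coords - center.reshape(bc_shape)
rr = np.sqrt(np.sum(coords**2, axis=0))                # radial distance, shape (2, 3, 4)
data = coords / (rr + 1E-30)                           # unit vectors pointing away from the center
print(np.allclose(np.sqrt(np.sum(data**2, axis=0)), 1))  # True (up to the 1E-30 regularisation)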
@@ -65,10 +65,6 @@ class Colormap3D(colors.Colormap, metaclass=abc.ABCMeta):
self._log.debug('Calling rgb_from_vector')
x, y, z = np.asarray(vector)
R = np.sqrt(x ** 2 + y ** 2 + z ** 2)
print(f'----------RGB')
print(f'vector: {vector}')
print(f'R: {R}')
print(f'vector.shape: {vector.shape}')
R_max = vmax if vmax is not None else R.max() + 1E-30
# FIRST color dimension: HUE (1D ring/angular direction)
phi = np.asarray(np.arctan2(y, x))
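The hue computation that starts here maps the in-plane angle to the colour wheel. One way to normalise the arctan2 output to a [0, 1) hue value, as a hedged sketch rather than the colormap's exact mapping:

import numpy as np

x, y = np.asarray([1.0, 0.0, -1.0]), np.asarray([0.0, 1.0, 0.0])
phi = np.arctan2(y, x)                   # angles in (-pi, pi]
hue = (phi / (2 * np.pi)) % 1            # -> [0, 1): 0.0, 0.25, 0.5
print(hue)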
@@ -502,8 +498,7 @@ def interpolate_color(fraction, start, end):
cmaps = {'cubehelix_standard': ColormapCubehelix(),
'cubehelix_reverse': ColormapCubehelix(reverse=True),
'cubehelix_circular': ColormapCubehelix(start=1, rot=1,
minLight=0.5, maxLight=0.5, sat=2),
'cubehelix_circular': ColormapCubehelix(start=1, rot=1, minLight=0.5, maxLight=0.5, sat=2),
'perception_circular': ColormapPerception(),
'hls_circular': ColormapHLS(),
'classic_circular': ColormapClassic(),
@@ -127,7 +127,7 @@ def contour(field, axis=None, **kwargs):
# Create coordinates (respecting the field scale, +0.5: pixel center!):
vv, uu = (np.indices(squeezed_field.dim) + 0.5) * np.asarray(squeezed_field.scale)[:, None, None]
# Set kwargs defaults without overriding possible user input:
kwargs.setdefault('levels', [0.5]) # TODO: Check all kwargs.setdefault if shiftable to style sheet (only universal)
kwargs.setdefault('levels', [0.5])
kwargs.setdefault('colors', 'k')
kwargs.setdefault('linestyles', 'dotted')
kwargs.setdefault('linewidths', 2)
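The dict.setdefault calls fill in defaults without overriding anything the user passed; a tiny standalone illustration of that pattern:

kwargs = {'colors': 'r'}                 # pretend the user passed colors='r'
kwargs.setdefault('levels', [0.5])       # added, the user did not set it
kwargs.setdefault('colors', 'k')         # ignored, the user value wins
print(kwargs)                            # {'colors': 'r', 'levels': [0.5]}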
@@ -361,7 +361,7 @@ def quiver(field, axis=None, color_angles=False, cmap=None, n_bin='auto', bin_wi
elif squeezed_indices[0] == 2: # Slice of the zy-plane with x squeezed:
u_comp = y_comp
v_comp = z_comp
# Set specific defaults for quiver kwargs: # TODO: Check if they can go into empyre style sheet:
# Set specific defaults for quiver kwargs:
kwargs.setdefault('edgecolor', colors.cmaps['transparent_black'](amplitude).reshape(-1, 4))
kwargs.setdefault('scale', 1/np.max(squeezed_field.scale))
kwargs.setdefault('width', np.max(squeezed_field.scale))
@@ -5,6 +5,7 @@
"""This module provides helper functions to the vis module."""
import logging
from numbers import Number
import numpy as np
@@ -14,6 +15,7 @@ from ..fields.field import Field
__all__ = ['new', 'savefig', 'calc_figsize']
_log = logging.getLogger(__name__)
def new(nrows=1, ncols=1, mode='image', figsize=None, textwidth=None, width_scale=1, aspect=None, **kwargs):
@@ -64,6 +66,7 @@ def new(nrows=1, ncols=1, mode='image', figsize=None, textwidth=None, width_scal
additional kwargs are passed to `~matplotlib.pyplot.subplots`.
"""
_log.debug('Calling new')
assert mode in ('image', 'plot'), "mode has to be 'image', or 'plot'!"
if figsize is None and textwidth is not None: # Only then is all this necessary:
if aspect is None:
@@ -95,7 +98,9 @@ def savefig(fname, **kwargs):
-----
Uses the 'empyre-save' stylesheet (installed together with EMPyRe) to control the saving behaviour. Any kwargs are
passed to :func:`~matplotlib.pyplot.savefig`.
"""
_log.debug('Calling savefig')
with plt.style.context('empyre-save'):
plt.savefig(fname, **kwargs)
@@ -122,8 +127,10 @@ def calc_figsize(textwidth, width_scale=1, aspect=1):
Notes
-----
Based on code from Florian Winkler.
Based on a snippet from Florian Winkler.
"""
_log.debug('Calling calc_figsize')
GOLDEN_RATIO = (1 + np.sqrt(5)) / 2 # Aesthetic ratio!
INCHES_PER_POINT = 1.0 / 72.27 # Convert points to inch, LaTeX constant, apparently...
textwidth_in = textwidth * INCHES_PER_POINT # Width of the text in inches
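The remainder of calc_figsize is cut off in this hunk. A plausible completion under the constants shown (width taken from the LaTeX textwidth in points, height derived from the golden ratio) could look like the following sketch; it is an assumption, not necessarily the function's exact body:

import numpy as np

def calc_figsize_sketch(textwidth, width_scale=1, aspect=1):
    """Hedged sketch: figure size in inches from a LaTeX textwidth given in points."""
    GOLDEN_RATIO = (1 + np.sqrt(5)) / 2      # aesthetic ratio
    INCHES_PER_POINT = 1.0 / 72.27           # convert LaTeX points to inches
    textwidth_in = textwidth * INCHES_PER_POINT
    fig_width = textwidth_in * width_scale
    fig_height = fig_width / GOLDEN_RATIO * aspect
    return (fig_width, fig_height)

print(calc_figsize_sketch(448.13, width_scale=0.9))   # e.g. for a typical LaTeX \textwidth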