import numpy as np


class Field3d:

    def __init__(self,data,origin,spacing,deep=False):
        assert len(origin)==3, "'origin' must be of length 3"
        assert len(spacing)==3, "'spacing' must be of length 3"
        assert isinstance(data,np.ndarray), "'data' must be numpy.ndarray."
        if deep:
            self.data = data.copy()
        else:
            self.data = data
        self.origin = tuple([float(x) for x in origin])
        self.spacing = tuple([float(x) for x in spacing])
        self.eps_collapse = 1e-8
        self._dim = None
        return

    def __str__(self):
        # Use a local name 's' instead of shadowing the builtin 'str'.
        s = 'Field3d with\n'
        s += '  dimension: {}, {}, {}\n'.format(*self.dim())
        s += '  origin:    {:G}, {:G}, {:G}\n'.format(*self.origin)
        s += '  spacing:   {:G}, {:G}, {:G}\n'.format(*self.spacing)
        s += '  datatype:  {}'.format(self.dtype())
        return s

    def __add__(self,other):
        if isinstance(other,Field3d):
            assert self.has_same_grid(other), "Grid mismatch."
            return Field3d(self.data+other.data,self.origin,self.spacing)
        else:
            return Field3d(self.data+other,self.origin,self.spacing)

    def __sub__(self,other):
        if isinstance(other,Field3d):
            assert self.has_same_grid(other), "Grid mismatch."
            return Field3d(self.data-other.data,self.origin,self.spacing)
        else:
            return Field3d(self.data-other,self.origin,self.spacing)

    def __mul__(self,other):
        if isinstance(other,Field3d):
            assert self.has_same_grid(other), "Grid mismatch."
            return Field3d(self.data*other.data,self.origin,self.spacing)
        else:
            return Field3d(self.data*other,self.origin,self.spacing)

    def __truediv__(self,other):
        if isinstance(other,Field3d):
            assert self.has_same_grid(other), "Grid mismatch."
            return Field3d(self.data/other.data,self.origin,self.spacing)
        else:
            return Field3d(self.data/other,self.origin,self.spacing)

    def __radd__(self,other):
        return Field3d(other+self.data,self.origin,self.spacing)

    def __rmul__(self,other):
        return Field3d(other*self.data,self.origin,self.spacing)

    def __pow__(self,other):
        return Field3d(self.data**other,self.origin,self.spacing)
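
    # A minimal usage sketch for the arithmetic operators above (hypothetical
    # values): fields on the same grid combine point-wise and return a new
    # Field3d, while scalars broadcast as in numpy.
    #
    #   a = Field3d(np.ones((4,4,4)),origin=(0.,0.,0.),spacing=(1.,1.,1.))
    #   b = Field3d(np.full((4,4,4),2.0),origin=(0.,0.,0.),spacing=(1.,1.,1.))
    #   c = 2.0*a + b**2 - 1.0   # c.data is 5.0 everywhere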

    def __iadd__(self,other):
        if isinstance(other,Field3d):
            assert self.has_same_grid(other), "Grid mismatch."
            self.data += other.data
        else:
            self.data += other
        return self

    def __isub__(self,other):
        if isinstance(other,Field3d):
            assert self.has_same_grid(other), "Grid mismatch."
            self.data -= other.data
        else:
            self.data -= other
        return self

    def __imul__(self,other):
        if isinstance(other,Field3d):
            assert self.has_same_grid(other), "Grid mismatch."
            self.data *= other.data
        else:
            self.data *= other
        return self

    def __itruediv__(self,other):
        if isinstance(other,Field3d):
            assert self.has_same_grid(other), "Grid mismatch."
            self.data /= other.data
        else:
            self.data /= other
        return self

    # TBD: this should return another Field3d object
    # def __getitem__(self,val):
    #     assert isinstance(val,tuple) and len(val)==3, "Field3d must be indexed by [ii,jj,kk]."
    #     sl = []
    #     for x in val:
    #         if isinstance(x,int):
    #             lo,hi = x,x+1
    #             hi = hi if hi!=0 else None
    #             sl.append(slice(lo,hi))
    #         elif isinstance(x,slice):
    #             sl.append(x)
    #         else:
    #             raise TypeError("Trajectories can only be sliced by slice objects or integers.")
    #     return self.data[sl[0],sl[1],sl[2]]

    @classmethod
    def from_chunk(cls,chunk,gridg):
        '''Initialize Field3d from chunk data and global grid.'''
        xg,yg,zg = gridg
        ib,jb,kb = chunk['ibeg']-1, chunk['jbeg']-1, chunk['kbeg']-1
        dx,dy,dz = xg[1]-xg[0], yg[1]-yg[0], zg[1]-zg[0]
        xo,yo,zo = xg[ib]-chunk['ighost']*dx, yg[jb]-chunk['ighost']*dy, zg[kb]-chunk['ighost']*dz
        nx,ny,nz = chunk['data'].shape
        assert (chunk['nxl']+2*chunk['ighost'])==nx, "Invalid chunk data: nxl != chunk['data'].shape[0]"
        assert (chunk['nyl']+2*chunk['ighost'])==ny, "Invalid chunk data: nyl != chunk['data'].shape[1]"
        assert (chunk['nzl']+2*chunk['ighost'])==nz, "Invalid chunk data: nzl != chunk['data'].shape[2]"
        return cls(chunk['data'],origin=(xo,yo,zo),spacing=(dx,dy,dz))
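
    # A minimal sketch of the chunk dict 'from_chunk' expects (hypothetical
    # values; the keys mirror the assertions above): 1-based global start
    # indices, a ghost-cell width, the local interior size and the padded
    # data block.
    #
    #   chunk = {'ibeg':1,'jbeg':1,'kbeg':1,'ighost':1,
    #            'nxl':14,'nyl':14,'nzl':14,
    #            'data':np.zeros((16,16,16))}
    #   fld = Field3d.from_chunk(chunk,gridg=(xg,yg,zg))   # xg,yg,zg: global 1d grids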

    @classmethod
    def from_file(cls,file,name='Field3d'):
        import h5py
        is_open = isinstance(file,(h5py.File,h5py.Group))
        f = file if is_open else h5py.File(file,'r')
        g = f[name]
        origin = tuple(g['origin'])
        spacing = tuple(g['spacing'])
        data = g['data'][:]
        if not is_open: f.close()
        return cls(data,origin,spacing)

    @classmethod
    def allocate(cls,dim,origin,spacing,fill=None,dtype=np.float64):
        '''Allocates an empty field, or a field filled with 'fill'.'''
        assert isinstance(dim,(tuple,list,np.ndarray)) and len(dim)==3,\
            "'dim' must be a tuple/list of length 3."
        assert isinstance(origin,(tuple,list,np.ndarray)) and len(origin)==3,\
            "'origin' must be a tuple/list of length 3."
        assert isinstance(spacing,(tuple,list,np.ndarray)) and len(spacing)==3,\
            "'spacing' must be a tuple/list of length 3."
        if fill is None:
            data = np.empty(dim,dtype=dtype)
        else:
            data = np.full(dim,fill,dtype=dtype)
        return cls(data,origin,spacing)

    @classmethod
    def pseudo_field(cls,dim,origin,spacing):
        '''Creates a Field3d instance without allocating any memory.'''
        assert isinstance(dim,(tuple,list,np.ndarray)) and len(dim)==3,\
            "'dim' must be a tuple/list of length 3."
        assert isinstance(origin,(tuple,list,np.ndarray)) and len(origin)==3,\
            "'origin' must be a tuple/list of length 3."
        assert isinstance(spacing,(tuple,list,np.ndarray)) and len(spacing)==3,\
            "'spacing' must be a tuple/list of length 3."
        data = np.empty((0,0,0))
        r = cls(data,origin,spacing)
        r._dim = dim
        return r

    def save(self,file,name='Field3d',truncate=False):
        import h5py
        is_open = isinstance(file,(h5py.File,h5py.Group))
        flag = 'w' if truncate else 'a'
        f = file if is_open else h5py.File(file,flag)
        g = f.create_group(name)
        g.create_dataset('origin',data=self.origin)
        g.create_dataset('spacing',data=self.spacing)
        g.create_dataset('data',data=self.data)
        if not is_open: f.close()
        return
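
    # Round-trip sketch for 'save' and 'from_file' (hypothetical file name):
    # both accept either a path or an already open h5py File/Group, so several
    # fields can share one file under different group names.
    #
    #   fld.save('fields.h5',name='velocity_x',truncate=True)
    #   fld2 = Field3d.from_file('fields.h5',name='velocity_x')
    #   assert fld.has_same_grid(fld2)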

    def copy(self):
        return Field3d(self.data.copy(),self.origin,self.spacing)

    def pseudo_copy(self):
        return self.pseudo_field(self.dim(),self.origin,self.spacing)

    def insert_subfield(self,subfield):
        assert all([abs(subfield.spacing[ii]-self.spacing[ii])<self.eps_collapse
            for ii in range(3)]), "spacing differs. Got {}, have {}".format(subfield.spacing,self.spacing)
        assert all([self.distance_to_nearest_gridpoint(subfield.origin[ii],axis=ii)<self.eps_collapse
            for ii in range(3)]), "subfield has shifted origin."
        assert all(self.is_within_bounds(subfield.origin,axis=None)), "subfield origin is out-of-bounds."
        assert all(self.is_within_bounds(subfield.endpoint(),axis=None)), "subfield endpoint is out-of-bounds."
        #ib,jb,kb = [int(round((subfield.origin[ii]-self.origin[ii])/self.spacing[ii])) for ii in range(3)]
        ib,jb,kb = self.nearest_gridpoint(subfield.origin,axis=None)
        nx,ny,nz = subfield.dim()
        self.data[ib:ib+nx,jb:jb+ny,kb:kb+nz] = subfield.data[:,:,:]
        return

    def extract_subfield(self,idx_origin,dim,stride=(1,1,1),deep=False,strict_bounds=True):
        if strict_bounds:
            assert all(idx_origin[ii]>=0 and idx_origin[ii]<self.dim(axis=ii) for ii in range(3)),\
                "'origin' is out-of-bounds."
            assert all(idx_origin[ii]+stride[ii]*(dim[ii]-1)<self.dim(axis=ii) for ii in range(3)),\
                "endpoint is out-of-bounds."
        else:
            # Note: with strict_bounds=False, 'idx_origin' and 'dim' are clipped
            # in place and therefore must be mutable lists.
            for ii in range(3):
                if idx_origin[ii]<0:
                    dim[ii] += idx_origin[ii]
                    idx_origin[ii] = 0
                if idx_origin[ii]+stride[ii]*(dim[ii]-1)>=self.dim(axis=ii):
                    dim[ii] = (self.dim(axis=ii)-idx_origin[ii])//stride[ii]
                if dim[ii]<=0:
                    return None
        sl = tuple(slice(idx_origin[ii],
            idx_origin[ii]+stride[ii]*dim[ii],
            stride[ii]) for ii in range(3))
        origin = self.coordinate(idx_origin)
        spacing = tuple(self.spacing[ii]*stride[ii] for ii in range(3))
        data = self.data[sl]
        return Field3d(data,origin,spacing,deep=deep)
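
    # Usage sketch for 'extract_subfield' and 'insert_subfield' (hypothetical
    # sizes): pull out an index-aligned block, modify it, and write it back.
    #
    #   sub = fld.extract_subfield((8,8,8),(16,16,16),deep=True)
    #   sub += 1.0
    #   fld.insert_subfield(sub)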

    def clip(self,position,axis,invert=False,deep=False,is_relative=False):
        '''Extracts a subfield by clipping with a plane whose normal points in
        the direction specified by 'axis'.'''
        if is_relative:
            coord = self.origin[axis] + position*self.dim(axis=axis)*self.spacing[axis]
        else:
            coord = position
        idx_clip = self.nearest_gridpoint(coord,axis=axis,lower=True)
        sl = 3*[slice(None)]
        origin_ = list(self.origin)
        spacing_ = self.spacing
        if invert:
            sl[axis] = slice(idx_clip+1,None)
            origin_[axis] = self.origin[axis]+(idx_clip+1)*self.spacing[axis]
        else:
            sl[axis] = slice(0,idx_clip+1)
        data_ = self.data[tuple(sl)]
        return Field3d(data_,origin_,spacing_,deep=deep)

    def clip_box(self,bounds,deep=False,is_relative=False):
        '''Extracts a subfield by clipping with a box.'''
        if is_relative:
            bounds = tuple(self.origin[ii//2] + bounds[ii]*self.dim(ii//2)*self.spacing[ii//2] for ii in range(6))
        idx_lo = self.nearest_gridpoint((bounds[0],bounds[2],bounds[4]),lower=True)
        idx_hi = self.nearest_gridpoint((bounds[1],bounds[3],bounds[5]),lower=True)
        idx_lo = tuple(0 if idx_lo[axis]<0 else idx_lo[axis] for axis in range(3))
        idx_hi = tuple(0 if idx_hi[axis]<0 else idx_hi[axis] for axis in range(3))
        sl_ = tuple([slice(idx_lo[0],idx_hi[0]+1),
            slice(idx_lo[1],idx_hi[1]+1),
            slice(idx_lo[2],idx_hi[2]+1)])
        origin_ = tuple(self.origin[axis]+idx_lo[axis]*self.spacing[axis] for axis in range(3))
        spacing_ = self.spacing
        data_ = self.data[sl_]
        return Field3d(data_,origin_,spacing_,deep=deep)

    def threshold(self,val,invert=False):
        '''Returns a binary array indicating which grid points are above,
        or in case of invert=True below, a given threshold.'''
        if invert:
            return self.data<val
        else:
            return self.data>=val

    def coordinate(self,idx,axis=None):
        if axis is None:
            assert len(idx)==3, "If 'axis' is None, 'idx' must be a tuple/list of length 3."
            return tuple(self.coordinate(idx[ii],axis=ii) for ii in range(3))
        assert axis<3, "'axis' must be one of 0,1,2."
        assert idx<self.dim(axis=axis), "'idx' is out-of-bounds."
        return self.origin[axis]+idx*self.spacing[axis]

    def grid(self,axis=None):
        if axis is None:
            return tuple(self.grid(axis=ii) for ii in range(3))
        assert axis<3, "'axis' must be one of 0,1,2."
        return self.origin[axis]+np.arange(0,self.dim(axis=axis))*self.spacing[axis]

    def x(self): return self.grid(axis=0)

    def y(self): return self.grid(axis=1)

    def z(self): return self.grid(axis=2)

    def nearest_gridpoint(self,coord,axis=None,lower=False):
        if axis is None:
            assert len(coord)==3, "If 'axis' is None, 'coord' must be a tuple/list of length 3."
            return tuple(self.nearest_gridpoint(coord[ii],axis=ii,lower=lower) for ii in range(3))
        assert axis<3, "'axis' must be one of 0,1,2."
        if lower:
            return np.floor((coord+self.eps_collapse-self.origin[axis])/self.spacing[axis]).astype('int')
        else:
            return np.round((coord-self.origin[axis])/self.spacing[axis]).astype('int')
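
    # Index/coordinate conversion sketch: gridpoint i on 'axis' sits at
    # origin[axis]+i*spacing[axis], so 'coordinate' and 'nearest_gridpoint'
    # are inverses on the grid (hypothetical values):
    #
    #   fld = Field3d.allocate((8,8,8),origin=(0.,0.,0.),spacing=(0.5,0.5,0.5),fill=0.0)
    #   fld.nearest_gridpoint(fld.coordinate((2,3,4)))     # -> (2, 3, 4)
    #   fld.nearest_gridpoint(0.8,axis=0)                  # -> 2 (rounds)
    #   fld.nearest_gridpoint(0.8,axis=0,lower=True)       # -> 1 (floors)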

    def distance_to_nearest_gridpoint(self,coord,axis=None,lower=False):
        if axis is None:
            assert len(coord)==3, "If 'axis' is None, 'coord' must be a tuple/list of length 3."
            return tuple(self.distance_to_nearest_gridpoint(coord[ii],axis=ii,lower=lower) for ii in range(3))
        assert axis<3, "'axis' must be one of 0,1,2."
        val = np.remainder(coord+self.eps_collapse-self.origin[axis],self.spacing[axis])-self.eps_collapse
        if not lower and val>0.5*self.spacing[axis]:
            val = self.spacing[axis]-val
        return val

    def is_within_bounds(self,coord,axis=None):
        if axis is None:
            assert len(coord)==3, "If 'axis' is None, 'coord' must be a tuple/list of length 3."
            return tuple(self.is_within_bounds(coord[ii],axis=ii) for ii in range(3))
        assert axis<3, "'axis' must be one of 0,1,2."
        idx_nearest = self.nearest_gridpoint(coord,axis=axis)
        if idx_nearest>0 and idx_nearest<self.dim(axis=axis)-1:
            return True
        dist_nearest = self.distance_to_nearest_gridpoint(coord,axis=axis)
        if (idx_nearest==0 or idx_nearest==self.dim(axis=axis)-1) and abs(dist_nearest)<self.eps_collapse:
            return True
        else:
            return False

    def has_same_grid(self,other):
        if not self.has_same_origin(other): return False
        if not self.has_same_spacing(other): return False
        if not self.has_same_dim(other): return False
        return True

    def has_same_origin(self,other,axis=None):
        if axis is None:
            return all([self.has_same_origin(other,axis=ii) for ii in range(3)])
        origin = other.origin if isinstance(other,Field3d) else other
        return abs(origin[axis]-self.origin[axis])<self.eps_collapse

    def has_same_spacing(self,other,axis=None):
        if axis is None:
            return all([self.has_same_spacing(other,axis=ii) for ii in range(3)])
        spacing = other.spacing if isinstance(other,Field3d) else other
        return abs(spacing[axis]-self.spacing[axis])<self.eps_collapse

    def has_same_dim(self,other,axis=None):
        if axis is None:
            return all([self.has_same_dim(other,axis=ii) for ii in range(3)])
        dim = other.dim(axis=axis) if isinstance(other,Field3d) else other
        return dim==self.dim(axis=axis)

    def dim(self,axis=None):
        if axis is None:
            return tuple(self.dim(axis=ii) for ii in range(3))
        assert axis<3, "'axis' must be one of 0,1,2."
        if self._dim is not None:
            return self._dim[axis]
        else:
            return self.data.shape[axis]

    def endpoint(self,axis=None):
        if axis is None:
            return tuple(self.endpoint(axis=ii) for ii in range(3))
        assert axis<3, "'axis' must be one of 0,1,2."
        return self.origin[axis]+(self.dim(axis=axis)-1)*self.spacing[axis]

    def dtype(self):
        return self.data.dtype

    def convert_dtype(self,dtype):
        self.data = self.data.astype(dtype,copy=False)
        return

    def derivative(self,axis,only_keep_interior=False,shift_origin='before'):
        '''Computes the derivative with respect to direction 'axis' with 2nd-order
        finite differences centered between the original grid points.'''
        from scipy import ndimage
        assert axis<3, "'axis' must be one of 0,1,2."
        origin = list(self.origin)
        assert shift_origin in ('before','after'), "'shift_origin' must be one of {'before','after'}."
        if shift_origin=='before':
            corr_orig = 0
            origin[axis] -= 0.5*self.spacing[axis]
        else:
            corr_orig = -1
            origin[axis] += 0.5*self.spacing[axis]
        data = ndimage.correlate1d(self.data,np.array([-1.,1.])/self.spacing[axis],axis=axis,
            mode='constant',cval=np.nan,origin=corr_orig)
        if only_keep_interior:
            sl = 3*[slice(None)]
            if shift_origin=='before':
                # drop the invalid first plane (the NaN from the constant-mode border)
                sl[axis] = slice(1,None)
                origin[axis] += self.spacing[axis]
            else:
                sl[axis] = slice(0,-1)
            data = data[tuple(sl)]
        return Field3d(data,origin,self.spacing)

    def gradient(self,only_keep_interior=False,shift_origin='before'):
        return [self.derivative(axis,only_keep_interior=only_keep_interior,
            shift_origin=shift_origin) for axis in range(0,3)]

    def laplacian(self,only_keep_interior=False):
        '''Computes the Laplacian of a field.'''
        from scipy import ndimage
        data = ndimage.correlate1d(self.data,np.array([1.,-2.,1.])/self.spacing[0]**2,axis=0,mode='constant',cval=np.nan,origin=0)
        data += ndimage.correlate1d(self.data,np.array([1.,-2.,1.])/self.spacing[1]**2,axis=1,mode='constant',cval=np.nan,origin=0)
        data += ndimage.correlate1d(self.data,np.array([1.,-2.,1.])/self.spacing[2]**2,axis=2,mode='constant',cval=np.nan,origin=0)
        origin = list(self.origin)
        if only_keep_interior:
            data = data[1:-1,1:-1,1:-1]
            for axis in range(3):
                origin[axis] = origin[axis]+self.spacing[axis]
        return Field3d(data,origin,self.spacing)
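
    # A small sketch (hypothetical values) of 'derivative' and 'laplacian' on
    # f(x,y,z)=x**2, where df/dx=2x and the Laplacian is 2 everywhere in the
    # interior; the staggered derivative lives half a spacing off the nodes.
    #
    #   f = Field3d.allocate((8,8,8),(0.,0.,0.),(1.,1.,1.))
    #   f.data[:] = f.x()[:,None,None]**2
    #   dfdx = f.derivative(0,only_keep_interior=True)   # origin shifted by 0.5*dx
    #   lap  = f.laplacian(only_keep_interior=True)      # == 2 in the interior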

    def integral(self,integrate_axis,average=False,ignore_nan=False,return_weights=False,ufunc=None):
        '''Computes the integral or average along a given axis applying the
        function 'ufunc' to each node.'''
        assert isinstance(integrate_axis,(list,tuple,np.ndarray)) and len(integrate_axis)==3,\
            "'integrate_axis' must be a tuple/list of length 3."
        assert all([isinstance(integrate_axis[ii],(bool,int)) for ii in range(3)]),\
            "'integrate_axis' requires bool values."
        assert any(integrate_axis), "'integrate_axis' must contain at least one True."
        axes = []
        weight = 1.0
        for axis in range(3):
            if integrate_axis[axis]:
                axes.append(axis)
                if average:
                    weight *= self.dim(axis=axis)
                else:
                    weight *= self.spacing[axis]
        axes = tuple(axes)
        if ignore_nan:
            if average:
                weight = np.sum(~np.isnan(self.data),axis=axes,keepdims=True)
            func_sum = np.nansum
        else:
            func_sum = np.sum
        if ufunc is None:
            out = func_sum(self.data,axis=axes,keepdims=True)
        else:
            assert isinstance(ufunc,np.ufunc), "'ufunc' needs to be a numpy ufunc. "\
                "Check out https://numpy.org/doc/stable/reference/ufuncs.html for reference."
            assert ufunc.nin==1, "Only ufunc with single input argument are supported for now."
            out = func_sum(ufunc(self.data),axis=axes,keepdims=True)
        if return_weights:
            return (out,weight)
        else:
            return out/weight
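
    # Usage sketch (hypothetical field 'fld'): 'integrate_axis' selects the
    # axes to reduce, e.g. a plane average over x and z that keeps the y
    # profile, or a mean-square profile via the np.square ufunc.
    #
    #   mean_y = fld.integral((True,False,True),average=True)
    #   ms_y   = fld.integral((True,False,True),average=True,ufunc=np.square)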

    def gaussian_filter(self,sigma,truncate=4.0,only_keep_interior=False):
        '''Applies a gaussian filter: sigma is standard deviation for Gaussian kernel for each axis.'''
        from scipy import ndimage
        assert isinstance(sigma,(tuple,list,np.ndarray)) and len(sigma)==3,\
            "'sigma' must be a tuple/list of length 3"
        # Convert sigma from simulation length scales to grid points as required by ndimage
        sigma_img = tuple(sigma[ii]/self.spacing[ii] for ii in range(3))
        data = ndimage.gaussian_filter(self.data,sigma_img,truncate=truncate,mode='constant',cval=np.nan)
        origin = list(self.origin)
        if only_keep_interior:
            r = self.gaussian_filter_radius(sigma,truncate=truncate)
            data = data[r[0]:-r[0],r[1]:-r[1],r[2]:-r[2]]
            for axis in range(3):
                origin[axis] = origin[axis]+r[axis]*self.spacing[axis]
        return Field3d(data,origin,self.spacing)

    def gaussian_filter_radius(self,sigma,truncate=4.0):
        '''Radius of Gaussian filter. Stencil width is 2*radius+1.'''
        assert isinstance(sigma,(tuple,list,np.ndarray)) and len(sigma)==3,\
            "'sigma' must be a tuple/list of length 3"
        # Convert sigma from simulation length scales to grid points as required by ndimage
        sigma_img = tuple(sigma[ii]/self.spacing[ii] for ii in range(3))
        radius = []
        for ii in range(3):
            radius.append(int(truncate*sigma_img[ii]+0.5))
        return tuple(radius)

    def shift_origin(self,rel_shift,only_keep_interior=False):
        '''Shifts the origin of a field by a fraction of the spacing (linear
        interpolation between neighboring grid points).'''
        from scipy import ndimage
        assert isinstance(rel_shift,(tuple,list,np.ndarray)) and len(rel_shift)==3,\
            "'shift' must be tuple/list with length 3."
        assert all([rel_shift[ii]>=-1.0-self.eps_collapse and rel_shift[ii]<=1.0+self.eps_collapse for ii in range(3)]),\
            "'shift' must be in [-1.0,1.0]. {}".format(rel_shift)
        data = self.data.copy()
        origin = list(self.origin)
        sl = 3*[slice(None)]
        for axis in range(3):
            if abs(rel_shift[axis])<self.eps_collapse:
                continue
            elif rel_shift[axis]>0:
                w = rel_shift[axis] if rel_shift[axis]<=1.0 else 1.0
                weights = (1.0-w,w)
                data = ndimage.correlate1d(data,weights,axis=axis,mode='constant',cval=np.nan,origin=-1)
                origin[axis] += w*self.spacing[axis]
                if only_keep_interior:
                    sl[axis] = slice(0,-1)
            else:
                w = rel_shift[axis] if rel_shift[axis]>=-1.0 else -1.0
                weights = (-w,1.0+w)
                data = ndimage.correlate1d(data,weights,axis=axis,mode='constant',cval=np.nan,origin=0)
                origin[axis] += w*self.spacing[axis]
                if only_keep_interior:
                    sl[axis] = slice(1,None)
                    origin[axis] += self.spacing[axis]
        if only_keep_interior:
            data = data[tuple(sl)]
        return Field3d(data,origin,self.spacing)

    def relative_shift(self,field):
        '''Compute the relative shift (in terms of spacing) to shift self onto field.'''
        assert self.has_same_spacing(field), "spacing differs."
        rel_shift = [0.0,0.0,0.0]
        for axis in range(3):
            dist = field.origin[axis]-self.origin[axis]
            if abs(dist)>self.eps_collapse:
                rel_shift[axis] = dist/self.spacing[axis]
        return tuple(rel_shift)

    def change_grid(self,origin,spacing,dim,padding=None,numpad=1):
        assert all([origin[ii]>=self.origin[ii] for ii in range(0,3)]), "New origin is out of bounds."
        endpoint = [origin[ii]+(dim[ii]-1)*spacing[ii] for ii in range(0,3)]
        assert all([endpoint[ii]<=self.endpoint(ii) for ii in range(0,3)]), "New end point is out of bounds."
        # Allocate (possibly padded) array; call the static helper via self to
        # avoid the name clash with the 'padding' argument.
        origin_pad,dim_pad,sl_out = self.padding(origin,spacing,dim,padding,numpad)
        data = np.zeros(dim_pad,dtype=self.data.dtype)
        # Trilinear interpolation
        if np.allclose(spacing,self.spacing):
            # spacing is the same: we can construct universal weights for the stencil
            i0,j0,k0 = self.nearest_gridpoint(origin,axis=None,lower=True)
            cx,cy,cz = [self.distance_to_nearest_gridpoint(origin[ii],axis=ii,lower=True)/self.spacing[ii]
                for ii in range(3)]
            c = self.weights_trilinear((cx,cy,cz))
            for ii in range(0,2):
                for jj in range(0,2):
                    for kk in range(0,2):
                        if c[ii,jj,kk]>self.eps_collapse:
                            data[sl_out] += c[ii,jj,kk]*self.data[
                                i0+ii:i0+ii+dim[0],
                                j0+jj:j0+jj+dim[1],
                                k0+kk:k0+kk+dim[2]]
        else:
            data_ref = data[sl_out]
            for ii in range(0,dim[0]):
                for jj in range(0,dim[1]):
                    for kk in range(0,dim[2]):
                        coord = (
                            origin[0]+ii*spacing[0],
                            origin[1]+jj*spacing[1],
                            origin[2]+kk*spacing[2])
                        data_ref[ii,jj,kk] = self.interpolate(coord)
        return Field3d(data,origin_pad,spacing)

    def interpolate(self,coord):
        assert all([coord[ii]>=self.origin[ii] for ii in range(0,3)]), "'coord' is out of bounds."
        assert all([coord[ii]<=self.endpoint(ii) for ii in range(0,3)]), "'coord' is out of bounds."
        i0,j0,k0 = self.nearest_gridpoint(coord,axis=None,lower=True)
        cx,cy,cz = [self.distance_to_nearest_gridpoint(coord[ii],axis=ii,lower=True)/self.spacing[ii]
            for ii in range(3)]
        c = self.weights_trilinear((cx,cy,cz))
        val = 0.0
        for ii in range(0,2):
            for jj in range(0,2):
                for kk in range(0,2):
                    if c[ii,jj,kk]>self.eps_collapse:
                        val += c[ii,jj,kk]*self.data[i0+ii,j0+jj,k0+kk]
        return val
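
    # Point-probe sketch (hypothetical values): trilinear interpolation of the
    # eight surrounding nodes. For f(x,y,z)=x the result is exact:
    #
    #   f = Field3d.allocate((8,8,8),(0.,0.,0.),(1.,1.,1.))
    #   f.data[:] = f.x()[:,None,None]
    #   f.interpolate((2.25,3.5,4.75))   # -> 2.25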

    @staticmethod
    def padding(origin,spacing,dim,padding,numpad):
        if isinstance(numpad,int):
            numpad = np.full(3,numpad,dtype=int)
        else:
            numpad = np.array(numpad,dtype=int)
        assert len(numpad)==3, "'numpad' must be either an integer or tuple/list of length 3."
        origin_pad = np.array(origin)
        dim_pad = np.array(dim)
        sl_out = [slice(None),slice(None),slice(None)]
        if padding is not None:
            if padding=='before':
                dim_pad += numpad
                origin_pad -= numpad*spacing
                for axis in range(3):
                    sl_out[axis] = slice(numpad[axis],None)
            elif padding=='after':
                dim_pad += numpad
                for axis in range(3):
                    sl_out[axis] = slice(0,-numpad[axis])
            elif padding=='both':
                dim_pad += 2*numpad
                origin_pad -= numpad*spacing
                for axis in range(3):
                    sl_out[axis] = slice(numpad[axis],-numpad[axis])
            else:
                raise ValueError("'padding' must either be None or one of {'before','after','both'}.")
        sl_out = tuple(sl_out)
        origin_pad = tuple(origin_pad)
        dim_pad = tuple(dim_pad)
        return (origin_pad,dim_pad,sl_out)

    def weights_trilinear(self,rel_dist):
        assert len(rel_dist)==3, "len(rel_dist) must be 3."
        cx,cy,cz = rel_dist
        if cx<0.0 and cx>-self.eps_collapse: cx=0.0
        if cy<0.0 and cy>-self.eps_collapse: cy=0.0
        if cz<0.0 and cz>-self.eps_collapse: cz=0.0
        if cx>1.0 and cx<1.0+self.eps_collapse: cx=1.0
        if cy>1.0 and cy<1.0+self.eps_collapse: cy=1.0
        if cz>1.0 and cz<1.0+self.eps_collapse: cz=1.0
        assert cx>=0.0 and cy>=0.0 and cz>=0.0, "'rel_dist' must be >=0"
        assert cx<=1.0 and cy<=1.0 and cz<=1.0, "'rel_dist' must be <=1"
        c = np.zeros((2,2,2))
        c[0,0,0] = 1.0-(cx+cy+cz)+(cx*cy+cx*cz+cy*cz)-(cx*cy*cz)
        c[1,0,0] = cx-(cx*cy+cx*cz)+(cx*cy*cz)
        c[0,1,0] = cy-(cx*cy+cy*cz)+(cx*cy*cz)
        c[0,0,1] = cz-(cx*cz+cy*cz)+(cx*cy*cz)
        c[1,1,0] = (cx*cy)-(cx*cy*cz)
        c[1,0,1] = (cx*cz)-(cx*cy*cz)
        c[0,1,1] = (cy*cz)-(cx*cy*cz)
        c[1,1,1] = (cx*cy*cz)
        return c
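
    # The eight coefficients above are the expanded products of the 1-d hat
    # weights, i.e. c[i,j,k] = ((1-cx,cx)[i])*((1-cy,cy)[j])*((1-cz,cz)[k]),
    # so they always sum to one. A small self-check sketch (hypothetical values):
    #
    #   f = Field3d.allocate((2,2,2),(0.,0.,0.),(1.,1.,1.),fill=1.0)
    #   c = f.weights_trilinear((0.25,0.5,0.75))
    #   assert abs(c.sum()-1.0)<1e-12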

    def set_writable(self,flag):
        self.data.setflags(write=flag)
        return

    def to_vtk(self,deep=False):
        import pyvista as pv
        mesh = pv.UniformGrid()
        mesh.dimensions = self.dim(axis=None)
        mesh.origin = self.origin
        mesh.spacing = self.spacing
        # order needs to be F no matter how array is stored in memory
        if deep:
            mesh.point_arrays['data'] = self.data.flatten(order='F')
        else:
            if not self.data.flags['F_CONTIGUOUS']:
                self.data = np.asfortranarray(self.data)
            mesh.point_arrays['data'] = self.data.ravel(order='F')
        return mesh

    def vtk_contour(self,val,deep=False,method='contour'):
        if not isinstance(val,(tuple,list)):
            val = [val]
        return self.to_vtk(deep=deep).contour(val,method=method)

    def vtk_slice(self,normal,origin,deep=False):
        assert (normal in ('x','y','z') or (isinstance(normal,(tuple,list))
            and len(normal)==3)), "'normal' must be 'x','y','z' or tuple of length 3."
        assert isinstance(origin,(tuple,list)) and len(origin)==3,\
            "'origin' must be tuple of length 3."
        return self.to_vtk(deep=deep).slice(normal=normal,origin=origin)


def gaussian_filter_umean_channel(array,spacing,sigma,truncate=4.0):
    '''Applies a Gaussian filter to a numpy array of dimension (1,ny,1) which
    contains the mean streamwise velocity of the channel (or possibly something
    else). Since the first and last grid points lie on the boundaries, we can
    use scipy's mirror mode and don't need ghost cells.'''
    from scipy import ndimage
    assert array.ndim==3, "Expected an array with three dimensions/axes."
    assert array.shape[0]==1 and array.shape[1]>1 and array.shape[2]==1,\
        "Expected an array with shape (1,ny,1)."
    sigma_img = sigma/spacing
    array = ndimage.gaussian_filter1d(array,sigma_img,axis=1,truncate=truncate,mode='mirror')
    return array


class Features3d:

    def __init__(self,input,threshold,origin,spacing,periodicity,
            invert=False,has_ghost=False,keep_input=False,
            contour_method='flying_edges',cellvol_normal_component=2,
            report=False):
        assert len(origin)==3, "'origin' must be of length 3"
        assert len(spacing)==3, "'spacing' must be of length 3"
        assert len(periodicity)==3, "'periodicity' must be of length 3"
        assert isinstance(input,np.ndarray), "'input' must be numpy.ndarray."
        # Assign basic properties to class variables
        self.origin = np.array(origin,dtype=float)
        self.spacing = np.array(spacing,dtype=float)
        self.dimensions = input.shape
        self.periodicity = tuple(bool(x) for x in periodicity)
        # If regions are supposed to be inverted, i.e. the interior consists of values
        # smaller than the threshold instead of larger, change the sign of the array.
        sign_invert = -1 if invert else +1
        self._threshold = sign_invert*threshold
        # '_input' is the array which is to be triangulated. Here one trailing ghost cell is
        # required to get closed surfaces. The data will always be copied since it probably
        # needs to be copied for vtk anyway (needs FORTRAN contiguous array) and this way
        # there will never be side effects on the input data.
        if has_ghost:
            self._input = sign_invert*input
        else:
            pw = tuple((0,1) if x else (0,0) for x in periodicity)
            self._input = np.pad(sign_invert*input,pw,mode='wrap')
        # Triangulate
        self.triangulate(contour_method=contour_method,
            cellvol_normal_component=cellvol_normal_component,
            report=report)
        # Drop input if requested: this may free memory in case it was copied/temporary.
        if not keep_input: self._input = None
        return

    @classmethod
    def from_field(cls,fld,threshold,periodicity,invert=False,has_ghost=False,
            keep_input=False,contour_method='flying_edges',
            cellvol_normal_component=2,report=False):
        return cls(fld.data,threshold,fld.origin,fld.spacing,periodicity,
            invert=invert,has_ghost=has_ghost,keep_input=keep_input,
            contour_method=contour_method,
            cellvol_normal_component=cellvol_normal_component,
            report=report)

    def triangulate(self,contour_method='flying_edges',cellvol_normal_component=2,report=False):
        import pyvista as pv
        import vtk
        from scipy import ndimage, spatial
        # Check if '_input' is available: might have been dropped after initialization
        assert self._input is not None, "'_input' not available. Initialize object with keep_input=True flag."
        # Wrap data for VTK using pyvista
        datavtk = pv.UniformGrid()
        datavtk.dimensions = self._input.shape
        datavtk.origin = self.origin
        datavtk.spacing = self.spacing
        datavtk.point_arrays['data'] = self._input.ravel('F')
        # Compute full contour: ensure we only have triangles to efficiently extract points
        # later on.
        if report: print('[Features3d.triangulate] computing isocontour using {}...'.format(contour_method))
        contour_ = datavtk.contour([self._threshold],method=contour_method,compute_scalars=False)
        assert contour_.is_all_triangles(), "Contouring produced non-triangle cells."
        # Compute the connectivity of the triangulated surface: first we run an ordinary
        # connectivity filter neglecting periodic wrapping.
        if report: print('[Features3d.triangulate] computing connectivity...')
        alg = vtk.vtkPolyDataConnectivityFilter() # ~twice as fast as default vtkConnectivityFilter
        alg.SetInputData(contour_)
        alg.SetExtractionModeToAllRegions()
        alg.SetColorRegions(True)
        alg.Update()
        contour_ = pv.filters._get_output(alg)
        # Now determine the boundary points, i.e. points on the boundary which have a corresponding
        # vertex on the other side of the domain
        if report: print('[Features3d.triangulate] correcting connectivity for periodic boundaries...')
        points = contour_.points
        bd_points_xyz = np.zeros((0,3),dtype=points.dtype)
        bd_points_idx = np.zeros((0,),dtype=np.int64)
        for axis in range(3):
            if not self.periodicity[axis]: continue
            # Compute position of boundary, period of domain and set a threshold for "on boundary" condition
            bound_pos = datavtk.origin[axis]+datavtk.spacing[axis]*(datavtk.dimensions[axis]-1)
            period = np.zeros((3,))
            period[axis] = bound_pos-datavtk.origin[axis]
            bound_dist = 1e-5*datavtk.spacing[axis]
            # Lower boundary
            li = np.abs(points[:,axis]-datavtk.origin[axis])<bound_dist
            bd_points_idx = np.append(bd_points_idx,np.nonzero(li)[0],axis=0)
            bd_points_xyz = np.append(bd_points_xyz,points[li,:],axis=0)
            # Upper boundary
            li = np.abs(points[:,axis]-bound_pos)<bound_dist
            bd_points_idx = np.append(bd_points_idx,np.nonzero(li)[0],axis=0)
            bd_points_xyz = np.append(bd_points_xyz,points[li,:]-period,axis=0)
        # Construct a KD tree for efficient neighborhood search
        kd = spatial.KDTree(bd_points_xyz,leafsize=10,compact_nodes=True,copy_data=False,balanced_tree=True)
        # Construct a map to get new labels for regions. We search for pairs of points with
        # a very small distance inbetween. Then we check if their labels differ and choose
        # the lower label as the new joint one.
        point_labels = contour_.point_arrays['RegionId']
        nfeatures = np.max(point_labels)+1
        map_ = np.array(range(nfeatures),dtype=point_labels.dtype)
        overlap_dist = 1e-4*np.sqrt(np.square(datavtk.spacing).sum())
        for (ii,jj) in kd.query_pairs(r=overlap_dist):
            label_ii = point_labels[bd_points_idx[ii]]
            label_jj = point_labels[bd_points_idx[jj]]
            if label_ii!=label_jj:
                source_ = np.maximum(label_ii,label_jj)
                target_ = np.minimum(label_ii,label_jj)
                while target_ != map_[target_]: # map it recursively
                    target_ = map_[target_]
                map_[source_] = target_
        map_ = np.unique(map_,return_inverse=True)[1]
        point_labels = map_[point_labels]
        nfeatures = np.max(map_)+1 # starts with zero
        # Labels are now stored as point data. To efficiently convert it to cell data, the first
        # point of each cell determines the value for this cell.
        if report: print('[Features3d.triangulate] identifying cell based labels...')
        ncells = contour_.n_faces
        faces = contour_.faces.reshape(ncells,4)[:,:]
        cell_labels = point_labels[faces[:,1]]
        # Compute the volume and area per cell. For the volume computation, an arbitrary component
        # of the normal has to be chosen which defaults to the z-component and is set by
        # 'cellvol_normal_component'.
        if report: print('[Features3d.triangulate] calculating area and volume per cell...')
        U = points[faces[:,1],:]
        V = points[faces[:,2],:]
        W = points[faces[:,3],:]
        cn = np.cross(V-U,W-U)
        cc = (U+V+W)/3
        area = 0.5*np.sqrt(np.square(cn).sum(axis=1))
        vol = 0.5*cn[:,cellvol_normal_component]*cc[:,cellvol_normal_component]
        # Now the label is known per cell. We only need to find all cells with the same label
        # and group them. Therefore we sort the array holding the faces by feature label
        # and save the offset in another array to easily extract them whenever necessary.
        if report: print('[Features3d.triangulate] finalizing internal data...')
        offset = np.zeros((nfeatures+1,),dtype=np.int64)
        offset[1:] = np.cumsum(np.bincount(cell_labels))
        ind = np.argsort(cell_labels)
        self._offset = offset
        self._faces = faces[ind,:]
        self._points = points
        self._cell_areas = area[ind]
        self._cell_volumes = vol[ind]
        self._nfeatures = nfeatures
        return
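
    # A minimal end-to-end sketch (hypothetical values): triangulate all
    # regions where a field exceeds a threshold, drop enclosed holes and
    # tiny fragments, then inspect the result.
    #
    #   feat = Features3d.from_field(fld,threshold=0.5,periodicity=(True,True,True))
    #   feat.fill_holes()
    #   feat.reduce_noise(threshold=1e-5,is_relative=True)
    #   print(feat.nfeatures,feat.areas(),feat.volumes())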

    @property
    def threshold(self): return self._threshold

    @property
    def nfeatures(self): return self._nfeatures

    @property
    def faces(self): return np.split(self._faces,self._offset[1:-1])

    @property
    def points(self): return self._points

    @property
    def cell_areas(self): return np.split(self._cell_areas,self._offset[1:-1])

    @property
    def cell_volumes(self): return np.split(self._cell_volumes,self._offset[1:-1])

    def fill_holes(self,report=False):
        '''Remove triangulation which is fully enclosed by another. These regions
        will have a negative volume due to the direction of their normal vector.'''
        self.discard_features(np.flatnonzero(self.volumes()<0),report=report)
        return

    def reduce_noise(self,threshold=1e-5,is_relative=True,report=False):
        '''Discards all objects with smaller volume than a threshold.'''
        if is_relative:
            vol_domain = np.prod(self.spacing*self.dimensions)
            threshold = threshold*vol_domain
        self.discard_features(np.flatnonzero(self.volumes()<threshold),report=report)
        return

    def discard_features(self,labels,clean_points=False,report=False):
        '''Removes features from triangulated data.'''
        from collections.abc import Iterable
        if not isinstance(labels,Iterable): labels = [labels]
        # Get index ranges which are to be deleted and also create an array
        # which determines the size of the block to be deleted
        idx = []
        gapsize = np.zeros((self.nfeatures,),dtype=np.int64)
        for label in labels:
            rng_ = np.arange(self._offset[label],self._offset[label+1])
            idx.append(rng_)
            gapsize[label] = len(rng_)
        idx = np.concatenate(idx,axis=0)
        # Save former number of faces for report
        nfaces = self._faces.shape[0]
        # Delete indexed elements from arrays
        self._faces = np.delete(self._faces,idx,axis=0)
        self._cell_areas = np.delete(self._cell_areas,idx,axis=0)
        self._cell_volumes = np.delete(self._cell_volumes,idx,axis=0)
        # Correct offset and feature count
        self._offset[1:] = self._offset[1:]-np.cumsum(gapsize)
        self._offset = self._offset[:-len(labels)]
        self._nfeatures -= len(labels)
        if report:
            print('[Features3d.discard_features]',end=' ')
            print('discarded {:} features with {:} faces.'.format(
                len(labels),nfaces-self._faces.shape[0]))
        if clean_points:
            self.clean_points(report=report)
        return

    def area(self,feature):
        '''Returns the surface area of a feature. If 'feature' is None, the total
        surface area of all features is returned.'''
        if feature is None:
            return np.sum(self._cell_areas)
        else:
            # labels are 0-based, consistent with discard_features/to_vtk
            return np.sum(self.cell_areas[feature])

    def areas(self):
        '''Returns an array with surface areas of all features.'''
        return np.add.reduceat(self._cell_areas,self._offset[:-1])

    def volume(self,feature):
        '''Returns the volume enclosed by a feature. If 'feature' is None, the
        total volume of all features is returned.'''
        if feature is None:
            return np.sum(self._cell_volumes)
        else:
            # labels are 0-based, consistent with discard_features/to_vtk
            return np.sum(self.cell_volumes[feature])

    def volumes(self):
        '''Returns an array with volumes of all features.'''
        return np.add.reduceat(self._cell_volumes,self._offset[:-1])

    def find_feature(self):
        # coords = np.array(coords)
        # assert coords.ndim==2 and coords.shape[1]==3, "'coords' need to be provided as Nx3 array."
        from time import time
        from scipy import spatial

        t = time()
        xmin = np.amin(self._points[self._faces[:,1:],0],axis=1)
        xmax = np.amax(self._points[self._faces[:,1:],0],axis=1)
        zmin = np.amin(self._points[self._faces[:,1:],2],axis=1)
        zmax = np.amax(self._points[self._faces[:,1:],2],axis=1)
        center = np.stack((0.5*(xmax+xmin),0.5*(zmax+zmin)),axis=1)
        radius = np.sqrt(2.0)*np.maximum(
            np.amax(0.5*(xmax-xmin)),
            np.amax(0.5*(zmax-zmin)))
        print('Preparation for KD tree in',time()-t)

        t = time()
        kd = spatial.KDTree(center,leafsize=100,compact_nodes=False,copy_data=False,balanced_tree=False)
        # kd = spatial.KDTree(center,leafsize=10,compact_nodes=True,copy_data=False,balanced_tree=True)
        # kd = spatial.KDTree(self.points[:,1:],leafsize=20,compact_nodes=False,copy_data=False,balanced_tree=False)
        # kd = spatial.KDTree(self._points[:,1:],leafsize=10,compact_nodes=True,copy_data=False,balanced_tree=True)
        print('KD tree built in',time()-t)

        query = np.random.random((10000,2))

        # t = time()
        # map_ = self._points.shape[0]*[[]]
        # for ii,face in enumerate(self._faces[:,1:].ravel()):
        #     map_[face].append(ii)
        # faces_connected_to_vertex = {}
        # for face_index,face in enumerate(self._faces[:,1:]):
        #     for vertex_index in face:
        #         faces_connected_to_vertex.setdefault(vertex_index,[]).append(face_index)
        # print('Inverted cells-faces',time()-t)
        # radius = np.sqrt(np.sum(self.spacing[1:]**2))

        t = time()
        kd.query_ball_point(query,radius)
        print('KD tree query',time()-t)

        # print(radius)
        # print(len(kd.query_ball_point((0.5,0.0),radius,return_sorted=True)),self._points.shape[0])
        # yset = set(x.data for x in sorted(treey.at(0.0)))
        # zset = set(x.data for x in sorted(treez.at(0.0)))
        return

    @staticmethod
    def ray_triangle_intersect(r0,dr,v0,v1,v2):
        # Moeller-Trumbore style ray/triangle intersection: returns the ray
        # parameter t of the hit point, or -inf if there is no intersection.
        v0v1 = v1-v0
        v0v2 = v2-v0
        pvec = np.cross(dr,v0v2)
        det = np.dot(v0v1,pvec)
        if np.abs(det)<1e-6:
            return float('-inf')
        invDet = 1.0 / det
        tvec = r0-v0
        u = np.dot(tvec,pvec)*invDet
        if u<0 or u>1:
            return float('-inf')
        qvec = np.cross(tvec,v0v1)
        v = np.dot(dr,qvec)*invDet
        if v<0 or u+v>1:
            return float('-inf')
        return np.dot(v0v2,qvec)*invDet

    def clean_points(self,report=False):
        nfaces_ = self._faces.shape[0]
        ind,inv = np.unique(self._faces[:,1:],return_inverse=True)
        if report:
            print('[Features3d.clean_points]',end=' ')
            print('removed {:} orphan points.'.format(self._points.shape[0]-len(ind)))
        self._faces[:,1:4] = inv.reshape(nfaces_,3)
        self._points = self._points[ind,:]

    def to_vtk(self,labels,reduce_points=False):
        import pyvista as pv
        from collections.abc import Iterable
        if labels is None:
            return pv.PolyData(self._points,self._faces)
        if not isinstance(labels,Iterable):
            labels = [labels]
        points = self._points
        faces = []
        for label in labels:
            sl_ = slice(self._offset[label],self._offset[label+1])
            faces.append(self._faces[sl_,:])
        faces = np.concatenate(faces,axis=0)
        # if reduce_points:
        #     nfaces_ = self._faces[idx].shape[0]
        #     ind,inv = np.unique(self._faces[:,1:4],return_inverse=True)
        #     faces_ = np.zeros((nfaces_,4),dtype=self._faces.dtype)
        #     faces_[:,0] = 3
        #     faces_[:,1:4] = inv.reshape(nfaces_,3)
        #     points_ = self._points[ind,:]
        #     return pv.PolyData(points_,faces_)
        # else:
        return pv.PolyData(points,faces)


class BinaryFieldNd:

    def __init__(self,input,periodicity,connect_diagonals=False,deep=False,has_ghost=False):
        assert isinstance(input,np.ndarray) and input.dtype==bool,\
            "'input' must be a numpy array of type bool."
        assert all([isinstance(x,(bool,int)) for x in periodicity]),\
            "'periodicity' requires bool values."
        assert len(periodicity)==input.ndim,\
            "Number of entries in 'periodicity' must match dimension of binary field."
        from scipy import ndimage
        if has_ghost and deep:
            self._data = input.copy()
        elif has_ghost:
            self._data = input
        else:
            pw = tuple((0,1) if x else (0,0) for x in periodicity)
            self._data = np.pad(input,pw,mode='wrap')
        # slice(None) keeps non-periodic axes untouched when stripping ghosts
        self._sldata = tuple(slice(0,-1) if x else slice(None) for x in periodicity)
        self._dim = self._data.shape
        self._ndim = self._data.ndim
        self._labels = None
        self.nlabels = None
        self.wrap = tuple(self._ndim*[None])
        self.periodicity = tuple(bool(x) for x in periodicity)
        if connect_diagonals:
            self.structure = ndimage.generate_binary_structure(self._ndim,self._ndim)
        else:
            self.structure = ndimage.generate_binary_structure(self._ndim,1)

    @property
    def data(self):
        return self._data[self._sldata]

    @property
    def labels(self):
        if self._labels is None:
            return None
        return self._labels[self._sldata]

    def label(self):
        '''Labels connected regions in binary fields.'''
        from scipy import ndimage
        if any(self.periodicity):
            self._labels,self.nlabels,self.wrap = self._labels_periodic()
        else:
            self._labels,self.nlabels = ndimage.label(self._data,structure=self.structure)

    def _labels_periodic(self,map_to_zero=False):
        '''Label features in an array while taking into account periodic wrapping.
        If map_to_zero=True, every feature which overlaps or is attached to the
        periodic boundary will be removed.'''
        from scipy import ndimage
        # Compute labels on padded array
        labels_,nlabels_ = ndimage.label(self._data,structure=self.structure)
        # Get a mapping of labels which differ at periodic overlap
        map_ = np.array(range(nlabels_+1),dtype=labels_.dtype)
        wrap_ = self._ndim*[None]
        for axis in range(self._ndim):
            if not self.periodicity[axis]: continue
            sl_lo = tuple(slice(0,1) if ii==axis else slice(None) for ii in range(self._ndim))
            sl_hi = tuple(slice(-1,None) if ii==axis else slice(None) for ii in range(self._ndim))
            sl_pre = tuple(slice(-2,-1) if ii==axis else slice(None) for ii in range(self._ndim))
            lab_lo = labels_[sl_lo]
            lab_hi = labels_[sl_hi]
            lab_pre = np.unique(labels_[sl_pre]) # all labels in last (unwrapped) slice
            # Initialize array to keep track of wrapping
            wrap_[axis] = np.zeros(nlabels_+1,dtype=bool)
            # Determine new label and map
            lab_new = np.minimum(lab_lo,lab_hi)
            for lab_ in [lab_lo,lab_hi]:
                li = (lab_!=lab_new) #if not map_to_zero else (lab_!=0)
                lab_li = lab_[li]
                lab_new_li = lab_new[li]
                for idx_ in np.unique(lab_li,return_index=True)[1]:
                    source_ = lab_li[idx_] # the label to be changed
                    target_ = lab_new_li[idx_] # the label which will be newly assigned
                    if map_to_zero and source_ in lab_pre:
                        map_[source_] = 0
                        map_[target_] = 0
                    else:
                        while target_ != map_[target_]: # map it recursively
                            target_ = map_[target_]
                        map_[source_] = target_
                        if source_ in lab_pre: # check if source is not a ghost
                            wrap_[axis][target_] = True
        # Remove gaps from target mapping
        idx_,map_ = np.unique(map_,return_index=True,return_inverse=True)[1:3]
        # Relabel and remove padding
        labels_ = map_[labels_]
        nlabels_ = np.max(map_)
        assert nlabels_==len(idx_)-1, "DEBUG assertion"
        # Remap the wrap flags onto the gap-free labels (operate on the local
        # wrap_ just built, not on the stale self.wrap)
        wrap_ = tuple(None if x is None else x[idx_] for x in wrap_)
        # for axis in range(self._ndim):
        #     if wrap_[axis] is not None:
        #         wrap_[axis] = wrap_[axis][idx_]
        return labels_,nlabels_,wrap_

    def fill_holes(self):
        '''Fill the holes in binary objects while taking into account periodicity.
        In the non-periodic sense, a hole is a region of zeros which does not connect
        to a boundary. In the periodic sense, a hole is a region of zeros which is not
        connected to itself across the periodic boundaries.'''
        from scipy import ndimage
        # Reimplementation of "binary_fill_holes" from ndimage
        mask = np.logical_not(self._data) # only modify locations which are "False" at the moment
        tmp = np.zeros(mask.shape,bool) # create empty array to "grow from boundaries"
        ndimage.binary_dilation(tmp,structure=None,iterations=-1,
            mask=mask,output=self._data,border_value=1,
            origin=0) # everything connected to the boundary is now True in self._data
        # Remove holes which overlap the boundaries
        if any(self.periodicity):
            self._data = self._labels_periodic(map_to_zero=True)[0]>0
        # Invert to get the final result
        np.logical_not(self._data,self._data)
        # If labels have been computed already, recompute them to stay consistent
        if self._labels is not None:
            self.label()
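
    # A minimal usage sketch (hypothetical values): label a periodic binary
    # field, fill enclosed holes and drop everything but the largest feature.
    #
    #   bf = BinaryFieldNd(fld.threshold(0.5),periodicity=(True,True,True))
    #   bf.fill_holes()
    #   bf.label()
    #   keep = bf.feature_labels_by_volume()[0]
    #   bf.discard_features(np.setdiff1d(bf.feature_labels_by_volume(),[keep]))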

    def probe(self,idx,probe_label=False):
        '''Returns whether the point at idx is True or False, or, if
        probe_label=True, its label.'''
        if probe_label:
            return self._labels[tuple(idx)]
        else:
            return self._data[tuple(idx)]

    def volume(self,label):
        '''Returns the sum of True values. If 'label' is an integer, only the
        volume of the corresponding region is returned.'''
        if label is None:
            return np.sum(self.data)
        else:
            if self._labels is None: self.label()
            return np.sum(self.labels==label)

    def volumes(self):
        '''Returns the volume of all features, i.e. connected regions which have
        been labeled using the label() method, including the volume of the
        background region. The array is sorted by labels, i.e. vol[0] is the
        volume of the background region, vol[1] the volume of label 1, etc.
        Note: it is more efficient to retrieve all volumes at once than to
        query single labels via volume().'''
        if self._labels is None: self.label()
        return np.bincount(self.labels.ravel())

    def volume_domain(self):
        '''Returns the volume of the entire domain. Should be equal to
        sum(volumes()).'''
        dim = tuple(self._dim[axis]-1 if self.periodicity[axis] else self._dim[axis] for axis in range(self._ndim))
        return np.prod(dim)

    def feature_labels_by_volume(self,descending=True,return_volumes=False):
        '''Returns labels of connected regions sorted by volume.'''
        vol = self.volumes()[1:]
        labels = np.argsort(vol)+1
        if descending:
            labels = labels[::-1]
        # reorder the volumes to match the sorted labels
        vol = vol[labels-1]
        if return_volumes:
            return labels,vol
        else:
            return labels

    def discard_features(self,selection,return_mask=False):
        '''Removes features from data. Optionally returns a boolean array 'mask'
        which marks the position of features which have been removed.'''
        if self._labels is None:
            self.label()
        selection = self._select_features(selection)
        # Map tagged regions to zero in order to discard them
        map_ = np.array(range(self.nlabels+1),dtype=self._labels.dtype)
        map_[selection] = 0
        # Remove gaps from target mapping
        idx_,map_ = np.unique(map_,return_index=True,return_inverse=True)[1:3]
        # Discard regions
        self._labels = map_[self._labels]
        self.nlabels = np.max(map_)
        self.wrap = tuple(None if x is None else x[idx_] for x in self.wrap)
        # for axis in range(self._ndim):
        #     if self.wrap[axis] is not None:
        #         self.wrap[axis] = self.wrap[axis][idx_]
        if return_mask:
            mask = np.logical_and(self._labels==0,self._data)
        self._data = self._labels>0
        if return_mask: return mask

    def _select_features(self,selection):
        dtype = self._labels.dtype
        if selection is None:
            return np.array(range(1,self.nlabels+1))
        elif np.issubdtype(type(selection),np.integer):
            return np.array(selection,dtype=dtype,ndmin=1)
        elif isinstance(selection,(list,tuple,np.ndarray)):
            selection = np.array(selection)
            if selection.dtype==np.dtype('bool'):
                assert selection.ndim==1 and selection.shape[0]==self.nlabels+1,\
                    "Boolean indexing must provide count+1 values."
            else:
                selection = selection.astype(dtype)
                assert np.max(selection)<=self.nlabels and np.min(selection)>=0,\
                    "Entry in selection is out-of-bounds."
            return selection
        else:
            raise ValueError('Invalid input. Accepting int,list,tuple,ndarray.')


class ChunkIterator:
    '''Iterates through all chunks. 'snapshot' must be an instance
    of a class which returns a Field3d from the method call
    snapshot.field_chunk(rank,key,keep_ghost=keep_ghost).
    One example implementation is UCFSnapshot from suspendtools.ucf.'''

    def __init__(self,snapshot,key,keep_ghost=True):
        self.snapshot = snapshot
        self.key = key
        self.keep_ghost = keep_ghost
        self.iter_rank = 0

    def __iter__(self):
        self.iter_rank = 0
        return self

    def __next__(self):
        if self.iter_rank<self.snapshot.nproc():
            field = self.snapshot.field_chunk(
                self.iter_rank,self.key,keep_ghost=self.keep_ghost)
            self.iter_rank += 1
            return field
        else:
            raise StopIteration
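

# A minimal usage sketch for ChunkIterator (hypothetical snapshot object and
# constructor signature): assemble a global field from per-rank chunks using
# Field3d.allocate and insert_subfield from above.
#
#   snap = UCFSnapshot('snapshot.h5')   # any object providing nproc()/field_chunk()
#   glob = Field3d.allocate((256,256,256),origin,spacing,fill=0.0)
#   for chunk in ChunkIterator(snap,'velocity_x',keep_ghost=False):
#       glob.insert_subfield(chunk)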