Features3d now fully based on triangulation: needs to be tested and some routines remain to be implemented

Michael Krayer 2021-08-10 02:17:58 +02:00
parent 0c2db9fa79
commit fd23122470
1 changed file with 105 additions and 54 deletions

field.py

@@ -648,13 +648,11 @@ def gaussian_filter_umean_channel(array,spacing,sigma,truncate=4.0):
class Features3d:
def __init__(self,input,threshold,origin,spacing,periodicity,connect_diagonals=True,invert=False,has_ghost=False):
def __init__(self,input,threshold,origin,spacing,periodicity,invert=False,has_ghost=False):
assert len(origin)==3, "'origin' must be of length 3"
assert len(spacing)==3, "'spacing' must be of length 3"
assert len(periodicity)==3, "'periodicity' must be of length 3"
assert isinstance(input,np.ndarray), "'input' must be numpy.ndarray."
# Import libraries
import pyvista as pv
# Assign basic properties to class variables
self.origin = tuple(float(x) for x in origin)
self.spacing = tuple(float(x) for x in spacing)
@@ -664,26 +662,17 @@ class Features3d:
# If regions are supposed to be inverted, i.e. the interior consists of values
# smaller than the threshold instead of larger, change the sign of the array.
sign_invert = -1 if invert else +1
self._threshold = threshold
self._threshold = sign_invert*threshold
# '_data' is the array to be triangulated. One trailing ghost cell is
# required to get closed surfaces. The data is always copied: it likely
# needs to be copied for vtk anyway (which requires a Fortran-contiguous
# array), and this way there are never side effects on the input data.
if has_ghost:
self._data = np.asfortranarray(sign_invert*input)
self._data = sign_invert*input
else:
pw = tuple((0,1) if x else (0,0) for x in periodicity)
self._data = np.asfortranarray(np.pad(sign_invert*input,pw,mode='wrap'))
# 'binary' is a BinaryField which determines the connectivity of regions
# and provides morphological manipulations.
self.binary = BinaryFieldNd(self._data>=self._threshold,periodicity,
connect_diagonals=connect_diagonals,
deep=False,has_ghost=has_ghost)
# Compute features in 'binary' by labeling.
self.binary.label()
# Set arrays produced by triangulation to None
self._points = None
self._faces = None
self._data = np.pad(sign_invert*input,pw,mode='wrap')
@classmethod
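# Standalone sketch (not part of this commit) of what the 'wrap' padding
# above does: one trailing ghost layer per periodic axis, filled with
# wrapped-around values, so that a feature crossing a periodic boundary
# still yields a closed isosurface.
import numpy as np
a = np.arange(8).reshape(2,2,2)
pw = tuple((0,1) if p else (0,0) for p in (True,False,True))
print(np.pad(a,pw,mode='wrap').shape)   # (2, 2, 2) -> (3, 2, 3)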
@@ -700,32 +689,36 @@ class Features3d:
def faces(self): return self._faces
@property
def nfeatures(self): return self.binary.nlabels
def nfeatures(self): return self._nfeatures
def fill_holes(self):
self.binary.fill_holes()
li = ndimage.binary_erosion(self.binary._data)
self._data[li] = 2*self._threshold # arbitrary, only needs to be above threshold
if self._faces is not None:
self.triangulate(contour_method=self.__TRI_CONTMETH,
cellvol_normal_component=self.__TRI_NORMCOMP)
# Check if the volume is negative -> indicates the cell normal direction
# self.binary.fill_holes()
# li = ndimage.binary_erosion(self.binary._data)
# self._data[li] = 2*self._threshold # arbitrary, only needs to be above threshold
# if self._faces is not None:
# self.triangulate(contour_method=self.__TRI_CONTMETH,
# cellvol_normal_component=self.__TRI_NORMCOMP)
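# Possible triangulation-based route, following the note above (sketch;
# assumes triangulate() has run and filled '_offset'/'_cell_volumes'):
# a hole surface encloses its volume with inward-pointing normals, so its
# signed per-feature volume comes out negative:
# vols = np.array([v.sum() for v in np.split(self._cell_volumes,self._offset)])
# holes = vols<0.0   # candidates to merge into the enclosing feature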
return
def reduce_noise(self,threshold=1e-5,is_relative=True):
'''Removes all objects whose (binary-based) volume is smaller than a threshold.'''
if is_relative:
threshold = threshold*self.binary.volume_domain()
vol = self.binary.volumes()
li = vol<threshold
mask = self.binary.discard_features(li,return_mask=True)
self._data[mask] = np.nan#self._threshold
if self._faces is not None:
self.triangulate(contour_method=self.__TRI_CONTMETH,
cellvol_normal_component=self.__TRI_NORMCOMP)
# if is_relative:
# threshold = threshold*self.binary.volume_domain()
# vol = self.binary.volumes()
# li = vol<threshold
# mask = self.binary.discard_features(li,return_mask=True)
# self._data[mask] = np.nan#self._threshold
# if self._faces is not None:
# self.triangulate(contour_method=self.__TRI_CONTMETH,
# cellvol_normal_component=self.__TRI_NORMCOMP)
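# Possible triangulation-based route (sketch; same assumptions as in
# fill_holes above): threshold the per-feature triangulated volume
# instead of the binary voxel count.
# vols = np.array([v.sum() for v in np.split(self._cell_volumes,self._offset)])
# if is_relative: threshold = threshold*vols.sum()
# discard = np.abs(vols)<threshold   # features to drop from the stored arrays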
return
def triangulate(self,contour_method='flying_edges',cellvol_normal_component=2):
import pyvista as pv
from scipy import ndimage
import vtk
from scipy import ndimage, spatial
from time import time
# Save arguments in case we need to recreate triangulation later on
self.__TRI_CONTMETH = contour_method
@@ -742,28 +735,85 @@ class Features3d:
contour_ = datavtk.contour([self._threshold],method=contour_method,compute_scalars=False)
assert contour_.is_all_triangles(), "Contouring produced non-triangle cells."
print('CONTOUR:',time()-t)
# Interpolate labels from binary array: first the array needs to be
# dilated to ensure that we get proper values for the cell vertex interpolation.
# Compute the connectivity of the triangulated surface: first we run an
# ordinary connectivity filter which neglects periodic wrapping.
t = time()
# labels_ = ndimage.grey_dilation(self.binary._labels,size=(3,3,3),mode='wrap')
labels_ = ndimage.maximum_filter(self.binary._labels,size=(3,3,3),mode='wrap')
print('DILATION:',time()-t)
# Labels are interpolated on points, and then the first point of each cell determines
# the value for this cell. This allows us to skip cell center computation and reduce
# the number of points which need to be interpolated. ndimage's 'map_coordinates' is
# the fastest interpolation routine I could find, but coordinates need to be provided
# as pixel values.
alg = vtk.vtkPolyDataConnectivityFilter() # ~twice as fast as default vtkConnectivityFilter
alg.SetInputData(contour_)
alg.SetExtractionModeToAllRegions()
alg.SetColorRegions(True)
alg.Update()
contour_ = pv.filters._get_output(alg)
print('connectivity computed in',time()-t,'seconds')
# Now determine the boundary points, i.e. points which have a corresponding
# vertex on the opposite side of the periodic domain.
t = time()
pixel_coords = (contour_.points-self.origin)/self.spacing
point_val = ndimage.map_coordinates(labels_,pixel_coords.transpose(),order=0,mode='grid-wrap',prefilter=False)
cell_val = point_val[contour_.faces.reshape(contour_.n_faces,4)[:,1]]
print('INTERPOLATION:',time()-t)
points = contour_.points
bd_points_xyz = np.zeros((0,3),dtype=points.dtype)
bd_points_idx = np.zeros((0,),dtype=np.int64)
for axis in range(3):
if not self.periodicity[axis]: continue
# Compute position of boundary, period of domain and set a threshold for "on boundary" condition
bound_pos = datavtk.origin[axis]+datavtk.spacing[axis]*(datavtk.dimensions[axis]-1)
period = np.zeros((3,))
period[axis] = bound_pos-datavtk.origin[axis]
bound_dist = 1e-5*datavtk.spacing[axis]
# Lower boundary
li = np.abs(points[:,axis]-datavtk.origin[axis])<bound_dist
bd_points_idx = np.append(bd_points_idx,np.nonzero(li)[0],axis=0)
bd_points_xyz = np.append(bd_points_xyz,points[li,:],axis=0)
# Upper boundary
li = np.abs(points[:,axis]-bound_pos)<bound_dist
bd_points_idx = np.append(bd_points_idx,np.nonzero(li)[0],axis=0)
bd_points_xyz = np.append(bd_points_xyz,points[li,:]-period,axis=0)
print('Points selected in',time()-t,'seconds')
# Construct a KD Tree for efficient neighborhood search
t = time()
kd = spatial.KDTree(bd_points_xyz,leafsize=10,compact_nodes=True,copy_data=False,balanced_tree=True)
print('kd tree built in',time()-t,'seconds')
# Construct a map to get new labels for regions. We search for pairs of points
# separated by a very small distance; if their labels differ, the lower
# label becomes the new joint one.
t = time()
point_labels = contour_.point_arrays['RegionId']
nfeatures = np.max(point_labels)
map_ = np.array(range(nfeatures+1),dtype=point_labels.dtype)
overlap_dist = 1e-4*np.sqrt(np.square(datavtk.spacing).sum())
for (ii,jj) in kd.query_pairs(r=overlap_dist):
label_ii = point_labels[bd_points_idx[ii]]
label_jj = point_labels[bd_points_idx[jj]]
if label_ii!=label_jj:
source_ = np.maximum(label_ii,label_jj)
target_ = np.minimum(label_ii,label_jj)
while target_ != map_[target_]: # map it recursively
target_ = map_[target_]
map_[source_] = target_
#
map_ = np.unique(map_,return_inverse=True)[1]
point_labels = map_[point_labels]
nfeatures = np.max(map_)
print('mapped overlaps in',time()-t,'seconds')
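# Worked toy example of the merge map (illustration, not part of the commit):
# with map_=[0,1,2,3,4,5,6] and touching pairs {(1,2),(2,3),(5,6)}, the loop
# yields map_=[0,1,1,1,4,5,5]; np.unique(...,return_inverse=True)[1] then
# compacts the labels to [0,1,1,1,2,3,3].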
# pl = pv.Plotter()
# pl.add_mesh(contour_,scalars=point_labels,opacity=1.0,specular=1.0,interpolate_before_map=True)
# pl.show()
# Labels are now stored as point data. To convert them efficiently to cell
# data, the first point of each cell determines the value for this cell.
t = time()
ncells = contour_.n_faces
faces = contour_.faces.reshape(ncells,4)[:,:]
# cell_labels = np.zeros((ncells,))
# for ii,face in enumerate(faces):
# cell_labels[ii] = point_labels[face[1]]
cell_labels = point_labels[faces[:,1]]
print('cell interpolation in',time()-t,'seconds')
# While we have the full contour in memory, compute the area and volume per cell. For
# the volume computation, an arbitrary component of the normal has to be chosen which
# defaults to the z-component and is set by 'cellvol_normal_component'.
t = time()
faces = contour_.faces.reshape(contour_.n_faces,4)[:,:]
points = contour_.points
# faces = contour_.faces.reshape(contour_.n_faces,4)[:,:]
# points = contour_.points
X = points[faces[:,1],:]
Y = points[faces[:,2],:]
Z = points[faces[:,3],:]
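# The actual area/volume lines are elided by the diff; a plausible sketch of
# the step the comment above describes (assuming outward-oriented triangles
# and the divergence theorem with F = x_c*e_c for the chosen component c):
# c = cellvol_normal_component
# N = np.cross(Y-X,Z-X)                       # unnormalized normals, |N| = 2*area
# area = 0.5*np.linalg.norm(N,axis=1)
# vol = (X[:,c]+Y[:,c]+Z[:,c])/3.0*0.5*N[:,c] # centroid height times n_c*area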
@@ -776,13 +826,14 @@ class Features3d:
# and group them. Internally we will store the points in one array and the cells in
# nlabel tuples of ndarrays.
t = time()
offset = np.cumsum(np.bincount(cell_val))[1:-1]
ind = np.argsort(cell_val)
offset = np.cumsum(np.bincount(cell_labels))[1:-1]
ind = np.argsort(cell_labels)
self._offset = offset
self._faces = contour_.faces.reshape(contour_.n_faces,4)[ind,:]
self._faces = faces[ind,1:]
self._points = points
self._cell_areas = area[ind]
self._cell_volumes = vol[ind]
self._nfeatures = nfeatures
print('FINALIZING:',time()-t)
return
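# Hypothetical usage (not part of this commit): with cells sorted by label,
# each feature's triangles are a contiguous slice of the stored arrays; e.g.
# feature k's surface as a pyvista mesh ('_faces' dropped the leading vertex
# count, which vtk needs back):
# cells = np.split(self._faces,self._offset)[k]
# mesh = pv.PolyData(self._points,np.insert(cells,0,3,axis=1).ravel())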
@@ -839,8 +890,8 @@ class Features3d:
def which_feature(self,coords,method='binary'):
coords = np.array(coords)
assert coords.ndim==2 and coords.shape[1]==3, "'coords' must be provided as an Nx3 array."
if method=='binary':
idx = np.round()
# if method=='binary':
# idx = np.round()
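# A possible completion of this branch (sketch; assumes a per-voxel
# label array 'labels', which this commit no longer stores):
# idx = np.round((coords-self.origin)/self.spacing).astype(int)
# idx %= labels.shape               # wrap periodic axes (assumption)
# return labels[tuple(idx.T)]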
# elif method=='triangulation':
return