first production version for filtered plotting

This commit is contained in:
Michael Krayer 2021-06-01 12:16:10 +02:00
parent 8dd8161537
commit 3366816adf
3 changed files with 759 additions and 378 deletions

343
field.py
View File

@ -4,19 +4,74 @@ class Field3d:
assert len(origin)==3, "'origin' must be of length 3"
assert len(spacing)==3, "'spacing' must be of length 3"
self.data = numpy.array(data)
self.numpoints = self.data.shape
self.origin = tuple([float(x) for x in origin])
self.spacing = tuple([float(x) for x in spacing])
self.eps_collapse = 1e-12
self._dim = None
return
def __str__(self):
str = 'Field3d with\n'
str+= ' dimension: {}, {}, {}\n'.format(*self.numpoints)
str+= ' dimension: {}, {}, {}\n'.format(*self.dim())
str+= ' origin: {:G}, {:G}, {:G}\n'.format(*self.origin)
str+= ' spacing: {:G}, {:G}, {:G}'.format(*self.spacing)
str+= ' spacing: {:G}, {:G}, {:G}\n'.format(*self.spacing)
str+= ' datatype: {}'.format(self.dtype())
return str
def __add__(self, other):
    """Pointwise sum; `other` may be a Field3d on the same grid or a scalar/array."""
    if isinstance(other, Field3d):
        assert self.has_same_grid(other), "Grid mismatch."
        other = other.data
    return Field3d(self.data + other, self.origin, self.spacing)
def __sub__(self, other):
    """Pointwise difference; `other` may be a Field3d on the same grid or a scalar/array."""
    if isinstance(other, Field3d):
        assert self.has_same_grid(other), "Grid mismatch."
        other = other.data
    return Field3d(self.data - other, self.origin, self.spacing)
def __mul__(self, other):
    """Pointwise product; `other` may be a Field3d on the same grid or a scalar/array."""
    if isinstance(other, Field3d):
        assert self.has_same_grid(other), "Grid mismatch."
        other = other.data
    return Field3d(self.data * other, self.origin, self.spacing)
def __radd__(self, other):
    """Reflected addition (scalar/array + field)."""
    result = other + self.data
    return Field3d(result, self.origin, self.spacing)
def __rmul__(self, other):
    """Reflected multiplication (scalar/array * field)."""
    result = other * self.data
    return Field3d(result, self.origin, self.spacing)
def __pow__(self, other):
    """Pointwise power with exponent `other`."""
    result = self.data ** other
    return Field3d(result, self.origin, self.spacing)
def __iadd__(self, other):
    """In-place pointwise addition; accepts a Field3d (same grid) or scalar/array."""
    rhs = other
    if isinstance(other, Field3d):
        assert self.has_same_grid(other), "Grid mismatch."
        rhs = other.data
    self.data += rhs
    return self
def __isub__(self, other):
    """In-place pointwise subtraction; accepts a Field3d (same grid) or scalar/array."""
    rhs = other
    if isinstance(other, Field3d):
        assert self.has_same_grid(other), "Grid mismatch."
        rhs = other.data
    self.data -= rhs
    return self
def __imul__(self, other):
    """In-place pointwise multiplication; accepts a Field3d (same grid) or scalar/array."""
    rhs = other
    if isinstance(other, Field3d):
        assert self.has_same_grid(other), "Grid mismatch."
        rhs = other.data
    self.data *= rhs
    return self
# TBD: this should return another Field3d object
# def __getitem__(self,val):
# assert isinstance(val,tuple) and len(val)==3, "Field3d must be indexed by [ii,jj,kk]."
@ -45,6 +100,30 @@ class Field3d:
assert (chunk['nzl']+2*chunk['ighost'])==nz, "Invalid chunk data: nzl != chunk['data'].shape[2]"
return cls(chunk['data'],origin=(xo,yo,zo),spacing=(dx,dy,dz))
@classmethod
def allocate(cls, dim, origin, spacing, fill=None, dtype=numpy.float64, pseudo=False):
    '''Allocates an empty field, or a field filled with 'fill'.

    With pseudo=True no storage is allocated; only the grid metadata is kept
    and the requested dim is remembered (used by dim() queries).
    '''
    assert isinstance(dim,(tuple,list,numpy.ndarray)) and len(dim)==3,\
        "'dim' must be a tuple/list of length 3."
    assert isinstance(origin,(tuple,list,numpy.ndarray)) and len(origin)==3,\
        "'origin' must be a tuple/list of length 3."
    assert isinstance(spacing,(tuple,list,numpy.ndarray)) and len(spacing)==3,\
        "'spacing' must be a tuple/list of length 3."
    if pseudo:
        # Zero-size buffer; remember the nominal dimensions only.
        field = cls(numpy.empty((0, 0, 0)), origin, spacing)
        field._dim = dim
        return field
    data = numpy.empty(dim, dtype=dtype) if fill is None else numpy.full(dim, fill, dtype=dtype)
    return cls(data, origin, spacing)
def copy(self):
    """Return a copy of this field (data buffer duplicated, grid metadata reused)."""
    duplicate = self.data.copy()
    return Field3d(duplicate, self.origin, self.spacing)
def insert_subfield(self,subfield):
assert all([abs(subfield.spacing[ii]-self.spacing[ii])<self.eps_collapse
for ii in range(3)]), "spacing differs."
@ -54,16 +133,16 @@ class Field3d:
assert all(self.is_within_bounds(subfield.endpoint(),axis=None)), "subfield endpoint is out-of-bounds."
#ib,jb,kb = [int(round((subfield.origin[ii]-self.origin[ii])/self.spacing[ii])) for ii in range(3)]
ib,jb,kb = self.nearest_gridpoint(subfield.origin,axis=None)
nx,ny,nz = subfield.numpoints
nx,ny,nz = subfield.dim()
self.data[ib:ib+nx,jb:jb+ny,kb:kb+nz] = subfield.data[:,:,:]
return
def extract_subfield(self,idx_origin,numpoints,stride=(1,1,1)):
assert all(idx_origin[ii]>=0 and idx_origin[ii]<self.numpoints[ii] for ii in range(3)),\
def extract_subfield(self,idx_origin,dim,stride=(1,1,1)):
assert all(idx_origin[ii]>=0 and idx_origin[ii]<self.dim(axis=ii) for ii in range(3)),\
"'origin' is out-of-bounds."
assert all(idx_origin[ii]+stride[ii]*(numpoints[ii]-1)<self.numpoints[ii] for ii in range(3)),\
assert all(idx_origin[ii]+stride[ii]*(dim[ii]-1)<self.dim(axis=ii) for ii in range(3)),\
"endpoint is out-of-bounds."
sl = tuple(slice(idx_origin[ii],idx_origin[ii]+stride[ii]*numpoints[ii],stride[ii]) for ii in range(3))
sl = tuple(slice(idx_origin[ii],idx_origin[ii]+stride[ii]*dim[ii],stride[ii]) for ii in range(3))
origin = self.coordinate(idx_origin)
spacing = tuple(self.spacing[ii]*stride[ii] for ii in range(3))
data = self.data[sl].copy()
@ -74,7 +153,7 @@ class Field3d:
assert len(idx)==3, "If 'axis' is None, 'idx' must be a tuple/list of length 3."
return tuple(self.coordinate(idx[ii],axis=ii) for ii in range(3))
assert axis<3, "'axis' must be one of 0,1,2."
assert idx<self.numpoints[axis], "'idx' is out-of-bounds."
assert idx<self.dim(axis=axis), "'idx' is out-of-bounds."
return self.origin[axis]+idx*self.spacing[axis]
def nearest_gridpoint(self,coord,axis=None,lower=False):
@ -103,61 +182,84 @@ class Field3d:
return tuple(self.is_within_bounds(coord[ii],axis=ii) for ii in range(3))
assert axis<3, "'axis' must be one of 0,1,2."
idx_nearest = self.nearest_gridpoint(coord,axis=axis)
if idx_nearest>0 and idx_nearest<self.numpoints[axis]-1:
if idx_nearest>0 and idx_nearest<self.dim(axis=axis)-1:
return True
dist_nearest = self.distance_to_nearest_gridpoint(coord,axis=axis)
if (idx_nearest==0 or idx_nearest==self.numpoints[axis]-1) and abs(dist_nearest)<self.eps_collapse:
if (idx_nearest==0 or idx_nearest==self.dim(axis=axis)-1) and abs(dist_nearest)<self.eps_collapse:
return True
else:
return False
def has_same_grid(self, other):
    """True if `other` shares this field's origin and spacing (within eps_collapse).

    BUG FIX: the origin comparison appeared twice (merge/diff artifact);
    each property is now checked exactly once.
    """
    if not self.has_same_origin(other): return False
    if not self.has_same_spacing(other): return False
    return True
def has_same_origin(self, other, axis=None):
    """True when `other`'s origin matches ours within eps_collapse.
    `other` may be a Field3d or a raw 3-sequence of coordinates."""
    if axis is None:
        return all(self.has_same_origin(other, axis=k) for k in range(3))
    ref = other.origin if isinstance(other, Field3d) else other
    return abs(ref[axis] - self.origin[axis]) < self.eps_collapse
def has_same_spacing(self, other, axis=None):
    """True when `other`'s spacing matches ours within eps_collapse.
    `other` may be a Field3d or a raw 3-sequence of spacings."""
    if axis is None:
        return all(self.has_same_spacing(other, axis=k) for k in range(3))
    ref = other.spacing if isinstance(other, Field3d) else other
    return abs(ref[axis] - self.spacing[axis]) < self.eps_collapse
def has_same_dim(self, other, axis=None):
    """True when `other` has the same number of points (per axis, or all three)."""
    if axis is None:
        return all(self.has_same_dim(other, axis=k) for k in range(3))
    ref = other.dim(axis=axis) if isinstance(other, Field3d) else other
    return ref == self.dim(axis=axis)
def dim(self, axis=None):
    """Number of grid points along `axis`, or a 3-tuple when axis is None.

    Pseudo fields (allocated with pseudo=True) report the stored _dim
    instead of the shape of the (empty) data buffer.

    BUG FIX: a stale `return self.numpoints[axis]` line from the previous
    revision preceded the _dim logic, making it unreachable; removed.
    """
    if axis is None:
        return tuple(self.dim(axis=ii) for ii in range(3))
    assert axis < 3, "'axis' must be one of 0,1,2."
    if self._dim is not None:
        return self._dim[axis]
    return self.data.shape[axis]
def endpoint(self, axis=None):
    """Coordinate of the last grid point along `axis` (3-tuple when axis is None).

    BUG FIX: a stale numpoints-based return from the previous revision
    preceded the dim()-based one, shadowing it; removed.
    """
    if axis is None:
        return tuple(self.endpoint(axis=ii) for ii in range(3))
    assert axis < 3, "'axis' must be one of 0,1,2."
    return self.origin[axis] + (self.dim(axis=axis) - 1) * self.spacing[axis]
def dtype(self):
    """Return the numpy dtype of the underlying data array."""
    return self.data.dtype
def convert_dtype(self, dtype):
    """Convert the data array to `dtype` in place (no copy if already that dtype)."""
    self.data = self.data.astype(dtype, copy=False)
    return
def derivative(self, axis, only_keep_interior=False, shift_origin='before'):
    '''Computes derivative wrt to direction 'axis' with 2nd order finite differences
    centered between the origin grid points.

    The result lives on a grid staggered by half a spacing:
      shift_origin='before': backward stencil, origin moves -dx/2, first plane is NaN.
      shift_origin='after' : forward stencil, origin moves +dx/2, last plane is NaN.
    only_keep_interior=True drops the NaN boundary plane (adjusting the origin).

    BUG FIXES vs previous revision:
      * stale lines of the superseded (numpy.gradient / padding-based)
        implementation were interleaved here and have been removed;
      * the first branch compared against 'left', which the assert above never
        allows, so the 'before' case was unreachable -- now compares 'before';
      * for only_keep_interior with 'before', slice(-1,None) kept only the last
        plane instead of dropping the NaN first plane -- now slice(1,None).
    '''
    from scipy import ndimage
    assert axis < 3, "'axis' must be one of 0,1,2."
    assert shift_origin in ('before','after'), "'shift_origin' must be one of {'before','after'}."
    origin = list(self.origin)
    if shift_origin == 'before':
        corr_orig = 0   # backward difference: out[i] = (f[i]-f[i-1])/dx
        origin[axis] -= 0.5*self.spacing[axis]
    else:
        corr_orig = -1  # forward difference: out[i] = (f[i+1]-f[i])/dx
        origin[axis] += 0.5*self.spacing[axis]
    data = ndimage.correlate1d(self.data, numpy.array([-1., 1.])/self.spacing[axis],
                               axis=axis, mode='constant', cval=numpy.nan, origin=corr_orig)
    if only_keep_interior:
        sl = 3*[slice(None)]
        if shift_origin == 'before':
            sl[axis] = slice(1, None)          # drop NaN plane at the lower end
            origin[axis] += self.spacing[axis]  # origin follows the dropped plane
        else:
            sl[axis] = slice(0, -1)            # drop NaN plane at the upper end
        data = data[tuple(sl)]
    return Field3d(data, origin, self.spacing)
def gradient(self,axis,preserve_origin=False,only_keep_interior=False,add_border='before'):
@ -167,9 +269,9 @@ class Field3d:
def laplacian(self,only_keep_interior=False):
'''Computes the Laplacian of a field.'''
from scipy import ndimage
data = ndimage.correlate1d(self.data,[1.,-2.,1.]/self.spacing[0],axis=0,mode='constant',cval=numpy.nan,origin=0)
data += ndimage.correlate1d(self.data,[1.,-2.,1.]/self.spacing[1],axis=1,mode='constant',cval=numpy.nan,origin=0)
data += ndimage.correlate1d(self.data,[1.,-2.,1.]/self.spacing[2],axis=2,mode='constant',cval=numpy.nan,origin=0)
data = ndimage.correlate1d(self.data,numpy.array([1.,-2.,1.])/self.spacing[0]**2,axis=0,mode='constant',cval=numpy.nan,origin=0)
data += ndimage.correlate1d(self.data,numpy.array([1.,-2.,1.])/self.spacing[1]**2,axis=1,mode='constant',cval=numpy.nan,origin=0)
data += ndimage.correlate1d(self.data,numpy.array([1.,-2.,1.])/self.spacing[2]**2,axis=2,mode='constant',cval=numpy.nan,origin=0)
origin = list(self.origin)
if only_keep_interior:
data = data[1:-1,1:-1,1:-1]
@ -191,7 +293,7 @@ class Field3d:
if integrate_axis[axis]:
axes.append(axis)
if average:
weight *= self.numpoints[axis]
weight *= self.dim(axis=axis)
else:
weight *= self.spacing[axis]
axes = tuple(axes)
@ -240,49 +342,89 @@ class Field3d:
radius.append(int(truncate*sigma_img[ii]+0.5))
return tuple(radius)
def shift_origin(self, rel_shift, only_keep_interior=False):
    '''Shifts the origin of a field by a fraction of the grid spacing using
    linear interpolation along each axis.

    rel_shift: per-axis shift in units of spacing, each in [-1.0, 1.0].
    only_keep_interior: drop the boundary plane that becomes NaN after shifting.

    BUG FIXES vs previous revision:
      * removed stale interleaved lines of the superseded slice-based
        implementation (and its fully commented-out dead copy);
      * the assert accepts the inclusive range [-1,1] but the message said
        "(-1.0,1.0)" -- message corrected to match;
      * `data[sl]` indexed with a *list* of slices, which modern numpy
        rejects -- now indexes with tuple(sl).
    '''
    from scipy import ndimage
    assert isinstance(rel_shift,(tuple,list,numpy.ndarray)) and len(rel_shift)==3,\
        "'shift' must be tuple/list with length 3."
    assert all([rel_shift[ii]>=-1.0 and rel_shift[ii]<=1.0 for ii in range(3)]),\
        "'shift' must be in [-1.0,1.0]."
    data = self.data.copy()
    origin = list(self.origin)
    sl = 3*[slice(None)]
    for axis in range(3):
        if abs(rel_shift[axis]) < self.eps_collapse:
            continue  # shift negligible along this axis
        elif rel_shift[axis] > 0:
            # Interpolate towards the next-higher grid point; last plane -> NaN.
            weights = (1.0-rel_shift[axis], rel_shift[axis])
            data = ndimage.correlate1d(data, weights, axis=axis,
                                       mode='constant', cval=numpy.nan, origin=-1)
            origin[axis] += rel_shift[axis]*self.spacing[axis]
            if only_keep_interior:
                sl[axis] = slice(0, -1)
        else:
            # Interpolate towards the next-lower grid point; first plane -> NaN.
            weights = (-rel_shift[axis], 1.0+rel_shift[axis])
            data = ndimage.correlate1d(data, weights, axis=axis,
                                       mode='constant', cval=numpy.nan, origin=0)
            origin[axis] += rel_shift[axis]*self.spacing[axis]
            if only_keep_interior:
                sl[axis] = slice(1, None)
                origin[axis] += self.spacing[axis]
    if only_keep_interior:
        data = data[tuple(sl)]
    return Field3d(data, origin, self.spacing)
def change_grid(self,origin,spacing,numpoints,padding=None,numpad=1):
def relative_shift(self, field):
    '''Compute the relative shift (in terms of spacing) to shift self onto field.'''
    assert self.has_same_spacing(field), "spacing differs."
    shift = []
    for ax in range(3):
        dist = field.origin[ax] - self.origin[ax]
        # Collapse sub-epsilon offsets to exactly zero.
        shift.append(dist/self.spacing[ax] if abs(dist) > self.eps_collapse else 0.0)
    return tuple(shift)
def change_grid(self,origin,spacing,dim,padding=None,numpad=1):
assert all([origin[ii]>=self.origin[ii] for ii in range(0,3)]), "New origin is out of bounds."
endpoint = [origin[ii]+(numpoints[ii]-1)*spacing[ii] for ii in range(0,3)]
endpoint = [origin[ii]+(dim[ii]-1)*spacing[ii] for ii in range(0,3)]
assert all([endpoint[ii]<=self.endpoint(ii) for ii in range(0,3)]), "New end point is out of bounds."
# Allocate (possibly padded array)
origin_pad,numpoints_pad,sl_out = padding(origin,spacing,numpoints,padding,numpad):
data = numpy.zeros(numpoints_pad,dtype=self.data.dtype)
origin_pad,dim_pad,sl_out = padding(origin,spacing,dim,padding,numpad)
data = numpy.zeros(dim_pad,dtype=self.data.dtype)
# Trilinear interpolation
if numpy.allclose(spacing,self.spacing):
# spacing is the same: we can construct universal weights for the stencil
@ -295,14 +437,14 @@ class Field3d:
for kk in range(0,2):
if c[ii,jj,kk]>self.eps_collapse:
data[sl_out] += c[ii,jj,kk]*self.data[
i0+ii:i0+ii+numpoints[0],
j0+jj:j0+jj+numpoints[1],
k0+kk:k0+kk+numpoints[2]]
i0+ii:i0+ii+dim[0],
j0+jj:j0+jj+dim[1],
k0+kk:k0+kk+dim[2]]
else:
data_ref = data[sl_out]
for ii in range(0,numpoints[0]):
for jj in range(0,numpoints[1]):
for kk in range(0,numpoints[2]):
for ii in range(0,dim[0]):
for jj in range(0,dim[1]):
for kk in range(0,dim[2]):
coord = (
origin[0]+ii*spacing[0],
origin[1]+jj*spacing[1],
@ -326,36 +468,36 @@ class Field3d:
return val
@staticmethod
def padding(origin, spacing, dim, padding, numpad):
    '''Grow a grid description by `numpad` ghost layers.

    padding: None (no-op) or one of 'before'/'after'/'both'.
    numpad:  int (same on all axes) or 3-sequence of ints.
    Returns (origin_pad, dim_pad, sl_out) where sl_out slices the original
    (unpadded) region out of an array allocated with dim_pad.

    BUG FIX: `numpy.fill` does not exist -- the array constructor is
    `numpy.full`. Also removed stale numpoints-named lines interleaved
    from the previous revision.
    '''
    if isinstance(numpad, int):
        numpad = numpy.full(3, numpad, dtype=int)
    else:
        numpad = numpy.array(numpad, dtype=int)
    assert len(numpad)==3, "'numpad' must be either an integer or tuple/list of length 3."
    origin_pad = numpy.array(origin)
    dim_pad = numpy.array(dim)
    sl_out = [slice(None), slice(None), slice(None)]
    if padding is not None:
        if padding=='before':
            dim_pad += numpad
            origin_pad -= numpad*spacing
            for axis in range(3):
                sl_out[axis] = slice(numpad[axis], None)
        elif padding=='after':
            dim_pad += numpad
            for axis in range(3):
                sl_out[axis] = slice(0, -numpad[axis])
        elif padding=='both':
            dim_pad += 2*numpad
            origin_pad -= numpad*spacing
            for axis in range(3):
                sl_out[axis] = slice(numpad[axis], -numpad[axis])
        else:
            raise ValueError("'padding' must either be None or one of {'before','after','both'}.")
    sl_out = tuple(sl_out)
    origin_pad = tuple(origin_pad)
    dim_pad = tuple(dim_pad)
    return (origin_pad, dim_pad, sl_out)
def weights_trilinear(self,rel_dist):
assert len(rel_dist)==3, "len(rel_dist) must be 3."
@ -408,6 +550,19 @@ class Field3d:
"'origin' must be tuple of length 3."
return self.to_vtk(deep=deep).slice(normal=normal,origin=origin)
def gaussian_filter_umean_channel(array, spacing, sigma, truncate=4.0):
    '''Gaussian-filter a (1,ny,1) numpy array holding the mean streamwise
    channel velocity (or a similar 1-D wall-normal profile).

    Because the profile is defined from wall to wall, scipy's 'mirror'
    boundary mode is used, so no ghost cells are required.
    '''
    from scipy import ndimage
    assert array.ndim==3, "Expected an array with three dimensions/axes."
    assert array.shape[0]==1 and array.shape[1]>1 and array.shape[2]==1,\
        "Expected an array with shape (1,ny,1)."
    # Filter width expressed in grid points.
    return ndimage.gaussian_filter1d(array, sigma/spacing, axis=1,
                                     truncate=truncate, mode='mirror')
class ChunkIterator:
'''Iterates through all chunks. 'snapshot' must be an instance
of a class which returns a Field3d from the method call

View File

@ -1,22 +1,43 @@
import numpy as np
class PPP:
"""Parallel Python Postprocessor for suspend"""
def __init__(self,comm,func_load,num_ghost,precision,origin,spacing,periodicity,bounds,proc,chunks_per_proc):
'''Constructor: except for comm, only rank 0 needs initialized data.'''
self.comm = comm
self.rank = comm.Get_rank()
self.func_load = func_load
self.init_settings(num_ghost,precision)
self.init_domain(origin,spacing,periodicity,bounds)
self.init_procgrid(proc,chunks_per_proc)
self.field = {}
self.symmetries = {}
def __init__(self,comm,func_load,
num_ghost,chunks_per_proc,origin,spacing,periodicity,bounds,
proc_grid,nxp,nyp,nzp,
proc_grid_ext,nxp_ext,nyp_ext,nzp_ext,
nghbr,field,symmetries):
'''Class constructor: it is expected that all communcation is done in the corresponding
class methods, such that every rank already holds the required information.'''
# Initialize object with passed arguments
self.comm = comm
self.func_load = func_load
self.num_ghost = num_ghost
self.chunks_per_proc = chunks_per_proc
self.origin = origin
self.spacing = spacing
self.periodicity = periodicity
self.bounds = bounds
self.proc_grid = proc_grid
self.nxp,self.nyp,self.nzp = nxp,nyp,nzp
self.proc_grid_ext = proc_grid_ext
self.nxp_ext,self.nyp_ext,self.nzp_ext = nxp_ext,nyp_ext,nzp_ext
self.nghbr = nghbr
self.field = field
self.symmetries = symmetries
# Initialize some deduced variables
self.rank = comm.Get_rank()
self.nproc = nxp*nyp*nzp
self.ip,self.jp,self.kp = self.position_from_rank(self.rank,external=False)
self.nghx,self.nghy,self.nghz = num_ghost
self.xperiodic,self.yperiodic,self.zperiodic = periodicity
return
@classmethod
def from_snapshot(cls,snap,chunks_per_proc=(1,1,1),num_ghost=(1,1,1),precision='float64'):
def from_snapshot(cls,snap,chunks_per_proc=(1,1,1),num_ghost=(1,1,1)):
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# Let rank 0 read the processor grid as well as some info and then broadcast it
if rank==0:
proc = snap.procgrid()
origin = snap.origin()
@ -24,182 +45,53 @@ class PPP:
periodicity = snap.periodicity()
bounds = snap.bounds()
else:
proc = None
origin = None
spacing = None
periodicity = None
bounds = None
proc,origin,spacing,periodicity,bounds = 5*[None]
proc = comm.bcast(proc ,root=0)
origin = comm.bcast(origin ,root=0)
spacing = comm.bcast(spacing ,root=0)
periodicity = comm.bcast(periodicity,root=0)
bounds = comm.bcast(bounds ,root=0)
# Make sure all processors got the same arguments
chunks_per_proc = comm.bcast(chunks_per_proc,root=0)
num_ghost = comm.bcast(num_ghost,root=0)
# Set data loading function
func_load = snap.field_chunk
return cls(comm,func_load,num_ghost,precision,origin,spacing,periodicity,bounds,proc,chunks_per_proc)
# Construct new processor grid
(proc_grid,nxp,nyp,nzp,
proc_grid_ext,nxp_ext,nyp_ext,nzp_ext,
nghbr) = PPP._construct_procgrid(comm,proc,chunks_per_proc,num_ghost,periodicity)
# Initially, no field is loaded
field = {}
symmetries = {}
return cls(comm,func_load,
num_ghost,chunks_per_proc,origin,spacing,periodicity,bounds,
proc_grid,nxp,nyp,nzp,
proc_grid_ext,nxp_ext,nyp_ext,nzp_ext,
nghbr,field,symmetries)
def init_settings(self,num_ghost,precision):
    '''Initializes PPP settings for all processors.

    Rank 0's values are authoritative: they are broadcast so that every
    rank ends up with identical num_ghost / precision settings.
    '''
    # TBD: some assertions
    self.num_ghost = self.comm.bcast(num_ghost,root=0)
    self.precision = self.comm.bcast(precision,root=0)
    # Some shortcuts
    # Per-axis ghost-cell counts (num_ghost is unpacked as a 3-sequence).
    self.nghx,self.nghy,self.nghz = self.num_ghost
@classmethod
def from_state(cls,filename):
    '''Recreate a PPP instance from per-rank pickle files written earlier.
    Each rank reads 'filename.<rank>.pickle'; the stored processor layout
    must match the current number of MPI processes.'''
    import pickle
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    path = filename+'.{:05d}.pickle'.format(rank)
    with open(path,"rb") as f:
        state = pickle.load(f)
    (num_ghost,chunks_per_proc,origin,spacing,periodicity,bounds,
        proc_grid,nxp,nyp,nzp,nghbr,field,symmetries) = state
    nproc_required = nxp*nyp*nzp
    assert nproc_required==comm.Get_size(), "The loaded state requires {} processors, but "\
        "we are currently running with {}.".format(nproc_required,comm.Get_size())
    # The external (file-side) grid and the loader are unknown after a restart.
    func_load = None
    proc_grid_ext = None
    nxp_ext,nyp_ext,nzp_ext = 3*[None]
    return cls(comm,func_load,
               num_ghost,chunks_per_proc,origin,spacing,periodicity,bounds,
               proc_grid,nxp,nyp,nzp,
               proc_grid_ext,nxp_ext,nyp_ext,nzp_ext,
               nghbr,field,symmetries)
def init_symmetries(self,key,mirror=(True,True)):
    '''Sets the symmetries for ghost cells behind the wall'''
    # Example: wall in y
    # No-slip boundary (no mirror) free-slip boundary (mirror)
    # u -> -u u -> u
    # v -> -v v -> -v
    # w -> -w w -> w
    # p -> p p -> p
    import numpy as np
    sym = np.zeros((3,3,3),dtype='i')
    periodic = (self.xperiodic, self.yperiodic, self.zperiodic)
    normal_key = ('u','v','w')           # velocity component normal to each wall pair
    idx_lo = ((0,1,1),(1,0,1),(1,1,0))   # neighbor-array entry for the lower wall
    idx_hi = ((2,1,1),(1,2,1),(1,1,2))   # neighbor-array entry for the upper wall
    for ax in range(3):
        if periodic[ax]:
            continue  # periodic axes keep symmetry factor 0 (no wall)
        if key == normal_key[ax]:
            # Wall-normal velocity always flips sign at the wall.
            sym[idx_lo[ax]] = -1
            sym[idx_hi[ax]] = -1
        else:
            sym[idx_lo[ax]] = 1 if mirror[0] else -1
            sym[idx_hi[ax]] = 1 if mirror[1] else -1
    self.symmetries[key] = sym
def init_domain(self,origin,spacing,periodicity,bounds):
    '''Broadcast the domain description from rank 0 and store it on every rank.'''
    # TBD: some assertions
    bc = self.comm.bcast
    self.origin = bc(origin,root=0)
    self.spacing = bc(spacing,root=0)
    self.periodicity = bc(periodicity,root=0)
    self.bounds = bc(bounds,root=0)
    # Convenience periodicity flags per axis
    self.xperiodic,self.yperiodic,self.zperiodic = self.periodicity
    return
def init_procgrid(self,proc,chunks_per_proc):
    # Note: requires nghx, xperiodic to be set
    '''Read input processor grid, compute processor grid for workers.

    Rank 0 validates the external (file-side) processor grid `proc`,
    coarsens it by `chunks_per_proc` chunks per worker along each axis,
    and broadcasts both grids plus the derived layout to all ranks.
    Finally the 3x3x3 neighbor-rank array for ghost-cell exchange is built.
    '''
    import numpy as np
    self.chunks_per_proc = self.comm.bcast(chunks_per_proc,root=0)
    # NOTE(review): nxcpp/nycpp/nzcpp are taken from the *local* argument,
    # not from the broadcast value -- assumes all ranks pass the same
    # chunks_per_proc; verify against callers.
    self.nxcpp = chunks_per_proc[0]
    self.nycpp = chunks_per_proc[1]
    self.nzcpp = chunks_per_proc[2]
    if self.rank==0:
        # Assert proc and add it to class
        assert all(k in proc for k in ('u','v','w','p','s')), "'proc' must be a dictionary with "\
            "keys 'u','v','w','p','s'"
        for key in proc:
            # Entries hold begin/end indices per axis: (xb,xe,yb,ye,zb,ze).
            assert len(proc[key])==6, "Entries of 'proc' must have length of 6."
        proc_grid_ext = proc
        # Initialize chunks per processor
        # Verify that this processor grid can be redistributed onto the requested processor layout
        nxp_ext = len(proc_grid_ext['u'][0])
        nyp_ext = len(proc_grid_ext['u'][2])
        nzp_ext = len(proc_grid_ext['u'][4])
        nproc_ext = nxp_ext*nyp_ext*nzp_ext
        #
        assert nxp_ext%self.nxcpp==0, "Number of processors must be divisible by the number "\
            "of processors per process. (nxp_ext={}, nxcpp={})".format(
            nxp_ext,self.nxcpp)
        assert nyp_ext%self.nycpp==0, "Number of processors must be divisible by the number "\
            "of processors per process. (nyp_ext={}, nycpp={})".format(
            nyp_ext,self.nycpp)
        assert nzp_ext%self.nzcpp==0, "Number of processors must be divisible by the number "\
            "of processors per process. (nzp_ext={}, nzcpp={})".format(
            nzp_ext,self.nzcpp)
        # Determine the new processor layout and verify total number of MPI processes
        nxp = nxp_ext//self.nxcpp
        nyp = nyp_ext//self.nycpp
        nzp = nzp_ext//self.nzcpp
        nproc = nxp*nyp*nzp
        assert nproc==self.comm.Get_size(), "Number of MPI processes does not match the requested "\
            "processor layout. (MPI procs: {}, required procs: {})".format(
            self.comm.Get_size(),nproc)
        # Construct internal processor grid
        # Begin indices: first chunk of each group; end indices: last chunk.
        proc_grid = {}
        for key in proc_grid_ext:
            proc_grid[key] = [None]*6
            proc_grid[key][0] = proc_grid_ext[key][0][0::self.nxcpp]
            proc_grid[key][1] = proc_grid_ext[key][1][self.nxcpp-1::self.nxcpp]
            proc_grid[key][2] = proc_grid_ext[key][2][0::self.nycpp]
            proc_grid[key][3] = proc_grid_ext[key][3][self.nycpp-1::self.nycpp]
            proc_grid[key][4] = proc_grid_ext[key][4][0::self.nzcpp]
            proc_grid[key][5] = proc_grid_ext[key][5][self.nzcpp-1::self.nzcpp]
    else:
        proc_grid_ext = None
        proc_grid = None
        nxp_ext,nyp_ext,nzp_ext,nproc_ext = None,None,None,None
        nxp,nyp,nzp,nproc = None,None,None,None
    # Broadcast the data
    self.proc_grid_ext = self.comm.bcast(proc_grid_ext,root=0)
    self.proc_grid = self.comm.bcast(proc_grid,root=0)
    self.nxp_ext,self.nyp_ext,\
    self.nzp_ext,self.nproc_ext = self.comm.bcast((nxp_ext,nyp_ext,nzp_ext,nproc_ext),root=0)
    self.nxp,self.nyp,\
    self.nzp,self.nproc = self.comm.bcast((nxp,nyp,nzp,nproc),root=0)
    # Get position in processor grid
    self.ip,self.jp,self.kp = self.position_from_rank(self.rank,external=False)
    # Determine local grid indices and size
    for key in self.proc_grid:
        nxl = self.proc_grid[key][1][self.ip]-self.proc_grid[key][0][self.ip]+1
        nyl = self.proc_grid[key][3][self.jp]-self.proc_grid[key][2][self.jp]+1
        nzl = self.proc_grid[key][5][self.kp]-self.proc_grid[key][4][self.kp]+1
        # Verify that local grid size is not smaller than ghost cell size
        assert (nxl>=self.nghx and nyl>=self.nghy and nzl>=self.nghz),\
            "Local grid size must be greater than number "\
            "of ghost cells in each direction!"
    # Initialize neighbor array
    nghbr = np.empty((3,3,3),dtype='int')
    # wrap-around x (-1 marks "no neighbor" at a non-periodic boundary)
    ipl = self.ip-1
    if ipl<0:
        if self.xperiodic: ipl = self.nxp-1
        else: ipl = -1
    ipr = self.ip+1
    if ipr>self.nxp-1:
        if self.xperiodic: ipr = 0
        else: ipr = -1
    inghbr = (ipl,self.ip,ipr)
    # wrap-around y
    jpl = self.jp-1
    if jpl<0:
        if self.yperiodic: jpl = self.nyp-1
        else: jpl = -1
    jpr = self.jp+1
    if jpr>self.nyp-1:
        if self.yperiodic: jpr = 0
        else: jpr = -1
    jnghbr = (jpl,self.jp,jpr)
    # wrap-around z
    kpl = self.kp-1
    if kpl<0:
        if self.zperiodic: kpl = self.nzp-1
        else: kpl = -1
    kpr = self.kp+1
    if kpr>self.nzp-1:
        if self.zperiodic: kpr = 0
        else: kpr = -1
    knghbr = (kpl,self.kp,kpr)
    # Construct array of neighbors
    for ip in range(3):
        for jp in range(3):
            for kp in range(3):
                # Assign rank to neighbor array
                if inghbr[ip]<0 or jnghbr[jp]<0 or knghbr[kp]<0:
                    nghbr[ip,jp,kp] = -1
                else:
                    nghbr[ip,jp,kp] = self.rank_from_position(inghbr[ip],jnghbr[jp],knghbr[kp],external=False)
    # Save neighbors as class variable
    self.nghbr = nghbr
def load_field(self,key,io_limit=None,barrier=False):
def load_field(self,key,io_limit=None,barrier=False,dtype=np.float64):
'''Loads the required chunks from file'''
from .field import Field3d
import numpy as np
@ -226,7 +118,7 @@ class PPP:
data = np.empty(
(nxl+2*self.nghx,
nyl+2*self.nghy,
nzl+2*self.nghz),dtype=self.precision)
nzl+2*self.nghz),dtype=dtype)
# Compute origin of subfield
origin = (self.origin[key][0]+(ib-1-self.nghx)*self.spacing[0],
self.origin[key][1]+(jb-1-self.nghy)*self.spacing[1],
@ -257,34 +149,32 @@ class PPP:
# Exchange ghost cells
self.exchange_ghost_cells(key)
# Initialize symmetries and impose BC
self.init_symmetries(key)
self.init_symmetry(key)
self.impose_boundary_conditions(key)
# Syncronize processes if requested
if barrier: self.comm.Barrier()
def differentiate(self,key,axis,key_out=None):
def differentiate(self,key,axis,key_out=None,on_pressure_grid=False):
assert axis<3, "'axis' must be one of 0,1,2."
if key_out is None:
key_out = key+('x','y','z')[axis]
origin = list(self.origin)
key_out = 'd'+key+('/dx','/dy','/dz')[axis]
origin = list(self.origin[key])
shifting_state = self.shifting_state(key,axis=axis)
if shifting_state==-1:
padding = 'after'
shift_origin = 'after'
origin[axis] += 0.5*self.spacing[axis]
elif shifting_state==0:
padding = 'before'
shift_origin = 'before'
origin[axis] -= 0.5*self.spacing[axis]
elif shifting_state==1:
padding = 'before'
shift_origin = 'before'
origin[axis] -= 0.5*self.spacing[axis]
else:
raise ValueError("Invalid shifting state.")
self.field[key_out] = self.field[key].derivative(axis,padding=padding)
self.origin[key_out] = tuple(origin)
self.spacing[key_out] = self.spacing[key].copy()
self.symmetries[key_out] = self.symmetries[key].copy()
self.proc_grid[key_out] = self.proc_grid[key].copy()
self.proc_grid_ext[key_out] = self.proc_grid_ext[key].copy()
self.field[key_out] = self.field[key].derivative(axis,only_keep_interior=False,shift_origin=shift_origin)
self.origin[key_out] = tuple(origin)
self.symmetries[key_out] = self.symmetries[key].copy()
self.proc_grid[key_out] = self.proc_grid[key].copy()
if axis==0:
self.symmetries[key_out][0,1,1] = -self.symmetries[key_out][0,1,1]
self.symmetries[key_out][2,1,1] = -self.symmetries[key_out][2,1,1]
@ -297,15 +187,90 @@ class PPP:
# Exchange ghost cells and set boundary conditions
self.exchange_ghost_cells(key_out)
self.impose_boundary_conditions(key_out)
# Shift to pressure grid if desired
if on_pressure_grid:
self.shift_to_pressure_grid(key_out,key_out=key_out)
def allocate(self,key_grid,fill=None,pseudo=False,dtype=np.float64):
    '''Allocate a Field3d covering this rank's local chunk of grid `key_grid`,
    including ghost layers on every side. With pseudo=True only the metadata
    is created (no storage).'''
    from .field import Field3d
    grid = self.proc_grid[key_grid]
    ib, ie = grid[0][self.ip], grid[1][self.ip]
    jb, je = grid[2][self.jp], grid[3][self.jp]
    kb, ke = grid[4][self.kp], grid[5][self.kp]
    # Origin of the ghost-extended local block (grid indices are 1-based).
    origin = (self.origin[key_grid][0]+(ib-1-self.nghx)*self.spacing[0],
              self.origin[key_grid][1]+(jb-1-self.nghy)*self.spacing[1],
              self.origin[key_grid][2]+(kb-1-self.nghz)*self.spacing[2])
    # Local extent plus ghost cells on both sides of each axis.
    dim = (ie-ib+1+2*self.nghx,
           je-jb+1+2*self.nghy,
           ke-kb+1+2*self.nghz)
    return Field3d.allocate(dim,origin,self.spacing,fill=fill,pseudo=pseudo,dtype=dtype)
def qcriterion(self,key_out=None,from_pressure=False,keep_derivatives=False):
    '''Computes Q-criterion.

    from_pressure=True: uses Q = 0.5 * laplacian(p) computed from the loaded
    pressure field. Otherwise Q is accumulated from the velocity-gradient
    tensor as Q = -0.5 * (sum_i (du_i/dx_i)^2 + 2 * sum_{i<j} du_i/dx_j * du_j/dx_i),
    with all derivatives shifted onto the pressure grid.

    keep_derivatives=True keeps the intermediate 'du/dx'-style fields instead
    of deleting them after use. The result is stored under key_out ('Q' by
    default); ghost cells and boundary conditions are refreshed at the end.
    '''
    from .field import Field3d
    if key_out is None:
        key_out = 'Q'
    if from_pressure:
        assert 'p' in self.field, "Pressure field is not loaded."
        # Copy grid/bookkeeping metadata of 'p' without duplicating its data.
        self.copy('p',key_out,skip_data=True)
        self.field[key_out] = 0.5*self.field['p'].laplacian(only_keep_interior=False)
    else:
        assert 'u' in self.field, "Velocity field is not loaded: u"
        assert 'v' in self.field, "Velocity field is not loaded: v"
        assert 'w' in self.field, "Velocity field is not loaded: w"
        # Result lives on the pressure grid; symmetries are re-initialized for it.
        self.copy('p',key_out,skip_data=True,skip_symmetries=True)
        self.init_symmetry(key_out)
        self.field[key_out] = self.allocate('p',fill=0.0,dtype=self.field['u'].dtype())
        keyx = ('x','y','z')
        keyu = ('u','v','w')
        for ii in range(3):
            # Diagonal term: (du_i/dx_i)^2
            key_tmp = 'd'+keyu[ii]+'/d'+keyx[ii]
            # NOTE(review): parprint looks like a package-level debug print -- defined elsewhere.
            parprint(key_tmp)
            self.differentiate(keyu[ii],axis=ii,key_out=key_tmp,on_pressure_grid=True)
            self.field[key_out] += self.field[key_tmp]**2
            if not keep_derivatives:
                self.delete(key_tmp)
            #
            # Off-diagonal pair (i, i+1 mod 3): 2 * du_i/dx_j * du_j/dx_i
            iip1 = (ii+1)%3
            key_tmp1 = 'd'+keyu[ii]+'/d'+keyx[iip1]
            key_tmp2 = 'd'+keyu[iip1]+'/d'+keyx[ii]
            parprint(key_tmp1,key_tmp2)
            self.differentiate(keyu[ii],axis=iip1,key_out=key_tmp1,on_pressure_grid=True)
            self.differentiate(keyu[iip1],axis=ii,key_out=key_tmp2,on_pressure_grid=True)
            self.field[key_out] += 2.0*self.field[key_tmp1]*self.field[key_tmp2]
            if not keep_derivatives:
                self.delete(key_tmp1)
                self.delete(key_tmp2)
        self.field[key_out] *= -0.5
    # Exchange ghost cells and set boundary conditions
    self.exchange_ghost_cells(key_out)
    self.impose_boundary_conditions(key_out)
    return
#def vorticity(self,axis=None,key_out=None,keep_derivatives=False,on_pressure_grid=True):
def multiply(self,key1,key2,key_out=None,on_pressure_grid=True):
    '''Pointwise product of fields 'key1' and 'key2', stored under 'key_out'
    (defaults to key1). When the grids differ or on_pressure_grid is set,
    both operands are first shifted to the pressure grid via a temporary
    field 'tmp'.'''
    if key_out is None:
        key_out = key1
    same_grid = self.field[key1].has_same_grid(self.field[key2])
    if same_grid and not on_pressure_grid:
        # Grids already match: a plain copy followed by in-place multiply.
        self.copy(key1,key_out)
        self.field[key_out] *= self.field[key2]
    else:
        self.shift_to_pressure_grid(key1,key_out)
        self.shift_to_pressure_grid(key2,'tmp')
        self.field[key_out] *= self.field['tmp']
        self.delete('tmp')
    return
def gaussian_filter(self,key,sigma,truncate=4.0,key_out=None,iterate=False):
'''Applies a gaussian filter to a field as in-place operation. Sigma is the std of the filter in terms of grid width.'''
import numpy as np
if key_out is None:
key_out = key
else:
self.origin[key_out] = self.origin[key].copy()
self.spacing[key_out] = self.spacing[key].copy()
if key!=key_out:
self.copy(key,key_out,skip_data=True)
# Compute radius of Gaussian filter
radius = self.field[key].gaussian_filter_radius(sigma,truncate=truncate)
if not iterate:
@ -324,23 +289,27 @@ class PPP:
assert all([radius[ii]>0 if sigma[ii]>0.0 else True for ii in range(3)]),\
"Iterative procedure leads to invalid stencil radius: "\
"increase number of ghost cells. {}".format(radius)
print('Iterations: {}, stencil radius: {}'.format(niter,radius))
parprint('Gaussian filter: iterations={}, stencil radius={}'.format(niter,radius))
for iiter in range(niter):
parprint('Iter #{:d}'.format(iiter))
# Filter field: if key_out is None, perform operation inplace
self.field[key_out] = self.field[key].gaussian_filter(sigma,
truncate=truncate,border='constant',const_val=0.0)
truncate=truncate,only_keep_interior=False)
# Exchange ghost cells and set boundary conditions
self.exchange_ghost_cells(key_out)
self.impose_boundary_conditions(key_out)
# Iterate inplace from now on
key = key_out
def broadcast(self,key,arg,operation):
'''Broadcasts an inplace operation involving a scalar or matrix on
def broadcast(self,key,operation,arg,key_out=None):
'''Broadcasts an operation involving a scalar or matrix on
the entire grid. If 'arg' is a matrix, it must be three-dimensional
and its axes must be singular or of length nx/ny/nz.'''
import numpy as np
import operator
# Broadcast argument
arg = self.comm.bcast(arg,root=0)
# Select operator
if operation in ('add','+'):
op = operator.iadd
elif operation in ('subtract','sub','-'):
@ -358,7 +327,7 @@ class PPP:
for axis in range(3):
if arg.shape[axis]==1:
continue
elif arg.shape[axis]==self.numpoints(key,axis=axis):
elif arg.shape[axis]==self.dim(key,axis=axis):
pos = self.position_from_rank(self.rank,external=False)[axis]
sl_arg[axis] = slice(
self.proc_grid[key][2*axis][pos]-1,
@ -366,16 +335,24 @@ class PPP:
else:
raise ValueError("'arg' must either be singular or match global "\
"grid dimension. (axis={}: got {:d}, expected {:d}".format(
axis,arg.shape[axis],self.numpoints(key,axis=axis)))
axis,arg.shape[axis],self.dim(key,axis=axis)))
# Only operate on interior and communcate ghosts later
sl_int = tuple(slice(self.num_ghost[ii],-self.num_ghost[ii]) for ii in range(3))
sl_arg = tuple(sl_arg)
op(self.field[key].data[sl_int],arg[sl_arg])
if key_out is None:
op(self.field[key].data[sl_int],arg[sl_arg])
else:
self.copy(key,key_out)
op(self.field[key_out].data[sl_int],arg[sl_arg])
# Exchange ghost cells and set boundary conditions
self.exchange_ghost_cells(key)
self.impose_boundary_conditions(key)
elif isinstance(arg,(int,float)):
op(self.field[key].data,arg)
if key_out is None:
op(self.field[key].data,arg)
else:
self.copy(key,key_out)
op(self.field[key_out].data,arg)
return
def integrate(self,key,integrate_axis,ufunc=None,ignore_nan=False,average=False):
@ -396,7 +373,7 @@ class PPP:
weights_local = np.array(weights_local)
# Simple implementation: all processor communicate directly to root
if self.rank==0:
dim_final = tuple(1 if integrate_axis[axis] else self.numpoints(key,axis=axis) for axis in range(3))
dim_final = tuple(1 if integrate_axis[axis] else self.dim(key,axis=axis) for axis in range(3))
# Allocate a nice spot of memory
integ = np.zeros(dim_final,dtype=integ_local.dtype)
if average:
@ -432,47 +409,169 @@ class PPP:
self.comm.send(weights_local,dest=0,tag=2)
return None
def shift_to_pressure_grid(self,key,key_out=None):
    '''Interpolate field 'key' from its (possibly staggered) grid onto the
    pressure grid and store the result under 'key_out'.

    Raises ValueError for an in-place shift of 'u','v','w', since that would
    destroy the original staggered data.
    '''
    if key_out is None:
        if key in ('u','v','w'):
            raise ValueError("In-place shift of {'u','v','w'} is not permitted.")
        key_out = key
    # If pressure is loaded, use field information, else create a
    # fake Field3d object so that we can use its methods.
    field_ref = self.allocate('p',pseudo=True)
    # Compute the relative shift and shift it
    rel_shift = self.field[key].relative_shift(field_ref)
    self.field[key_out] = self.field[key].shift_origin(rel_shift)
    # Match number of gridpoints
    sl = 3*[slice(None)]
    for axis in range(3):
        if not self.field[key_out].has_same_dim(field_ref,axis=axis):
            sl[axis] = slice(0,field_ref.dim(axis=axis))
    # FIX: index with a tuple of slices -- indexing an ndarray with a plain
    # list of slices is rejected by modern numpy.
    self.field[key_out].data = self.field[key_out].data[tuple(sl)]
    # Copy metadata of pressure grid
    self.copy('p',key_out,skip_data=True,skip_symmetries=True)
    self.symmetries[key_out] = self.symmetries[key].copy()
    # Exchange ghost cells and set boundary conditions
    self.exchange_ghost_cells(key_out)
    self.impose_boundary_conditions(key_out)
def init_symmetry(self,key,mirror=(True,True)):
    '''Sets the symmetries for ghost cells behind the wall.

    Example: wall in y
      No-slip boundary (no mirror)    free-slip boundary (mirror)
        u -> -u                         u ->  u
        v -> -v                         v -> -v
        w -> -w                         w ->  w
        p ->  p                         p ->  p

    mirror = (lower-wall flag, upper-wall flag) for the tangential components.
    '''
    import numpy as np
    self.symmetries[key] = np.zeros((3,3,3),dtype='i')
    periodic = (self.xperiodic,self.yperiodic,self.zperiodic)
    normal_key = ('u','v','w')
    for axis in range(3):
        if periodic[axis]:
            continue  # periodic direction: no wall, no symmetry needed
        # Neighbor-slot indices of the lower/upper wall along this axis.
        lo = [1,1,1]
        hi = [1,1,1]
        lo[axis] = 0
        hi[axis] = 2
        lo, hi = tuple(lo), tuple(hi)
        if key==normal_key[axis]:
            # Wall-normal velocity component always flips sign at the wall.
            self.symmetries[key][lo] = -1
            self.symmetries[key][hi] = -1
        else:
            self.symmetries[key][lo] = 1 if mirror[0] else -1
            self.symmetries[key][hi] = 1 if mirror[1] else -1
def copy(self,key,key_out,skip_data=False,skip_symmetries=False):
    '''Copies a field to a new key.

    skip_data        do not duplicate the field data itself
    skip_symmetries  do not duplicate the symmetry table
    Copying a key onto itself is a no-op.
    '''
    if key==key_out:
        return
    # Per-key metadata is always duplicated.
    self.origin[key_out] = tuple(self.origin[key])
    self.proc_grid[key_out] = self.proc_grid[key].copy()
    if skip_symmetries:
        pass
    else:
        self.symmetries[key_out] = self.symmetries[key].copy()
    if skip_data:
        pass
    else:
        self.field[key_out] = self.field[key].copy()
    return
def delete(self,key):
    '''Removes a field from memory.

    The base solver fields ('u','v','w','p','s') keep their grid metadata;
    derived keys drop origin and processor-grid entries as well.
    '''
    assert key in self.field, "Field {} cannot be deleted, because it "\
        "does not exist.".format(key)
    import gc
    self.field.pop(key)
    self.symmetries.pop(key)
    if key not in ('u','v','w','p','s'):
        self.origin.pop(key)
        self.proc_grid.pop(key)
    # Encourage immediate release of the (potentially large) array memory.
    gc.collect()
    return
def merge(self,key,stride=(1,1,1)):
    '''Returns the complete (possibly downsampled) field at rank 0.

    NOTE(review): placeholder -- not implemented yet; currently returns None.
    '''
    return
def save_state(self,filename):
    '''Pickle this rank's complete decomposition state (grid layout, neighbor
    table, fields and symmetries) to '<filename>.<rank>.pickle'.'''
    import pickle
    # One pickle file per MPI rank, tagged with the zero-padded rank number.
    fout = filename+'.{:05d}.pickle'.format(self.rank)
    payload = (
        self.num_ghost,
        self.chunks_per_proc,
        self.origin,
        self.spacing,
        self.periodicity,
        self.bounds,
        self.proc_grid,
        self.nxp,
        self.nyp,
        self.nzp,
        self.nghbr,
        self.field,
        self.symmetries)
    with open(fout,"wb") as f:
        pickle.dump(payload,f)
def to_vtk(self,key,no_ghost=False):
    '''Returns the field (only its interior + some ghost cells for plotting)
    as a vtk mesh.'''
    subfield = self._subfield_for_vtk(key,no_ghost)
    return subfield.to_vtk()
def vtk_contour(self,key,val):
    '''Compute isocontour at value 'val' for this rank's chunk.

    Delegates to _subfield_for_vtk (interior plus one ghost layer) like
    to_vtk does. The former inline path was left behind as unreachable
    dead code and was also broken: it called self.chunk_size(axis=None)
    without the required 'key' argument.
    '''
    return self._subfield_for_vtk(key,False).vtk_contour(val)
def vtk_slice(self,key,normal,origin):
    '''Extracts a plane with the given normal and origin from the field.

    Delegates to _subfield_for_vtk (interior plus one ghost layer),
    consistent with to_vtk/vtk_contour. The former inline path was left
    behind as unreachable dead code after the refactoring to
    _subfield_for_vtk and relied on the stale 'numpoints' attribute.
    '''
    return self._subfield_for_vtk(key,False).vtk_slice(normal,origin)
def _subfield_for_vtk(self,key,no_ghost):
    '''Return the subfield used for VTK export: the chunk interior, plus
    (unless no_ghost) one ghost layer at the lower side and one extra point
    at the upper domain boundary so neighboring chunks overlap seamlessly.'''
    # FIX: np was used without being in scope here; this file's convention
    # is function-local numpy imports (see e.g. init_symmetry).
    import numpy as np
    num_ghost = np.array(self.num_ghost)
    idx_origin = num_ghost
    dim = np.array(self.field[key].dim(axis=None))
    dim -= 2*num_ghost
    if not no_ghost:
        # NOTE: idx_origin aliases num_ghost; num_ghost is not used below.
        idx_origin -= 1
        dim += 1
        if self.ip==self.nxp-1: dim[0]+=1
        if self.jp==self.nyp-1: dim[1]+=1
        if self.kp==self.nzp-1: dim[2]+=1
    idx_origin = tuple(idx_origin)
    dim = tuple(dim)
    return self.field[key].extract_subfield(idx_origin,dim)
def rank_from_position(self,ip,jp,kp,external=False):
    '''Map a processor-grid position (ip,jp,kp) to its linear rank.

    external  use the external (file) processor layout instead of the
              internal MPI layout.

    The previous body carried a duplicated, unreachable return statement
    (diff-merge residue); the linearization now lives solely in the shared
    static helper, mirroring position_from_rank.
    '''
    if external:
        nyp,nzp = self.nyp_ext,self.nzp_ext
    else:
        nyp,nzp = self.nyp,self.nzp
    return self._rank_from_position(ip,jp,kp,nyp,nzp)
def position_from_rank(self,rank,external=False):
    '''Inverse of rank_from_position: recover (ip,jp,kp) from a linear rank,
    either for the internal MPI layout or the external file layout.'''
    nyp = self.nyp_ext if external else self.nyp
    nzp = self.nzp_ext if external else self.nzp
    return self._position_from_rank(rank,nyp,nzp)
@staticmethod
def _rank_from_position(ip,jp,kp,nyp,nzp):
    # Row-major linearization: x slowest, z fastest
    # (nyp*nzp ranks per ip-slab, nzp ranks per jp-row).
    return ip*nyp*nzp+jp*nzp+kp
@staticmethod
def _position_from_rank(rank,nyp,nzp):
    # Inverse of _rank_from_position: recover the (ip,jp,kp) grid position
    # from the linear rank (x slowest index, z fastest).
    ip = rank//(nyp*nzp)
    jp = (rank//nzp)%nyp
    kp = rank%nzp
    return (ip,jp,kp)
def shifting_state(self,key,axis=None):
'''Returns the relative shift of first global grid point to domain
boundaries as either 1 (= +0.5h), 0 (= 0h) or -1 (= -0.5h).'''
if axis is None:
return tuple(self.shifting_state(key,axis=ii) for ii in range(3))
assert axis<3, "'axis' must be one of 0,1,2."
@ -483,13 +582,13 @@ class PPP:
if axis is None:
return tuple(self.chunk_size(key,axis=ii) for ii in range(3))
assert axis<3, "'axis' must be one of 0,1,2."
return self.field[key].numpoints[axis]-2*self.num_ghost[axis]
return self.field[key].dim(axis=axis)-2*self.num_ghost[axis]
def dim(self,key,axis=None):
    '''Returns the total number of gridpoints across all processors
    without ghost cells.

    axis  0,1,2 for a single direction, or None for the full 3-tuple.

    NOTE(review): removed a stale duplicate 'def numpoints' signature line
    and the old recursion through it (diff-merge residue); all call sites
    in this file use dim().
    '''
    if axis is None:
        return tuple(self.dim(key,axis=ii) for ii in range(3))
    assert axis<3, "'axis' must be one of 0,1,2."
    # The last end-index of the processor grid is the global point count.
    return self.proc_grid[key][2*axis+1][-1]
@ -600,7 +699,7 @@ class PPP:
recvbuf = np.zeros(
(ii_dst.stop-ii_dst.start,
jj_dst.stop-jj_dst.start,
kk_dst.stop-kk_dst.start),dtype=self.precision)
kk_dst.stop-kk_dst.start),dtype=self.field[key].data.dtype)
reqrecv = self.comm.Irecv(recvbuf,source=rank_src,tag=tag)
# [recv] wait for data to be received
if reqrecv is not None:
@ -625,7 +724,7 @@ class PPP:
for axis in range(3):
if positionBd[axis]==-1: # lower boundary
# index of first point within the domain
idx = self.field[key].nearest_gridpoint(self.bounds[2*axis],axis=axis,lower=True)+1
idx = self.field[key].nearest_gridpoint(self.bounds[2*axis],axis=axis,lower=True)+1
# distance first point to wall: should be either 0 or dx/2
dist = np.abs(self.field[key].coordinate(idx,axis=axis)-self.bounds[2*axis])
if dist>=0.25*self.spacing[axis]: # no point on boundary
@ -637,19 +736,123 @@ class PPP:
break
elif positionBd[axis]==1: # upper boundary
# index of last point within the domain
idx = self.field[key].nearest_gridpoint(self.bounds[2*axis+1],axis=axis,lower=True)
idx = self.field[key].nearest_gridpoint(self.bounds[2*axis+1],axis=axis,lower=True)
# distance last point to wall: should be either 0 or -dx/2
dist = np.abs(self.field[key].coordinate(idx,axis=axis)-self.bounds[2*axis+1])
if dist>=0.25*self.spacing[axis]: # no point on boundary
sl_dst[axis] = slice(idx+1,self.field[key].numpoints[axis],1)
sl_src[axis] = slice(idx,2*idx+1-self.field[key].numpoints[axis],-1)
sl_dst[axis] = slice(idx+1,self.field[key].dim(axis=axis),1)
sl_src[axis] = slice(idx,2*idx+1-self.field[key].dim(axis=axis),-1)
else: # point on boundary
sl_dst[axis] = slice(idx+1,self.field[key].numpoints[axis],1)
sl_src[axis] = slice(idx-1,2*idx-self.field[key].numpoints[axis],-1)
sl_dst[axis] = slice(idx+1,self.field[key].dim(axis=axis),1)
sl_src[axis] = slice(idx-1,2*idx-self.field[key].dim(axis=axis),-1)
break
# print(sl_src,sl_dst,self.field[key].dim())
self.field[key].data[tuple(sl_dst)] = self.symmetries[key][inghbr,jnghbr,knghbr]*\
self.field[key].data[tuple(sl_src)]
@staticmethod
def _construct_procgrid(comm,proc,chunks_per_proc,num_ghost,periodicity):
    '''Derive the internal processor grid from the external (file) one.

    comm             MPI communicator.
    proc             dict with keys 'u','v','w','p','s'; each entry holds six
                     index lists describing the external processor grid.
    chunks_per_proc  (nxcpp,nycpp,nzcpp) external chunks merged per MPI rank.
    num_ghost        (nghx,nghy,nghz) ghost cells per direction.
    periodicity      (xperiodic,yperiodic,zperiodic) flags.

    Returns (proc_grid,nxp,nyp,nzp,
             proc_grid_ext,nxp_ext,nyp_ext,nzp_ext,
             nghbr)
    where nghbr is a 3x3x3 array of neighbor ranks (-1 = no neighbor/wall).
    '''
    import numpy as np
    rank = comm.Get_rank()
    nxcpp,nycpp,nzcpp = chunks_per_proc
    nghx,nghy,nghz = num_ghost
    xperiodic,yperiodic,zperiodic = periodicity
    # Assert proc
    assert all(k in proc for k in ('u','v','w','p','s')), "'proc' must be a dictionary with "\
        "keys 'u','v','w','p','s'"
    for key in proc:
        assert len(proc[key])==6, "Entries of 'proc' must have length of 6."
    proc_grid_ext = proc
    # Initialize chunks per processor
    # Verify that this processor grid can be redistributed onto the requested processor layout
    nxp_ext = len(proc_grid_ext['u'][0])
    nyp_ext = len(proc_grid_ext['u'][2])
    nzp_ext = len(proc_grid_ext['u'][4])
    nproc_ext = nxp_ext*nyp_ext*nzp_ext  # NOTE(review): currently unused
    #
    assert nxp_ext%nxcpp==0, "Number of processors must be divisible by the number "\
        "of processors per process. (nxp_ext={}, nxcpp={})".format(
        nxp_ext,nxcpp)
    assert nyp_ext%nycpp==0, "Number of processors must be divisible by the number "\
        "of processors per process. (nyp_ext={}, nycpp={})".format(
        nyp_ext,nycpp)
    assert nzp_ext%nzcpp==0, "Number of processors must be divisible by the number "\
        "of processors per process. (nzp_ext={}, nzcpp={})".format(
        nzp_ext,nzcpp)
    # Determine the new processor layout and verify total number of MPI processes
    nxp = nxp_ext//nxcpp
    nyp = nyp_ext//nycpp
    nzp = nzp_ext//nzcpp
    nproc = nxp*nyp*nzp
    assert nproc==comm.Get_size(), "Number of MPI processes does not match the requested "\
        "processor layout. (MPI procs: {}, required procs: {})".format(
        comm.Get_size(),nproc)
    # Construct internal processor grid: merge every nxcpp/nycpp/nzcpp
    # consecutive external chunks into one rank (keep every n-th begin index
    # and the matching n-th end index).
    proc_grid = {}
    for key in proc_grid_ext:
        proc_grid[key] = [None]*6
        proc_grid[key][0] = proc_grid_ext[key][0][0::nxcpp]
        proc_grid[key][1] = proc_grid_ext[key][1][nxcpp-1::nxcpp]
        proc_grid[key][2] = proc_grid_ext[key][2][0::nycpp]
        proc_grid[key][3] = proc_grid_ext[key][3][nycpp-1::nycpp]
        proc_grid[key][4] = proc_grid_ext[key][4][0::nzcpp]
        proc_grid[key][5] = proc_grid_ext[key][5][nzcpp-1::nzcpp]
    # Get position in processor grid
    ip,jp,kp = PPP._position_from_rank(rank,nyp,nzp)
    # Determine local grid indices and size
    for key in proc_grid:
        nxl = proc_grid[key][1][ip]-proc_grid[key][0][ip]+1
        nyl = proc_grid[key][3][jp]-proc_grid[key][2][jp]+1
        nzl = proc_grid[key][5][kp]-proc_grid[key][4][kp]+1
        # Verify that local grid size is not smaller than ghost cell size
        assert (nxl>=nghx and nyl>=nghy and nzl>=nghz),\
            "Local grid size must be greater than number "\
            "of ghost cells in each direction!"
    # Initialize neighbor array
    nghbr = np.empty((3,3,3),dtype='int')
    # wrap-around x: -1 marks a wall (no neighbor) on non-periodic boundaries
    ipl = ip-1
    if ipl<0:
        if xperiodic: ipl = nxp-1
        else: ipl = -1
    ipr = ip+1
    if ipr>nxp-1:
        if xperiodic: ipr = 0
        else: ipr = -1
    inghbr = (ipl,ip,ipr)
    # wrap-around y
    jpl = jp-1
    if jpl<0:
        if yperiodic: jpl = nyp-1
        else: jpl = -1
    jpr = jp+1
    if jpr>nyp-1:
        if yperiodic: jpr = 0
        else: jpr = -1
    jnghbr = (jpl,jp,jpr)
    # wrap-around z
    kpl = kp-1
    if kpl<0:
        if zperiodic: kpl = nzp-1
        else: kpl = -1
    kpr = kp+1
    if kpr>nzp-1:
        if zperiodic: kpr = 0
        else: kpr = -1
    knghbr = (kpl,kp,kpr)
    # Construct array of neighbors
    # NOTE(review): ip/jp/kp are reused as loop indices here; the rank's own
    # position was already captured in inghbr/jnghbr/knghbr above.
    for ip in range(3):
        for jp in range(3):
            for kp in range(3):
                # Assign rank to neighbor array
                if inghbr[ip]<0 or jnghbr[jp]<0 or knghbr[kp]<0:
                    nghbr[ip,jp,kp] = -1
                else:
                    nghbr[ip,jp,kp] = PPP._rank_from_position(inghbr[ip],jnghbr[jp],knghbr[kp],nyp,nzp)
    return (proc_grid,nxp,nyp,nzp,
        proc_grid_ext,nxp_ext,nyp_ext,nzp_ext,
        nghbr)
def _baton_wait(self,batch_size,tag=420):
'''Block execution until an empty message from rank-batch_wait
is received (issued by _baton_pass)'''

View File

@ -1,37 +1,36 @@
import numpy as np
class Particles:
def __init__(self,num,time,attr,period):
    '''Initialize a Particles object.
    num      number of particles
    time     time value (scalar, or a length-1 sequence)
    attr     dictionary of numpy arrays with length equal to num
    period   the domain periods (length-3 tuple/list)

    Cleaned up diff-merge residue: duplicated docstring/isinstance/assert
    lines and a redundant local 'import numpy' (np is imported at module
    level) have been removed.
    '''
    assert 'id' in attr, "Need attribute 'id' to initialize Particles"
    assert 'x' in attr, "Need attribute 'x' to initialize Particles"
    assert 'y' in attr, "Need attribute 'y' to initialize Particles"
    assert 'z' in attr, "Need attribute 'z' to initialize Particles"
    assert isinstance(period,(tuple,list)) and len(period)==3, "'period' must be tuple/list with length 3"
    self.num = num
    if isinstance(time,(tuple,list,np.ndarray)):
        assert len(time)==1, "'time' must be a scalar value."
        time = time[0]
    self.time = time
    self.attr = {}
    # Store all attributes sorted by particle id so that particle indices
    # are stable across time steps.
    sortidx = attr['id'].argsort()
    for key in attr:
        assert attr[key].ndim==1, "Attributes must be a one-dimensional numpy array."
        assert attr[key].shape[0]==num, "Attributes must be of length 'num'."
        if key=='id':
            self.add_attribute(key,attr[key][sortidx].astype('int'))
        else:
            self.add_attribute(key,attr[key][sortidx])
    self.period = tuple(period)
    self.frame_velocity = np.zeros(3)
    return
@classmethod
def from_array(cls,pp,col,time,period,select_col=None):
import numpy
assert 'id' in col, "Need column 'id' to initialize Particles"
assert 'x' in col, "Need column 'x' to initialize Particles"
assert 'y' in col, "Need column 'y' to initialize Particles"
@ -39,7 +38,7 @@ class Particles:
assert pp.ndim==2 or pp.shape[2]==1, "'pp' cannot be a time series."
num = pp.shape[1]
if pp.ndim==2:
pp = numpy.expand_dims(pp,axis=2)
pp = np.expand_dims(pp,axis=2)
if select_col is not None:
for scol in select_col:
assert scol in col, "Selected column '{}' not found in 'col'.".format(scol)
@ -78,21 +77,19 @@ class Particles:
attr[key] = self.attr[key].copy()
return Particles(self.num,self.time,attr,self.period)
def add_attribute(self,key,val):
    '''Store attribute 'key' for all particles.

    val  a sequence of length self.num (stored as a copy), or a scalar
         that is broadcast to all particles.

    FIX: the sequence is converted to an ndarray before validation --
    the old code accepted tuples/lists in the isinstance check but then
    crashed on 'val.ndim', which plain sequences do not have.
    '''
    if isinstance(val,(tuple,list,np.ndarray)):
        arr = np.array(val)
        assert len(arr)==self.num and arr.ndim==1, "Invalid 'val'."
        self.attr[key] = arr
    else:
        self.attr[key] = np.full(self.num,val)
    return
def del_attribute(self,key):
    '''Remove attribute 'key' from the particle set (KeyError if absent).'''
    del self.attr[key]
def get_attribute(self,key):
    '''Return the stored attribute array for 'key' (the array itself, not a copy).'''
    return self.attr[key]
def get_position(self,axis=None):
import numpy
if axis is None:
return numpy.vstack([self.attr[key].copy() for key in ('x','y','z')])
return np.vstack([self.attr[key].copy() for key in ('x','y','z')])
else:
assert axis<3, "'axis' must be smaller than 3."
key = ('x','y','z')[axis]
@ -113,7 +110,8 @@ class Particles:
valdiff = val-self.frame_velocity[axis]
self.translate(-self.time*valdiff,axis=axis)
key = ('u','v','w')[axis]
self.attr[key] -= valdiff
if self.has_attribute(key):
self.attr[key] -= valdiff
self.frame_velocity[axis] = val
return
def enforce_periodicity(self,axis=None):
@ -127,7 +125,6 @@ class Particles:
return
def to_vtk(self,deep=False):
import pyvista as pv
import numpy as np
position = np.vstack([self.attr[key] for key in ('x','y','z')]).transpose()
mesh = pv.PolyData(position,deep=deep)
for key in self.attr:
@ -143,12 +140,11 @@ class Particles:
class Trajectories:
def __init__(self,part,unraveled=False,copy_particles=False):
import numpy
self.numtime = 1
self.numpart = part.num
self.copy_particles = copy_particles
# Attributes are first stored as a list of view to instances of
# Particles. Later they will be concatenated into numpy array
# Particles. Later they will be concatenated into np array
# and the attributes of Particles will be views.
self.attr = {}
self.part = []
@ -161,7 +157,8 @@ class Trajectories:
self.attr[key] = [self.part[0].attr[key].view()]
self.period = self.part[0].period
self.unraveled = unraveled
self.frame_velocity = numpy.zeros(3)
self.frame_velocity = np.zeros(3)
def __str__(self):
str = 'Trajectories with\n'
str+= ' time steps: {:d}\n'.format(self.numtime)
@ -173,6 +170,7 @@ class Trajectories:
str+= ' frame velocity: {}\n'.format(self.frame_velocity)
str+= ' unraveled: {}'.format(self.unraveled)
return str
def __iadd__(self,other):
if isinstance(other,Trajectories):
self.add_trajectories(other)
@ -181,6 +179,7 @@ class Trajectories:
else:
raise TypeError('Cannot add type {}'.format(type(other)))
return self
def __getitem__(self,val):
assert isinstance(val,tuple) and len(val)==2, "Trajectories must be indexed by [part,time]."
sl = []
@ -194,6 +193,7 @@ class Trajectories:
else:
raise TypeError("Trajectories can only be sliced by slice objects or integers.")
return self._slice(slice_part=sl[0],slice_time=sl[1])
@classmethod
def from_mat(cls,file,unraveled=False,select_col=None):
from .helper import load_mat
@ -206,6 +206,7 @@ class Trajectories:
for key in col:
col[key]-=1
return cls.from_array(pp,col,time,period,unraveled=unraveled,select_col=select_col)
@classmethod
def from_array(cls,pp,col,time,period,unraveled=False,select_col=None):
ntime = len(time)
@ -213,6 +214,7 @@ class Trajectories:
for ii in range(1,ntime):
traj += Particles.from_array(pp[:,:,ii],col,time[ii],period,select_col=select_col)
return traj
def _slice(self,slice_part=slice(None),slice_time=slice(None)):
assert isinstance(slice_part,slice), "'slice_part' must be a slice object."
assert isinstance(slice_time,slice), "'slice_time' must be a slice object."
@ -225,8 +227,8 @@ class Trajectories:
traj += Trajectories(part[slice_part])
traj.frame_velocity = self.frame_velocity
return traj
def add_particles(self,part):
import numpy
# Verify part
assert part.time>self.part[-1].time, "Time steps must be added in monotonically increasing order."
assert part.num==self.numpart, "Number of particles is different from previous time steps."
@ -246,6 +248,7 @@ class Trajectories:
self.attr[key].append(self.part[-1].attr[key].view())
self.numtime += 1
return
def add_trajectories(self,traj):
# Verify traj
assert traj.part[0].time>self.part[-1].time, "Time steps must be added in monotonically increasing order."
@ -267,26 +270,26 @@ class Trajectories:
self.attr[key].append(self.part[-traj.numtime+ii].attr[key].view())
self.numtime += traj.numtime
return
def get_time(self):
    '''Return the time values of all stored time steps as a 1d numpy array.

    Cleaned up diff residue: a redundant local 'import numpy' and a
    duplicated allocation of the time vector.
    '''
    time = np.zeros(self.numtime)
    for ii,part in enumerate(self.part):
        time[ii] = part.time
    return time
def get_trajectories(self,slice_part=slice(None),slice_time=slice(None)):
    '''Get (x,y,z) trajectories in a numpy array of dimension (3,npart,ntime).

    Cleaned up diff residue: duplicated docstring/stack lines and a
    redundant local 'import numpy'.
    '''
    assert isinstance(slice_part,slice), "'slice_part' must be a slice."
    assert isinstance(slice_time,slice), "'slice_time' must be a slice."
    # Ensure attributes are concatenated into (npart,ntime) arrays first.
    self._make_data_array()
    return np.stack([
        self.attr['x'][slice_part,slice_time],
        self.attr['y'][slice_part,slice_time],
        self.attr['z'][slice_part,slice_time]],axis=0)
def get_trajectories_segmented(self,slice_part=slice(None),slice_time=slice(None),restore_ravel_state=True):
'''Get (x,y,z) segments of trajectories as a tuple (len: npart) of
lists (len: nsegments) of numpy arrays (shape: 3 X ntime of the segment)'''
import numpy
lists (len: nsegments) of np arrays (shape: 3 X ntime of the segment)'''
assert isinstance(slice_part,slice), "'slice_part' must be a slice."
assert isinstance(slice_time,slice), "'slice_time' must be a slice."
was_unraveled = self.unraveled
@ -296,21 +299,21 @@ class Trajectories:
ntime = xyzpath.shape[2]
# Initialize output and helper arrays
out = tuple([] for ii in range(npart))
lastJump = numpy.zeros(npart,dtype=numpy.uint)
lastPeriod = numpy.zeros((3,npart),dtype=numpy.int)
lastJump = np.zeros(npart,dtype=np.uint)
lastPeriod = np.zeros((3,npart),dtype=np.int)
for axis in range(3):
if self.period[axis] is not None:
lastPeriod[axis,:] = numpy.floor_divide(xyzpath[axis,:,0],self.period[axis])
lastPeriod[axis,:] = np.floor_divide(xyzpath[axis,:,0],self.period[axis])
# Construct output tuple
for itime in range(1,ntime+1):
thisPeriod = numpy.zeros((3,npart),dtype=int)
thisPeriod = np.zeros((3,npart),dtype=int)
if itime==ntime:
hasJumped = numpy.ones(npart,dtype=bool)
hasJumped = np.ones(npart,dtype=bool)
else:
for axis in range(3):
if self.period[axis] is not None:
thisPeriod[axis,:] = numpy.floor_divide(xyzpath[axis,:,itime],self.period[axis])
hasJumped = numpy.any(thisPeriod!=lastPeriod,axis=0)
thisPeriod[axis,:] = np.floor_divide(xyzpath[axis,:,itime],self.period[axis])
hasJumped = np.any(thisPeriod!=lastPeriod,axis=0)
for ipart in range(npart):
if hasJumped[ipart]:
sl = slice(lastJump[ipart],itime)
@ -325,8 +328,20 @@ class Trajectories:
if restore_ravel_state and not was_unraveled:
self.enforce_periodicity()
return out
def slice_time(self,time_beg=None,time_end=None):
    '''Return the sub-trajectories with time_beg <= t <= time_end.

    Either bound may be None (open-ended). Bounds are located with
    np.searchsorted on the time vector returned by get_time().

    FIX: removed a stub 'return' that made the whole implementation
    unreachable, and guarded the 'idx_end+1' increment so an open upper
    bound (time_end=None) no longer raises TypeError.
    '''
    time = self.get_time()
    if time_beg is not None:
        idx_beg = np.searchsorted(time,time_beg,side='left')
    else:
        idx_beg = None
    if time_end is not None:
        # +1 so a sample exactly at time_end is included in the slice.
        idx_end = np.searchsorted(time,time_end,side='left')+1
    else:
        idx_end = None
    return self._slice(slice_time=slice(idx_beg,idx_end))
def set_frame_velocity(self,val,axis=0):
assert axis<3, "'axis' must be smaller than 3."
for part in self.part:
@ -335,8 +350,8 @@ class Trajectories:
if not self.unraveled:
self.enforce_periodicity()
return
def unravel(self):
import numpy
if self.unraveled: return
#raise NotImplementedError('Implemented but untested!')
self._make_data_array()
@ -345,48 +360,56 @@ class Trajectories:
key = ('x','y','z')[axis]
#posdiff = (self.part[ii].attr[key]-self.part[ii-1].attr[key]).squeeze()
posdiff = self.attr[key][:,1:]-self.attr[key][:,0:-1]
coeff = -numpy.sign(posdiff)*(numpy.abs(posdiff)>0.5*self.period[axis])
coeff = numpy.cumsum(coeff,axis=1)
coeff = -np.sign(posdiff)*(np.abs(posdiff)>0.5*self.period[axis])
coeff = np.cumsum(coeff,axis=1)
self.attr[key][:,1:] += coeff*self.period[axis]
self.unraveled = True
return
def enforce_periodicity(self):
    '''Re-wrap every stored time step into the periodic box and mark the
    trajectories as raveled again.'''
    for snapshot in self.part:
        snapshot.enforce_periodicity(axis=None)
    self.unraveled = False
    return
def to_vtk(self):
    '''Assemble all trajectory segments into a single pyvista PolyData mesh.'''
    import pyvista as pv
    mesh = pv.PolyData()
    for segments in self.get_trajectories_segmented():
        for points in segments:
            # Each segment is a 3 x ntime array; lines_from_points wants
            # points as rows, hence the transpose.
            mesh += pv.helpers.lines_from_points(points.transpose())
    return mesh
def _make_data_array(self):
    '''Transforms self.attr[key] to a numpy array of dimension npart X ntime
    and updates self.part[itime].attr[key] to views on self.attr[key][:,itime].

    No-op when the attributes are already in array form. Cleaned up diff
    residue (duplicated stack line, redundant local 'import numpy', dead
    debug prints).
    '''
    if not self._is_data_list:
        return
    for key in self.attr:
        self.attr[key] = np.stack(self.attr[key],axis=1)
        for ii in range(0,self.numtime):
            # Re-point the per-timestep Particles at column views.
            self.part[ii].attr[key] = self.attr[key][:,ii]
    self._is_data_list = False
    return
def _make_data_list(self):
    '''Transforms self.attr[key] to a list of length ntime of numpy arrays of
    dimension npart and updates self.part[itime].attr[key] to views on
    self.attr[key][itime].

    No-op when the attributes are already in list form. Cleaned up diff
    residue (redundant local 'import numpy', dead debug prints).
    '''
    if self._is_data_list:
        return
    for key in self.attr:
        self.attr[key] = [np.squeeze(x) for x in np.split(self.attr[key],self.numtime,axis=1)]
        for ii in range(0,self.numtime):
            # Re-point the per-timestep Particles at the list entries.
            self.part[ii].attr[key] = self.attr[key][ii].view()
    self._is_data_list = True
    return
def sort(pp,col):
ncol,npart,ntime = pp.shape
assert('id' in col)