implemented statistics: to be debugged

Michael Krayer 2020-04-03 16:54:08 +02:00
parent 69ae002e56
commit 2b1d859a7e
1 changed file with 131 additions and 0 deletions


@@ -24,6 +24,9 @@ class ibmppp:
        self.__precision = precision
        # Declare variables which hold fields
        self.field = {'u': None, 'v': None, 'w': None, 'p': None, 's1': None}
        # Declare variables which hold statistics
        self.spatialMean = {'u': None, 'v': None, 'w': None, 'p': None, 's1': None}
        self.spatialMsqr = {'u': None, 'v': None, 'w': None, 'p': None, 's1': None}
        # Declare variables for grid (to be set by "loadGrid()")
        self.grid = {'u': [None,None,None], 'v': [None,None,None], 'w': [None,None,None], 'p': [None,None,None], 's1': [None,None,None]}
        self.domainBounds = (None,None,None,None,None,None)
@@ -582,6 +585,26 @@ class ibmppp:
        fid.write('</Xdmf> \n')
        fid.close()
    def saveStatistics(self,filename):
        '''Writes all gathered statistics to an h5 file.'''
        if self.__rank==0:
            # Construct file name
            file_h5 = filename+'.h5'
            # Open the file and write data
            fid = h5py.File(file_h5,'w')
            for key in self.spatialMean:
                if self.spatialMean[key] is None:
                    continue
                gid = fid.create_group('/'+key)
                did = gid.create_dataset('mean',data=self.spatialMean[key].ravel('C'))
                did.attrs['F_CONTIGUOUS'] = 0
                did.attrs['DIM'] = self.spatialMean[key].shape
                did = gid.create_dataset('meansquare',data=self.spatialMsqr[key].ravel('C'))
                did.attrs['F_CONTIGUOUS'] = 0
                did.attrs['DIM'] = self.spatialMsqr[key].shape
            # Close the file
            fid.close()
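    # Read-back sketch (file name 'stats.h5' is hypothetical): the layout
    # '/<key>/mean' and '/<key>/meansquare' with the 'DIM' shape attribute
    # follows directly from saveStatistics() above, e.g.
    #     import h5py
    #     import numpy as np
    #     with h5py.File('stats.h5','r') as fid:
    #         dset = fid['/u/mean']
    #         mean_u = np.asarray(dset).reshape(dset.attrs['DIM'],order='C')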
    def saveParticles(self,filename,xdmf=False):
        '''Writes particles to h5 file.'''
        # Select duplicate particles (they have a negative ID)
@@ -907,6 +930,114 @@ class ibmppp:
        if collocate:
            self.shiftField(keyout,self.__getShiftDirection(keyout,'p'))
    def spatialStatistics(self,key,homogeneous=None):
        '''Computes spatial mean and mean square over homogeneous directions.'''
        # A default argument cannot reference self, so the periodic directions
        # are resolved here rather than in the signature
        if homogeneous is None:
            homogeneous = (self.__xperiodic,self.__yperiodic,self.__zperiodic)
        # Check if there is any homogeneous direction
        if not np.any(homogeneous):
            raise ValueError('Cannot compute statistics, since there is no homogeneous direction.')
        # Each processor computes local statistics:
        # We get three local arrays with len=1 in homogeneous directions and len=nl in inhomogeneous:
        # tmpsum: sum of local non-nan values
        # tmpssq: sum of squares of local non-nan values
        # counter: number of points used for the sum
        tmpsum = self.field[key][self.__nghx:-self.__nghx,self.__nghy:-self.__nghy,self.__nghz:-self.__nghz]
        tmpssq = np.square(tmpsum)
        # Use a fixed-width integer type so the MPI buffers are well-defined
        counter = (~np.isnan(tmpsum)).astype(np.int64)
        for idir in range(0,3):
            if homogeneous[idir]:
                counter = np.sum(counter,axis=idir,keepdims=True)
                tmpsum = np.nansum(tmpsum,axis=idir,keepdims=True)
                tmpssq = np.nansum(tmpssq,axis=idir,keepdims=True)
        # Reduce the statistics to the processors with col/row/pln=0 in the
        # homogeneous directions. MPI.Reduce needs the same root on every rank
        # of a communicator, so split the communicator into one group per
        # destination processor and reduce within each group.
        pos = [self.__icol,self.__jrow,self.__kpln]
        pos_dst = [0 if homogeneous[idir] else pos[idir] for idir in range(0,3)]
        rank_dst = self.__rankFromPosition(pos_dst[0],pos_dst[1],pos_dst[2],self.__nxp,self.__nyp,self.__nzp)
        is_dst = (pos==pos_dst)
        subcomm = self.__comm.Split(color=rank_dst,key=0 if is_dst else 1)
        # Communicate counter, tmpsum and tmpssq (the destination is rank 0 of
        # each sub-communicator; all other ranks pass None as receive buffer)
        recvcnt = np.zeros_like(counter) if is_dst else None
        recvsum = np.zeros_like(tmpsum) if is_dst else None
        recvssq = np.zeros_like(tmpssq) if is_dst else None
        subcomm.Reduce(counter,recvcnt,op=MPI.SUM,root=0)
        subcomm.Reduce(tmpsum,recvsum,op=MPI.SUM,root=0)
        subcomm.Reduce(tmpssq,recvssq,op=MPI.SUM,root=0)
        subcomm.Free()
        # All processors with col/row/pln!=0 in a homogeneous direction are done.
        if not is_dst:
            return
        counter,tmpsum,tmpssq = recvcnt,recvsum,recvssq
        # The inhomogeneous directions need to communicate their data to rank 0, which then holds
        # the full line or plane. Rank 0 allocates the final array
        if self.__rank==0:
            # Determine size of final array
            nxg = self.__procGrid[key][1][-1]-self.__procGrid[key][0][0]
            nyg = self.__procGrid[key][3][-1]-self.__procGrid[key][2][0]
            nzg = self.__procGrid[key][5][-1]-self.__procGrid[key][4][0]
            dim = [nxg,nyg,nzg]
            for idir in range(0,3):
                if homogeneous[idir]:
                    dim[idir] = 1
            # Allocate a nice spot of memory
            self.spatialMean[key] = np.zeros(dim,dtype=self.__precision)
            self.spatialMsqr[key] = np.zeros(dim,dtype=self.__precision)
            # Also allocate one helper array
            counter_full = np.zeros(dim,dtype=counter.dtype)
            # Create iterators for the receive requests
            ipend = 0 if homogeneous[0] else self.__nxp-1
            jpend = 0 if homogeneous[1] else self.__nyp-1
            kpend = 0 if homogeneous[2] else self.__nzp-1
            ipiter = range(0,ipend+1)
            jpiter = range(0,jpend+1)
            kpiter = range(0,kpend+1)
            # Iterate through sending procs
            for ip in ipiter:
                for jp in jpiter:
                    for kp in kpiter:
                        # Determine the rank of the processor from which data is received
                        rank_src = self.__rankFromPosition(ip,jp,kp,self.__nxp,self.__nyp,self.__nzp)
                        # Determine target array position
                        ib,ie = (0,0) if homogeneous[0] else (self.__procGrid[key][0][ip]-1,self.__procGrid[key][1][ip]-1)
                        jb,je = (0,0) if homogeneous[1] else (self.__procGrid[key][2][jp]-1,self.__procGrid[key][3][jp]-1)
                        kb,ke = (0,0) if homogeneous[2] else (self.__procGrid[key][4][kp]-1,self.__procGrid[key][5][kp]-1)
                        shape = (ie-ib+1,je-jb+1,ke-kb+1)
                        # Receive data and put it in a good spot. Rank 0 already
                        # holds its own contribution; for all other sources,
                        # MPI.Recv fills a preallocated buffer.
                        if rank_src==0:
                            recvbuf = counter
                        else:
                            recvbuf = np.empty(shape,dtype=counter.dtype)
                            self.__comm.Recv(recvbuf,source=rank_src,tag=1)
                        counter_full[ib:ie+1,jb:je+1,kb:ke+1] = recvbuf
                        if rank_src==0:
                            recvbuf = tmpsum
                        else:
                            recvbuf = np.empty(shape,dtype=tmpsum.dtype)
                            self.__comm.Recv(recvbuf,source=rank_src,tag=2)
                        self.spatialMean[key][ib:ie+1,jb:je+1,kb:ke+1] = recvbuf
                        if rank_src==0:
                            recvbuf = tmpssq
                        else:
                            recvbuf = np.empty(shape,dtype=tmpssq.dtype)
                            self.__comm.Recv(recvbuf,source=rank_src,tag=3)
                        self.spatialMsqr[key][ib:ie+1,jb:je+1,kb:ke+1] = recvbuf
            # Rank 0 has all the data now: divide the sums by the number of
            # contributing points to finalize mean and mean square
            self.spatialMean[key] /= counter_full
            self.spatialMsqr[key] /= counter_full
        else:
            # All ranks but rank 0 send their data
            self.__comm.Send(counter,dest=0,tag=1)
            self.__comm.Send(tmpsum, dest=0,tag=2)
            self.__comm.Send(tmpssq, dest=0,tag=3)
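    # Usage sketch (instance name 'ppp' is hypothetical): compute the moments
    # over the periodic directions, write them to disk, and, on rank 0, derive
    # the variance from the two stored moments via var = <u^2> - <u>^2:
    #     ppp.spatialStatistics('u')
    #     ppp.saveStatistics('stats')
    #     var_u = ppp.spatialMsqr['u'] - np.square(ppp.spatialMean['u'])  # rank 0 only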
    def collocateVelocity(self):
        '''Moves velocity components onto the pressure grid.'''
        raise NotImplementedError('TBD')