small changes on fh2: start debugging
parent 589cde04f3
commit b3748eb210
@@ -7,14 +7,13 @@ import os
 class ibmppp:
     """Parallel Python Postprocessor for the IBM-DNS code"""

-    def __init__(self,comm,dir_base,iseq,flowType,chunksPerProc=(1,1,1),numberOfGhosts=(1,1,1)):
+    def __init__(self,comm,dir_base,flowType,chunksPerProc=(1,1,1),numberOfGhosts=(1,1,1)):
         """Class constructor"""
         # Save method arguments for later use
         self.__comm = comm
         self.__rank = comm.Get_rank()
         self.__nproc = comm.Get_size()
         self.__dir_base = dir_base
-        self.__iseq = iseq
         self.__nxppp = chunksPerProc[0]
         self.__nyppp = chunksPerProc[1]
         self.__nzppp = chunksPerProc[2]
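The sequence number iseq is no longer a constructor argument, so the postprocessor now works on the un-numbered files (proc.bin, grid.bin, particles.bin, uvwp.*, scal.*) in dir_base. A minimal construction sketch, assuming mpi4py and that ibmppp is imported from this module; the directory and flowType value below are placeholders, not taken from the repository:

    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    ppp = ibmppp(comm, '/scratch/run01/', 'channel',          # placeholder dir_base and flowType
                 chunksPerProc=(2, 2, 1), numberOfGhosts=(2, 2, 2))
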
@@ -88,7 +87,7 @@ class ibmppp:
         '''Read input processor grid, compute processor grid for workers'''
         if self.__rank==0:
             # Read the processor grid
-            file_proc = self.__dir_base+'/proc_{:04d}.bin'.format(self.__iseq)
+            file_proc = self.__dir_base+'/proc.bin'
             (self.__procGrid1['u'][0], self.__procGrid1['u'][1], self.__procGrid1['u'][2], self.__procGrid1['u'][3], self.__procGrid1['u'][4], self.__procGrid1['u'][5],
              self.__procGrid1['v'][0], self.__procGrid1['v'][1], self.__procGrid1['v'][2], self.__procGrid1['v'][3], self.__procGrid1['v'][4], self.__procGrid1['v'][5],
              self.__procGrid1['w'][0], self.__procGrid1['w'][1], self.__procGrid1['w'][2], self.__procGrid1['w'][3], self.__procGrid1['w'][4], self.__procGrid1['w'][5],
@@ -153,7 +152,7 @@ class ibmppp:
         buffer = None
         if self.__rank==0:
             # Read the grid
-            file_grid = self.__dir_base+'/grid_{:04d}.bin'.format(self.__iseq)
+            file_grid = self.__dir_base+'/grid.bin'
             buffer = ucf.readGrid(file_grid)
         # Broadcast the data
         buffer = self.__comm.bcast(buffer,root=0)
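The grid is read once on rank 0 and then broadcast, so worker ranks never open grid.bin themselves. A minimal sketch of that pattern with mpi4py; the dictionary stands in for whatever ucf.readGrid actually returns:

    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    buffer = None
    if comm.Get_rank() == 0:
        buffer = {'x': [0.0, 0.5, 1.0]}      # placeholder for ucf.readGrid(file_grid)
    buffer = comm.bcast(buffer, root=0)      # every rank now holds the same grid data
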
@@ -204,7 +203,7 @@ class ibmppp:
         # There is no need to distinguish between master and slave particles here, since we will
         # only use them for masking the fields. Also we do not care about particles in ghost cells.
         if self.__rank==0:
-            file_input = self.__dir_base+'particles_{:04d}.bin'.format(self.__iseq)
+            file_input = self.__dir_base+'particles.bin'
             pp,col = ucf.readParticles(file_input,step=1)
             # Remove time dimension, because we are dealing with a single time step exclusively
             pp = pp[:,:,0]
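Indexing with pp[:,:,0] drops the trailing time axis; a tiny illustration (the (columns, particles, time) layout is only an assumption for this sketch, not taken from ucf.readParticles):

    import numpy as np

    pp = np.zeros((7, 100, 1))   # assumed layout: (columns, particles, single time step)
    pp = pp[:, :, 0]
    print(pp.shape)              # (7, 100)
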
@@ -252,13 +251,15 @@ class ibmppp:
                 sendbuf = (np.ascontiguousarray(pp[:,li_part]),col)
                 self.__mpireq.append(self.__comm.isend(sendbuf,dest=rank_dst))
         # Every rank needs to receive the particles, rank 0 sends them to itself
-        req = self.__comm.irecv(source=0)
+        buffsize = 32*1024*1024
+        req = self.__comm.irecv(buffsize,source=0)
         (self.particles,self.col) = req.wait()
         if self.__rank==0:
             # Wait for everyone to finish
             MPI.Request.waitall(self.__mpireq)
             # Communication is done! Clear the list of requests
             self.__mpireq.clear()
+        self.__comm.Barrier()

     def loadField(self,key,dtype='float64'):
         '''Reads chunks from files'''
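Unlike the blocking recv, mpi4py's irecv cannot probe the size of an incoming pickled message, so it fails when the message exceeds the receive buffer, and the default buffer can be far too small for a full particle array; the new buffsize passes an explicit 32 MiB upper bound. A self-contained sketch of the same send/receive pattern, here pre-allocating a bytearray of that size (payload and size are illustrative):

    from mpi4py import MPI
    import numpy as np

    comm = MPI.COMM_WORLD
    if comm.Get_rank() == 0:
        payload = (np.arange(1000, dtype='float64'), ['x', 'y', 'z'])   # made-up particle data
        reqs = [comm.isend(payload, dest=r) for r in range(comm.Get_size())]
    req = comm.irecv(bytearray(32*1024*1024), source=0)   # large enough for the pickled message
    particles, col = req.wait()
    if comm.Get_rank() == 0:
        MPI.Request.waitall(reqs)
    comm.Barrier()
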
@@ -275,19 +276,19 @@ class ibmppp:
         # Also determine file which contains the desired field as well as the
         # dataset ID
         if key[0]=='u':
-            filebase = self.__dir_base+'uvwp_{:04d}.'.format(self.__iseq)
+            filebase = self.__dir_base+'uvwp.'
             dset = 1
         elif key[0]=='v':
-            filebase = self.__dir_base+'uvwp_{:04d}.'.format(self.__iseq)
+            filebase = self.__dir_base+'uvwp.'
             dset = 2
         elif key[0]=='w':
-            filebase = self.__dir_base+'uvwp_{:04d}.'.format(self.__iseq)
+            filebase = self.__dir_base+'uvwp.'
             dset = 3
         elif key[0]=='p':
-            filebase = self.__dir_base+'uvwp_{:04d}.'.format(self.__iseq)
+            filebase = self.__dir_base+'uvwp.'
             dset = 4
         elif key[0]=='s':
-            filebase = self.__dir_base+'scal_{:04d}.'.format(self.__iseq)
+            filebase = self.__dir_base+'scal.'
             dset = int(key[1])
             if dset!=1:
                 self.fields[key] = None
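With the sequence suffix gone, the field key alone determines the file prefix and dataset ID; the branch above boils down to a lookup of this shape (a sketch for clarity, not code from the file):

    def field_location(key, dir_base):
        """Map a field key ('u','v','w','p','s1',...) to (file prefix, dataset ID)."""
        if key[0] in 'uvwp':
            return dir_base + 'uvwp.', 'uvwp'.index(key[0]) + 1
        if key[0] == 's':
            return dir_base + 'scal.', int(key[1])
        raise ValueError('Unknown field key: ' + key)
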
@@ -860,7 +861,7 @@ class ibmppp:
             stencil[ii] = 2*int(truncate*sigma[ii]+0.5)+1
         if self.__nghx<stencil[0] or self.__nghx<stencil[1] or self.__nghx<stencil[2]:
             raise ValueError('Too few ghost cells for stencil: ',stencil,(self.__nghx,self.__nghy,self.__nghz))
-        print(stencil)
+        #print(stencil)
         # Run scipy gaussian filter
         self.field[key] = ndimage.gaussian_filter(self.field[key],sigma,truncate=truncate,mode='constant',cval=0.0)
         # Exchange ghost cells and set boundary conditions
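scipy truncates the Gaussian kernel at truncate standard deviations, so the filter reaches int(truncate*sigma+0.5) cells to each side and the full width checked above is 2*int(truncate*sigma+0.5)+1; the guard requires the ghost layers to be at least that wide before the constant-padded filter call below. (Note that the guard compares __nghx against all three stencil components; the y and z checks presumably ought to use __nghy and __nghz.) A small stand-alone illustration with made-up sizes:

    import numpy as np
    from scipy import ndimage

    sigma, truncate = 2.0, 4.0
    radius = int(truncate*sigma + 0.5)       # cells reached on each side: 8
    print(2*radius + 1)                      # full stencil width: 17

    chunk = np.random.rand(32, 32, 32)       # made-up field chunk including ghost cells
    smooth = ndimage.gaussian_filter(chunk, sigma, truncate=truncate,
                                     mode='constant', cval=0.0)
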
@@ -1083,6 +1084,7 @@ class ibmppp:
         self.__communicateGhostCells(key,(+1,+1,+1)) # right,up,back
         # Wait for communication to finish
         MPI.Request.waitall(self.__mpireq)
+        self.__comm.Barrier()
         # Communication is done! Clear the list of requests
         self.__mpireq.clear()
         # Assign buffer to array
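Request.waitall completes only this rank's own requests; the added Barrier makes every rank stop here before the ghost buffers are unpacked, which helps narrow down a hanging or mismatched exchange while debugging. The pattern in isolation (the ring exchange is illustrative only):

    from mpi4py import MPI

    comm  = MPI.COMM_WORLD
    right = (comm.Get_rank() + 1) % comm.Get_size()
    left  = (comm.Get_rank() - 1) % comm.Get_size()
    reqs  = [comm.isend(comm.Get_rank(), dest=right),
             comm.irecv(source=left)]
    MPI.Request.waitall(reqs)   # completes only this rank's requests
    comm.Barrier()              # extra synchronization point, as in the diff above
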
@@ -1468,4 +1470,14 @@ class ibmppp:
             symm = np.multiply(symm1,symm2)
         else:
             raise ValueError('Invalid operation: ',operation)
-        return symm
+        return symm
+
+# Utility functions
+def gaussianFilterStencilWidth(sigma=(1,1,1),truncate=4.0):
+    if len(sigma)!=3:
+        raise ValueError('Expected one value of sigma for each direction, but only got {}.'.format(len(sigma)))
+    # Compute stencil width
+    stencil = [0,0,0]
+    for ii in range(0,3):
+        stencil[ii] = 2*int(truncate*sigma[ii]+0.5)+1
+    return stencil
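With the default truncate=4.0 the new helper reproduces the per-direction widths computed inside the filter routine; for example gaussianFilterStencilWidth(sigma=(1,2,3)) returns [9, 17, 25]. The arithmetic as a stand-alone check:

    truncate = 4.0
    for sigma in (1, 2, 3):
        print(2*int(truncate*sigma + 0.5) + 1)   # 9, 17, 25
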