diff --git a/python/ibmppp/ibmppp.py b/python/ibmppp/ibmppp.py
index b50f5b5..5d8f090 100644
--- a/python/ibmppp/ibmppp.py
+++ b/python/ibmppp/ibmppp.py
@@ -7,14 +7,13 @@ import os
 
 class ibmppp:
     """Parallel Python Postprocessor for the IBM-DNS code"""
-    def __init__(self,comm,dir_base,iseq,flowType,chunksPerProc=(1,1,1),numberOfGhosts=(1,1,1)):
+    def __init__(self,comm,dir_base,flowType,chunksPerProc=(1,1,1),numberOfGhosts=(1,1,1)):
         """Class constructor"""
         # Save method arguments for later use
         self.__comm = comm
         self.__rank = comm.Get_rank()
         self.__nproc = comm.Get_size()
         self.__dir_base = dir_base
-        self.__iseq = iseq
         self.__nxppp = chunksPerProc[0]
         self.__nyppp = chunksPerProc[1]
         self.__nzppp = chunksPerProc[2]
@@ -88,7 +87,7 @@ class ibmppp:
         '''Read input processor grid, compute processor grid for workers'''
         if self.__rank==0:
             # Read the processor grid
-            file_proc = self.__dir_base+'/proc_{:04d}.bin'.format(self.__iseq)
+            file_proc = self.__dir_base+'/proc.bin'
             (self.__procGrid1['u'][0], self.__procGrid1['u'][1], self.__procGrid1['u'][2], self.__procGrid1['u'][3], self.__procGrid1['u'][4], self.__procGrid1['u'][5],
              self.__procGrid1['v'][0], self.__procGrid1['v'][1], self.__procGrid1['v'][2], self.__procGrid1['v'][3], self.__procGrid1['v'][4], self.__procGrid1['v'][5],
              self.__procGrid1['w'][0], self.__procGrid1['w'][1], self.__procGrid1['w'][2], self.__procGrid1['w'][3], self.__procGrid1['w'][4], self.__procGrid1['w'][5],
@@ -153,7 +152,7 @@ class ibmppp:
         buffer = None
         if self.__rank==0:
             # Read the grid
-            file_grid = self.__dir_base+'/grid_{:04d}.bin'.format(self.__iseq)
+            file_grid = self.__dir_base+'/grid.bin'
             buffer = ucf.readGrid(file_grid)
         # Broadcast the data
         buffer = self.__comm.bcast(buffer,root=0)
@@ -204,7 +203,7 @@ class ibmppp:
         # There is no need to distinguish between master and slave particles here, since we will
         # only use them for masking the fields. Also we do not care about particles in ghost cells.
         if self.__rank==0:
-            file_input = self.__dir_base+'particles_{:04d}.bin'.format(self.__iseq)
+            file_input = self.__dir_base+'particles.bin'
             pp,col = ucf.readParticles(file_input,step=1)
             # Remove time dimension, because we are dealing with a single time step exclusively
             pp = pp[:,:,0]
@@ -252,13 +251,15 @@ class ibmppp:
                 sendbuf = (np.ascontiguousarray(pp[:,li_part]),col)
                 self.__mpireq.append(self.__comm.isend(sendbuf,dest=rank_dst))
         # Every rank needs to receive the particles, rank 0 sends them to itself
-        req = self.__comm.irecv(source=0)
+        buffsize = 32*1024*1024
+        req = self.__comm.irecv(buffsize,source=0)
         (self.particles,self.col) = req.wait()
         if self.__rank==0:
             # Wait for everyone to finish
             MPI.Request.waitall(self.__mpireq)
             # Communication is done! Clear the list of requests
             self.__mpireq.clear()
+        self.__comm.Barrier()
 
     def loadField(self,key,dtype='float64'):
         '''Reads chunks from files'''
@@ -275,19 +276,19 @@ class ibmppp:
         # Also determine the file which contains the desired field as well as the
         # dataset ID
         if key[0]=='u':
-            filebase = self.__dir_base+'uvwp_{:04d}.'.format(self.__iseq)
+            filebase = self.__dir_base+'uvwp.'
             dset = 1
         elif key[0]=='v':
-            filebase = self.__dir_base+'uvwp_{:04d}.'.format(self.__iseq)
+            filebase = self.__dir_base+'uvwp.'
             dset = 2
         elif key[0]=='w':
-            filebase = self.__dir_base+'uvwp_{:04d}.'.format(self.__iseq)
+            filebase = self.__dir_base+'uvwp.'
             dset = 3
         elif key[0]=='p':
-            filebase = self.__dir_base+'uvwp_{:04d}.'.format(self.__iseq)
+            filebase = self.__dir_base+'uvwp.'
             dset = 4
         elif key[0]=='s':
-            filebase = self.__dir_base+'scal_{:04d}.'.format(self.__iseq)
+            filebase = self.__dir_base+'scal.'
             dset = int(key[1])
         if dset!=1:
             self.fields[key] = None
@@ -860,7 +861,7 @@ class ibmppp:
             stencil[ii] = 2*int(truncate*sigma[ii]+0.5)+1
         if self.__nghx
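Notes on this change:

The sequence index iseq is dropped from the constructor, and the input files lose their _{:04d} suffix (proc.bin, grid.bin, particles.bin, uvwp.*, scal.*), so one ibmppp instance now maps to a single snapshot directory rather than a numbered sequence entry.

The irecv() call now passes an explicit receive-buffer size. mpi4py's pickle-based irecv() preallocates only a small buffer by default, so receiving a large pickled particle array can fail with a message-truncation error; passing a byte count (here 32 MiB) preallocates a buffer large enough for the payload. The added Barrier() keeps any rank from moving on (e.g. into loadField) before the particle exchange has completed on every rank.

A minimal standalone sketch of the irecv pattern follows (not part of the patch; the script name and array size are illustrative, the 32 MiB constant is taken from the diff). Run with: mpiexec -n 2 python irecv_demo.py

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

if rank == 0:
    # A payload comparable to a large particle array (~12 MB once pickled)
    payload = np.random.rand(3, 500000)
    comm.isend(payload, dest=1).wait()
elif rank == 1:
    # Without the explicit size, irecv() allocates only a small default
    # buffer and receiving a large pickled object can fail
    buffsize = 32*1024*1024
    req = comm.irecv(buffsize, source=0)
    payload = req.wait()
    print(payload.shape)

comm.Barrier()  # mirrors the Barrier added in the patch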