Compare commits

...

2 Commits

Author SHA1 Message Date
Michael Krayer 87f65fecfd restructured package. implemented some abstraction layers 2021-05-12 23:37:12 +02:00
Michael Krayer f707360f17 updated output 2021-05-12 13:07:14 +02:00
11 changed files with 302 additions and 151 deletions


@@ -678,6 +678,14 @@ class ibmppp:
        gid.create_dataset('nx',data=nxg)
        gid.create_dataset('ny',data=nyg)
        gid.create_dataset('nz',data=nzg)
        x0 = self.grid[key][0][0]
        y0 = self.grid[key][1][0]
        z0 = self.grid[key][2][0]
        dx = self.__dx[key]*step
        gid.create_dataset('x0',data=x0)
        gid.create_dataset('y0',data=y0)
        gid.create_dataset('z0',data=z0)
        gid.create_dataset('dx',data=dx)
        did = gid.create_dataset('data',(nzg,nyg,nxg),dtype=self.__precision)
        did[ksl_h5,jsl_h5,isl_h5] = np.transpose(self.field[key][isl_ch,jsl_ch,ksl_ch])
        fid.close()
@@ -691,10 +699,12 @@ class ibmppp:
        fid.close()
        if self.__rank!=self.__nproc-1:
            self.__comm.send(True, dest=self.__rank+1, tag=1)
        # Ensure that no processor is still working on the file
        self.__comm.Barrier()
        # Create an XDMF file for the field
        if xdmf and self.__rank==0:
            if verbose:
                print('[saveFieldSingleFile] generating XDMF',end='\r')
                print('[saveFieldSingleFile] generating XDMF'+4*'\t',end='\r')
            # Construct XDMF filename
            file_xdmf = filename+'_'+key+'.xdmf'
            filename_h5 = os.path.basename(file_out)
@@ -705,10 +715,6 @@ class ibmppp:
            fid.write(' <Domain>\n')
            fid.write(' <Grid Name="{}">\n'.format(key))
            #fid.write(' <Time Value="%.2f" />\n',params.general.simtime)
            dx = self.__dx[key]*step
            x0 = self.grid[key][0][0]
            y0 = self.grid[key][1][0]
            z0 = self.grid[key][2][0]
            fid.write(' <Topology TopologyType="3DCORECTMesh" NumberOfElements="{:d} {:d} {:d}"/>\n'.format(nzg,nyg,nxg))
            fid.write(' <Geometry Origin="" Type="ORIGIN_DXDYDZ">\n')
            fid.write(' <DataItem DataType="Float" Dimensions="3" Format="XML" Precision="8"> {:12f} {:12f} {:12f}</DataItem>\n'.format(z0,y0,x0))
@@ -722,6 +728,10 @@ class ibmppp:
            fid.write(' </Domain>\n')
            fid.write('</Xdmf> \n')
            fid.close()
        # Clear carriage return output if verbose
        if self.__rank==0 and verbose:
            print(10*'\t',end='\r')
        return None
    def saveStatistics(self,filename):
        '''Writes all gathered statistics to an h5 file.'''
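For reference, a hedged sketch of reading one of these fields back: the file name and group key below are assumptions; only the dataset names (nx, ny, nz, x0, y0, z0, dx, data) and the (nz,ny,nx) storage order come from the hunks above.

# Hypothetical readback of a saveFieldSingleFile output; 'field_u.h5'
# and the group key 'u' are assumptions, not names from the repository.
import h5py
import numpy as np

with h5py.File('field_u.h5','r') as fid:
    gid = fid['u']
    nx, x0, dx = gid['nx'][()], gid['x0'][()], gid['dx'][()]
    x = x0 + dx*np.arange(nx)   # uniform axis, matching ORIGIN_DXDYDZ in the XDMF
    data = gid['data'][...]     # stored transposed as (nz,ny,nx)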

python/ucf/__init__.py Normal file (+3)

@@ -0,0 +1,3 @@
from .base import UCF
from .tools import read_grid, read_procgrid, read_chunk, read_particles, grid_chunk
from .abstraction import Ustar, UCFSnapshot, ChunkIterator

python/ucf/abstraction.py Normal file (+100)

@@ -0,0 +1,100 @@
class ChunkIterator:
    def __init__(self,snapshot,dset=-1,keep_ghost=True):
        self.snapshot = snapshot
        self.dset = dset
        self.keep_ghost = keep_ghost
        self.iter_rank = 0
    def __iter__(self):
        self.iter_rank = 0
        return self
    def __next__(self):
        if self.iter_rank<self.snapshot.nproc:
            data = self.snapshot.read_chunk(
                self.iter_rank,dset=self.dset,keep_ghost=self.keep_ghost
            )
            self.iter_rank += 1
            return data
        else:
            raise StopIteration

class UCFSnapshot:
    '''Handles a snapshot.ucf.tar file.'''
    def __init__(self,file_tar,file_index=None):
        self.handler = Ustar(file_tar,file_index)
        self.verbose = False
        self.debug = False
        self.type = None
        #
        file_name_string = '\t'.join(self.handler.file_name)
        if 'uvwp' in file_name_string:
            self.type = 'uvwp'
        elif 'scal' in file_name_string:
            self.type = 'scal'
        else:
            raise ValueError("Archive contains neither 'uvwp' nor 'scal' files.")
        self.nproc = sum(self.type in s for s in self.handler.file_name)
    def read_particles(self):
        from .tools import read_particles
        data = self.handler.read('particles.bin')
        return read_particles(data,step=1,verbosity=self.verbose,debug=self.debug)
    def read_chunk(self,rank,dset=-1,keep_ghost=True):
        from .tools import read_chunk
        file_target = self.type+'.{:05d}'.format(rank)
        data = self.handler.read(file_target)
        return read_chunk(data,step=1,dset=dset,keep_ghost=keep_ghost,
                          verbosity=self.verbose,debug=self.debug)
    def read_grid(self):
        from .tools import read_grid
        data = self.handler.read('grid.bin')
        return read_grid(data,verbosity=self.verbose,debug=self.debug)
    def read_procgrid(self):
        from .tools import read_procgrid
        data = self.handler.read('proc.bin')
        return read_procgrid(data,verbosity=self.verbose,debug=self.debug)

class Ustar:
    '''Minimalistic ustar implementation meant to be used with ucftar files.'''
    def __init__(self,file_tar,file_index=None):
        self.path = file_tar
        self.num_files = 0
        self.file_name = []
        self.file_size = []
        self.file_offset = []
        if file_index:
            self.import_index_file(file_index)
        else:
            self.import_tar_file()
    def __del__(self):
        return
    def import_tar_file(self):
        '''Imports information on the tar archive by scanning it.'''
        from tarfile import TarFile,USTAR_FORMAT
        with open(self.path,'rb') as f:
            tarinfos = TarFile(fileobj=f,format=USTAR_FORMAT).getmembers()
        self.num_files = 0
        for tarinfo in tarinfos:
            self.num_files += 1
            self.file_name.append(tarinfo.name)
            self.file_offset.append(tarinfo.offset_data)
            self.file_size.append(tarinfo.size)
        return
    def import_index_file(self,file_index):
        '''Imports information on the tar archive from a .taridx file.'''
        from struct import unpack
        with open(file_index,'rb') as f:
            self.num_files = unpack('<q',f.read(8))[0]
            self.file_name = []
            self.file_size = []
            self.file_offset = []
            for ifile in range(0,self.num_files):
                self.file_name.append(f.read(256).decode().rstrip('\0').strip())
                self.file_offset.append(unpack('<q',f.read(8))[0])
                self.file_size.append(unpack('<q',f.read(8))[0])
        return
    def read(self,file):
        '''Reads a file from the archive into memory. Data is returned as bytes.'''
        idx = self.file_name.index(file)
        with open(self.path,'rb') as f:
            f.seek(self.file_offset[idx])
            return f.read(self.file_size[idx])
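For orientation, a minimal usage sketch of the new abstraction layer; the archive name 'snapshot.ucf.tar' and the dataset index dset=1 are assumptions, not values from the repository.

# Hedged usage sketch of UCFSnapshot and ChunkIterator.
from ucf import UCFSnapshot, ChunkIterator

snap = UCFSnapshot('snapshot.ucf.tar')    # scans the tar for uvwp/scal members
grid = snap.read_grid()                   # global grid axes from grid.bin
for chunks in ChunkIterator(snap, dset=1):
    # read_chunk returns a list with one dict per requested dataset
    print(chunks[0]['rank'], chunks[0]['data'].shape)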

python/ucf/base.py

@@ -474,149 +474,4 @@ class UCF:
        self.__currentSetSizeof = 0
        self.__currentSetNumParams = 0
        self.__currentSetParams = 0
        self.__currentSetNumElements = 0
#################################
# High-level function interface #
#################################
def readGrid(file,verbosity=False,debug=False):
    obj = UCF(file=file,verbosity=verbosity,debug=debug)
    output = []
    for iset in range(0,obj.NumDataset):
        (data,params) = obj.readSet(step=1,dset=iset+1)
        nx = params[0]
        ny = params[1]
        nz = params[2]
        output.append(data[0:nx])
        output.append(data[nx:nx+ny])
        output.append(data[nx+ny:nx+ny+nz])
    #if obj.UCFVersion<2:
    if obj.NumDataset<5:
        output.extend(output[-3:])
    obj.close()
    return output
def readProcgrid(file,verbosity=False,debug=False):
    obj = UCF(file=file,verbosity=verbosity,debug=debug)
    output = []
    for iset in range(0,obj.NumDataset):
        (data,params) = obj.readSet(step=1,dset=iset+1)
        nxp = params[0]
        nyp = params[1]
        nzp = params[2]
        output.append(data[0:nxp]) # ibeg
        output.append(data[nxp:2*nxp]) # iend
        output.append(data[2*nxp:2*nxp+nyp]) # jbeg
        output.append(data[2*nxp+nyp:2*nxp+2*nyp]) # jend
        output.append(data[2*nxp+2*nyp:2*nxp+2*nyp+nzp]) # kbeg
        output.append(data[2*nxp+2*nyp+nzp:2*nxp+2*nyp*2*nzp]) # kend
    #if obj.UCFVersion<2:
    if obj.NumDataset<5:
        output.extend(output[-6:])
    obj.close()
    return output
def readFieldChunk(file,step=1,dset=-1,verbosity=False,debug=False):
    obj = UCF(file=file,verbosity=verbosity,debug=debug)
    if not isinstance(dset,list):
        if dset==-1:
            dset = range(1,obj.NumDataset+1) # fix that maybe later (this is maximum over all timesteps)
        else:
            dset = [dset]
    output = []
    for ii in dset:
        tmp = dict()
        (data,params) = obj.readSet(step=step,dset=ii)
        tmp['ighost'] = params[0]
        tmp['ibeg'] = params[1]
        tmp['jbeg'] = params[2]
        tmp['kbeg'] = params[3]
        tmp['nxl'] = params[4]
        tmp['nyl'] = params[5]
        tmp['nzl'] = params[6]
        tmp['nx'] = params[7]
        tmp['ny'] = params[8]
        tmp['nz'] = params[9]
        tmp['data'] = data.reshape((tmp['nxl']+2*tmp['ighost'],
                                    tmp['nyl']+2*tmp['ighost'],
                                    tmp['nzl']+2*tmp['ighost']),
                                   order='F')
        tmp['rank'] = obj.IORank[0]
        tmp['rankijk']= obj.IORank[1:]
        output.append(tmp)
    obj.close()
    return output
def readParticles(file,step=-1,verbosity=False,debug=False):
    # Check what kind of file was passed: standalone, tar, bytes
    # TBD: tar is not supported yet
    obj = UCF(file=file,verbosity=verbosity,debug=debug)
    if not isinstance(step,list):
        if step==-1:
            step = range(1,obj.NumTimestep+1)
        else:
            step = [step]
    # The output will be the following:
    # 1) numpy array with dimension (ncol,np,ntime)
    # 2) dictionary which specifies the columns
    # We read the data step by step in a list, which is then converted to a 3D array
    pp = []
    for ii in step:
        (data,params) = obj.readSet(step=ii,dset=1)
        npart = params[0]
        ncol = params[1]
        ncol_rank = params[2]
        ncol_hybrid = params[3]
        ncol_dem = params[4]
        ncol_scalar = params[5]
        nscal = ncol_scalar//2
        pp.append(data.reshape((ncol,npart),order='F'))
    # Close UCF object
    obj.close()
    # Convert list of 2D arrays to 3D array
    pp = np.stack(pp,axis=2)
    # Create the dictionary
    col = colmap_from_flags(ncol_rank,ncol_hybrid,ncol_dem,nscal)
    # Return result
    return (pp,col)
def colmap_from_flags(irank,ihybrid,idem,iscal):
    '''Creates a dictionary which specifies the columns of a particle array.'''
    col = {}
    ioffset = 0
    if irank>0:
        col['rank'] = ioffset; ioffset+=1
    if ihybrid>0:
        col['id'] = ioffset; ioffset+=1
    col['x'] = ioffset; ioffset+=1
    col['y'] = ioffset; ioffset+=1
    col['z'] = ioffset; ioffset+=1
    col['r'] = ioffset; ioffset+=1
    col['rho']= ioffset; ioffset+=1
    col['ax'] = ioffset; ioffset+=1
    col['ay'] = ioffset; ioffset+=1
    col['az'] = ioffset; ioffset+=1
    col['u'] = ioffset; ioffset+=1
    col['v'] = ioffset; ioffset+=1
    col['w'] = ioffset; ioffset+=1
    col['ox'] = ioffset; ioffset+=1
    col['oy'] = ioffset; ioffset+=1
    col['oz'] = ioffset; ioffset+=1
    col['fx'] = ioffset; ioffset+=1
    col['fy'] = ioffset; ioffset+=1
    col['fz'] = ioffset; ioffset+=1
    col['tx'] = ioffset; ioffset+=1
    col['ty'] = ioffset; ioffset+=1
    col['tz'] = ioffset; ioffset+=1
    if idem>0:
        col['fxc'] = ioffset; ioffset+=1
        col['fyc'] = ioffset; ioffset+=1
        col['fzc'] = ioffset; ioffset+=1
        col['txc'] = ioffset; ioffset+=1
        col['tyc'] = ioffset; ioffset+=1
        col['tzc'] = ioffset; ioffset+=1
    if iscal>0:
        for ii in range(0,iscal):
            col['s'+str(ii)] = ioffset; ioffset+=1
            col['q'+str(ii)] = ioffset; ioffset+=1
    return col

python/ucf/tools.py Normal file (+183)

@@ -0,0 +1,183 @@
def read_grid(file,verbosity=False,debug=False):
    from .base import UCF
    obj = UCF(file=file,verbosity=verbosity,debug=debug)
    output = []
    for iset in range(0,obj.NumDataset):
        (data,params) = obj.readSet(step=1,dset=iset+1)
        nx = params[0]
        ny = params[1]
        nz = params[2]
        output.append((data[0:nx],data[nx:nx+ny],data[nx+ny:nx+ny+nz]))
    #if obj.UCFVersion<2:
    if obj.NumDataset<5:
        output.extend(output[-1:])
    obj.close()
    return output
def read_procgrid(file,verbosity=False,debug=False):
    from .base import UCF
    obj = UCF(file=file,verbosity=verbosity,debug=debug)
    output = []
    for iset in range(0,obj.NumDataset):
        (data,params) = obj.readSet(step=1,dset=iset+1)
        nxp = params[0]
        nyp = params[1]
        nzp = params[2]
        output.append((
            data[0:nxp],                             # ibeg
            data[nxp:2*nxp],                         # iend
            data[2*nxp:2*nxp+nyp],                   # jbeg
            data[2*nxp+nyp:2*nxp+2*nyp],             # jend
            data[2*nxp+2*nyp:2*nxp+2*nyp+nzp],       # kbeg
            data[2*nxp+2*nyp+nzp:2*nxp+2*nyp+2*nzp]  # kend
        ))
    #if obj.UCFVersion<2:
    if obj.NumDataset<5:
        output.extend(output[-1:])
    obj.close()
    return output
def read_chunk(file,step=1,dset=-1,keep_ghost=True,verbosity=False,debug=False):
    from .base import UCF
    obj = UCF(file=file,verbosity=verbosity,debug=debug)
    if not isinstance(dset,list):
        if dset==-1:
            dset = range(1,obj.NumDataset+1) # TODO: this is the maximum over all timesteps
        else:
            dset = [dset]
    output = []
    for ii in dset:
        tmp = dict()
        (data,params) = obj.readSet(step=step,dset=ii)
        tmp['ighost'] = params[0]
        tmp['ibeg'] = params[1]
        tmp['jbeg'] = params[2]
        tmp['kbeg'] = params[3]
        tmp['nxl'] = params[4]
        tmp['nyl'] = params[5]
        tmp['nzl'] = params[6]
        tmp['nx'] = params[7]
        tmp['ny'] = params[8]
        tmp['nz'] = params[9]
        tmp['data'] = data.reshape((tmp['nxl']+2*tmp['ighost'],
                                    tmp['nyl']+2*tmp['ighost'],
                                    tmp['nzl']+2*tmp['ighost']),
                                   order='F')
        tmp['rank'] = obj.IORank[0]
        tmp['rankijk']= obj.IORank[1:]
        if not keep_ghost and tmp['ighost']:
            tmp['data'] = tmp['data'][1:-1,1:-1,1:-1]
            tmp['ighost'] = 0
        output.append(tmp)
    obj.close()
    return output
def read_particles(file,step=-1,verbosity=False,debug=False):
    from .base import UCF
    import numpy
    obj = UCF(file=file,verbosity=verbosity,debug=debug)
    if not isinstance(step,list):
        if step==-1:
            step = range(1,obj.NumTimestep+1)
        else:
            step = [step]
    # The output will be the following:
    # 1) numpy array with dimension (ncol,np,ntime)
    # 2) dictionary which specifies the columns
    # We read the data step by step into a list, which is then converted to a 3D array
    pp = []
    for ii in step:
        (data,params) = obj.readSet(step=ii,dset=1)
        npart = params[0]
        ncol = params[1]
        ncol_rank = params[2]
        ncol_hybrid = params[3]
        ncol_dem = params[4]
        ncol_scalar = params[5]
        nscal = ncol_scalar//2
        pp.append(data.reshape((ncol,npart),order='F'))
    # Close UCF object
    obj.close()
    # Convert list of 2D arrays to 3D array
    pp = numpy.stack(pp,axis=2)
    # Create the dictionary
    col = colmap_from_flags(ncol_rank,ncol_hybrid,ncol_dem,nscal)
    # Return result
    return (pp,col)
def colmap_from_flags(irank,ihybrid,idem,iscal):
    '''Creates a dictionary which specifies the columns of a particle array.'''
    col = {}
    ioffset = 0
    if irank>0:
        col['rank'] = ioffset; ioffset+=1
    if ihybrid>0:
        col['id'] = ioffset; ioffset+=1
    col['x'] = ioffset; ioffset+=1
    col['y'] = ioffset; ioffset+=1
    col['z'] = ioffset; ioffset+=1
    col['r'] = ioffset; ioffset+=1
    col['rho']= ioffset; ioffset+=1
    col['ax'] = ioffset; ioffset+=1
    col['ay'] = ioffset; ioffset+=1
    col['az'] = ioffset; ioffset+=1
    col['u'] = ioffset; ioffset+=1
    col['v'] = ioffset; ioffset+=1
    col['w'] = ioffset; ioffset+=1
    col['ox'] = ioffset; ioffset+=1
    col['oy'] = ioffset; ioffset+=1
    col['oz'] = ioffset; ioffset+=1
    col['fx'] = ioffset; ioffset+=1
    col['fy'] = ioffset; ioffset+=1
    col['fz'] = ioffset; ioffset+=1
    col['tx'] = ioffset; ioffset+=1
    col['ty'] = ioffset; ioffset+=1
    col['tz'] = ioffset; ioffset+=1
    if idem>0:
        col['fxc'] = ioffset; ioffset+=1
        col['fyc'] = ioffset; ioffset+=1
        col['fzc'] = ioffset; ioffset+=1
        col['txc'] = ioffset; ioffset+=1
        col['tyc'] = ioffset; ioffset+=1
        col['tzc'] = ioffset; ioffset+=1
    if iscal>0:
        for ii in range(0,iscal):
            col['s'+str(ii)] = ioffset; ioffset+=1
            col['q'+str(ii)] = ioffset; ioffset+=1
    return col
def grid_chunk(grid_global,chunk):
    import numpy
    xg,yg,zg = grid_global
    # Shift indices so that they start from zero
    ib = chunk['ibeg']-1
    jb = chunk['jbeg']-1
    kb = chunk['kbeg']-1
    nxl = chunk['nxl']
    nyl = chunk['nyl']
    nzl = chunk['nzl']
    ighost = chunk['ighost']
    if ighost:
        nxg = len(xg)
        nyg = len(yg)
        nzg = len(zg)
        dx = xg[2]-xg[1]
        dy = yg[2]-yg[1]
        dz = zg[2]-zg[1]
        xl = numpy.zeros(nxl+2)
        yl = numpy.zeros(nyl+2)
        zl = numpy.zeros(nzl+2)
        xl[0] = xg[ib]-dx
        xl[1:-1] = xg[ib:ib+nxl]
        xl[-1] = xg[ib+nxl-1]+dx
        yl[0] = yg[jb]-dy
        yl[1:-1] = yg[jb:jb+nyl]
        yl[-1] = yg[jb+nyl-1]+dy
        zl[0] = zg[kb]-dz
        zl[1:-1] = zg[kb:kb+nzl]
        zl[-1] = zg[kb+nzl-1]+dz
    else:
        xl = xg[ib:ib+nxl]
        yl = yg[jb:jb+nyl]
        zl = zg[kb:kb+nzl]
    return (xl,yl,zl)
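A minimal end-to-end sketch of how these helpers compose. The file names follow the archive member names used in abstraction.py ('grid.bin', 'particles.bin', 'uvwp.00000'), but reading them as standalone files on disk is an assumption.

# Hedged sketch; assumes UCF accepts a file path and that the listed
# standalone files exist next to the script.
from ucf import read_grid, read_chunk, read_particles, grid_chunk

grid   = read_grid('grid.bin')               # list of (x,y,z) axis tuples
chunks = read_chunk('uvwp.00000', dset=1)    # one dict per requested dataset
xl, yl, zl = grid_chunk(grid[0], chunks[0])  # local axes, incl. ghost layer if present

pp, col = read_particles('particles.bin')    # (ncol,np,ntime) array plus column map
x_final = pp[col['x'], :, -1]                # particle x-positions at the last step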