class ChunkIterator:
    '''Iterator over the per-process chunks of a UCFSnapshot.'''

    def __init__(self,snapshot,dset=-1,keep_ghost=True):
        self.snapshot = snapshot
        self.dset = dset
        self.keep_ghost = keep_ghost
        self.iter_rank = 0

    def __iter__(self):
        self.iter_rank = 0
        return self

    def __next__(self):
        if self.iter_rank < self.snapshot.nproc:
            data = self.snapshot.read_chunk(
                self.iter_rank,dset=self.dset,keep_ghost=self.keep_ghost
            )
            self.iter_rank += 1
            return data
        else:
            raise StopIteration


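# Usage sketch (illustrative, not part of the module): iterating over every
# per-process chunk of a snapshot. Assumes a UCFSnapshot (defined below) opened
# from an existing archive; 'process' is a placeholder for user code.
#
#   snap = UCFSnapshot('snapshot.ucf.tar')
#   for chunk in ChunkIterator(snap,dset=-1,keep_ghost=False):
#       process(chunk)

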
class UCFSnapshot:
    '''Handles a snapshot.ucf.tar file.'''

    def __init__(self,file_tar,file_index=None):
        self.handler = Ustar(file_tar,file_index)
        self.verbose = False
        self.debug = False
        self.type = None
        # determine the snapshot type from the archived file names
        file_name_string = '\t'.join(self.handler.file_name)
        if 'uvwp' in file_name_string:
            self.type = 'uvwp'
        elif 'scal' in file_name_string:
            self.type = 'scal'
        else:
            raise ValueError("Archive does not contain 'uvwp' or 'scal' files.")
        self.nproc = sum(self.type in s for s in self.handler.file_name)

    def read_particles(self):
        '''Reads particle data from particles.bin in the archive.'''
        from .tools import read_particles
        data = self.handler.read('particles.bin')
        return read_particles(data,step=1,verbosity=self.verbose,debug=self.debug)

    def read_chunk(self,rank,dset=-1,keep_ghost=True):
        '''Reads the data chunk written by process `rank`.'''
        from .tools import read_chunk
        file_target = self.type+'.{:05d}'.format(rank)
        data = self.handler.read(file_target)
        return read_chunk(data,step=1,dset=dset,keep_ghost=keep_ghost,
                          verbosity=self.verbose,debug=self.debug)

    def read_grid(self):
        '''Reads the grid from grid.bin in the archive.'''
        from .tools import read_grid
        data = self.handler.read('grid.bin')
        return read_grid(data,verbosity=self.verbose,debug=self.debug)

    def read_procgrid(self):
        '''Reads the processor grid from proc.bin in the archive.'''
        from .tools import read_procgrid
        data = self.handler.read('proc.bin')
        return read_procgrid(data,verbosity=self.verbose,debug=self.debug)


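# Usage sketch (illustrative, not part of the module): opening a snapshot with a
# pre-built index and reading its contents. The file names 'snapshot.ucf.tar'
# and 'snapshot.taridx' are placeholders; the layout of the returned data is
# determined by the readers in .tools.
#
#   snap = UCFSnapshot('snapshot.ucf.tar',file_index='snapshot.taridx')
#   grid = snap.read_grid()
#   proc = snap.read_procgrid()
#   chunk0 = snap.read_chunk(0,dset=-1,keep_ghost=True)

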
class Ustar:
    '''Minimalistic ustar implementation meant to be used with ucftar files'''

    def __init__(self,file_tar,file_index=None):
        self.path = file_tar
        self.num_files = 0
        self.file_name = []
        self.file_size = []
        self.file_offset = []
        if file_index:
            self.import_index_file(file_index)
        else:
            self.import_tar_file()

    def __del__(self):
        # nothing to release: the archive is opened and closed per read
        return

    def import_tar_file(self):
        '''Imports information on tar archive from scanning it'''
        from tarfile import TarFile,USTAR_FORMAT
        with open(self.path,'rb') as f:
            tarinfos = TarFile(fileobj=f,format=USTAR_FORMAT).getmembers()
        self.num_files = 0
        for tarinfo in tarinfos:
            self.num_files += 1
            self.file_name.append(tarinfo.name)
            self.file_offset.append(tarinfo.offset_data)
            self.file_size.append(tarinfo.size)
        return

    def import_index_file(self,file_index):
        '''Imports information on tar archive from .taridx file'''
        from struct import unpack
        with open(file_index,'rb') as f:
            self.num_files = unpack('<q',f.read(8))[0]
            self.file_name = []
            self.file_size = []
            self.file_offset = []
            for _ in range(self.num_files):
                # each entry: 256-byte name field, int64 data offset, int64 size
                self.file_name.append(f.read(256).decode().rstrip('\0').strip())
                self.file_offset.append(unpack('<q',f.read(8))[0])
                self.file_size.append(unpack('<q',f.read(8))[0])
        return

    def read(self,file):
        '''Reads a file from the archive into memory. Data is returned as bytes.'''
        idx = self.file_name.index(file)
        with open(self.path,'rb') as f:
            f.seek(self.file_offset[idx])
            return f.read(self.file_size[idx])
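

# Hypothetical helper (illustrative, not part of the original module): writes a
# .taridx index in the binary layout read back by Ustar.import_index_file(),
# i.e. a little-endian int64 file count followed, per file, by a 256-byte name
# field, an int64 data offset and an int64 size. The function name and its use
# of the tar-scanning constructor are assumptions of this sketch.
def write_index_file(file_tar,file_index):
    from struct import pack
    archive = Ustar(file_tar)   # scans the tar via import_tar_file()
    with open(file_index,'wb') as f:
        f.write(pack('<q',archive.num_files))
        for name,offset,size in zip(archive.file_name,
                                    archive.file_offset,
                                    archive.file_size):
            f.write(name.encode().ljust(256,b'\0')[:256])   # fixed-width name field
            f.write(pack('<q',offset))
            f.write(pack('<q',size))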