Merge branch 'init' of git.mkray.de:mwtkrayer/ucftools into init

This commit is contained in:
Michael Krayer 2021-06-07 18:33:43 +02:00
commit 414469de1e
20 changed files with 1403 additions and 159 deletions


@@ -372,7 +372,7 @@ classdef ucf < handle
[obj.currentSetDatatype,'=>',obj.currentSetDatatype]);
end
function [params] = readParameters(obj,tstep,dset)
% [data,params] = obj.readParameters(tstep,dset)
% [params] = obj.readParameters(tstep,dset)
% Reads a raw set of parameters without parsing.
% Input
% tstep index of timestep to be read
@@ -551,6 +551,24 @@ classdef ucf < handle
% istep step index
ndset = obj.numSetPerStep(istep);
end
function [dpos,dsize,dtype,dnum] = getDataSetPosition(obj,tstep,dset)
% [dpos,dsize,dtype,dnum] = obj.getDataSetPosition(tstep,dset)
% Returns the position, size, datatype and element count of a dataset
% without reading its payload.
% Input
% tstep index of timestep to be read
% dset index of dataset to be read
% Output
% dpos offset to beginning of dataset
% dsize size of the dataset
% dtype datatype of the dataset
% dnum number of elements in the dataset
fseek(obj.fileID,obj.findSet(tstep,dset),'bof');
if obj.Debug
fprintf('Skipped to position %d\n',ftell(obj.fileID));
end
obj.readHeaderSet();
dpos = ftell(obj.fileID);
dsize = obj.currentSetSize;
dtype = obj.currentSetDatatype;
dnum = obj.currentSetNumElements;
end
end
%% ------------------------------------------------------------------------%%
%% PRIVATE METHODS %%
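For illustration, a minimal sketch of what the new getDataSetPosition enables: reading a dataset payload directly at the returned offset, without going through readSet. Written in Python; the file name, offset and element count are purely hypothetical stand-ins, and the payload is assumed to be little-endian float64:

import numpy as np

# Hypothetical values standing in for getDataSetPosition output
dpos, dnum = 4096, 1000                 # byte offset and element count (assumed)
with open('uvwp.bin', 'rb') as f:       # hypothetical file name
    f.seek(dpos)                        # jump straight to the dataset payload
    data = np.fromfile(f, dtype='<f8', count=dnum)  # assumes little-endian float64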


@@ -72,7 +72,7 @@ classdef ustar < handle
% indexing data.
% Input
% tarfile path to TAR file
% indexfile path to index file (in json format)
% indexfile path to index file (in json/msgpack/taridx format)
obj.File = tarfile;
obj.IndexFile = indexfile;
obj.IOMode = 'read';
@@ -248,7 +248,7 @@ classdef ustar < handle
error('Unable to open file: %s',obj.IndexFile);
end
fseek(indexfileID,0,'bof');
jsonstr = fread(indexfileID,'char=>char')';
jsonstr = fread(indexfileID,'schar=>char')';
fclose(indexfileID);
% Parse JSON and reconstruct filenames
if ~isempty(which('jsonlab.loadjson'))
@@ -316,7 +316,7 @@ classdef ustar < handle
tarFileSize = zeros(1,nsubfile);
tarFileOffset = zeros(1,nsubfile);
for isub=1:nsubfile
tarFileName{isub} = deblank(fread(indexfileID,[1,256],'char=>char'));
tarFileName{isub} = deblank(fread(indexfileID,[1,256],'schar=>char'));
tarFileOffset(isub) = fread(indexfileID,1,'int64=>double');
tarFileSize(isub) = fread(indexfileID,1,'int64=>double');
end
@@ -339,7 +339,7 @@ classdef ustar < handle
% in 'current*' class-variables.
% Input
% scanMode when set to true, omit parts which are not needed during scan
header = fread(obj.fileID,[1,obj.blockSize],'char=>char');
header = fread(obj.fileID,[1,obj.blockSize],'schar=>char');
% Extract header information
name = header(1:100);
mode = header(101:108);
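The binary index read above follows a simple record layout: an int64 entry count, then per entry a 256-byte null-padded name, an int64 payload offset and an int64 payload size, all little-endian (inferred from the readers in this commit). For context, a sketch of how such an index could be generated with Python's tarfile module; write_taridx is a hypothetical helper, not part of the change:

import struct
import tarfile

def write_taridx(tar_path, idx_path):
    # Collect member names, payload offsets and sizes from the archive
    with tarfile.open(tar_path, 'r') as tf:
        members = tf.getmembers()
    with open(idx_path, 'wb') as f:
        f.write(struct.pack('<q', len(members)))        # entry count
        for m in members:
            f.write(m.name.encode().ljust(256, b'\0'))  # null-padded name
            f.write(struct.pack('<q', m.offset_data))   # payload offset
            f.write(struct.pack('<q', m.size))          # payload size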

matlab/find_chunk.m Normal file

@@ -0,0 +1,18 @@
function [col] = find_chunk(xq,x,ibeg,iend,a,b,periodic)
% [col] = find_chunk(xq,x,ibeg,iend,a,b,periodic)
% For each query coordinate xq, returns the index of the chunk whose
% [ibeg,iend] index range contains the grid point of x nearest to it.
% Periodic queries are wrapped into the domain [a,b] first.
if periodic
xq = mod(xq-a,b-a)+a;
elseif any(xq<a | xq>b)
error('Query points must be located inside domain.');
end
x = reshape(x,[],1);
xq = reshape(xq,1,[]);
[~,ii] = min(abs(bsxfun(@minus,x,xq)));
nxq = numel(xq);
col = zeros(1,nxq);
for iq=1:nxq
col(iq) = find(ii(iq)>=ibeg & ii(iq)<=iend,1,'first');
end
end
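A Python transcription of the same lookup, for readers working from the Python side of this repository; ibeg/iend are assumed to be 1-based numpy arrays mirroring the MATLAB arguments, and the chunks are assumed to cover the whole index range:

import numpy as np

def find_chunk_py(xq, x, ibeg, iend, a, b, periodic):
    xq = np.atleast_1d(np.asarray(xq, dtype=float))
    if periodic:
        xq = (xq - a) % (b - a) + a          # wrap queries into [a,b)
    elif np.any((xq < a) | (xq > b)):
        raise ValueError('Query points must be located inside domain.')
    # Nearest grid index (1-based) for each query point
    ii = np.abs(np.asarray(x)[:, None] - xq[None, :]).argmin(axis=0) + 1
    # Chunk whose [ibeg, iend] index range contains that grid point
    return np.array([np.flatnonzero((i >= ibeg) & (i <= iend))[0] + 1
                     for i in ii])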

matlab/find_region_ucfmf.m Normal file

@@ -0,0 +1,219 @@
function [ii,jj,kk,xq,yq,zq] = find_region_ucfmf(fdir,iseq,field,xb,yb,zb,varargin)
% [ii,jj,kk,xq,yq,zq] = find_region_ucfmf(fdir,iseq,field,xb,yb,zb,varargin)
% Determines the global index of grid points which lie within the bounds
% xb,yb,zb specified in the parameters, i.e. the grid points for which
% xb(1) <= x <= xb(2),
% yb(1) <= y <= yb(2),
% zb(1) <= z <= zb(2).
% If the domain is periodic, the specified region can lie "outside" of the domain,
% i.e. it is automatically translated to coordinates in the inner domain.
% If xb(1)==xb(2), a slice located at the nearest neighbor point will be created.
% Input
% fdir directory with files
% iseq sequence index
% field field to be indexed
% {'u','v','w','p','s1','s2',...}
% xb,yb,zb arrays which describe interval, e.g. [0,1] for all points between 0 and 1
% ? fuzzy Include the first point outside of the region? (default: 0)
% ? verbosity verbose output? (default: 0)
% ? debug debug output? (default: 0)
% Output
% ii,jj,kk array with field indices
% xq,yq,zq corresponding grid, monotonically increasing, even if periodic boundary is crossed
% Parse input
par = inputParser;
addParamValue(par,'verbosity',0,@isnumeric);
addParamValue(par,'debug',0,@isnumeric);
addParamValue(par,'fuzzy',0,@isnumeric);
parse(par,varargin{:});
iverb = par.Results.verbosity;
idebug= par.Results.debug;
ifuzzy= par.Results.fuzzy;
% Parse bounds
xmin = xb(1);
xmax = xb(2);
ymin = yb(1);
ymax = yb(2);
zmin = zb(1);
zmax = zb(2);
% Parse field
switch field(1)
case 'u'; ifield=1; fbase='uvwp'; cmesh='u';
case 'v'; ifield=2; fbase='uvwp'; cmesh='v';
case 'w'; ifield=3; fbase='uvwp'; cmesh='w';
case 'p'; ifield=4; fbase='uvwp'; cmesh='p';
case 's'; ifield=str2double(field(2:end)); fbase='scal'; cmesh='p';
otherwise; error('Invalid field: %s',field);
end
% Read auxiliary files
fparam = sprintf('%s/parameters_%04d.asc',fdir,iseq);
fgrid = sprintf('%s/grid_%04d.bin',fdir,iseq);
fproc = sprintf('%s/proc_%04d.bin',fdir,iseq);
params = read_parameters_ucf(fparam,'tarmode',0,'verbosity',iverb);
[x.u,y.u,z.u,x.v,y.v,z.v,x.w,y.w,z.w,x.p,y.p,z.p] = read_grid_ucf(fgrid,'tarmode',0,'verbosity',iverb,'debug',idebug);
x = x.(cmesh);
y = y.(cmesh);
z = z.(cmesh);
[ibeg.u,iend.u,jbeg.u,jend.u,kbeg.u,kend.u,...
ibeg.v,iend.v,jbeg.v,jend.v,kbeg.v,kend.v,...
ibeg.w,iend.w,jbeg.w,jend.w,kbeg.w,kend.w,...
ibeg.p,iend.p,jbeg.p,jend.p,kbeg.p,kend.p] = read_procgrid_ucf(fproc,'tarmode',0,'verbosity',iverb,'debug',idebug);
ibeg = ibeg.(cmesh);
jbeg = jbeg.(cmesh);
kbeg = kbeg.(cmesh);
iend = iend.(cmesh);
jend = jend.(cmesh);
kend = kend.(cmesh);
nx = numel(x);
ny = numel(y);
nz = numel(z);
% Parse domain information
a = params.geometry.a;
b = params.geometry.b;
c = params.geometry.c;
d = params.geometry.d;
e = params.geometry.e;
f = params.geometry.f;
x_periodic = params.geometry.xperiodic;
y_periodic = params.geometry.yperiodic;
z_periodic = params.geometry.zperiodic;
% Check if bounds are valid
if xmax<xmin
error('xmax must be greater than xmin.');
end
if ymax<ymin
error('ymax must be greater than ymin.');
end
if zmax<zmin
error('zmax must be greater than zmin.');
end
% Check if min and max are the same point (if so, treat it as slice)
feps = 1e-12;
islice = false;
jslice = false;
kslice = false;
if abs(xmax-xmin)<feps
islice = true;
end
if abs(ymax-ymin)<feps
jslice = true;
end
if abs(zmax-zmin)<feps
kslice = true;
end
% Check values outside of domain, circular shift if periodic, otherwise error
% Keep the number of shifts and initial offset to reconstruct grid
if x_periodic
nxloop = floor((xmax-xmin)/(b-a));
nxoffset = floor(xmin/(b-a));
xmin = mod(xmin-a,b-a)+a;
xmax = mod(xmax-a,b-a)+a;
elseif xmin<a || xmin>b || xmax<a || xmax>b
error('Interval must be located inside domain.');
else
nxloop = 0;
nxoffset = 0;
end
if y_periodic
nyloop = floor((ymax-ymin)/(d-c));
nyoffset = floor(ymin/(d-c));
ymin = mod(ymin-c,d-c)+c;
ymax = mod(ymax-c,d-c)+c;
elseif ymin<c || ymin>d || ymax<c || ymax>d
error('Interval must be located inside domain.');
else
nyloop = 0;
nyoffset = 0;
end
if z_periodic
nzloop = floor((zmax-zmin)/(f-e));
nzoffset = floor(zmin/(f-e));
zmin = mod(zmin-e,f-e)+e;
zmax = mod(zmax-e,f-e)+e;
elseif zmin<e || zmin>f || zmax<e || zmax>f
error('Interval must be located inside domain.');
else
nzloop = 0;
nzoffset = 0;
end
% Find gridpoints closest to the bounds
if ifuzzy
[~,imin] = find((x-xmin)<0,1,'last');
[~,imax] = find((x-xmax)>0,1,'first');
[~,jmin] = find((y-ymin)<0,1,'last');
[~,jmax] = find((y-ymax)>0,1,'first');
[~,kmin] = find((z-zmin)<0,1,'last');
[~,kmax] = find((z-zmax)>0,1,'first');
else
[~,imin] = find((x-xmin)>=0,1,'first');
[~,imax] = find((x-xmax)<=0,1,'last');
[~,jmin] = find((y-ymin)>=0,1,'first');
[~,jmax] = find((y-ymax)<=0,1,'last');
[~,kmin] = find((z-zmin)>=0,1,'first');
[~,kmax] = find((z-zmax)<=0,1,'last');
end
if isempty(imax); imax=1; end
if isempty(jmax); jmax=1; end
if isempty(kmax); kmax=1; end
% If a slice is requested, correct previous results
if islice
[~,imin] = min(abs(x-xmin));
imax = imin;
end
if jslice
[~,jmin] = min(abs(y-ymin));
jmax = jmin;
end
if kslice
[~,kmin] = min(abs(z-zmin));
kmax = kmin;
end
% Construct the points which are inside of the interval and reconstruct the grid
if imin<=imax
ii = [repmat(mod(imin+(0:nx-1)-1,nx)+1,[1,nxloop]),imin:imax];
shift = [reshape((repmat([zeros(1,nx-imin+1),ones(1,imin-1)],nxloop,1)+(1:nxloop)')',1,[]),...
ones(1,imax-imin+1)+nxloop]-1;
xq = x(ii)+(shift+nxoffset)*(b-a);
else
ii = [imin:nx,repmat(1:nx,[1,nxloop]),1:imax];
shift = [ones(1,nx-imin+1),...
reshape((repmat(ones(1,nx),nxloop,1)+(1:nxloop)')',1,[]),...
ones(1,imax)+nxloop+1]-1;
xq = x(ii)+(shift+nxoffset)*(b-a);
end
if jmin<=jmax
jj = [repmat(mod(jmin+(0:ny-1)-1,ny)+1,[1,nyloop]),jmin:jmax];
shift = [reshape((repmat([zeros(1,ny-jmin+1),ones(1,jmin-1)],nyloop,1)+(1:nyloop)')',1,[]),...
ones(1,jmax-jmin+1)+nyloop]-1;
yq = y(jj)+(shift+nyoffset)*(d-c);
else
jj = [jmin:ny,repmat(1:ny,[1,nyloop]),1:jmax];
shift = [ones(1,ny-jmin+1),...
reshape((repmat(ones(1,ny),nyloop,1)+(1:nyloop)')',1,[]),...
ones(1,jmax)+nyloop+1]-1;
yq = y(jj)+(shift+nyoffset)*(d-c);
end
if kmin<=kmax
kk = [repmat(mod(kmin+(0:nz-1)-1,nz)+1,[1,nzloop]),kmin:kmax];
shift = [reshape((repmat([zeros(1,nz-kmin+1),ones(1,kmin-1)],nzloop,1)+(1:nzloop)')',1,[]),...
ones(1,kmax-kmin+1)+nzloop]-1;
zq = z(kk)+(shift+nzoffset)*(f-e);
else
kk = [kmin:nz,repmat(1:nz,[1,nzloop]),1:kmax];
shift = [ones(1,nz-kmin+1),...
reshape((repmat(ones(1,nz),nzloop,1)+(1:nzloop)')',1,[]),...
ones(1,kmax)+nzloop+1]-1;
zq = z(kk)+(shift+nzoffset)*(f-e);
end
end
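The periodic bookkeeping above (nxloop/nxoffset and the mod-mapping of the bounds) is the heart of the function; a small standalone sketch of the same arithmetic, in Python:

import math

def wrap_interval(lo, hi, a, b):
    # Number of full domain lengths the interval spans, and the offset of
    # its start; both are used later to rebuild a monotonic grid
    L = b - a
    nloop = math.floor((hi - lo) / L)
    noffset = math.floor(lo / L)
    # Map both bounds back into the inner domain [a, b)
    lo = (lo - a) % L + a
    hi = (hi - a) % L + a
    return lo, hi, nloop, noffset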


@@ -0,0 +1,108 @@
function [q] = read_field_block_ucfmf(fdir,iseq,field,ii,jj,kk,varargin)
% [q] = read_field_block_ucfmf(fdir,iseq,field,ii,jj,kk,varargin)
% Reads a specified block of a field from processor chunks which hold the data (multi-file version)
% Input
% fdir directory with files
% iseq sequence index
% field field to be read
% {'u','v','w','p','s1','s2',...}
% ii,jj,kk list of global field indices to be read
% ? step index of step to be read (default: 1)
% ? verbosity verbose output? (default: 0)
% ? debug debug output? (default: 0)
% Output
% q partial field
% Parse optional input arguments
par = inputParser;
addParamValue(par,'step',1,@isnumeric);
addParamValue(par,'verbosity',0,@isnumeric);
addParamValue(par,'debug',0,@isnumeric);
parse(par,varargin{:});
istep = par.Results.step;
iverb = par.Results.verbosity;
idebug= par.Results.debug;
% Parse field
switch field(1)
case 'u'; ifield=1; fbase='uvwp'; cmesh='u';
case 'v'; ifield=2; fbase='uvwp'; cmesh='v';
case 'w'; ifield=3; fbase='uvwp'; cmesh='w';
case 'p'; ifield=4; fbase='uvwp'; cmesh='p';
case 's'; ifield=str2double(field(2:end)); fbase='scal'; cmesh='s';
otherwise; error('Invalid field: %s',field);
end
% Read processor grid
fproc = sprintf('%s/proc_%04d.bin',fdir,iseq);
[ibeg.u,iend.u,jbeg.u,jend.u,kbeg.u,kend.u,...
ibeg.v,iend.v,jbeg.v,jend.v,kbeg.v,kend.v,...
ibeg.w,iend.w,jbeg.w,jend.w,kbeg.w,kend.w,...
ibeg.p,iend.p,jbeg.p,jend.p,kbeg.p,kend.p,...
ibeg.s,iend.s,jbeg.s,jend.s,kbeg.s,kend.s] = ...
read_procgrid_ucf(fproc,'tarmode',0,'verbosity',iverb,'debug',idebug);
ibeg = ibeg.(cmesh);
jbeg = jbeg.(cmesh);
kbeg = kbeg.(cmesh);
iend = iend.(cmesh);
jend = jend.(cmesh);
kend = kend.(cmesh);
nxprocs = length(ibeg);
nyprocs = length(jbeg);
nzprocs = length(kbeg);
% Determine the corresponding chunks
nxb = numel(ii);
nyb = numel(jj);
nzb = numel(kk);
col = zeros(1,nxb);
row = zeros(1,nyb);
pln = zeros(1,nzb);
for iproc=1:nxprocs
col(ii>=ibeg(iproc) & ii<=iend(iproc)) = iproc;
end
for iproc=1:nyprocs
row(jj>=jbeg(iproc) & jj<=jend(iproc)) = iproc;
end
for iproc=1:nzprocs
pln(kk>=kbeg(iproc) & kk<=kend(iproc)) = iproc;
end
% Setup fast indexing for blocks
iib = 1:nxb;
jjb = 1:nyb;
kkb = 1:nzb;
% Print some info (if verbose)
if iverb
nchunk = numel(unique(col))*numel(unique(row))*numel(unique(pln));
fprintf('Reading [%d x %d x %d] points from %d chunks.\n',nxb,nyb,nzb,nchunk);
end
% Load data
q = zeros(nxb,nyb,nzb);
for thiscol=unique(col)
for thisrow=unique(row)
for thispln=unique(pln)
% Load the data
iproc = (thiscol-1)*nyprocs*nzprocs+(thisrow-1)*nzprocs+(thispln-1);
fdata = sprintf('%s/%s_%04d.%05d',fdir,fbase,iseq,iproc);
[data,ib,jb,kb,nxl,nyl,nzl,ighost] = read_field_chunk_ucf(fdata,ifield,'tarmode',0,'ghost',0,'step',istep,'verbosity',iverb,'debug',idebug);
% Determine the points which are covered here
iiq = ii(col==thiscol);
jjq = jj(row==thisrow);
kkq = kk(pln==thispln);
% Translate to local indexing of loaded data
iiql = iiq-ib+1;
jjql = jjq-jb+1;
kkql = kkq-kb+1;
% Translate to local indexing of block
iiqb = iib(col==thiscol);
jjqb = jjb(row==thisrow);
kkqb = kkb(pln==thispln);
% Add loaded data to block
q(iiqb,jjqb,kkqb) = data(iiql,jjql,kkql);
end
end
end
end
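The inner assignment above is a masked scatter: each chunk owns a subset of the requested global indices, which are translated to local chunk indices and written into the matching slots of the output block. A standalone sketch of that single step in Python (boolean ownership masks, 1-based global indices ii/jj/kk, chunk origins ib/jb/kb, as in the function):

import numpy as np

def scatter_chunk(q, data, ii, jj, kk, owned_i, owned_j, owned_k, ib, jb, kb):
    # Translate owned global indices (1-based) to local chunk indices (0-based)
    loc_i = ii[owned_i] - ib
    loc_j = jj[owned_j] - jb
    loc_k = kk[owned_k] - kb
    # Write the chunk's contribution into the matching block positions
    q[np.ix_(owned_i, owned_j, owned_k)] = data[np.ix_(loc_i, loc_j, loc_k)]
    return q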


@@ -0,0 +1,56 @@
function [q,x,y,z] = read_field_complete_ucf(hucf,field,varargin)
% [q,x,y,z] = read_field_complete_ucf(hucf,field,varargin)
% Reads a specific field from all processor chunks. (UCF tar version)
% Input
% hucf handle of UCF object (ustar,ucfmulti)
% field field to be read
% {'u','v','w','p','s1','s2',...}
% ? step index of step to be read (default: 1)
% ? verbosity verbose output? (default: 0)
% ? debug debug output? (default: 0)
% Output
% q complete field
% x,y,z corresponding grid
% Parse optional input arguments
par = inputParser;
addParamValue(par,'step',1,@isnumeric);
addParamValue(par,'verbosity',0,@isnumeric);
addParamValue(par,'debug',0,@isnumeric);
parse(par,varargin{:});
istep = par.Results.step;
% Parse field
switch field(1)
case 'u'; cmesh = 'u';
case 'v'; cmesh = 'v';
case 'w'; cmesh = 'w';
case 'p'; cmesh = 'p';
case 's'; cmesh = 'p';
otherwise; error('Invalid field: %s',field);
end
% Read parameters
params = read_parameters_ucf(hucf);
% Construct an array with final mesh size
nx = params.mesh.(sprintf('nx%s',cmesh));
ny = params.mesh.(sprintf('ny%s',cmesh));
nz = params.mesh.(sprintf('nz%s',cmesh));
q = zeros(nx,ny,nz);
% Read grid
[x.u,y.u,z.u,x.v,y.v,z.v,x.w,y.w,z.w,x.p,y.p,z.p] = read_grid_ucf(hucf);
x = x.(cmesh);
y = y.(cmesh);
z = z.(cmesh);
% Get number of processors
nprocs = params.parallel.nprocs;
% Now read chunk by chunk
for iproc=0:nprocs-1
[data,ib,jb,kb,nxl,nyl,nzl] = read_field_chunk_ucf(hucf,field,'rank',iproc,'ghost',0,'step',istep);
q(ib:ib+nxl-1,jb:jb+nyl-1,kb:kb+nzl-1) = data;
end
end


@@ -33,7 +33,7 @@ function [xu,yu,zu,xv,yv,zv,xw,yw,zw,xp,yp,zp,xs,ys,zs] = read_grid_ucf(file,var
end
% Define sets to be read
isDualMesh=(obj.UCFVersion>=2);
isDualMesh=(obj.UCFVersion>=2) && (obj.NumDataset>4);
if isDualMesh
sets = {'u','v','w','p','s'};
else


@@ -40,7 +40,7 @@ function [ibegu,iendu,jbegu,jendu,kbegu,kendu,...
end
% Define sets to be read
isDualMesh=(obj.UCFVersion>=2);
isDualMesh=(obj.UCFVersion>=2) && (obj.NumDataset>4);
if isDualMesh
sets = {'u','v','w','p','s'};
else


@@ -6,17 +6,21 @@ function [tbeg,tend,nstat,sm,ss,su,sv] = read_statistics_channel_scal_ucf(file,v
addParamValue(par,'debug',0,@isnumeric);
parse(par,varargin{:});
% Define sets to be read
sets = {'um','uu','vv','ww','uv','uub','uvb'};
nset = numel(sets);
% Open UCF file and read
obj = ucf('verbosity',par.Results.verbosity,'debug',par.Results.debug);
obj.open(file);
tend = obj.getSimulationTime(1);
switch class(file)
case 'char'
obj.open(file);
case {'ustar','ucfmulti'}
ptr = file.pointer('statistics_scal.bin');
obj.opentar(ptr);
otherwise
error('Input file type not supported: %s',class(file));
end
tend = obj.getSimulationTime();
% Read parameters of first set
params = obj.readParameters(1,1);
% Parse parameters
tbeg = typecast(params(1),'double');


@@ -356,6 +356,35 @@ class ibmppp:
self.exchangeGhostCells(key)
self.imposeBoundaryConditions(key)
def loadStatistics(self,filename,key):
'''Loads statistics from an h5 file.'''
if self.__rank==0:
# Construct file name
file_h5 = filename+'.h5'
# Open the file and write data
fid = h5py.File(file_h5,'r')
gid = fid[key]
# Load mean
did = gid['mean']
fcont = did.attrs['F_CONTIGUOUS']
dims = did.attrs['DIM']
order = 'F' if fcont else 'C'
self.spatialMean[key] = np.reshape(did,dims,order=order)
# Load meansquare
did = gid['meansquare']
fcont = did.attrs['F_CONTIGUOUS']
dims = did.attrs['DIM']
order = 'F' if fcont else 'C'
self.spatialMsqr[key] = np.reshape(did,dims,order=order)
# Load nsample
did = gid['nsample']
fcont = did.attrs['F_CONTIGUOUS']
dims = did.attrs['DIM']
order = 'F' if fcont else 'C'
self.spatialNsample[key] = np.reshape(did,dims,order=order)
# Close the file
fid.close()
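The three load blocks above repeat one pattern: read a dataset plus its F_CONTIGUOUS/DIM attributes, then reshape in the recorded memory order. A hypothetical helper, not part of the commit, that would factor it:

def _load_h5_array(gid, name):
    '''Read dataset `name` from h5 group `gid` and reshape it according
    to its F_CONTIGUOUS and DIM attributes.'''
    import numpy as np
    did = gid[name]
    order = 'F' if did.attrs['F_CONTIGUOUS'] else 'C'
    return np.reshape(did, did.attrs['DIM'], order=order)

loadStatistics could then reduce to three calls such as self.spatialMean[key] = _load_h5_array(gid,'mean').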
def saveField(self,filename,key,append=False,keepAllGhost=False,xdmf=False):
'''Writes chunks in new processor layout to h5 files'''
# Determine filename of output file
@@ -592,6 +621,118 @@ class ibmppp:
fid.write('</Xdmf> \n')
fid.close()
def saveFieldSingleFile(self,filename,key,append=False,downsample=False,xdmf=False,verbose=False):
'''Writes field data into a single h5 file'''
# Determine filename of output file
file_out = filename+'.h5'
# Compute global nx,ny,nz
nxg = self.__procGrid[key][1][-1]-self.__procGrid[key][0][0]+1
nyg = self.__procGrid[key][3][-1]-self.__procGrid[key][2][0]+1
nzg = self.__procGrid[key][5][-1]-self.__procGrid[key][4][0]+1
# Adjust these values if downsampling is requested
if downsample:
nxg = np.ceil(nxg/2).astype('int')
nyg = np.ceil(nyg/2).astype('int')
nzg = np.ceil(nzg/2).astype('int')
# Setup the correct slicing
if not downsample:
ishift = 0
jshift = 0
kshift = 0
ifb = self.__localChunkBounds[key][0]
jfb = self.__localChunkBounds[key][2]
kfb = self.__localChunkBounds[key][4]
nxl = self.__localChunkSize[key][0]
nyl = self.__localChunkSize[key][1]
nzl = self.__localChunkSize[key][2]
step = 1
else:
ishift = 1 if self.__localChunkBounds[key][0]%2==0 else 0
jshift = 1 if self.__localChunkBounds[key][2]%2==0 else 0
kshift = 1 if self.__localChunkBounds[key][4]%2==0 else 0
ifb = np.ceil((self.__localChunkBounds[key][0]+ishift)/2).astype('int')
jfb = np.ceil((self.__localChunkBounds[key][2]+jshift)/2).astype('int')
kfb = np.ceil((self.__localChunkBounds[key][4]+kshift)/2).astype('int')
nxl = np.ceil(self.__localChunkSize[key][0]/2).astype('int')
nyl = np.ceil(self.__localChunkSize[key][1]/2).astype('int')
nzl = np.ceil(self.__localChunkSize[key][2]/2).astype('int')
step = 2
isl_ch = slice(self.__nghx+ishift,self.__nghx+self.__localChunkSize[key][0],step)
jsl_ch = slice(self.__nghy+jshift,self.__nghy+self.__localChunkSize[key][1],step)
ksl_ch = slice(self.__nghz+kshift,self.__nghz+self.__localChunkSize[key][2],step)
isl_h5 = slice(ifb-1,ifb+nxl-1)
jsl_h5 = slice(jfb-1,jfb+nyl-1)
ksl_h5 = slice(kfb-1,kfb+nzl-1)
# If append flag is set, we add a dataset to the existing file
if append:
ioflag = 'a'
else:
ioflag = 'w'
# I avoid parallel IO because h5py is not available everywhere with
# MPI support. So rank 0 does all the writing and we communicate manually.
if self.__rank==0:
if verbose:
print('[saveFieldSingleFile] rank {} / {} writing to file'.format(self.__rank,self.__nproc-1),end='\r')
fid = h5py.File(file_out,ioflag)
gid = fid.create_group('/'+key)
gid.create_dataset('nx',data=nxg)
gid.create_dataset('ny',data=nyg)
gid.create_dataset('nz',data=nzg)
x0 = self.grid[key][0][0]
y0 = self.grid[key][1][0]
z0 = self.grid[key][2][0]
dx = self.__dx[key]*step
gid.create_dataset('x0',data=x0)
gid.create_dataset('y0',data=y0)
gid.create_dataset('z0',data=z0)
gid.create_dataset('dx',data=dx)
did = gid.create_dataset('data',(nzg,nyg,nxg),dtype=self.__precision)
did[ksl_h5,jsl_h5,isl_h5] = np.transpose(self.field[key][isl_ch,jsl_ch,ksl_ch])
fid.close()
if self.__rank!=self.__nproc-1:
self.__comm.send(True, dest=self.__rank+1, tag=1)
else:
msg = self.__comm.recv(source=self.__rank-1,tag=1)
if verbose:
print('[saveFieldSingleFile] rank {} / {} writing to file'.format(self.__rank,self.__nproc-1),end='\r')
fid = h5py.File(file_out,'a')
fid['/'+key+'/data'][ksl_h5,jsl_h5,isl_h5] = np.transpose(self.field[key][isl_ch,jsl_ch,ksl_ch])
fid.close()
if self.__rank!=self.__nproc-1:
self.__comm.send(True, dest=self.__rank+1, tag=1)
# Ensure that no processor is still working on the file
self.__comm.Barrier()
# Create an XDMF file for the field
if xdmf and self.__rank==0:
if verbose:
print('[saveFieldSingleFile] generating XDMF'+4*'\t',end='\r')
# Construct XDMF filename
file_xdmf = filename+'_'+key+'.xdmf'
filename_h5 = os.path.basename(file_out)
# Open file and write header
fid = open(file_xdmf,'w')
fid.write('<?xml version="1.0"?>\n')
fid.write('<Xdmf Version="2.0">\n')
fid.write(' <Domain>\n')
fid.write(' <Grid Name="{}">\n'.format(key))
#fid.write(' <Time Value="%.2f" />\n',params.general.simtime)
fid.write(' <Topology TopologyType="3DCORECTMesh" NumberOfElements="{:d} {:d} {:d}"/>\n'.format(nzg,nyg,nxg))
fid.write(' <Geometry Origin="" Type="ORIGIN_DXDYDZ">\n')
fid.write(' <DataItem DataType="Float" Dimensions="3" Format="XML" Precision="8"> {:12f} {:12f} {:12f}</DataItem>\n'.format(z0,y0,x0))
fid.write(' <DataItem DataType="Float" Dimensions="3" Format="XML" Precision="8"> {:12f} {:12f} {:12f}</DataItem>\n'.format(dx,dx,dx))
fid.write(' </Geometry>\n')
fid.write(' <Attribute Name="{}" Dimensions="{:d} {:d} {:d}">\n'.format(key,nzg,nyg,nxg))
fid.write(' <DataItem Format="HDF5" DataType="Float" Dimensions="{:d} {:d} {:d}">\n'.format(nzg,nyg,nxg))
fid.write(' {}:/{}/data </DataItem>\n'.format(filename_h5,key))
fid.write(' </Attribute>\n')
fid.write(' </Grid>\n')
fid.write(' </Domain>\n')
fid.write('</Xdmf> \n')
fid.close()
# Clear carriage return output if verbose
if self.__rank==0 and verbose:
print(10*'\t',end='\r')
return None
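The serialization scheme in saveFieldSingleFile is a token ring: rank 0 writes first and sends a token, every later rank blocks on its predecessor before writing, which serializes file access without an MPI-enabled h5py build. A minimal standalone sketch of the pattern with mpi4py:

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank, nproc = comm.Get_rank(), comm.Get_size()
if rank > 0:
    comm.recv(source=rank - 1, tag=1)      # wait for predecessor to finish
# ... open the shared file in append mode and write this rank's slab ...
if rank < nproc - 1:
    comm.send(True, dest=rank + 1, tag=1)  # pass the token onwards
comm.Barrier()                             # no rank proceeds until all wrote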
def saveStatistics(self,filename):
'''Writes all gathered statistics to an h5 file.'''
if self.__rank==0:
@@ -940,6 +1081,110 @@ class ibmppp:
if collocate:
self.shiftField(keyout,self.__getShiftDirection(keyout,'p'))
def dissipation(self,keepDerivatives=False):
'''Computes dissipation rate on pressure grid.'''
# D = 2 [dudx^2 + dvdy^2 + dwdz^2] + (dudy+dvdx)^2 + (dudz+dwdx)^2 + (dvdz+dwdy)^2
# Viscosity is not multiplied!
# Allocate a new output field on pressure grid
self.__allocateField('D','p',shift=(0,0,0))
# Compute derivatives on after another and add to field
# dudx*dudx
if 'ux' not in self.field:
self.differentiateField('u','x')
symm = self.__deriveSymmetry(self.__boundarySymmetries['ux'],'mult',symm2=self.__boundarySymmetries['ux'])
self.__allocateField('tmp','ux',shift=(0,0,0),symmetry=symm)
self.field['tmp'] += 2.0*np.square(self.field['ux'])
self.shiftField('tmp',self.__getShiftDirection('tmp','D'))
iin,jin,kin,iout,jout,kout = self.__sliceCollocated('tmp','D')
self.field['D'][iout,jout,kout] += self.field['tmp'][iin,jin,kin]
self.freeField('tmp')
if not keepDerivatives:
self.freeField('ux')
# dvdy*dvdy
if 'vy' not in self.field:
self.differentiateField('v','y')
symm = self.__deriveSymmetry(self.__boundarySymmetries['vy'],'mult',symm2=self.__boundarySymmetries['vy'])
self.__allocateField('tmp','vy',shift=(0,0,0),symmetry=symm)
self.field['tmp'] += 2.0*np.square(self.field['vy'])
self.shiftField('tmp',self.__getShiftDirection('tmp','D'))
iin,jin,kin,iout,jout,kout = self.__sliceCollocated('tmp','D')
self.field['D'][iout,jout,kout] += self.field['tmp'][iin,jin,kin]
self.freeField('tmp')
if not keepDerivatives:
self.freeField('vy')
# dwdz*dwdz
if 'wz' not in self.field:
self.differentiateField('w','z')
symm = self.__deriveSymmetry(self.__boundarySymmetries['wz'],'mult',symm2=self.__boundarySymmetries['wz'])
self.__allocateField('tmp','wz',shift=(0,0,0),symmetry=symm)
self.field['tmp'] += 2.0*np.square(self.field['wz'])
self.shiftField('tmp',self.__getShiftDirection('tmp','D'))
iin,jin,kin,iout,jout,kout = self.__sliceCollocated('tmp','D')
self.field['D'][iout,jout,kout] += self.field['tmp'][iin,jin,kin]
self.freeField('tmp')
if not keepDerivatives:
self.freeField('wz')
# (dudy+dvdx)^2
if 'uy' not in self.field:
self.differentiateField('u','y')
if 'vx' not in self.field:
self.differentiateField('v','x')
symm = self.__deriveSymmetry(self.__boundarySymmetries['uy'],'mult',symm2=self.__boundarySymmetries['vx'])
self.__allocateField('tmp','D',shift=(-1,-1,0),symmetry=symm)
ii1,jj1,kk1,iout,jout,kout = self.__sliceCollocated('uy','tmp')
ii2,jj2,kk2,iout,jout,kout = self.__sliceCollocated('vx','tmp')
self.field['tmp'][iout,jout,kout] += np.square(self.field['uy'][ii1,jj1,kk1]+self.field['vx'][ii2,jj2,kk2])
self.exchangeGhostCells('tmp')
self.imposeBoundaryConditions('tmp')
self.shiftField('tmp',self.__getShiftDirection('tmp','D'))
iin,jin,kin,iout,jout,kout = self.__sliceCollocated('tmp','D')
self.field['D'][iout,jout,kout] += self.field['tmp'][iin,jin,kin]
self.freeField('tmp')
if not keepDerivatives:
self.freeField('uy')
self.freeField('vx')
# (dudz+dwdx)^2
if 'uz' not in self.field:
self.differentiateField('u','z')
if 'wx' not in self.field:
self.differentiateField('w','x')
symm = self.__deriveSymmetry(self.__boundarySymmetries['uz'],'mult',symm2=self.__boundarySymmetries['wx'])
self.__allocateField('tmp','D',shift=(-1,0,-1),symmetry=symm)
ii1,jj1,kk1,iout,jout,kout = self.__sliceCollocated('uz','tmp')
ii2,jj2,kk2,iout,jout,kout = self.__sliceCollocated('wx','tmp')
self.field['tmp'][iout,jout,kout] += np.square(self.field['uz'][ii1,jj1,kk1]+self.field['wx'][ii2,jj2,kk2])
self.exchangeGhostCells('tmp')
self.imposeBoundaryConditions('tmp')
self.shiftField('tmp',self.__getShiftDirection('tmp','D'))
iin,jin,kin,iout,jout,kout = self.__sliceCollocated('tmp','D')
self.field['D'][iout,jout,kout] += self.field['tmp'][iin,jin,kin]
self.freeField('tmp')
if not keepDerivatives:
self.freeField('uz')
self.freeField('wx')
# (dvdz+dwdy)^2
if 'vz' not in self.field:
self.differentiateField('v','z')
if 'wy' not in self.field:
self.differentiateField('w','y')
symm = self.__deriveSymmetry(self.__boundarySymmetries['vz'],'mult',symm2=self.__boundarySymmetries['wy'])
self.__allocateField('tmp','D',shift=(0,-1,-1),symmetry=symm)
ii1,jj1,kk1,iout,jout,kout = self.__sliceCollocated('vz','tmp')
ii2,jj2,kk2,iout,jout,kout = self.__sliceCollocated('wy','tmp')
self.field['tmp'][iout,jout,kout] += np.square(self.field['vz'][ii1,jj1,kk1]+self.field['wy'][ii2,jj2,kk2])
self.exchangeGhostCells('tmp')
self.imposeBoundaryConditions('tmp')
self.shiftField('tmp',self.__getShiftDirection('tmp','D'))
iin,jin,kin,iout,jout,kout = self.__sliceCollocated('tmp','D')
self.field['D'][iout,jout,kout] += self.field['tmp'][iin,jin,kin]
self.freeField('tmp')
if not keepDerivatives:
self.freeField('vz')
self.freeField('wy')
# Exchange ghost cells and set boundary conditions
self.exchangeGhostCells('D')
self.imposeBoundaryConditions('D')
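For reference, the quantity assembled above, written out directly for fields that are already collocated on one grid (the method itself does the staggered-grid shifting and ghost-cell handling); viscosity is deliberately not multiplied in, matching the comment at the top of the method:

import numpy as np

def dissipation_collocated(ux, vy, wz, uy, vx, uz, wx, vz, wy):
    # D = 2 [dudx^2 + dvdy^2 + dwdz^2]
    #     + (dudy+dvdx)^2 + (dudz+dwdx)^2 + (dvdz+dwdy)^2
    return (2.0 * (np.square(ux) + np.square(vy) + np.square(wz))
            + np.square(uy + vx)
            + np.square(uz + wx)
            + np.square(vz + wy))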
def spatialStatistics(self,key,homogeneous=None):
'''Computes spatial mean and meansquare over homogeneous directions.'''
# If no homogeneous directions are provided, use periodicity
@@ -1064,6 +1309,60 @@ class ibmppp:
self.__comm.Send(tmpsum, dest=0,tag=2)
self.__comm.Send(tmpssq, dest=0,tag=3)
def subtractMean(self,key):
'''Subtract mean from field.'''
# Check if this operation can be executed
if self.__rank==0:
if key not in self.spatialMean or self.spatialMean[key] is None:
raise ValueError('Statistics not available')
# Broadcast statistics
sendbuf = self.spatialMean[key] if self.__rank==0 else None
self.spatialMean[key] = self.__comm.bcast(sendbuf,root=0)
# Get dimensions
dims = self.spatialMean[key].shape
# Slice statistics for each processor
ib,ie = (0,1) if dims[0]==1 else (self.__localChunkBounds[key][0]-1,self.__localChunkBounds[key][1])
jb,je = (0,1) if dims[1]==1 else (self.__localChunkBounds[key][2]-1,self.__localChunkBounds[key][3])
kb,ke = (0,1) if dims[2]==1 else (self.__localChunkBounds[key][4]-1,self.__localChunkBounds[key][5])
# Subtract mean from field
self.field[key][
self.__nghx:-self.__nghx,
self.__nghy:-self.__nghy,
self.__nghz:-self.__nghz] -= self.spatialMean[key][ib:ie,jb:je,kb:ke]
# Exchange ghost cells and set boundary conditions
self.exchangeGhostCells(key)
self.imposeBoundaryConditions(key)
def normalizeByStandardDeviation(self,key):
'''Normalize field by its standard deviation.'''
# Check if this operation can be executed
if self.__rank==0:
if (key not in self.spatialMean or self.spatialMean[key] is None or
key not in self.spatialMsqr or self.spatialMsqr[key] is None):
raise ValueError('Statistics not available')
# Broadcast statistics
sendbuf = self.spatialMean[key] if self.__rank==0 else None
self.spatialMean[key] = self.__comm.bcast(sendbuf,root=0)
sendbuf = self.spatialMsqr[key] if self.__rank==0 else None
self.spatialMsqr[key] = self.__comm.bcast(sendbuf,root=0)
# Compute standard deviation
assert(self.spatialMean[key].shape==self.spatialMsqr[key].shape)
spatialStdDev = np.sqrt(self.spatialMsqr[key]-np.square(self.spatialMean[key]))
# Get dimensions
dims = spatialStdDev.shape
# Slice statistics for each processor
ib,ie = (0,1) if dims[0]==1 else (self.__localChunkBounds[key][0]-1,self.__localChunkBounds[key][1])
jb,je = (0,1) if dims[1]==1 else (self.__localChunkBounds[key][2]-1,self.__localChunkBounds[key][3])
kb,ke = (0,1) if dims[2]==1 else (self.__localChunkBounds[key][4]-1,self.__localChunkBounds[key][5])
# Divide field by its standard deviation
self.field[key][
self.__nghx:-self.__nghx,
self.__nghy:-self.__nghy,
self.__nghz:-self.__nghz] /= spatialStdDev[ib:ie,jb:je,kb:ke]
# Exchange ghost cells and set boundary conditions
self.exchangeGhostCells(key)
self.imposeBoundaryConditions(key)
def collocateVelocity(self):
'''Moves velocity components onto the pressure grid.'''
raise NotImplementedError('TBD')

python/programs/ucf_file_diff Executable file

@@ -0,0 +1,64 @@
#!/usr/bin/env python3
import ucf
import argparse
import os
import sys
import traceback
import numpy as np
parser = argparse.ArgumentParser(description='Compares the content of two UCF files')
parser.add_argument('file1', metavar='file1',help='input file #1')
parser.add_argument('file2', metavar='file2',help='input file #2')
parser.add_argument("--debug", help="enable debug output? [default: False]", action="store_true")
parser.add_argument("--stop", help="stop as soon as an error has been detected? [default: False]", action="store_true")
parser.add_argument("--tolerance", metavar='tol', help="floating point tolerance for comparison [default: 1e-12]", type=float)
parser.add_argument("-v","--verbose", help="enable verbose output? [default: False]", action="store_true")
args = parser.parse_args()
file_in1 = args.file1
file_in2 = args.file2
flag_debug = args.debug
flag_verbose = args.verbose
fptol = args.tolerance
flag_stop = args.stop
if fptol is None:
fptol = 1e-12
if not os.path.isfile(file_in1):
print('File not found:',file_in1)
sys.exit(1)
if not os.path.isfile(file_in2):
print('File not found:',file_in2)
sys.exit(1)
obj1 = ucf.UCF(file=file_in1,verbosity=flag_verbose,debug=flag_debug)
obj2 = ucf.UCF(file=file_in2,verbosity=flag_verbose,debug=flag_debug)
if obj1.NumTimestep!=obj2.NumTimestep:
print("Number of time steps differ: {:d}, {:d}".format(obj1.NumTimestep,obj2.NumTimestep))
sys.exit(2)
errfound=False
for istep in range(0,obj1.NumTimestep):
for iset in range(0,obj1._UCF__numSetPerStep[istep]):
(data1,param1) = obj1.readSet(step=istep+1,dset=iset+1)
(data2,param2) = obj2.readSet(step=istep+1,dset=iset+1)
if not param1==param2:
print('Step #{:d}, set #{:d}: parameter mismatch. {}, {}'.format(istep,iset,param1,param2))
if flag_stop:
sys.exit(3)
else:
errfound=True
if not np.allclose(data1,data2,rtol=fptol):
data_diff = np.abs(data1-data2)
idx = np.argmax(data_diff)
vald = data_diff.flatten()[idx]
val1 = data1.flatten()[idx]
val2 = data2.flatten()[idx]
print('Step #{:d}, set #{:d}: difference in FP data above tolerance. idx={}, diff={}, val1={}, val2={}'.format(istep,iset,idx,vald,val1,val2))
if flag_stop:
sys.exit(4)
else:
errfound=True
if errfound:
sys.exit(5)


@@ -0,0 +1,73 @@
#!/usr/bin/env python3
import ucf
import argparse
import os
import sys
import traceback
import numpy as np
parser = argparse.ArgumentParser(description='Verifies that content of single UCF file is equivalent to multiple UCF files')
parser.add_argument('file1', metavar='file_single',help='input file #1 (single file)')
parser.add_argument('file2', metavar='filebase_multi',help='input file #2 (multiple files)')
parser.add_argument("--debug", help="enable debug output? [default: False]", action="store_true")
parser.add_argument("--stop", help="stop as soon as an error has been detected? [default: False]", action="store_true")
parser.add_argument("--tolerance", metavar='tol', help="floating point tolerance for comparison [default: 1e-12]", type=float)
parser.add_argument("-v","--verbose", help="enable verbose output? [default: False]", action="store_true")
args = parser.parse_args()
file_single = args.file1
fiba_multi = args.file2
flag_debug = args.debug
flag_verbose = args.verbose
fptol = args.tolerance
flag_stop = args.stop
if fptol is None:
fptol = 1e-12
if not os.path.isfile(file_single):
print('File not found:',file_single)
sys.exit(1)
objs = ucf.UCF(file=file_single,verbosity=flag_verbose,debug=flag_debug)
#if obj1.NumTimestep!=obj2.NumTimestep:
errfound=False
for istep in range(0,objs.NumTimestep):
print('Comparing rank {:5d}'.format(istep))
file_multi = '{}{:05d}'.format(fiba_multi,istep)
if not os.path.isfile(file_multi):
print('File not found:',file_multi)
if flag_stop:
sys.exit(1)
else:
errfound=True
objm = ucf.UCF(file=file_multi,verbosity=flag_verbose,debug=flag_debug)
if objs._UCF__numSetPerStep[istep]!=objm._UCF__numSetPerStep[0]:
print("Number of datasets differs: {:d}, {:d}".format(objs._UCF__numSetPerStep[istep],objm._UCF__numSetPerStep[0]))
if flag_stop:
sys.exit(2)
else:
errfound=True
for iset in range(0,objs._UCF__numSetPerStep[istep]):
(data1,param1) = objs.readSet(step=istep+1,dset=iset+1)
(data2,param2) = objm.readSet(step=1,dset=iset+1)
if not param1==param2:
print('Step #{:d}, set #{:d}: parameter mismatch. {}, {}'.format(istep,iset,param1,param2))
if flag_stop:
sys.exit(3)
else:
errfound=True
if not np.allclose(data1,data2,rtol=fptol):
data_diff = np.abs(data1-data2)
idx = np.argmax(data_diff)
vald = data_diff.flatten()[idx]
val1 = data1.flatten()[idx]
val2 = data2.flatten()[idx]
print('Step #{:d}, set #{:d}: difference in FP data above tolerance. idx={}, diff={}, val1={}, val2={}'.format(istep,iset,idx,vald,val1,val2))
if flag_stop:
sys.exit(4)
else:
errfound=True
if errfound:
sys.exit(5)


@@ -0,0 +1,50 @@
#!/usr/bin/env python3
import ucf
import argparse
import os
import sys
import traceback
parser = argparse.ArgumentParser(description='Tests basic integrity of a UCF file')
parser.add_argument('infile', metavar='file', nargs='+',help='input file')
parser.add_argument("--debug", help="enable debug output? [default: False]", action="store_true")
parser.add_argument("--print-error", help="print possible error messages? [default: False]", action="store_true")
parser.add_argument("--stop", help="stop as soon as a corrupted file has been found? [default: False]", action="store_true")
parser.add_argument("-v","--verbose", help="enable verbose output? [default: False]", action="store_true")
args = parser.parse_args()
file_in = args.infile
flag_debug = args.debug
flag_verbose = args.verbose
flag_error = args.print_error
flag_stop = args.stop
numerror=0
for file in file_in:
print(file+": ",end="")
if not os.path.isfile(file):
print('not found.')
numerror+=1
continue
try:
obj = ucf.UCF(file=file,verbosity=flag_verbose,debug=flag_debug)
for it in range(0,obj.NumTimestep):
for iset in range(0,obj._UCF__numSetPerStep[it]):
obj._UCF__findSet(it+1,iset+1)
obj._UCF__readHeaderSet()
obj.close()
print('successful ({:d} time steps, {:d} data sets)'.format(obj.NumTimestep,obj.NumDataset))
except Exception:
print('corrupt')
if flag_error:
traceback.print_exc()
if flag_stop:
sys.exit(1)
numerror+=1
if numerror==0:
print('Test completed with no errors.')
sys.exit(0)
else:
print("Test completed with {:d} error(s).".format(numerror))
sys.exit(1)

python/programs/ucftar_mpiio_pack Executable file

@@ -0,0 +1,128 @@
#!/usr/bin/env python3
import sys
import io
import os
import tarfile
import argparse
import numpy as np
import ucf
import configparser
parser = argparse.ArgumentParser(description='Packs UCF MPIIO data into a tar archive. The single output file is split into multiple files due to file size restrictions.')
parser.add_argument('indir', metavar='dirin', help='input directory')
parser.add_argument('iseq', metavar='iseq', help='sequence number')
parser.add_argument('base', metavar='base', help='filebase to be archived: "uvwp" or "scal"')
parser.add_argument("-o", "--outfile", metavar='file',nargs='?', default=None, help="name of the output file [default: snapshot_XXXX.ucf.tar]", action="store")
parser.add_argument("-v", "--verbose", help="activate verbose output", action="store_true")
args = parser.parse_args()
dir_in = args.indir
iseq = int(args.iseq)
base = args.base
verbose = args.verbose
if base not in ('uvwp','scal'):
raise ValueError('Invalid base key: {}'.format(base))
if args.outfile is None:
if base=='uvwp':
file_out = 'snapshot_{:04d}.ucf.tar'.format(iseq)
elif base=='scal':
file_out = 'snapshot_scalar_{:04d}.ucf.tar'.format(iseq)
else:
file_out = args.outfile
# Check if all required files are available
files_req = ('parameters_{:04d}.asc','proc_{:04d}.bin','grid_{:04d}.bin','particles_{:04d}.bin',base+'_{:04d}.bin')
for files in files_req:
path_check = '{}/{}'.format(dir_in,files.format(iseq))
if not os.path.isfile(path_check):
raise IOError('File does not exist: {}'.format(path_check))
if verbose:
print("[x] parameters")
print("[x] grid")
print("[x] processor grid")
print("[x] particles")
if base=='uvwp':
print("[x] fluid field")
elif base=='scal':
print("[x] scalar field")
# Open tar file for output
ftar = tarfile.open(name=file_out,mode='w',format=tarfile.USTAR_FORMAT)
def transform_filename(filename,iseq):
return os.path.basename(filename).replace('_{:04d}'.format(iseq),'')
# Parse parameters to construct file headers, then add it to tar
file_in = '{}/parameters_{:04d}.asc'.format(dir_in,iseq)
config = configparser.ConfigParser()
config.read(file_in)
nxprocs = int(config['parallel']['nxprocs'])
nyprocs = int(config['parallel']['nyprocs'])
nzprocs = int(config['parallel']['nzprocs'])
nprocs = nxprocs*nyprocs*nzprocs
print(transform_filename(file_in,iseq))
fid = open(file_in,'rb')
info = tarfile.TarInfo(name=transform_filename(file_in,iseq))
info.size = os.path.getsize(file_in)
ftar.addfile(info,fileobj=fid)
fid.close()
# Add proc.bin
file_in = '{}/proc_{:04d}.bin'.format(dir_in,iseq)
print(transform_filename(file_in,iseq))
fid = open(file_in,'rb')
info = tarfile.TarInfo(name=transform_filename(file_in,iseq))
info.size = os.path.getsize(file_in)
ftar.addfile(info,fileobj=fid)
fid.close()
# Add grid.bin
file_in = '{}/grid_{:04d}.bin'.format(dir_in,iseq)
print(transform_filename(file_in,iseq))
fid = open(file_in,'rb')
info = tarfile.TarInfo(name=transform_filename(file_in,iseq))
info.size = os.path.getsize(file_in)
ftar.addfile(info,fileobj=fid)
fid.close()
# Add particles.bin
file_in = '{}/particles_{:04d}.bin'.format(dir_in,iseq)
print(transform_filename(file_in,iseq))
fid = open(file_in,'rb')
info = tarfile.TarInfo(name=transform_filename(file_in,iseq))
info.size = os.path.getsize(file_in)
ftar.addfile(info,fileobj=fid)
fid.close()
# Split uvwp/scal files and add them
def positionFromRank(rank,nxp,nyp,nzp):
ip = rank//(nyp*nzp)
jp = (rank//nzp)%nyp
kp = rank%nzp
return (ip,jp,kp)
file_in = '{}/{}_{:04d}.bin'.format(dir_in,base,iseq)
ucf_data = ucf.UCF(file=file_in)
if nprocs!=ucf_data.NumTimestep:
raise ValueError('Number of steps does not equal number of procs: {}, {}'.format(ucf_data.NumTimestep,nprocs))
for iproc in range(0,nprocs):
print(base+'.{:05d}'.format(iproc))
# Construct a new UCF file
ucf_data.addFileHeaderToBuffer(
rank=iproc,
rankijk=positionFromRank(iproc,nxprocs,nyprocs,nzprocs),
ftype=ucf_data._UCF__typeID
)
ucf_data.copyStepToBuffer(iproc+1,recursive=True)
ucf_bytes = ucf_data.flushBuffer()
# Write it to tar
info = tarfile.TarInfo(name=base+'.{:05d}'.format(iproc))
info.size = len(ucf_bytes)
ftar.addfile(info,fileobj=io.BytesIO(ucf_bytes))
# Close tar file
ftar.close()
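The four "add file to tar" blocks above are identical apart from the path; a hypothetical helper (not part of the commit) that would factor them:

import os
import tarfile

def add_plain_file(ftar, path, arcname):
    # Register the on-disk size under the transformed archive name,
    # then stream the file's bytes into the archive
    info = tarfile.TarInfo(name=arcname)
    info.size = os.path.getsize(path)
    with open(path, 'rb') as fid:
        ftar.addfile(info, fileobj=fid)

Each block would then reduce to add_plain_file(ftar, file_in, transform_filename(file_in, iseq)).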


@@ -0,0 +1,66 @@
#!/usr/bin/env python3
import sys
import io
import os
import tarfile
import argparse
import numpy as np
import ucf
import configparser
parser = argparse.ArgumentParser(description='Unpacks a UCF tar file into MPIIO input files.')
parser.add_argument('infile', metavar='filein', help='input file')
parser.add_argument('iseq', metavar='iseq', help='sequence number')
#parser.add_argument('base', metavar='base', help='filebase to be archived: "uvwp" or "scal"')
parser.add_argument("-o", "--outdir", metavar='directory',nargs='?', default=None, help="name of the output directory", action="store")
parser.add_argument("-v", "--verbose", help="activate verbose output", action="store_true")
args = parser.parse_args()
file_in = args.infile
iseq = int(args.iseq)
#base = args.base
verbose = args.verbose
if args.outdir is None:
dir_out = './'
else:
dir_out = args.outdir
# Open tar file for input
ftar = tarfile.open(name=file_in,mode='r')
# Extract files
def transform_filename(filename,iseq):
return filename.replace('.','_{:04d}.'.format(iseq))
files_all = ftar.getnames()
#files_aux = ('parameters.asc','proc.bin','grid.bin','particles.bin')
files_aux = [s for s in files_all if not (s.startswith('uvwp') or s.startswith('scal'))]
for files in files_aux:
print(files)
ucf_bytes = ftar.extractfile(files).read()
file_out = dir_out+transform_filename(files,iseq)
fidw = open(file_out,'wb')
fidw.write(ucf_bytes)
fidw.close()
files_data = [s for s in files_all if s.startswith('uvwp') or s.startswith('scal')]
base = files_data[0].split('.')[0]
iproc = 0
file_out = dir_out+'/'+base+'_{:04d}.bin'.format(iseq)
fidw = open(file_out,'wb')
for files in files_data:
print(files)
sproc = files.split('.')[1]
if iproc!=int(sproc):
raise ValueError('Invalid file order in tar: {}, {}'.format(iproc,files))
ucf_bytes = ftar.extractfile(files).read()
if iproc==0:
fidw.write(ucf_bytes)
else:
fidw.write(ucf_bytes[64:])
iproc += 1
fidw.close()
# Close tar file
ftar.close()

python/ucf/__init__.py Normal file

@@ -0,0 +1,3 @@
from .base import UCF
from .tools import read_grid, read_procgrid, read_chunk, read_particles, grid_chunk
from .abstraction import Ustar, UCFSnapshot, ChunkIterator

python/ucf/abstraction.py Normal file

@@ -0,0 +1,100 @@
class ChunkIterator:
def __init__(self,snapshot,dset=-1,keep_ghost=True):
self.snapshot = snapshot
self.dset = dset
self.keep_ghost = keep_ghost
self.iter_rank = 0
def __iter__(self):
self.iter_rank = 0
return self
def __next__(self):
if self.iter_rank<self.snapshot.nproc:
data = self.snapshot.read_chunk(
self.iter_rank,dset=self.dset,keep_ghost=self.keep_ghost
)
self.iter_rank += 1
return data
else:
raise StopIteration
class UCFSnapshot:
'''Handles a snapshot.ucf.tar file.'''
def __init__(self,file_tar,file_index=None):
self.handler = Ustar(file_tar,file_index)
self.verbose = False
self.debug = False
self.type = None
#
file_name_string = '\t'.join(self.handler.file_name)
if 'uvwp' in file_name_string:
self.type = 'uvwp'
elif 'scal' in file_name_string:
self.type = 'scal'
else:
raise ValueError("Archive does not contain 'uvwp' nor 'scal' files.")
self.nproc = sum(self.type in s for s in self.handler.file_name)
def read_particles(self):
from .tools import read_particles
data = self.handler.read('particles.bin')
return read_particles(data,step=1,verbosity=self.verbose,debug=self.debug)
def read_chunk(self,rank,dset=-1,keep_ghost=True):
from .tools import read_chunk
file_target = self.type+'.{:05d}'.format(rank)
data = self.handler.read(file_target)
return read_chunk(data,step=1,dset=dset,keep_ghost=keep_ghost,
verbosity=self.verbose,debug=self.debug)
def read_grid(self):
from .tools import read_grid
data = self.handler.read('grid.bin')
return read_grid(data,verbosity=self.verbose,debug=self.debug)
def read_procgrid(self):
from .tools import read_procgrid
data = self.handler.read('proc.bin')
return read_procgrid(data,verbosity=self.verbose,debug=self.debug)
class Ustar:
'''Minimalistic ustar implementation meant to be used with ucftar files'''
def __init__(self,file_tar,file_index=None):
self.path = file_tar
self.num_files = 0
self.file_name = []
self.file_size = []
self.file_offset = []
if file_index:
self.import_index_file(file_index)
else:
self.import_tar_file()
def __del__(self):
return
def import_tar_file(self):
'''Imports information on tar archive from scanning it'''
from tarfile import TarFile,USTAR_FORMAT
from struct import unpack
with open(self.path,'rb') as f:
tarinfos = TarFile(fileobj=f,format=USTAR_FORMAT).getmembers()
self.num_files = 0
for tarinfo in tarinfos:
self.num_files += 1
self.file_name.append(tarinfo.name)
self.file_offset.append(tarinfo.offset_data)
self.file_size.append(tarinfo.size)
return
def import_index_file(self,file_index):
'''Imports information on tar archive from .taridx file'''
from struct import unpack
with open(file_index,'rb') as f:
self.num_files = unpack('<q',f.read(8))[0]
self.file_name = []
self.file_size = []
self.file_offset = []
for ifile in range(0,self.num_files):
self.file_name.append(f.read(256).decode().strip().rstrip('\0'))
self.file_offset.append(unpack('<q',f.read(8))[0])
self.file_size.append(unpack('<q',f.read(8))[0])
return
def read(self,file):
'''Reads a file from the archive into memory. Data is returned as bytes.'''
idx = self.file_name.index(file)
with open(self.path,'rb') as f:
f.seek(self.file_offset[idx])
return f.read(self.file_size[idx])
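A minimal usage sketch for the classes above; the archive and index file names are hypothetical:

from ucf import UCFSnapshot, ChunkIterator

snap = UCFSnapshot('snapshot_0001.ucf.tar',
                   file_index='snapshot_0001.ucf.taridx')  # index is optional
grids = snap.read_grid()
for chunk in ChunkIterator(snap, dset=-1, keep_ghost=False):
    pass  # each iteration yields the next rank's chunk, ghost cells stripped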


@@ -475,148 +475,3 @@ class UCF:
self.__currentSetNumParams = 0
self.__currentSetParams = 0
self.__currentSetNumElements = 0
#################################
# High-level function interface #
#################################
def readGrid(file,verbosity=False,debug=False):
obj = UCF(file=file,verbosity=verbosity,debug=debug)
output = []
for iset in range(0,obj.NumDataset):
(data,params) = obj.readSet(step=1,dset=iset+1)
nx = params[0]
ny = params[1]
nz = params[2]
output.append(data[0:nx])
output.append(data[nx:nx+ny])
output.append(data[nx+ny:nx+ny+nz])
#if obj.UCFVersion<2:
if obj.NumDataset<5:
output.extend(output[-3:])
obj.close()
return output
def readProcgrid(file,verbosity=False,debug=False):
obj = UCF(file=file,verbosity=verbosity,debug=debug)
output = []
for iset in range(0,obj.NumDataset):
(data,params) = obj.readSet(step=1,dset=iset+1)
nxp = params[0]
nyp = params[1]
nzp = params[2]
output.append(data[0:nxp]) # ibeg
output.append(data[nxp:2*nxp]) # iend
output.append(data[2*nxp:2*nxp+nyp]) # jbeg
output.append(data[2*nxp+nyp:2*nxp+2*nyp]) # jend
output.append(data[2*nxp+2*nyp:2*nxp+2*nyp+nzp]) # kbeg
output.append(data[2*nxp+2*nyp+nzp:2*nxp+2*nyp*2*nzp]) # kend
#if obj.UCFVersion<2:
if obj.NumDataset<5:
output.extend(output[-6:])
obj.close()
return output
def readFieldChunk(file,step=1,dset=-1,verbosity=False,debug=False):
obj = UCF(file=file,verbosity=verbosity,debug=debug)
if not isinstance(dset,list):
if dset==-1:
dset = range(1,obj.NumDataset+1) # fix that maybe later (this is maximum over all timesteps)
else:
dset = [dset]
output = []
for ii in dset:
tmp = dict()
(data,params) = obj.readSet(step=step,dset=ii)
tmp['ighost'] = params[0]
tmp['ibeg'] = params[1]
tmp['jbeg'] = params[2]
tmp['kbeg'] = params[3]
tmp['nxl'] = params[4]
tmp['nyl'] = params[5]
tmp['nzl'] = params[6]
tmp['nx'] = params[7]
tmp['ny'] = params[8]
tmp['nz'] = params[9]
tmp['data'] = data.reshape((tmp['nxl']+2*tmp['ighost'],
tmp['nyl']+2*tmp['ighost'],
tmp['nzl']+2*tmp['ighost']),
order='F')
tmp['rank'] = obj.IORank[0]
tmp['rankijk']= obj.IORank[1:]
output.append(tmp)
obj.close()
return output
def readParticles(file,step=-1,verbosity=False,debug=False):
# Check what kind of file was passed: standalone, tar, bytes
# TBD: tar is not supported yet
obj = UCF(file=file,verbosity=verbosity,debug=debug)
if not isinstance(step,list):
if step==-1:
step = range(1,obj.NumTimestep+1)
else:
step = [step]
# The output will be the following:
# 1) numpy array with dimension (ncol,np,ntime)
# 2) dictionary which specifies the columns
# We read the data step by step in a list, which is then converted to a 3D array
pp = []
for ii in step:
(data,params) = obj.readSet(step=ii,dset=1)
npart = params[0]
ncol = params[1]
ncol_rank = params[2]
ncol_hybrid = params[3]
ncol_dem = params[4]
ncol_scalar = params[5]
nscal = ncol_scalar//2
pp.append(data.reshape((ncol,npart),order='F'))
# Close UCF object
obj.close()
# Convert list of 2D arrays to 3D array
pp = np.stack(pp,axis=2)
# Create the dictionary
col = colmap_from_flags(ncol_rank,ncol_hybrid,ncol_dem,nscal)
# Return result
return (pp,col)
def colmap_from_flags(irank,ihybrid,idem,iscal):
'''Creates a dictionary which specifies the columns of a particle array.'''
col = {}
ioffset = 0
if irank>0:
col['rank'] = ioffset; ioffset+=1
if ihybrid>0:
col['id'] = ioffset; ioffset+=1
col['x'] = ioffset; ioffset+=1
col['y'] = ioffset; ioffset+=1
col['z'] = ioffset; ioffset+=1
col['r'] = ioffset; ioffset+=1
col['rho']= ioffset; ioffset+=1
col['ax'] = ioffset; ioffset+=1
col['ay'] = ioffset; ioffset+=1
col['az'] = ioffset; ioffset+=1
col['u'] = ioffset; ioffset+=1
col['v'] = ioffset; ioffset+=1
col['w'] = ioffset; ioffset+=1
col['ox'] = ioffset; ioffset+=1
col['oy'] = ioffset; ioffset+=1
col['oz'] = ioffset; ioffset+=1
col['fx'] = ioffset; ioffset+=1
col['fy'] = ioffset; ioffset+=1
col['fz'] = ioffset; ioffset+=1
col['tx'] = ioffset; ioffset+=1
col['ty'] = ioffset; ioffset+=1
col['tz'] = ioffset; ioffset+=1
if idem>0:
col['fxc'] = ioffset; ioffset+=1
col['fyc'] = ioffset; ioffset+=1
col['fzc'] = ioffset; ioffset+=1
col['txc'] = ioffset; ioffset+=1
col['tyc'] = ioffset; ioffset+=1
col['tzc'] = ioffset; ioffset+=1
if iscal>0:
for ii in range(0,iscal):
col['s'+str(ii)] = ioffset; ioffset+=1
col['q'+str(ii)] = ioffset; ioffset+=1
return col

python/ucf/tools.py Normal file

@@ -0,0 +1,183 @@
def read_grid(file,verbosity=False,debug=False):
from .base import UCF
obj = UCF(file=file,verbosity=verbosity,debug=debug)
output = []
for iset in range(0,obj.NumDataset):
(data,params) = obj.readSet(step=1,dset=iset+1)
nx = params[0]
ny = params[1]
nz = params[2]
output.append((data[0:nx],data[nx:nx+ny],data[nx+ny:nx+ny+nz]))
#if obj.UCFVersion<2:
if obj.NumDataset<5:
output.extend(output[-1:])
obj.close()
return output
def read_procgrid(file,verbosity=False,debug=False):
from .base import UCF
obj = UCF(file=file,verbosity=verbosity,debug=debug)
output = []
for iset in range(0,obj.NumDataset):
(data,params) = obj.readSet(step=1,dset=iset+1)
nxp = params[0]
nyp = params[1]
nzp = params[2]
output.append((
data[0:nxp], # ibeg
data[nxp:2*nxp], # iend
data[2*nxp:2*nxp+nyp], # jbeg
data[2*nxp+nyp:2*nxp+2*nyp], # jend
data[2*nxp+2*nyp:2*nxp+2*nyp+nzp], # kbeg
data[2*nxp+2*nyp+nzp:2*nxp+2*nyp+2*nzp] # kend
))
#if obj.UCFVersion<2:
if obj.NumDataset<5:
output.extend(output[-1:])
obj.close()
return output
def read_chunk(file,step=1,dset=-1,keep_ghost=True,verbosity=False,debug=False):
from .base import UCF
obj = UCF(file=file,verbosity=verbosity,debug=debug)
if not isinstance(dset,list):
if dset==-1:
dset = range(1,obj.NumDataset+1) # fix that maybe later (this is maximum over all timesteps)
else:
dset = [dset]
output = []
for ii in dset:
tmp = dict()
(data,params) = obj.readSet(step=step,dset=ii)
tmp['ighost'] = params[0]
tmp['ibeg'] = params[1]
tmp['jbeg'] = params[2]
tmp['kbeg'] = params[3]
tmp['nxl'] = params[4]
tmp['nyl'] = params[5]
tmp['nzl'] = params[6]
tmp['nx'] = params[7]
tmp['ny'] = params[8]
tmp['nz'] = params[9]
tmp['data'] = data.reshape((tmp['nxl']+2*tmp['ighost'],
tmp['nyl']+2*tmp['ighost'],
tmp['nzl']+2*tmp['ighost']),
order='F')
tmp['rank'] = obj.IORank[0]
tmp['rankijk']= obj.IORank[1:]
if not keep_ghost and tmp['ighost']:
tmp['data'] = tmp['data'][1:-1,1:-1,1:-1]
tmp['ighost'] = 0
output.append(tmp)
obj.close()
return output
def read_particles(file,step=-1,verbosity=False,debug=False):
from .base import UCF
import numpy
obj = UCF(file=file,verbosity=verbosity,debug=debug)
if not isinstance(step,list):
if step==-1:
step = range(1,obj.NumTimestep+1)
else:
step = [step]
# The output will be the following:
# 1) numpy array with dimension (ncol,np,ntime)
# 2) dictionary which specifies the columns
# We read the data step by step in a list, which is then converted to a 3D array
pp = []
for ii in step:
(data,params) = obj.readSet(step=ii,dset=1)
npart = params[0]
ncol = params[1]
ncol_rank = params[2]
ncol_hybrid = params[3]
ncol_dem = params[4]
ncol_scalar = params[5]
nscal = ncol_scalar//2
pp.append(data.reshape((ncol,npart),order='F'))
# Close UCF object
obj.close()
# Convert list of 2D arrays to 3D array
pp = numpy.stack(pp,axis=2)
# Create the dictionary
col = colmap_from_flags(ncol_rank,ncol_hybrid,ncol_dem,nscal)
# Return result
return (pp,col)
def colmap_from_flags(irank,ihybrid,idem,iscal):
'''Creates a dictionary which specifies the columns of a particle array.'''
col = {}
ioffset = 0
if irank>0:
col['rank'] = ioffset; ioffset+=1
if ihybrid>0:
col['id'] = ioffset; ioffset+=1
col['x'] = ioffset; ioffset+=1
col['y'] = ioffset; ioffset+=1
col['z'] = ioffset; ioffset+=1
col['r'] = ioffset; ioffset+=1
col['rho']= ioffset; ioffset+=1
col['ax'] = ioffset; ioffset+=1
col['ay'] = ioffset; ioffset+=1
col['az'] = ioffset; ioffset+=1
col['u'] = ioffset; ioffset+=1
col['v'] = ioffset; ioffset+=1
col['w'] = ioffset; ioffset+=1
col['ox'] = ioffset; ioffset+=1
col['oy'] = ioffset; ioffset+=1
col['oz'] = ioffset; ioffset+=1
col['fx'] = ioffset; ioffset+=1
col['fy'] = ioffset; ioffset+=1
col['fz'] = ioffset; ioffset+=1
col['tx'] = ioffset; ioffset+=1
col['ty'] = ioffset; ioffset+=1
col['tz'] = ioffset; ioffset+=1
if idem>0:
col['fxc'] = ioffset; ioffset+=1
col['fyc'] = ioffset; ioffset+=1
col['fzc'] = ioffset; ioffset+=1
col['txc'] = ioffset; ioffset+=1
col['tyc'] = ioffset; ioffset+=1
col['tzc'] = ioffset; ioffset+=1
if iscal>0:
for ii in range(0,iscal):
col['s'+str(ii)] = ioffset; ioffset+=1
col['q'+str(ii)] = ioffset; ioffset+=1
return col
def grid_chunk(grid_global,chunk):
import numpy
xg,yg,zg = grid_global
# Shift indices so that they start from zero
ib = chunk['ibeg']-1
jb = chunk['jbeg']-1
kb = chunk['kbeg']-1
nxl = chunk['nxl']
nyl = chunk['nyl']
nzl = chunk['nzl']
ighost = chunk['ighost']
if ighost:
nxg = len(xg)
nyg = len(yg)
nzg = len(zg)
dx = xg[2]-xg[1]
dy = yg[2]-yg[1]
dz = zg[2]-zg[1]
xl = numpy.zeros(nxl+2)
yl = numpy.zeros(nyl+2)
zl = numpy.zeros(nzl+2)
xl[0] = xg[ib]-dx
xl[1:-1] = xg[ib:ib+nxl]
xl[-1] = xg[ib+nxl-1]+dx
yl[0] = yg[jb]-dy
yl[1:-1] = yg[jb:jb+nyl]
yl[-1] = yg[jb+nyl-1]+dy
zl[0] = zg[kb]-dz
zl[1:-1] = zg[kb:kb+nzl]
zl[-1] = zg[kb+nzl-1]+dz
else:
xl = xg[ib:ib+nxl]
yl = yg[jb:jb+nyl]
zl = zg[kb:kb+nzl]
return (xl,yl,zl)
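A usage sketch tying the helpers above together (file names hypothetical): read one processor chunk and cut out its local grid from the global one:

from ucf import read_grid, read_chunk, grid_chunk

grids = read_grid('grid.bin')       # one (x, y, z) tuple per mesh
chunks = read_chunk('uvwp.00000')   # one dict per dataset in the file
xl, yl, zl = grid_chunk(grids[0], chunks[0])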