diff --git a/icartt/__init__.py b/icartt/__init__.py index 44baa5dda2320760e02240b9eded89728186a521..a96c250527acb44a5b52fc015368fe2d5b306c70 100644 --- a/icartt/__init__.py +++ b/icartt/__init__.py @@ -1 +1 @@ -from .dataset import Dataset, StandardNormalComments, Variable, Formats \ No newline at end of file +from .dataset import Dataset, StandardNormalComments, Variable, Formats, VariableType \ No newline at end of file diff --git a/icartt/dataset.py b/icartt/dataset.py index 6cd8cb45983d66e4a0aec1467cd0dcd341d9173a..c881b18a5efdd1e109f1594386742f0ddf01c08c 100644 --- a/icartt/dataset.py +++ b/icartt/dataset.py @@ -3,22 +3,148 @@ import sys import collections import re import math -from tkinter.tix import Form import warnings +import numpy as np +from io import StringIO from enum import Enum, IntEnum class Formats(IntEnum): '''File Format Indicators (FFI) ''' - FFI_1001 = 1001 - FFI_2110 = 2110 -# FFI_2310 = 2310 + FFI_1001 = 1001 + FFI_2110 = 2110 +# FFI_2310 = 2310 class VariableType(Enum): - IVAR = 1 - IBVAR = 2 - DVAR = 3 + Independent_Variable = 1 + Independent_Bounded_Variable = 2 + Dependent_Variable = 3 + +def sanitize(val, miss): + return float(val) if not float(val) == float(miss) else np.NaN + +class DataStore_1001: + def __init__(self, ivar, dvars): + self.ivarname = ivar.shortname +# + self.varnames = [ ivar.shortname ] + [ x for x in dvars ] + self.missvals = { x: dvars[x].miss for x in dvars } + self.missvals.update( { self.ivarname: ivar.miss } ) +# + self.data = None +# + def __getitem__(self, s=slice(None)): + return self.data[s] +# + def add_bulk(self, raw): + for cur in range(len(raw)): + newdata = { x: raw[cur][i] for i, x in enumerate( self.varnames ) } + self.add(**newdata) +# + def add(self, **kwargs): + if not self.ivarname in kwargs.keys(): + raise Exception("Need independent variable data.") + + ivarvalue = sanitize(kwargs[self.ivarname], self.missvals[self.ivarname]) + + newline = np.array( np.NaN, dtype = [ (v, 'f8') for v in self.varnames ] ) + for key in kwargs.keys(): + if key in self.varnames: + newline[key] = sanitize( kwargs[key], self.missvals[key] ) + + if type(self.data) is type(None): + self.data = newline + self.data = self.data.reshape(1) # don't even ask + else: + if ivarvalue in self.data[self.ivarname]: + raise Exception("Cannot replace data (yet).") + else: + self.data = np.append( self.data, newline ) + + def write(self, f=sys.stdout, fmt="%.1f", delimiter=", "): + np.savetxt( f, self.data, fmt=fmt, delimiter=delimiter ) + + +class DataStore_2110: + def __init__(self, ivar, ibvar, auxvars, dvars): + self.ivarname = ivar.shortname + self.ibvarname = ibvar.shortname + + self.auxvarnames = [ x for x in auxvars ] + self.dvarnames = [ x for x in dvars ] + + self.missvals = { x: dvars[x].miss for x in dvars } + self.missvals.update( { x: auxvars[x].miss for x in auxvars } ) + self.missvals.update( { self.ibvarname: ibvar.miss } ) + self.missvals.update( { self.ivarname: ivar.miss } ) + + self.nauxvarname = self.auxvarnames[0] # convention! 
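+        # "convention": the first auxiliary variable is taken to hold the number of
+        # dependent data lines that follow each independent-variable value (used in add_bulk below)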
+# + self.data = {} +# + self.ivar = ivar + self.auxvars = auxvars + self.ibvar = ibvar + self.dvars = dvars +# + def add_auxline(self, auxline): + newdata = { x: auxline[i] for i, x in enumerate( [ self.ivarname ] + self.auxvarnames ) } + self.add(**newdata) + + def add_deplines(self, ivar, raw): + for cur in range(len(raw)): + newdata = { x: raw[cur][i] for i, x in enumerate( [ self.ibvarname ] + self.dvarnames ) } + newdata.update( { self.ivarname: ivar }) + self.add(**newdata) + + def add_bulk(self, raw): + cur = 0 + while cur < len(raw): + ivarvalue = sanitize(raw[cur][0], self.missvals[self.ivarname]) + + self.add_auxline(raw[cur]) + cur += 1 + + # stupid, but at first auxline added, nprimary_dat ist a 0-dim array... + nprimary_data = self.data[ivarvalue]['AUX'][self.nauxvarname] + nprimary = int(nprimary_data) if nprimary_data.shape == () else int(nprimary_data[-1]) + + self.add_deplines(ivarvalue, raw[ cur:(cur+nprimary) ]) + + cur += nprimary + + def add(self, **kwargs): + # whatever we do, an independent variable is needed + if not self.ivarname in kwargs.keys(): + raise Exception("Need independent variable data.") + + ivarvalue = sanitize(kwargs[self.ivarname], self.missvals[self.ivarname]) + + # this is an AUX line + if any( [ x in self.auxvarnames for x in kwargs.keys() ] ): + # and we create the whole dataset if needed + if not ivarvalue in self.data.keys(): + self.data[ ivarvalue ] = { + "AUX": DataStore_1001( self.ivar, self.auxvars ), + "DEP": DataStore_1001( self.ibvar, self.dvars ) + } + self.data[ ivarvalue ]['AUX'].add( **kwargs ) + + # this is a DEP line + if any( [ x in self.dvarnames for x in kwargs.keys() ] ): + if not self.ibvarname in kwargs.keys(): + raise Exception("Need independent (bounded) variable data.") + + if not ivarvalue in self.data.keys(): + raise Exception("Aux data line needs to be added first.") + + self.data[ ivarvalue ]['DEP'].add( **kwargs ) + + def write(self, f=sys.stdout, fmt="%.1f", delimiter=", "): + for ivarvalue in self.data: + self.data[ ivarvalue ]['AUX'].write(f, fmt=fmt, delimiter=delimiter) + self.data[ ivarvalue ]['DEP'].write(f, fmt=fmt, delimiter=delimiter) class KeywordComment(): def __init__(self, key, na_allowed): @@ -56,7 +182,6 @@ class StandardNormalComments(collections.UserList): current_keyword = None for l in raw: possible_keyword = l.split(":")[0].strip() -# import pdb; pdb.set_trace() if possible_keyword in self.keywords or re.match("R[a-zA-Z0-9]{1,2}[ ]*", possible_keyword): current_keyword = possible_keyword if not current_keyword in self.keywords: # for the revisions only... @@ -71,7 +196,7 @@ class StandardNormalComments(collections.UserList): if self.keywords[key].data == []: warnings.warn("Normal comments: required keyword {:s} is missing.".format(key)) - def __init__(self, contents=[]): + def __init__(self): self.freeform = [] self.shortnames = [] @@ -99,10 +224,6 @@ class StandardNormalComments(collections.UserList): self.keywords["UNCERTAINTY"].na_allowed = False self.keywords["REVISION"].na_allowed = False - if not contents is []: - self.ingest(contents) - - class Variable(collections.UserList): '''An ICARTT variable description with name, units, scale and missing value. 
@@ -119,7 +240,7 @@ class Variable(collections.UserList): :type longname: str :param vartype: Variable type (unbounded/bounded independent or dependent) - :type vartype: enum:`icartt.Formats`, defaults to VariableType.DVAR + :type vartype: enum:`icartt.Formats`, defaults to VariableType.Dependent_Variable :param scale: Scaling factor for the variable :type scale: float, defaults to 1.0 @@ -144,36 +265,6 @@ class Variable(collections.UserList): descstr += [ str(self.longname) ] return splitChar.join(descstr) - def append(self, *argv): - '''Append data to a variable. Depending on type (independent, dependent variable), - all identifying (bounded and unbounded) independent variables need to be given. - - :param ivar: value of the independent (unbounded) variable - :type ivar: float - - :param ibvar: value of the independent (bounded) variable - :type ibvar: float, optional - - :param dvar: value of the dependent variable - :type dvar: float, optional - ''' - - def sanitized(z): return float(z) if not float(z) == float(self.miss) else float('NaN') - - v = [sanitized(y) for y in argv] - - if len(v) > 2: - # ( (a, b, ...), c ) - x = (tuple([y for y in v[:-1]]), v[-1]) - elif len(v) > 1: - # ( a, b ) - x = ((v[0]), v[1]) - else: - # ( a ) - x = (v[0]) - - self.data.append(x) - def is_valid_variablename(self, name): # ICARTT Standard v2 2.1.1 2) # Variable short names and variable standard names: @@ -189,7 +280,7 @@ class Variable(collections.UserList): return (all_are_alpha_or_underscore and first_is_alpha and less_than_31_chars) - def __init__(self, shortname, units, standardname, longname, vartype=VariableType.DVAR, scale=1.0, miss=-99999.0, splitChar=","): + def __init__(self, shortname, units, standardname, longname, vartype=VariableType.Dependent_Variable, scale=1.0, miss=-99999.0, splitChar=","): '''Constructor method ''' if not self.is_valid_variablename(shortname): @@ -205,8 +296,6 @@ class Variable(collections.UserList): self.splitChar = splitChar - self.data = [] - class Dataset: '''An ICARTT dataset that can be created from scratch or read from a file, @@ -232,11 +321,11 @@ class Dataset: ''' total = -1 if self.format == Formats.FFI_1001: - total = 14 + len(self.DVARS) + len(self.SCOM) + self.NCOM.nlines + total = 14 + len(self.Dependent_Variables) + len(self.Special_Comments) + self.Normal_Comments.nlines if self.format == Formats.FFI_2110: # 2: IVAR + IBVAR - total = 16 + 2 + len(self.AUXVARS) + len(self.DVARS) +\ - len(self.SCOM) + self.NCOM.nlines + total = 16 + 2 + len(self.Auxiliary_Variables) + len(self.Dependent_Variables) +\ + len(self.Special_Comments) + self.Normal_Comments.nlines return total @property @@ -246,7 +335,7 @@ class Dataset: :return: list of variable names :rtype: list ''' - return [x for x in self.VARS.keys()] + return [x for x in self.Variables.keys()] @property def times(self): @@ -255,18 +344,23 @@ class Dataset: :return: list of time steps :rtype: list ''' - return [self.dateValid + datetime.timedelta(seconds=x) for x in self.IVAR] + return [ self.Date_Collection + datetime.timedelta(seconds=x) for x in self.Independent_Variable ] @property - def VARS(self): + def Variables(self): '''Variables (independent + dependent + auxiliary) :return: dictionary of all variables :rtype: dict of Variable(s) ''' - vars = {self.IVAR.name: self.IVAR, **self.DVARS} - if self.format == Formats.FFI_2110: - vars = {self.IBVAR.name: self.IBVAR, **vars, **self.AUXVARS} + vars = {} + if not self.Independent_Variable is None: + vars[ self.Independent_Variable.shortname ] = 
self.Independent_Variable + if not self.Independent_Bounded_Variable is None: + vars[ self.Independent_Bounded_Variable.shortname ] = self.Independent_Bounded_Variable + + vars = { **vars, **self.Dependent_Variables, **self.Auxiliary_Variables } + return vars def __getitem__(self, name): @@ -275,7 +369,7 @@ class Dataset: :return: variable data :rtype: list ''' - return self.VARS[name] + return self.Variables[name] def write_header(self, f=sys.stdout): '''Write header @@ -294,74 +388,77 @@ class Dataset: prnt(txt) # PI last name, first name/initial. - prnt(self.PI) + prnt(self.PI_name) # Organization/affiliation of PI. - prnt(self.organization) + prnt(self.PI_affiliation) # Data source description (e.g., instrument name, platform name, model name, etc.). - prnt(self.dataSource) + prnt(self.Data_Source_Description) # Mission name (usually the mission acronym). - prnt(self.mission) + prnt(self.Mission_Name) # File volume number, number of file volumes (these integer values are used when the data require more than one file per day; for data that require only one file these values are set to 1, 1) - comma delimited. - prnt(self.splitChar.join([str(self.volume), str(self.nvolumes)])) + prnt(self.splitChar.join([str(self.File_Volume_Number), str(self.Total_Number_Of_File_Volumes)])) # UTC date when data begin, UTC date of data reduction or revision - comma delimited (yyyy, mm, dd, yyyy, mm, dd). prnt(self.splitChar.join([datetime.datetime.strftime(x, self.splitChar.join( - ["%Y", "%m", "%d"])) for x in [self.dateValid, self.dateRevised]])) + ["%Y", "%m", "%d"])) for x in [self.Date_Collection, self.Revision_Date]])) # Data Interval (This value describes the time spacing (in seconds) between consecutive data records. It is the (constant) interval between values of the independent variable. For 1 Hz data the data interval value is 1 and for 10 Hz data the value is 0.1. All intervals longer than 1 second must be reported as Start and Stop times, and the Data Interval value is set to 0. The Mid-point time is required when it is not at the average of Start and Stop times. For additional information see Section 2.5 below.). - prnt(self.splitChar.join( [ str(x) for x in self.dataInterval ] ) ) + prnt(self.splitChar.join( [ str(x) for x in self.Data_Interval_Code ] ) ) if self.format == Formats.FFI_2110: # Description or name of independent (bound) variable (This is the name chosen for the start time. It always refers to the number of seconds UTC from the start of the day on which measurements began. It should be noted here that the independent variable should monotonically increase even when crossing over to a second day.). - prnt(self.IBVAR.desc(self.splitChar)) + prnt(self.Independent_Bounded_Variable.desc(self.splitChar)) # Description or name of independent variable (This is the name chosen for the start time. It always refers to the number of seconds UTC from the start of the day on which measurements began. It should be noted here that the independent variable should monotonically increase even when crossing over to a second day.). - prnt(self.IVAR.desc(self.splitChar)) + prnt(self.Independent_Variable.desc(self.splitChar)) # Number of variables (Integer value showing the number of dependent variables: the total number of columns of data is this value plus one.). - prnt(len(self.DVARS)) + prnt(len(self.Dependent_Variables)) # Scale factors (1 for most cases, except where grossly inconvenient) - comma delimited. 
prnt(self.splitChar.join( - ["{:.1g}".format(DVAR.scale) for DVAR in self.DVARS.values()])) + ["{:.1g}".format(DVAR.scale) for DVAR in self.Dependent_Variables.values()])) # Missing data indicators (This is -9999 (or -99999, etc.) for any missing data condition, except for the main time (independent) variable which is never missing) - comma delimited. prnt(self.splitChar.join([str(DVAR.miss) - for DVAR in self.DVARS.values()])) + for DVAR in self.Dependent_Variables.values()])) # Variable names and units (Short variable name and units are required, and optional long descriptive name, in that order, and separated by commas. If the variable is unitless, enter the keyword "none" for its units. Each short variable name and units (and optional long name) are entered on one line. The short variable name must correspond exactly to the name used for that variable as a column header, i.e., the last header line prior to start of data.). - nul = [prnt(DVAR.desc(self.splitChar)) for DVAR in self.DVARS.values()] + nul = [prnt(DVAR.desc(self.splitChar)) for DVAR in self.Dependent_Variables.values()] if self.format == Formats.FFI_2110: # Number of variables (Integer value showing the number of dependent variables: the total number of columns of data is this value plus one.). - prnt(len(self.AUXVARS)) + prnt(len(self.Auxiliary_Variables)) # Scale factors (1 for most cases, except where grossly inconvenient) - comma delimited. prnt(self.splitChar.join( - ["{:.1g}".format(AUXVAR.scale) for AUXVAR in self.AUXVARS.values()])) + ["{:.1g}".format(AUXVAR.scale) for AUXVAR in self.Auxiliary_Variables.values()])) # Missing data indicators (This is -9999 (or -99999, etc.) for any missing data condition, except for the main time (independent) variable which is never missing) - comma delimited. prnt(self.splitChar.join([str(AUXVAR.miss) - for AUXVAR in self.AUXVARS.values()])) + for AUXVAR in self.Auxiliary_Variables.values()])) # Variable names and units (Short variable name and units are required, and optional long descriptive name, in that order, and separated by commas. If the variable is unitless, enter the keyword "none" for its units. Each short variable name and units (and optional long name) are entered on one line. The short variable name must correspond exactly to the name used for that variable as a column header, i.e., the last header line prior to start of data.). - nul = [prnt(AUXVAR.desc(self.splitChar)) for AUXVAR in self.AUXVARS.values()] + nul = [prnt(AUXVAR.desc(self.splitChar)) for AUXVAR in self.Auxiliary_Variables.values()] # Number of SPECIAL comment lines (Integer value indicating the number of lines of special comments, NOT including this line.). - prnt("{:d}".format(len(self.SCOM))) + prnt("{:d}".format(len(self.Special_Comments))) # Special comments (Notes of problems or special circumstances unique to this file. An example would be comments/problems associated with a particular flight.). - nul = [prnt(x) for x in self.SCOM] + nul = [prnt(x) for x in self.Special_Comments] # Number of Normal comments (i.e., number of additional lines of SUPPORTING information: Integer value indicating the number of lines of additional information, NOT including this line.). - prnt("{:d}".format(self.NCOM.nlines)) + prnt("{:d}".format(self.Normal_Comments.nlines)) # Normal comments (SUPPORTING information: This is the place for investigators to more completely describe the data and measurement parameters. The supporting information structure is described below as a list of key word: value pairs. 
Specifically include here information on the platform used, the geo-location of data, measurement technique, and data revision comments. Note the non-optional information regarding uncertainty, the upper limit of detection (ULOD) and the lower limit of detection (LLOD) for each measured variable. The ULOD and LLOD are the values, in the same units as the measurements that correspond to the flags -7777s and -8888s within the data, respectively. The last line of this section should contain all the short variable names on one line. The key words in this section are written in BOLD below and must appear in this section of the header along with the relevant data listed after the colon. For key words where information is not needed or applicable, simply enter N/A.). - nul = [prnt(x) for x in self.NCOM] - + # re-create last line out of actual data if missing... + if self.Normal_Comments.shortnames == []: + self.Normal_Comments.shortnames = self.splitChar.join( [ self.Variables[x].shortname for x in self.Variables ] ) + nul = [prnt(x) for x in self.Normal_Comments] + def _write_data_1001(self, prnt=lambda x: sys.stdout.write(x)): def p(val, var): return var.miss if math.isnan(val) else val - for i in range(len(self.IVAR)): - prnt([p(self.IVAR[i], self.IVAR)] + [p(DVAR[i][1], DVAR) - for DVAR in self.DVARS.values()]) + for i in range(len(self.Independent_Variable)): + prnt([p(self.Independent_Variable[i], self.Independent_Variable)] + \ + [p(DVAR[i][1], DVAR) for DVAR in self.Dependent_Variables.values()]) def _write_data_2110(self, prnt=lambda x: sys.stdout.write(x)): def p(val, var): return var.miss if math.isnan(val) else val - for ival in self.IVAR: - prnt([p(ival, self.IVAR)] + [p(auxval[1], AUXVAR) - for AUXVAR in self.AUXVARS.values() for auxval in AUXVAR if auxval[0] == ival]) - for ibval in [b[1] for b in self.IBVAR if b[0] == ival]: - prnt([p(ibval, self.IBVAR)] + [p(dval[1], DVAR) for DVAR in self.DVARS.values() - for dval in DVAR if (dval[0][0] == ival) and (dval[0][1] == ibval)]) + for ival in self.Independent_Variable: + prnt([p(ival, self.Independent_Variable)] + \ + [p(auxval[1], AUXVAR) for AUXVAR in self.Auxiliary_Variables.values() for auxval in AUXVAR if auxval[0] == ival]) + for ibval in [b[1] for b in self.Independent_Bounded_Variable if b[0] == ival]: + prnt([p(ibval, self.Independent_Bounded_Variable)] + \ + [p(dval[1], DVAR) for DVAR in self.Dependent_Variables.values() for dval in DVAR if (dval[0][0] == ival) and (dval[0][1] == ibval)]) def write_data(self, f=sys.stdout): '''Write data @@ -398,10 +495,10 @@ class Dataset: :rtype: string ''' fn = self.dataID + "_" + self.locationID + "_" + \ - datetime.datetime.strftime(self.dateValid, date_format) + datetime.datetime.strftime(self.Date_Collection, date_format) fn += "_R" + str(self.revision) if not self.revision is None else "" fn += "_L" + str(self.launch) if not self.launch is None else "" - fn += "_V" + str(self.volume) if self.nvolumes > 1 else "" + fn += "_V" + str(self.File_Volume_Number) if self.Total_Number_Of_File_Volumes > 1 else "" return fn + ".ict" @@ -465,31 +562,31 @@ class Dataset: self.version = dmp[2] # line 2 - PI last name, first name/initial. - self.PI = f.readline(do_split=False) + self.PI_name = f.readline(do_split=False) # line 3 - Organization/affiliation of PI. - self.organization = f.readline(do_split=False) + self.PI_affiliation = f.readline(do_split=False) # line 4 - Data source description (e.g., instrument name, platform name, # model name, etc.). 
- self.dataSource = f.readline(do_split=False) + self.Data_Source_Description = f.readline(do_split=False) # line 5 - Mission name (usually the mission acronym). - self.mission = f.readline(do_split=False) + self.Mission_Name = f.readline(do_split=False) # line 6 - File volume number, number of file volumes (these integer values # are used when the data require more than one file per day; for data that # require only one file these values are set to 1, 1) - comma delimited. dmp = f.readline() - self.volume = int(dmp[0]) - self.nvolumes = int(dmp[1]) + self.File_Volume_Number = int(dmp[0]) + self.Total_Number_Of_File_Volumes = int(dmp[1]) # line 7 - UTC date when data begin, UTC date of data reduction or revision # - comma delimited (yyyy, mm, dd, yyyy, mm, dd). dmp = f.readline() - self.dateValid = datetime.datetime.strptime( + self.Date_Collection = datetime.datetime.strptime( "".join(["{:s}".format(x) for x in dmp[0:3]]), '%Y%m%d') - self.dateRevised = datetime.datetime.strptime( + self.Revision_Date = datetime.datetime.strptime( "".join(["{:s}".format(x) for x in dmp[3:6]]), '%Y%m%d') # line 8 - Data Interval (This value describes the time spacing (in seconds) @@ -502,7 +599,7 @@ class Dataset: # 2.5 below.). dmp = f.readline() # might have multiple entries for 2110 - self.dataInterval = [ float(x) for x in dmp ] + self.Data_Interval_Code = [ float(x) for x in dmp ] # line 9 - Description or name of independent variable (This is the name # chosen for the start time. It always refers to the number of seconds UTC @@ -520,12 +617,12 @@ class Dataset: if self.format == Formats.FFI_2110: dmp = f.readline() shortname, units, standardname, longname = extract_vardesc(dmp) - self.IBVAR = Variable(shortname, units, standardname, longname, + self.Independent_Bounded_Variable = Variable(shortname, units, standardname, longname, splitChar=self.splitChar) dmp = f.readline() shortname, units, standardname, longname = extract_vardesc(dmp) - self.IVAR = Variable(shortname, units, standardname, longname, + self.Independent_Variable = Variable(shortname, units, standardname, longname, splitChar=self.splitChar) def read_vars(f): @@ -568,10 +665,10 @@ class Dataset: return {shortname: Variable(shortname, unit, standardname, longname, scale=scale, miss=miss, splitChar=self.splitChar) for shortname, unit, standardname, longname, scale, miss in zip(vshortname, vunits, vstandardname, vlongname, vscale, vmiss)} - self.DVARS = read_vars(f) + self.Dependent_Variables = read_vars(f) if self.format == Formats.FFI_2110: - self.AUXVARS = read_vars(f) + self.Auxiliary_Variables = read_vars(f) # line 14 + nvar - Number of SPECIAL comment lines (Integer value # indicating the number of lines of special comments, NOT including this @@ -581,7 +678,7 @@ class Dataset: # line 15 + nvar - Special comments (Notes of problems or special # circumstances unique to this file. An example would be comments/problems # associated with a particular flight.). - self.SCOM = [f.readline(do_split=False) for i in range(0, nscom)] + self.Special_Comments = [f.readline(do_split=False) for i in range(0, nscom)] # line 16 + nvar + nscom - Number of Normal comments (i.e., number of # additional lines of SUPPORTING information: Integer value indicating the @@ -604,7 +701,7 @@ class Dataset: # colon. For key words where information is not needed or applicable, simply # enter N/A.). 
raw_ncom = [f.readline(do_split=False) for i in range(0, nncom)] - self.NCOM = StandardNormalComments(raw_ncom) + self.Normal_Comments.ingest(raw_ncom) self.nheader_file = f.line @@ -612,26 +709,6 @@ class Dataset: warnings.warn("Number of header lines suggested in line 1 ({:d}) do not match actual header lines read ({:d})".format( nheader_suggested, self.nheader)) - def _extract_items_1001(self, raw): - for cur in range(len(raw)): - self.IVAR.append(raw[cur][0]) - nul = [self.DVARS[key].append( - raw[cur][0], raw[cur][i+1]) for i, key in enumerate(self.DVARS)] - - def _extract_items_2110(self, raw): - cur = 0 - num_var_name = list(self.AUXVARS.keys())[0] - while cur < len(raw): - self.IVAR.append(raw[cur][0]) - nul = [self.AUXVARS[key].append( - raw[cur][0], raw[cur][i+1]) for i, key in enumerate(self.AUXVARS)] - nprimary = int(self.AUXVARS[num_var_name][-1][1]) - for i in range(nprimary): - self.IBVAR.append(raw[cur][0], raw[cur+i+1][0]) - nul = [self.DVARS[key].append( - raw[cur][0], raw[cur+i+1][0], raw[cur+i+1][j+1]) for j, key in enumerate(self.DVARS)] - cur += 1 + nprimary - def read_data(self): '''Read ICARTT data (from file) ''' @@ -642,15 +719,10 @@ class Dataset: nul = [self.input_fhandle.readline() for i in range(self.nheader_file)] raw = [line.split(self.splitChar) for line in self.input_fhandle] - if self.format == Formats.FFI_1001: - nul = self._extract_items_1001(raw) - elif self.format == Formats.FFI_2110: - nul = self._extract_items_2110(raw) - else: - warnings.warn( - "Unknown file format: {:d}, could not read data.".format(self.format)) + + nul = self.data.add_bulk(raw) except: - a = 1 + pass finally: self.input_fhandle.close() @@ -658,6 +730,7 @@ class Dataset: '''Read ICARTT data and header ''' self.read_header() + self.end_define_mode() self.read_data() def __del__(self): @@ -666,59 +739,54 @@ class Dataset: self.input_fhandle.close() except: pass + + def end_define_mode(self): + self.DEFINE_MODE = False + + # create data store + if self.format == Formats.FFI_1001: + self.data = DataStore_1001(self.Independent_Variable, self.Dependent_Variables ) + elif self.format == Formats.FFI_2110: + self.data = DataStore_2110(self.Independent_Variable, self.Independent_Bounded_Variable, self.Auxiliary_Variables, self.Dependent_Variables ) def __init__(self, f=None, loadData=True, splitChar=",", format=Formats.FFI_1001): '''Constructor method ''' - self.format = format - self.version = None - - self.dataID = 'dataID' - self.locationID = 'locationID' - - self.revision = 0 - self.launch = None - self.volume = 1 - self.nvolumes = 1 - - self.PI = 'Mustermann, Martin' - self.organization = 'Musterinstitut' - self.dataSource = 'Musterdatenprodukt' - self.mission = 'MUSTEREX' - self.dateValid = datetime.datetime.today() - self.dateRevised = datetime.datetime.today() - self.dataInterval = [ 0.0 ] - self.IVAR = Variable('Time_Start', - 'seconds_from_0_hours_on_valid_date', - 'Time_Start', - 'Time_Start', - vartype=VariableType.IVAR, - scale=1.0, miss=-9999999, splitChar=splitChar) - self.IBVAR = None - self.AUXVARS = {} - self.DVARS = { - 'Time_Stop': - Variable('Time_Stop', - 'seconds_from_0_hours_on_valid_date', - 'Time_Stop', - 'Time_Stop', - scale=1.0, miss=-9999999, splitChar=splitChar), - 'Some_Variable': - Variable('Some_Variable', - 'ppbv', - 'Some_Variable', - 'Some_Variable', - scale=1.0, miss=-9999999, splitChar=splitChar) - } - - self.SCOM = [] - self.NCOM = [] - - self.splitChar = splitChar + self.format = format + self.version = None + + self.dataID = 'dataID' + 
self.locationID = 'locationID' + + self.revision = 0 + self.launch = None + self.File_Volume_Number = 1 + self.Total_Number_Of_File_Volumes = 1 + + self.PI_name = 'Mustermann, Martin' + self.PI_affiliation = 'Musterinstitut' + self.Data_Source_Description = 'Musterdatenprodukt' + self.Mission_Name = 'MUSTEREX' + self.Date_Collection = datetime.datetime.today() + self.Revision_Date = datetime.datetime.today() + self.Data_Interval_Code = [ 0.0 ] + self.Independent_Variable = None + self.Independent_Bounded_Variable = None + self.Auxiliary_Variables = {} + self.Dependent_Variables = {} + + self.Special_Comments = [] + self.Normal_Comments = StandardNormalComments() # Standard v2.0 for normal comments requires all keywords present, # might not be the case - then reading data will fail - self.nheader_file = -1 + self.nheader_file = -1 + + self.splitChar = splitChar + + self.data = None + + self.DEFINE_MODE = True # read data if f is not None if f is not None: @@ -729,4 +797,5 @@ class Dataset: self.read_header() if loadData: + self.end_define_mode() self.read_data()
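
A minimal usage sketch of the reworked interface follows. The attribute and method names (PI_name, Independent_Variable, Dependent_Variables, end_define_mode, data.add, data.write) are the ones introduced by this patch; the concrete values and the commented-out file name are made up for illustration only.

import sys

import icartt

# --- create an FFI 1001 dataset from scratch ---
ds = icartt.Dataset(format=icartt.Formats.FFI_1001)

ds.PI_name = "Mustermann, Martin"
ds.PI_affiliation = "Musterinstitut"
ds.Data_Source_Description = "Musterdatenprodukt"
ds.Mission_Name = "MUSTEREX"

# variables have to be declared while the dataset is still in define mode
ds.Independent_Variable = icartt.Variable(
    "Time_Start", "seconds_from_0_hours_on_valid_date", "Time_Start", "Time_Start",
    vartype=icartt.VariableType.Independent_Variable, miss=-9999999)
ds.Dependent_Variables["Time_Stop"] = icartt.Variable(
    "Time_Stop", "seconds_from_0_hours_on_valid_date", "Time_Stop", "Time_Stop",
    miss=-9999999)
ds.Dependent_Variables["Some_Variable"] = icartt.Variable(
    "Some_Variable", "ppbv", "Some_Variable", "Some_Variable", miss=-9999999)

# leaving define mode creates the numpy-backed DataStore_1001 behind ds.data
ds.end_define_mode()

# records are added via keyword arguments keyed by variable short name;
# variables omitted from a record are stored as NaN
ds.data.add(Time_Start=0.0, Time_Stop=1.0, Some_Variable=12.3)
ds.data.add(Time_Start=1.0, Time_Stop=2.0, Some_Variable=12.5)

# header and data section can be written separately, here to stdout
ds.write_header(f=sys.stdout)
ds.data.write(f=sys.stdout)

# --- read an existing FFI 1001 file (file name is hypothetical) ---
# ds = icartt.Dataset("MUSTEREX_LOCATION_20220101_R0.ict")
# print(ds.variable_names)              # short names of all variables
# print(ds.data[:]["Some_Variable"])    # data access via the numpy structured array

Note that end_define_mode() must run after all variables are declared, since it builds the data store from the variable definitions; when reading a file, Dataset.read() calls it between read_header() and read_data().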