Skip to content
Snippets Groups Projects
Commit 2b4f48c4 authored by Christoph Knote's avatar Christoph Knote
Browse files

Cleanup tests, add test for 2110, format all documents with black

parent 3fd50c35
No related branches found
No related tags found
1 merge request!17Cleanup tests
.Phony: coverage
coverage:
coverage run --source=tests -m unittest discover
......@@ -11,12 +11,12 @@ def get_version():
return pkg_resources.get_distribution("icartt").version
__version__ = get_version()
del get_version
# EXPORTED TYPES
from .dataset import Dataset, StandardNormalComments, Variable, Formats, VariableType
__all__ = ("Dataset", "StandardNormalComments", "Variable", "Formats", "VariableType")
\ No newline at end of file
__all__ = ("Dataset", "StandardNormalComments", "Variable", "Formats", "VariableType")
from ctypes import ArgumentError
import datetime
import sys
import pathlib
......@@ -22,11 +21,14 @@ class Formats(IntEnum):
class VariableType(IntEnum):
"""ICARTT Variable Types"""
IndependentVariable = 1
IndependentBoundedVariable = 2
AuxiliaryVariable = 3
DependentVariable = 4
class DataStore1001:
def __init__(self, ivar, dvars):
self.ivarname = ivar.shortname
......@@ -64,7 +66,7 @@ class DataStore1001:
self.add(newData)
def add(self, newData):
"""bulk add data, providing a (structured) numpy array.
"""(bulk) add data, providing a (structured) numpy array.
Array has to have shape [ (ivar, dvar, dvar, ...), ... ],
missing values have to be set to np.nan.
......@@ -72,13 +74,13 @@ class DataStore1001:
:param newData: data to be added
:type newData: numpy.ndarray
"""
if not type(newData) is np.ndarray: # TODO: isinstance(arr, np.ndarray)?
raise ArgumentError("Input data needs to be numpy ndarray.")
if not isinstance(newData, np.ndarray):
raise TypeError("Input data needs to be numpy ndarray.")
if newData.dtype.names is None:
try:
newData.dtype = [(name, newData.dtype) for name in self.varnames]
except:
ArgumentError(
ValueError(
"Could not assign names to data structure, are you providing an array containing all variables?"
)
......@@ -96,10 +98,15 @@ class DataStore1001:
def write(
self, f=sys.stdout, fmt=DEFAULT_NUM_FORMAT, delimiter=DEFAULT_FIELD_DELIM
):
# TODO the fact that we need to clean before writing suggests we need to be more careful what to "add" in the first place!
d = self.denanify(self.data)
# single line data is 0D, savetxt cannot work with 0D. Make 1D.
# single line data is 0D if passed as tuple, savetxt cannot work with 0D. Make 1D.
if d.ndim == 0:
d = np.array( [ d ] )
d = np.array([d])
# need to squeeze extra dimension added for one liners added as np.array
if len(d.shape) == 2:
d = np.squeeze(d, axis=1)
np.savetxt(f, d, fmt=fmt, delimiter=delimiter)
......@@ -141,16 +148,18 @@ class DataStore2110(collections.UserDict):
# we are at the end of the file if this happens
break
ndeprows = int( auxds[self.nauxvarname] )
ndeprows = int(auxds[self.nauxvarname])
try:
depds.addFromTxt(f, splitChar, max_rows=ndeprows)
except:
break
# it is indeed possible to have zero dependent data lines
if ndeprows > 0:
try:
depds.addFromTxt(f, splitChar, max_rows=ndeprows)
except:
raise IOError("Could not read dependent data lines.")
ivarValue = float(auxds[self.ivar.shortname])
self.data[ivarValue] = { "AUX": auxds, "DEP": depds }
self.data[ivarValue] = {"AUX": auxds, "DEP": depds}
def add(self, newAuxData, newDepData):
auxds = DataStore1001(self.ivar, self.auxvars)
......@@ -161,7 +170,7 @@ class DataStore2110(collections.UserDict):
ivarValue = float(auxds[self.ivar.shortname])
self.data[ivarValue] = { "AUX": auxds, "DEP": depds }
self.data[ivarValue] = {"AUX": auxds, "DEP": depds}
def write(
self, f=sys.stdout, fmt=DEFAULT_NUM_FORMAT, delimiter=DEFAULT_FIELD_DELIM
......@@ -192,10 +201,10 @@ class StandardNormalComments(collections.UserList):
# shortnames line is always there:
n = 1
# freeform comment might or might not be there:
n += sum(len(s.split('\n')) for s in self.freeform)
n += sum(len(s.split("\n")) for s in self.freeform)
# tagged comments have at least one line:
for k in self.keywords.values():
n += sum(len(s.split('\n')) for s in k.data) or 1
n += sum(len(s.split("\n")) for s in k.data) or 1
return n
@property
......@@ -219,18 +228,24 @@ class StandardNormalComments(collections.UserList):
keywordLine = False
for l in raw:
possibleKeyword = l.split(":")[0].strip()
if possibleKeyword in self.keywords or re.match("R[a-zA-Z0-9]{1,2}[ ]*", possibleKeyword):
if possibleKeyword in self.keywords or re.match(
"R[a-zA-Z0-9]{1,2}[ ]*", possibleKeyword
):
currentKeyword = possibleKeyword
keywordLine = True
if not currentKeyword in self.keywords: # for the revisions only...
self.keywords[currentKeyword] = KeywordComment(currentKeyword, False)
self.keywords[currentKeyword] = KeywordComment(
currentKeyword, False
)
else:
keywordLine = False
if currentKeyword is None:
self.freeform.append(l)
elif keywordLine:
self.keywords[currentKeyword].append(l.replace(l.split(":")[0] + ":", "").strip())
self.keywords[currentKeyword].append(
l.replace(l.split(":")[0] + ":", "").strip()
)
else:
self.keywords[currentKeyword].append(l.strip())
......@@ -405,15 +420,28 @@ class Dataset:
:return: numpy array of time steps
:rtype: numpy.ndarray
"""
if self.data.data is None or self.independentVariable is None:
if self.defineMode:
return np.datetime64("NaT")
if self.data.data is None or self.data.data == {}:
return np.datetime64("NaT")
ref_dt = np.datetime64(datetime.datetime(*self.dateOfCollection), "ns")
time_values = []
if self.format == Formats.FFI1001:
time_values = self.data[self.independentVariable.shortname]
elif self.format == Formats.FFI2110:
# for 2110, data keys are independent variable values by definition in our implementation
time_values = np.array(list(self.data.keys()))
else:
raise NotImplementedError(
"times method not implemented for this ICARTT format!"
)
# ivar unit is seconds as per standard; need to convert to ns to use timedelta64[ns] type.
return ref_dt + (
self.data[self.independentVariable.shortname] * 10**9
).astype("timedelta64[ns]")
return ref_dt + (time_values * 10**9).astype("timedelta64[ns]")
@property
def variables(self):
......@@ -423,6 +451,7 @@ class Dataset:
:rtype: dict of Variable(s)
"""
variables = {}
if self.independentVariable is not None:
variables[self.independentVariable.shortname] = self.independentVariable
if self.independentBoundedVariable is not None:
......@@ -849,12 +878,10 @@ class Dataset:
def __repr__(self):
# TODO: this could be more meaningful
return "ICARTT Dataset object repr"
return "icartt.Dataset()"
def __str__(self):
# TODO: this could be more meaningful
return "ICARTT Dataset string representation"
return f"ICARTT Dataset {self.makeFileName()}"
def __init__(self, f=None, loadData=True, splitChar=",", format=Formats.FFI1001):
"""Constructor method"""
......@@ -910,4 +937,4 @@ class Dataset:
self.readHeader(splitChar)
if loadData:
self.endDefineMode()
self.readData(splitChar)
\ No newline at end of file
self.readData(splitChar)
......@@ -2,6 +2,7 @@
import numpy as np
def compareFiles(fn, strIn, strOut, skiplines=0, nlines=-1): # pragma: no cover
"""compare two icartt files line by line"""
strOut.seek(0)
......
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('input_file', type=str)
parser.add_argument('output_file', type=str)
parser.add_argument("input_file", type=str)
parser.add_argument("output_file", type=str)
args = parser.parse_args()
......@@ -11,5 +11,5 @@ import icartt
ict = icartt.Dataset(args.input_file)
ict.splitChar = ", "
with open(args.output_file, 'w') as f:
with open(args.output_file, "w") as f:
ict.write(f)
......@@ -5,17 +5,16 @@ import datetime
import numpy as np
import icartt
try:
from _utils import compareFiles # we're executing from the directory of this script
from _utils import compareFiles # we're executing from the directory of this script
except ImportError:
from ._utils import compareFiles # we're executing from another directory
from ._utils import compareFiles # we're executing from another directory
# working directory, example files
wd = pathlib.Path(__file__).parent
class Simple1001TestCase(unittest.TestCase):
def setUp(self):
self.fn = wd / "example_data" / "expect_ok" / "NOx_RHBrown_20040830_R0.ict"
......@@ -164,6 +163,7 @@ class Simple1001TestCase(unittest.TestCase):
["Use of these data requires PRIOR OK from the PI"],
)
self.assertEqual(ict.normalComments.keywords["OTHER_COMMENTS"].data, ["N/A"])
# TODO test revision information
def testReadData(self):
ict = icartt.Dataset(self.fn, loadData=True)
......@@ -325,4 +325,4 @@ class Create1001TestCase(unittest.TestCase):
if __name__ == "__main__": # pragma: no cover
unittest.main()
\ No newline at end of file
unittest.main()
import unittest
import pathlib
import io
import datetime
import numpy as np
import icartt
try:
from _utils import compareFiles # we're executing from the directory of this script
except ImportError:
from ._utils import compareFiles # we're executing from another directory
# working directory, example files
wd = pathlib.Path(__file__).parent
class Simple2110TestCase(unittest.TestCase):
    """Read/write round-trip tests against a known FFI 2110 example file.

    The example file lives under ``example_data/expect_warn`` — it is
    expected to load successfully but with warnings.
    """

    def setUp(self):
        self.fn = wd / "example_data" / "expect_warn" / "PAVE-AR_DC8_20050203_R0.ict"
        # number of header lines declared by the example file
        self.nHeader = 55

    def tearDown(self):
        pass

    def _open(self, loadData=False):
        # Helper: open the example dataset, optionally reading the data block.
        return icartt.Dataset(self.fn, loadData=loadData)

    def testOpen(self):
        ds = self._open()
        self.assertEqual(type(ds), icartt.Dataset)

    def testFormat(self):
        ds = self._open()
        self.assertEqual(ds.format, icartt.Formats.FFI2110)

    def testN(self):
        # counts of variables and comment lines as declared in the header
        ds = self._open()
        self.assertEqual(ds.nHeader, self.nHeader)
        self.assertEqual(len(ds.auxiliaryVariables), 11)
        self.assertEqual(len(ds.dependentVariables), 7)
        self.assertEqual(len(ds.normalComments), 18)
        self.assertEqual(len(ds.specialComments), 1)

    def testIvar(self):
        ds = self._open()
        ivar = ds.independentVariable
        self.assertEqual(ivar.shortname, "UTC")
        self.assertEqual(ivar.units, "seconds")
        self.assertEqual(ivar.standardname, "Time_Start")
        self.assertEqual(ivar.longname, "number of seconds from 00:00 UTC")
        self.assertEqual(ivar.scale, 1.0)
        self.assertEqual(ivar.miss, -99999.0)

    def testAuxvar(self):
        # verify all attributes of the 11 auxiliary variables
        ds = self._open()
        auxvars = list(ds.auxiliaryVariables.values())
        self.assertEqual(
            [v.shortname for v in auxvars],
            [
                "NumAlts",
                "Year",
                "Month",
                "Day",
                "AvgTime",
                "Lat",
                "Lon",
                "PAlt",
                "GPSAlt",
                "SAT",
                "SZA",
            ],
        )
        self.assertEqual(
            [v.units for v in auxvars],
            [
                "#",
                "yyyy",
                "mm",
                "dd",
                "minutes",
                "degrees",
                "degrees",
                "meters",
                "meters",
                "K",
                "degrees",
            ],
        )
        self.assertEqual(
            [v.standardname for v in auxvars],
            [
                "Number_of_altitudes",
                "Year_UTC",
                "Month_UTC",
                "Day_UTC",
                "Averaging_time",
                "Latitude",
                "Longitude",
                "pressure_altitude",
                "GPS_altitude",
                "Static_air_temperature",
                "Sun_Zenith_Angle",
            ],
        )
        self.assertEqual(
            [v.longname for v in auxvars],
            [
                "Number_of_altitudes_reported",
                "Year_UTC",
                "Month_UTC",
                "Day_UTC",
                "Averaging_time_of_presented_data xxx.x_minutes",
                None,
                None,
                None,
                None,
                None,
                None,
            ],
        )
        # scale and missing value are identical for all aux variables
        self.assertEqual([v.scale for v in auxvars], ["1"] * 11)
        self.assertEqual([v.miss for v in auxvars], ["-9999"] * 11)

    def testDvar(self):
        # verify all attributes of the 7 dependent variables
        ds = self._open()
        dvars = list(ds.dependentVariables.values())
        self.assertEqual(
            [v.shortname for v in dvars],
            [
                "TempK[]",
                "Log10_NumDensity[]",
                "TempK_Err[]",
                "AerKlet[]",
                "Log10_O3NumDensity[]",
                "O3_MR[]",
                "Log10_O3NumDensity_Err[]",
            ],
        )
        self.assertEqual(
            [v.units for v in dvars],
            ["K", "part/cc", "K", "Klet", "part/cc", "ppb", "part/cc"],
        )
        self.assertEqual(
            [v.standardname for v in dvars],
            [
                "Temperature",
                "Log10_NumDensity",
                "Temperature_Error",
                "Aerosol",
                "Log10_O3NumDensity",
                "Ozone_mixing_ratio",
                "Log10_O3NumDensity_Error",
            ],
        )
        self.assertEqual(
            [v.longname for v in dvars],
            [
                "Temperature_array",
                "Log10_NumDensity_array",
                "Temperature_error_array",
                "Aerosol_array",
                "Log10_Ozone_NumDensity_array",
                "Ozone_mixing_ratio_array",
                "Log10_NumDensity_error_array",
            ],
        )
        self.assertEqual(
            [v.scale for v in dvars],
            ["0.1", "0.0001", "0.1", "0.01", "0.0001", "0.1", "0.0001"],
        )
        self.assertEqual([v.miss for v in dvars], ["-999999"] * 7)

    def testNCOM(self):
        # verify each keyword of the normal comments section
        ds = self._open()
        expected = {
            "PI_CONTACT_INFO": ["Enter PI Address here"],
            "PLATFORM": ["NASA DC8"],
            "LOCATION": ["Lat, Lon, and Alt included in the data records"],
            "ASSOCIATED_DATA": ["N/A"],
            "INSTRUMENT_INFO": ["N/A"],
            "DATA_INFO": ["N/A"],
            "UNCERTAINTY": ["Contact PI"],
            "ULOD_FLAG": ["-7777"],
            "ULOD_VALUE": ["N/A"],
            "LLOD_FLAG": ["-8888"],
            "LLOD_VALUE": ["N/A"],
            "DM_CONTACT_INFO": ["Enter Data Manager Info here"],
            "PROJECT_INFO": ["PAVE MISSION: Jan-Feb 2005"],
            "STIPULATIONS_ON_USE": [
                "Use of these data should be done in consultation with the PI"
            ],
            "OTHER_COMMENTS": ["N/A"],
        }
        for keyword, want in expected.items():
            self.assertEqual(ds.normalComments.keywords[keyword].data, want)
        # TODO test revision information

    def testReadData(self):
        ds = self._open(loadData=True)
        self.assertEqual(type(ds), icartt.Dataset)

    def testWriteHeader(self):
        ds = self._open()
        strOut = io.StringIO()
        ds.writeHeader(f=strOut)
        # context manager closes the input handle (previously leaked)
        with open(self.fn) as strIn:
            self.assertTrue(compareFiles(self.fn, strIn, strOut, nlines=self.nHeader))

    def testWriteData(self):
        ds = self._open(loadData=True)
        strOut = io.StringIO()
        ds.write(f=strOut)
        with open(self.fn) as strIn:
            self.assertTrue(
                compareFiles(self.fn, strIn, strOut, skiplines=self.nHeader)
            )

    def testWrite(self):
        # full round trip: header plus data must reproduce the input file
        ds = self._open(loadData=True)
        strOut = io.StringIO()
        ds.write(f=strOut)
        with open(self.fn) as strIn:
            self.assertTrue(compareFiles(self.fn, strIn, strOut))
class Create2110TestCase(unittest.TestCase):
    """Build an FFI 2110 dataset from scratch, add data, and write it out."""

    def testCreateDs(self):
        now = datetime.datetime.today()

        ict = icartt.Dataset(format=icartt.Formats.FFI2110)

        # required file-level metadata
        ict.PIName = "Knote, Christoph"
        ict.PIAffiliation = "Faculty of Medicine, University Augsburg, Germany"
        ict.dataSourceDescription = "Example data"
        ict.missionName = "MBEES"
        ict.dateOfCollection = now.timetuple()[:3]
        ict.dateOfRevision = now.timetuple()[:3]
        ict.dataIntervalCode = [0]

        ict.independentVariable = icartt.Variable(
            "Time_Start",
            "seconds_from_0_hours_on_valid_date",
            "Time_Start",
            "Time_Start",
            vartype=icartt.VariableType.IndependentVariable,
            scale=1.0,
            miss=-9999999,
        )
        ict.independentBoundedVariable = icartt.Variable(
            "Altitude",
            "altitude_above_ground_in_meters",
            "Altitude",
            "Altitude",
            vartype=icartt.VariableType.IndependentBoundedVariable,
            scale=1.0,
            miss=-9999999,
        )

        # ICARTT convention: first aux variable contains number of dependent elements
        aux_specs = (
            ("nAltitudes", "number_of_dependent_variable_items", "variable"),
            ("Time_Stop", "seconds_from_0_hours_on_valid_date", "Time_Stop"),
            ("Longitude", "longitude_in_degrees", "Longitude"),
            ("Latitude", "latitude_in_degrees", "Latitude"),
        )
        for shortname, units, standardname in aux_specs:
            ict.auxiliaryVariables[shortname] = icartt.Variable(
                shortname, units, standardname, shortname, scale=1.0, miss=-9999999
            )

        for name in ("Payload1", "Payload2"):
            ict.dependentVariables[name] = icartt.Variable(
                name, "some_units", name, name, scale=1.0, miss=-9999999
            )

        ict.specialComments.append("Some comments on this dataset:")
        ict.specialComments.append("They are just examples!")
        ict.specialComments.append("Adapt as needed.")

        # we can just use len of the list to check number of comments
        self.assertEqual(len(ict.specialComments), 3)

        # let's define some normal comments... 21 lines
        ncom = {
            "PI_CONTACT_INFO": "PI1 pi-email@mail.com\nPI2 more-email@what.com",
            "PLATFORM": "a platform",
            "LOCATION": "somewhere",
            "ASSOCIATED_DATA": "met sensor data",
            "INSTRUMENT_INFO": "super cool instrument",
            "DATA_INFO": f"icartt Python package version: {icartt.__version__}",
            "UNCERTAINTY": "not much",
            "ULOD_FLAG": "-7777",
            "ULOD_VALUE": "N/A",
            "LLOD_FLAG": "-8888",
            "LLOD_VALUE": "N/A",
            "DM_CONTACT_INFO": "datamanager@mail.edu",
            "PROJECT_INFO": "the campaign",
            "STIPULATIONS_ON_USE": "no",
            "OTHER_COMMENTS": "a lot more info\non multiple lines",
            "REVISION": (
                "R1\n"
                "R1: revised time synchronization.\n"
                "R0: initial, preliminary version."
            ),
        }
        for k, v in ncom.items():
            ict.normalComments.keywords[k].append(v)

        # we can check if nlines method of normalComments class works
        self.assertEqual(ict.normalComments.nlines, 21)

        ict.normalComments.freeform.append("free comment line 1")
        ict.normalComments.freeform.append("free comment line 2")
        self.assertEqual(ict.normalComments.nlines, 23)

        ict.endDefineMode()

        # and times must be NaT while no data have been added yet
        self.assertTrue(np.isnat(ict.times))

        # note, the second variable ('4') is the number of dependent lines to follow
        # ivar, ndepvar, auxvar1, auxvar2, auxvar3
        auxData = np.array([(12.3, 4, 12.5, 48.21, 10.3)])
        # ibvar, dvar1, dvar2
        depData = np.array(
            [(0, 123, 8.4e4), (100, 122, 9.1e4), (250, 115, 9.3e4), (500, 106, 9.8e4)]
        )
        ict.data.add(auxData, depData)

        # ... and so forth
        auxData = np.array([(12.4, 2, 12.8, 48.41, 12.1)])
        # ibvar, dvar1, dvar2
        depData = np.array([(0, 153, 7.3e4), (270, 172, 8.9e4)])
        ict.data.add(auxData, depData)

        # elements of the time array must be equal to our input
        t0 = np.datetime64(datetime.datetime(*now.timetuple()[:3]), "ns")
        for have, want in zip(ict.times, (12.3, 12.4)):
            self.assertEqual(int(have - t0), int(want * 10**9))

        strOut = io.StringIO()
        ict.write(f=strOut)
# Allow running this test module directly from the command line.
if __name__ == "__main__":  # pragma: no cover
    unittest.main()
......@@ -5,58 +5,43 @@ import io
# import pytest
import icartt
try:
from _utils import compareFiles # we're executing from the directory of this script
from _utils import compareFiles # we're executing from the directory of this script
except ImportError:
from ._utils import compareFiles # we're executing from another directory
from ._utils import compareFiles # we're executing from another directory
# working directory, example files
wd = pathlib.Path(__file__).parent / "example_data"
# file : (ffi, nlscom, nlncom, nHeaderLines, exception) <- want
fileinfo = {
# should warn; has multiple keywords per line in normalComments
'AROTAL-RAY_DC8_20040715_R1.ict': (2110, 1, 19, 68, None),
'AR_DC8_20050203_R0.ict': (2110, 0, 18, 54, None), # warns
'BB-FLUX_CU-SOF_20180808_R2.ict': (1001, 0, 18, 38, None), # ok
'DC8-20160517.ict': (1001, 0, 18, 36, None), # ok
'discoveraq-CO2_p3b_20140721_R0.ict': (1001, 1, 18, 37, None), # ok
# ok
'DISCOVERAQ-NOXYO3_P3B_20140720_R0.ict': (1001, 0, 27, 47, None),
'Dongdaemun_NIER_20160520_RA.ict': (1001, 0, 18, 36, None), # warns
'HOX_DC8_20040712_R0.ict': (1001, 0, 18, 36, None), # ok
# warns
'korusaq-flexpart-dc8_trajectory_20160529_R2.ict': (2110, 27, 20, 101, None),
# ok
'korusaq-mrg01-HANSEO-KING-AIR_merge_20160507_RA.ict': (1001, 0, 18, 45, None),
# error: 2310 not implemented
'LIDARO3_WP3_20040830_R0.ict': (2310, 0, 18, 46, NotImplementedError),
'NOx_RHBrown_20040830_R0.ict': (1001, 0, 18, 41, None), # ok
# error: invalid number of variables / columns
'output.ict': (1001, 8, 17, 41, Exception),
'PAVE-AR_DC8_20050203_R0.ict': (2110, 1, 18, 55, None), # warns
# ok
'SEAC4RS-PTRMS-acetaldehyde_DC8_20130806_R1.ict': (1001, 0, 26, 44, None),
'bt_Munich_2020061000_72.ict.txt': (1001, 29, 18, 91, None), # warns
# warns (variable names)
'korusaq-mrg10-dc8_merge_20160510_R4.ict': (1001, 0, 29, 397, None),
}
# should warn; has multiple keywords per line in normalComments
"AROTAL-RAY_DC8_20040715_R1.ict": (2110, 1, 19, 68, None),
"AR_DC8_20050203_R0.ict": (2110, 0, 18, 54, None), # warns
"BB-FLUX_CU-SOF_20180808_R2.ict": (1001, 0, 18, 38, None), # ok
"DC8-20160517.ict": (1001, 0, 18, 36, None), # ok
"discoveraq-CO2_p3b_20140721_R0.ict": (1001, 1, 18, 37, None), # ok
# ok
"DISCOVERAQ-NOXYO3_P3B_20140720_R0.ict": (1001, 0, 27, 47, None),
"Dongdaemun_NIER_20160520_RA.ict": (1001, 0, 18, 36, None), # warns
"HOX_DC8_20040712_R0.ict": (1001, 0, 18, 36, None), # ok
# warns
"korusaq-flexpart-dc8_trajectory_20160529_R2.ict": (2110, 27, 20, 101, None),
# ok
"korusaq-mrg01-HANSEO-KING-AIR_merge_20160507_RA.ict": (1001, 0, 18, 45, None),
# error: 2310 not implemented
"LIDARO3_WP3_20040830_R0.ict": (2310, 0, 18, 46, NotImplementedError),
"NOx_RHBrown_20040830_R0.ict": (1001, 0, 18, 41, None), # ok
# error: invalid number of variables / columns
"output.ict": (1001, 8, 17, 41, Exception),
"PAVE-AR_DC8_20050203_R0.ict": (2110, 1, 18, 55, None), # warns
# ok
"SEAC4RS-PTRMS-acetaldehyde_DC8_20130806_R1.ict": (1001, 0, 26, 44, None),
"bt_Munich_2020061000_72.ict.txt": (1001, 29, 18, 91, None), # warns
# warns (variable names)
"korusaq-mrg10-dc8_merge_20160510_R4.ict": (1001, 0, 29, 397, None),
}
# TODO: dataset -> close file pointer after read ?!
......@@ -65,9 +50,9 @@ fileinfo = {
class BulkIOTestCase(unittest.TestCase):
def setUp(self):
self.files_ok = list((wd / "expect_ok").glob("*.ict"))
self.files_warn = list(
(wd / "expect_warn").glob("*.ict")
) + list((wd / "example_data" / "expect_warn").glob("*.txt"))
self.files_warn = list((wd / "expect_warn").glob("*.ict")) + list(
(wd / "example_data" / "expect_warn").glob("*.txt")
)
self.files_fail = list((wd / "expect_fail").glob("*.ict"))
def tearDown(self):
......
......@@ -96,6 +96,8 @@ depData = np.array(
[(0, 123, 8.4e4), (100, 122, 9.1e4), (250, 115, 9.3e4), (500, 106, 9.8e4)]
)
ict.data.add(auxData, depData)
# ... and so forth
auxData = np.array([(12.4, 2, 12.8, 48.41, 12.1)])
......
......@@ -3,7 +3,7 @@ import pathlib
# load a new dataset from an existing file
wd = pathlib.Path(__file__).parent
ict = icartt.Dataset( wd / ".." / "example_data" / 'expect_ok' / 'DC8-20160517.ict')
ict = icartt.Dataset(wd / ".." / "example_data" / "expect_ok" / "DC8-20160517.ict")
# read some metadata
ict.PIName
......@@ -14,17 +14,17 @@ ict.dataID
ict.locationID
# list variable names
[ x for x in ict.variables ]
[x for x in ict.variables]
# some info on a variable
ict.variables['Alt_ft'].units
ict.variables['Alt_ft'].miss
ict.variables["Alt_ft"].units
ict.variables["Alt_ft"].miss
# get data for variable 'UTC':
ict.data['UTC']
ict.data["UTC"]
# get all data (NumPy array):
ict.data[:]
# get the altitude in feet for those data where UTC < 86400.0:
ict.data[ ict.data['UTC'] < 86400.0 ]['Alt_ft']
\ No newline at end of file
ict.data[ict.data["UTC"] < 86400.0]["Alt_ft"]
......@@ -3,10 +3,12 @@ import pathlib
# load a new dataset from an existing file
wd = pathlib.Path(__file__).parent
ict = icartt.Dataset( wd / ".." / "example_data" / 'expect_warn' / 'AR_DC8_20050203_R0.ict')
ict = icartt.Dataset(
wd / ".." / "example_data" / "expect_warn" / "AR_DC8_20050203_R0.ict"
)
# list variable names
[ x for x in ict.variables ]
[x for x in ict.variables]
# independent, independent bounded, dependent, auxiliary variables?
print(f"Independent variable: {ict.independentVariable.shortname}")
......@@ -19,19 +21,19 @@ print(f"Units of variable Latitude are {ict.variables['Latitude'].units}")
print(f"... and its missing value is {ict.variables['Latitude'].miss}")
# get steps for which data is available:
tsteps = [ x for x in ict.data ]
tsteps = [x for x in ict.data]
# let's look at the first time step data
print("First time step data:")
print(ict.data[ tsteps[0] ])
print(ict.data[tsteps[0]])
# auxiliary data at this time step:
print("First time step auxiliary data:")
print(ict.data[ tsteps[0] ]['AUX'][:])
print(ict.data[tsteps[0]]["AUX"][:])
# dependent data at this time step:
tstepdata = ict.data[ tsteps[0] ]['DEP'][:]
tstepdata = ict.data[tsteps[0]]["DEP"][:]
# get the ozone mixing ratio for those data where Altitude < 10000.0:
print(f"Ozone mixing ratio for altitudes < 10000 at time step {tsteps[0]}")
print(tstepdata[ tstepdata['Altitude[]'] < 10000.0 ]['O3_MR[]'])
print(tstepdata[tstepdata["Altitude[]"] < 10000.0]["O3_MR[]"])
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment