Commit fca183bd authored by jacques.grelet_ird.fr

write netcdf file with OceanSites convention name under netcdf dir

parent b6e3be14
@@ -68,6 +68,14 @@ class FileExtractor:
         return buf

     def set_regex(self, cfg, ti):
+        ''' prepare (compile) each regular expression inside toml file under section [<device>.header]
+        [ctd.header]
+        isHeader = '^[*#]'
+        isDevice = '^\*\s+(Sea-Bird)'
+        TIME = 'System UpLoad Time\s*=\s*(\w+)\s+(\d+)\s+(\d+)\s+(\d+):(\d+):(\d+)'
+        LATITUDE = 'NMEA\s+Latitude\s*[:=]\s*(\d+)\s+(\d+\.\d+)\s+(\w)'
+        LONGITUDE = 'NMEA\s+Longitude\s*[:=]\s*(\d+)\s+(\d+.\d+)\s+(\w)'
+        '''
         # first pass on file(s)
         d = cfg[ti.lower()]['header']
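
The new docstring quotes the [ctd.header] patterns exactly as they appear in the toml configuration. Below is a minimal sketch of how such a section can be compiled once and matched against a Sea-Bird header line; the toml fragment, the sample line and the variable names are illustrative, not taken from the repository.

    import re
    import toml

    # illustrative fragment mirroring the [ctd.header] section quoted in the docstring
    cfg = toml.loads(r'''
    [ctd.header]
    TIME = 'System UpLoad Time\s*=\s*(\w+)\s+(\d+)\s+(\d+)\s+(\d+):(\d+):(\d+)'
    LATITUDE = 'NMEA\s+Latitude\s*[:=]\s*(\d+)\s+(\d+\.\d+)\s+(\w)'
    ''')

    # compile every pattern of the section once, keyed by its toml name
    regexes = {key: re.compile(value) for key, value in cfg['ctd']['header'].items()}

    # sample Sea-Bird header line (illustrative)
    line = "* NMEA Latitude = 11 38.25 N"
    match = regexes['LATITUDE'].search(line)
    if match:
        degrees, minutes, hemisphere = match.groups()
        print(degrees, minutes, hemisphere)   # 11 38.25 N
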
@@ -95,7 +103,6 @@ class FileExtractor:
         with fileinput.input(
                 file, openhook=fileinput.hook_encoded("ISO-8859-1")) as f:
             lineData = 0
-            lineHeader = 0
             filesRead += 1
             for line in f:
                 # header detection, skip header lines
...@@ -143,10 +150,12 @@ class FileExtractor: ...@@ -143,10 +150,12 @@ class FileExtractor:
# initialize datetime object # initialize datetime object
dt = datetime dt = datetime
# set skipHeader is declared in toml section, 0 by default # set separator field if declared in toml section, none by default
if 'separator' in cfg[device.lower()]: if 'separator' in cfg[device.lower()]:
self.__separator = cfg[device.lower()]['separator'] self.__separator = cfg[device.lower()]['separator']
# set skipHeader is declared in toml section, 0 by default
# get the dictionary from toml block, device must be is in lower case # get the dictionary from toml block, device must be is in lower case
hash = cfg['split'][device.lower()] hash = cfg['split'][device.lower()]
@@ -212,6 +221,7 @@ class FileExtractor:
                     print("{:07.4f} : {}".format(longitude, longitude_str))
                     self.__data['LONGITUDE'][n] = longitude
                     continue
+
             # split the line, remove leading and trailing space before
             p = line.strip().split(self.__separator)
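
These two hunks document the splitting of data lines: the separator comes from the toml section when declared and stays None otherwise, in which case str.split() falls back to splitting on any run of whitespace. A quick sketch of both behaviours with illustrative values:

    # no 'separator' key declared in the toml section: split on any whitespace
    line = "  24.123   35.456  5.678  \n"
    print(line.strip().split(None))    # ['24.123', '35.456', '5.678']

    # separator declared in the toml section, e.g. separator = ","
    line = "24.123,35.456,5.678\n"
    print(line.strip().split(","))     # ['24.123', '35.456', '5.678']
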
@@ -137,10 +137,12 @@ def process(args, cfg, ti):
     # fileExtractor
     fe = FileExtractor(args.files, args.keys)
+    # prepare (compile) each regular expression inside toml file under section [<device=ti>.header]
     fe.set_regex(cfg, ti)
-    # cfg = toml.load(args.config)
+    # the first pass skip headers and return data dimensions size
     fe.first_pass()
     # fe.secondPass(['PRES', 'TEMP', 'PSAL', 'DOX2'], cfg, 'ctd')
     fe.second_pass(cfg, ti, variables_1D)
     # fe.disp(['PRES', 'TEMP', 'PSAL', 'DOX2'])
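
The comments added here spell out the extractor pipeline: compile the header regexes, run a first pass to get the data dimensions, then a second pass to fill the arrays. The sketch below condenses that call order; the import path, configuration path, file list, device name and variables_1D content are assumptions made for illustration only.

    import toml
    from file_extractor import FileExtractor    # module path is an assumption

    cfg = toml.load("tests/test.toml")           # illustrative configuration file
    ti = "CTD"                                   # illustrative device name
    files = ["data/ctd/station001.cnv"]          # illustrative input file
    keys = ['PRES', 'TEMP', 'PSAL', 'DOX2']
    variables_1D = ['TIME', 'LATITUDE', 'LONGITUDE']   # assumed content

    fe = FileExtractor(files, keys)
    fe.set_regex(cfg, ti)                  # compile the [ctd.header] regexes
    fe.first_pass()                        # skip headers, get data dimensions (fe.m x fe.n)
    fe.second_pass(cfg, ti, variables_1D)  # fill TIME/LATITUDE/LONGITUDE and the data columns
    print("Dimensions: {} x {}".format(fe.m, fe.n))
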
@@ -276,4 +278,6 @@ if __name__ == "__main__":
        fe = process(args, cfg, device)
        #print("Dimensions: {} x {}".format(fe.m, fe.n))
        #print(fe.disp())
-       netcdf.writeNetCDF('output/test.nc', fe, variables_1D)
+       # write the NetCDF file
+       netcdf.writeNetCDF("netcdf/OS_{}_{}.nc".format(cfg['cruise']['cycleMesure'], device), fe, variables_1D)