# -*- coding:utf-8 -*-
""" lance les traitements sur les fichiers L1C qui ont
été téléchargés mais qui n'ont pas été traités
"""
import logging
import datetime
import pandas as pd
from sen2chain import Tile, L1cProduct, L2aProduct
from sen2chain import l2a_multiprocessing, cld_version_probability_iterations_reprocessing_multiprocessing, idx_multiprocessing
import os, shutil, sys
import glob
import math
from itertools import chain
# Optional CLI flag: any first argument switches the script to dry-run mode
# (the worklists below are built and logged, but nothing is launched).
try:
    arg = sys.argv[1]
except IndexError:
    # Was a bare "except:", which also swallows SystemExit/KeyboardInterrupt;
    # only a missing argv entry should fall back to the falsy default.
    arg = []
print(arg)

# Module-level logger shared by all processing steps below.
logger = logging.getLogger("Traitement L2A")
logging.basicConfig(level=logging.INFO)
logger.setLevel(logging.INFO)
# Fallback query window, in days counted back from today, used when the
# "start_time" field is left blank in the CSV.
delta_t = 15

# CSV describing the tiles to process, expected next to this script.
# na_filter=False keeps blank cells as empty strings (falsy), which the
# default-filling loop below relies on.
fwd = os.path.dirname(os.path.realpath(__file__))
tiles_file = fwd + "/99_traitement_l2a_multiprocessing.csv"
tiles_list = pd.read_csv(tiles_file, sep = ';', na_values="", na_filter=False, comment='#')

# Fill per-tile defaults wherever the CSV left a field empty:
# start_time -> today - delta_t, end_time -> tomorrow, indices -> NDVI.
for idx, record in tiles_list.iterrows():
    if not record.start_time:
        start = datetime.datetime.now() - datetime.timedelta(days=delta_t)
        tiles_list.at[idx, "start_time"] = start.strftime('%Y-%m-%d')
    if not record.end_time:
        end = datetime.datetime.now() + datetime.timedelta(days=1)
        tiles_list.at[idx, "end_time"] = end.strftime('%Y-%m-%d')
    if not record.indices:
        tiles_list.at[idx, "indices"] = 'NDVI'
logger.info("\n{}".format(tiles_list))
# L1C -> L2A processing: collect, per tile, every downloaded L1C product
# that has no matching L2A yet inside the requested date window, into one
# flat worklist.
l1c_process_list = []
for _, record in tiles_list.iterrows():
    tile = Tile(record.tile)
    missing = tile.l2a_missings.filter_dates(date_min = record.start_time, date_max = record.end_time)
    l1c_process_list.extend(p.identifier for p in missing)
logger.info("l1c_process_list ({} files): {}\n".format(len(l1c_process_list), l1c_process_list))

# Launch the L2A production unless a CLI argument requested a dry run.
l2a_res = False
if l1c_process_list and not arg:
    l2a_res = l2a_multiprocessing(l1c_process_list, nb_proc=12)
# Cloud-mask (clouds) parameters — forwarded verbatim to the cloud-mask
# step and reused by the indices step below.
cm_version = "cm001"    # cloud-mask version tag passed to sen2chain
probability = 1         # presumably a cloud-probability threshold — confirm in sen2chain docs
iterations = 5          # presumably mask dilation/erosion iterations — confirm in sen2chain docs
cld_shad = True         # NOTE(review): flag names suggest which cloud classes
cld_med_prob = True     # are included in the mask (shadows, medium/high
cld_hi_prob = True      # probability clouds, thin cirrus) — verify against
thin_cir = True         # the sen2chain cloud-mask API
reprocess = False       # False: only products with missing masks are queued below
# L2A cloud-mask processing: one job per L2A product, bundling the product
# identifier with the cloud-mask parameters defined above.
cld_l2a_process_list = []
cld_params = [cm_version,
              probability,
              iterations,
              cld_shad,
              cld_med_prob,
              cld_hi_prob,
              thin_cir,
              reprocess,
              ]
for _, record in tiles_list.iterrows():
    tile = Tile(record.tile)
    if reprocess:
        products = tile.l2a.filter_dates(date_min = record.start_time, date_max = record.end_time)
    else:
        products = tile.cloudmasks_missing().filter_dates(date_min = record.start_time, date_max = record.end_time)
    cld_l2a_process_list.extend([p.identifier] + cld_params for p in products)
logger.info("\ncld_l2a_process_list ({} files): {}".format(len(cld_l2a_process_list), cld_l2a_process_list))

# Compute the cloud masks unless a CLI argument requested a dry run.
cld_res = False
if cld_l2a_process_list and not arg:
    cld_res = cld_version_probability_iterations_reprocessing_multiprocessing(cld_l2a_process_list, nb_proc=8)
# L2A indices processing: one job per (product, index) pair. Indices are
# listed per tile in the CSV as a "/"-separated string (e.g. "NDVI/NDWI").
nodata_clouds = True   # presumably: apply the cloud mask as nodata — confirm in sen2chain docs
quicklook = False      # presumably: skip quicklook generation — confirm in sen2chain docs
indices_l2a_process_list = []
for _, record in tiles_list.iterrows():
    tile = Tile(record.tile)
    for indice in record.indices.split("/"):
        if reprocess:
            products = tile.l2a.filter_dates(date_min = record.start_time, date_max = record.end_time)
        else:
            products = tile.missing_indices(indice = indice,
                                            nodata_clouds = nodata_clouds,
                                            ).filter_dates(date_min = record.start_time, date_max = record.end_time)
        indices_l2a_process_list.extend([p.identifier,
                                         indice,
                                         reprocess,
                                         nodata_clouds,
                                         quicklook,
                                         cm_version,
                                         probability,
                                         iterations,
                                         cld_shad,
                                         cld_med_prob,
                                         cld_hi_prob,
                                         thin_cir,
                                         ] for p in products)
logger.info("\nindices_l2a_process_list ({} files): {}".format(len(indices_l2a_process_list), indices_l2a_process_list))

# Launch the index computations unless a CLI argument requested a dry run.
indices_res = False
if indices_l2a_process_list and not arg:
    indices_res = idx_multiprocessing(indices_l2a_process_list, nb_proc=8)