diff --git a/examples/scripts/98_download_tiles.py b/examples/scripts/98_download_tiles.py
deleted file mode 120000
index 695f35d4d154aaf686554b58cb127c0f1af22774..0000000000000000000000000000000000000000
--- a/examples/scripts/98_download_tiles.py
+++ /dev/null
@@ -1 +0,0 @@
-/home/operateur/scripts/02_sen2chain_scripts_PM/98_download_tiles.py
\ No newline at end of file
diff --git a/examples/scripts/98_download_tiles.py b/examples/scripts/98_download_tiles.py
new file mode 100644
index 0000000000000000000000000000000000000000..da8c477ca18614d265603afbd7057983a07e9d76
--- /dev/null
+++ b/examples/scripts/98_download_tiles.py
@@ -0,0 +1,74 @@
+# -*- coding:utf-8 -*-
+"""
+ Télécharge les nouvelles tuiles identifiées ds le fichier tiles_file.csv
+ Le delta_t permet d'alléger les requètes au serveur en n'interrogeant que les n derniers jours
+ Ce but de ce script est d'être lancé très régulièrement (cron)
+"""
+
+import logging
+import datetime
+import os
+import time
+import math
+
+import pandas as pd
+
+from sen2chain import DataRequest, DownloadAndProcess
+
+logger = logging.getLogger("L1C download")
+logging.basicConfig(level=logging.INFO)
+
+fwd = os.path.dirname(os.path.realpath(__file__))
+
+# list of tiles to process
+tiles_file = fwd + "/98_download_tiles.csv"
+
+# default number of days to query, counting back from today
+delta_t = 15
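+# e.g. with delta_t = 15, a run on 2019-06-16 queries products
+# sensed from 2019-06-01 up to tomorrow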
+
+# read the list of tiles to download
+tiles_list = pd.read_csv(tiles_file, sep=";", na_values="", comment="#")
+
+total = 0
+for index, row in tiles_list.iterrows():
+    # fill in missing dates; math.isnan raises TypeError when the cell
+    # already holds a date string, in which case the value is kept as is
+    try:
+        if math.isnan(row.start_time):
+            row.start_time = (datetime.datetime.now() - datetime.timedelta(days=delta_t)).strftime("%Y-%m-%d")
+    except TypeError:
+        pass
+    try:
+        if math.isnan(row.end_time):
+            row.end_time = (datetime.datetime.now() + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
+    except TypeError:
+        pass
+    
+    # to occasionally force the start and end dates, uncomment and edit:
+    # row.start_time = "2018-01-01"
+    # row.end_time = "2018-08-06"
+        
+    req = DataRequest(
+        start_date=row.start_time,
+        end_date=row.end_time,
+        cloud_cover_percentage=[0, row.max_clouds],
+    ).from_tiles([row.tile])
+    total += len(req["hubs"])
+    logger.info("Total products to download = {}".format(total))
+    # hubs_limit caps concurrent downloads per hub; download only, no processing here
+    DownloadAndProcess(req, hubs_limit={"peps": 8, "scihub": 0}, process_products=False,
+                       indices_list=[], nodata_clouds=False, quicklook=False, max_processes=8)
+    time.sleep(1)
+
diff --git a/examples/scripts/99_traitement_l2a_multiprocessing.py b/examples/scripts/99_traitement_l2a_multiprocessing.py
deleted file mode 120000
index eb44d6c3ecf05a542273f12e0aec2410f0235ee8..0000000000000000000000000000000000000000
--- a/examples/scripts/99_traitement_l2a_multiprocessing.py
+++ /dev/null
@@ -1 +0,0 @@
-/home/operateur/scripts/02_sen2chain_scripts_PM/99_traitement_l2a_multiprocessing.py
\ No newline at end of file
diff --git a/examples/scripts/99_traitement_l2a_multiprocessing.py b/examples/scripts/99_traitement_l2a_multiprocessing.py
new file mode 100644
index 0000000000000000000000000000000000000000..61438eabbe163973e2d6572b2f64166a82ab99b6
--- /dev/null
+++ b/examples/scripts/99_traitement_l2a_multiprocessing.py
@@ -0,0 +1,153 @@
+# -*- coding:utf-8 -*-
+
+""" lance les traitements sur les fichiers L1C qui ont
+été téléchargés mais qui n'ont pas été traités 
+"""
+import logging
+import datetime
+import os
+import sys
+from itertools import chain
+
+import pandas as pd
+
+from sen2chain import Tile
+from sen2chain import (l2a_multiprocessing,
+                       cld_version_probability_iterations_reprocessing_multiprocessing,
+                       idx_multiprocessing)
+
+try:
+    arg = sys.argv[1]
+except IndexError:
+    arg = ""
+print(arg)
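+
+# Any command-line argument makes this a dry run: the work lists below are
+# built and logged, but no processing is launched. Hypothetical invocations:
+#   python 99_traitement_l2a_multiprocessing.py          # full run
+#   python 99_traitement_l2a_multiprocessing.py dry_run  # only list the work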
+
+logger = logging.getLogger("L2A processing")
+logging.basicConfig(level=logging.INFO)
+logger.setLevel(logging.INFO)
+
+
+# default number of days to query, counting back from today,
+# when the field is left empty in the csv
+delta_t = 15
+
+# list of tiles to process
+fwd = os.path.dirname(os.path.realpath(__file__))
+tiles_file = fwd + "/99_traitement_l2a_multiprocessing.csv"
+
+tiles_list = pd.read_csv(tiles_file, sep=";", na_values="", na_filter=False, comment="#")
+
+# fill in missing fields with default values
+for index, row in tiles_list.iterrows():
+    if not row.start_time:
+        tiles_list.at[index, "start_time"] = (datetime.datetime.now()-datetime.timedelta(days=delta_t)).strftime('%Y-%m-%d')
+    if not row.end_time:
+        tiles_list.at[index, "end_time"] = (datetime.datetime.now()+datetime.timedelta(days=1)).strftime('%Y-%m-%d')
+    if not row.indices:
+        tiles_list.at[index, "indices"] = 'NDVI'
+
+logger.info("\n{}".format(tiles_list))
+
+
+# Process L1C products to L2A
+l1c_process_list = []
+for index, row in tiles_list.iterrows():
+    t = Tile(row.tile)
+    l1c_process_list.append(list(p.identifier for p in t.l2a_missings.filter_dates(date_min = row.start_time, date_max = row.end_time)))
+l1c_process_list = list(chain.from_iterable(l1c_process_list))
+logger.info("l1c_process_list ({} files): {}\n".format(len(l1c_process_list), l1c_process_list))
+
+l2a_res = False
+if l1c_process_list:
+    if not arg:
+        l2a_res = l2a_multiprocessing(l1c_process_list, nb_proc=12)
+
+
+# Process L2A products (cloud masks)
+cm_version = "cm001"
+probability = 1
+iterations = 5
+reprocess = False
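+# (assumption: cm001 is the basic sen2chain cloud mask version; set reprocess
+# to True to rebuild existing masks instead of only computing missing ones)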
+
+cld_l2a_process_list = []
+for index, row in tiles_list.iterrows():
+    t = Tile(row.tile)
+    if not reprocess:
+        l2a_list = [p.identifier for p in t.cloudmasks_missing(
+            cm_version=cm_version,
+            probability=probability,
+            iterations=iterations,
+        ).filter_dates(date_min=row.start_time, date_max=row.end_time)]
+    else:
+        l2a_list = [p.identifier for p in t.l2a.filter_dates(date_min=row.start_time, date_max=row.end_time)]
+    
+    for j in l2a_list:
+        cld_l2a_process_list.append([j, 
+                                     cm_version,
+                                     probability,
+                                     iterations,
+                                     reprocess,
+                                     ])
+logger.info("\ncld_l2a_process_list ({} files): {}".format(len(cld_l2a_process_list), cld_l2a_process_list))
+
+cld_res = False
+if cld_l2a_process_list:
+    if not arg:
+        cld_res = cld_version_probability_iterations_reprocessing_multiprocessing(cld_l2a_process_list, nb_proc=8)
+
+        
+# Process L2A products (indices)
+reprocess = False
+nodata_clouds = True
+quicklook = False
+cm_version = "cm001"
+probability = 1
+iterations = 5
+
+indices_l2a_process_list = []
+for index, row in tiles_list.iterrows():
+    t = Tile(row.tile)
+    indices_list = row.indices.split("/")
+    for i in indices_list:
+        if not reprocess:
+            l2a_list = [p.identifier for p in t.missing_indices(
+                i,
+                nodata_clouds=nodata_clouds,
+                cm_version=cm_version,
+                probability=probability,
+                iterations=iterations,
+            ).filter_dates(date_min=row.start_time, date_max=row.end_time)]
+        else:
+            l2a_list = [p.identifier for p in t.l2a.filter_dates(date_min=row.start_time, date_max=row.end_time)]
+
+        for j in l2a_list:
+            indices_l2a_process_list.append([j, 
+                                             i,
+                                             reprocess,
+                                             nodata_clouds,
+                                             quicklook,
+                                             cm_version,
+                                             probability,
+                                             iterations])
+            
+logger.info("\nindices_l2a_process_list ({} files): {}".format(len(indices_l2a_process_list), indices_l2a_process_list))
+
+indices_res = False
+if indices_l2a_process_list:
+    if not arg:
+        indices_res = idx_multiprocessing(indices_l2a_process_list, nb_proc=8)
+