Skip to content
Snippets Groups Projects
Commit bb711577 authored by Jeremy Auclair's avatar Jeremy Auclair
Browse files

Calculation of ndvi cube and weather cube operational. Starting the samir...

Calculation of ndvi cube and weather cube operational. Starting the samir params handling and time loop architecture
parent 0bd7068e
No related branches found
No related tags found
No related merge requests found
tests.py
*__pycache__/*
\ No newline at end of file
*__pycache__*
*config_modspa.json
\ No newline at end of file
# -*- coding: UTF-8 -*-
# Python
"""
11-07-2023
@author: jeremy auclair
Usage of the SAMIR model in the Modspa framework.
"""
import os # for path exploration
import csv # open csv files
from fnmatch import fnmatch # for character string comparison
from typing import List, Union # to declare variables
import xarray as xr # to manage dataset
import pandas as pd # to manage dataframes
import rasterio as rio # to open geotiff files
import geopandas as gpd # to manage shapefile crs projections
from code.params_samir_class import samir_parameters
def rasterize_samir_parameters(csv_param_file: str, empty_dataset: xr.Dataset) -> None:
    """
    Load the SAMIR parameter csv file into a ``samir_parameters`` object.

    NOTE(review): work in progress — the rasterization of the parameters onto
    the grid of ``empty_dataset`` is not implemented yet; ``empty_dataset``
    and the loaded ``table_param`` are currently unused and the function
    always returns ``None``.

    Arguments
    =========
    csv_param_file: str
        path to the SAMIR parameter csv file
    empty_dataset: xr.Dataset
        dataset providing the target grid for the rasterized parameters
    """
    # Load samir params into an object
    table_param = samir_parameters(csv_param_file)

    return None
def setup_time_loop():
    """Placeholder for the SAMIR time-loop setup (architecture not implemented yet)."""
    return None
def run_samir():
    """Placeholder for the SAMIR model run (not implemented yet)."""
    return None
#! /usr/bin/env python
#-*- coding: utf-8 -*-
"""
11-07-2023 adapted from modspa-parcel code
@author: jeremy auclair
Classes to load and store SAMIR parameters.
"""
from pandas import read_csv # to read csv parameter files
from numpy import nan # to fill nan values
import param # type: ignore
class samir_parameters_LC:
    """
    Store all the SAMIR parameters for one land cover class.

    ``csvLine`` is one column of the parameter csv file converted to a dict
    (parameter name -> value as string). A cell containing the string
    ``"optim"`` marks a parameter to be optimised later: its name is appended
    to ``self.optimList`` instead of being stored as an attribute. Stored
    values are bounds-checked through the ``param`` library.
    """

    # csv parameter name -> attribute name, used by setParam()
    _ATTR_OF_PARAM = {
        # Soil parameters
        "REW": "REW", "Init_RU": "Init_RU", "minZr": "minZr", "maxZr": "maxZr",
        "Ze": "Ze", "Zsoil": "Zsoil", "DiffR": "DiffR", "DiffE": "DiffE",
        # Irrigation parameters
        "Lame_max": "Lame_max", "minDays": "irrigMinDays",
        # Vegetation parameters
        "FminFC": "ndviFCminFC", "FmaxFC": "ndviFCmaxFC", "Fc_stop": "Fc_stop",
        "Kcmax": "Kcmax", "Kcbmin_start": "Kcbmin_start", "Kcbmax_stop": "Kcbmax_stop",
        "Plateau": "ndviPlateau", "Fslope": "ndviFCslope", "Foffset": "ndviFCoffset",
        "KmaxKcb": "ndviKcbmaxKcb", "KminKcb": "ndviKcbminKcb",
        "Kslope": "ndviKcbslope", "Koffset": "ndviKcboffset",
        "m": "m", "p": "p", "p_trigger": "p_trigger",
    }

    def _read(self, csvLine, key, attr, bounds = None, doc = None, conv = float, integer = False):
        """Store ``csvLine[key]`` as attribute ``attr`` (bounds-checked through
        ``param``), or register ``key`` in ``self.optimList`` when the cell
        contains the string "optim"."""
        if csvLine[key] != "optim":
            kind = param.Integer if integer else param.Number
            setattr(self, attr, kind(conv(csvLine[key]), bounds = bounds, doc = doc).default)
        else:
            self.optimList.append(key)

    def __init__(self, csvLine, defaultClass, mode_init = 1):
        """Read one land cover class from ``csvLine``.

        ``defaultClass`` marks the default column (every cell mandatory);
        ``mode_init`` selects the soil initialisation mode (1 or 3: filling
        rate of the available water, otherwise explicit initial depletions).
        """
        # List of parameters that will be optimised (and hence that are not read in the param csv file)
        self.optimList = []

        if defaultClass:
            # The default column supplies fallback values, so every cell must be filled
            for v in csvLine.values():
                if v == '':
                    raise ValueError("All fields must be filled for the default value line")

        self.name = csvLine['ClassName']
        self.number = int(csvLine['ClassNumber'])

        # Integer cells may be written as "10.0" in the csv, hence int(float(...))
        to_int = lambda v: int(float(v))

        # Parameters for the NDVI - Fraction Cover relation
        self._read(csvLine, 'FminFC', 'ndviFCminFC', bounds = (0., 1.))
        self._read(csvLine, 'FmaxFC', 'ndviFCmaxFC', bounds = (0., 1.))
        self._read(csvLine, 'Fslope', 'ndviFCslope', bounds = (0., 10))
        self._read(csvLine, 'Foffset', 'ndviFCoffset', bounds = (-1, 1))
        self._read(csvLine, 'Plateau', 'ndviPlateau', bounds = (0, 365), conv = to_int)

        # Parameters for the NDVI - Kcb relation
        self._read(csvLine, 'KminKcb', 'ndviKcbminKcb', bounds = (0, 0.5))
        self._read(csvLine, 'KmaxKcb', 'ndviKcbmaxKcb', bounds = (0.5, 2))
        self._read(csvLine, 'Kslope', 'ndviKcbslope')
        self._read(csvLine, 'Koffset', 'ndviKcboffset')

        # Soil parameters
        self._read(csvLine, 'Zsoil', 'Zsoil', bounds = (100, 10000), doc = "Soil depth (in mm)")
        # Bug fix: the original used self.Zsoil unconditionally as the Ze upper
        # bound, raising AttributeError when Zsoil was marked "optim"; fall back
        # to the absolute soil depth bound in that case (same pattern as minZr).
        Zsoil_max = self.Zsoil if csvLine['Zsoil'] != "optim" else 10000
        self._read(csvLine, 'Ze', 'Ze', bounds = (1, Zsoil_max), doc = "Evaporative layer depth (in mm)")

        if mode_init == 1 or mode_init == 3:
            self._read(csvLine, 'Init_RU', 'Init_RU', doc = "Filling rate of the available water")
        else:
            # Depletion-based initialisation: these four are always read ("optim" not supported)
            self.Dei = param.Number(float(csvLine['Init_Dei']), bounds = (0, None), doc = "Initial Depletion of the evaporative layer (irrigation + precipitation) (in mm)").default
            self.Dep = param.Number(float(csvLine['Init_Dep']), bounds = (0, None), doc = "Initial Depletion of the evaporative layer (precipitation only) (in mm)").default
            self.Dr = param.Number(float(csvLine['Init_Dr']), bounds = (0, None), doc = "Initial Depletion of the root layer (in mm)").default
            self.Dd = param.Number(float(csvLine['Init_Dd']), bounds = (0, None), doc = "Initial Depletion of the deep layer (in mm)").default

        self._read(csvLine, 'DiffE', 'DiffE', bounds = (0, 1000), doc = "Diffusion coefficient between evaporative and root layers (unitless)")
        self._read(csvLine, 'DiffR', 'DiffR', bounds = (0, 1000), doc = "Diffusion coefficient between root and deep layers (unitless)")
        self._read(csvLine, 'REW', 'REW', bounds = (-1000, 1000), doc = "Readily Evaporable Water (in mm)")
        self._read(csvLine, 'm', 'm', bounds = (0, 1), doc = "")  ## if used, minimum REW should probably be 0

        # Crop parameters: root depth bounds reuse Ze/Zsoil when those are
        # fixed values, wide fallback bounds otherwise
        if csvLine['minZr'] != "optim":
            if (csvLine['Ze'] != "optim") & (csvLine['Zsoil'] != "optim"):
                self.minZr = param.Number(float(csvLine['minZr']), bounds = (self.Ze, self.Zsoil), doc = "Minimum root depth (mm)").default
            else:
                self.minZr = param.Number(float(csvLine['minZr']), bounds = (1, 10000), doc = "Minimum root depth (mm)").default
        else:
            self.optimList.append("minZr")
        if csvLine['maxZr'] != "optim":
            self.maxZr = param.Number(float(csvLine['maxZr']), bounds = (0, Zsoil_max - 1), doc = "Maximum root depth (mm)").default
        else:
            self.optimList.append("maxZr")
        self._read(csvLine, 'p', 'p', bounds = (0, 1), doc = "Fraction of readily available water")

        # Irrigation parameters (FW and the two mode flags are always read)
        self.irrigFW = param.Number(float(csvLine['FW']), bounds = (0, 100), doc = "% of soil wetted by irrigation").default
        self.Irrig_auto = param.Integer(to_int(csvLine['Irrig_auto']), doc = "1 if the automatic irrigation mode is activated").default
        self.Irrig_man = param.Integer(to_int(csvLine['Irrig_man']), doc = "1 if the manual irrigation mode is activated").default
        self._read(csvLine, 'Lame_max', 'Lame_max', doc = "Maximum of irrigation height for each irrigation event (in mm)")
        self._read(csvLine, 'minDays', 'irrigMinDays', bounds = (0, None), doc = "Minimum number of days between two irrigation events", conv = to_int, integer = True)
        self._read(csvLine, 'Kcbmin_start', 'Kcbmin_start', bounds = (0, 1), doc = "Minimum Kcb value above which irrigation may start")
        self._read(csvLine, 'Kcbmax_stop', 'Kcbmax_stop', bounds = (0, 1), doc = "Fraction of peak Kcb value below which irrigation stops")
        self._read(csvLine, 'Kcmax', 'Kcmax', bounds = (0, None), doc = "pas d'info")
        self._read(csvLine, 'Fc_stop', 'Fc_stop', bounds = (0, None), doc = "pas d'info")
        self._read(csvLine, 'Start_date_Irr', 'Start_date_Irr', bounds = (0, None), doc = "pas d'info", conv = to_int)
        # Bug fix: the original appended "Fc_stop" here (copy-paste error)
        # instead of "p_trigger" when this parameter was marked "optim"
        self._read(csvLine, 'p_trigger', 'p_trigger', bounds = (-1, 1), doc = "Fraction of water storage capacity below which irrigation is triggered")

    def setParam(self, paramName, value):
        """Set the attribute corresponding to csv parameter ``paramName`` to
        ``value``. Unknown names are silently ignored (same behaviour as the
        original if/elif chain)."""
        attr = self._ATTR_OF_PARAM.get(paramName)
        if attr is not None:
            setattr(self, attr, value)
class samir_parameters:
    """
    Load all parameters for multiples classes in one object.

    ``self.d`` maps each class name (plus the special rows 'error_rel',
    'error_abs', 'min', 'max') to a ``samir_parameters_LC`` object, or to a
    plain dict of floats for the special rows. The first data column of the
    csv is the default class: it is not stored in ``self.d``, it only
    provides fallback values for empty cells of the other columns.
    """

    def __init__(self, paramFile, mode_init = 1):
        # name -> samir_parameters_LC (or dict of floats for special rows)
        self.d = {}

        # Read csv file with Pandas
        csvFile = read_csv(paramFile, header = None)

        # Index file for correct conversion to dictionnary
        csvFile.index = csvFile.iloc[:,0]
        csvFile.replace(nan, '', inplace = True)

        # True only while processing the first (default) column
        defaultClass = True

        # Loop on columns
        for column in csvFile.columns[1:]:
            # Convert pandas column to dictionnary
            line = csvFile[column].to_dict()

            #TODO: @VR+@CO introduce a check of the values here
            #!! in particular if min == max then error_rel = 0
            #!! add the possibility to configure several land covers
            if defaultClass:
                # Keep the default column as fallback values; not stored in self.d
                defaultLine = line.copy()
            elif line['ClassName'] in ['error_rel','error_abs','min','max']:
                # Special rows become plain dicts of floats; empty cells -> 0
                self.d[line['ClassName']] ={}
                for k in line.keys():
                    if k != 'ClassName':
                        if line[k] == '':
                            line[k] = 0
                        self.d[line['ClassName']][k] = float(line[k])
                # Skip the defaultClass flag update below (already False here)
                continue
            else:
                # Regular land cover class: empty cells fall back to the default column
                for k in line.keys():
                    if line[k] == '':
                        line[k] = defaultLine[k]
                self.d[line['ClassName']] = samir_parameters_LC(line, defaultClass, mode_init)
            defaultClass = False
# -*- coding: UTF-8 -*-
# Python
"""
07-12-2022
@author: jeremy auclair
"""
# Create class that contains configuration file date
import json # to open config file
class config:
    """
    Holds the input parameters needed by the Vegetation and Weather parts.

    Every key of the ``.json`` input file is attached to the instance as an
    attribute (key names are stripped of surrounding whitespace).

    ### Attributes

    - start_date: `str`
    - end_date: `str`
    - path_to_config_file: `str`
    - shapefile_path: `str`
    - download_path: `str`
    - era5_path: `str`
    - run_name: `str`
    - preferred_provider: `str`
    - ndvi_overwrite: `bool`
    - cloud_cover_limit: `int`
    - max_cpu: `int`
    """

    # Constructor
    def __init__(self, config_file: str) -> None:
        # Parse the json configuration file
        with open(config_file, "r") as stream:
            parameters = json.load(stream)

        # Mirror every entry of the file as an instance attribute
        for name, val in parameters.items():
            setattr(self, name.strip(), val)
\ No newline at end of file
{
"_comment": "Start date of the period on which the model will run",
"start_date": "2020-01-01",
"_comment1": "End date of the period on which the model will run",
"end_date": "2020-12-31",
"_comment2": "Path to the EODAG configuration file",
"path_to_config_file": "/home/auclairj/.config/eodag/eodag.yml",
"_comment3": "Path to the shapefile to run the model on",
"shapefile_path": "/mnt/e/DATA/SCIHUB/boundary.shp",
"_comment4": "Path to the directory on which the satellite image data will be downloaded",
"download_path": "/mnt/e/DATA",
"_comment5": "output path for netcdf era5 files (Weather)",
"era5_path": "/mnt/e/DATA/WEATHER",
"_comment9": "Name of the current run, all output files will be saved under a subdirectory of Saves/ with that name, log file will also have that name",
"run_name": "TEST",
"_comment10": "Preferred S2 data provider, choices = theia, copernicus",
"preferred_provider": "copernicus",
"_comment13": "Overwrite NDVI images or not (set to true if you want the code to rewrite NDVI images, takes longer)",
"ndvi_overwrite": false,
"_comment14": "Maximum cloud cover percentage to download data, images with higher cloud cover will not be downloaded",
"cloud_cover_limit": 80,
"_comment20": "Max number of processor cores to use for multiprocessing calculations",
"max_cpu": 3
}
......@@ -13,12 +13,13 @@ from fnmatch import fnmatch # for character string comparison
from typing import List, Union # to declare variables
import xarray as xr # to manage dataset
import pandas as pd # to manage dataframes
# import dask.array as da # dask xarray
from code.toolbox import product_str_to_datetime
import rasterio as rio # to open geotiff files
import geopandas as gpd # to manage shapefile crs projections
from shapely.geometry import box # to create boundary box
from input.input_toolbox import product_str_to_datetime
def calculate_ndvi(extracted_paths: Union[List[str], str], save_dir: str, resolution: int = 20, chunk_size: dict = {'x': 4000, 'y': 4000, 'time': 2}, acorvi_corr: int = 500) -> str:
def calculate_ndvi(extracted_paths: Union[List[str], str], save_dir: str, boundary_shapefile_path: str, resolution: int = 20, chunk_size: dict = {'x': 4000, 'y': 4000, 'time': 8}, acorvi_corr: int = 500) -> str:
# Check resolution for Sentinel-2
if not resolution in [10, 20]:
......@@ -41,18 +42,28 @@ def calculate_ndvi(extracted_paths: Union[List[str], str], save_dir: str, resolu
for product in extracted_paths:
if fnmatch(product, '*_B04_10m*'):
red_paths.append(product)
elif fnmatch(product, '*_B8A_20m*'):
elif fnmatch(product, '*_B08_10m*'):
nir_paths.append(product)
elif fnmatch(product, '*_SCL_20m*'):
mask_paths.append(product)
else:
for product in extracted_paths:
if fnmatch(product, '*_B04_10m*'):
if fnmatch(product, '*_B04_20m*'):
red_paths.append(product)
elif fnmatch(product, '*_B08_10m*'):
elif fnmatch(product, '*_B8A_20m*'):
nir_paths.append(product)
elif fnmatch(product, '*_SCL_20m*'):
mask_paths.append(product)
# Create boundary shapefile from Sentinel-2 image for weather download
ra = rio.open(red_paths[0])
bounds = ra.bounds
geom = box(*bounds)
df = gpd.GeoDataFrame({"id":1,"geometry":[geom]})
df.crs = ra.crs
df.geometry = df.geometry.to_crs('epsg:4326')
df.to_file(boundary_shapefile_path)
del df, ra, geom, bounds
# Sort and get dates
red_paths.sort()
......@@ -61,9 +72,9 @@ def calculate_ndvi(extracted_paths: Union[List[str], str], save_dir: str, resolu
dates = [product_str_to_datetime(prod) for prod in red_paths]
# Open datasets with xarray
red = xr.open_mfdataset(red_paths, combine = 'nested', concat_dim = 'time', chunks = chunk_size).squeeze(dim = ['band'], drop = True).rename({'band_data': 'red'}).astype('f4')
nir = xr.open_mfdataset(nir_paths, combine = 'nested', concat_dim = 'time', chunks = chunk_size).squeeze(dim = ['band'], drop = True).rename({'band_data': 'nir'}).astype('f4')
mask = xr.open_mfdataset(mask_paths, combine = 'nested', concat_dim = 'time', chunks = chunk_size).squeeze(dim = ['band'], drop = True).rename({'band_data': 'mask'}).astype('f4')
red = xr.open_mfdataset(red_paths, combine = 'nested', concat_dim = 'time', chunks = chunk_size, parallel = True).squeeze(dim = ['band'], drop = True).rename({'band_data': 'red'}).astype('f4')
nir = xr.open_mfdataset(nir_paths, combine = 'nested', concat_dim = 'time', chunks = chunk_size, parallel = True).squeeze(dim = ['band'], drop = True).rename({'band_data': 'nir'}).astype('f4')
mask = xr.open_mfdataset(mask_paths, combine = 'nested', concat_dim = 'time', chunks = chunk_size, parallel = True).squeeze(dim = ['band'], drop = True).rename({'band_data': 'mask'}).astype('f4')
if resolution == 10:
mask = xr.where((mask == 4) | (mask == 5), 1, 0).interp(x = red.coords['x'], y = red.coords['y'], method = 'nearest')
else:
......@@ -83,13 +94,19 @@ def calculate_ndvi(extracted_paths: Union[List[str], str], save_dir: str, resolu
# Mask and scale ndvi
ndvi['ndvi'] = xr.where(ndvi.ndvi < 0, 0, ndvi.ndvi)
ndvi['ndvi'] = xr.where(ndvi.ndvi > 1, 1, ndvi.ndvi)
ndvi['ndvi'] = ndvi.ndvi*255
ndvi['ndvi'] = (ndvi.ndvi*255) #.astype('u1')
# Write attributes
ndvi['ndvi'].attrs['long_name'] = 'Normalized Difference Vegetation Index'
ndvi['ndvi'].attrs['units'] = 'None'
ndvi['ndvi'].attrs['standard_name'] = 'NDVI'
ndvi['ndvi'].attrs['comment'] = 'Normalized difference of the near infrared and red band. A value of one is a high vegetation presence.'
# Create save path
ndvi_cube_path = save_dir + os.sep + 'NDVI_precube_' + dates[0].strftime('%d-%m-%Y') + '_' + dates[-1].strftime('%d-%m-%Y') + '.nc'
# Save NDVI cude to netcdf
ndvi.to_netcdf(ndvi_cube_path, encoding = {"ndvi": {"dtype": "u8", "_FillValue": 0}})
# Save NDVI cube to netcdf
ndvi.to_netcdf(ndvi_cube_path, encoding = {"ndvi": {"dtype": "u1", "_FillValue": 0}})
ndvi.close()
return ndvi_cube_path
\ No newline at end of file
# -*- coding: UTF-8 -*-
# Python
"""
04-07-2023
@author: rivallandv, modified by jeremy auclair
Download ERA5 weather files for modspa
"""
import glob # for path management
import sys # for path management
import os # for path exploration
import xarray as xr # to manage nc files
import pandas as pd # to manage dataframes
import geopandas as gpd # to manage shapefiles
from psutil import cpu_count # to get number of physical cores available
import input.lib_era5_land as era5land # custom built functions for ERA5-Land data download
from config.config import config # to import config file
def request_ER5_weather(input_file: str) -> str:
    """
    Download ERA5-Land hourly weather data for the period and area defined in
    the json configuration file, aggregate the monthly files to daily values
    and compute ET0, saving everything as netcdf.

    Arguments
    =========
    input_file: str
        path to the json configuration file

    Returns
    =======
    weather_daily_ncFile: str
        path to the generated daily weather netcdf file
    """

    # Get config file
    config_params = config(input_file)
    outpath = config_params.era5_path + os.sep + config_params.run_name

    # Geometry configuration
    wgs84_epsg = 'epsg:4326'  # WGS84 is the ERA5 epsg

    # ERA5 product parameters
    wind_height = 10  # height of ERA5 wind measurements in meters

    print('REQUEST CONFIGURATION INFORMATIONS:')

    # A shapefile is mandatory: it defines the request area
    if config_params.shapefile_path:
        if os.path.exists(config_params.shapefile_path):
            print('shapeFile: ', config_params.shapefile_path)
        else:
            print('shapeFile not found')
    else:
        # print('specify either shapeFile, boxbound or point coordinate in json file')
        print('specify shapeFile in json file')
        sys.exit(-1)

    print('period: ', config_params.start_date, ' - ', config_params.end_date)
    print('experiment name:', config_params.run_name)

    # Create the output directory on first run
    if os.path.exists(outpath):
        print('path for nc files: ', outpath)
    else:
        os.mkdir(outpath)
        print('mkdir path for nc files: ', outpath)
    print('----------')

    # %% Request ERA5-land BoxBound Determination
    if config_params.shapefile_path:
        # Load shapefile to access geometrics informations for ERA5-Land request
        gdf_expe_polygons = gpd.read_file(config_params.shapefile_path)
        print('Input polygons CRS :', gdf_expe_polygons.crs)
        expe_epsg = gdf_expe_polygons.crs

        # Check that all the polygons are valid (closed)
        liste_polygons_validity = gdf_expe_polygons.geometry.is_valid
        if list(liste_polygons_validity).count(False) > 0:
            print('some polygons of Shapefile are not valid')
            polygons_invalid = liste_polygons_validity.loc[liste_polygons_validity == False]
            print('invalid polygons:', polygons_invalid)
            for i in polygons_invalid.index:
                gdf_expe_polygons.geometry[i]
            # Apply a zero-meter buffer (common trick to repair invalid geometries)
            gdf_expe_polygons_clean = gdf_expe_polygons.geometry.buffer(0)
            gdf_expe_polygons = gdf_expe_polygons_clean

        # search for the total extent of the whole polygons in lat/lon [xlo/ylo/xhi/yhi] [W S E N]
        expe_polygons_boxbound = gdf_expe_polygons.geometry.total_bounds
        expe_polygons_boxbound = list(expe_polygons_boxbound)
        print('shape extend in ', expe_epsg.srs, ':', expe_polygons_boxbound)

        if expe_epsg.srs != wgs84_epsg:
            print('--- convert extend in wgs84 coordinates ---')
            # same extent in wgs84, lat/lon in degrees (format used by google earth engine)
            expe_polygons_boxbound_wgs84 = gdf_expe_polygons.to_crs(
                wgs84_epsg).geometry.total_bounds
            # convert to list for earth engine
            expe_polygons_boxbound_wgs84 = list(expe_polygons_boxbound_wgs84)
        else:
            expe_polygons_boxbound_wgs84 = expe_polygons_boxbound

        # switch coordinates order to agree with ECMWF order: N W S E
        expe_area = expe_polygons_boxbound_wgs84[3], expe_polygons_boxbound_wgs84[0],\
            expe_polygons_boxbound_wgs84[1], expe_polygons_boxbound_wgs84[2]

        print('boxbound [N W S E] extend in ', wgs84_epsg)
        print(expe_area)

        # determine boxbound for ECMWF request (included shape boxbound)
        era5_expe_polygons_boxbound_wgs84 = era5land.era5_enclosing_shp_aera(expe_area, 0.1)
        print('boxbound [N W S E] request extend in ', wgs84_epsg)
        print(era5_expe_polygons_boxbound_wgs84)

    print('--start request--')

    # Get number of available CPUs
    nb_processes = 4 * min([cpu_count(logical = False), len(os.sched_getaffinity(0)), config_params.max_cpu])  # downloading data demands very little computing power, each processor core can manage multiple downloads

    #============================================================================================
    # Call daily data
    era5land.call_era5land_daily_for_MODSPA(config_params.start_date, config_params.end_date, era5_expe_polygons_boxbound_wgs84, output_path = outpath, processes = nb_processes)

    year = config_params.start_date[0:4]
    list_era5land_hourly_ncFiles = glob.glob(outpath + os.sep + 'ERA5-land_' + year + '*' + '.nc')

    for ncfile in list_era5land_hourly_ncFiles:
        print(ncfile)

    # Create the daily-files subdirectory on first run
    save_dir = outpath + os.sep + 'ncdailyfiles'
    if os.path.exists(outpath+os.sep+'ncdailyfiles'):
        print('path for nc daily files: ', save_dir)
    else:
        os.mkdir(outpath+os.sep+'ncdailyfiles')
        print('mkdir path for nc daily files: ', save_dir)
    print('----------')

    # Save daily weather data into ncfile
    weather_daily_ncFile = save_dir + os.sep + config_params.start_date + '_' + config_params.end_date + '_' + config_params.run_name + '_era5-land-daily-meteo.nc'

    # Temporary save directory for daily file merge
    variable_list = ['2m_dewpoint_temperature_daily_maximum', '2m_dewpoint_temperature_daily_minimum', '2m_temperature_daily_maximum', '2m_temperature_daily_minimum', 'total_precipitation_daily_mean', '10m_u_component_of_wind_daily_mean', '10m_v_component_of_wind_daily_mean', 'surface_solar_radiation_downwards_daily_mean']

    # Aggregate monthly files
    aggregated_files = era5land.concat_monthly_nc_file(list_era5land_hourly_ncFiles, variable_list, save_dir)

    # Calculate ET0 over the whole time period
    era5land.era5Land_nc_daily_to_ET0(aggregated_files, weather_daily_ncFile, h = wind_height)

    return weather_daily_ncFile
\ No newline at end of file
File moved
This diff is collapsed.
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment