# -*- coding: UTF-8 -*-
# Python
"""
29-08-2023
@author: jeremy auclair
Generate NDVI pandas dataframe for modspa parcel mode.
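Typical pipeline (a minimal usage sketch; file paths are hypothetical):

    raw = extract_ndvi_stats('ndvi_paths.csv', 'parcels.shp', 'ndvi_raw.csv')
    filtered = filter_raw_ndvi(raw, 'ndvi_filtered.csv')
    daily = interpolate_ndvi_parcel(filtered, 'ndvi_daily.csv', '2023-01-01', '2023-12-31')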
"""
import os # system management
import csv # for loading and saving results in csv format
from typing import List, Union # to declare variables
import pandas as pd # to manage dataframes
import geopandas as gpd # to manage shapefile crs projections
import rasterio as rio # to open geotiff files
from datetime import datetime # manage dates
from rasterio.mask import mask # to mask images
import numpy as np # vectorized math
from shapely.geometry import box # to extract parcel statistics
from p_tqdm import p_map # for multiprocessing with progress bars
from psutil import cpu_count # to get number of physical cores available
import warnings # To suppress some warnings
from modspa_pixel.preprocessing.input_toolbox import product_str_to_datetime, find_anomalies
def extract_ndvi_stats_image(args: tuple) -> list:
"""
    extract_ndvi_stats_image extracts NDVI statistics (``mean`` and ``count``) from an NDVI image and a
    geopandas shapefile object. It iterates over the features of the shapefile geometry (polygons) and
    stores this information in a list.
    It returns a list that contains the statistics, a feature ``id`` and the image date for every polygon
    in the shapefile geometry, along with identification data relative to the shapefile: the land cover
    (``LC``). This list is later aggregated into a ``DataFrame``.
    This function is used to allow multiprocessing of the NDVI extraction.
Arguments (packed in args: ``tuple``)
=====================================
1. ndvi_image: ``str``
path to the ndvi image
2. shapefile: ``str``
path to the shapefile
    3. scaling: ``int`` ``default = 255``
        integer scaling used to save NDVI values as integers
    4. buffer_distance: ``int`` ``default = -10``
        distance to buffer shapefile polygons to prevent extracting pixels that are not entirely
        inside a polygon, in meters for Sentinel-2 and Landsat 8 images, < 0 to reduce size
Returns
=======
1. ndvi_stats: ``list``
list containing ndvi statistics and feature information for every
polygon in the shapefile
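
    Example
    =======
    A minimal usage sketch (hypothetical paths), mirroring how ``extract_ndvi_stats`` packs the arguments:

        args = ('/data/ndvi/SENTINEL2A_20230829.tif', '/data/parcels.shp', 255, -10)
        stats = extract_ndvi_stats_image(args)
        # stats is a list of [date, id, mean, count, LC] rows, one per polygon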
"""
    # Turn off numpy warnings
warnings.filterwarnings('ignore')
    # Unpack arguments
    ndvi_image, shapefile, scaling, buffer_distance = args

    # Create list where zonal statistics will be stored
    ndvi_stats = []
# Get info to put in the DataFrame
date = product_str_to_datetime(ndvi_image)
# Open ndvi image and shapefile geometry
ndvi_dataset = rio.open(ndvi_image)
# Get input raster spatial reference and epsg code to reproject shapefile in the same spatial reference
target_epsg = ndvi_dataset.crs
# Open shapefile with geopandas and reproject its geometry
shapefile = gpd.read_file(shapefile)
shapefile['geometry'] = shapefile['geometry'].to_crs(target_epsg)

    # TODO: decide how to manage small polygons
    # shapefile['geometry'] = shapefile['geometry'].buffer(buffer_distance)

# Get no data value
nodata = ndvi_dataset.nodata
# Loop on the individual polygons in the shapefile geometry
for index, row in shapefile.iterrows():
# Get the feature geometry as a shapely object
geom = row.geometry
# id number of the current parcel geometry
id = index + 1
# Get land cover
LC = row.LC
        # Create a bounding box around the geometry
        bbox = box(*geom.bounds)
        # Crop the raster using the bounding box; this doubles as an overlap check
        try:
            cropped_raster, _ = mask(ndvi_dataset, [bbox], crop = True)
        except ValueError:
            # Geometry does not overlap the raster: store an empty measurement
            ndvi_stats.append([date, id, 0, 0, LC])
            continue
# Mask the raster using the geometry
masked_raster, _ = mask(ndvi_dataset, [geom], crop=True)
        # Replace the nodata values with nan
        cropped_raster = cropped_raster.astype(np.float32)
        cropped_raster[cropped_raster == nodata] = np.nan
        masked_raster = masked_raster.astype(np.float32)
        masked_raster[masked_raster == nodata] = np.nan

        # Correct scaling
        np.round((masked_raster * scaling)/(scaling - 1) - 1, decimals = 0, out = masked_raster)

# Calculate the zonal statistics
mean = np.nanmean(masked_raster)
count = np.count_nonzero(~np.isnan(masked_raster))
# Append current statistics to dataframe
ndvi_stats.append([date, id, mean, count, LC])
# Close dataset
ndvi_dataset.close()
return ndvi_stats


def extract_ndvi_stats(ndvi_paths: Union[List[str], str], shapefile: str, save_raw_dataframe_path: str, scaling: int = 255, buffer_distance: int = -10, max_cpu: int = 4) -> pd.DataFrame:
"""
    extract_ndvi_stats extracts NDVI statistics (``mean`` and ``pixel count``) from
    a list of NDVI images and a shapefile (``.shp``). It iterates over the list of NDVI images and, for each image,
    calls the ``extract_ndvi_stats_image`` function, which allows the use of multiprocessing. A modified
    version of the ``tqdm`` module (``p_tqdm``) is used for progress bars.
    It returns a ``pandas DataFrame`` that contains the statistics, a feature ``id`` and the date for every
    date in the dataset and every polygon in the shapefile geometry. This ``DataFrame`` is saved as a ``csv`` file.
Arguments
=========
1. ndvi_paths: ``list[str]`` or ``str``
list of paths to the ndvi images or path to ``csv`` file containing that list
2. shapefile: ``str``
path to the shapefile
3. save_raw_dataframe_path: ``str``
path to save the ``DataFrame`` as ``csv``
    4. scaling: ``int`` ``default = 255``
        integer scaling used to save NDVI values as integers
    5. buffer_distance: ``int`` ``default = -10``
        distance to buffer shapefile polygons to prevent extracting pixels that are not entirely
        inside a polygon, in meters for Sentinel-2 and Landsat 8 images, < 0 to reduce size
    6. max_cpu: ``int`` ``default = 4``
        max number of CPU cores to use for calculation
Returns
=======
    1. ndvi_dataframe: ``pd.DataFrame``
``pandas DataFrame`` containing ndvi statistics, feature information and image information for every
polygon in the shapefile and every date in the ndvi image dataset
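
    Example
    =======
    A minimal usage sketch (hypothetical paths):

        raw = extract_ndvi_stats('/data/ndvi_paths.csv', '/data/parcels.shp', '/data/ndvi_raw.csv', max_cpu = 2)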
"""
# If a file is provided instead of a list of paths, load the csv file that contains the list of paths
    if isinstance(ndvi_paths, str):
with open(ndvi_paths, 'r') as file:
ndvi_paths = []
csvreader = csv.reader(file, delimiter='\n')
for row in csvreader:
ndvi_paths.append(row[0])
    # Prepare arguments for multiprocessing
    args = [(ndvi_image, shapefile, scaling, buffer_distance) for ndvi_image in ndvi_paths]
    # Get number of CPU cores and limit max value (working on a cluster requires os.sched_getaffinity to get the true number of available CPUs,
    # which is not the case on a "personal" computer, hence the use of the min function)
    nb_cores = min([max_cpu, cpu_count(logical = False), len(os.sched_getaffinity(0))])
print('\nStarting NDVI extraction with %d cores for %d images...\n' % (nb_cores, len(ndvi_paths)))
# Start multiprocessing
    results = p_map(extract_ndvi_stats_image, args, num_cpus = nb_cores)
# Collect results and sort them
ndvi_stats_list = []
for result in results:
ndvi_stats_list.extend(result)
# Build DataFrame and put the dates as index
ndvi_dataframe = pd.DataFrame(ndvi_stats_list, columns = ['date', 'id', 'NDVI', 'count', 'LC'])
ndvi_dataframe.loc[ndvi_dataframe['count'] == 0, 'NDVI'] = 0 # set mean to 0 when no data
ndvi_dataframe.index = list(ndvi_dataframe['date'])
# Save DataFrame
ndvi_dataframe.to_csv(save_raw_dataframe_path, index = False)
return ndvi_dataframe
def filter_raw_ndvi_feature(args: tuple) -> list:
"""
    filter_raw_ndvi_feature takes the raw NDVI dataframe for one shapefile element and filters it for further processing. It removes
    measurements with too few valid pixels and averages the statistics over identical dates where
    multiple products cover the same parcel.
    It takes two steps:
    - finds the max pixel count (taken as the total pixel count) and removes measurements with too few
      valid pixels (``min_pixel_ratio`` is entered as an argument).
    - averages over dates to smooth multiple values for a single date.
    It returns a dataframe with the filtered data.
    This function is called for multiprocessing.
    Arguments (packed in args: ``tuple``)
    =====================================
    1. id_filter: ``pd.DataFrame``
        DataFrame containing the raw NDVI statistics for a given shapefile feature
    2. min_pixel_ratio: ``float``
        minimum ratio of valid pixels to total pixels of a feature. Between 0 and 1.
    3. custom: ``bool``
        if the function is called with custom set to true, count values are not used for filtering
Returns
=======
1. average_on_dates: ``pd.DataFrame``
ndvi statistics, feature information and image information for a shapefile element and
every date in the ndvi image dataset, filtered according to pixel count and similar dates.
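
    Example
    =======
    A minimal usage sketch; ``raw_feature_df`` is assumed to hold the raw statistics for one feature:

        filtered = filter_raw_ndvi_feature((raw_feature_df, 0.7, False))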
"""
# Unpack args
id_filter, min_pixel_ratio, custom = args
    # If custom is True, skip the pixel count filtering
    if custom:
        # Average measurements per date, happens when multiple S2 tiles cover the same feature
        average_on_dates = id_filter.groupby('date', as_index = False).mean(numeric_only = True)
    else:
        # Find the max number of pixels for a feature (we suppose it is the total number of pixels for this feature)
        count_max = id_filter['count'].max()
        # Filter out all measurements that have too few valid pixels
        count_filter = id_filter[id_filter['count'] >= round(count_max * min_pixel_ratio)]
        # Average measurements per date, happens when multiple S2 tiles cover the same feature
        average_on_dates = count_filter.groupby('date', as_index = False).mean(numeric_only = True)
    # Reset the date as index
    average_on_dates.index = list(average_on_dates['date'])

    # Check if the dataframe contains at least one row
    if average_on_dates.shape[0] < 1:
        return None
    else:
        # Return the filtered dataframe
        return average_on_dates
def filter_raw_ndvi(ndvi_raw_dataframe: Union[pd.DataFrame, str], save_filtered_dataframe_path: str, min_pixel_ratio: float = 0.7, max_cpu: int = 4, custom: bool = False) -> pd.DataFrame:
"""
filter_raw_ndvi takes the raw ndvi dataframe and filters it for further processing. It removes
measurements with too few valid pixels and averages the statistics over identical dates where
multiple products cover the same parcel.
    It takes three steps:
    - collect all measurements for a single id value.
    - finds the max pixel count (taken as the total pixel count) and removes measurements with too few valid pixels (``min_pixel_ratio`` is entered as an argument).
- averages over dates to smooth multiple values for a single date.
    It returns a ``pandas DataFrame`` that has the same structure as the raw NDVI ``DataFrame``, but
    filtered. This ``DataFrame`` is saved as a ``csv`` file.
This function uses multiprocessing for faster calculations.
Arguments
=========
    1. ndvi_raw_dataframe: ``pd.DataFrame`` or ``str``
        Dataframe containing the raw NDVI statistics or path to a ``csv`` file containing that dataframe
2. save_filtered_dataframe_path: ``str``
path to save the ``DataFrame`` as ``csv``
    3. min_pixel_ratio: ``float`` ``default = 0.7``
        minimum ratio of valid pixels to total pixels of a feature. Between 0 and 1.
4. max_cpu: ``int`` ``default = 4``
max number of CPU cores to use for calculation
5. custom: ``bool`` ``default = False``
if function is called with custom set as true, count values are not used for filtering
Returns
=======
1. filtered_ndvi: ``pd.DataFrame``
``pandas DataFrame`` containing ndvi statistics, feature information and image information for
every polygon in the shapefile and every date in the ndvi image dataset, filtered according
to pixel count and similar dates.
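
    Example
    =======
    A minimal usage sketch (hypothetical paths):

        filtered = filter_raw_ndvi('/data/ndvi_raw.csv', '/data/ndvi_filtered.csv', min_pixel_ratio = 0.7)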
"""
# If a string is given, load the DataFrame
    if isinstance(ndvi_raw_dataframe, str):
ndvi_raw_dataframe = pd.read_csv(ndvi_raw_dataframe)
ndvi_raw_dataframe.index = list(ndvi_raw_dataframe['date'])
# Get a list of all feature id values
IDs = list(ndvi_raw_dataframe['id'].drop_duplicates())
IDs.sort()
# Prepare arguments for multiprocessing
args = [(id_filter, min_pixel_ratio, custom) for _, id_filter in ndvi_raw_dataframe.groupby('id')]
    # Get number of CPU cores and limit max value (working on a cluster requires os.sched_getaffinity to get the true number of available CPUs,
    # which is not the case on a "personal" computer, hence the use of the min function)
    nb_cores = min([max_cpu, cpu_count(logical = False), len(os.sched_getaffinity(0))])
print('\nStarting NDVI DataFrame filtering with %d cores for %d elements...\n' % (nb_cores, len(IDs)))
# Start multiprocessing
    results = p_map(filter_raw_ndvi_feature, args, num_cpus = nb_cores)
    # Collect results, skipping features that yielded no valid data (None)
    filtered_ndvi_by_IDs = [result for result in results if result is not None]
    # Concatenate all id DataFrames into a single DataFrame
    filtered_ndvi = pd.concat(filtered_ndvi_by_IDs)
# Save dataframe
filtered_ndvi.to_csv(save_filtered_dataframe_path, index = False)
return filtered_ndvi
def clean_dataframe_feature(args: tuple) -> pd.DataFrame:
"""
    Apply an anomaly detection algorithm to filter out erroneous values.
    Arguments (packed in args: ``tuple``)
    =====================================
1. ndvi_dataframe_feature: ``pd.DataFrame``
filtered dataframe for the given feature
2. threshold_ratio_deriv: ``int`` ``default = 25``
ratio to apply to the NDVI max value to calculate a threshold for the derivative filter
3. threshold_median: ``float`` ``default = 0.1``
threshold for the moving median filter
4. median_window: ``int`` ``default = 3``
window size for the moving median filter
Returns
=======
1. cleaned_dataframe_feature: ``pd.DataFrame``
resulting dataframe for the given feature
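
    Example
    =======
    A minimal usage sketch; ``feature_df`` is assumed to hold the filtered statistics for one feature:

        cleaned = clean_dataframe_feature((feature_df, 25, 0.1, 3))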
"""
# Unpack arguments
ndvi_dataframe_feature, threshold_ratio_deriv, threshold_median, median_window = args
# Reset dates and indexes
ndvi_dataframe_feature.index = list(ndvi_dataframe_feature['date'])
# Get anomalies
anomalies = find_anomalies(ndvi_dataframe_feature['NDVI'].values, pd.to_datetime(ndvi_dataframe_feature['date'].values), threshold_ratio_deriv, threshold_median, median_window)
# Remove row flagged as anomalies
cleaned_dataframe_feature = ndvi_dataframe_feature.drop(index = ndvi_dataframe_feature['date'].values[anomalies])
# Remove 'count' column which is no longer relevant
cleaned_dataframe_feature.drop(columns='count', inplace = True)
# Re-index by dates
cleaned_dataframe_feature.index = list(cleaned_dataframe_feature['date'])
return cleaned_dataframe_feature
def interpolate_ndvi_feature(args: tuple) -> pd.DataFrame:
"""
    interpolate_ndvi_feature takes in the filtered NDVI ``DataFrame`` for a given shapefile feature and builds a ``DataFrame``
    with daily values between the first and last dates and fills in the missing values with the chosen
    interpolation method (default is ``linear``), using the ``pandas`` library.
    The interpolated ``DataFrame`` is then returned.
    This function is called for multiprocessing.
Arguments (packed in args: ``tuple``)
=====================================
1. id_data: ``pd.DataFrame``
``DataFrame`` containing the filtered NDVI data for a given id
2. interp_method: ``str`` ``default = 'linear'``
        chosen interpolation method, possible values are: ``'linear'`` or ``'pchip'``
3. start_date: ``str``
beginning of the time window to download (format: ``YYYY-MM-DD``)
    4. end_date: ``str``
        end of the time window to download (format: ``YYYY-MM-DD``)
Returns
=======
1. interpolated_id: ``pd.DataFrame``
``pandas DataFrame`` containing interpolated NDVI values for a shapefile feature
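
    Example
    =======
    A minimal usage sketch; ``feature_df`` is assumed to hold the cleaned statistics for one feature:

        daily = interpolate_ndvi_feature((feature_df, 'linear', '2023-01-01', '2023-12-31'))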
"""
# Unpack args
id_data, interp_method, start_date, end_date = args
    # Add start and end dates if they are not in the dataframe, to have data over the whole time window
if id_data.at[id_data.index[0], 'date'] > datetime.strptime(start_date, '%Y-%m-%d'):
id_data = pd.concat([id_data.iloc[0:1], id_data]).reset_index(drop = True)
id_data.at[0, 'date'] = start_date
if id_data.at[id_data.index[-1], 'date'] < datetime.strptime(end_date, '%Y-%m-%d'):
id_data = pd.concat([id_data, id_data.iloc[-1:]]).reset_index(drop = True)
id_data.at[id_data.index[-1], 'date'] = end_date

    # Convert the date column back to datetimes (the inserted bounds are strings)
    id_data['date'] = pd.to_datetime(id_data['date'])
    id_data.index = list(id_data['date'])
    # Resample the data to a daily frequency, fill the voids with NaNs
    upsampled = id_data.reindex(pd.date_range(start = id_data.index.min(), end = id_data.index.max(), freq = 'D'))
    upsampled['date'] = upsampled.index  # Reset date column
    dates = upsampled['date'].values  # Collect up-sampled dates for later indexing
    # Remove date column, interpolation is only performed on numeric values
    upsampled.drop(columns = 'date', inplace = True)
    upsampled.reset_index(drop = True, inplace = True)  # Remove date index

    # Apply interpolation method
    try:
        interpolated_id = upsampled.interpolate(method = interp_method)  # Interpolate
    except ValueError:
        # If the number of points is too low for the given interpolator, use linear interpolation instead
        interpolated_id = upsampled.interpolate(method = 'linear')
# Re-insert dates
interpolated_id.insert(0, 'date', dates, True)
interpolated_id.index = interpolated_id['date'] # Index by dates
    # Only return data between start date and end date
    date_mask = (interpolated_id['date'] >= datetime.strptime(start_date, '%Y-%m-%d')) & (interpolated_id['date'] <= datetime.strptime(end_date, '%Y-%m-%d'))
    interpolated_id = interpolated_id.loc[date_mask]

    return interpolated_id


def interpolate_ndvi_parcel(ndvi_dataframe: Union[pd.DataFrame, str], save_path: str, start_date: str, end_date: str, interp_method: str = 'linear', threshold_ratio_deriv: int = 25, threshold_median: float = 0.1, median_window: int = 3, max_cpu: int = 4) -> pd.DataFrame:
"""
    interpolate_ndvi_parcel takes in the filtered NDVI ``DataFrame`` and builds a ``DataFrame`` with daily values
    between the first and last dates and fills in the missing values with the chosen interpolation
    method (default is ``linear``), using the ``pandas`` library.
    The interpolated ``DataFrame`` is then saved as a ``csv`` file and returned.
    Arguments
    =========
    1. ndvi_dataframe: ``pd.DataFrame`` or ``str``
        ``DataFrame`` containing the filtered NDVI data
    2. save_path: ``str``
        path to save the interpolated ``DataFrame`` csv
    3. start_date: ``str``
        beginning of the time window to download (format: ``YYYY-MM-DD``)
    4. end_date: ``str``
        end of the time window to download (format: ``YYYY-MM-DD``)
    5. interp_method: ``str`` ``default = 'linear'``
        chosen interpolation method, possible values are: ``'linear'`` or ``'pchip'``
    6. threshold_ratio_deriv: ``int`` ``default = 25``
        ratio to apply to the NDVI max value to calculate a threshold for the derivative filter
    7. threshold_median: ``float`` ``default = 0.1``
        threshold for the moving median filter
    8. median_window: ``int`` ``default = 3``
        window size for the moving median filter
    9. max_cpu: ``int`` ``default = 4``
        max number of CPU cores to use for calculation
Returns
=======
1. interpolated_dataframe: ``pd.DataFrame``
``pandas DataFrame`` containing interpolated NDVI values for every shapefile feature
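
    Example
    =======
    A minimal usage sketch (hypothetical paths):

        daily = interpolate_ndvi_parcel('/data/ndvi_filtered.csv', '/data/ndvi_daily.csv', '2023-01-01', '2023-12-31', interp_method = 'pchip')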
"""
# If a string is given, load the DataFrame
    if isinstance(ndvi_dataframe, str):
ndvi_dataframe = pd.read_csv(ndvi_dataframe)
ndvi_dataframe['date'] = pd.to_datetime(ndvi_dataframe['date']) # Transform dates to datetimes
ndvi_dataframe.index = list(ndvi_dataframe['date'])
else:
ndvi_dataframe['date'] = pd.to_datetime(ndvi_dataframe['date']) # Transform dates to datetimes
# Get a list of all feature id values
IDs = list(ndvi_dataframe['id'].drop_duplicates())
IDs.sort()
    # Get number of CPU cores and limit max value (working on a cluster requires os.sched_getaffinity to get the true number of available CPUs,
    # which is not the case on a "personal" computer, hence the use of the min function)
    nb_cores = min([max_cpu, cpu_count(logical = False), len(os.sched_getaffinity(0))])
# Prepare clean_dataframes_feature arguments for multiprocessing
args_clean = [(id_filter, threshold_ratio_deriv, threshold_median, median_window) for _, id_filter in ndvi_dataframe.groupby('id')]
print('\nStarting NDVI DataFrame anomaly detection with %d cores for %d elements...\n' % (nb_cores, len(IDs)))
# Start multiprocessing
    results_clean = p_map(clean_dataframe_feature, args_clean, num_cpus = nb_cores)
# List where cleaned id DataFrames will be stored before concatenation
cleaned_dataframes = []
# Get results
for result in results_clean:
cleaned_dataframes.append(result)
# Concatenate all individual dataframes
cleaned_dataframes = pd.concat(cleaned_dataframes)
# Prepare arguments for multiprocessing
args = [(id_filter, interp_method, start_date, end_date) for _, id_filter in cleaned_dataframes.groupby('id')]
print('\nStarting NDVI DataFrame interpolation with %d cores for %d elements...\n' % (nb_cores, len(IDs)))
# Start multiprocessing
    results = p_map(interpolate_ndvi_feature, args, num_cpus = nb_cores)
# List where filtered id DataFrames will be stored before concatenation
interpolated_dataframes = []
# Get results
for result in results:
interpolated_dataframes.append(result)
# Concatenate all individual dataframes
interpolated_dataframe = pd.concat(interpolated_dataframes)
    # Round float values, count column does not exist if S2 and Landsat datasets are merged
# interpolated_dataframe = interpolated_dataframe.round({'NDVI': 0})
# Round int values for better visualisation of file
interpolated_dataframe['id'] = np.round(interpolated_dataframe['id']).astype(int)
interpolated_dataframe['LC'] = np.round(interpolated_dataframe['LC']).astype(int)
interpolated_dataframe['NDVI'] = np.round(interpolated_dataframe['NDVI']).astype(int)
# Save to csv
interpolated_dataframe.to_csv(save_path, index = False)

    return interpolated_dataframe