diff --git a/Dockerfile b/Dockerfile index 28ee7875d9c80324d48f3b476371d89c1b53bb25..6a8e71257bf9dc1090e70945e251e2b1c6a8b93d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -25,7 +25,7 @@ RUN if $GUI; then \ RUN ln -s /usr/bin/python3 /usr/local/bin/python && ln -s /usr/bin/pip3 /usr/local/bin/pip # NumPy version is conflicting with system's gdal dep and may require venv ARG NUMPY_SPEC="==1.22.*" -RUN pip install --no-cache-dir -U pip wheel mock six future deprecated "numpy$NUMPY_SPEC" \ +RUN pip install --no-cache-dir -U pip wheel mock six future tqdm deprecated "numpy$NUMPY_SPEC" \ && pip install --no-cache-dir --no-deps keras_applications keras_preprocessing # ---------------------------------------------------------------------------- diff --git a/python/otbtf.py b/python/otbtf.py index 860e86a7b30dc182698ec521a341daeb44d00c99..a1cf9bd442896b2d29c3986c876ce82d3ef9b6aa 100644 --- a/python/otbtf.py +++ b/python/otbtf.py @@ -18,17 +18,21 @@ # # ==========================================================================*/ """ -Contains stuff to help working with TensorFlow and geospatial data in the -OTBTF framework. +Contains stuff to help working with TensorFlow and geospatial data in the OTBTF framework. """ +import glob +import json +import os import threading import multiprocessing import time import logging from abc import ABC, abstractmethod +from functools import partial import numpy as np import tensorflow as tf from osgeo import gdal +from tqdm import tqdm # ----------------------------------------------------- Helpers -------------------------------------------------------- @@ -54,8 +58,11 @@ def read_as_np_arr(gdal_ds, as_patches=True): False, the shape is (1, psz_y, psz_x, nb_channels) :return: Numpy array of dim 4 """ - buffer = gdal_ds.ReadAsArray() + gdal_to_np_types = {1: 'uint8', 2: 'uint16', 3: 'int16', 4: 'uint32', 5: 'int32', 6: 'float32', 7: 'float64', + 10: 'complex64', 11: 'complex128'} + gdal_type = gdal_ds.GetRasterBand(1).DataType size_x = gdal_ds.RasterXSize + buffer = gdal_ds.ReadAsArray().astype(gdal_to_np_types[gdal_type]) if len(buffer.shape) == 3: buffer = np.transpose(buffer, axes=(1, 2, 0)) if not as_patches: @@ -64,7 +71,7 @@ def read_as_np_arr(gdal_ds, as_patches=True): else: n_elems = int(gdal_ds.RasterYSize / size_x) size_y = size_x - return np.float32(buffer.reshape((n_elems, size_y, size_x, gdal_ds.RasterCount))) + return buffer.reshape((n_elems, size_y, size_x, gdal_ds.RasterCount)) # -------------------------------------------------- Buffer class ------------------------------------------------------ @@ -167,7 +174,7 @@ class PatchesImagesReader(PatchesReaderBase): :see PatchesReaderBase """ - def __init__(self, filenames_dict: dict, use_streaming=False): + def __init__(self, filenames_dict, use_streaming=False, scalar_dict=None): """ :param filenames_dict: A dict() structured as follow: {src_name1: [src1_patches_image_1.tif, ..., src1_patches_image_N.tif], @@ -175,6 +182,11 @@ class PatchesImagesReader(PatchesReaderBase): ... src_nameM: [srcM_patches_image_1.tif, ..., srcM_patches_image_N.tif]} :param use_streaming: if True, the patches are read on the fly from the disc, nothing is kept in memory. + :param scalar_dict: (optional) a dict containing list of scalars (int, float, str) as follow: + {scalar_name1: ["value_1", ..., "value_N"], + scalar_name2: [value_1, ..., value_N], + ... 
+ scalar_nameM: [value1, ..., valueN]} """ assert len(filenames_dict.values()) > 0 @@ -182,13 +194,18 @@ class PatchesImagesReader(PatchesReaderBase): # gdal_ds dict self.gdal_ds = {key: [gdal_open(src_fn) for src_fn in src_fns] for key, src_fns in filenames_dict.items()} - # check number of patches in each sources - if len({len(ds_list) for ds_list in self.gdal_ds.values()}) != 1: - raise Exception("Each source must have the same number of patches images") - # streaming on/off self.use_streaming = use_streaming + # Scalar dict (e.g. for metadata) + # If the scalars are not numpy.ndarray, convert them + self.scalar_dict = {key: [i if isinstance(i, np.ndarray) else np.asarray(i) for i in scalars] + for key, scalars in scalar_dict.items()} if scalar_dict else {} + + # check number of patches in each sources + if len({len(ds_list) for ds_list in list(self.gdal_ds.values()) + list(self.scalar_dict.values())}) != 1: + raise Exception("Each source must have the same number of patches images") + # gdal_ds check nb_of_patches = {key: 0 for key in self.gdal_ds} self.nb_of_channels = dict() @@ -211,8 +228,8 @@ class PatchesImagesReader(PatchesReaderBase): # if use_streaming is False, we store in memory all patches images if not self.use_streaming: - patches_list = {src_key: [read_as_np_arr(ds) for ds in self.gdal_ds[src_key]] for src_key in self.gdal_ds} - self.patches_buffer = {src_key: np.concatenate(patches_list[src_key], axis=0) for src_key in self.gdal_ds} + self.patches_buffer = {src_key: np.concatenate([read_as_np_arr(ds) for ds in src_ds], axis=0) for + src_key, src_ds in self.gdal_ds.items()} def _get_ds_and_offset_from_index(self, index): offset = index @@ -230,14 +247,20 @@ class PatchesImagesReader(PatchesReaderBase): @staticmethod def _read_extract_as_np_arr(gdal_ds, offset): + gdal_to_np_types = {1: 'uint8', 2: 'uint16', 3: 'int16', 4: 'uint32', 5: 'int32', 6: 'float32', 7: 'float64', + 10: 'complex64', 11: 'complex128'} assert gdal_ds is not None psz = gdal_ds.RasterXSize + gdal_type = gdal_ds.GetRasterBand(1).DataType yoff = int(offset * psz) assert yoff + psz <= gdal_ds.RasterYSize buffer = gdal_ds.ReadAsArray(0, yoff, psz, psz) if len(buffer.shape) == 3: buffer = np.transpose(buffer, axes=(1, 2, 0)) - return np.float32(buffer) + else: # single-band raster + buffer = np.expand_dims(buffer, axis=2) + + return buffer.astype(gdal_to_np_types[gdal_type]) def get_sample(self, index): """ @@ -252,18 +275,19 @@ class PatchesImagesReader(PatchesReaderBase): assert index >= 0 assert index < self.size + i, offset = self._get_ds_and_offset_from_index(index) + res = {src_key: scalar[i] for src_key, scalar in self.scalar_dict.items()} if not self.use_streaming: - res = {src_key: self.patches_buffer[src_key][index, :, :, :] for src_key in self.gdal_ds} + res.update({src_key: arr[index, :, :, :] for src_key, arr in self.patches_buffer.items()}) else: - i, offset = self._get_ds_and_offset_from_index(index) - res = {src_key: self._read_extract_as_np_arr(self.gdal_ds[src_key][i], offset) for src_key in self.gdal_ds} - + res.update({src_key: self._read_extract_as_np_arr(self.gdal_ds[src_key][i], offset) + for src_key in self.gdal_ds}) return res def get_stats(self): """ Compute some statistics for each source. - Depending if streaming is used, the statistics are computed directly in memory, or chunk-by-chunk. + When streaming is used, chunk-by-chunk. Else, the statistics are computed directly in memory. 
:return statistics dict """ @@ -314,6 +338,7 @@ class IteratorBase(ABC): """ Base class for iterators """ + @abstractmethod def __init__(self, patches_reader: PatchesReaderBase): pass @@ -361,17 +386,24 @@ class Dataset: :see Buffer """ - def __init__(self, patches_reader: PatchesReaderBase, buffer_length: int = 128, - Iterator: IteratorBase = RandomIterator): + def __init__(self, patches_reader: PatchesReaderBase = None, buffer_length: int = 128, + Iterator=RandomIterator, max_nb_of_samples=None): """ :param patches_reader: The patches reader instance :param buffer_length: The number of samples that are stored in the buffer :param Iterator: The iterator class used to generate the sequence of patches indices. + :param max_nb_of_samples: Optional, max number of samples to consider """ - # patches reader self.patches_reader = patches_reader - self.size = self.patches_reader.get_size() + + # If necessary, limit the nb of samples + logging.info('Number of samples: %s', self.patches_reader.get_size()) + if max_nb_of_samples and self.patches_reader.get_size() > max_nb_of_samples: + logging.info('Reducing number of samples to %s', max_nb_of_samples) + self.size = max_nb_of_samples + else: + self.size = self.patches_reader.get_size() # iterator self.iterator = Iterator(patches_reader=self.patches_reader) @@ -404,8 +436,21 @@ class Dataset: output_types=self.output_types, output_shapes=self.output_shapes).repeat(1) + def to_tfrecords(self, output_dir, n_samples_per_shard=100, drop_remainder=True): + """ + Save the dataset into TFRecord files + + :param output_dir: output directory + :param n_samples_per_shard: number of samples per TFRecord file + :param drop_remainder: drop remainder samples + """ + tfrecord = TFRecords(output_dir) + tfrecord.ds2tfrecord(self, n_samples_per_shard=n_samples_per_shard, drop_remainder=drop_remainder) + def get_stats(self) -> dict: """ + Compute dataset statistics + :return: the dataset statistics, computed by the patches reader """ with self.mining_lock: @@ -502,8 +547,8 @@ class DatasetFromPatchesImages(Dataset): :see Dataset """ - def __init__(self, filenames_dict: dict, use_streaming: bool = False, buffer_length: int = 128, - Iterator: IteratorBase = RandomIterator): + def __init__(self, filenames_dict, use_streaming=False, buffer_length: int = 128, + Iterator=RandomIterator): """ :param filenames_dict: A dict() structured as follow: {src_name1: [src1_patches_image1, ..., src1_patches_imageN1], @@ -518,3 +563,204 @@ class DatasetFromPatchesImages(Dataset): patches_reader = PatchesImagesReader(filenames_dict=filenames_dict, use_streaming=use_streaming) super().__init__(patches_reader=patches_reader, buffer_length=buffer_length, Iterator=Iterator) + + +class TFRecords: + """ + This class allows to convert Dataset objects to TFRecords and to load them in dataset tensorflows format. 
+ """ + + def __init__(self, path): + """ + :param path: Can be a directory where TFRecords must be saved/loaded or a single TFRecord path + """ + if os.path.isdir(path) or not os.path.exists(path): + self.dirpath = path + os.makedirs(self.dirpath, exist_ok=True) + self.tfrecords_pattern_path = os.path.join(self.dirpath, "*.records") + else: + self.dirpath = os.path.dirname(path) + self.tfrecords_pattern_path = path + self.output_types_file = os.path.join(self.dirpath, "output_types.json") + self.output_shape_file = os.path.join(self.dirpath, "output_shape.json") + self.output_shape = self.load(self.output_shape_file) if os.path.exists(self.output_shape_file) else None + self.output_types = self.load(self.output_types_file) if os.path.exists(self.output_types_file) else None + + @staticmethod + def _bytes_feature(value): + """ + Convert a value to a type compatible with tf.train.Example. + :param value: value + :return a bytes_list from a string / byte. + """ + if isinstance(value, type(tf.constant(0))): + value = value.numpy() # BytesList won't unpack a string from an EagerTensor. + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) + + def ds2tfrecord(self, dataset, n_samples_per_shard=100, drop_remainder=True): + """ + Convert and save samples from dataset object to tfrecord files. + :param dataset: Dataset object to convert into a set of tfrecords + :param n_samples_per_shard: Number of samples per shard + :param drop_remainder: Whether additional samples should be dropped. Advisable if using multiworkers training. + If True, all TFRecords will have `n_samples_per_shard` samples + """ + logging.info("%s samples", dataset.size) + + nb_shards = (dataset.size // n_samples_per_shard) + if not drop_remainder and dataset.size % n_samples_per_shard > 0: + nb_shards += 1 + + self.convert_dataset_output_shapes(dataset) + + def _convert_data(data): + """ + Convert data + """ + data_converted = {} + + for k, d in data.items(): + data_converted[k] = d.name + + return data_converted + + self.save(_convert_data(dataset.output_types), self.output_types_file) + + for i in tqdm(range(nb_shards)): + + if (i + 1) * n_samples_per_shard <= dataset.size: + nb_sample = n_samples_per_shard + else: + nb_sample = dataset.size - i * n_samples_per_shard + + filepath = os.path.join(self.dirpath, f"{i}.records") + with tf.io.TFRecordWriter(filepath) as writer: + for s in range(nb_sample): + sample = dataset.read_one_sample() + serialized_sample = {name: tf.io.serialize_tensor(fea) for name, fea in sample.items()} + features = {name: self._bytes_feature(serialized_tensor) for name, serialized_tensor in + serialized_sample.items()} + tf_features = tf.train.Features(feature=features) + example = tf.train.Example(features=tf_features) + writer.write(example.SerializeToString()) + + @staticmethod + def save(data, filepath): + """ + Save data to pickle format. + :param data: Data to save json format + :param filepath: Output file name + """ + + with open(filepath, 'w') as f: + json.dump(data, f, indent=4) + + @staticmethod + def load(filepath): + """ + Return data from pickle format. + :param filepath: Input file name + """ + with open(filepath, 'r') as f: + return json.load(f) + + def convert_dataset_output_shapes(self, dataset): + """ + Convert and save numpy shape to tensorflow shape. 
+        :param dataset: Dataset object containing output shapes
+        """
+        output_shapes = {}
+
+        for key in dataset.output_shapes.keys():
+            output_shapes[key] = (None,) + dataset.output_shapes[key]
+
+        self.save(output_shapes, self.output_shape_file)
+
+    @staticmethod
+    def parse_tfrecord(example, features_types, target_keys, preprocessing_fn=None, **kwargs):
+        """
+        Parse an example object into a sample dict.
+        :param example: Example object to parse
+        :param features_types: Dict of types for each feature
+        :param target_keys: list of keys of the targets
+        :param preprocessing_fn: Optional. A preprocessing function that takes input, target as args and returns
+                                 a tuple (input_preprocessed, target_preprocessed)
+        :param kwargs: some keyword arguments for preprocessing_fn
+        """
+        read_features = {key: tf.io.FixedLenFeature([], dtype=tf.string) for key in features_types}
+        example_parsed = tf.io.parse_single_example(example, read_features)
+
+        for key in read_features.keys():
+            example_parsed[key] = tf.io.parse_tensor(example_parsed[key], out_type=features_types[key])
+
+        # Differentiating inputs and outputs
+        input_parsed = {key: value for (key, value) in example_parsed.items() if key not in target_keys}
+        target_parsed = {key: value for (key, value) in example_parsed.items() if key in target_keys}
+
+        if preprocessing_fn:
+            input_parsed, target_parsed = preprocessing_fn(input_parsed, target_parsed, **kwargs)
+
+        return input_parsed, target_parsed
+
+    def read(self, batch_size, target_keys, n_workers=1, drop_remainder=True, shuffle_buffer_size=None,
+             preprocessing_fn=None, **kwargs):
+        """
+        Read all TFRecord files matching the pattern and convert the data to a TensorFlow dataset.
+        :param batch_size: Size of tensorflow batch
+        :param target_keys: Keys of the target, e.g. ['s2_out']
+        :param n_workers: number of workers, e.g. 4 if using 4 GPUs,
+                          e.g. 12 if using 3 nodes of 4 GPUs
+        :param drop_remainder: whether the last batch should be dropped in the case it has fewer than
+                               `batch_size` elements. True is advisable when training on multiple workers,
+                               False is advisable when evaluating metrics so that all samples are used
+        :param shuffle_buffer_size: if None, shuffle is not used. Else, blocks of shuffle_buffer_size
+                                    elements are shuffled using uniform random.
+        :param preprocessing_fn: Optional. A preprocessing function that takes input, target as args and returns
+                                 a tuple (input_preprocessed, target_preprocessed)
+        :param kwargs: some keyword arguments for preprocessing_fn
+        """
+        options = tf.data.Options()
+        if shuffle_buffer_size:
+            options.experimental_deterministic = False  # disable order, increase speed
+        options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.AUTO  # for multiworker
+        parse = partial(self.parse_tfrecord, features_types=self.output_types, target_keys=target_keys,
+                        preprocessing_fn=preprocessing_fn, **kwargs)
+
+        # TODO: to be investigated:
+        # 1/ num_parallel_reads useful ? I/O bottleneck or not ?
+        # 2/ num_parallel_calls=tf.data.experimental.AUTOTUNE useful ?
+        # 3/ shuffle or not shuffle ?
+        matching_files = glob.glob(self.tfrecords_pattern_path)
+        logging.info('Searching TFRecords in %s...', self.tfrecords_pattern_path)
+        logging.info('Number of matching TFRecords: %s', len(matching_files))
+        matching_files = matching_files[:n_workers * (len(matching_files) // n_workers)]  # files multiple of workers
+        nb_matching_files = len(matching_files)
+        if nb_matching_files == 0:
+            raise Exception("At least one worker has no TFRecord file in {}. Please ensure that the number of TFRecord "
+                            "files is greater than or equal to the number of workers!".format(self.tfrecords_pattern_path))
+        logging.info('Reducing number of records to: %s', nb_matching_files)
+        dataset = tf.data.TFRecordDataset(matching_files)  # , num_parallel_reads=2)  # interleaves reads from xxx files
+        dataset = dataset.with_options(options)  # uses data as soon as it streams in, rather than in its original order
+        dataset = dataset.map(parse, num_parallel_calls=tf.data.experimental.AUTOTUNE)
+        if shuffle_buffer_size:
+            dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
+        dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
+        dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
+        # TODO: check whether prefetch should be applied before batch, cf. https://keras.io/examples/keras_recipes/tfrecord/
+
+        return dataset
+
+    def read_one_sample(self, target_keys):
+        """
+        Read a single sample from the first TFRecord file matching the pattern.
+        :param target_keys: Keys of the target, e.g. ['s2_out']
+        """
+        matching_files = glob.glob(self.tfrecords_pattern_path)
+        one_file = matching_files[0]
+        parse = partial(self.parse_tfrecord, features_types=self.output_types, target_keys=target_keys)
+        dataset = tf.data.TFRecordDataset(one_file)
+        dataset = dataset.map(parse)
+        dataset = dataset.batch(1)
+
+        sample = iter(dataset).get_next()
+        return sample
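
Usage sketch: a minimal example of how the new Dataset.to_tfrecords() and TFRecords.read() entry points added by this patch could be chained. The patch image file names and the "labels" target source below are hypothetical placeholders.

    from otbtf import DatasetFromPatchesImages, TFRecords

    # Build a dataset from patches images (hypothetical file names), then dump it to TFRecord shards
    dataset = DatasetFromPatchesImages(filenames_dict={"xs": ["xs_patches_1.tif"],
                                                       "labels": ["labels_patches_1.tif"]})
    dataset.to_tfrecords(output_dir="/tmp/tfrecords", n_samples_per_shard=100)

    # Later (e.g. on the training side), re-read the shards as a batched tf.data.Dataset
    tfrecords = TFRecords("/tmp/tfrecords")
    tf_dataset = tfrecords.read(batch_size=8, target_keys=["labels"], shuffle_buffer_size=1000)

Each element of the returned dataset is an (inputs, targets) tuple of dicts keyed by source name, as produced by parse_tfrecord, so it can be passed directly to a keras model with matching input/output names.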