gluonts.dataset.arrow.enc module#

class gluonts.dataset.arrow.enc.ArrowEncoder(columns: List[str], ndarray_columns: Set[str] = <factory>, flatten_arrays: bool = True)[source]#

Bases: object

columns: List[str]#
encode(entry: dict)[source]#
flatten_arrays: bool = True#
classmethod infer(sample: dict, flatten_arrays=True)[source]#
ndarray_columns: Set[str]#
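
A minimal sketch of driving the encoder by hand. The entry fields "start" and "target" are the conventional GluonTS names, used here purely for illustration; they are not requirements of the class.

    import numpy as np
    from gluonts.dataset.arrow.enc import ArrowEncoder

    # Illustrative entry; any dict of Arrow-compatible values works.
    entry = {
        "start": "2023-01-01",
        "target": np.arange(30, dtype=np.float32),
    }

    # infer() inspects a sample entry to record its column names and
    # which of them hold ndarrays.
    encoder = ArrowEncoder.infer(entry, flatten_arrays=True)

    # encode() maps an entry to a flat dict, presumably ready to be
    # assembled into a pyarrow record batch.
    encoded = encoder.encode(entry)
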
class gluonts.dataset.arrow.enc.ArrowWriter(stream: bool = False, suffix: str = '.arrow', flatten_arrays: bool = True, metadata: Optional[dict] = None)[source]#

Bases: gluonts.dataset.DatasetWriter

flatten_arrays: bool = True#
metadata: Optional[dict] = None#
stream: bool = False#
suffix: str = '.arrow'#
write_to_file(dataset: gluonts.dataset.Dataset, path: pathlib.Path) → None[source]#
write_to_folder(dataset: gluonts.dataset.Dataset, folder: pathlib.Path, name: Optional[str] = None) → None[source]#
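
A usage sketch, assuming the dataset is any iterable of entry dicts (which satisfies gluonts.dataset.Dataset). Entry contents and paths are illustrative. With stream=False the writer presumably produces the random-access Arrow file format; stream=True would select the streaming IPC format instead.

    from pathlib import Path

    import numpy as np
    from gluonts.dataset.arrow.enc import ArrowWriter

    dataset = [
        {"start": "2023-01-01", "target": np.random.rand(50).astype(np.float32)}
        for _ in range(10)
    ]

    writer = ArrowWriter(stream=False, flatten_arrays=True)
    writer.write_to_file(dataset, Path("train.arrow"))

    # write_to_folder names the file from `name` plus the writer's
    # suffix (".arrow" by default); the folder is assumed to exist.
    writer.write_to_folder(dataset, Path("data"), name="train")
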
class gluonts.dataset.arrow.enc.ParquetWriter(suffix: str = '.parquet', flatten_arrays: bool = True, metadata: Optional[dict] = None)[source]#

Bases: gluonts.dataset.DatasetWriter

flatten_arrays: bool = True#
metadata: Optional[dict] = None#
suffix: str = '.parquet'#
write_to_file(dataset: gluonts.dataset.Dataset, path: pathlib.Path) → None[source]#
write_to_folder(dataset: gluonts.dataset.Dataset, folder: pathlib.Path, name: Optional[str] = None) → None[source]#
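
ParquetWriter mirrors ArrowWriter but emits Parquet files; there is no stream option since Parquet has a single on-disk format. A sketch with illustrative entries and metadata:

    from pathlib import Path

    import numpy as np
    from gluonts.dataset.arrow.enc import ParquetWriter

    dataset = [
        {"start": "2023-01-01", "target": np.random.rand(50).astype(np.float32)}
        for _ in range(10)
    ]

    # `metadata`, if given, is presumably attached to the written file;
    # the key/value here is made up for the example.
    writer = ParquetWriter(metadata={"freq": "D"})
    writer.write_to_file(dataset, Path("train.parquet"))
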
gluonts.dataset.arrow.enc.into_arrow_batches(dataset, batch_size=1024, flatten_arrays=True)[source]#
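
Judging from the signature, into_arrow_batches converts a dataset into an iterator of pyarrow record batches of at most batch_size rows each; a sketch under that assumption:

    import numpy as np
    from gluonts.dataset.arrow.enc import into_arrow_batches

    dataset = [
        {"start": "2023-01-01", "target": np.random.rand(50).astype(np.float32)}
        for _ in range(3000)
    ]

    # Expect batches of up to 1024 rows (3000 rows -> 3 batches).
    for batch in into_arrow_batches(dataset, batch_size=1024):
        print(batch.num_rows)
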
gluonts.dataset.arrow.enc.write_dataset(Writer, dataset, path, metadata=None, batch_size=1024, flatten_arrays=True)[source]#
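
write_dataset looks like the shared implementation behind the writer classes above. The Writer argument is assumed here to be a pyarrow writer class with a (sink, schema) constructor, such as pyarrow.RecordBatchFileWriter; this is an inference from the surrounding API, not documented behavior.

    import numpy as np
    import pyarrow as pa
    from gluonts.dataset.arrow.enc import write_dataset

    dataset = [
        {"start": "2023-01-01", "target": np.random.rand(50).astype(np.float32)}
        for _ in range(100)
    ]

    # Assumption: Writer is a pyarrow writer class taking (sink, schema),
    # e.g. pa.RecordBatchFileWriter for the random-access Arrow format.
    write_dataset(
        pa.RecordBatchFileWriter,
        dataset,
        "train.arrow",
        metadata={"freq": "D"},
        batch_size=1024,
    )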