Module ktrain.tabular.dataset

from ..dataset import SequenceDataset
from ..imports import *


class TabularDataset(SequenceDataset):
    def __init__(
        self, df, cat_columns, cont_columns, label_columns, batch_size=32, shuffle=False
    ):
        # error checks
        if not isinstance(df, pd.DataFrame):
            raise ValueError("df must be a pandas DataFrame")
        all_columns = cat_columns + cont_columns + label_columns
        missing_columns = []
        for col in all_columns:
            if col not in df.columns:
                missing_columns.append(col)
        if len(missing_columns) > 0:
            raise ValueError("df is missing these columns: %s" % (missing_columns))

        # set variables
        super().__init__(batch_size=batch_size)
        self.indices = np.arange(df.shape[0])
        self.df = df
        self.cat_columns = cat_columns
        self.cont_columns = cont_columns
        self.label_columns = label_columns
        self.shuffle = shuffle

    def __len__(self):
        return math.ceil(self.df.shape[0] / self.batch_size)

    def __getitem__(self, idx):
        # select the row indices belonging to batch idx
        inds = self.indices[idx * self.batch_size : (idx + 1) * self.batch_size]
        batch_x = []
        df = self.df[self.cat_columns + self.cont_columns].iloc[inds]
        # categorical columns: integer category codes shifted by +1 so that
        # missing values (pandas code -1) map to 0
        for cat_name in self.cat_columns:
            codes = (
                np.stack(
                    [c.cat.codes.values for n, c in df[[cat_name]].items()], 1
                ).astype(np.int64)
                + 1
            )
            batch_x.append(codes)
        # continuous columns: stacked into a single float32 matrix
        if len(self.cont_columns) > 0:
            conts = np.stack(
                [c.astype("float32").values for n, c in df[self.cont_columns].items()],
                1,
            )
            batch_x.append(conts)
        batch_y = self.df[self.label_columns].iloc[inds].values
        # a single array if there is only one input block, otherwise a tuple of arrays
        batch_x = batch_x[0] if len(batch_x) == 1 else tuple(batch_x)
        return batch_x, batch_y

    def nsamples(self):
        return self.df.shape[0]

    def get_y(self):
        return self.df[self.label_columns].values

    def on_epoch_end(self):
        if self.shuffle:
            np.random.shuffle(self.indices)

    def xshape(self):
        return self.df.shape

    def nclasses(self):
        return self.get_y().shape[1]
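
Usage sketch (the DataFrame, column names, and values below are hypothetical and only for illustration). Because __getitem__ reads category codes via .cat.codes, the categorical columns are expected to already have pandas category dtype:

import pandas as pd

from ktrain.tabular.dataset import TabularDataset

df = pd.DataFrame(
    {
        "color": pd.Categorical(["red", "blue", "red", "green"]),  # categorical feature
        "size": [1.0, 2.5, 3.0, 0.5],                              # continuous feature
        "label": [0, 1, 0, 1],                                     # target column
    }
)

ds = TabularDataset(
    df,
    cat_columns=["color"],
    cont_columns=["size"],
    label_columns=["label"],
    batch_size=2,
    shuffle=True,
)

batch_x, batch_y = ds[0]
# batch_x is a tuple: (int64 category codes of shape (2, 1), float32 features of shape (2, 1))
# batch_y holds the label values with shape (2, 1)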

Classes

class TabularDataset (df, cat_columns, cont_columns, label_columns, batch_size=32, shuffle=False)
Base class for custom datasets in ktrain.

If a subclass of Dataset implements a to_tfdataset method
that converts the data to a tf.Dataset, then this will be
invoked by Learner instances just prior to training, so
fit() will train using a tf.Dataset representation of your data.
Sequence methods such as __getitem__ and __len__
must still be implemented.

The signature of to_tfdataset is as follows:

def to_tfdataset(self, training=True)

See ktrain.text.preprocess.TransformerDataset as an example.
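
For illustration only, a hedged sketch of what such a to_tfdataset override could look like for this class; MyTabularDataset and the tf.data pipeline below are assumptions made for this sketch, not ktrain's own implementation (the real reference is ktrain.text.preprocess.TransformerDataset, as noted above):

import tensorflow as tf

class MyTabularDataset(TabularDataset):  # hypothetical subclass, for illustration only
    def to_tfdataset(self, training=True):
        # Describe the batches this Sequence already yields so tf.data can consume them.
        def spec(arr):
            return tf.TensorSpec(shape=(None,) + arr.shape[1:], dtype=arr.dtype)

        x0, y0 = self[0]
        x_sig = tuple(spec(a) for a in x0) if isinstance(x0, tuple) else spec(x0)

        def gen():
            for i in range(len(self)):
                yield self[i]

        ds = tf.data.Dataset.from_generator(gen, output_signature=(x_sig, spec(y0)))
        # Repeat indefinitely during training so Keras can draw steps_per_epoch batches.
        return ds.repeat() if training else ds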

Ancestors

ktrain.dataset.SequenceDataset

Methods

def get_y(self)

Returns the label values for all rows of the DataFrame.

def get_y(self):
    return self.df[self.label_columns].values

def nsamples(self)

Returns the number of samples (rows) in the DataFrame.

def nsamples(self):
    return self.df.shape[0]

def on_epoch_end(self)

Method called at the end of every epoch.

def on_epoch_end(self):
    if self.shuffle:
        np.random.shuffle(self.indices)
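
As a quick reference, assuming a TabularDataset instance ds like the one constructed in the hypothetical example near the top of this page:

n_batches = len(ds)     # math.ceil(nsamples / batch_size), here 2
n_rows = ds.nsamples()  # number of rows in the DataFrame, here 4
labels = ds.get_y()     # label values as an array of shape (4, 1)
ds.on_epoch_end()       # reshuffles the row indices because shuffle=True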

Inherited members