Module ktrain.vision

Expand source code
from .data import (
    get_data_aug,
    images_from_array,
    images_from_csv,
    images_from_fname,
    images_from_folder,
    preprocess_csv,
    preview_data_aug,
    show_image,
    show_random_images,
)
from .models import (
    image_classifier,
    image_regression_model,
    print_image_classifiers,
    print_image_regression_models,
)
from .predictor import ImagePredictor

__all__ = [
    "image_classifier",
    "image_regression_model",
    "print_image_classifiers",
    "print_image_regression_models",
    "images_from_folder",
    "images_from_csv",
    "images_from_array",
    "images_from_fname",
    "get_data_aug",
    "preprocess_csv",
    "ImagePredictor",
    "show_image",
    "show_random_images",
    "preview_data_aug",
]

Sub-modules

ktrain.vision.caption
ktrain.vision.data
ktrain.vision.learner
ktrain.vision.models
ktrain.vision.object_detection
ktrain.vision.predictor
ktrain.vision.preprocessor
ktrain.vision.wrn

Functions

def get_data_aug(rotation_range=40, zoom_range=0.2, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=False, vertical_flip=False, featurewise_center=True, featurewise_std_normalization=True, samplewise_center=False, samplewise_std_normalization=False, rescale=None, **kwargs)
This function is a thin wrapper around ImageDataGenerator
with reasonable defaults for data augmentation.
It returns an ImageDataGenerator configured for
data augmentation and data normalization.
Parameters can be adjusted by the caller.
Note that the ktrain.vision.models.image_classifier
function may adjust these as needed.
Expand source code
def get_data_aug(
    rotation_range=40,
    zoom_range=0.2,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=False,
    vertical_flip=False,
    featurewise_center=True,
    featurewise_std_normalization=True,
    samplewise_center=False,
    samplewise_std_normalization=False,
    rescale=None,
    **kwargs
):
    """
    ```
    This function is a thin wrapper around ImageDataGenerator
    with reasonable defaults for data augmentation.
    It returns an ImageDataGenerator configured for
    data augmentation and data normalization.
    Parameters can be adjusted by the caller.
    Note that the ktrain.vision.models.image_classifier
    function may adjust these as needed.
    ```
    """

    data_aug = keras.preprocessing.image.ImageDataGenerator(
        rotation_range=rotation_range,
        zoom_range=zoom_range,
        width_shift_range=width_shift_range,
        height_shift_range=height_shift_range,
        horizontal_flip=horizontal_flip,
        vertical_flip=vertical_flip,
        featurewise_center=featurewise_center,
        featurewise_std_normalization=featurewise_std_normalization,
        samplewise_center=samplewise_center,
        samplewise_std_normalization=samplewise_std_normalization,
        rescale=rescale,
        **kwargs
    )
    return data_aug
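
Example: a minimal sketch of customizing augmentation and passing it to a data loader. The "data/" directory (with train/ and test/ subfolders) is a hypothetical path used only for illustration.

from ktrain import vision as vis

# enable horizontal flips on top of the defaults and disable rotation
data_aug = vis.get_data_aug(horizontal_flip=True, rotation_range=0)

# the generator is applied to training batches by the data-loading functions
(train_data, val_data, preproc) = vis.images_from_folder("data/", data_aug=data_aug)
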
def image_classifier(name, train_data, val_data=None, freeze_layers=None, metrics=None, optimizer_name=<keras.optimizers.optimizer_v2.adam.Adam object>, multilabel=None, pt_fc=[], pt_ps=[], verbose=1)
Returns a pre-defined/pre-trained model ready to be trained/fine-tuned
for multi-class classification. By default, all layers are
trainable/unfrozen.


Args:
    name (string): one of the models shown by ktrain.vision.print_image_classifiers
    train_data (image.Iterator): train data. Note: Will be manipulated here!
    val_data (image.Iterator): validation data.  Note: Will be manipulated here!
    freeze_layers (int):  number of beginning layers to make untrainable
                        If None, then all layers except new Dense layers
                        will be frozen/untrainable.
    metrics(list): List of metrics to use.  If None: 'accuracy' is used for binary/multiclass,
                   'binary_accuracy' is used for multilabel classification, and 'mae' is used for regression
    optimizer_name(str|obj): name of Keras optimizer (e.g., 'adam', 'sgd') or instance of keras Optimizer
    multilabel(bool):  If True, model will be built to support
                       multilabel classification (labels are not mutually exclusive).
                       If False, a binary/multiclass classification model will be returned.
                       If None, multilabel status will be inferred from data.
    pt_fc (list of ints): number of hidden units in extra Dense layers
                            before final Dense layer of pretrained model.
                            Only takes effect if name in PRETRAINED_MODELS
    pt_ps (list of floats): dropout probabilities to use before
                            each extra Dense layer in pretrained model.
                            Only takes effect if name in PRETRAINED_MODELS
    verbose (int):         verbosity
Return:
    model(Model):  the compiled model ready to be fine-tuned/trained

Expand source code
def image_classifier(
    name,
    train_data,
    val_data=None,
    freeze_layers=None,
    metrics=None,
    optimizer_name=U.DEFAULT_OPT,
    multilabel=None,
    pt_fc=[],
    pt_ps=[],
    verbose=1,
):
    """
    ```
    Returns a pre-defined/pre-trained model ready to be trained/fine-tuned
    for multi-class classification. By default, all layers are
    trainable/unfrozen.


    Args:
        name (string): one of the models shown by ktrain.vision.print_image_classifiers
        train_data (image.Iterator): train data. Note: Will be manipulated here!
        val_data (image.Iterator): validation data.  Note: Will be manipulated here!
        freeze_layers (int):  number of beginning layers to make untrainable
                            If None, then all layers except new Dense layers
                            will be frozen/untrainable.
        metrics(list): List of metrics to use.  If None: 'accuracy' is used for binary/multiclass,
                       'binary_accuracy' is used for multilabel classification, and 'mae' is used for regression
        optimizer_name(str|obj): name of Keras optimizer (e.g., 'adam', 'sgd') or instance of keras Optimizer
        multilabel(bool):  If True, model will be built to support
                           multilabel classification (labels are not mutually exclusive).
                           If False, a binary/multiclass classification model will be returned.
                           If None, multilabel status will be inferred from data.
        pt_fc (list of ints): number of hidden units in extra Dense layers
                                before final Dense layer of pretrained model.
                                Only takes effect if name in PRETRAINED_MODELS
        pt_ps (list of floats): dropout probabilities to use before
                                each extra Dense layer in pretrained model.
                                Only takes effect if name in PRETRAINED_MODELS
        verbose (int):         verbosity
    Return:
        model(Model):  the compiled model ready to be fine-tuned/trained

    ```
    """
    return image_model(
        name,
        train_data,
        val_data=val_data,
        freeze_layers=freeze_layers,
        metrics=metrics,
        optimizer_name=optimizer_name,
        multilabel=multilabel,
        pt_fc=pt_fc,
        pt_ps=pt_ps,
        verbose=verbose,
    )
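
Example: an end-to-end sketch of building and training a classifier. The dataset folder, learning rate, and epoch count are illustrative assumptions; ktrain.get_learner and fit_onecycle come from the top-level ktrain package, and 'pretrained_resnet50' is one of the names typically listed by print_image_classifiers.

import ktrain
from ktrain import vision as vis

# load data from a hypothetical folder laid out as data/train/<class>/ and data/test/<class>/
(train_data, val_data, preproc) = vis.images_from_folder("data/")

# build a pre-trained ResNet50-based classifier ready for fine-tuning
model = vis.image_classifier("pretrained_resnet50", train_data, val_data=val_data)

# wrap model and data in a Learner and train with the 1cycle policy
learner = ktrain.get_learner(model, train_data=train_data, val_data=val_data, batch_size=32)
learner.fit_onecycle(1e-4, 3)  # learning rate and epochs chosen arbitrarily for illustration
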
def image_regression_model(name, train_data, val_data=None, freeze_layers=None, metrics=['mae'], optimizer_name=<keras.optimizers.optimizer_v2.adam.Adam object>, pt_fc=[], pt_ps=[], verbose=1)
Returns a pre-defined/pre-trained model ready to be trained/fine-tuned
for image regression (i.e., predicting numeric values). By default, all layers are
trainable/unfrozen.


Args:
    name (string): one of the models shown by ktrain.vision.print_image_regression_models
    train_data (image.Iterator): train data. Note: Will be manipulated here!
    val_data (image.Iterator): validation data.  Note: Will be manipulated here!
    freeze_layers (int):  number of beginning layers to make untrainable
                        If None, then all layers except new Dense layers
                        will be frozen/untrainable.
    metrics (list):  metrics to use
    optimizer_name(str): name of Keras optimizer (e.g., 'adam', 'sgd')
    pt_fc (list of ints): number of hidden units in extra Dense layers
                            before final Dense layer of pretrained model.
                            Only takes effect if name in PRETRAINED_MODELS
    pt_ps (list of floats): dropout probabilities to use before
                            each extra Dense layer in pretrained model.
                            Only takes effect if name in PRETRAINED_MODELS
    verbose (int):         verbosity
Return:
    model(Model):  the compiled model ready to be fine-tuned/trained

Expand source code
def image_regression_model(
    name,
    train_data,
    val_data=None,
    freeze_layers=None,
    metrics=["mae"],
    optimizer_name=U.DEFAULT_OPT,
    pt_fc=[],
    pt_ps=[],
    verbose=1,
):
    """
    ```
    Returns a pre-defined/pre-trained model ready to be trained/fine-tuned
    for image regression (i.e., predicting numeric values). By default, all layers are
    trainable/unfrozen.


    Args:
        name (string): one of the models shown by ktrain.vision.print_image_regression_models
        train_data (image.Iterator): train data. Note: Will be manipulated here!
        val_data (image.Iterator): validation data.  Note: Will be manipulated here!
        freeze_layers (int):  number of beginning layers to make untrainable
                            If None, then all layers except new Dense layers
                            will be frozen/untrainable.
        metrics (list):  metrics to use
        optimizer_name(str): name of Keras optimizer (e.g., 'adam', 'sgd')
        pt_fc (list of ints): number of hidden units in extra Dense layers
                                before final Dense layer of pretrained model.
                                Only takes effect if name in PRETRAINED_MODELS
        pt_ps (list of floats): dropout probabilities to use before
                                each extra Dense layer in pretrained model.
                                Only takes effect if name in PRETRAINED_MODELS
        verbose (int):         verbosity
    Return:
        model(Model):  the compiled model ready to be fine-tuned/trained

    ```
    """

    return image_model(
        name,
        train_data,
        val_data=val_data,
        freeze_layers=freeze_layers,
        metrics=metrics,
        optimizer_name=optimizer_name,
        multilabel=False,
        pt_fc=pt_fc,
        pt_ps=pt_ps,
        verbose=verbose,
    )
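
Example: a sketch of an image regression workflow (e.g., predicting age from a photo). The folder and filename pattern are hypothetical; the regex below captures the leading number in names like "25_0001.jpg" as the numeric target.

from ktrain import vision as vis

# images_from_fname extracts the target from each filename via the supplied pattern
(train_data, val_data, preproc) = vis.images_from_fname(
    "faces/", pattern=r"(\d+)_\d+.jpg$", is_regression=True, random_state=42
)

# compiled with 'mae' by default and with multilabel forced to False internally
model = vis.image_regression_model("pretrained_resnet50", train_data, val_data=val_data)
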
def images_from_array(x_train, y_train, validation_data=None, val_pct=0.1, random_state=None, data_aug=None, classes=None, class_names=None, is_regression=False)
Returns image generator (Iterator instance) from training
and validation data in the form of NumPy arrays.
This function only supports image classification.
For image regression, please use images_from_df.

Args:
  x_train(numpy.ndarray):  training data
  y_train(numpy.ndarray):  labels must either be:
                           1. one-hot (or multi-hot) encoded arrays
                           2. integer values representing the label
  validation_data (tuple): tuple of numpy.ndarrays for validation data.
                           labels should be in one of the formats listed above.
  val_pct(float): percentage of training data to use for validation if validation_data is None
  random_state(int): random state to use for splitting data
  data_aug(ImageDataGenerator):  a keras.preprocessing.image.ImageDataGenerator
  classes(str): old name for class_names - should no longer be used
  class_names(list): list of strings to use as class names
  is_regression(bool): If True, task is treated as regression.
                       Used when there is a single column of numeric values and
                       the numeric values should be treated as numeric targets rather than class labels
Returns:
  batches: a tuple consisting of a training Iterator, a validation Iterator (or None), and an ImagePreprocessor instance
Expand source code
def images_from_array(
    x_train,
    y_train,
    validation_data=None,
    val_pct=0.1,
    random_state=None,
    data_aug=None,
    classes=None,
    class_names=None,
    is_regression=False,
):
    """
    ```
    Returns image generator (Iterator instance) from training
    and validation data in the form of NumPy arrays.
    This function only supports image classification.
    For image regression, please use images_from_df.

    Args:
      x_train(numpy.ndarray):  training data
      y_train(numpy.ndarray):  labels must either be:
                               1. one-hot (or multi-hot) encoded arrays
                               2. integer values representing the label
      validation_data (tuple): tuple of numpy.ndarrays for validation data.
                               labels should be in one of the formats listed above.
      val_pct(float): percentage of training data to use for validation if validation_data is None
      random_state(int): random state to use for splitting data
      data_aug(ImageDataGenerator):  a keras.preprocessing.image.ImageDataGenerator
      classes(str): old name for class_names - should no longer be used
      class_names(list): list of strings to use as class names
      is_regression(bool): If True, task is treated as regression.
                           Used when there is a single column of numeric values and
                           the numeric values should be treated as numeric targets rather than class labels
    Returns:
      batches: a tuple consisting of a training Iterator, a validation Iterator (or None), and an ImagePreprocessor instance
    ```
    """
    if classes is not None:
        raise ValueError('Please use class_names argument instead of "classes".')
    if class_names and is_regression:
        warnings.warn(
            "is_regression=True, but class_names is not empty.  Task being treated as regression."
        )

    # split out validation set if necessary
    if validation_data:
        x_test = validation_data[0]
        y_test = validation_data[1]
    elif val_pct is not None and val_pct > 0:
        x_train, x_test, y_train, y_test = train_test_split(
            x_train, y_train, test_size=val_pct, random_state=random_state
        )
    else:
        x_test = None
        y_test = None

    # transform labels
    ytrans = U.YTransform(class_names=class_names if not is_regression else [])
    y_train = ytrans.apply_train(y_train)
    y_test = ytrans.apply_test(y_test)
    class_names = ytrans.get_classes()

    # train and test data generators
    (train_datagen, test_datagen) = process_datagen(data_aug, train_array=x_train)

    # Image preprocessor
    preproc = ImagePreprocessor(
        test_datagen, class_names, target_size=None, color_mode=None
    )

    # training data
    batches_tr = train_datagen.flow(x_train, y_train, shuffle=True)

    # validation data
    batches_te = None
    if x_test is not None and y_test is not None:
        batches_te = test_datagen.flow(x_test, y_test, shuffle=False)
    return (batches_tr, batches_te, preproc)
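
Example: a minimal sketch using a small synthetic NumPy dataset; the array shapes, labels, and class names are arbitrary illustrations.

import numpy as np
from ktrain import vision as vis

# 100 random RGB images (32x32) with integer labels 0/1
x = np.random.rand(100, 32, 32, 3)
y = np.random.randint(0, 2, size=100)

(train_data, val_data, preproc) = vis.images_from_array(
    x, y, val_pct=0.2, random_state=42, class_names=["negative", "positive"]
)
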
def images_from_csv(train_filepath, image_column, label_columns=[], directory=None, suffix='', val_filepath=None, is_regression=False, target_size=(224, 224), color_mode='rgb', data_aug=None, val_pct=0.1, random_state=None)
Returns image generator (Iterator instance).
Assumes output will be 2D one-hot-encoded labels for categorization.
Note: This function preprocesses the input in preparation
      for a ResNet50 model.

Args:
train_filepath (string): path to training dataset in CSV format with header row
image_column (string): name of column containing the filenames of images
                       If values in image_column do not have a file extension,
                       the extension should be supplied with suffix argument.
                       If values in image_column are not full file paths,
                       then the path to directory containing images should be supplied
                       as directory argument.

label_columns(list or str): list or str representing the columns that store labels
                            Labels can be in any one of the following formats:
                             1. a single column of string (or integer) labels

                               image_fname,label
                               -----------------
                               image01,cat
                               image02,dog

                            2. multiple columns for one-hot-encoded labels
                               image_fname,cat,dog
                               image01,1,0
                               image02,0,1

                            3. a single column of numeric values for image regression
                               image_fname,age
                               -----------------
                               image01,68
                               image02,18

directory (string): path to directory containing images
                    not required if image_column contains full filepaths
suffix(str): suffix appended to each entry in image_column.
             Used when the filenames in image_column do not contain file extensions.
             The extension in suffix should include ".".
val_filepath (string): path to validation dataset in CSV format
is_regression(bool): If True, task is treated as regression.
                     Used when there is a single column of numeric values and
                     the numeric values should be treated as numeric targets rather than class labels
target_size (tuple):  image dimensions
color_mode (string):  color mode
data_aug(ImageDataGenerator):  a keras.preprocessing.image.ImageDataGenerator
                              for data augmentation
val_pct(float):  proportion of training data to be used for validation
                 only used if val_filepath is None
random_state(int): random seed for train/test split

Returns:
batches: a tuple of two Iterators (train and validation) and an ImagePreprocessor instance
Expand source code
def images_from_csv(
    train_filepath,
    image_column,
    label_columns=[],
    directory=None,
    suffix="",
    val_filepath=None,
    is_regression=False,
    target_size=(224, 224),
    color_mode="rgb",
    data_aug=None,
    val_pct=0.1,
    random_state=None,
):
    """
    ```
    Returns image generator (Iterator instance).
    Assumes output will be 2D one-hot-encoded labels for categorization.
    Note: This function preprocesses the input in preparation
          for a ResNet50 model.

    Args:
    train_filepath (string): path to training dataset in CSV format with header row
    image_column (string): name of column containing the filenames of images
                           If values in image_column do not have a file extension,
                           the extension should be supplied with suffix argument.
                           If values in image_column are not full file paths,
                           then the path to directory containing images should be supplied
                           as directory argument.

    label_columns(list or str): list or str representing the columns that store labels
                                Labels can be in any one of the following formats:
                                 1. a single column of string (or integer) labels

                                   image_fname,label
                                   -----------------
                                   image01,cat
                                   image02,dog

                                2. multiple columns for one-hot-encoded labels
                                   image_fname,cat,dog
                                   image01,1,0
                                   image02,0,1

                                3. a single column of numeric values for image regression
                                   image_fname,age
                                   -----------------
                                   image01,68
                                   image02,18

    directory (string): path to directory containing images
                        not required if image_column contains full filepaths
    suffix(str): suffix appended to each entry in image_column.
                 Used when the filenames in image_column do not contain file extensions.
                 The extension in suffix should include ".".
    val_filepath (string): path to validation dataset in CSV format
    is_regression(bool): If True, task is treated as regression.
                         Used when there is a single column of numeric values and
                         the numeric values should be treated as numeric targets rather than class labels
    target_size (tuple):  image dimensions
    color_mode (string):  color mode
    data_aug(ImageDataGenerator):  a keras.preprocessing.image.ImageDataGenerator
                                  for data augmentation
    val_pct(float):  proportion of training data to be used for validation
                     only used if val_filepath is None
    random_state(int): random seed for train/test split

    Returns:
    batches: a tuple of two Iterators (train and validation) and an ImagePreprocessor instance
    ```
    """

    # convert to dataframes
    train_df = pd.read_csv(train_filepath)
    val_df = None
    if val_filepath is not None:
        val_df = pd.read_csv(val_filepath)

    return images_from_df(
        train_df,
        image_column,
        label_columns=label_columns,
        directory=directory,
        suffix=suffix,
        val_df=val_df,
        is_regression=is_regression,
        target_size=target_size,
        color_mode=color_mode,
        data_aug=data_aug,
        val_pct=val_pct,
        random_state=random_state,
    )
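
Example: a sketch assuming a hypothetical "train.csv" with an "image_fname" column (filenames without extensions) and a "label" column, with the image files stored in an "images/" directory.

from ktrain import vision as vis

(train_data, val_data, preproc) = vis.images_from_csv(
    "train.csv",
    "image_fname",
    label_columns=["label"],
    directory="images/",
    suffix=".jpg",   # appended because image_fname values lack extensions in this example
    val_pct=0.1,
)
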
def images_from_fname(train_folder, pattern='([^/]+)_\\d+.jpg$', val_folder=None, is_regression=False, target_size=(224, 224), color_mode='rgb', data_aug=None, val_pct=0.1, random_state=None, verbose=1)
Returns image generator (Iterator instance).

Args:
train_folder (str): directory containing images
pattern (str):  regular expression to extract class from file name of each image
            Example: r'([^/]+)_\d+.jpg$' to match 'english_setter' in 'english_setter_140.jpg'
            By default, it will extract classes from file names of the form:
               <class_name>_<numbers>.jpg
val_folder (str): directory containing validation images. default:None
is_regression(bool): If True, task is treated as regression.
                     Used when there is a single column of numeric values and
                     the numeric values should be treated as numeric targets rather than class labels
target_size (tuple):  image dimensions
color_mode (string):  color mode
data_aug(ImageDataGenerator):  a keras.preprocessing.image.ImageDataGenerator
                              for data augmentation
val_pct(float):  proportion of training data to be used for validation
                 only used if val_folder is None
random_state(int): random seed for train/test split
verbose(bool):   verbosity

Returns:
batches: a tuple of two Iterators (train and validation) and an ImagePreprocessor instance
Expand source code
def images_from_fname(
    train_folder,
    pattern=r"([^/]+)_\d+.jpg$",
    val_folder=None,
    is_regression=False,
    target_size=(224, 224),
    color_mode="rgb",
    data_aug=None,
    val_pct=0.1,
    random_state=None,
    verbose=1,
):
    """
    ```
    Returns image generator (Iterator instance).

    Args:
    train_folder (str): directory containing images
    pattern (str):  regular expression to extract class from file name of each image
                Example: r'([^/]+)_\d+.jpg$' to match 'english_setter' in 'english_setter_140.jpg'
                By default, it will extract classes from file names of the form:
                   <class_name>_<numbers>.jpg
    val_folder (str): directory containing validation images. default:None
    is_regression(bool): If True, task is treated as regression.
                         Used when there is a single column of numeric values and
                         the numeric values should be treated as numeric targets rather than class labels
    target_size (tuple):  image dimensions
    color_mode (string):  color mode
    data_aug(ImageDataGenerator):  a keras.preprocessing.image.ImageDataGenerator
                                  for data augmentation
    val_pct(float):  proportion of training data to be used for validation
                     only used if val_folder is None
    random_state(int): random seed for train/test split
    verbose(bool):   verbosity

    Returns:
    batches: a tuple of two Iterators (train and validation) and an ImagePreprocessor instance
    ```
    """

    image_column = "image_name"
    label_column = "label"
    train_df = _img_fnames_to_df(
        train_folder,
        pattern,
        image_column=image_column,
        label_column=label_column,
        verbose=verbose,
    )
    val_df = None
    if val_folder is not None:
        val_df = _img_fnames_to_df(
            val_folder,
            pattern,
            image_column=image_column,
            label_column=label_column,
            verbose=verbose,
        )
    return images_from_df(
        train_df,
        image_column,
        label_columns=label_column,
        directory=train_folder,
        val_directory=val_folder,
        val_df=val_df,
        is_regression=is_regression,
        target_size=target_size,
        color_mode=color_mode,
        data_aug=data_aug,
        val_pct=val_pct,
        random_state=random_state,
    )
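
Example: a sketch assuming a hypothetical "pets/" folder with files such as "english_setter_140.jpg"; the default pattern extracts "english_setter" as the class label.

from ktrain import vision as vis

(train_data, val_data, preproc) = vis.images_from_fname("pets/", val_pct=0.1, random_state=42)
print(preproc.get_classes())  # class names inferred from the filenames
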
def images_from_folder(datadir, target_size=(224, 224), classes=None, color_mode='rgb', train_test_names=['train', 'test'], data_aug=None, verbose=1)
Returns image generator (Iterator instance).
Assumes output will be 2D one-hot-encoded labels for categorization.
Note: This function preprocesses the input in preparation
      for a ResNet50 model.

Args:
datadir (string): path to training (or validation/test) dataset
    Assumes folder follows this structure:
    ├── datadir
    │   ├── train
    │   ├── train
    │   │   ├── class0       # folder containing images of class 0
    │   │   ├── class1       # folder containing images of class 1
    │   │   ├── class2       # folder containing images of class 2
    │   │   └── classN       # folder containing images of class N
    │   └── test
    │       ├── class0       # folder containing images of class 0
    │       ├── class1       # folder containing images of class 1
    │       ├── class2       # folder containing images of class 2
    │       └── classN       # folder containing images of class N

target_size (tuple):  image dimensions
classes (list):  optional list of class subdirectories (e.g., ['cats','dogs'])
color_mode (string):  color mode
train_test_names(list): names for train and test subfolders
data_aug(ImageDataGenerator):  a keras.preprocessing.image.ImageDataGenerator
                              for data augmentation
verbose (bool):               verbosity

Returns:
batches: a tuple of two Iterators (train and test) and an ImagePreprocessor instance
Expand source code
def images_from_folder(
    datadir,
    target_size=(224, 224),
    classes=None,
    color_mode="rgb",
    train_test_names=["train", "test"],
    data_aug=None,
    verbose=1,
):
    """
    ```
    Returns image generator (Iterator instance).
    Assumes output will be 2D one-hot-encoded labels for categorization.
    Note: This function preprocesses the input in preparation
          for a ResNet50 model.

    Args:
    datadir (string): path to training (or validation/test) dataset
        Assumes folder follows this structure:
        ├── datadir
        │   ├── train
        │   ├── train
        │   │   ├── class0       # folder containing images of class 0
        │   │   ├── class1       # folder containing images of class 1
        │   │   ├── class2       # folder containing images of class 2
        │   │   └── classN       # folder containing images of class N
        │   └── test
        │       ├── class0       # folder containing images of class 0
        │       ├── class1       # folder containing images of class 1
        │       ├── class2       # folder containing images of class 2
        │       └── classN       # folder containing images of class N

    target_size (tuple):  image dimensions
    classes (list):  optional list of class subdirectories (e.g., ['cats','dogs'])
    color_mode (string):  color mode
    train_test_names(list): names for train and test subfolders
    data_aug(ImageDataGenerator):  a keras.preprocessing.image.ImageDataGenerator
                                  for data augmentation
    verbose (bool):               verbosity

    Returns:
    batches: a tuple of two Iterators (train and test) and an ImagePreprocessor instance
    ```
    """

    # train/test names
    train_str = train_test_names[0]
    test_str = train_test_names[1]
    train_dir = os.path.join(datadir, train_str)
    test_dir = os.path.join(datadir, test_str)

    # color mode warning
    if PIL_INSTALLED:
        inferred_color_mode = detect_color_mode(train_dir)
        if inferred_color_mode is not None and (inferred_color_mode != color_mode):
            U.vprint(
                "color_mode detected (%s) different than color_mode selected (%s)"
                % (inferred_color_mode, color_mode),
                verbose=verbose,
            )

    # get train and test data generators
    (train_datagen, test_datagen) = process_datagen(
        data_aug,
        train_directory=train_dir,
        target_size=target_size,
        color_mode=color_mode,
    )
    batches_tr = train_datagen.flow_from_directory(
        train_dir,
        target_size=target_size,
        classes=classes,
        class_mode="categorical",
        shuffle=True,
        interpolation="bicubic",
        color_mode=color_mode,
    )

    batches_te = test_datagen.flow_from_directory(
        test_dir,
        target_size=target_size,
        classes=classes,
        class_mode="categorical",
        shuffle=False,
        interpolation="bicubic",
        color_mode=color_mode,
    )

    # setup preprocessor
    class_tup = sorted(batches_tr.class_indices.items(), key=operator.itemgetter(1))
    preproc = ImagePreprocessor(
        test_datagen,
        [x[0] for x in class_tup],
        target_size=target_size,
        color_mode=color_mode,
    )
    return (batches_tr, batches_te, preproc)
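
Example: a sketch showing the folder-specific options; the "data/" layout with "training"/"validation" subfolders and the two class names are hypothetical.

from ktrain import vision as vis

(train_data, val_data, preproc) = vis.images_from_folder(
    "data/",
    train_test_names=["training", "validation"],  # non-default subfolder names
    classes=["cats", "dogs"],                     # restrict to these class subdirectories
    target_size=(224, 224),
    color_mode="rgb",
    data_aug=vis.get_data_aug(horizontal_flip=True),
)
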
def preprocess_csv(csv_in, csv_out, x_col='filename', y_col=None, sep=',', label_sep=' ', suffix='', split_by=None)
Takes a CSV where one column contains a file name and another column
contains a string representation of the class(es), like this:
image_name,tags
01, sunny|hot
02, cloudy|cold
03, cloudy|hot

.... and one-hot encodes the classes to produce a CSV as follows:
image_name, cloudy, cold, hot, sunny
01.jpg,0,0,1,1
02.jpg,1,1,0,0
03.jpg,1,0,1,0
Args:
    csv_in (str):  filepath to input CSV file
    csv_out (str): filepath to output CSV file
    x_col (str):  name of column containing file names
    y_col (str): name of column containing the classes
    sep (str): field delimiter of entire file (e.g., comma for CSV)
    label_sep (str): delimiter for column containing classes
    suffix (str): adds suffix to x_col values
    split_by(str): name of column. A separate CSV will be
                   created for each value in column. Useful
                   for splitting a CSV based on whether a column
                   contains 'train' or 'valid'.
Return:
    list :  the list of classes (and csv_out will be the new CSV file)
Expand source code
def preprocess_csv(
    csv_in,
    csv_out,
    x_col="filename",
    y_col=None,
    sep=",",
    label_sep=" ",
    suffix="",
    split_by=None,
):
    """
    ```
    Takes a CSV where one column contains a file name and another column
    contains a string representation of the class(es), like this:
    image_name,tags
    01, sunny|hot
    02, cloudy|cold
    03, cloudy|hot

    .... and one-hot encodes the classes to produce a CSV as follows:
    image_name, cloudy, cold, hot, sunny
    01.jpg,0,0,1,1
    02.jpg,1,1,0,0
    03.jpg,1,0,1,0
    Args:
        csv_in (str):  filepath to input CSV file
        csv_out (str): filepath to output CSV file
        x_col (str):  name of column containing file names
        y_col (str): name of column containing the classes
        sep (str): field delimiter of entire file (e.g., comma for CSV)
        label_sep (str): delimiter for column containing classes
        suffix (str): adds suffix to x_col values
        split_by(str): name of column. A separate CSV will be
                       created for each value in column. Useful
                       for splitting a CSV based on whether a column
                       contains 'train' or 'valid'.
    Return:
        list :  the list of classes (and csv_out will be the new CSV file)
    ```
    """
    if not y_col and not suffix:
        raise ValueError("one or both of y_col and suffix should be supplied")
    df = pd.read_csv(csv_in, sep=sep)
    f_csv_out = open(csv_out, "w")
    writer = csv.writer(f_csv_out, delimiter=sep)
    if y_col:
        df[y_col] = df[y_col].apply(str)

    # write header
    if y_col:
        classes = set()
        for row in df.iterrows():
            data = row[1]
            tags = data[y_col].split(label_sep)
            classes.update(tags)
        classes = list(classes)
        classes.sort()
        writer.writerow([x_col] + classes)
    else:
        classes = df.columns[:-1]
        writer.writerow(df.columns)

    # write rows
    for row in df.iterrows():
        data = row[1]
        data[x_col] = data[x_col] + suffix
        if y_col:
            out = list(data[[x_col]].values)
            tags = set(data[y_col].strip().split(label_sep))
            for c in classes:
                if c in tags:
                    out.append(1)
                else:
                    out.append(0)
        else:
            out = data
        writer.writerow(out)
    f_csv_out.close()
    return classes
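
Example: a sketch that one-hot encodes a hypothetical multi-label CSV with an "image_name" column and a "tags" column whose labels are separated by "|" (as in the docstring above).

from ktrain import vision as vis

class_names = vis.preprocess_csv(
    "raw_labels.csv",     # input CSV (hypothetical)
    "labels_onehot.csv",  # one-hot-encoded CSV written by this call
    x_col="image_name",
    y_col="tags",
    label_sep="|",
    suffix=".jpg",
)
print(class_names)  # e.g., ['cloudy', 'cold', 'hot', 'sunny']
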
def preview_data_aug(img_path, data_aug, rows=1, n=4)
Preview data augmentation (ImageDataGenerator)
on a supplied image.
Expand source code
def preview_data_aug(img_path, data_aug, rows=1, n=4):
    """
    ```
    Preview data augmentation (ImageDataGenerator)
    on a supplied image.
    ```
    """
    if type(img_path) != type("") or not os.path.isfile(img_path):
        raise ValueError("img_path must be valid file path to image")
    idg = copy.copy(data_aug)
    idg.featurewise_center = False
    idg.featurewise_std_normalization = False
    idg.samplewise_center = False
    idg.samplewise_std_normalization = False
    idg.rescale = None
    idg.zca_whitening = False
    idg.preprocessing_function = None

    img = keras.preprocessing.image.load_img(img_path)
    x = img_to_array(img)
    x = x / 255.0
    x = x.reshape((1,) + x.shape)
    i = 0
    ims = []
    for batch in idg.flow(x, batch_size=1):
        ims.append(np.squeeze(batch))
        i += 1
        if i >= n:
            break
    U.plots(ims, rows=rows)
    return
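
Example: a sketch previewing eight augmented variants of a single image; "images/cat.jpg" is a hypothetical path.

from ktrain import vision as vis

data_aug = vis.get_data_aug(horizontal_flip=True)
vis.preview_data_aug("images/cat.jpg", data_aug, rows=2, n=8)
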
def print_image_classifiers()
Expand source code
def print_image_classifiers():
    for k, v in IMAGE_CLASSIFIERS.items():
        print("%s: %s" % (k, v))
def print_image_regression_models()
Expand source code
def print_image_regression_models():
    for k, v in IMAGE_CLASSIFIERS.items():
        print("%s: %s" % (k, v))
def show_image(img_path)
Given file path to image, show it in Jupyter notebook
Expand source code
def show_image(img_path):
    """
    ```
    Given file path to image, show it in Jupyter notebook
    ```
    """
    if not os.path.isfile(img_path):
        raise ValueError("%s is not valid file" % (img_path))
    img = plt.imread(img_path)
    out = plt.imshow(img)
    return out
def show_random_images(img_folder, n=4, rows=1)
Display random images from the directory given by img_folder
Expand source code
def show_random_images(img_folder, n=4, rows=1):
    """
    ```
    Display random images from the directory given by img_folder
    ```
    """
    fnames = []
    for ext in ("*.gif", "*.png", "*.jpg"):
        fnames.extend(glob.glob(os.path.join(img_folder, ext)))
    ims = []
    for i in range(n):
        img_path = random.choice(fnames)
        img = keras.preprocessing.image.load_img(img_path)
        x = img_to_array(img)
        x = x / 255.0
        ims.append(x)
    U.plots(ims, rows=rows)
    return
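
Example: a sketch of the two display helpers; the paths are hypothetical and both are intended for use inside a Jupyter notebook.

from ktrain import vision as vis

vis.show_image("images/cat.jpg")                 # display a single image
vis.show_random_images("images/", n=8, rows=2)   # sample from *.jpg/*.png/*.gif in the folder
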

Classes

class ImagePredictor (model, preproc, batch_size=32)
predicts image classes
Expand source code
class ImagePredictor(Predictor):
    """
    ```
    predicts image classes
    ```
    """

    def __init__(self, model, preproc, batch_size=U.DEFAULT_BS):
        if not isinstance(model, keras.Model):
            raise ValueError("model must be of instance keras.Model")
        if not isinstance(preproc, ImagePreprocessor):
            raise ValueError("preproc must be instance of ImagePreprocessor")
        self.model = model
        self.preproc = preproc
        self.datagen = self.preproc.get_preprocessor()
        self.c = self.preproc.get_classes()
        self.batch_size = batch_size

    def get_classes(self):
        return self.c

    def explain(self, img_fpath):
        """
        ```
        Highlights image to explain prediction
        ```
        """
        try:
            import eli5
        except:
            msg = (
                "ktrain requires a forked version of eli5 to support tf.keras. "
                + "Install with: pip install https://github.com/amaiya/eli5-tf/archive/refs/heads/master.zip"
            )
            warnings.warn(msg)
            return

        if not DISABLE_V2_BEHAVIOR:
            warnings.warn(
                "Please add os.environ['DISABLE_V2_BEHAVIOR'] = '1' at top of your script or notebook."
            )
            msg = (
                "\nFor image classification, the explain method currently requires disabling V2 behavior in TensorFlow 2.\n"
                + "Please add the following to the top of your script or notebook BEFORE you import ktrain and restart Colab runtime or Jupyter kernel:\n\n"
                + "import os\n"
                + "os.environ['DISABLE_V2_BEHAVIOR'] = '1'\n"
            )
            print(msg)
            return

        img = keras.preprocessing.image.load_img(
            img_fpath,
            target_size=self.preproc.target_size,
            color_mode=self.preproc.color_mode,
        )
        x = keras.preprocessing.image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        return eli5.show_prediction(self.model, x)

    def predict(self, data, return_proba=False, verbose=0):
        """
        ```
        Predicts class from image in array format.
        If return_proba is True, returns probabilities of each class.
        ```
        """
        if not isinstance(data, np.ndarray):
            raise ValueError("data must be numpy.ndarray")
        (generator, steps) = self.preproc.preprocess(data, batch_size=self.batch_size)
        return self.predict_generator(
            generator, steps=steps, return_proba=return_proba, verbose=verbose
        )

    def predict_filename(self, img_path, return_proba=False, verbose=0):
        """
        ```
        Predicts class from filepath to single image file.
        If return_proba is True, returns probabilities of each class.
        ```
        """
        if not os.path.isfile(img_path):
            raise ValueError("img_path must be valid file")
        (generator, steps) = self.preproc.preprocess(
            img_path, batch_size=self.batch_size
        )
        return self.predict_generator(
            generator, steps=steps, return_proba=return_proba, verbose=verbose
        )

    def predict_folder(self, folder, return_proba=False, verbose=0):
        """
        ```
        Predicts the classes of all images in a folder.
        If return_proba is True, returns probabilities of each class.
        ```

        """
        if not os.path.isdir(folder):
            raise ValueError("folder must be valid directory")
        (generator, steps) = self.preproc.preprocess(folder, batch_size=self.batch_size)
        result = self.predict_generator(
            generator, steps=steps, return_proba=return_proba, verbose=verbose
        )
        if len(result) != len(generator.filenames):
            raise Exception("number of results does not equal number of filenames")
        return list(zip(generator.filenames, result))

    def predict_generator(self, generator, steps=None, return_proba=False, verbose=0):
        # loss = self.model.loss
        # if callable(loss): loss = loss.__name__
        # treat_multilabel = False
        # if loss != 'categorical_crossentropy' and not return_proba:
        #    return_proba=True
        #    treat_multilabel = True
        classification, multilabel = U.is_classifier(self.model)
        if not classification:
            return_proba = True
        # *_generator methods are deprecated from TF 2.1.0
        # preds =  self.model.predict_generator(generator, steps=steps)
        preds = self.model.predict(generator, steps=steps, verbose=verbose)
        result = (
            preds
            if return_proba or multilabel
            else [self.c[np.argmax(pred)] for pred in preds]
        )
        if multilabel and not return_proba:
            return [list(zip(self.c, r)) for r in result]
        if not classification:
            return np.squeeze(result, axis=1)
        else:
            return result

    def predict_proba(self, data, verbose=0):
        return self.predict(data, return_proba=True, verbose=verbose)

    def predict_proba_folder(self, folder, verbose=0):
        return self.predict_folder(folder, return_proba=True, verbose=verbose)

    def predict_proba_filename(self, img_path, verbose=0):
        return self.predict_filename(img_path, return_proba=True, verbose=verbose)

    def predict_proba_generator(self, generator, steps=None, verbose=0):
        return self.predict_generator(
            generator, steps=steps, return_proba=True, verbose=verbose
        )

    def analyze_valid(self, generator, print_report=True, multilabel=None):
        """
        ```
        Makes predictions on validation set and returns the confusion matrix.
        Accepts as input a generator (e.g., DirectoryIterator, DataFrameIterator)
        representing the validation set.


        Optionally prints a classification report.
        Currently, this method is only supported for binary and multiclass
        problems, not multilabel classification problems.
        ```
        """
        if multilabel is None:
            multilabel = U.is_multilabel(generator)
        if multilabel:
            warnings.warn("multilabel_confusion_matrix not yet supported - skipping")
            return

        y_true = generator.classes
        # *_generator methods are deprecated from TF 2.1.0
        # y_pred = self.model.predict_generator(generator)
        y_pred = self.model.predict(generator)
        y_pred = np.argmax(y_pred, axis=1)
        if print_report:
            print(classification_report(y_true, y_pred, target_names=self.c))
        if not multilabel:
            cm_func = confusion_matrix
            cm = cm_func(y_true, y_pred)
        else:
            cm = None
        return cm

    def _save_preproc(self, fpath):
        preproc_name = "tf_model.preproc"
        with open(os.path.join(fpath, preproc_name), "wb") as f:
            datagen = self.preproc.get_preprocessor()
            pfunc = datagen.preprocessing_function
            datagen.preprocessing_function = None
            pickle.dump(self.preproc, f)
            datagen.preprocessing_function = pfunc
        return
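
Example: an ImagePredictor is normally obtained via ktrain.get_predictor rather than constructed directly. A minimal sketch, assuming a learner trained as in the image_classifier example above and hypothetical file paths:

import ktrain

# learner.model is the trained keras model; preproc is the ImagePreprocessor
# returned alongside the data by the images_from_* functions
predictor = ktrain.get_predictor(learner.model, preproc)

print(predictor.get_classes())
print(predictor.predict_filename("images/cat.jpg"))        # predicted class label
print(predictor.predict_proba_filename("images/cat.jpg"))  # class probabilities

predictor.save("my_image_predictor")                 # persists model and preprocessor
reloaded = ktrain.load_predictor("my_image_predictor")
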

Ancestors

Methods

def analyze_valid(self, generator, print_report=True, multilabel=None)
Makes predictions on validation set and returns the confusion matrix.
Accepts as input a generator (e.g., DirectoryIterator, DataFrameIterator)
representing the validation set.


Optionally prints a classification report.
Currently, this method is only supported for binary and multiclass
problems, not multilabel classification problems.
Expand source code
def analyze_valid(self, generator, print_report=True, multilabel=None):
    """
    ```
    Makes predictions on validation set and returns the confusion matrix.
    Accepts as input a generator (e.g., DirectoryIterator, DataFrameIterator)
    representing the validation set.


    Optionally prints a classification report.
    Currently, this method is only supported for binary and multiclass
    problems, not multilabel classification problems.
    ```
    """
    if multilabel is None:
        multilabel = U.is_multilabel(generator)
    if multilabel:
        warnings.warn("multilabel_confusion_matrix not yet supported - skipping")
        return

    y_true = generator.classes
    # *_generator methods are deprecated from TF 2.1.0
    # y_pred = self.model.predict_generator(generator)
    y_pred = self.model.predict(generator)
    y_pred = np.argmax(y_pred, axis=1)
    if print_report:
        print(classification_report(y_true, y_pred, target_names=self.c))
    if not multilabel:
        cm_func = confusion_matrix
        cm = cm_func(y_true, y_pred)
    else:
        cm = None
    return cm
def explain(self, img_fpath)
Highlights image to explain prediction
Expand source code
def explain(self, img_fpath):
    """
    ```
    Highlights image to explain prediction
    ```
    """
    try:
        import eli5
    except:
        msg = (
            "ktrain requires a forked version of eli5 to support tf.keras. "
            + "Install with: pip install https://github.com/amaiya/eli5-tf/archive/refs/heads/master.zip"
        )
        warnings.warn(msg)
        return

    if not DISABLE_V2_BEHAVIOR:
        warnings.warn(
            "Please add os.environ['DISABLE_V2_BEHAVIOR'] = '1' at top of your script or notebook."
        )
        msg = (
            "\nFor image classification, the explain method currently requires disabling V2 behavior in TensorFlow 2.\n"
            + "Please add the following to the top of your script or notebook BEFORE you import ktrain and restart Colab runtime or Jupyter kernel:\n\n"
            + "import os\n"
            + "os.environ['DISABLE_V2_BEHAVIOR'] = '1'\n"
        )
        print(msg)
        return

    img = keras.preprocessing.image.load_img(
        img_fpath,
        target_size=self.preproc.target_size,
        color_mode=self.preproc.color_mode,
    )
    x = keras.preprocessing.image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    return eli5.show_prediction(self.model, x)
def get_classes(self)
Expand source code
def get_classes(self):
    return self.c
def predict(self, data, return_proba=False, verbose=0)
Predicts class from image in array format.
If return_proba is True, returns probabilities of each class.
Expand source code
def predict(self, data, return_proba=False, verbose=0):
    """
    ```
    Predicts class from image in array format.
    If return_proba is True, returns probabilities of each class.
    ```
    """
    if not isinstance(data, np.ndarray):
        raise ValueError("data must be numpy.ndarray")
    (generator, steps) = self.preproc.preprocess(data, batch_size=self.batch_size)
    return self.predict_generator(
        generator, steps=steps, return_proba=return_proba, verbose=verbose
    )
def predict_filename(self, img_path, return_proba=False, verbose=0)
Predicts class from filepath to single image file.
If return_proba is True, returns probabilities of each class.
Expand source code
def predict_filename(self, img_path, return_proba=False, verbose=0):
    """
    ```
    Predicts class from filepath to single image file.
    If return_proba is True, returns probabilities of each class.
    ```
    """
    if not os.path.isfile(img_path):
        raise ValueError("img_path must be valid file")
    (generator, steps) = self.preproc.preprocess(
        img_path, batch_size=self.batch_size
    )
    return self.predict_generator(
        generator, steps=steps, return_proba=return_proba, verbose=verbose
    )
def predict_folder(self, folder, return_proba=False, verbose=0)
Predicts the classes of all images in a folder.
If return_proba is True, returns probabilities of each class.
Expand source code
def predict_folder(self, folder, return_proba=False, verbose=0):
    """
    ```
    Predicts the classes of all images in a folder.
    If return_proba is True, returns probabilities of each class.
    ```

    """
    if not os.path.isdir(folder):
        raise ValueError("folder must be valid directory")
    (generator, steps) = self.preproc.preprocess(folder, batch_size=self.batch_size)
    result = self.predict_generator(
        generator, steps=steps, return_proba=return_proba, verbose=verbose
    )
    if len(result) != len(generator.filenames):
        raise Exception("number of results does not equal number of filenames")
    return list(zip(generator.filenames, result))
def predict_generator(self, generator, steps=None, return_proba=False, verbose=0)
Expand source code
def predict_generator(self, generator, steps=None, return_proba=False, verbose=0):
    # loss = self.model.loss
    # if callable(loss): loss = loss.__name__
    # treat_multilabel = False
    # if loss != 'categorical_crossentropy' and not return_proba:
    #    return_proba=True
    #    treat_multilabel = True
    classification, multilabel = U.is_classifier(self.model)
    if not classification:
        return_proba = True
    # *_generator methods are deprecated from TF 2.1.0
    # preds =  self.model.predict_generator(generator, steps=steps)
    preds = self.model.predict(generator, steps=steps, verbose=verbose)
    result = (
        preds
        if return_proba or multilabel
        else [self.c[np.argmax(pred)] for pred in preds]
    )
    if multilabel and not return_proba:
        return [list(zip(self.c, r)) for r in result]
    if not classification:
        return np.squeeze(result, axis=1)
    else:
        return result
def predict_proba(self, data, verbose=0)
Expand source code
def predict_proba(self, data, verbose=0):
    return self.predict(data, return_proba=True, verbose=verbose)
def predict_proba_filename(self, img_path, verbose=0)
Expand source code
def predict_proba_filename(self, img_path, verbose=0):
    return self.predict_filename(img_path, return_proba=True, verbose=verbose)
def predict_proba_folder(self, folder, verbose=0)
Expand source code
def predict_proba_folder(self, folder, verbose=0):
    return self.predict_folder(folder, return_proba=True, verbose=verbose)
def predict_proba_generator(self, generator, steps=None, verbose=0)
Expand source code
def predict_proba_generator(self, generator, steps=None, verbose=0):
    return self.predict_generator(
        generator, steps=steps, return_proba=True, verbose=verbose
    )

Inherited members