Module ktrain.text.ner.anago.callbacks
Custom callbacks.
Source code
"""
Custom callbacks.
"""
from ....imports import *
from .. import metrics
class F1score(keras.callbacks.Callback):
    def __init__(self, seq, preprocessor=None):
        super(F1score, self).__init__()
        self.seq = seq
        self.p = preprocessor

    def get_lengths(self, y_true):
        lengths = []
        for y in np.argmax(y_true, -1):
            try:
                i = list(y).index(0)
            except ValueError:
                i = len(y)
            lengths.append(i)
        return lengths

    def on_epoch_end(self, epoch, logs={}):
        label_true = []
        label_pred = []
        for i in range(len(self.seq)):
            x_true, y_true = self.seq[i]
            lengths = self.get_lengths(y_true)
            y_pred = self.model.predict_on_batch(x_true)
            y_true = self.p.inverse_transform(y_true, lengths)
            y_pred = self.p.inverse_transform(y_pred, lengths)
            label_true.extend(y_true)
            label_pred.extend(y_pred)
        score = metrics.f1_score(label_true, label_pred)
        print(" - f1: {:04.2f}".format(score * 100))
        print(metrics.classification_report(label_true, label_pred))
        logs["f1"] = score
Classes
class F1score (seq, preprocessor=None)
- Abstract base class used to build new callbacks.

Callbacks can be passed to keras methods such as fit, evaluate, and predict in order to hook into the various stages of the model training and inference lifecycle.

To create a custom callback, subclass keras.callbacks.Callback and override the method associated with the stage of interest. See https://www.tensorflow.org/guide/keras/custom_callback for more information.

Example:
>>> training_finished = False
>>> class MyCallback(tf.keras.callbacks.Callback):
...   def on_train_end(self, logs=None):
...     global training_finished
...     training_finished = True
>>> model = tf.keras.Sequential([
...     tf.keras.layers.Dense(1, input_shape=(1,))])
>>> model.compile(loss='mean_squared_error')
>>> model.fit(tf.constant([[1.0]]), tf.constant([[1.0]]),
...           callbacks=[MyCallback()])
>>> assert training_finished == True
If you want to use Callback objects in a custom training loop:

- You should pack all your callbacks into a single callbacks.CallbackList so they can all be called together.
- You will need to manually call all the on_* methods at the appropriate locations in your loop. Like this:
Example:
callbacks = tf.keras.callbacks.CallbackList([...])
callbacks.append(...)
callbacks.on_train_begin(...)
for epoch in range(EPOCHS):
    callbacks.on_epoch_begin(epoch)
    for i, data in dataset.enumerate():
        callbacks.on_train_batch_begin(i)
        batch_logs = model.train_step(data)
        callbacks.on_train_batch_end(i, batch_logs)
    epoch_logs = ...
    callbacks.on_epoch_end(epoch, epoch_logs)
final_logs = ...
callbacks.on_train_end(final_logs)
Attributes
params
- Dict. Training parameters (e.g. verbosity, batch size, number of epochs…).
model
- Instance of keras.models.Model. Reference of the model being trained.
The logs dictionary that callback methods take as argument will contain keys for quantities relevant to the current batch or epoch (see method-specific docstrings).
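A minimal sketch (generic Keras, not specific to this module) showing that Keras populates these attributes before training starts, so any Callback subclass, including F1score, can read self.params and self.model:

import tensorflow as tf

class ShowAttrs(tf.keras.callbacks.Callback):
    def on_train_begin(self, logs=None):
        # Both attributes are set by Keras before the first epoch runs.
        print("params:", self.params)               # e.g. {'verbose': 0, 'epochs': 1, 'steps': 1}
        print("model:", type(self.model).__name__)  # Sequential

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])
model.compile(loss="mean_squared_error")
model.fit(tf.constant([[1.0]]), tf.constant([[1.0]]), callbacks=[ShowAttrs()], verbose=0)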
Source code

class F1score(keras.callbacks.Callback):
    def __init__(self, seq, preprocessor=None):
        super(F1score, self).__init__()
        self.seq = seq
        self.p = preprocessor

    def get_lengths(self, y_true):
        lengths = []
        for y in np.argmax(y_true, -1):
            try:
                i = list(y).index(0)
            except ValueError:
                i = len(y)
            lengths.append(i)
        return lengths

    def on_epoch_end(self, epoch, logs={}):
        label_true = []
        label_pred = []
        for i in range(len(self.seq)):
            x_true, y_true = self.seq[i]
            lengths = self.get_lengths(y_true)
            y_pred = self.model.predict_on_batch(x_true)
            y_true = self.p.inverse_transform(y_true, lengths)
            y_pred = self.p.inverse_transform(y_pred, lengths)
            label_true.extend(y_true)
            label_pred.extend(y_pred)
        score = metrics.f1_score(label_true, label_pred)
        print(" - f1: {:04.2f}".format(score * 100))
        print(metrics.classification_report(label_true, label_pred))
        logs["f1"] = score
Ancestors
- keras.callbacks.Callback
Methods
def get_lengths(self, y_true)
Source code
def get_lengths(self, y_true):
    lengths = []
    for y in np.argmax(y_true, -1):
        try:
            i = list(y).index(0)
        except ValueError:
            i = len(y)
        lengths.append(i)
    return lengths
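Illustrative example of what get_lengths computes: label index 0 is treated as padding, so each sequence's length is the position of the first padding label in the argmax, falling back to the full row length when no padding is present. The snippet below replays the same logic on toy one-hot labels:

import numpy as np

y_true = np.array([
    [[0, 1, 0], [0, 0, 1], [1, 0, 0], [1, 0, 0]],  # two real tags, then padding
    [[0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 1, 0]],  # no padding at all
])

lengths = []
for y in np.argmax(y_true, -1):      # same logic as get_lengths
    try:
        i = list(y).index(0)         # first padding position
    except ValueError:
        i = len(y)                   # no padding -> full length
    lengths.append(i)

print(lengths)  # [2, 4]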
def on_epoch_end(self, epoch, logs={})
- Called at the end of an epoch.
Subclasses should override for any actions to run. This function should only be called during TRAIN mode.
Args
epoch
- Integer, index of epoch.
logs
- Dict, metric results for this training epoch, and for the validation epoch if validation is performed. Validation result keys are prefixed with val_. For the training epoch, the values of the Model's metrics are returned. Example: {'loss': 0.2, 'accuracy': 0.7}.
Source code
def on_epoch_end(self, epoch, logs={}):
    label_true = []
    label_pred = []
    for i in range(len(self.seq)):
        x_true, y_true = self.seq[i]
        lengths = self.get_lengths(y_true)
        y_pred = self.model.predict_on_batch(x_true)
        y_true = self.p.inverse_transform(y_true, lengths)
        y_pred = self.p.inverse_transform(y_pred, lengths)
        label_true.extend(y_true)
        label_pred.extend(y_pred)
    score = metrics.f1_score(label_true, label_pred)
    print(" - f1: {:04.2f}".format(score * 100))
    print(metrics.classification_report(label_true, label_pred))
    logs["f1"] = score
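Because the score is written into logs["f1"], callbacks placed after F1score in the same callbacks list can monitor it. A hedged sketch, reusing the hypothetical TinySeq/TinyPreprocessor stand-ins from the module-level example above:

f1 = F1score(TinySeq(), preprocessor=TinyPreprocessor())
stopper = keras.callbacks.EarlyStopping(monitor="f1", mode="max", patience=3)
# F1score must come before EarlyStopping so that logs["f1"] is already set
# when EarlyStopping inspects the logs for the epoch.
model.fit(TinySeq(), epochs=20, callbacks=[f1, stopper], verbose=0)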