Experiment tracking#

Experiment#

class sklearn_evaluation.tracker.Experiment(tracker, uuid, data)#

An experiment instance used to log values

comment(comment)#

Add a comment to an experiment

Examples

>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker("experiments.db")
>>> exp = tracker.new_experiment()
>>> exp.comment("some comment") # add comment at runtime
>>> retrieved = tracker.get(exp.uuid)
>>> retrieved.comment("another comment")
log(key, obj)#

Log a value. Any JSON-serializable object works

Examples

>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker("experiments.db")
>>> exp = tracker.new_experiment()
>>> exp.log("accuracy", 0.8)
0.8
>>> data = tracker.get(exp.uuid)
>>> data['accuracy']
0.8
log_classification_report(y_true, y_pred, *, target_names=None, sample_weight=None, zero_division=0)#

Log classification report

Examples

>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker("experiments.db")
>>> exp = tracker.new_experiment()
>>> exp.log_classification_report([1, 1, 0, 0], [1, 0, 1, 0]) 
>>> data = tracker.get(exp.uuid)
>>> data['classification_report'] 
log_confusion_matrix(y_true, y_pred, target_names=None, normalize=False)#

Log a confusion matrix

Examples

>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker("experiments.db")
>>> exp = tracker.new_experiment()
>>> exp.log_confusion_matrix([1, 1, 0, 0], [1, 0, 1, 0]) 
>>> data = tracker.get(exp.uuid)
>>> data['confusion_matrix'] 
log_dict(obj)#

Log a dictionary with values

Examples

>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker("experiments.db")
>>> exp = tracker.new_experiment()
>>> exp.log_dict({"precision": 0.9, "recall": 0.7})
{'precision': 0.9, 'recall': 0.7}
>>> data = tracker.get(exp.uuid)
>>> data['precision']
0.9
>>> data['recall']
0.7
log_figure(key, fig)#

Log a matplotlib figure

>>> import matplotlib.pyplot as plt
>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker("experiments.db")
>>> fig, ax = plt.subplots()
>>> ax.scatter([1, 2, 3], [1, 2, 3]) 
>>> exp = tracker.new_experiment()
>>> exp.log_figure("scatter", fig)
>>> data = tracker.get(exp.uuid)
>>> data['scatter'] 

SQLiteTracker#

class sklearn_evaluation.SQLiteTracker(path: str)#

An experiment tracker backed by a SQLite database

See the user guide for more details.

Parameters

path – Database location

Examples

>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker("experiments.db")
>>> experiment = tracker.new_experiment() # new experiment
>>> experiment.log("accuracy", 0.8) # log metric
0.8
>>> tracker.get(experiment.uuid) # retrieve it later with the uuid
Experiment({'accuracy': 0.8})
>>> experiment.log_confusion_matrix([1, 1, 0, 0], [1, 0, 1, 0]) 
>>> data = tracker.get(experiment.uuid)
>>> data['confusion_matrix'] 
comment(uuid, comment)#

Add a comment to an experiment given its uuid
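
Examples

A minimal sketch based on the signature above; the comment string is illustrative:

>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker("experiments.db")
>>> exp = tracker.new_experiment()
>>> tracker.comment(exp.uuid, "baseline model")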

get(uuid, unserialize_plots=True)#

Get an experiment given its UUID

Examples

>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker("experiments.db")
>>> experiment = tracker.new_experiment() # new experiment
>>> experiment.log("accuracy", 0.8) # log metric
0.8
>>> experiment = tracker.get(experiment.uuid) # retrieve it with the uuid
>>> experiment
Experiment({'accuracy': 0.8})
>>> experiment.comment("best model")
get_parameters_keys(limit=100)#

Return the keys in the parameters column by randomly sampling records and obtaining the keys of the JSON objects
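
Examples

A minimal sketch; this assumes some experiments with logged parameters already exist in the database:

>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker("experiments.db")
>>> exp = tracker.new_experiment()
>>> exp.log("accuracy", 0.8)
0.8
>>> keys = tracker.get_parameters_keys()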

insert(uuid, parameters)#

Insert a new experiment
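
Examples

A minimal sketch based on the signature above; the uuid string is hypothetical and supplied by the caller:

>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker("experiments.db")
>>> tracker.insert("my-experiment-id", {"accuracy": 0.8})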

insert_many(parameters_all)#

Insert many experiments at once
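
Examples

A sketch assuming parameters_all is an iterable of parameter dictionaries, one per experiment; the exact expected structure may differ:

>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker("experiments.db")
>>> tracker.insert_many([{"accuracy": 0.8}, {"accuracy": 0.9}])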

new()#

Create a new experiment and return its uuid
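
Examples

A minimal sketch; the returned uuid can be passed later to methods such as get, comment, or update:

>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker("experiments.db")
>>> uuid = tracker.new()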

new_experiment()#

Create a new experiment and return it as an Experiment instance
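
Examples

A minimal sketch mirroring the usage shown elsewhere on this page:

>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker("experiments.db")
>>> exp = tracker.new_experiment()
>>> exp.log("accuracy", 0.8)
0.8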

query(code, as_frame=True, render_plots=False)#

Query the database

Parameters
  • code (str) – The SQL query to execute

  • as_frame (bool, default=True) – If True, return the results of the query as a pandas.DataFrame; otherwise, return a Results object. The Results object can render HTML stored in the database but cannot be filtered or manipulated like a pandas.DataFrame

  • render_plots (bool, default=False) – Whether to render plots in the results. Only valid when as_frame=False

Examples

>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker('experiments.db')
>>> exp1 = tracker.new_experiment()
>>> exp1.log("accuracy", 0.8) 
>>> exp1.log_confusion_matrix([1, 1, 0, 0], [1, 0, 1, 0]) 
>>> exp2 = tracker.new_experiment()
>>> exp2.log("accuracy", 1.0) 
>>> exp2.log_confusion_matrix([1, 1, 0, 0], [1, 1, 0, 0]) 
>>> df = tracker.query('''
... SELECT uuid,
...        json_extract(parameters, '$.accuracy') AS accuracy,
...        json_extract(parameters, '$.confusion_matrix') AS cm
... FROM experiments
... ''', as_frame=True)
>>> results = tracker.query('''
... SELECT uuid,
...        json_extract(parameters, '$.accuracy') AS accuracy,
...        json_extract(parameters, '$.confusion_matrix') AS cm
... FROM experiments
... ''', as_frame=False, render_plots=True)
recent(n=5, normalize=False)#

Get the most recent experiments as a pandas.DataFrame
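
Examples

A minimal sketch; n controls how many experiments are returned:

>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker("experiments.db")
>>> exp = tracker.new_experiment()
>>> exp.log("accuracy", 0.8)
0.8
>>> df = tracker.recent(n=5)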

update(uuid, parameters, allow_overwrite=False)#

Update the parameters of an experiment given its uuid
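
Examples

A sketch based on the signature above; whether allow_overwrite is required depends on whether the experiment already has stored parameters:

>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker("experiments.db")
>>> uuid = tracker.new()
>>> tracker.update(uuid, {"accuracy": 0.9})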

upsert(uuid, parameters)#

Modify the stored parameters of an existing experiment
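
Examples

A minimal sketch based on the signature above; the metric value shown is illustrative:

>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker("experiments.db")
>>> exp = tracker.new_experiment()
>>> tracker.upsert(exp.uuid, {"accuracy": 0.9})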

upsert_append(uuid, parameters)#

Append the parameters to an existing experiment

Examples

>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker('experiments.db')
>>> exp = tracker.new_experiment()
>>> # Log initial "metric_a" values for the experiment
>>> exp.log("metric_a", [0.8, 0.85])
[0.8, 0.85]
>>> # Append a new "metric_a" value and add "metric_b" values
>>> tracker.upsert_append(
...     exp.uuid,
...     dict(metric_a=0.9, metric_b=[0.4, 0.2]),
... )
>>> data = tracker.get(exp.uuid)
>>> data['metric_a']
[0.8, 0.85, 0.9]
>>> data['metric_b']
[0.4, 0.2]