Profiling notebook#
This notebook collects and compares the run times of several notebooks. These notebooks are listed in the EXPERIMENT_NOTEBOOKS variable in metrics.py.
Simply run the whole notebook, and the results will be displayed in tables at the end.
Each notebook listed in EXPERIMENT_NOTEBOOKS (see metrics.py) must define a run_experiment() function, and may optionally define a close_experiment() function.
The profiler profiles the run_experiment function and, once it has finished, calls close_experiment if it exists. close_experiment is not mandatory, but if any resources need to be released after the run, that is the place to do it. The profiler measures the times listed in the METHODS variable (see metrics.py), as well as the total time. A minimal sketch of such an experiment notebook is shown below.
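The following is only an illustrative sketch of what an experiment notebook could provide. The run_experiment()/close_experiment() names are the contract described above; the function bodies are placeholders and not taken from the actual experiment notebooks.
# Hypothetical experiment notebook cell: only the run_experiment()/close_experiment()
# names are part of the profiler's contract; the bodies below are placeholders.
import time

def run_experiment():
    # The profiler times this call; a real notebook would compile, prepare,
    # and run a schedule here instead of sleeping.
    time.sleep(0.1)
    return {"dummy_result": 42}

def close_experiment():
    # Optional hook: release instruments, close files, etc.
    pass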
After the profiling is done, the notebook generates a file with the detailed profiling report in this directory for each profiled notebook. For the notebook <notebook>.ipynb it generates a <notebook>.ipynb.prof file, which can be opened with snakeviz (pip install snakeviz): snakeviz <notebook>.ipynb.prof.
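Because snakeviz visualizes standard cProfile dumps, the same .prof file can typically also be summarized in plain text with the standard-library pstats module. The file name below is only a placeholder; substitute the report you want to inspect.
import pstats

# Print the 10 most expensive calls by cumulative time.
stats = pstats.Stats("simple_binned_acquisition.ipynb.prof")  # placeholder file name
stats.sort_stats("cumulative").print_stats(10)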
Configuration#
# `benchmark_mode` sets whether we run the schedules in benchmark mode.
# In benchmark mode, we overwrite the reference measurements file
# with the current timing values, and those become the new reference values.
benchmark_mode = False

profiling_reference_filename = "profiling_reference_values.pickle"

# The result table displays each cell in a different color.
# Each value's "sigma" is effectively its measurement error,
# and if the current time is above/below
# `reference value ± sigma * sigma_multiplier_threshold`,
# the cell is displayed in a different color.
sigma_multiplier_threshold = 2.0  # 2.0 is a reasonable value.
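As a purely illustrative example (the numbers below are made up, not measured), this is the comparison that the cell-styling code further below applies to each value:
# Illustrative numbers only: current measurement 1.15 ± 0.02 s,
# reference 1.00 ± 0.03 s, threshold 2.0.
val, sigma = 1.15, 0.02
ref_val, ref_sigma = 1.00, 0.03

significantly_slower = (val - sigma * sigma_multiplier_threshold) > (
    ref_val + ref_sigma * sigma_multiplier_threshold
)
print(significantly_slower)  # True: 1.11 > 1.06, so this cell would be colored red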
Loading reference data#
# Reference values for profiling.
# Each notebook has a reference timing value.
import pickle
from os.path import exists
if not benchmark_mode:
    if not exists(profiling_reference_filename):
        raise RuntimeError(
            f"Reference file '{profiling_reference_filename}' does not exist! "
            f"Make sure this file is created by first running the profiling with 'benchmark_mode=True'!"
        )
    with open(profiling_reference_filename, "rb") as f:
        reference = pickle.load(f)
Running the profiling#
import metrics
measured_data = metrics.measure_experiment_runtimes()
measured_data
[('simple_binned_acquisition',
[(0.9946758512999999, 0.060520291938758214),
(0.1054618794, 0.0023019861591870687),
(0.2950480362000001, 0.07708127271427773),
(0.0283041775, 0.0005857374014951724),
(0.0103337558, 0.001243391736535975)],
(1.5076610159000001, 0.0752881679789585)),
('resonator_spectroscopy',
[(0.24496450730000002, 0.08676685139754789),
(0.15548121180000002, 0.0021772059857387196),
(0.062539442, 0.0009306177699387383),
(0.0027010211000000005, 0.00014065694190436276),
(0.012418523100000002, 0.000398350933049799)],
(0.6421398809, 0.09432553767909004)),
('random_gates',
[(1.030944426, 0.07697153933708932),
(0.11840048560000001, 0.0015075617320314957),
(0.33237856450000003, 0.007154834526667149),
(0.00043242370000000005, 5.157527486897152e-05),
(0.0032713507000000004, 0.00022519136410841211)],
(1.4963750570999994, 0.07880670434003852))]
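Each entry above is a (notebook_name, method_timings, total_timing) tuple, where every timing is a (mean, sigma) pair in seconds and the method timings follow the order of metrics.METHODS. A small sketch for reading this structure, assuming (as the table-building code further below does) that each METHODS entry has the method name as its first element:
# Pretty-print the measured data; structure as shown in the output above.
for notebook_name, method_times, (total_mean, total_sigma) in measured_data:
    print(f"{notebook_name}: total {total_mean:.3f} ± {total_sigma:.3f} s")
    for method, (mean, sigma) in zip(metrics.METHODS, method_times):
        print(f"  {method[0]}: {mean:.3f} ± {sigma:.3f} s")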
if benchmark_mode:
    with open(profiling_reference_filename, "wb") as f:
        pickle.dump(measured_data, f)
    reference = measured_data
Displaying the results#
reference
[('simple_binned_acquisition',
[(0.9926163084, 0.06192060210975775),
(0.10498222319999999, 0.0015965531111733849),
(0.29654308549999997, 0.07541338532140536),
(0.028360908400000006, 0.0005025226272929342),
(0.010576865600000002, 0.0013565539147959512)],
(1.5076823723000008, 0.073541431604032)),
('resonator_spectroscopy',
[(0.24136320129999994, 0.08250571058252025),
(0.15536156240000001, 0.0037741058575561878),
(0.061933728300000004, 0.0006777892286995014),
(0.0026206999, 4.7912470390180906e-05),
(0.012418401700000001, 0.00048759705280013274)],
(0.6349079799999999, 0.09074046088950546)),
('random_gates',
[(1.0291132077, 0.07413907143808215),
(0.1209051369, 0.0023142911883413317),
(0.3420565029, 0.004800341060046611),
(0.0004728105, 0.00010186780396597362),
(0.0033847724000000004, 0.00022984585643086612)],
(1.5066835084, 0.07132325504794586))]
import pandas as pd
import numpy as np
import metrics
table = []
header = []
table_diff = []
header_diff = []
header.append("")
header_diff.append("")
for method in metrics.METHODS:
    header.append(method[0])
    header_diff.append(method[0])
header.append("total")
header_diff.append("total")

for row_id, (experiment_notebook, times, total_time) in enumerate(measured_data):
    row = []
    row_diff = []
    row.append(experiment_notebook)
    row_diff.append(experiment_notebook)
    for column_id, time in enumerate(times):
        expected_value = time[0]
        sigma = time[1]
        row.append(f"{expected_value:.2g} ± {sigma:.2g} s")
        time_diff = expected_value - reference[row_id][1][column_id][0]
        row_diff.append(f"{time_diff:.2g} ± {sigma:.2g} s")
    row.append(f"{total_time[0]:.2g} ± {total_time[1]:.2g} s")
    total_time_diff = total_time[0] - reference[row_id][2][0]
    row_diff.append(f"{total_time_diff:.2g} ± {total_time[1]:.2g} s")
    table.append(row)
    table_diff.append(row_diff)
def diff_to_style(current, ref):
    green = "#d0ffd0"
    red = "#ffd0d0"
    val, sigma = current[0], current[1]
    ref_val, ref_sigma = ref[0], ref[1]
    if (val - sigma * sigma_multiplier_threshold) > (
        ref_val + ref_sigma * sigma_multiplier_threshold
    ):
        return f"background-color: {red}"
    if (val + sigma * sigma_multiplier_threshold) < (
        ref_val - ref_sigma * sigma_multiplier_threshold
    ):
        return f"background-color: {green}"
    return ""
style_table = []
for row_id, (experiment_notebook, times, total_time) in enumerate(measured_data):
    row = []
    row.append("")
    for column_id, time in enumerate(times):
        if row_id < len(reference) and column_id < len(reference[row_id][1]):
            row.append(diff_to_style(time, reference[row_id][1][column_id]))
        else:
            row.append("")
    if row_id < len(reference):
        row.append(diff_to_style(total_time, reference[row_id][2]))
    else:
        row.append("")
    style_table.append(row)
style_table = np.array(style_table)
style_properties = {"border": "1px solid gray"}
styles = [
    dict(
        selector="caption",
        props=[("text-align", "center"), ("font-size", "200%"), ("color", "black")],
    )
]

df = pd.DataFrame(table, columns=header)
df = df.style.set_properties(**style_properties).apply(lambda _: style_table, axis=None)
df = df.set_caption("Measured times").set_table_styles(styles)

df_diff = pd.DataFrame(table_diff, columns=header_diff)
df_diff = df_diff.style.set_properties(**style_properties).apply(
    lambda _: style_table, axis=None
)
df_diff = df_diff.set_caption("Measured diffs to reference").set_table_styles(styles)
# If the cell is green (or red), the current time
# is significantly less (or more) than the reference time.
df
|   |   | compile | prepare | schedule | run | process | total |
|---|---|---|---|---|---|---|---|
| 0 | simple_binned_acquisition | 0.99 ± 0.061 s | 0.11 ± 0.0023 s | 0.3 ± 0.077 s | 0.028 ± 0.00059 s | 0.01 ± 0.0012 s | 1.5 ± 0.075 s |
| 1 | resonator_spectroscopy | 0.24 ± 0.087 s | 0.16 ± 0.0022 s | 0.063 ± 0.00093 s | 0.0027 ± 0.00014 s | 0.012 ± 0.0004 s | 0.64 ± 0.094 s |
| 2 | random_gates | 1 ± 0.077 s | 0.12 ± 0.0015 s | 0.33 ± 0.0072 s | 0.00043 ± 5.2e-05 s | 0.0033 ± 0.00023 s | 1.5 ± 0.079 s |
# All data is (current_time - reference_time).
# If the cell is green (or red), the current time
# is significantly less (or more) than the reference time.
df_diff
|   |   | compile | prepare | schedule | run | process | total |
|---|---|---|---|---|---|---|---|
| 0 | simple_binned_acquisition | 0.0021 ± 0.061 s | 0.00048 ± 0.0023 s | -0.0015 ± 0.077 s | -5.7e-05 ± 0.00059 s | -0.00024 ± 0.0012 s | -2.1e-05 ± 0.075 s |
| 1 | resonator_spectroscopy | 0.0036 ± 0.087 s | 0.00012 ± 0.0022 s | 0.00061 ± 0.00093 s | 8e-05 ± 0.00014 s | 1.2e-07 ± 0.0004 s | 0.0072 ± 0.094 s |
| 2 | random_gates | 0.0018 ± 0.077 s | -0.0025 ± 0.0015 s | -0.0097 ± 0.0072 s | -4e-05 ± 5.2e-05 s | -0.00011 ± 0.00023 s | -0.01 ± 0.079 s |