# Plot dashboard stress tool for Python & Rust (#4599)
Adds stress tools that simulate end-to-end plot dashboard workloads in order to gauge the cost of each step along the way.

`just py-plot-dashboard --help` gives a pretty good idea of what you're
in for:
```
usage: main.py [-h] [--headless] [--connect] [--serve] [--addr ADDR] [--save SAVE] [-o] [--num-plots NUM_PLOTS] [--num-series-per-plot NUM_SERIES_PER_PLOT] [--num-points-per-series NUM_POINTS_PER_SERIES] [--freq FREQ] [--order ORDER]

Plot dashboard stress test

options:
  -h, --help            show this help message and exit
  --headless            Don't show GUI
  --connect             Connect to an external viewer
  --serve               Serve a web viewer (WARNING: experimental feature)
  --addr ADDR           Connect to this ip:port
  --save SAVE           Save data to a .rrd file at this path
  -o, --stdout          Log data to standard output, to be piped into a Rerun Viewer
  --num-plots NUM_PLOTS
                        How many different plots?
  --num-series-per-plot NUM_SERIES_PER_PLOT
                        How many series in each single plot?
  --num-points-per-series NUM_POINTS_PER_SERIES
                        How many points in each single series?
  --freq FREQ           Frequency of logging (applies to all series)
  --order ORDER         What order to log the data in (applies to all series)
```
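The viewer-related flags are the usual Rerun script flags. A couple of plausible invocations, just as illustration (the `.rrd` path is arbitrary):
```
# Save the generated data to an .rrd file for later inspection:
just py-plot-dashboard --save dashboard.rrd

# Or stream it to an already-running viewer instead of spawning one:
just py-plot-dashboard --connect
```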

C++ comes in the next PR because it's gonna spawn heated discussions, as
C++ does.

## Example

- 10 plots
- 5 series per plot
- 5000 points per series
- log 1000 points per series per second
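In aggregate these parameters ask the SDK for 10 × 5 = 50 series at 1000 Hz each, i.e. 50,000 scalars per second over a nominal 5000 / 1000 = 5 s run — that's the `expected=50000.0Hz` figure in the output below. A quick sanity check of the arithmetic, mirroring the math the tools themselves do (variable names are just for illustration):

```python
num_plots, num_series_per_plot, num_points_per_series, freq = 10, 5, 5000, 1000.0

num_series = num_plots * num_series_per_plot       # 50 series in total
expected_total_freq = freq * num_series            # 50_000.0 scalars per second
time_per_tick = 1.0 / freq                         # 0.001 s budget per logging tick
nominal_duration = num_points_per_series / freq    # 5.0 s if the target rate is met

print(num_series, expected_total_freq, time_per_tick, nominal_duration)
```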

### Python

Suffering heavily:
```
$ just py-plot-dashboard --num-plots 10 --num-series-per-plot 5 --num-points-per-series 5000 --freq 1000
logged 30700 scalars over 1.001s (freq=30661.263Hz, expected=50000.0Hz, load=289.202%)
logged 30700 scalars over 1.001s (freq=30673.738Hz, expected=50000.0Hz, load=173.354%)
logged 30000 scalars over 1.001s (freq=29984.101Hz, expected=50000.0Hz, load=196.29%)
logged 28900 scalars over 1.002s (freq=28856.395Hz, expected=50000.0Hz, load=195.074%)
logged 28400 scalars over 1.001s (freq=28367.522Hz, expected=50000.0Hz, load=198.889%)
logged 28400 scalars over 1.001s (freq=28359.02Hz, expected=50000.0Hz, load=199.151%)
logged 27100 scalars over 1.001s (freq=27076.734Hz, expected=50000.0Hz, load=290.751%)
logged 24400 scalars over 1.001s (freq=24379.348Hz, expected=50000.0Hz, load=291.467%)
logged 21400 scalars over 0.857s (freq=24958.854Hz, expected=50000.0Hz, load=288.01%)
```
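For reference: `freq` is scalars logged divided by the elapsed reporting window, and `load` is the worst per-tick busy fraction the script saw in that window (its `max_load`). A load above 100% means the work of a single tick alone exceeded the 1/freq budget, which is why the achieved rate sits well under the expected 50 kHz. A minimal sketch of that relationship (illustrative numbers, not taken from this run):

```python
freq = 1000.0
time_per_tick = 1.0 / freq             # 1 ms budget per tick

worst_tick_elapsed = 0.0029            # suppose the slowest tick spent ~2.9 ms on its logging
load = worst_tick_elapsed / time_per_tick
print(f"load={load * 100.0:.3f}%")     # load=290.000% -- the per-tick budget is blown ~3x over
```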

### Rust

Casually breezin' through:
```
$ just rs-plot-dashboard --num-plots 10 --num-series-per-plot 5 --num-points-per-series 5000 --freq 1000
logged 50050 scalars over 1.000277763s (freq=50036.102Hz, expected=50000.000Hz, load=31.367%)
logged 50000 scalars over 1.000359198s (freq=49982.047Hz, expected=50000.000Hz, load=31.741%)
logged 50000 scalars over 1.000592801s (freq=49970.378Hz, expected=50000.000Hz, load=33.446%)
logged 50000 scalars over 1.000630557s (freq=49968.492Hz, expected=50000.000Hz, load=46.129%)
logged 49950 scalars over 1.000663673s (freq=49916.872Hz, expected=50000.000Hz, load=34.742%)
```
teh-cmc authored Dec 21, 2023
1 parent acf0e78 commit 2516995
Showing 7 changed files with 319 additions and 1 deletion.
10 changes: 10 additions & 0 deletions Cargo.lock

(Generated file; diff not rendered.)

3 changes: 2 additions & 1 deletion Cargo.toml
@@ -2,11 +2,12 @@
 resolver = "2"
 members = [
   "crates/*",
-  "examples/rust/*",
   "docs/code-examples",
+  "examples/rust/*",
   "rerun_py",
   "run_wasm",
   "tests/rust/log_benchmark",
+  "tests/rust/plot_dashboard_stress",
   "tests/rust/roundtrips/*",
   "tests/rust/test_*",
 ]
5 changes: 5 additions & 0 deletions justfile
@@ -139,6 +139,9 @@ py-bench *ARGS:
py-docs-serve:
    mkdocs serve -f rerun_py/mkdocs.yml -w rerun_py

py-plot-dashboard *ARGS:
    pixi run py-plot-dashboard {{ARGS}}

### Rust

# Generate and open the documentation for Rerun and all of its Rust dependencies.
@@ -198,6 +201,8 @@ rs-run-all *ARGS:
    set -euo pipefail
    find examples/rust/ -name main.rs | xargs -I _ sh -c 'cd $(dirname _) && echo $(pwd) && cargo r'
rs-plot-dashboard *ARGS:
    pixi run rs-plot-dashboard {{ARGS}}

### TOML

3 changes: 3 additions & 0 deletions pixi.toml
@@ -51,6 +51,9 @@ py-build = "maturin develop --manifest-path rerun_py/Cargo.toml --extras=tests"
py-test = { cmd = "python -m pytest -vv rerun_py/tests/unit", depends_on = [
"py-build",
] }
py-plot-dashboard = { cmd = "python tests/python/plot_dashboard_stress/main.py" }

rs-plot-dashboard = { cmd = "cargo r -p plot_dashboard_stress --release --" }

# All the cpp-* tasks can be configured with environment variables, e.g.: RERUN_WERROR=ON CXX=clang++
cpp-prepare-release = "cmake -G 'Ninja' -B build/release -S . -DCMAKE_BUILD_TYPE=Release"
129 changes: 129 additions & 0 deletions tests/python/plot_dashboard_stress/main.py
@@ -0,0 +1,129 @@
#!/usr/bin/env python3
"""
Plot dashboard stress test.
Usage:
-----
```
just py-plot-dashboard --help
```
Example:
-------
```
just py-plot-dashboard --num-plots 10 --num-series-per-plot 5 --num-points-per-series 5000 --freq 1000
```
"""
from __future__ import annotations

import argparse
import math
import random
import time

import numpy as np
import rerun as rr # pip install rerun-sdk

parser = argparse.ArgumentParser(description="Plot dashboard stress test")
rr.script_add_args(parser)

parser.add_argument("--num-plots", type=int, default=1, help="How many different plots?")
parser.add_argument("--num-series-per-plot", type=int, default=1, help="How many series in each single plot?")
parser.add_argument("--num-points-per-series", type=int, default=100000, help="How many points in each single series?")
parser.add_argument("--freq", type=float, default=1000, help="Frequency of logging (applies to all series)")

order = [
    "forwards",
    "backwards",
    "random",
]
parser.add_argument(
    "--order", type=str, default="forwards", choices=order, help="What order to log the data in (applies to all series)"
)

# TODO(cmc): could have flags to add attributes (color, radius...) to put some more stress
# on the line fragmenter.

args = parser.parse_args()


def main() -> None:
    rr.script_setup(args, "rerun_example_plot_dashboard_stress")

    plot_paths = [f"plot_{i}" for i in range(0, args.num_plots)]
    series_paths = [f"series_{i}" for i in range(0, args.num_series_per_plot)]

    num_series = len(plot_paths) * len(series_paths)
    time_per_tick = 1.0 / args.freq
    expected_total_freq = args.freq * num_series

    if args.order == "forwards":
        sim_times = np.arange(args.num_points_per_series)
    elif args.order == "backwards":
        sim_times = np.arange(args.num_points_per_series)[::-1]
    else:
        sim_times = np.arange(args.num_points_per_series)
        np.random.shuffle(sim_times)

    total_start_time = time.time()
    total_num_scalars = 0

    tick_start_time = time.time()
    max_load = 0.0

    for sim_time in sim_times:
        rr.set_time_seconds("sim_time", sim_time)

        # Log

        for plot_path in plot_paths:
            for series_path in series_paths:
                value = math.sin(random.uniform(0.0, math.pi))
                rr.log(f"{plot_path}/{series_path}", rr.TimeSeriesScalar(value))

        # Progress report

        total_num_scalars += num_series
        total_elapsed = time.time() - total_start_time
        if total_elapsed >= 1.0:
            print(
                f"logged {total_num_scalars} scalars over {round(total_elapsed, 3)}s "
                f"(freq={round(total_num_scalars/total_elapsed, 3)}Hz, expected={round(expected_total_freq, 3)}Hz, "
                f"load={round(max_load * 100.0, 3)}%)"
            )

            elapsed_debt = total_elapsed % 1  # just keep the fractional part
            total_start_time = time.time() - elapsed_debt
            total_num_scalars = 0
            max_load = 0.0

        # Throttle

        elapsed = time.time() - tick_start_time
        sleep_duration = time_per_tick - elapsed
        if sleep_duration > 0.0:
            sleep_start_time = time.time()
            time.sleep(sleep_duration)
            sleep_elapsed = time.time() - sleep_start_time

            # We will very likely be put to sleep for more than we asked for, and therefore need
            # to pay off that debt in order to meet our frequency goal.
            sleep_debt = sleep_elapsed - sleep_duration
            tick_start_time = time.time() - sleep_debt
        else:
            tick_start_time = time.time()

        max_load = max(max_load, elapsed / time_per_tick)

    total_elapsed = time.time() - total_start_time
    print(
        f"logged {total_num_scalars} scalars over {round(total_elapsed, 3)}s "
        f"(freq={round(total_num_scalars/total_elapsed, 3)}Hz, expected={round(expected_total_freq, 3)}Hz, "
        f"load={round(max_load * 100.0, 3)}%)"
    )

    rr.script_teardown(args)


if __name__ == "__main__":
    main()
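For reference, the `py-plot-dashboard` pixi task added above is just a thin wrapper around this file, so it can also be run directly (assuming a Python environment with `rerun-sdk` and `numpy` installed):
```
python tests/python/plot_dashboard_stress/main.py --num-plots 2 --num-series-per-plot 3 --freq 100
```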
14 changes: 14 additions & 0 deletions tests/rust/plot_dashboard_stress/Cargo.toml
@@ -0,0 +1,14 @@
[package]
name = "plot_dashboard_stress"
version = "0.12.0-alpha.1+dev"
edition = "2021"
rust-version = "1.72"
license = "MIT OR Apache-2.0"
publish = false

[dependencies]
rerun = { path = "../../../crates/rerun" }

anyhow = "1.0"
clap = { version = "4.0", features = ["derive"] }
rand = "0.8"
156 changes: 156 additions & 0 deletions tests/rust/plot_dashboard_stress/src/main.rs
@@ -0,0 +1,156 @@
//! Plot dashboard stress test.
//!
//! Usage:
//! ```text
//! just rs-plot-dashboard --help
//! ```
//!
//! Example:
//! ```text
//! just rs-plot-dashboard --num-plots 10 --num-series-per-plot 5 --num-points-per-series 5000 --freq 1000
//! ```
use rerun::external::re_log;

#[derive(Debug, clap::ValueEnum, Clone)]
enum Order {
    Forwards,
    Backwards,
    Random,
}

// TODO(cmc): could have flags to add attributes (color, radius...) to put some more stress
// on the line fragmenter.
#[derive(Debug, clap::Parser)]
#[clap(author, version, about)]
struct Args {
    #[command(flatten)]
    rerun: rerun::clap::RerunArgs,

    /// How many different plots?
    #[clap(long, default_value = "1")]
    num_plots: u64,

    /// How many series in each single plot?
    #[clap(long, default_value = "1")]
    num_series_per_plot: u64,

    /// How many points in each single series?
    #[clap(long, default_value = "10000")]
    num_points_per_series: u64,

    /// Frequency of logging (applies to all series).
    #[clap(long, default_value = "1000.0")]
    freq: f64,

    /// What order to log the data in (applies to all series).
    #[clap(long, value_enum, default_value = "forwards")]
    order: Order,
}

fn main() -> anyhow::Result<()> {
    re_log::setup_native_logging();

    use clap::Parser as _;
    let args = Args::parse();

    let (rec, _serve_guard) = args.rerun.init("rerun_example_plot_dashboard_stress")?;
    run(&rec, &args)
}

fn run(rec: &rerun::RecordingStream, args: &Args) -> anyhow::Result<()> {
    let plot_paths: Vec<_> = (0..args.num_plots).map(|i| format!("plot_{i}")).collect();
    let series_paths: Vec<_> = (0..args.num_series_per_plot)
        .map(|i| format!("series_{i}"))
        .collect();

    let num_series = args.num_plots * args.num_series_per_plot;
    let time_per_tick = 1.0 / args.freq;
    let expected_total_freq = args.freq * num_series as f64;

    use rand::Rng as _;
    let mut rng = rand::thread_rng();
    let uniform_pi = rand::distributions::Uniform::new(0f64, std::f64::consts::PI);

    let sim_times: Vec<i64> = match args.order {
        Order::Forwards => (0..args.num_points_per_series as i64).collect(),
        Order::Backwards => (0..args.num_points_per_series as i64).rev().collect(),
        Order::Random => {
            use rand::seq::SliceRandom as _;
            let mut sim_times: Vec<i64> = (0..args.num_points_per_series as i64).collect();
            sim_times.shuffle(&mut rng);
            sim_times
        }
    };

    let mut total_num_scalars = 0;
    let mut total_start_time = std::time::Instant::now();
    let mut max_load = 0.0;

    let mut tick_start_time = std::time::Instant::now();

    #[allow(clippy::unchecked_duration_subtraction)]
    for sim_time in sim_times {
        rec.set_time_sequence("sim_time", sim_time);

        // Log

        for plot_path in &plot_paths {
            for series_path in &series_paths {
                let value = rng.sample(uniform_pi).sin();
                rec.log(
                    format!("{plot_path}/{series_path}"),
                    &rerun::TimeSeriesScalar::new(value),
                )?;
            }
        }

        // Progress report

        total_num_scalars += num_series;
        let total_elapsed = total_start_time.elapsed();
        if total_elapsed.as_secs_f64() >= 1.0 {
            println!(
                "logged {total_num_scalars} scalars over {:?} (freq={:.3}Hz, expected={expected_total_freq:.3}Hz, load={:.3}%)",
                total_elapsed,
                total_num_scalars as f64 / total_elapsed.as_secs_f64(),
                max_load * 100.0,
            );

            let elapsed_debt =
                std::time::Duration::from_secs_f64(total_elapsed.as_secs_f64().fract());
            total_start_time = std::time::Instant::now() - elapsed_debt;
            total_num_scalars = 0;
            max_load = 0.0;
        }

        // Throttle

        let elapsed = tick_start_time.elapsed();
        let sleep_duration = time_per_tick - elapsed.as_secs_f64();
        if sleep_duration > 0.0 {
            let sleep_duration = std::time::Duration::from_secs_f64(sleep_duration);
            let sleep_start_time = std::time::Instant::now();
            std::thread::sleep(sleep_duration);

            // We will very likely be put to sleep for more than we asked for, and therefore need
            // to pay off that debt in order to meet our frequency goal.
            let sleep_debt = sleep_start_time.elapsed() - sleep_duration;
            tick_start_time = std::time::Instant::now() - sleep_debt;
        } else {
            tick_start_time = std::time::Instant::now();
        }

        max_load = f64::max(max_load, elapsed.as_secs_f64() / time_per_tick);
    }

    let total_elapsed = total_start_time.elapsed();
    println!(
        "logged {total_num_scalars} scalars over {:?} (freq={:.3}Hz, expected={expected_total_freq:.3}Hz, load={:.3}%)",
        total_elapsed,
        total_num_scalars as f64 / total_elapsed.as_secs_f64(),
        max_load * 100.0,
    );

    Ok(())
}
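Likewise, the `rs-plot-dashboard` pixi task is a thin wrapper around this crate, so an equivalent direct invocation is:
```
cargo run -p plot_dashboard_stress --release -- --num-plots 10 --num-series-per-plot 5 --num-points-per-series 5000 --freq 1000
```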
