Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Make benchmark graph extensible and add more bars #1080

Open
wants to merge 6 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
9 changes: 8 additions & 1 deletion crossbeam-channel/benchmarks/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,12 @@ Default configuration:

### Running

Install python packages:

```
pip install -r requirements.txt
```

Runs benchmarks, stores results into `*.txt` files, and generates `plot.png`:

```
Expand All @@ -28,7 +34,8 @@ Dependencies:
- Go
- Bash
- Python
- Matplotlib
- plotly (python graphing library)
- kaleido (image export engine for plotly)

### Results

Expand Down
272 changes: 148 additions & 124 deletions crossbeam-channel/benchmarks/plot.py
Original file line number Diff line number Diff line change
@@ -1,148 +1,172 @@
#!/usr/bin/env python3
import random

from __future__ import annotations

import sys
import matplotlib.pyplot as plt
from dataclasses import dataclass
from math import ceil
from pathlib import Path

import plotly.graph_objects as go
from plotly.subplots import make_subplots

# Number of columns in the subplot grid built by plot_all(); the number of
# rows is derived from it (ceil(len(labels) / COLUMN_LENGTH)).
COLUMN_LENGTH = 2


# Type aliases naming the fields of one parsed benchmark result line.
Pre = str  # test-name prefix identifying the benchmark group, e.g. "bounded0"
Test = str  # remainder of the test name after the prefix, e.g. "spsc"
Lang = str  # implementation language as printed by the runner, e.g. "Rust", "Go"
Impl = str  # implementation (crate/binary) name
Secs = float  # measured duration in seconds
BenchResult = tuple[Pre, Test, Lang, Impl, Secs]


@dataclass
class Label:
    """One benchmark group: its result-file prefix and its subplot title."""

    name: Pre  # prefix matched against BenchResult's first field
    description: str  # human-readable subplot title

def read_data(files):
results = []
for f in files:
with open(f) as f:
for line in f.readlines():
test, lang, impl, secs, _ = line.split()
splt = test.split('_')
results.append((splt[0], '_'.join(splt[1:]), lang, impl, float(secs)))

def read_data(files: list[str]) -> list[BenchResult]:
results: list[BenchResult] = []
for file in files:
with Path(file).open() as f:
for line in f:
temp, lang, impl, secs, _ = line.split()
pre, test = temp.split("_", maxsplit=1)
results.append(
(Pre(pre), Test(test), Lang(lang), Impl(impl), Secs(secs)),
)
return results


def get_scores(results: list[BenchResult], label: Label) -> dict[str, dict[Test, Secs]]:
    """Collect the timings belonging to *label*, keyed by display name.

    Args:
        results: all parsed benchmark results.
        label: the benchmark group to select (matched on ``label.name``).

    Returns:
        Mapping of implementation display name to ``{test: seconds}``.
    """
    grouped: dict[str, dict[Test, Secs]] = {}
    for pre, test, lang, impl, secs in results:
        if pre == label.name:
            # Rust implementations are shown bare; every other language
            # gets a "(lang)" suffix so the bars stay distinguishable.
            display = f"{impl} ({lang})" if lang != "Rust" else impl
            bucket = grouped.setdefault(display, {})
            bucket[test] = secs
    return grouped


# Palette of distinct named colors; get_color() pops entries from the end
# (dict.popitem is LIFO) as implementation names are first seen.
color_set: dict[str, str] = {
    "aqua": "#00ffff",
    "azure": "#f0ffff",
    "beige": "#f5f5dc",
    "black": "#000000",
    "blue": "#0000ff",
    "brown": "#a52a2a",
    "cyan": "#00ffff",
    "darkblue": "#00008b",
    "darkcyan": "#008b8b",
    "darkgrey": "#a9a9a9",
    "darkgreen": "#006400",
    "darkkhaki": "#bdb76b",
    "darkmagenta": "#8b008b",
    "darkolivegreen": "#556b2f",
    "darkorange": "#ff8c00",
    "darkorchid": "#9932cc",
    "darkred": "#8b0000",
    "darksalmon": "#e9967a",
    "darkviolet": "#9400d3",
    "fuchsia": "#ff00ff",
    "gold": "#ffd700",
    "green": "#008000",
    "indigo": "#4b0082",
    "khaki": "#f0e68c",
    "lightblue": "#add8e6",
    "lightcyan": "#e0ffff",
    "lightgreen": "#90ee90",
    "lightgrey": "#d3d3d3",
    "lightpink": "#ffb6c1",
    "lightyellow": "#ffffe0",
    "lime": "#00ff00",
    "magenta": "#ff00ff",
    "maroon": "#800000",
    "navy": "#000080",
    "olive": "#808000",
    "orange": "#ffa500",
    "pink": "#ffc0cb",
    "purple": "#800080",
    "red": "#ff0000",
}
# Cache of colors already handed out: name -> (color name, hex value).
saved_color: dict[str, tuple[str, str]] = {}


def get_color(name: str) -> str:
    """Return a stable hex color string for *name*, assigning one on first use.

    Colors are taken from ``color_set`` while it lasts. Previously this
    raised KeyError (``popitem`` on an empty dict) once more than 39
    implementations were plotted; now a deterministic fallback color is
    derived from the name itself, so the plot stays extensible.
    """
    if name not in saved_color:
        if color_set:
            saved_color[name] = color_set.popitem()
        else:
            # Palette exhausted: hash the name into a 24-bit RGB value.
            # Plain arithmetic (not hash()) keeps it stable across runs.
            value = 0
            for ch in name:
                value = (value * 31 + ord(ch)) % 0xFFFFFF
            saved_color[name] = (name, f"#{value:06x}")
    return saved_color[name][1]


def plot(
    scores: dict[str, dict[Test, Secs]],
    fig: go.Figure,
    row: int,
    column: int,
) -> None:
    """Draw one horizontal bar trace per implementation into a subplot.

    Args:
        scores: per-implementation ``{test: seconds}`` mappings.
        fig: the subplot figure being assembled.
        row: 1-based subplot row.
        column: 1-based subplot column.
    """
    for name, timings in scores.items():
        # Split the mapping into parallel test / duration sequences,
        # preserving insertion order.
        tests = list(timings)
        secs = [timings[t] for t in tests]
        trace = go.Bar(
            name=name,
            x=secs,
            y=tests,
            marker_color=get_color(name),
            orientation="h",
            text=name,
            constraintext="none",
            textposition="auto",
        )
        fig.add_trace(trace, row=row, col=column)


def plot_all(results: list[BenchResult], labels: list[Label]) -> None:
    """Render one subplot per label and write the figure to ``plot.png``.

    The grid has COLUMN_LENGTH columns; the row count and the image height
    grow with the number of labels and implementations, so any number of
    benchmark groups and bars can be plotted.
    """
    rows = ceil(len(labels) / COLUMN_LENGTH)
    fig = make_subplots(
        rows=rows,
        cols=COLUMN_LENGTH,
        subplot_titles=[label.description for label in labels],
        horizontal_spacing=0.1,
        vertical_spacing=0.1,
    )
    # Track the largest implementation count: it drives the figure height.
    max_length = 0
    for index, label in enumerate(labels):
        row, column = divmod(index, COLUMN_LENGTH)
        row, column = row + 1, column + 1  # plotly subplots are 1-based
        scores = get_scores(results, label)
        if len(scores) > max_length:
            max_length = len(scores)
        plot(scores, fig, row, column)
        fig.update_xaxes(title_text="seconds", row=row, col=column)
    fig.update_layout(
        showlegend=False,
        barmode="group",
        width=COLUMN_LENGTH * 1024,
        height=rows * max_length * 128,
    )
    fig.update_yaxes(categoryorder="category ascending")
    fig.write_image("plot.png")


def main() -> None:
    """Entry point: parse the result files named on argv and plot them."""
    # One Label per benchmark group; the prefix must match the first
    # underscore-separated component of the test names in the result files.
    labels = [
        Label(Pre("bounded0"), description="Bounded channel of capacity 0"),
        Label(Pre("bounded1"), description="Bounded channel of capacity 1"),
        Label(Pre("bounded"), description="Bounded channel of capacity N"),
        Label(Pre("unbounded"), description="Unbounded channel"),
    ]
    results = read_data(sys.argv[1:])
    plot_all(results, labels)


if __name__ == "__main__":
    main()
19 changes: 7 additions & 12 deletions crossbeam-channel/benchmarks/run.sh
Original file line number Diff line number Diff line change
Expand Up @@ -7,18 +7,13 @@ cargo run --release --bin crossbeam-channel | tee crossbeam-channel.txt
# Run the default set of benchmark binaries, capturing each one's stdout
# into a matching *.txt results file, then plot everything found.
cargo run --release --bin futures-channel | tee futures-channel.txt
cargo run --release --bin mpsc | tee mpsc.txt
cargo run --release --bin flume | tee flume.txt
go run go.go | tee go.txt

# These can also be run, but too many plot bars mess
# up the plot (they start to overlap). So only 5 contenders
# with the most tests are included by default.

# cargo run --release --bin atomicringqueue | tee atomicringqueue.txt
# cargo run --release --bin atomicring | tee atomicring.txt
# cargo run --release --bin bus | tee bus.txt
# cargo run --release --bin crossbeam-deque | tee crossbeam-deque.txt
# cargo run --release --bin lockfree | tee lockfree.txt
# cargo run --release --bin segqueue | tee segqueue.txt
# cargo run --release --bin mpmc | tee mpmc.txt

./plot.py ./*.txt