Updated LIF and IAF benchmarks #341

Draft: wants to merge 1 commit into main
107 changes: 11 additions & 96 deletions norse/benchmark/main.py → norse/benchmark/__main__.py
@@ -3,6 +3,7 @@
 import logging
 from pathlib import Path
 from typing import Callable
+import tqdm
 
 import numpy as np
 
@@ -15,6 +16,8 @@
 
 # pytype: disable=import-error
 from .benchmark import BenchmarkConfig, BenchmarkData, BenchmarkParameters
+from .lif_box import main as lif_box_main
+from .lif import main as lif_main
 
 # pytype: enable=import-error
 
@@ -28,7 +31,9 @@ def benchmark(
     Benchmarks a model with the given configurations
     """
     results = []
-    for features in range(config.start, config.stop, config.step):
+    for features in tqdm.tqdm(
+        range(config.start, config.stop, config.step), desc=f"{config.label} - Features"
+    ):
         parameters = BenchmarkParameters(
             device=config.device,
             dt=config.dt,
@@ -39,7 +44,7 @@
 
         durations = []
         try:
-            for _ in range(config.runs):
+            for _ in tqdm.tqdm(range(config.runs), desc="Runs", leave=False):
                 duration = model(parameters)
                 durations.append(duration)
                 # Clean up by GC and empty cache
@@ -86,43 +91,6 @@ def collect(data: BenchmarkData, label: str) -> dict:
     }
 
 
-def main(args):
-    # pytype: disable=import-error
-    if args.bindsnet:
-        import bindsnet_lif
-
-        run_benchmark(
-            args, bindsnet_lif.lif_feed_forward_benchmark, label="BindsNET_lif"
-        )
-    if args.genn:
-        import genn_lif
-
-        run_benchmark(args, genn_lif.lif_feed_forward_benchmark, label="GeNN_lif")
-    if args.norse:
-        import norse
-        from . import norse_lif
-
-        if args.profile:
-            import torch.autograd.profiler as profiler
-
-            with profiler.profile(
-                profile_memory=True, use_cuda=(args.device == "cuda")
-            ) as prof:
-                run_benchmark(
-                    args,
-                    norse_lif.lif_feed_forward_benchmark,
-                    label=f"Norse v{norse.__version__} lif",
-                )
-            prof.export_chrome_trace("trace.json")
-        else:
-            run_benchmark(
-                args,
-                norse_lif.lif_feed_forward_benchmark,
-                label=f"Norse v{norse.__version__} lif",
-            )
-    # pytype: enable=import-error
-
-
 def run_benchmark(args, function, label):
     config = BenchmarkConfig(
         batch_size=args.batch_size,
@@ -147,61 +115,8 @@ def run_benchmark(args, function, label):
 
 if __name__ == "__main__":
     parser = ArgumentParser("SNN library benchmarks")
-    parser.add_argument(
-        "--batch_size",
-        type=int,
-        default=32,
-        help="Number of data points per batch",
-    )
-    parser.add_argument(
-        "--start", type=int, default=250, help="Start of the number of inputs to sweep"
-    )
-    parser.add_argument(
-        "--step",
-        type=int,
-        default=250,
-        help="Steps in which to sweep over the number of inputs",
-    )
-    parser.add_argument(
-        "--stop", type=int, default=5001, help="Number of inputs to sweep to"
-    )
-    parser.add_argument(
-        "--sequence_length",
-        type=int,
-        default=1000,
-        help="Number of timesteps to simulate",
-    )
-    parser.add_argument("--dt", type=float, default=0.001, help="Simulation timestep")
-    parser.add_argument(
-        "--device",
-        type=str,
-        default="cuda",
-        choices=["cuda", "cpu"],
-        help="Device to use [cpu, cuda]",
-    )
-    parser.add_argument(
-        "--runs", type=int, default=5, help="Number of runs per simulation step"
-    )
-    parser.add_argument(
-        "--profile",
-        default=False,
-        action="store_true",
-        help="Profile Norse benchmark? (Only works for Norse)",
-    )
-    parser.add_argument(
-        "--bindsnet",
-        default=False,
-        action="store_true",
-        help="Benchmark Bindsnet?",
-    )
-    parser.add_argument(
-        "--genn", default=False, action="store_true", help="Benchmark GeNN?"
-    )
-    parser.add_argument(
-        "--norse",
-        default=False,
-        action="store_true",
-        help="Benchmark Norse?",
-    )
+    subparsers = parser.add_subparsers(help="Task types", required=True)
+    lif_main.init_parser(subparsers.add_parser("lif"))
+    lif_box_main.init_parser(subparsers.add_parser("lif_box"))
     args = parser.parse_args()
-    main(args)
+    args.func(args, run_benchmark)
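
The entry point now delegates to per-task subcommands: each task module registers its own flags through init_parser and supplies the function that args.func(args, run_benchmark) later invokes. The lif and lif_box modules are not part of this diff, so the following is only a sketch of the assumed contract, using argparse's standard set_defaults(func=...) pattern; the module path, flags, and defaults shown here are illustrative assumptions, not code from the PR.

    # Hypothetical sketch of a task module such as norse/benchmark/lif/__init__.py
    # (not shown in this diff); only the init_parser/args.func contract is assumed.
    from argparse import ArgumentParser


    def init_parser(parser: ArgumentParser) -> ArgumentParser:
        # Task-specific flags, standing in for the options removed from __main__.py
        parser.add_argument("--batch_size", type=int, default=32)
        parser.add_argument("--start", type=int, default=250)
        parser.add_argument("--stop", type=int, default=5001)
        parser.add_argument("--step", type=int, default=250)
        parser.add_argument("--device", type=str, default="cuda", choices=["cuda", "cpu"])
        # __main__.py calls args.func(args, run_benchmark), so the subcommand
        # registers its driver function as the parser default.
        parser.set_defaults(func=main)
        return parser


    def main(args, run_benchmark):
        # Dispatch to the selected backends, much like the removed top-level main():
        # import a backend module and hand its benchmark function to run_benchmark.
        from . import norse_lif  # assumption: a Norse backend module next to this file

        run_benchmark(args, norse_lif.lif_feed_forward_benchmark, label="norse_lif")

With the move from main.py to __main__.py, the suite is run as a module, for example python -m norse.benchmark lif (invocation shown as an example, not taken from the diff).
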
10 changes: 5 additions & 5 deletions norse/benchmark/docker/Dockerfile.bindsnet
@@ -1,4 +1,4 @@
-FROM nvidia/cuda:11.4.2-devel-ubuntu20.04
+FROM nvidia/cuda:11.7.0-devel-ubuntu20.04
 
 # Non-interactive
 ENV DEBIAN_FRONTEND noninteractive
@@ -10,8 +10,8 @@ RUN apt update && apt install -y \
 ENV CUDA_PATH=/usr/local/cuda
 ENV PATH=$PATH:$CUDA_PATH/bin
 
-# Install bindsnet
-RUN pip install git+https://github.com/BindsNET/bindsnet
+# Install torch
+RUN pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
 
-# Install correct version of torch
-RUN pip3 install torch==1.9.1+cu111 torchvision==0.10.1+cu111 torchaudio==0.9.1 -f https://download.pytorch.org/whl/torch_stable.html
+# Install bindsnet
+RUN pip install git+https://github.com/BindsNET/bindsnet
4 changes: 2 additions & 2 deletions norse/benchmark/docker/Dockerfile.genn
@@ -1,4 +1,4 @@
-FROM nvidia/cuda:11.4.2-devel-ubuntu20.04
+FROM nvidia/cuda:11.7.0-devel-ubuntu20.04
 
 RUN apt update && apt install -y \
 git python3-pip swig
@@ -15,6 +15,6 @@ ENV PATH=$PATH:/genn/bin
 # Install PyGeNN
 RUN env
 RUN make DYNAMIC=1 LIBRARY_DIRECTORY=/genn/pygenn/genn_wrapper -j 4
-RUN pip3 install numpy pandas matplotlib
+RUN pip3 install numpy pandas matplotlib tqdm
 RUN python3 setup.py develop

13 changes: 13 additions & 0 deletions norse/benchmark/docker/Dockerfile.lava
@@ -0,0 +1,13 @@
+FROM nvidia/cuda:11.7.0-devel-ubuntu20.04
+
+RUN apt update && apt install -y \
+git python3-pip
+
+# Install torch
+RUN pip3 install --upgrade pip
+RUN pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
+
+# Install lava-dl
+RUN pip install poetry pandas tqdm
+RUN git clone https://github.com/lava-nc/lava-dl
+RUN cd lava-dl && poetry install && pip3 install -e .
10 changes: 10 additions & 0 deletions norse/benchmark/docker/Dockerfile.nengo
@@ -0,0 +1,10 @@
+FROM nvidia/cuda:11.7.0-devel-ubuntu20.04
+
+RUN apt update && apt install -y \
+python3-pip
+
+# Install tensorflow
+RUN pip3 install tensorflow torch
+
+# Install nengo
+RUN pip install nengo-dl pandas
9 changes: 5 additions & 4 deletions norse/benchmark/docker/Dockerfile.norse
@@ -1,11 +1,12 @@
-FROM nvidia/cuda:11.4.2-devel-ubuntu20.04
+FROM nvidia/cuda:11.7.0-devel-ubuntu20.04
 
 RUN apt update && apt install -y \
 git python3-pip
 
+# Install torch
+RUN pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
+
 # Install norse
 RUN pip install --upgrade pip
+RUN pip install pandas
 RUN pip install git+https://github.com/norse/norse
-
-# Install correct version of torch
-RUN pip3 install torch==1.9.1+cu111 torchvision==0.10.1+cu111 torchaudio==0.9.1 -f https://download.pytorch.org/whl/torch_stable.html
11 changes: 11 additions & 0 deletions norse/benchmark/docker/Dockerfile.rockpool
@@ -0,0 +1,11 @@
+FROM nvidia/cuda:11.7.0-devel-ubuntu20.04
+
+RUN apt update && apt install -y \
+git python3-pip
+
+# Install torch
+RUN pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
+
+# Install rockpool
+RUN pip install --upgrade pip
+RUN pip install pandas rockpool
11 changes: 11 additions & 0 deletions norse/benchmark/docker/Dockerfile.spikingjelly
@@ -0,0 +1,11 @@
+FROM nvidia/cuda:11.7.0-devel-ubuntu20.04
+
+RUN apt update && apt install -y \
+git python3-pip
+
+# Install torch
+RUN pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
+
+# Install spiking jelly
+RUN pip install --upgrade pip
+RUN pip install pandas spikingjelly
@@ -8,7 +8,7 @@
 from bindsnet.encoding import poisson
 from bindsnet.network.monitors import Monitor
 
-from benchmark import BenchmarkParameters
+from ..benchmark import BenchmarkParameters
 
 # pytype: enable=import-error
 
@@ -9,7 +9,7 @@
 from pygenn.genn_model import GeNNModel, init_var
 from pygenn.genn_wrapper import NO_DELAY
 
-from benchmark import BenchmarkParameters
+from norse.benchmark.benchmark import BenchmarkParameters
 
 
 def lif_feed_forward_benchmark(parameters: BenchmarkParameters):
59 changes: 59 additions & 0 deletions norse/benchmark/lif/lava_lif.py
@@ -0,0 +1,59 @@
+import time
+import torch
+import traceback
+
+from lava.lib.dl.slayer.neuron import cuba
+from norse.torch.module.encode import PoissonEncoder
+
+# pytype: disable=import-error
+from ..benchmark import BenchmarkParameters
+
+# pytype: enable=import-error
+
+
+class LIFBenchmark(torch.nn.Module):
+    def __init__(self, parameters):
+        super().__init__()
+        self.fc = torch.nn.Linear(parameters.features, parameters.features, bias=False)
+        self.neuron = cuba.Neuron(
+            threshold=1.0,
+            current_decay=0.9,
+            voltage_decay=0.9,
+            persistent_state=False,
+            shared_param=False,
+        )
+
+    def forward(self, input_spikes: torch.Tensor):
+        x = self.fc(input_spikes)
+        x = x.permute(1, 2, 0)  # BNT
+        return self.neuron(x)
+
+
+def lif_feed_forward_benchmark(parameters: BenchmarkParameters):
+    with torch.no_grad():
+        model = LIFBenchmark(parameters).to(parameters.device)
+        input_sequence = torch.randn(
+            parameters.sequence_length,
+            parameters.batch_size,
+            parameters.features,
+            device=parameters.device,
+        )
+
+        # Warmup model
+        for _ in range(2):
+            _ = model(input_sequence)
+
+        # Set real data
+        poisson_data = PoissonEncoder(parameters.sequence_length, dt=parameters.dt)(
+            0.6
+            * torch.ones(
+                parameters.batch_size, parameters.features, device=parameters.device
+            )
+        ).contiguous()
+
+        # Start recording
+        start = time.time()
+        model(poisson_data)
+        end = time.time()
+        duration = end - start
+        return duration
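
For reference, lif_feed_forward_benchmark is normally driven by benchmark()/run_benchmark() in __main__.py with a swept feature count. A standalone call would look roughly like the sketch below; the keyword fields mirror the attributes used in this file, but the exact constructor signature of BenchmarkParameters is an assumption, and the values are illustrative, not taken from the diff.

    # Hypothetical direct invocation of the Lava benchmark for one configuration.
    from norse.benchmark.benchmark import BenchmarkParameters
    from norse.benchmark.lif.lava_lif import lif_feed_forward_benchmark

    params = BenchmarkParameters(
        device="cuda",  # or "cpu"
        dt=0.001,
        features=250,
        batch_size=32,
        sequence_length=1000,
    )
    print(f"Forward pass took {lif_feed_forward_benchmark(params):.3f} s")

The returned duration is the wall-clock time of a single forward pass over the Poisson-encoded input; within the suite, benchmark() repeats this config.runs times for each feature count.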