| author | Raphael Dichler <raphael@dichler.com> | 2025-05-11 22:30:59 +0200 |
|---|---|---|
| committer | Raphael Dichler <raphael@dichler.com> | 2025-05-11 22:30:59 +0200 |
| commit | b8c6be36f890f9326dc96c155fb6fc0c8f1db2f2 (patch) | |
| tree | 3ffb0e0537143494c2614329fc0a0e0b17d70317 /archive | |
| parent | 6a4c54b0c2a348526138063efdb75b8bd775d17c (diff) | |
fix experiment discovery
Diffstat (limited to 'archive')
| -rw-r--r-- | archive/2024/winter/bsc_dichler/README.md | 30 |
| -rw-r--r-- | archive/2024/winter/bsc_dichler/scripts/experiments.py | 68 |
| -rwxr-xr-x[-rw-r--r--] | archive/2024/winter/bsc_dichler/scripts/main.py | 86 |
| -rw-r--r-- | archive/2024/winter/bsc_dichler/scripts/plot/__init__.py | 0 |
4 files changed, 125 insertions, 59 deletions
diff --git a/archive/2024/winter/bsc_dichler/README.md b/archive/2024/winter/bsc_dichler/README.md
index deb8718e3..91867bde2 100644
--- a/archive/2024/winter/bsc_dichler/README.md
+++ b/archive/2024/winter/bsc_dichler/README.md
@@ -61,23 +61,13 @@ The structure of the experiment follows this pattern:
 
 ## Adding Additional Experiments
 
-To add a custom experiment called `my-experiment`, follow these steps. Assume that the standard `--base` directory (`/experiments/`) is used.
-
-1. Create a new directory:
-   ```bash
-   cd <root-of-repo>/experiments/
-   mkdir my-experiment
-   ```
-
-2. Add a Makefile inside the directory (this will be called to perform any necessary work). For example, use `make` to compile and run your experiment.
-
-3. Add a Python script to evaluate the results of the benchmark.
-
-4. Add the name of your experiment to the literals specified in this file:
-   ```python
-   Experiments = Literal[ ... "my-experiment" ]
-   ```
-
-5. To add your analysis step, include `"my-experiment"` in the relevant section. How you evaluate the results is up to you, but you must add a function call to the `experiments` dictionary.
-
-Now you can run your experiment!
+To add a custom experiment called `my-experiment`, run the following command:
+
+```bash
+./scripts/main.py create --name my-experiment
+```
+
+This command sets up the necessary structure and adds the appropriate files required to run the experiment.
+- Modify the Makefile at `./experiments/my-experiment/Makefile` to compile and execute your experiment as needed.
+- Adjust the evaluation script located at `./scripts/plot/my-experiment.py`, which will be executed after the benchmark completes and the results have been copied.
+
diff --git a/archive/2024/winter/bsc_dichler/scripts/experiments.py b/archive/2024/winter/bsc_dichler/scripts/experiments.py
index 0077dbad2..c3779f7e1 100644
--- a/archive/2024/winter/bsc_dichler/scripts/experiments.py
+++ b/archive/2024/winter/bsc_dichler/scripts/experiments.py
@@ -3,12 +3,8 @@
 import matplotlib as mpl
 import matplotlib.pyplot as plt
 from pathlib import Path
-from plot import parallel_non_contiguous
-from plot import cas
-from plot import non_contiguous
-from plot import contiguous
-from plot import malloc
-from plot import contiguous_tagging
+import importlib
+import importlib.util
 
 Experiments = Literal[
     "cas",
@@ -19,22 +15,16 @@ Experiments = Literal[
     "parallel_non_contiguous",
 ]
 PlottingFunction = Callable[[Path, str], None]
+experiments: dict[str, PlottingFunction] | None = None
 
-experiments: dict[str, PlottingFunction] = {
-    "cas": lambda r, t: cas.plot(r, t),
-    "contiguous": lambda r, t: contiguous.plot(r, t),
-    "non_contiguous": lambda r, t: non_contiguous.plot(r, t),
-    "contiguous_tagging": lambda r, t: contiguous_tagging.plot(r, t),
-    "malloc": lambda r, t: malloc.plot(r, t),
-    "parallel_non_contiguous": lambda r, t: parallel_non_contiguous.plot(r, t),
-}
-
 
-def experiment_choices() -> list[str]:
-    return list(experiments.keys())
+def set_experiments(discovered_experiments: dict[str, PlottingFunction]):
+    global experiments
+    experiments = discovered_experiments
 
 
 def plot(output_root: Path, experiment: Experiments, format: Literal["pdf", "png"]):
+    assert experiment, "invalid state, experiment discovery was not executed"
     rcParams = {
         "font.family": "serif",
         "font.size": 11,
@@ -48,3 +38,47 @@ def plot(output_root: Path, experiment: Experiments, format: Literal["pdf", "png
     mpl.rcParams.update(rcParams)
 
     experiments[experiment](output_root, format)
+
+
+def verify_experiment(experiment_base: Path, experiment_name: str) -> None:
+    experiment_directory = experiment_base / Path(experiment_name)
+    if not experiment_directory.exists():
+        raise Exception(f"Invalid state - cannot find experiment '{experiment_name}'.")
+
+    makefile = experiment_directory / Path("Makefile")
+    if not makefile.exists():
+        raise Exception(
+            f"Invalid state - cannot find Makefile for '{experiment_name}'."
+        )
+
+    evaluation_script = (
+        Path(__file__).parent / Path("plot") / Path(f"{experiment_name}.py")
+    )
+    if not evaluation_script.exists():
+        raise Exception(
+            f"Invalid state - cannot find evaluation script for '{experiment_name}'."
+        )
+
+
+def discover_experiments(base: Path):
+    experiments = {}
+
+    evaluation_script = Path(__file__).parent / Path("plot")
+    for file in evaluation_script.glob("*.py"):
+        if file.name == "__init__.py":
+            continue
+        module_name = file.stem
+        spec = importlib.util.spec_from_file_location(module_name, file)
+        module = importlib.util.module_from_spec(spec)
+        spec.loader.exec_module(module)
+        if hasattr(module, "plot"):
+            experiments[module_name] = module.plot
+        else:
+            print("foo")
+
+    experiment_source = set()
+    for experiment in base.iterdir():
+        if experiment.is_dir():
+            experiment_source.add(experiment.name)
+
+    return {k: v for k, v in experiments.items() if k in experiment_source}
diff --git a/archive/2024/winter/bsc_dichler/scripts/main.py b/archive/2024/winter/bsc_dichler/scripts/main.py
index 330f8bdfd..7ce4392e7 100644..100755
--- a/archive/2024/winter/bsc_dichler/scripts/main.py
+++ b/archive/2024/winter/bsc_dichler/scripts/main.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/usr/bin/env python
 
 import argparse
 import asyncio
@@ -19,7 +19,6 @@ async def run(
     experiment: experiments.Experiments,
 ) -> None:
     info(str(ctx))
-    """
 
     async with ctx as client:
         info(f"Open new context ({client.context()})")
@@ -36,7 +35,6 @@ async def run(
         )
 
     info("Done running experiments")
-    """
 
     info("Start plotting measurements")
     experiments.plot(local_result_path, experiment, format="pdf")
@@ -115,6 +113,35 @@ def run_remote(args):
     )
 
 
+def run_create(args):
+    experiment = args.name
+    experiment_base = Path(args.base)
+    experiment_root = experiment_base / Path(experiment)
+    if experiment_root.exists():
+        error(f"Experiment with name '{experiment}' already exists")
+        sys.exit(1)
+
+    evaluation_script = Path(__file__).parent / Path("plot") / Path(f"{experiment}.py")
+    if evaluation_script.exists():
+        error(f"Experiment with name '{experiment}' already exists")
+        sys.exit(1)
+
+    experiment_root.mkdir(parents=True, exist_ok=True)
+    makefile = experiment_root / Path("Makefile")
+    makefile.write_text(("default:\n" "\techo Hello World\n"))
+
+    evaluation_script = Path(__file__).parent / Path("plot") / Path(f"{experiment}.py")
+    evaluation_script.write_text(
+        (
+            "from pathlib import Path\n\n"
+            "def plot(output_root: Path, format):\n"
+            "\tpass\n"
+        )
+    )
+
+    experiments.verify_experiment(experiment_base, experiment)
+
+
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
         prog="benchmark-tool",
@@ -129,35 +156,50 @@ if __name__ == "__main__":
         help="The base directory in which the experiments are implemented.",
     )
 
-    result_root = Path(__file__).parent.parent / Path("results/")
-    _ = parser.add_argument(
-        "--result",
-        default=str(result_root),
-        type=str,
-        help="The base directory in which all the results are placed.",
-    )
+    args, _ = parser.parse_known_args()
+    discovered_experiments = experiments.discover_experiments(Path(args.base))
+    choices = list(discovered_experiments.keys())
+    experiments.set_experiments(discovered_experiments)
+
+    def add_argument(p):
+        result_root = Path(__file__).parent.parent / Path("results/")
+        _ = p.add_argument(
+            "--result",
+            default=str(result_root),
+            type=str,
+            help="The base directory in which all the results are placed.",
+        )
 
-    _ = parser.add_argument(
-        "--experiment",
+        _ = p.add_argument(
+            "--cleanup",
+            action="store_true",
+            help="Cleanup all the resources of the context",
+        )
+        _ = p.add_argument(
+            "--experiment",
+            type=str,
+            choices=choices,
+            required=True,
+            help="The experiment to run",
+        )
+
+    subparsers = parser.add_subparsers(dest="command", required=True)
+    create = subparsers.add_parser("create", help="Run the experiment locally.")
+    create.add_argument(
+        "--name",
         type=str,
-        choices=experiments.experiment_choices(),
         required=True,
-        help="The experiment to run",
-    )
-
-    _ = parser.add_argument(
-        "--cleanup",
-        action="store_true",
-        help="Cleanup all the resources of the context",
+        help="The name of the new experiment",
    )
-
-    subparsers = parser.add_subparsers(dest="command", required=True)
+    create.set_defaults(func=run_create)
 
     local = subparsers.add_parser("local", help="Run the experiment locally.")
     local.set_defaults(func=run_local)
+    add_argument(local)
 
     remote = subparsers.add_parser("remote", help="Run the experiment remotely")
     remote.set_defaults(func=run_remote)
+    add_argument(remote)
 
     remote.add_argument(
         "--remote-user", help="Remote username (or set MTE_REMOTE_USER)"
diff --git a/archive/2024/winter/bsc_dichler/scripts/plot/__init__.py b/archive/2024/winter/bsc_dichler/scripts/plot/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/archive/2024/winter/bsc_dichler/scripts/plot/__init__.py
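For reference, the `discover_experiments` function added above follows the standard `importlib` pattern for loading a module from a file path and picking up its `plot` callable. The self-contained sketch below illustrates that pattern outside the repository; the directory name `plugins` and the attribute name `plot` are illustrative stand-ins, only the `importlib` calls mirror the committed code.

```python
# Minimal sketch of path-based plugin discovery with importlib.
# "plugins" and "plot" are placeholder names for this example.
import importlib.util
from pathlib import Path
from typing import Callable


def load_plot_functions(plugin_dir: Path) -> dict[str, Callable]:
    plugins: dict[str, Callable] = {}
    for file in plugin_dir.glob("*.py"):
        if file.name == "__init__.py":
            continue
        spec = importlib.util.spec_from_file_location(file.stem, file)
        if spec is None or spec.loader is None:
            continue  # not importable as a regular Python module
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)  # runs the module's top-level code
        plot = getattr(module, "plot", None)
        if callable(plot):
            plugins[file.stem] = plot
    return plugins


if __name__ == "__main__":
    print(sorted(load_plot_functions(Path("plugins"))))
```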
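The `main.py` change relies on a two-phase argparse setup: `parse_known_args()` reads `--base` before any subparser exists, so the discovered experiment names can be passed as `choices` when the subcommands are declared. A reduced sketch of that pattern, assuming a hypothetical `run` subcommand and hard-coded names in place of real discovery:

```python
# Two-phase argparse: parse one early option, then build the rest of the CLI
# from it. The "run" subcommand and the choices list are placeholders.
import argparse

parser = argparse.ArgumentParser(prog="benchmark-tool")
parser.add_argument("--base", default="experiments", type=str)

# First pass: tolerate anything not defined yet; only --base matters here.
early_args, _ = parser.parse_known_args()
print(f"discovering experiments under {early_args.base!r}")
choices = ["cas", "malloc"]  # placeholder result of that discovery

# Second pass: the full CLI, including options that depend on the first pass.
subparsers = parser.add_subparsers(dest="command", required=True)
run = subparsers.add_parser("run")
run.add_argument("--experiment", choices=choices, required=True)

args = parser.parse_args()
print(args.command, args.experiment)
```

Invoked as `benchmark-tool run --experiment cas`, the first pass passes the subcommand tokens through as unknown arguments and the second pass validates them against the discovered choices.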