From 994fdabfdec6a5a97841571147b689bcd099dc34 Mon Sep 17 00:00:00 2001
From: Dmitrii Makarenko
Date: Fri, 22 Dec 2023 09:27:44 -0700
Subject: [PATCH] xsmm config draft

Adds a "torch_mlir_xsmm" compile mode end to end: CI workflow plumbing
(conda env selection, allowed compiler inputs, test matrix), the CLI
choice, and the benchmark compile path that lowers through torch-mlir
to the XSMM linalg-on-tensors backend.

NOTE(review): the original draft left the `opts = TestOptions(...)`
line commented out while still passing `opts` to jit() and to the
backend, which is a guaranteed NameError at runtime. Re-enabled the
assignment without the `debug_timer` kwarg that TestOptions rejects.
---
 .github/workflows/execute-test-script.yml |  2 +-
 .github/workflows/test-single-config.yml  |  1 +
 .github/workflows/test.yml                |  3 +-
 dl_bench/cli/launcher.py                  |  1 +
 dl_bench/utils.py                         | 45 +++++++++++++++++++++++
 5 files changed, 50 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/execute-test-script.yml b/.github/workflows/execute-test-script.yml
index 50a9a64..05c5bcf 100644
--- a/.github/workflows/execute-test-script.yml
+++ b/.github/workflows/execute-test-script.yml
@@ -82,7 +82,7 @@ jobs:
         shell: bash -el {0}
         run: |
           case "${{ inputs.compiler }}" in
-            torch_mlir)
+            torch_mlir | torch_mlir_xsmm)
               echo conda_env=mlir-dev >> $GITHUB_OUTPUT;;
             ipex)
               echo conda_env=ipex >> $GITHUB_OUTPUT;;
diff --git a/.github/workflows/test-single-config.yml b/.github/workflows/test-single-config.yml
index 1b9c180..c2c2a83 100644
--- a/.github/workflows/test-single-config.yml
+++ b/.github/workflows/test-single-config.yml
@@ -24,6 +24,7 @@ on:
           - torchscript_onednn
           - ipex
           - torch_mlir
+          - torch_mlir_xsmm
       tag:
         description: tag to label this run in DB
         required: true
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 8ce7f0a..84e2259 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -52,7 +52,8 @@ jobs:
           {device: 'cpu', compiler: 'torchscript_onednn'},
           {device: 'cpu', compiler: 'ipex'},
           # {device: 'xpu', compiler: 'ipex'},
-          {device: 'cpu', compiler: 'torch_mlir'}
+          {device: 'cpu', compiler: 'torch_mlir'},
+          {device: 'cpu', compiler: 'torch_mlir_xsmm'}
         ]
         test_script: ${{ fromJson(inputs.test_scripts) }}
       fail-fast: false
diff --git a/dl_bench/cli/launcher.py b/dl_bench/cli/launcher.py
index 76a8473..bd1580b 100644
--- a/dl_bench/cli/launcher.py
+++ b/dl_bench/cli/launcher.py
@@ -82,6 +82,7 @@ def parse_args():
             "torchscript_onednn",
             "ipex",
             "torch_mlir",
+            "torch_mlir_xsmm",
         ],
         help="Compilation mode to use. No compilation by default.",
     )
diff --git a/dl_bench/utils.py b/dl_bench/utils.py
index 6345e19..fdfea31 100644
--- a/dl_bench/utils.py
+++ b/dl_bench/utils.py
@@ -229,6 +229,51 @@ def eval(self):
             compiled_model = result()
             print("Compiled with torch_mlir")
 
+        elif compile_mode == "torch_mlir_xsmm":
+            from torch_mlir._dynamo_fx_importer import import_fx_graph_as_func
+            from torch_mlir_e2e_test.configs.torchdynamo import jit
+            from torch_mlir_e2e_test.framework import TestOptions
+
+            # from torch_mlir_e2e_test.linalg_on_tensors_backends.refbackend import RefBackendLinalgOnTensorsBackend
+            from torch_mlir_e2e_test.linalg_on_tensors_backends.xsmmprotobackend import (
+                XsmmProtoLinalgOnTensorsBackend,
+            )
+            import torch.utils._pytree as pytree
+
+            # debug_timer is not accepted by TestOptions here:
+            # TypeError: TestOptions.__init__() got an unexpected keyword argument 'debug_timer'
+            opts = TestOptions(use_kernels=True)
+            module = jit(
+                model,
+                [sample_input],
+                "test_name",
+                opts,
+                output_type="linalg-on-tensors",
+            )
+            backend = XsmmProtoLinalgOnTensorsBackend(opts)
+            # backend = RefBackendLinalgOnTensorsBackend()
+            module = backend.compile(module)
+            backend_module = backend.load(module)
+
+            params = {
+                **dict(model.named_parameters(remove_duplicate=False)),
+                **dict(model.named_buffers(remove_duplicate=False)),
+            }
+            params_flat, params_spec = pytree.tree_flatten(params)
+            params_flat = list(params_flat)
+
+            class result:
+                def __call__(self, *args):
+                    numpy_inputs = recursively_convert_to_numpy(params_flat + [*args])
+                    return refine_result_type(
+                        getattr(backend_module, model.__class__.__name__)(*numpy_inputs)
+                    )
+
+                def eval(self):
+                    pass
+
+            compiled_model = result()
+            print("Compiled with XSMM torch_mlir")
         else:
             raise ValueError(f"Unsupported mode {compile_mode}")
 