This repository has been archived by the owner on Jul 24, 2024. It is now read-only.

xsmm config draft #52

Closed · wants to merge 1 commit
2 changes: 1 addition & 1 deletion .github/workflows/execute-test-script.yml
@@ -82,7 +82,7 @@ jobs:
         shell: bash -el {0}
         run: |
           case "${{ inputs.compiler }}" in
-            torch_mlir)
+            torch_mlir | torch_mlir_xsmm)
               echo conda_env=mlir-dev >> $GITHUB_OUTPUT;;
             ipex)
               echo conda_env=ipex >> $GITHUB_OUTPUT;;
1 change: 1 addition & 0 deletions .github/workflows/test-single-config.yml
@@ -24,6 +24,7 @@ on:
           - torchscript_onednn
           - ipex
           - torch_mlir
+          - torch_mlir_xsmm
       tag:
         description: tag to label this run in DB
         required: true
3 changes: 2 additions & 1 deletion .github/workflows/test.yml
@@ -52,7 +52,8 @@ jobs:
           {device: 'cpu', compiler: 'torchscript_onednn'},
           {device: 'cpu', compiler: 'ipex'},
           # {device: 'xpu', compiler: 'ipex'},
-          {device: 'cpu', compiler: 'torch_mlir'}
+          {device: 'cpu', compiler: 'torch_mlir'},
+          {device: 'cpu', compiler: 'torch_mlir_xsmm'}
         ]
         test_script: ${{ fromJson(inputs.test_scripts) }}
       fail-fast: false
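For orientation (not part of the PR): GitHub Actions expands the strategy matrix above as the cross product of config and test_script, so this change adds one extra CPU job per test script. A minimal Python sketch of that fan-out, with hypothetical script names:

from itertools import product

configs = [
    {"device": "cpu", "compiler": "torchscript_onednn"},
    {"device": "cpu", "compiler": "ipex"},
    {"device": "cpu", "compiler": "torch_mlir"},
    {"device": "cpu", "compiler": "torch_mlir_xsmm"},
]
test_scripts = ["run_mlp.sh", "run_resnet.sh"]  # hypothetical placeholders

# One job per (config, test_script) pair, mirroring the matrix expansion.
for config, script in product(configs, test_scripts):
    print(f"{config['compiler']} on {config['device']} -> {script}")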
1 change: 1 addition & 0 deletions dl_bench/cli/launcher.py
@@ -82,6 +82,7 @@ def parse_args():
"torchscript_onednn",
"ipex",
"torch_mlir",
"torch_mlir_xsmm",
],
help="Compilation mode to use. No compilation by default.",
)
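For context, a minimal sketch (not from the PR) of how this choices list feeds argparse inside parse_args(); the option name is not visible in this hunk, so "--compile" below is a hypothetical placeholder.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--compile",  # hypothetical flag name; the real one is defined above this hunk
    default=None,  # no compilation by default
    choices=[
        "torchscript_onednn",
        "ipex",
        "torch_mlir",
        "torch_mlir_xsmm",  # the value added by this PR
    ],
    help="Compilation mode to use. No compilation by default.",
)
args = parser.parse_args(["--compile", "torch_mlir_xsmm"])
print(args.compile)  # torch_mlir_xsmm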
46 changes: 46 additions & 0 deletions dl_bench/utils.py
@@ -229,6 +229,52 @@ def eval(self):

             compiled_model = result()
             print("Compiled with torch_mlir")
+        elif compile_mode == "torch_mlir_xsmm":
+            from torch_mlir._dynamo_fx_importer import import_fx_graph_as_func
+            from torch_mlir_e2e_test.configs.torchdynamo import jit
+            from torch_mlir_e2e_test.framework import TestOptions
+
+            # from torch_mlir_e2e_test.linalg_on_tensors_backends.refbackend import RefBackendLinalgOnTensorsBackend
+            from torch_mlir_e2e_test.linalg_on_tensors_backends.xsmmprotobackend import (
+                XsmmProtoLinalgOnTensorsBackend,
+            )
+            import torch.utils._pytree as pytree
+
+            # debug_timer seems to cause problems:
+            # TypeError: TestOptions.__init__() got an unexpected keyword argument 'debug_timer'
+            # opts = TestOptions(debug_timer=False, use_kernels=True)
+            opts = TestOptions()
+            module = jit(
+                model,
+                [sample_input],
+                "test_name",
+                opts,
+                output_type="linalg-on-tensors",
+            )
+            backend = XsmmProtoLinalgOnTensorsBackend(opts)
+            # backend = RefBackendLinalgOnTensorsBackend()
+            module = backend.compile(module)
+            backend_module = backend.load(module)
+
+            params = {
+                **dict(model.named_parameters(remove_duplicate=False)),
+                **dict(model.named_buffers(remove_duplicate=False)),
+            }
+            params_flat, params_spec = pytree.tree_flatten(params)
+            params_flat = list(params_flat)
+
+            class result:
+                def __call__(self, *args):
+                    numpy_inputs = recursively_convert_to_numpy(params_flat + [*args])
+                    return refine_result_type(
+                        getattr(backend_module, model.__class__.__name__)(*numpy_inputs)
+                    )
+
+                def eval(self):
+                    pass
+
+            compiled_model = result()
+            print("Compiled with XSMM torch_mlir")
         else:
             raise ValueError(f"Unsupported mode {compile_mode}")
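The wrapper above leans on two helpers, recursively_convert_to_numpy and refine_result_type, which are defined elsewhere in dl_bench/utils.py and already used by the existing torch_mlir path. A hedged sketch of what they plausibly do (an assumption, not the repository's actual code): tensors are converted to numpy before the call because the compiled torch-mlir module consumes numpy buffers, and results are converted back so callers see ordinary tensors.

import numpy as np
import torch


def recursively_convert_to_numpy(obj):
    # Sketch: walk nested containers and replace every torch.Tensor with a numpy array.
    if isinstance(obj, torch.Tensor):
        return obj.detach().cpu().numpy()
    if isinstance(obj, (list, tuple)):
        return type(obj)(recursively_convert_to_numpy(x) for x in obj)
    if isinstance(obj, dict):
        return {k: recursively_convert_to_numpy(v) for k, v in obj.items()}
    return obj  # ints, floats, strings pass through unchanged


def refine_result_type(result):
    # Sketch: mirror of the above, turning numpy outputs back into torch tensors.
    if isinstance(result, np.ndarray):
        return torch.from_numpy(result)
    if isinstance(result, (list, tuple)):
        return type(result)(refine_result_type(x) for x in result)
    return result

With those in place, compiled_model(sample_input) behaves like the eager model: parameters and buffers are flattened once, prepended to the call arguments, converted to numpy, dispatched to the XSMM-compiled entry point named after the model class, and the outputs are converted back to tensors.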
