Run MLP test in one configuration #214
name: Run MLP test in one configuration

on:
  workflow_dispatch:
    inputs:
      device:
        description: Device on which to execute the test
        required: true
        default: cpu
        type: choice
        options:
          - cpu
          - xpu
          - cuda
      compiler:
        description: JIT compiler to use for the test
        required: true
        default: torch_mlir
        type: choice
        options:
          - torch
          - dynamo
          - torch_mlir
          - torch_mlir_xsmm
          - torchscript
          - torchscript_onednn
          - ipex
          - ipex_onednn_graph
      tag:
        description: Tag to label this run in the DB
        required: true
        default: "test"
      torch_mlir_repo:
        description: Torch-MLIR repository on GitHub
        required: true
        default: intel-ai/torch-mlir
        type: string
      torch_mlir_branch:
        description: Torch-MLIR branch to check out
        required: true
        default: cpu-proto
      runner_type:
        description: Runner type to use
        required: false
        default: spr
        type: choice
        options:
          - spr
          - amd32c
          - amd64c
          - nvidia
      shutdown_cloud_runner:
        description: Shut down the cloud runner after the tests finish
        default: true
        type: boolean
      test_scripts:
        description: Test scripts to execute
        required: false
        default: '["mlp.sh", "cnn.sh", "llm.sh"]'
        type: string
jobs:
  # Echo the dispatch inputs so they are visible in the run log.
  print_inputs:
    runs-on: Linux
    steps:
      - name: Print Inputs
        run: echo "${{ toJSON(github.event.inputs) }}"

  # Fan out over the test_scripts matrix and run each script through the
  # reusable execute-test-script.yml workflow.
  mlp_test:
    strategy:
      matrix:
        test_script: ${{ fromJson(inputs.test_scripts) }}
      fail-fast: false
    uses: ./.github/workflows/execute-test-script.yml
    with:
      compiler: ${{ inputs.compiler }}
      device: ${{ inputs.device }}
      tag: ${{ inputs.tag }}
      torch_mlir_repo: ${{ inputs.torch_mlir_repo }}
      torch_mlir_branch: ${{ inputs.torch_mlir_branch }}
      runner_type: ${{ inputs.runner_type }}
      test_script: ${{ matrix.test_script }}
    secrets:
      DB_URL: ${{ secrets.DB_URL }}
      HF_TOKEN: ${{ secrets.HF_TOKEN }}

  # Power off cloud runners (anything other than the local spr machines)
  # a couple of minutes after the tests complete, if requested.
  shutdown:
    needs: mlp_test
    if: ${{ !contains(inputs.runner_type, 'spr') && inputs.shutdown_cloud_runner }}
    runs-on: ${{ inputs.runner_type }}
    steps:
      - name: shutdown
        shell: bash -el {0}
        run: sudo shutdown -h +2
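
Because the workflow is triggered only by workflow_dispatch, a run can also be started from the GitHub CLI instead of the web UI. A minimal sketch, assuming the file is stored as .github/workflows/run-mlp-test.yml (hypothetical path; substitute the actual file name in the repository):

  # Dispatch one run on the default branch; unspecified inputs keep the
  # defaults declared above (device=cpu, compiler=torch_mlir, tag=test, ...).
  gh workflow run run-mlp-test.yml \
    -f device=cpu \
    -f compiler=torch_mlir \
    -f tag=test \
    -f runner_type=spr \
    -f shutdown_cloud_runner=false

test_scripts expects a JSON array string, so overriding it looks like -f test_scripts='["mlp.sh"]'.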