Skip to content

Commit

Permalink
Fix the supported execution providers (EPs) and devices for AutoAWQQuantizer and GptqQuantizer (#1571)
Browse files Browse the repository at this point in the history
## Describe your changes

Fix the supported execution providers (EPs) and devices for the AutoAWQQuantizer and GptqQuantizer passes: both previously advertised only `CPUExecutionProvider`/`cpu`, which incorrectly blocked them on other accelerators, so they are widened to `"*"`.

## Checklist before requesting a review
- [ ] Add unit tests for this change.
- [ ] Make sure all tests can pass.
- [ ] Update documents if necessary.
- [ ] Lint and apply fixes to your code by running `lintrunner -a`
- [ ] Is this a user-facing change? If yes, give a description of this
change to be included in the release notes.
- [ ] Is this PR including examples changes? If yes, please remember to
update [example
documentation](https://github.com/microsoft/Olive/blob/main/docs/source/examples.md)
in a follow-up PR.

## (Optional) Issue link
  • Loading branch information
xiaoyu-work authored Feb 7, 2025
1 parent 353daf5 commit 5748965
Showing 1 changed file with 8 additions and 8 deletions.
16 changes: 8 additions & 8 deletions olive/olive_config.json
Original file line number Diff line number Diff line change
Expand Up @@ -214,29 +214,29 @@
},
"OpenVINOConversion": {
"module_path": "olive.passes.openvino.conversion.OpenVINOConversion",
"supported_providers": [ "OpenVINOExecutionProvider" ],
"supported_accelerators": [ "cpu", "gpu", "npu" ],
"supported_providers": [ "*" ],
"supported_accelerators": [ "*" ],
"supported_precisions": [ "*" ],
"extra_dependencies": [ "openvino" ]
},
"OpenVINOQuantization": {
"module_path": "olive.passes.openvino.quantization.OpenVINOQuantization",
"supported_providers": [ "OpenVINOExecutionProvider" ],
"supported_accelerators": [ "cpu", "gpu", "npu" ],
"supported_providers": [ "*" ],
"supported_accelerators": [ "*" ],
"supported_precisions": [ "*" ],
"extra_dependencies": [ "openvino" ]
},
"AutoAWQQuantizer": {
"module_path": "olive.passes.pytorch.autoawq.AutoAWQQuantizer",
"supported_providers": [ "CPUExecutionProvider" ],
"supported_accelerators": [ "cpu" ],
"supported_providers": [ "*" ],
"supported_accelerators": [ "*" ],
"supported_precisions": [ "int4", "int8", "int16", "uint4", "uint8", "uint16" ],
"module_dependencies": [ "autoawq" ]
},
"GptqQuantizer": {
"module_path": "olive.passes.pytorch.gptq.GptqQuantizer",
"supported_providers": [ "CPUExecutionProvider" ],
"supported_accelerators": [ "cpu" ],
"supported_providers": [ "*" ],
"supported_accelerators": [ "*" ],
"supported_precisions": [ "int4", "int8", "int16", "uint4", "uint8", "uint16" ],
"module_dependencies": [ "auto-gptq", "optimum" ]
},
Expand Down

0 comments on commit 5748965

Please sign in to comment.