This repository has been archived by the owner on Mar 30, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 17
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Add experimental support for GPTQ models (#50)
Signed-off-by: Hung-Han (Henry) Chen <[email protected]>
- Loading branch information
Commit 28d515d, with 1 parent commit c1fa9ba.
Showing 5 changed files with 107 additions and 2 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,46 @@ | ||
# Builds Dockerfile.gptq and pushes the GPTQ variant image to GHCR
# whenever relevant files change on main.
name: Build and Push GPTQ Image to Github Container Registry

on:
  push:
    branches:
      - main
    paths:
      - '**.py'
      - 'requirements.txt'
      - 'Dockerfile.gptq'
      - '.github/workflows/gptq_image.yaml'

env:
  REGISTRY: ghcr.io
  GPTQ_IMAGE_NAME: ialacol-gptq

jobs:
  gptq_image_to_gcr:
    runs-on: ubuntu-latest
    # Least-privilege token: read the repo, write packages (image push).
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          # Must include the owner segment so the generated labels refer to
          # the same image name as the tags pushed below.
          images: ${{ env.REGISTRY }}/${{ github.repository_owner }}/${{ env.GPTQ_IMAGE_NAME }}
      - name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          file: ./Dockerfile.gptq
          push: true
          tags: |
            ${{ env.REGISTRY }}/${{ github.repository_owner }}/${{ env.GPTQ_IMAGE_NAME }}:${{ github.sha }}
            ${{ env.REGISTRY }}/${{ github.repository_owner }}/${{ env.GPTQ_IMAGE_NAME }}:latest
          labels: ${{ steps.meta.outputs.labels }}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,11 @@ | ||
# syntax=docker/dockerfile:1

# Runtime image for ialacol with experimental GPTQ model support.
FROM python:3.11-slim

WORKDIR /app

# Install base dependencies first so this layer stays cached until
# requirements.txt itself changes.
COPY requirements.txt requirements.txt
RUN pip3 install --no-cache-dir -r requirements.txt

# GPTQ extra for ctransformers: https://github.com/marella/ctransformers#gptq
# Quoted so the brackets are not expanded as a shell glob.
RUN pip3 install --no-cache-dir "ctransformers[gptq]"

# Application source is copied last — it changes most often.
COPY . .

# Documentation only; the port is published at `docker run` / by the chart.
EXPOSE 8000
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,6 +1,6 @@ | ||
# Helm chart metadata after this commit: app and chart bumped 0.8.0 -> 0.9.0.
apiVersion: v2
appVersion: 0.9.0
description: A Helm chart for ialacol
name: ialacol
type: application
version: 0.9.0
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,30 @@ | ||
# Example Helm values for running ialacol with a GPTQ model.
replicas: 1

deployment:
  # GPTQ-enabled image built from Dockerfile.gptq.
  image: ghcr.io/chenhunghan/ialacol-gptq:latest
  env:
    # Hugging Face repo and model file downloaded on first start.
    DEFAULT_MODEL_HG_REPO_ID: TheBloke/Llama-2-7b-Chat-GPTQ
    DEFAULT_MODEL_FILE: gptq_model-4bit-128g.safetensors
    MODEL_TYPE: "gptq"

resources:
  {}

# Persistent volume for the download cache.
cache:
  persistence:
    size: 5Gi
    accessModes:
      - ReadWriteOnce
    # ~ (null) uses the cluster's default StorageClass.
    storageClassName: ~
cacheMountPath: /app/cache

# Persistent volume for downloaded model weights.
model:
  persistence:
    size: 5Gi
    accessModes:
      - ReadWriteOnce
    storageClassName: ~
modelMountPath: /app/models

service:
  type: ClusterIP
  # Matches the port uvicorn listens on inside the container.
  port: 8000
  annotations: {}

nodeSelector: {}
tolerations: []
affinity: {}