-
Notifications
You must be signed in to change notification settings - Fork 6
127 lines (115 loc) · 4.15 KB
/
build-windows.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
# CI workflow: builds BodhiApp on Windows, runs Rust coverage and UI tests,
# and uploads both coverage reports to Codecov.
name: Windows Build Flow

on:
  push:
    branches:
      - main
  workflow_dispatch:

env:
  # Quoted so consumers always receive strings, not YAML booleans/ints.
  CI: "true"
  RUST_BACKTRACE: "1"

jobs:
  build-windows:
    timeout-minutes: 40
    strategy:
      fail-fast: false
      matrix:
        include:
          - platform: windows-latest
            target: x86_64-pc-windows-msvc
    runs-on: ${{ matrix.platform }}
    # NOTE(review): `if: false` permanently disables this job — confirm this
    # is an intentional kill-switch and not a leftover from debugging.
    if: false
    steps:
      - name: Set environment variables
        shell: pwsh
        run: |
          # Export HOME for later steps (used by the HF cache path below).
          echo "USER_HOME=${HOME}" >> $env:GITHUB_ENV

      - name: Enable symlinks
        shell: pwsh
        run: |
          # Windows checkouts need explicit symlink support and LF endings.
          git config --global core.symlinks true
          git config --global core.autocrlf false
          git config --global core.eol lf

      - uses: actions/checkout@v4

      - name: Setup Git with PAT
        shell: pwsh
        run: |
          # Rewrite SSH submodule URLs to HTTPS+PAT so private submodules
          # clone non-interactively in CI; the rewrite is removed afterwards
          # so the PAT does not linger in global git config.
          git config --global url.https://gh_pat:${{ secrets.GH_PAT }}@github.com/.insteadOf git@github.com:
          git submodule sync --recursive
          git submodule update --init --recursive --depth=1
          git config --global --unset url.https://gh_pat:${{ secrets.GH_PAT }}@github.com/.insteadOf

      - name: Setup Python packages
        shell: pwsh
        run: |
          & python -m pip install -U pip
          & python -m pip install -r crates/objs/tests/scripts/requirements.txt
          & python -m pip install -U huggingface_hub

      - name: Cache HuggingFace models
        uses: actions/cache@v4
        id: cache-hf
        with:
          # NOTE(review): cache path derives from $HOME while the download
          # step below uses $env:USERPROFILE — identical on hosted Windows
          # runners, but verify if self-hosted runners are ever used.
          path: ${{ env.USER_HOME }}\.cache\huggingface
          key: hf-cache-Windows-llama2-7b-chat

      - name: Check and Download Llama model
        # Only download the ~4 GB model on a cache miss.
        if: steps.cache-hf.outputs.cache-hit != 'true'
        shell: pwsh
        run: |
          $HF_PATH = Join-Path $env:USERPROFILE ".cache\huggingface"
          $SNAPSHOT_DIR = Join-Path $HF_PATH "hub\models--TheBloke--Llama-2-7B-Chat-GGUF\snapshots\191239b3e26b2882fb562ffccdd1cf0f65402adb"
          New-Item -ItemType Directory -Force -Path $SNAPSHOT_DIR
          $MODEL_PATH = Join-Path $SNAPSHOT_DIR "llama-2-7b-chat.Q4_K_M.gguf"
          if (-not (Test-Path $MODEL_PATH)) {
            $MODEL_URL = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/191239b3e26b2882fb562ffccdd1cf0f65402adb/llama-2-7b-chat.Q4_K_M.gguf?download=true"
            curl.exe -L $MODEL_URL -o $MODEL_PATH
          }

      - name: Download tokenizer config
        shell: pwsh
        run: |
          & huggingface-cli download --revision main --token ${{ secrets.CI_HF_TOKEN }} meta-llama/Llama-2-13b-chat-hf tokenizer_config.json

      - name: Windows Setup
        uses: ./.github/actions/setup-win

      - name: setup-rust
        uses: ./.github/actions/setup-rust
        with:
          platform: ${{ matrix.platform }}
          target: ${{ matrix.target }}

      - name: setup-node
        uses: ./.github/actions/setup-node
        with:
          platform: ${{ matrix.platform }}

      - name: Clean
        shell: pwsh
        run: |
          make -f Makefile.win.mk ci.clean

      - name: Generate code coverage
        shell: pwsh
        run: make -f Makefile.win.mk ci.coverage
        env:
          HF_TEST_TOKEN_ALLOWED: ${{ secrets.HF_TEST_TOKEN_ALLOWED }}
          HF_TEST_TOKEN_PUBLIC: ${{ secrets.HF_TEST_TOKEN_PUBLIC }}

      - name: Upload Rust coverage reports to Codecov
        # NOTE(review): the original ref was obfuscation-garbled
        # ("codecov/[email protected]"); restored to the codecov-action —
        # confirm the pinned major version against the repo history.
        uses: codecov/codecov-action@v4
        if: success()
        # Coverage upload failures should not fail the build.
        continue-on-error: true
        with:
          name: BodhiApp-${{ matrix.target }}
          token: ${{ secrets.CODECOV_TOKEN }}
          slug: BodhiSearch/BodhiApp
          files: lcov.info
          fail_ci_if_error: false
          flags: ${{ matrix.target }},bodhi

      - name: Run UI Tests
        shell: pwsh
        run: |
          make -f Makefile.win.mk ci.ui

      # Distinct step name from the Rust upload above for readable run logs.
      - name: Upload UI coverage reports to Codecov
        uses: codecov/codecov-action@v4
        if: success()
        continue-on-error: true
        with:
          name: BodhiApp-${{ matrix.target }}
          token: ${{ secrets.CODECOV_TOKEN }}
          slug: BodhiSearch/BodhiApp
          files: |
            crates/bodhi/coverage/coverage-final.json
            crates/bodhi/coverage/clover.xml
          fail_ci_if_error: false
          flags: ${{ matrix.target }},bodhi,ui