-
Notifications
You must be signed in to change notification settings - Fork 41
313 lines (288 loc) · 17 KB
/
llama.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
# CI workflow: builds and smoke-runs the wasmedge-ggml examples on every push/PR
# touching this workflow or wasmedge-ggml/**, nightly on schedule, and on demand.
name: ggml llama2 examples

on:
  schedule:
    # nightly at 00:00 UTC
    - cron: "0 0 * * *"
  workflow_dispatch:
    inputs:
      logLevel:
        description: 'Log level'
        required: true
        default: 'info'
  push:
    branches: [ '*' ]
    paths:
      - ".github/workflows/llama.yml"
      - "wasmedge-ggml/**"
  pull_request:
    branches: [ '*' ]
    paths:
      - ".github/workflows/llama.yml"
      - "wasmedge-ggml/**"
  merge_group:

jobs:
  build:
    strategy:
      matrix:
        runner: [ubuntu-latest, macos-m1]
        wasmedge: ["0.14.1"]
        plugin: [wasi_nn-ggml]
        # Each entry below becomes one matrix job: it defines the example's
        # named step, executed at the end of the shared step list via
        # `${{ matrix.job.run }}`.
        job:
          - name: "Tiny Llama"
            shell: bash
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/llama
              curl -LO https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v0.3-GGUF/resolve/main/tinyllama-1.1b-chat-v0.3.Q5_K_M.gguf
              cargo build --target wasm32-wasip1 --release
              time wasmedge --dir .:. \
                --env n_gpu_layers="$NGL" \
                --nn-preload default:GGML:AUTO:tinyllama-1.1b-chat-v0.3.Q5_K_M.gguf \
                target/wasm32-wasip1/release/wasmedge-ggml-llama.wasm \
                default \
                $'<|im_start|>system\nYou are an AI assistant<|im_end|>\n<|im_start|>user\nWhere is the capital of Japan?<|im_end|>\n<|im_start|>assistant'
          - name: Gemma 2B
            shell: bash
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/gemma
              curl -LO https://huggingface.co/second-state/Gemma-2b-it-GGUF/resolve/main/gemma-2b-it-Q5_K_M.gguf
              cargo build --target wasm32-wasip1 --release
              time wasmedge --dir .:. \
                --env n_gpu_layers="$NGL" \
                --nn-preload default:GGML:AUTO:gemma-2b-it-Q5_K_M.gguf \
                target/wasm32-wasip1/release/wasmedge-ggml-gemma.wasm \
                default \
                '<start_of_turn>user Where is the capital of Japan? <end_of_turn><start_of_turn>model'
          - name: Llava v1.6 7B
            shell: bash
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/llava
              curl -LO https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/vicuna-7b-q5_k.gguf
              curl -LO https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/mmproj-vicuna7b-f16.gguf
              curl -LO https://llava-vl.github.io/static/images/monalisa.jpg
              cargo build --target wasm32-wasip1 --release
              time wasmedge --dir .:. \
                --env mmproj=mmproj-vicuna7b-f16.gguf \
                --env image=monalisa.jpg \
                --env ctx_size=4096 \
                --env n_gpu_layers="$NGL" \
                --nn-preload default:GGML:AUTO:vicuna-7b-q5_k.gguf \
                target/wasm32-wasip1/release/wasmedge-ggml-llava.wasm \
                default \
                $'You are a helpful, respectful and honest assistant. Always answer as short as possible, while being safe.\nUSER:<image>\nDo you know who drew this painting?\nASSISTANT:'
          - name: Llama3 8B
            shell: bash
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/llama
              curl -LO https://huggingface.co/QuantFactory/Meta-Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct.Q5_K_M.gguf
              cargo build --target wasm32-wasip1 --release
              time wasmedge --dir .:. \
                --env n_gpu_layers="$NGL" \
                --env llama3=true \
                --nn-preload default:GGML:AUTO:Meta-Llama-3-8B-Instruct.Q5_K_M.gguf \
                target/wasm32-wasip1/release/wasmedge-ggml-llama.wasm \
                default \
                $"<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you do not know the answer to a question, please do not share false information.<|eot_id|>\n<|start_header_id|>user<|end_header_id|>\n\nWhat's the capital of Japan?<|eot_id|>\n<|start_header_id|>assistant<|end_header_id|>\n\n"
          - name: Llama3 8B (Streaming)
            shell: bash
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/llama-stream
              curl -LO https://huggingface.co/QuantFactory/Meta-Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct.Q5_K_M.gguf
              cargo build --target wasm32-wasip1 --release
              time wasmedge --dir .:. \
                --env n_gpu_layers="$NGL" \
                --env llama3=true \
                --nn-preload default:GGML:AUTO:Meta-Llama-3-8B-Instruct.Q5_K_M.gguf \
                target/wasm32-wasip1/release/wasmedge-ggml-llama-stream.wasm \
                default \
                $"<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you do not know the answer to a question, please do not share false information.<|eot_id|>\n<|start_header_id|>user<|end_header_id|>\n\nWhat's the capital of Japan?<|eot_id|>\n<|start_header_id|>assistant<|end_header_id|>\n\n"
          - name: Multiple Models Example
            shell: bash
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/multimodel
              curl -LO https://huggingface.co/second-state/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q5_K_M.gguf
              curl -LO https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/vicuna-7b-q5_k.gguf
              curl -LO https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/mmproj-vicuna7b-f16.gguf
              curl -LO https://llava-vl.github.io/static/images/monalisa.jpg
              cargo build --target wasm32-wasip1 --release
              time wasmedge --dir .:. \
                --env n_gpu_layers="$NGL" \
                --env image=monalisa.jpg \
                --env mmproj=mmproj-vicuna7b-f16.gguf \
                --nn-preload llama2:GGML:AUTO:llama-2-7b-chat.Q5_K_M.gguf \
                --nn-preload llava:GGML:AUTO:vicuna-7b-q5_k.gguf \
                target/wasm32-wasip1/release/wasmedge-ggml-multimodel.wasm \
                'describe this picture please'
          - name: Embedding Example (All-MiniLM)
            shell: bash
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/embedding
              curl -LO https://huggingface.co/second-state/All-MiniLM-L6-v2-Embedding-GGUF/resolve/main/all-MiniLM-L6-v2-ggml-model-f16.gguf
              cargo build --target wasm32-wasip1 --release
              time wasmedge --dir .:. \
                --nn-preload default:GGML:AUTO:all-MiniLM-L6-v2-ggml-model-f16.gguf \
                target/wasm32-wasip1/release/wasmedge-ggml-llama-embedding.wasm \
                default \
                'hello world'
          - name: Embedding Example (Llama-2)
            shell: bash
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/embedding
              curl -LO https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/resolve/main/llama-2-7b-chat.Q5_K_M.gguf
              cargo build --target wasm32-wasip1 --release
              time wasmedge --dir .:. \
                --nn-preload default:GGML:AUTO:llama-2-7b-chat.Q5_K_M.gguf \
                target/wasm32-wasip1/release/wasmedge-ggml-llama-embedding.wasm \
                default \
                'hello world'
          - name: RPC Example
            shell: bash
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/nnrpc
              curl -LO https://huggingface.co/second-state/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q5_K_M.gguf
              cargo build --target wasm32-wasip1 --release
              time wasmedge --dir .:. \
                --env n_gpu_layers="$NGL" \
                --nn-preload default:GGML:AUTO:llama-2-7b-chat.Q5_K_M.gguf \
                target/wasm32-wasip1/release/wasmedge-ggml-nnrpc.wasm \
                default \
                $'[INST] <<SYS>>\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you do not know the answer to a question, please do not share false information.\n<</SYS>>\nWhat is the capital of Japan?[/INST]'
          - name: Set Input Twice
            shell: bash
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/test/set-input-twice
              curl -LO https://huggingface.co/second-state/Gemma-2b-it-GGUF/resolve/main/gemma-2b-it-Q5_K_M.gguf
              cargo build --target wasm32-wasip1 --release
              time wasmedge --dir .:. \
                --env n_gpu_layers="$NGL" \
                --nn-preload default:GGML:AUTO:gemma-2b-it-Q5_K_M.gguf \
                target/wasm32-wasip1/release/wasmedge-ggml-set-input-twice.wasm \
                default \
                '<start_of_turn>user Where is the capital of Japan? <end_of_turn><start_of_turn>model'
          - name: Grammar Example
            shell: bash
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/grammar
              curl -LO https://huggingface.co/TheBloke/Llama-2-7b-GGUF/resolve/main/llama-2-7b.Q5_K_M.gguf
              cargo build --target wasm32-wasip1 --release
              time wasmedge --dir .:. \
                --env n_gpu_layers="$NGL" \
                --nn-preload default:GGML:AUTO:llama-2-7b.Q5_K_M.gguf \
                target/wasm32-wasip1/release/wasmedge-ggml-grammar.wasm \
                default \
                'JSON object with 5 country names as keys and their capitals as values: '
          - name: Model Not Found
            shell: bash
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/test/model-not-found
              cargo build --target wasm32-wasip1 --release
              time wasmedge --dir .:. \
                --nn-preload default:GGML:AUTO:model-not-found.gguf \
                target/wasm32-wasip1/release/wasmedge-ggml-model-not-found.wasm \
                default
          - name: Unload
            shell: bash
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/test/unload
              curl -LO https://huggingface.co/second-state/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q5_K_M.gguf
              cargo build --target wasm32-wasip1 --release
              time wasmedge --dir .:. \
                --nn-preload default:GGML:AUTO:llama-2-7b-chat.Q5_K_M.gguf \
                target/wasm32-wasip1/release/wasmedge-ggml-unload.wasm \
                default \
                $'[INST] <<SYS>>\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you do not know the answer to a question, please do not share false information.\n<</SYS>>\nWhat is the capital of Japan?[/INST]'
          - name: JSON Schema
            shell: bash
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/json-schema
              curl -LO https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/resolve/main/llama-2-7b-chat.Q5_K_M.gguf
              cargo build --target wasm32-wasip1 --release
              time wasmedge --dir .:. \
                --env n_gpu_layers="$NGL" \
                --nn-preload default:GGML:AUTO:llama-2-7b-chat.Q5_K_M.gguf \
                target/wasm32-wasip1/release/wasmedge-ggml-json-schema.wasm \
                default \
                $'[INST] <<SYS>>\nYou are a helpful, respectful and honest assistant. Always output JSON format string.\n<</SYS>>\nGive me a JSON array of Apple products.[/INST]'
          - name: Qwen2-VL
            shell: bash
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/qwen2vl
              curl -LO https://huggingface.co/second-state/Qwen2-VL-2B-Instruct-GGUF/resolve/main/Qwen2-VL-2B-Instruct-vision-encoder.gguf
              curl -LO https://huggingface.co/second-state/Qwen2-VL-2B-Instruct-GGUF/resolve/main/Qwen2-VL-2B-Instruct-Q5_K_M.gguf
              curl -LO https://llava-vl.github.io/static/images/monalisa.jpg
              cargo build --target wasm32-wasip1 --release
              time wasmedge --dir .:. \
                --env n_gpu_layers="$NGL" \
                --nn-preload default:GGML:AUTO:Qwen2-VL-2B-Instruct-Q5_K_M.gguf \
                --env mmproj=Qwen2-VL-2B-Instruct-vision-encoder.gguf \
                --env image=monalisa.jpg \
                target/wasm32-wasip1/release/wasmedge-ggml-qwen2vl.wasm \
                default \
                $'<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<|vision_start|><image><|vision_end|>what is in this picture?<|im_end|>\n<|im_start|>assistant\n'
          - name: Text-to-speech
            shell: bash
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/tts
              curl -LO https://huggingface.co/second-state/OuteTTS-0.2-500M-GGUF/resolve/main/OuteTTS-0.2-500M-Q5_K_M.gguf
              curl -LO https://huggingface.co/second-state/OuteTTS-0.2-500M-GGUF/resolve/main/wavtokenizer-large-75-ggml-f16.gguf
              curl -LO https://raw.githubusercontent.com/edwko/OuteTTS/refs/heads/main/outetts/version/v1/default_speakers/en_male_1.json
              cargo build --target wasm32-wasip1 --release
              time wasmedge --dir .:. \
                --env n_gpu_layers="$NGL" \
                --nn-preload default:GGML:AUTO:OuteTTS-0.2-500M-Q5_K_M.gguf \
                --env tts=true \
                --env tts_output_file=output.wav \
                --env tts_speaker_file=en_male_1.json \
                --env model_vocoder=wavtokenizer-large-75-ggml-f16.gguf \
                target/wasm32-wasip1/release/wasmedge-ggml-tts.wasm \
                default \
                'Hello, world.'
              sha1sum *.wav
          - name: Build llama-stream
            run: |
              cd wasmedge-ggml/llama-stream
              cargo build --target wasm32-wasip1 --release
          - name: Build llava-base64-stream
            run: |
              cd wasmedge-ggml/llava-base64-stream
              cargo build --target wasm32-wasip1 --release
    name: ${{ matrix.runner == 'ubuntu-latest' && 'ubuntu:20.04' || matrix.runner }} - ${{ matrix.job.name }} - ${{ matrix.wasmedge }} - ${{ matrix.plugin }}
    runs-on: ${{ matrix.runner }}
    # set image to `ubuntu:20.04` if runner is `ubuntu-latest`
    container: ${{ matrix.runner == 'ubuntu-latest' && fromJSON('{"image":"ubuntu:20.04"}') || null }}
    steps:
      - uses: actions/checkout@v4
      - if: ${{ matrix.runner == 'ubuntu-latest' }}
        name: Install apt-get packages
        run: |
          ACCEPT_EULA=Y apt-get update
          ACCEPT_EULA=Y apt-get upgrade -y
          apt-get install -y wget git curl software-properties-common build-essential
        env:
          DEBIAN_FRONTEND: noninteractive
      - name: Install Rust target for wasm
        uses: dtolnay/rust-toolchain@stable
        with:
          target: wasm32-wasip1
      - name: Install WasmEdge + WASI-NN + GGML
        run: |
          curl -sSf https://raw.githubusercontent.com/WasmEdge/WasmEdge/master/utils/install.sh | bash -s -- -v ${{ matrix.wasmedge }} --plugins ${{ matrix.plugin }}
      - name: Set environment variable
        run: echo "NGL=${{ matrix.ngl || 0 }}" >> $GITHUB_ENV
      # Run the matrix-selected example step defined above.
      - name: ${{ matrix.job.name }}
        run: ${{ matrix.job.run }}
        shell: bash