Mirror of https://github.com/ggerganov/whisper.cpp.git
Synced 2025-06-25 17:40:04 +00:00

Compare commits
619 Commits
java-bindi ... gg/cuda-no
SHA1 | Author | Date | |
---|---|---|---|
267e15a46d | |||
420b6abc54 | |||
99804b0f3e | |||
c55964c956 | |||
20c542c713 | |||
c2bdb960cd | |||
87acd6d629 | |||
f842d31171 | |||
ffef323c4c | |||
af5833e298 | |||
b87494bb8f | |||
ad130431aa | |||
e130b66642 | |||
c7b6988678 | |||
05042a782d | |||
a7dc2aab16 | |||
22d46b7ba4 | |||
c10db6ea28 | |||
1b51fdf170 | |||
adee3f9c1f | |||
4798be1f9a | |||
08981d1bac | |||
7094ea5e75 | |||
9d5771ae43 | |||
f56b8305c4 | |||
1056ad762c | |||
c451080c8b | |||
8e7c22fbdb | |||
e57e95eb0d | |||
130f43e4b8 | |||
d8356a1cc2 | |||
4ef8d9f44e | |||
3928dbd206 | |||
2ced6f0742 | |||
30f73109b8 | |||
17fa62d3d3 | |||
1da5edcde0 | |||
0bb05b113d | |||
f141b2b938 | |||
2b434c449e | |||
e93081f83f | |||
b6bbce4ae9 | |||
7705dc52da | |||
e6acaf9d91 | |||
2c81e6fd51 | |||
9506267ce5 | |||
fbeb80b5f0 | |||
3fa7d29876 | |||
fe179ae0cc | |||
40aeeeecc4 | |||
5a863fbe18 | |||
91c646c61d | |||
accada542a | |||
e54329da7b | |||
284fac39fb | |||
fe454b8d9e | |||
c114b75aee | |||
4be936b88b | |||
26c550f772 | |||
24f0aa460b | |||
69efc39d5c | |||
a2ad810118 | |||
1ae1a9cd56 | |||
b5521fea19 | |||
9b84195225 | |||
11c1df0436 | |||
c754494fdd | |||
1bce67999d | |||
6c39ea46b6 | |||
156a33a990 | |||
5167ebdfca | |||
b574646d75 | |||
388c3462a6 | |||
9ad202bee9 | |||
f0d3fb4a7e | |||
9d4c8b8aa5 | |||
ecfac1e240 | |||
6f7140f568 | |||
05b17112cf | |||
a15fb5cd79 | |||
63fd148d8f | |||
6c3971b29b | |||
a6d264f331 | |||
2959686019 | |||
c96b0a938e | |||
c97796aa0f | |||
7a4f7d825e | |||
fdb2c87350 | |||
98c0b77e0c | |||
9d6d50d933 | |||
c1320c1f0c | |||
66aaf03a7a | |||
00a0947c65 | |||
60f3713026 | |||
37e6757453 | |||
8dcefdf4a9 | |||
73d13ad19a | |||
b6680fab50 | |||
f760756078 | |||
58210d6a76 | |||
8fac6455ff | |||
22b6598cc9 | |||
858452d58d | |||
7f85e1d7fd | |||
b0c3cbf2e8 | |||
a750868428 | |||
7395c70a74 | |||
9fab28135c | |||
08d3eef97d | |||
1b5439a6c2 | |||
c7f95b7ca2 | |||
5c554c04ff | |||
c383f091a1 | |||
8f253ef3af | |||
c7dc37f97c | |||
526332873b | |||
1d2721ca72 | |||
219e601dab | |||
3b8aade3c2 | |||
52ccd4a3a8 | |||
5275074d37 | |||
c15b4cda7d | |||
d3cfb6ca2b | |||
956ef860bc | |||
671b4bde6c | |||
c8eeb93a6a | |||
319fe5146e | |||
13c22321d1 | |||
ccbe9d5676 | |||
81a3c41aa0 | |||
a50207c65d | |||
97878e53fd | |||
61b05815e0 | |||
1dce94cf26 | |||
f12e982c0b | |||
fa966b9b40 | |||
b83a9fc9d3 | |||
3adbf2fb03 | |||
700d146127 | |||
a74fde9b4c | |||
1d7657f409 | |||
ac283dbce7 | |||
1e8f28c42a | |||
fc366b807a | |||
9fb308d90f | |||
2948c740a2 | |||
1558ec5a16 | |||
fff24a0148 | |||
48a145207e | |||
79d5765e7e | |||
04e48094e4 | |||
741abb162c | |||
e7794a868f | |||
725350d4ea | |||
906c73b219 | |||
00d80ff965 | |||
1b553b9817 | |||
de4d067f1e | |||
e715f6a601 | |||
f60ccfd83b | |||
3753a2b2a8 | |||
592dd25615 | |||
c8709d4604 | |||
8932c2d6ce | |||
2bddfdd7c8 | |||
46e3c3f112 | |||
ef24ae0c7d | |||
a753926f02 | |||
9dc60fc02d | |||
d73a63629e | |||
f79d0d4f74 | |||
4f88940ff6 | |||
7bdb1de9ec | |||
653d2e8ff9 | |||
2fef660d0a | |||
24eba5a2ff | |||
6e9d3aa32d | |||
9ae0d18856 | |||
a56f435fd4 | |||
ec166499d8 | |||
ccf022f970 | |||
2852e1af55 | |||
ce945b50c3 | |||
2f5a5a66dd | |||
8e409d1113 | |||
05d1b61af4 | |||
647cae178a | |||
bae7c23fbf | |||
18ea187d42 | |||
1daeffca54 | |||
2f6f1d4465 | |||
7ff1894c34 | |||
8edfc54c2b | |||
9c399689ec | |||
9d9a405cfd | |||
edd8b38a75 | |||
ed76818700 | |||
9a0b59d990 | |||
93a84a143b | |||
bd26876267 | |||
21d295180d | |||
c3bfc9bfda | |||
422a6b16fc | |||
11dd0d4482 | |||
26dd2f06ac | |||
8cee7c08b6 | |||
2e2626b167 | |||
c0c0ae2dea | |||
897412b5b6 | |||
f22d27a385 | |||
ccd7c1d2da | |||
c713eb5e2a | |||
25d313b38b | |||
3168dbf23b | |||
1711bb3881 | |||
2533305596 | |||
0eca512ac8 | |||
013e394a4b | |||
d83f371b5f | |||
1c71816eab | |||
7b1d8ea7e0 | |||
b1f7223a0a | |||
8408a4be8e | |||
72849c24ba | |||
c19c28be71 | |||
0d8fd8483a | |||
3170841ed9 | |||
7a6e385c1b | |||
578e47e70c | |||
fac5b43830 | |||
9e7c5212a1 | |||
1cb64f7368 | |||
f18738f247 | |||
a0ddd8392c | |||
a2506909b1 | |||
7b1ff212d9 | |||
e5d06cfc0f | |||
31891db2e3 | |||
5fdb27ff80 | |||
6b16927d18 | |||
ce411498f6 | |||
208de95ac7 | |||
c2ce39c795 | |||
8daa534818 | |||
9fca69b410 | |||
b26c645420 | |||
1879ec556e | |||
c6e53cfc46 | |||
b19f2fb815 | |||
a6b0950916 | |||
d352dbd163 | |||
eb23f4ef16 | |||
c56344b509 | |||
59119f4f20 | |||
276615d708 | |||
b602819b6e | |||
c2c606f05b | |||
83afebe872 | |||
a4d8f9d559 | |||
5ec1e0edfa | |||
30a11b1ab8 | |||
f04e6b87d7 | |||
0c33928b55 | |||
0775374750 | |||
7d90bb035b | |||
2c1ad21ba8 | |||
eca5ff9868 | |||
1b25d2fa0a | |||
74a6acc999 | |||
a4ed8a0821 | |||
9f675e021c | |||
a38efcb9fd | |||
31591649a0 | |||
4f5c46a84f | |||
462ffc58db | |||
65faae0b6a | |||
dda4b0ed06 | |||
07d04280be | |||
917c56ded4 | |||
3d42463845 | |||
3ffc83d90a | |||
e3c5e2cba8 | |||
b742f13e70 | |||
52c529eeb1 | |||
551529290d | |||
25a90ffa38 | |||
866b67ca93 | |||
d7e9f58f7f | |||
04839bae22 | |||
3cc6e04a52 | |||
b7ef178b9c | |||
47dfe9d4db | |||
1d3270cc8f | |||
a6fb6ab597 | |||
163e74b6c3 | |||
f273e66dc6 | |||
02b4c52c12 | |||
518199c09e | |||
8b17a2f776 | |||
b6d2827914 | |||
9711bae0b3 | |||
eec38f63bd | |||
ef5e6b746f | |||
77bf6b5f56 | |||
b562fff9d0 | |||
b5dec374f4 | |||
fa0dc6167c | |||
55bcd62a4b | |||
0ed762d691 | |||
1b5bb7792e | |||
9b735cea77 | |||
12c462d656 | |||
fc7b0e2c28 | |||
f850a067ed | |||
f75e1197f1 | |||
aa8a75e287 | |||
80e8a2ea39 | |||
19f8048139 | |||
0f80e5a80a | |||
b6559333ff | |||
434b8f3b96 | |||
7a74e929c8 | |||
361ecebe90 | |||
807cbc672e | |||
98ae5276b7 | |||
6adb969b09 | |||
8a7d6ff51a | |||
25f650a8e8 | |||
44e517f074 | |||
cb9de61659 | |||
a2ef80d66f | |||
baa190446a | |||
8f5220d81f | |||
8e391fcf3a | |||
593657054e | |||
ae5c4f7340 | |||
baa30bacdb | |||
3e6fad07aa | |||
e72e4158de | |||
bd41733db2 | |||
23c648e98d | |||
75ab2d06f5 | |||
adc099edee | |||
52cce82493 | |||
ef3c9ed9eb | |||
7fe3ed5e00 | |||
6061241292 | |||
0878ab7c15 | |||
c65edd5b64 | |||
3c8d14e9c5 | |||
c3977cb2ce | |||
6da1661bc2 | |||
cc56540661 | |||
94c1ae8668 | |||
55d54359e0 | |||
d33c2ad354 | |||
9afa7ff624 | |||
0649289f02 | |||
aaeaa43878 | |||
078b8e23bf | |||
74da3e1757 | |||
2d2c93a798 | |||
4bbb60efce | |||
1cf679dec4 | |||
41026c1e4b | |||
d6b9be21d7 | |||
c0329acde8 | |||
fb466b3417 | |||
1f50a7d29f | |||
1de21b913d | |||
4aea058e5a | |||
fd10234363 | |||
8fb5c6a409 | |||
2fe5fbfcc2 | |||
01637e1a4c | |||
1b349eb1f9 | |||
138eaebead | |||
61b9192f27 | |||
161b51d91a | |||
f904b31a7d | |||
f6614155e4 | |||
f5f159c320 | |||
6ebba525f1 | |||
2a5874441d | |||
d08445c9ad | |||
4a945696cb | |||
dabc964d83 | |||
654baf693d | |||
f001a3b7b6 | |||
c615f2c335 | |||
d839dd0242 | |||
435847891c | |||
182f290808 | |||
447dfc11fc | |||
9aa9f3b84e | |||
396ebd1e80 | |||
12490f4398 | |||
db078a9ba8 | |||
a13a7da5ad | |||
519f8e8684 | |||
40ae0962f4 | |||
1560288048 | |||
1ad6fafd91 | |||
70840aed5f | |||
b24d18feb9 | |||
3fa98f4395 | |||
d05b7ee90e | |||
6dcee35129 | |||
5cb345f5e9 | |||
fbcb52d3cd | |||
6b01e3fedd | |||
f7908f9bb8 | |||
00b7a4be02 | |||
04b0a768b8 | |||
87670425f2 | |||
32e71a1861 | |||
9c857cf280 | |||
97b12212dd | |||
9fa34d79ec | |||
a0a64a19dd | |||
bbc23611fa | |||
e9783a1fb4 | |||
9e0cc28792 | |||
73072a7c73 | |||
a8ba1262ff | |||
e66a9a7806 | |||
338442d773 | |||
10651bddf6 | |||
53d4d0b30d | |||
2865e4710b | |||
c46a74a19d | |||
46dc49a6a1 | |||
cc7f872131 | |||
bcc1658cd0 | |||
c46886f599 | |||
29f78392c1 | |||
022756a872 | |||
3b8c2dff57 | |||
0b9af32a8b | |||
11b1b63b14 | |||
0e26a6c92e | |||
66d8f0b7f1 | |||
ba5bcde874 | |||
ab0a8593c5 | |||
668ffc9b23 | |||
9962371f71 | |||
993acb5d41 | |||
a3d0aa73d1 | |||
14c57952f7 | |||
6c369d6788 | |||
4cdd9aad9b | |||
f38c057503 | |||
1e5544b39b | |||
d5673af79f | |||
a28dacec65 | |||
dbe29d4e33 | |||
fe3a67c546 | |||
b138ff2be3 | |||
cf6f1e4181 | |||
620a223814 | |||
f39f9690ec | |||
f9ca90256b | |||
2623640cd6 | |||
d87de61ae6 | |||
f5f485f899 | |||
e77b27c331 | |||
a5cc3dc8a2 | |||
37a709f655 | |||
3a5302108d | |||
d2ee117a0a | |||
db8ccdb850 | |||
d2419030b0 | |||
8986690c2a | |||
9286d3f584 | |||
940de9dbe9 | |||
88112c8afb | |||
375585c07c | |||
fd99ece8e3 | |||
8171e621fc | |||
ec03661b20 | |||
6335933a5b | |||
885b5563d0 | |||
9521ba6801 | |||
29511d33c7 | |||
7bc4d22337 | |||
afce6fa113 | |||
3163090d89 | |||
f0efd0202d | |||
3c28d1a571 | |||
e369243ebd | |||
a0ec3fac54 | |||
6559b538e5 | |||
73d5005880 | |||
6b094b6dfe | |||
641f2f4282 | |||
bfacd9f8ce | |||
f52e74d4dc | |||
23c21e92eb | |||
447d49530c | |||
9d6ebd877c | |||
0ba365f958 | |||
010c8ec3ab | |||
ffdb5c4735 | |||
a5881d619c | |||
34f70b3a56 | |||
8328d1900f | |||
d2bd5f0bdc | |||
34209a37a2 | |||
180e062eda | |||
5c7be85fdc | |||
146169ec38 | |||
9befab5ab9 | |||
9ac88f2b57 | |||
46f5b6cb08 | |||
eff3570f78 | |||
fa19bc4195 | |||
a01b2e0971 | |||
8159a9ab99 | |||
7516d9c16d | |||
46cc26d1b9 | |||
f784f9fa12 | |||
ca23f8ee6d | |||
e2f0eba2d4 | |||
d4353e48f7 | |||
bebf0da983 | |||
848e54f3ad | |||
7883d1cae4 | |||
ccc85b4ff8 | |||
c7606b47df | |||
d38af151a1 | |||
94267df08e | |||
8713c67133 | |||
57a60639bb | |||
bfbaa4dce5 | |||
1d79e78402 | |||
b6c5f49b78 | |||
d4231649e6 | |||
3e5c7feeff | |||
c23598e4ca | |||
54a08bde29 | |||
9f8bbd3fee | |||
3172006a24 | |||
684bc8bd70 | |||
b0502836b8 | |||
ec7a6f04f9 | |||
37947203e6 | |||
953419c69a | |||
0de8582f65 | |||
baeb733691 | |||
d03c60dd7f | |||
6a5d195109 | |||
0cbef75422 | |||
2cdfc4e025 | |||
973111088b | |||
11b503055e | |||
b629d2d4fe | |||
3bd7d48f51 | |||
435a6b74e3 | |||
75dc800d21 | |||
0c91aef2d8 | |||
3989b29a9b | |||
0463028bc2 | |||
39cfad0dee | |||
6d4d0b5b4b | |||
f96e1c5b78 | |||
8a2bee6717 | |||
d445098c8f | |||
74de25158e | |||
bce49a260e | |||
45c87b5481 | |||
dfe4bc6e59 | |||
54c978c3a3 | |||
9a7074d4aa | |||
a0040f5d12 | |||
940cdb1396 | |||
1b775cdd68 | |||
80bf931668 | |||
91c0b23384 | |||
2f668c330e | |||
08fa34882f | |||
4037705531 | |||
c76c11e59c | |||
9edbd0a204 | |||
707507ff6d | |||
7e1592d2cd | |||
903c9579b8 | |||
b440ef8c96 | |||
700f63a806 | |||
951a119926 | |||
1ca4041b86 | |||
80c1512fd5 | |||
0ac9cefd03 | |||
b8432f28f4 | |||
93935980f8 | |||
3fec2119e6 | |||
9b14418863 | |||
6ddc727fac | |||
acb5278cc8 | |||
0839209cab | |||
b39809668a | |||
3e9edc6845 | |||
bfc73f1fa2 | |||
f00c9bba33 | |||
b55b505690 | |||
2818de21ff | |||
aed5d40607 | |||
afa5477d1c | |||
01fcd42431 | |||
f990610776 | |||
64cb45fd79 | |||
ace6c12ec6 | |||
cac75be05b | |||
c3f319d7c2 | |||
ba3c333611 | |||
59a3d0cb57 | |||
6780c98e19 | |||
2f52783a08 | |||
7dec9d8cc4 | |||
fb0a24fba2 |
.devops/cublas.Dockerfile (28 lines, Normal file)
@@ -0,0 +1,28 @@
ARG UBUNTU_VERSION=22.04

# This needs to generally match the container host's environment.
ARG CUDA_VERSION=11.7.1

# Target the CUDA build image
ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}

FROM ${BASE_CUDA_DEV_CONTAINER} as build

# Unless otherwise specified, we make a fat build.
ARG CUDA_DOCKER_ARCH=all

RUN apt-get update && \
    apt-get install -y build-essential git cmake

WORKDIR /app

COPY . .

# Set nvcc architecture
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
# Enable cuBLAS
ENV WHISPER_CUBLAS=1

RUN make

ENTRYPOINT ["/app/main"]
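As a rough usage sketch (not part of the diff): an image built from this cuBLAS Dockerfile would typically be built and run along these lines. The image tag, model file, and audio path below are placeholders, and GPU access assumes the NVIDIA Container Toolkit is installed on the host.

    # build the image from the repository root (tag name is arbitrary)
    docker build -f .devops/cublas.Dockerfile -t whisper-cublas .

    # the entrypoint is /app/main, so arguments go straight to it
    docker run --gpus all -v $PWD/models:/models -v $PWD/samples:/samples \
      whisper-cublas -m /models/ggml-base.en.bin -f /samples/jfk.wav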
.devops/main-cuda.Dockerfile (40 lines, Normal file)
@@ -0,0 +1,40 @@
ARG UBUNTU_VERSION=22.04
# This needs to generally match the container host's environment.
ARG CUDA_VERSION=12.3.1
# Target the CUDA build image
ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
# Target the CUDA runtime image
ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}

FROM ${BASE_CUDA_DEV_CONTAINER} AS build
WORKDIR /app

# Unless otherwise specified, we make a fat build.
ARG CUDA_DOCKER_ARCH=all
# Set nvcc architecture
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
# Enable cuBLAS
ENV WHISPER_CUBLAS=1

RUN apt-get update && \
    apt-get install -y build-essential \
    && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*

# Ref: https://stackoverflow.com/a/53464012
ENV CUDA_MAIN_VERSION=12.3
ENV LD_LIBRARY_PATH /usr/local/cuda-${CUDA_MAIN_VERSION}/compat:$LD_LIBRARY_PATH

COPY .. .
RUN make

FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime
ENV CUDA_MAIN_VERSION=12.3
ENV LD_LIBRARY_PATH /usr/local/cuda-${CUDA_MAIN_VERSION}/compat:$LD_LIBRARY_PATH
WORKDIR /app

RUN apt-get update && \
    apt-get install -y curl ffmpeg \
    && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*

COPY --from=build /app /app
ENTRYPOINT [ "bash", "-c" ]
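A hedged sketch of using this two-stage CUDA image: because the entrypoint is "bash -c", the whole command is passed as a single string. Tag and paths are placeholders; GPU access again assumes the NVIDIA Container Toolkit.

    docker build -f .devops/main-cuda.Dockerfile -t whisper-main-cuda .
    docker run --gpus all -v $PWD/models:/models -v $PWD/samples:/samples \
      whisper-main-cuda "./main -m /models/ggml-base.en.bin -f /samples/jfk.wav"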
.devops/main.Dockerfile (19 lines, Normal file)
@@ -0,0 +1,19 @@
FROM ubuntu:22.04 AS build
WORKDIR /app

RUN apt-get update && \
    apt-get install -y build-essential \
    && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*

COPY .. .
RUN make

FROM ubuntu:22.04 AS runtime
WORKDIR /app

RUN apt-get update && \
    apt-get install -y curl ffmpeg \
    && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*

COPY --from=build /app /app
ENTRYPOINT [ "bash", "-c" ]
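The CPU-only image follows the same pattern, minus the GPU flag; a minimal sketch with placeholder tag and paths:

    docker build -f .devops/main.Dockerfile -t whisper-main .
    docker run -v $PWD/models:/models -v $PWD/samples:/samples \
      whisper-main "./main -m /models/ggml-base.en.bin -f /samples/jfk.wav"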
.github/workflows/build.yml (354 lines, vendored)

@@ -15,16 +15,17 @@ jobs:
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3

       - name: Build ${{ matrix.arch }}
         run: |
           docker run --platform ${{ matrix.arch }} --rm \
             -v ${{ github.workspace }}:/workspace \
             -w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
+            set -e
             apt update
             apt install -y build-essential libsdl2-dev
             make

@@ -35,7 +36,7 @@ jobs:
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Dependencies
         run: |

@@ -52,10 +53,10 @@ jobs:
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Build
-        uses: cross-platform-actions/action@v0.15.0
+        uses: cross-platform-actions/action@v0.24.0
         with:
           operating_system: freebsd
           version: '13.2'

@@ -76,19 +77,20 @@ jobs:
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3

       - name: Build ${{ matrix.arch }}
         run: |
           docker run --platform ${{ matrix.arch }} --rm \
             -v ${{ github.workspace }}:/workspace \
             -w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
+            set -e
             apt update
             apt install -y build-essential cmake libsdl2-dev
-            cmake . -DWHISPER_SUPPORT_SDL2=ON -DCMAKE_BUILD_TYPE=${{ matrix.build }}
+            cmake . -DWHISPER_SDL2=ON -DCMAKE_BUILD_TYPE=${{ matrix.build }}
             make
             ctest -L gh --output-on-failure'

@@ -103,19 +105,20 @@ jobs:
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3

       - name: Build ${{ matrix.arch }}
         run: |
           docker run --platform ${{ matrix.arch }} --rm \
             -v ${{ github.workspace }}:/workspace \
             -w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
+            set -e
             apt update
-            apt install -y build-essential cmake libsdl2-dev
+            apt install -y clang build-essential cmake libsdl2-dev
-            cmake . -DWHISPER_SUPPORT_SDL2=ON -DCMAKE_BUILD_TYPE=${{ matrix.build }} -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang
+            cmake . -DWHISPER_SDL2=ON -DCMAKE_BUILD_TYPE=${{ matrix.build }} -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang
             make
             ctest -L gh --output-on-failure'
@@ -130,22 +133,181 @@ jobs:
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3

       - name: Build ${{ matrix.arch }}
         run: |
           docker run --platform ${{ matrix.arch }} --rm \
             -v ${{ github.workspace }}:/workspace \
             -w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
+            set -e
             apt update
             apt install -y build-essential cmake
             cmake . -DCMAKE_BUILD_TYPE=Debug -DWHISPER_SANITIZE_${{ matrix.sanitizer }}=ON
             make
             ctest -L gh --output-on-failure'

+  ubuntu-22-cmake-sycl:
+    runs-on: ubuntu-22.04
+
+    strategy:
+      fail-fast: false
+      matrix:
+        dwhisper_sycl: [ON]
+        dcmake_c_compiler: [icx]
+        dcmake_cxx_compiler: [icpx]
+        arch: [linux/amd64, linux/arm64, linux/arm/v7, linux/ppc64le]
+
+    continue-on-error: true
+
+    steps:
+      - name: Clone
+        uses: actions/checkout@v4
+
+      - name: add oneAPI to apt
+        shell: bash
+        run: |
+          cd /tmp
+          wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
+          sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
+          rm GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
+          sudo add-apt-repository "deb https://apt.repos.intel.com/oneapi all main"
+
+      - name: install oneAPI dpcpp compiler
+        shell: bash
+        run: |
+          sudo apt update
+          sudo apt install intel-oneapi-compiler-dpcpp-cpp
+
+      - name: install oneAPI MKL library
+        shell: bash
+        run: |
+          sudo apt install intel-oneapi-mkl-devel
+
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Build
+        id: cmake_build
+        run: |
+          source /opt/intel/oneapi/setvars.sh
+          mkdir build
+          cd build
+          cmake -DWHISPER_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ..
+          cmake --build . --config Release -j $(nproc)
+
+  ubuntu-22-cmake-sycl-fp16:
+    runs-on: ubuntu-22.04
+
+    strategy:
+      fail-fast: false
+      matrix:
+        dwhisper_sycl: [ON]
+        dcmake_c_compiler: [icx]
+        dcmake_cxx_compiler: [icpx]
+        arch: [linux/amd64, linux/arm64, linux/arm/v7, linux/ppc64le]
+
+    continue-on-error: true
+
+    steps:
+      - name: Clone
+        uses: actions/checkout@v4
+
+      - name: add oneAPI to apt
+        shell: bash
+        run: |
+          cd /tmp
+          wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
+          sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
+          rm GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
+          sudo add-apt-repository "deb https://apt.repos.intel.com/oneapi all main"
+
+      - name: install oneAPI dpcpp compiler
+        shell: bash
+        run: |
+          sudo apt update
+          sudo apt install intel-oneapi-compiler-dpcpp-cpp
+
+      - name: install oneAPI MKL library
+        shell: bash
+        run: |
+          sudo apt install intel-oneapi-mkl-devel
+
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Build
+        id: cmake_build
+        run: |
+          source /opt/intel/oneapi/setvars.sh
+          mkdir build
+          cd build
+          cmake -DWHISPER_SYCL_F16=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ..
+          cmake --build . --config Release -j $(nproc)
+
+  windows-msys2:
+    runs-on: windows-latest
+
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - { sys: UCRT64, env: ucrt-x86_64, build: Release }
+          - { sys: CLANG64, env: clang-x86_64, build: Release }
+
+    steps:
+      - name: Clone
+        uses: actions/checkout@v4
+
+      - name: Setup ${{ matrix.sys }}
+        uses: msys2/setup-msys2@v2
+        with:
+          update: true
+          msystem: ${{matrix.sys}}
+          install: >-
+            base-devel
+            mingw-w64-${{matrix.env}}-toolchain
+            mingw-w64-${{matrix.env}}-cmake
+            mingw-w64-${{matrix.env}}-SDL2
+            mingw-w64-${{matrix.env}}-openblas
+
+      - name: Build using make
+        shell: msys2 {0}
+        run: |
+          make -j $(nproc)
+
+      - name: Clean after building using make
+        shell: msys2 {0}
+        run: |
+          make clean
+
+      - name: Build using make w/ OpenBLAS
+        shell: msys2 {0}
+        run: |
+          make WHISPER_OPENBLAS=1 -j $(nproc)
+
+      - name: Build using CMake
+        shell: msys2 {0}
+        run: |
+          cmake -B build
+          cmake --build build --config ${{ matrix.build }} -j $(nproc)
+
+      - name: Clean after building using CMake
+        shell: msys2 {0}
+        run: |
+          rm -rf build
+
+      - name: Build using CMake w/ OpenBLAS
+        shell: msys2 {0}
+        run: |
+          cmake -B build -DWHISPER_OPENBLAS=ON
+          cmake --build build --config ${{ matrix.build }} -j $(nproc)
+
   windows:
     runs-on: windows-latest
@@ -162,14 +324,14 @@ jobs:
           s2arc: x64
           jnaPath: win32-x86-64
         - sdl2: ON
-          s2ver: 2.26.0
+          s2ver: 2.28.5

     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Add msbuild to PATH
-        uses: microsoft/setup-msbuild@v1
+        uses: microsoft/setup-msbuild@v2

       - name: Fetch SDL2 and set SDL2_DIR
         if: matrix.sdl2 == 'ON'

@@ -182,7 +344,7 @@ jobs:
         run: >
           cmake -S . -B ./build -A ${{ matrix.arch }}
           -DCMAKE_BUILD_TYPE=${{ matrix.build }}
-          -DWHISPER_SUPPORT_SDL2=${{ matrix.sdl2 }}
+          -DWHISPER_SDL2=${{ matrix.sdl2 }}

       - name: Build
         run: |

@@ -194,14 +356,14 @@ jobs:
         run: copy "$env:SDL2_DIR/../lib/${{ matrix.s2arc }}/SDL2.dll" build/bin/${{ matrix.build }}

       - name: Upload dll
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: ${{ matrix.jnaPath }}_whisper.dll
           path: build/bin/${{ matrix.build }}/whisper.dll

       - name: Upload binaries
         if: matrix.sdl2 == 'ON'
-        uses: actions/upload-artifact@v1
+        uses: actions/upload-artifact@v4
         with:
           name: whisper-bin-${{ matrix.arch }}
           path: build/bin/${{ matrix.build }}

@@ -217,20 +379,23 @@ jobs:
         sdl2: [ON]
         include:
           - arch: Win32
-            obzip: https://github.com/xianyi/OpenBLAS/releases/download/v0.3.21/OpenBLAS-0.3.21-x86.zip
+            obzip: https://github.com/OpenMathLib/OpenBLAS/releases/download/v0.3.25/OpenBLAS-0.3.25-x86.zip
             s2arc: x86
+            clblast: OFF
           - arch: x64
-            obzip: https://github.com/xianyi/OpenBLAS/releases/download/v0.3.21/OpenBLAS-0.3.21-x64.zip
+            obzip: https://github.com/OpenMathLib/OpenBLAS/releases/download/v0.3.25/OpenBLAS-0.3.25-x64.zip
             s2arc: x64
+            clblast: ON
+            clver: 1.6.1
           - sdl2: ON
-            s2ver: 2.26.0
+            s2ver: 2.28.5

     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Add msbuild to PATH
-        uses: microsoft/setup-msbuild@v1
+        uses: microsoft/setup-msbuild@v2

       - name: Fetch OpenBLAS
         if: matrix.blas == 'ON'

@@ -239,7 +404,7 @@ jobs:
           7z x blas.zip -oblas -y
           copy blas/include/cblas.h .
           copy blas/include/openblas_config.h .
-          echo "blasdir=$env:GITHUB_WORKSPACE/blas" >> $env:GITHUB_ENV
+          echo "OPENBLAS_PATH=$env:GITHUB_WORKSPACE/blas" >> $env:GITHUB_ENV

       - name: Fetch SDL2 and set SDL2_DIR
         if: matrix.sdl2 == 'ON'

@@ -248,13 +413,26 @@ jobs:
           7z x sdl2.zip
           echo "SDL2_DIR=$env:GITHUB_WORKSPACE/SDL2-${{ matrix.s2ver }}/cmake" >> $env:GITHUB_ENV

+      - name: Install OpenCL
+        if: matrix.clblast == 'ON'
+        run: vcpkg.exe --triplet=${{ matrix.arch }}-windows install opencl
+
+      - name: Fetch CLBlast and set CLBlast_DIR
+        if: matrix.clblast == 'ON'
+        run: |
+          C:/msys64/usr/bin/wget.exe -qO clblast.zip https://github.com/CNugteren/CLBlast/releases/download/${{ matrix.clver }}/CLBlast-${{ matrix.clver }}-windows-x64.zip
+          7z x clblast.zip
+          7z x CLBlast-${{ matrix.clver }}-windows-x64.7z
+          echo "CLBlast_DIR=$env:GITHUB_WORKSPACE/CLBlast-${{ matrix.clver }}-windows-x64/lib/cmake/CLBlast" >> $env:GITHUB_ENV
+
       - name: Configure
         run: >
           cmake -S . -B ./build -A ${{ matrix.arch }}
           -DCMAKE_BUILD_TYPE=${{ matrix.build }}
-          -DWHISPER_SUPPORT_OPENBLAS=${{ matrix.blas }}
+          -DWHISPER_OPENBLAS=${{ matrix.blas }}
-          -DCMAKE_LIBRARY_PATH="$env:blasdir/lib"
+          -DCMAKE_LIBRARY_PATH="$env:OPENBLAS_PATH/lib"
-          -DWHISPER_SUPPORT_SDL2=${{ matrix.sdl2 }}
+          -DWHISPER_SDL2=${{ matrix.sdl2 }}
+          -DWHISPER_CLBLAST=${{ matrix.clblast }}

       - name: Build
         run: |

@@ -263,21 +441,25 @@ jobs:
       - name: Copy libopenblas.dll
         if: matrix.blas == 'ON'
-        run: copy "$env:blasdir/bin/libopenblas.dll" build/bin/${{ matrix.build }}
+        run: copy "$env:OPENBLAS_PATH/bin/libopenblas.dll" build/bin/${{ matrix.build }}

       - name: Copy SDL2.dll
         if: matrix.sdl2 == 'ON'
         run: copy "$env:SDL2_DIR/../lib/${{ matrix.s2arc }}/SDL2.dll" build/bin/${{ matrix.build }}

+      - name: Copy clblast.dll
+        if: matrix.clblast == 'ON'
+        run: copy "$env:CLBlast_DIR/../../clblast.dll" build/bin/${{ matrix.build }}
+
       - name: Upload binaries
         if: matrix.blas == 'ON' && matrix.sdl2 == 'ON'
-        uses: actions/upload-artifact@v1
+        uses: actions/upload-artifact@v4
         with:
-          name: whisper-blas-bin-${{ matrix.arch }}
+          name: whisper-blas${{ matrix.clblast == 'ON' && '-clblast' || ''}}-bin-${{ matrix.arch }}
           path: build/bin/${{ matrix.build }}

   windows-cublas:
-    runs-on: windows-latest
+    runs-on: windows-2019

     strategy:
       matrix:
@@ -285,22 +467,25 @@ jobs:
         arch: [x64]
         cublas: [ON]
         sdl2: [ON]
+        cuda-toolkit: [12.2.0, 11.8.0]
         include:
           - arch: x64
             s2arc: x64
           - sdl2: ON
-            s2ver: 2.26.0
+            s2ver: 2.28.5

     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Add msbuild to PATH
-        uses: microsoft/setup-msbuild@v1
+        uses: microsoft/setup-msbuild@v2

       - name: Install CUDA Toolkit
         id: cuda-toolkit
-        uses: Jimver/cuda-toolkit@v0.2.10
+        uses: Jimver/cuda-toolkit@v0.2.15
+        with:
+          cuda: '${{ matrix.cuda-toolkit }}'

       - name: Fetch SDL2 and set SDL2_DIR
         if: matrix.sdl2 == 'ON'

@@ -313,12 +498,20 @@ jobs:
         run: >
           cmake -S . -B ./build -A ${{ matrix.arch }}
           -DCMAKE_BUILD_TYPE=${{ matrix.build }}
-          -DWHISPER_CUBLAS=1
+          -DWHISPER_CUDA=${{ matrix.cublas }}
+          -DWHISPER_SDL2=${{ matrix.sdl2 }}

-      - name: Build
+      - name: Build ${{ matrix.cuda-toolkit }}
         run: |
           cd ./build
-          msbuild ALL_BUILD.vcxproj -t:build -p:configuration=${{ matrix.build }} -p:platform=${{ matrix.arch }}
+          cmake --build . --config ${{ matrix.build }}

+      - name: Copy CUDA DLLs
+        run: >
+          Copy-Item -PassThru
+          -Path "${{ steps.cuda-toolkit.outputs.CUDA_PATH }}/bin/*.dll"
+          -Include cudart64_*,cublas64_*,cublasLt64_*
+          -Destination build/bin/${{ matrix.build }}
+
       - name: Copy SDL2.dll
         if: matrix.sdl2 == 'ON'

@@ -326,9 +519,9 @@ jobs:
       - name: Upload binaries
         if: matrix.sdl2 == 'ON'
-        uses: actions/upload-artifact@v1
+        uses: actions/upload-artifact@v4
         with:
-          name: whisper-cublas-bin-${{ matrix.arch }}
+          name: whisper-cublas-${{ matrix.cuda-toolkit }}-bin-${{ matrix.arch }}
           path: build/bin/${{ matrix.build }}

   emscripten:

@@ -340,10 +533,10 @@ jobs:
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Setup emsdk
-        uses: mymindstorm/setup-emsdk@v12
+        uses: mymindstorm/setup-emsdk@v14

       - name: Verify
         run: emcc -v

@@ -362,7 +555,7 @@ jobs:
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Configure
         run: |

@@ -380,35 +573,75 @@ jobs:
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
+        with:
+          path: whisper
+
+      - name: Clone
+        uses: actions/checkout@v4
+        with:
+          repository: ggerganov/ggml
+          path: ggml

       - name: Install Java
-        uses: actions/setup-java@v3
+        uses: actions/setup-java@v4
         with:
           distribution: zulu
-          java-version: 17
+          java-version: 21

       - name: Setup Android SDK
-        uses: android-actions/setup-android@v2
+        uses: android-actions/setup-android@v3

       - name: Build
         run: |
-          cd examples/whisper.android
+          cd whisper/examples/whisper.android
           ./gradlew assembleRelease --no-daemon

+      - name: Build with external ggml
+        run: |
+          export PATH_TO_GGML=$PWD/ggml
+          cd whisper/examples/whisper.android
+          ./gradlew assembleRelease --no-daemon -PGGML_HOME=$PATH_TO_GGML
+
+  android_java:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Clone
+        uses: actions/checkout@v4
+
+      - name: set up JDK 11
+        uses: actions/setup-java@v4
+        with:
+          java-version: '11'
+          distribution: 'temurin'
+          cache: gradle
+
+      - name: Setup Android SDK
+        uses: android-actions/setup-android@v3
+        with:
+          cmdline-tools-version: 9.0
+
+      - name: Build
+        run: |
+          cd examples/whisper.android.java
+          chmod +x ./gradlew
+          ./gradlew assembleRelease
+
   java:
     needs: [ 'windows' ]
     runs-on: windows-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

       - name: Install Java
-        uses: actions/setup-java@v1
+        uses: actions/setup-java@v4
         with:
-          java-version: 17
+          distribution: zulu
+          java-version: 20

       - name: Download Windows lib
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: win32-x86-64_whisper.dll
           path: bindings/java/build/generated/resources/main/win32-x86-64

@@ -421,26 +654,29 @@ jobs:
           ./gradlew build

       - name: Upload jar
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: whispercpp.jar
           path: bindings/java/build/libs/whispercpp-*.jar

       - name: Publish package
         if: ${{ github.ref == 'refs/heads/master' }}
-        uses: gradle/gradle-build-action@v2
+        uses: gradle/gradle-build-action@v2.4.2
         with:
           arguments: publish
+          build-root-directory: bindings/java
         env:
-          MAVEN_USERNAME: ${{ secrets.OSSRH_USERNAME }}
+          MAVEN_USERNAME: ${{ secrets.JIRA_USER }}
-          MAVEN_PASSWORD: ${{ secrets.OSSRH_TOKEN }}
+          MAVEN_PASSWORD: ${{ secrets.JIRA_PASS }}
+          PGP_SECRET: ${{ secrets.GPG_PRIVATE_KEY }}
+          PGP_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}

   quantize:
     runs-on: ubuntu-latest

     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Test quantize
         run: |
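For reference, the new SYCL jobs above boil down to a local build along these lines. This is a sketch, not part of the workflow: it assumes the Intel oneAPI DPC++ compiler and MKL are already installed under /opt/intel/oneapi, exactly as the CI steps install them.

    source /opt/intel/oneapi/setvars.sh   # puts icx/icpx and MKL on the environment
    mkdir build && cd build
    cmake -DWHISPER_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ..
    cmake --build . --config Release -j $(nproc)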
|
57
.github/workflows/docker.yml
vendored
Normal file
57
.github/workflows/docker.yml
vendored
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
name: Publish Docker image
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
push_to_registry:
|
||||||
|
name: Push Docker image to Docker Hub
|
||||||
|
if: github.event.pull_request.draft == false
|
||||||
|
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
env:
|
||||||
|
COMMIT_SHA: ${{ github.sha }}
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
config:
|
||||||
|
- { tag: "main", dockerfile: ".devops/main.Dockerfile", platform: "linux/amd64,linux/arm64" }
|
||||||
|
- { tag: "main-cuda", dockerfile: ".devops/main-cuda.Dockerfile", platform: "linux/amd64" }
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Check out the repo
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
|
- name: Log in to Docker Hub
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.repository_owner }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Build and push Docker image (versioned)
|
||||||
|
if: github.event_name == 'push'
|
||||||
|
uses: docker/build-push-action@v5
|
||||||
|
with:
|
||||||
|
context: .
|
||||||
|
push: true
|
||||||
|
platforms: ${{ matrix.config.platforms }}
|
||||||
|
tags: "ghcr.io/${{ github.repository }}:${{ matrix.config.tag }}-${{ env.COMMIT_SHA }}"
|
||||||
|
file: ${{ matrix.config.dockerfile }}
|
||||||
|
|
||||||
|
- name: Build and push Docker image (tagged)
|
||||||
|
uses: docker/build-push-action@v4
|
||||||
|
with:
|
||||||
|
context: .
|
||||||
|
push: ${{ github.event_name == 'push' }}
|
||||||
|
platforms: ${{ matrix.config.platforms }}
|
||||||
|
tags: "ghcr.io/${{ github.repository }}:${{ matrix.config.tag }}"
|
||||||
|
file: ${{ matrix.config.dockerfile }}
|
.github/workflows/examples.yml (2 lines, vendored)
@@ -37,7 +37,7 @@ jobs:
         run: npm install

       - name: Compile addon.node
-        run: npx cmake-js compile -T whisper-addon -B Release
+        run: npx cmake-js compile -T addon.node -B Release

       - name: Download test model
         run: |
|
16
.gitignore
vendored
16
.gitignore
vendored
@ -6,17 +6,26 @@
|
|||||||
.vs/
|
.vs/
|
||||||
.vscode/
|
.vscode/
|
||||||
.DS_Store
|
.DS_Store
|
||||||
|
.vimspector.json
|
||||||
|
/CMakeSettings.json
|
||||||
|
|
||||||
build/
|
build/
|
||||||
|
build-coreml/
|
||||||
build-em/
|
build-em/
|
||||||
build-debug/
|
build-debug/
|
||||||
build-release/
|
build-release/
|
||||||
|
build-rwdi/
|
||||||
build-static/
|
build-static/
|
||||||
build-cublas/
|
build-cublas/
|
||||||
build-no-accel/
|
build-no-accel/
|
||||||
build-sanitize-addr/
|
build-sanitize-addr/
|
||||||
build-sanitize-thread/
|
build-sanitize-thread/
|
||||||
|
|
||||||
|
# SPM
|
||||||
|
.build/
|
||||||
|
.swiftpm
|
||||||
|
*.metallib
|
||||||
|
|
||||||
/main
|
/main
|
||||||
/stream
|
/stream
|
||||||
/command
|
/command
|
||||||
@ -24,6 +33,7 @@ build-sanitize-thread/
|
|||||||
/talk-llama
|
/talk-llama
|
||||||
/bench
|
/bench
|
||||||
/quantize
|
/quantize
|
||||||
|
/server
|
||||||
/lsp
|
/lsp
|
||||||
|
|
||||||
arm_neon.h
|
arm_neon.h
|
||||||
@ -45,3 +55,9 @@ models/*.mlpackage
|
|||||||
bindings/java/.gradle/
|
bindings/java/.gradle/
|
||||||
bindings/java/.idea/
|
bindings/java/.idea/
|
||||||
.idea/
|
.idea/
|
||||||
|
|
||||||
|
benchmark_results.csv
|
||||||
|
cmake-build-debug/
|
||||||
|
.cxx/
|
||||||
|
.gradle/
|
||||||
|
local.properties
|
||||||
|
AUTHORS (301 lines, Normal file)
@@ -0,0 +1,301 @@
# date: Tue Apr 9 20:27:03 EEST 2024
|
||||||
|
# this file is auto-generated by scripts/gen-authors.sh
|
||||||
|
|
||||||
|
0/0 <zero@imaskeleton.me>
|
||||||
|
0cc4m <picard12@live.de>
|
||||||
|
0xsourcecode <134374803+0xsourcecode@users.noreply.github.com>
|
||||||
|
AT <manyoso@users.noreply.github.com>
|
||||||
|
Aarni Koskela <akx@iki.fi>
|
||||||
|
Aaron Pham <29749331+aarnphm@users.noreply.github.com>
|
||||||
|
Aaron Taylor <aaron@exphat.com>
|
||||||
|
Abhilash Majumder <30946547+abhilash1910@users.noreply.github.com>
|
||||||
|
Abitofevrything <54505189+abitofevrything@users.noreply.github.com>
|
||||||
|
AfryMask <AfryMask@163.com>
|
||||||
|
Ahmad Bilal <ahmad.bilal@empglabs.com>
|
||||||
|
AidanBeltonS <87009434+AidanBeltonS@users.noreply.github.com>
|
||||||
|
Akash Mahajan <akash7190@gmail.com>
|
||||||
|
Akash Mahajan <akashmjn@stanford.edu>
|
||||||
|
Al Hoang <3811822-hoanga@users.noreply.gitlab.com>
|
||||||
|
Alan <unknown>
|
||||||
|
Aleksander Andrzejewski <18704749+aleksanderandrzejewski@users.noreply.github.com>
|
||||||
|
Alex Azarov <alex@azarov.by>
|
||||||
|
Alex Bacart <13940752+alex-bacart@users.noreply.github.com>
|
||||||
|
Alex Evgrashin <aevgrashin@yandex.ru>
|
||||||
|
Alexandr Graschenkov <alexandr.graschenkov91@gmail.com>
|
||||||
|
Alexandru Mariuti <alex@mariuti.com>
|
||||||
|
Alexey Kharlamov <alexey@kharlamov.biz>
|
||||||
|
Alfredo Montesinos <alfredo.montesinos@g.austincc.edu>
|
||||||
|
Ali Alameh <ali.alameh@isae.edu.lb>
|
||||||
|
Ananta Bastola <anantarajbastola@gmail.com>
|
||||||
|
Andreu Huguet <andreuhuguet@gmail.com>
|
||||||
|
Andrew Huynh <a5thuynh@gmail.com>
|
||||||
|
Andrew S <andrews54757@gmail.com>
|
||||||
|
Andy Maloney <asmaloney@gmail.com>
|
||||||
|
Anton Kostin <masguit42@users.noreply.github.com>
|
||||||
|
Artyom Mezin <psycho.fading@gmail.com>
|
||||||
|
Asad Memon <asad.lionpk@gmail.com>
|
||||||
|
Ashraful Islam <ashraful.meche@gmail.com>
|
||||||
|
AsukaMinato <asukaminato@nyan.eu.org>
|
||||||
|
AustinMroz <austinmroz@utexas.edu>
|
||||||
|
Avik Sengupta <avik@sengupta.net>
|
||||||
|
Bader-eddine Ouaich <49657842+baderouaich@users.noreply.github.com>
|
||||||
|
Baffin Lee <baffinlee@gmail.com>
|
||||||
|
Ben Nortier <bjnortier@gmail.com>
|
||||||
|
Benjamin Heiniger <benjamin.heiniger@bluewin.ch>
|
||||||
|
Bo-Yi Wu <appleboy.tw@gmail.com>
|
||||||
|
Boris Bliznioukov <blib@mail.com>
|
||||||
|
Borislav Stanimirov <b.stanimirov@abv.bg>
|
||||||
|
Brad Murray <59848399+bradmurray-dt@users.noreply.github.com>
|
||||||
|
Brian Murray <brian@bmurray.ca>
|
||||||
|
CRD716 <crd716@gmail.com>
|
||||||
|
Canis Lupus <Canis-UK@users.noreply.github.com>
|
||||||
|
Carolinabanana <140120812+Carolinabanana@users.noreply.github.com>
|
||||||
|
ChangSeok Oh <shivamidow@users.noreply.github.com>
|
||||||
|
Chaoqun <27287694+OpenWaygate@users.noreply.github.com>
|
||||||
|
Chia-Hsiang Cheng <88014292+garychia@users.noreply.github.com>
|
||||||
|
Chidi Williams <williamschidi1@gmail.com>
|
||||||
|
Christian <12550267+iceychris@users.noreply.github.com>
|
||||||
|
Clifford Heath <clifford.heath@gmail.com>
|
||||||
|
Colin <github@whoisc.cc>
|
||||||
|
DGdev91 <DGdev91@users.noreply.github.com>
|
||||||
|
Damian Czaja <trojan295@protonmail.com>
|
||||||
|
Daniel Bevenius <daniel.bevenius@gmail.com>
|
||||||
|
David <dnhkng@gmail.com>
|
||||||
|
David Thorpe <djt@mutablelogic.com>
|
||||||
|
Davidson Francis <davidsondfgl@gmail.com>
|
||||||
|
Dener Stassun <denerstassun@gmail.com>
|
||||||
|
Didzis Gosko <didzis@users.noreply.github.com>
|
||||||
|
Digipom <admin@digipom.com>
|
||||||
|
Dimo <dimo@ieee.org>
|
||||||
|
Dody Suria Wijaya <dodysw@gmail.com>
|
||||||
|
Dr. Tom Murphy VII Ph.D <499244+tom7@users.noreply.github.com>
|
||||||
|
Duncan McConnell <ddmcconnell4@gmail.com>
|
||||||
|
Egor Egorov <me@egorfine.com>
|
||||||
|
Elkana Bardugo <ttv200@gmail.com>
|
||||||
|
Emmanuel Schmidbauer <eschmidbauer@gmail.com>
|
||||||
|
Engininja2 <139037756+Engininja2@users.noreply.github.com>
|
||||||
|
Eric Swanson <eswanson@alloscomp.com>
|
||||||
|
Eric Tendian <erictendian@gmail.com>
|
||||||
|
Erik Scholz <Green-Sky@users.noreply.github.com>
|
||||||
|
Evan Jones <evan.q.jones@gmail.com>
|
||||||
|
Evan Martin <evan.martin@gmail.com>
|
||||||
|
Eve <139727413+netrunnereve@users.noreply.github.com>
|
||||||
|
Evgeny Kuznetsov <evgeny@kuznetsov.md>
|
||||||
|
F1L1P <78918286+F1L1Pv2@users.noreply.github.com>
|
||||||
|
Fangjun Kuang <csukuangfj@gmail.com>
|
||||||
|
Felix <stenbackfelix@gmail.com>
|
||||||
|
Finn Voorhees <finnvoorhees@gmail.com>
|
||||||
|
FlippFuzz <41221030+FlippFuzz@users.noreply.github.com>
|
||||||
|
Gang Chen <goncha@gmail.com>
|
||||||
|
Gavin Cai <gavin1818@hotmail.com>
|
||||||
|
George Hindle <george@georgehindle.com>
|
||||||
|
Georgi Gerganov <ggerganov@gmail.com>
|
||||||
|
GitAritron <103900385+GitAritron@users.noreply.github.com>
|
||||||
|
GiviMAD <GiviMAD@users.noreply.github.com>
|
||||||
|
Gleicon Moraes <gleicon@gmail.com>
|
||||||
|
Gregor Jasny <gjasny@googlemail.com>
|
||||||
|
Guillaume Wenzek <gwenzek@users.noreply.github.com>
|
||||||
|
HY. Kelvin Lee <34256578+hykelvinlee42@users.noreply.github.com>
|
||||||
|
Halalaluyafail3 <55773281+Halalaluyafail3@users.noreply.github.com>
|
||||||
|
Hang <bebound@gmail.com>
|
||||||
|
Herman Semenov <GermanAizek@yandex.ru>
|
||||||
|
Hrishikesh Barman <geekodour@users.noreply.github.com>
|
||||||
|
Ian Bicking <ian@ianbicking.org>
|
||||||
|
Ian Bull <irbull@eclipsesource.com>
|
||||||
|
Ikko Ashimine <eltociear@gmail.com>
|
||||||
|
InconsolableCellist <23345188+InconsolableCellist@users.noreply.github.com>
|
||||||
|
Ismatulla Mansurov <47342870+sapoepsilon@users.noreply.github.com>
|
||||||
|
Ivan Gorin <ivangorin21@gmail.com>
|
||||||
|
JJ <103335846+computerscienceiscool@users.noreply.github.com>
|
||||||
|
Jack Mousseau <jmousseau@users.noreply.github.com>
|
||||||
|
JacobLinCool <jacoblincool@gmail.com>
|
||||||
|
Jakub Ráček <blizzcz@gmail.com>
|
||||||
|
Jared Van Bortel <jared@nomic.ai>
|
||||||
|
Jay Binks <jaybinks@gmail.com>
|
||||||
|
Jhen-Jie Hong <developer@jhen.me>
|
||||||
|
Jhen-Jie Hong <iainst0409@gmail.com>
|
||||||
|
JidongZhang-THU <1119708529@qq.com>
|
||||||
|
Jo Liss <joliss42@gmail.com>
|
||||||
|
Johan <jr.raffin@gmail.com>
|
||||||
|
Johannes Gäßler <johannesg@5d6.de>
|
||||||
|
John Balis <phobossystems@gmail.com>
|
||||||
|
Jonathan Soo <jcsoo@agora.com>
|
||||||
|
Jonno <1160532+razodactyl@users.noreply.github.com>
|
||||||
|
Joonas Pihlajamaa <joonas.pihlajamaa@iki.fi>
|
||||||
|
Jose <34888496+Jerry-Master@users.noreply.github.com>
|
||||||
|
Josh Bleecher Snyder <josharian@gmail.com>
|
||||||
|
Judd <foldl@users.noreply.github.com>
|
||||||
|
Jumper775 <78500318+jumpers775@users.noreply.github.com>
|
||||||
|
Justine Tunney <jtunney@gmail.com>
|
||||||
|
KP Kaiser <kirk@zothcorp.com>
|
||||||
|
Kamilake <exjang0@gmail.com>
|
||||||
|
Kartik Saranathan <278928+Kartiku@users.noreply.github.com>
|
||||||
|
Kasumi <90275229+kasumi-1@users.noreply.github.com>
|
||||||
|
Kawrakow <48489457+ikawrakow@users.noreply.github.com>
|
||||||
|
Kevin Brothaler <admin@digipom.com>
|
||||||
|
Konstantin Zhuravlyov <konstantin.zhuravlyov@amd.com>
|
||||||
|
Kreijstal <rainb@tfwno.gf>
|
||||||
|
Kylin <56434533+KyL0N@users.noreply.github.com>
|
||||||
|
LBlue <153975653+lbluep@users.noreply.github.com>
|
||||||
|
Larry Battle <larry.battle.tech@gmail.com>
|
||||||
|
Laytan Laats <laytanlaats@hotmail.com>
|
||||||
|
Leo Moll <leo.moll@yeasoft.com>
|
||||||
|
Lexevolution <31176843+Lexevolution@users.noreply.github.com>
|
||||||
|
LittleLoli <26589867+WhichWho@users.noreply.github.com>
|
||||||
|
Lucas Zanek <57494138+LucasZNK@users.noreply.github.com>
|
||||||
|
Luis Herrera <herrera-luis@users.noreply.github.com>
|
||||||
|
Lukas Rist <glaslos@gmail.com>
|
||||||
|
M. A. Ali <73258591+MightyStud@users.noreply.github.com>
|
||||||
|
M. Eren Akbiyik <erenakbiyik@gmail.com>
|
||||||
|
Maciek <maciek.mab122@gmail.com>
|
||||||
|
Marcin Mielniczuk <marmistrz.dev@zoho.eu>
|
||||||
|
Martin Warnaar <martinwarnaar@gmail.com>
|
||||||
|
Matheus de Sousa <23645013+keyehzy@users.noreply.github.com>
|
||||||
|
Mathijs de Bruin <mathijs@mathijsfietst.nl>
|
||||||
|
Matija Pevec <mightymatth@users.noreply.github.com>
|
||||||
|
Maximiliano Levi <8160966+maxilevi@users.noreply.github.com>
|
||||||
|
Meng, Hengyu <hengyu.meng@intel.com>
|
||||||
|
Michael Podvitskiy <podvitskiymichael@gmail.com>
|
||||||
|
Michael Rienstra <mrienstra@gmail.com>
|
||||||
|
Mikhail Grigorev <sleuthhound@gmail.com>
|
||||||
|
Mohammadreza Hendiani <hendiani.mohammadreza@gmail.com>
|
||||||
|
Mohit Agarwal <mohit@sdf.org>
|
||||||
|
Murilo Santana <mvrilo@gmail.com>
|
||||||
|
Neil Chudleigh <nchudleigh@users.noreply.github.com>
|
||||||
|
Neo Zhang Jianyu <jianyu.zhang@intel.com>
|
||||||
|
Neuman Vong <neuman.vong@gmail.com>
|
||||||
|
Nicholas Albion <nalbion@yahoo.com>
|
||||||
|
Niels Mayer <Niels.Mayer@gmail.com>
|
||||||
|
Okabintaro <103938900+Okabintaro@users.noreply.github.com>
|
||||||
|
Oleg Sidorov <me@whitebox.io>
|
||||||
|
Oleg Sidorov <oleg@sidorov.nl>
|
||||||
|
Ondrej Kokes <ondrej.kokes@gmail.com>
|
||||||
|
Ouadie EL FAROUKI <ouadie.elfarouki@codeplay.com>
|
||||||
|
Paul Tsochantaris <ptsochantaris@icloud.com>
|
||||||
|
Philipp Zabel <philipp.zabel@gmail.com>
|
||||||
|
Philippe Normand <phil@base-art.net>
|
||||||
|
Przemysław Pawełczyk <przemoc@gmail.com>
|
||||||
|
Qianhe Chen <54462604+chenqianhe@users.noreply.github.com>
|
||||||
|
Radosław Gryta <radek.gryta@gmail.com>
|
||||||
|
Reinforce-II <fate@eastal.com>
|
||||||
|
Reinis Muiznieks <muiznieks.reinis@gmail.com>
|
||||||
|
RelatedTitle <r3latedtitle@gmail.com>
|
||||||
|
RhinoDevel <RhinoDevel@users.noreply.github.com>
|
||||||
|
Rich Jones <miserlou@gmail.com>
|
||||||
|
Robin <robin.xw@hotmail.com>
|
||||||
|
Roddur Dasgupta <roddurd@gmail.com>
|
||||||
|
Roland Rabien <figbug@gmail.com>
|
||||||
|
Rotem Dan <rotemdan@gmail.com>
|
||||||
|
Ryan Hitchman <hitchmanr@gmail.com>
|
||||||
|
Ryan Metcalfe <107415876+RyanMetcalfeInt8@users.noreply.github.com>
|
||||||
|
RyanChang <ftes90015@gmail.com>
|
||||||
|
Sam <49637763+Onlyartist9@users.noreply.github.com>
|
||||||
|
Sam Pullara <spullara@gmail.com>
|
||||||
|
Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com>
|
||||||
|
Sergio López <slp@sinrega.org>
Siddharth Ramakrishnan <srr2141@columbia.edu>
Simon Moisselin <simon.moisstoll@gmail.com>
Sindre Sorhus <sindresorhus@gmail.com>
Slava Primenko <primenko.s@gmail.com>
Syahmi Azhar <prsyahmi@gmail.com>
Syed Jafri <syedjafri97@gmail.com>
Sơn Phan Trung <phantrungson17@gmail.com>
Taisei Mima <bhbstar.me@gmail.com>
Takeshi Inoue <inoue.takeshi@gmail.com>
Tamotsu Takahashi <ttakah+github@gmail.com>
Taras Glek <taras@thegp.com>
Tauseef Mohiuddin <35351464+tauseefmohammed2@users.noreply.github.com>
Thijs Raymakers <thijs@raymakers.nl>
Thomas Fitzsimmons <fitzsim@fitzsim.org>
Tiago Fassoni <tiagofassoni@users.noreply.github.com>
Tienshiao Ma <tienshiao@tienshiao.org>
Timothy Cronin <40186632+4imothy@users.noreply.github.com>
Tobrun <tobrun.van.nuland@gmail.com>
Todd <taf2@users.noreply.github.com>
Tong Li <31761981+litongjava@users.noreply.github.com>
Topping1 <78745143+Topping1@users.noreply.github.com>
Travis Cline <travis.cline@gmail.com>
UEXTM.com <84163508+uextm@users.noreply.github.com>
Vadim Peretokin <vperetokin@hey.com>
Valentin Gosu <1454649+valenting@users.noreply.github.com>
Vulcan <93451215+trholding@users.noreply.github.com>
WhiteOlivierus <36532695+WhiteOlivierus@users.noreply.github.com>
Xiang (Kevin) Li <kevinli020508@gmail.com>
Xiao-Yong Jin <jinxiaoyong@gmail.com>
XiaotaoChen <chenxiaotao1234@gmail.com>
Yajing Tang <phillis@google.com>
Yang Shen <aplshenyang@gmail.com>
Yunès <jean.baptiste.yunes@free.fr>
ZaBlazzingZephyrus <119159668+blazingzephyr@users.noreply.github.com>
Zigfrid Zvezdin <ziggerZZ@gmail.com>
Zollner <24618122+Zolliner@users.noreply.github.com>
ai-at-home <149282006+ai-at-home@users.noreply.github.com>
alonfaraj <alonfaraj@gmail.com>
andypayne <apayne@gmail.com>
ardfork <134447697+ardfork@users.noreply.github.com>
automaticcat <daogiatuank54@gmail.com>
be-next <jerome.ramette@gmail.com>
bert hubert <bert@hubertnet.nl>
bmwl <brian.marshall@tolko.com>
bobqianic <129547291+bobqianic@users.noreply.github.com>
bocytko <bocytko+github@gmail.com>
boolemancer <48014766+boolemancer@users.noreply.github.com>
boolemancer <boolemancer@gmail.com>
bradmit <151883577+bradmit@users.noreply.github.com>
brunofaustino <b.fa.amorim@gmail.com>
bssrdf <merlintiger@hotmail.com>
byte-6174 <88070277+byte-6174@users.noreply.github.com>
cdosoftei <ciprian.dosoftei@gmail.com>
clach04 <Chris.Clark@actian.com>
compilade <113953597+compilade@users.noreply.github.com>
conradg <conradjgodfrey@gmail.com>
ddpasa <112642920+ddpasa@users.noreply.github.com>
denersc <denerstassun@gmail.com>
dscripka <dscripka@users.noreply.github.com>
duthils <duthils@duthils.net>
ecneladis <ecneladis@users.noreply.github.com>
faker <nspyia2002@gmail.com>
fitzsim <fitzsim@fitzsim.org>
fraxy-v <65565042+fraxy-v@users.noreply.github.com>
genevera (she/her) <genevera@users.noreply.github.com>
geniusnut <geniusnut@gmail.com>
greeshmay <greeshmay@gmail.com>
hydai <z54981220@gmail.com>
iamthad <thadeus.j.fleming@gmail.com>
james wolf <contractorwolf@hotmail.com>
joecryptotoo <80373433+joecryptotoo@users.noreply.github.com>
jorismertz <35079666+jorismertz@users.noreply.github.com>
junkfood <69683722+JunkFood02@users.noreply.github.com>
jwijffels <jwijffels@bnosac.be>
kamranjon <kamranjon@gmail.com>
katsu560 <katsu560oo-@docomo.ne.jp>
kennethge <57784063+kenneth-ge@users.noreply.github.com>
keyehzy <msamuel@aluno.puc-rio.br>
leejet <leejet714@gmail.com>
litong <31761981+litongjava@users.noreply.github.com>
lnyan <lkwq007@gmail.com>
m.bell <m.bell@techsmith.com>
mkiol <mkiol@users.noreply.github.com>
novag <7754358+novag@users.noreply.github.com>
pajowu <pajowu@pajowu.de>
polarmoon <90010972+polarmoon@users.noreply.github.com>
rlapray <lapray.romain@gmail.com>
sandrohanea <40202887+sandrohanea@users.noreply.github.com>
semiformal-net <84111142+semiformal-net@users.noreply.github.com>
shibukazu <61775791+shibukazu@users.noreply.github.com>
shikokuchuo <53399081+shikokuchuo@users.noreply.github.com>
slaren <slarengh@gmail.com>
slashlib <slashlib@users.noreply.github.com>
snadampal <87143774+snadampal@users.noreply.github.com>
st-gr <38470677+st-gr@users.noreply.github.com>
texmex76 <40733439+texmex76@users.noreply.github.com>
thefinaldegree <thefinaldegree@gmail.com>
trixirt <trix@redhat.com>
ulatekh <ulatekh@yahoo.com>
undef <undefdev@gmail.com>
venkr <venkateshrameshkumar+1@gmail.com>
vicalloy <zbirder@gmail.com>
xdrudis <xavierdrudis@yahoo.es>
zhouwg <6889919+zhouwg@users.noreply.github.com>
布客飞龙 <562826179@qq.com>
Артём Земляк <azemlyak@smart-consulting.ru>
CMakeLists.txt (448 changed lines)
@@ -1,6 +1,10 @@
-cmake_minimum_required (VERSION 3.0)
+cmake_minimum_required (VERSION 3.5)
 
-project(whisper.cpp VERSION 1.4.2)
+# Allow for the creation of solution folders.
+set_property(GLOBAL PROPERTY USE_FOLDERS ON)
+
+project(whisper.cpp VERSION 1.6.2)
+set(SOVERSION 1)
 
 # Add path to modules
 list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")
@@ -35,6 +39,12 @@ endif()
 
 # options
 
+if (APPLE)
+set(WHISPER_METAL_DEFAULT ON)
+else()
+set(WHISPER_METAL_DEFAULT OFF)
+endif()
+
 option(BUILD_SHARED_LIBS "whisper: build shared libs" ${BUILD_SHARED_LIBS_DEFAULT})
 
 option(WHISPER_ALL_WARNINGS "whisper: enable all compiler warnings" ON)
@@ -49,24 +59,39 @@ option(WHISPER_BUILD_EXAMPLES "whisper: build examples" ${WHISPER_STANDA
 
 option(WHISPER_SDL2 "whisper: support for libSDL2" OFF)
 
-option(WHISPER_NO_AVX "whisper: disable AVX" OFF)
-option(WHISPER_NO_AVX2 "whisper: disable AVX2" OFF)
-option(WHISPER_NO_FMA "whisper: disable FMA" OFF)
-option(WHISPER_NO_F16C "whisper: disable F16c" OFF)
+if (CMAKE_SYSTEM_NAME MATCHES "Linux")
+option(WHISPER_FFMPEG "whisper: support building and linking with ffmpeg libs (avcodec, swresample, ...)" OFF)
+endif()
+
+option(WHISPER_NO_AVX "whisper: disable AVX" OFF)
+option(WHISPER_NO_AVX2 "whisper: disable AVX2" OFF)
+option(WHISPER_NO_AVX512 "whisper: disable AVX512" ON)
+option(WHISPER_NO_AVX512_VBMI "whisper: disable AVX512-VBMI" ON)
+option(WHISPER_NO_AVX512_VNNI "whisper: disable AVX512-VNNI" ON)
+option(WHISPER_NO_FMA "whisper: disable FMA" OFF)
+option(WHISPER_NO_F16C "whisper: disable F16c" OFF)
 
 option(WHISPER_OPENVINO "whisper: support for OpenVINO" OFF)
 
 if (APPLE)
 option(WHISPER_NO_ACCELERATE "whisper: disable Accelerate framework" OFF)
+option(WHISPER_METAL "whisper: use Metal" ${WHISPER_METAL_DEFAULT})
+option(WHISPER_METAL_NDEBUG "whisper: disable Metal debugging" OFF)
 option(WHISPER_COREML "whisper: enable Core ML framework" OFF)
 option(WHISPER_COREML_ALLOW_FALLBACK "whisper: allow non-CoreML fallback" OFF)
+option(WHISPER_METAL_EMBED_LIBRARY "whisper: embed Metal library" OFF)
 else()
 option(WHISPER_BLAS "whisper: use BLAS libraries" OFF)
 option(WHISPER_BLAS_VENDOR "whisper: BLAS library vendor" Generic)
 option(WHISPER_OPENBLAS "whisper: prefer OpenBLAS" OFF)
-option(WHISPER_CUBLAS "whisper: support for cuBLAS" OFF)
+option(WHISPER_OPENBLAS_INTERFACE64 "whisper: use OpenBLAS w/ 64-bit interface" OFF)
+option(WHISPER_CUDA "whisper: support for CUDA" OFF)
+option(WHISPER_CUBLAS "whisper: support for CUDA (deprecated)" OFF)
 option(WHISPER_HIPBLAS "whisper: support for hipBLAS" OFF)
 option(WHISPER_CLBLAST "whisper: use CLBlast" OFF)
+option(WHISPER_MKL "whisper: use Intel Math Kernel Library (MKL)" OFF)
+option(WHISPER_SYCL "whisper: use SYCL" OFF)
+option(WHISPER_SYCL_F16 "whisper: use 16 bit floats for sycl calculations" OFF)
 endif()
 
 option(WHISPER_PERF "whisper: enable perf timings" OFF)
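Note: all of the switches above are CMake cache options, so a parent project can pre-seed them before pulling whisper.cpp in as a subdirectory; the diff does not show such a consumer, so the following is only a minimal sketch under assumed names (my_app, the vendor/whisper.cpp checkout path):

    # Hypothetical consumer CMakeLists.txt that vendors whisper.cpp and flips the new options
    cmake_minimum_required(VERSION 3.5)
    project(my_app CXX)

    # Pre-seed the cache before add_subdirectory() so whisper.cpp picks the values up
    set(WHISPER_CUDA           ON  CACHE BOOL "enable the CUDA backend"  FORCE)
    set(WHISPER_NO_AVX512      OFF CACHE BOOL "allow AVX-512 code paths" FORCE)
    set(WHISPER_BUILD_EXAMPLES OFF CACHE BOOL "skip bundled examples"    FORCE)

    add_subdirectory(vendor/whisper.cpp)   # assumed checkout location

    add_executable(my_app main.cpp)
    target_link_libraries(my_app PRIVATE whisper)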
@@ -97,6 +122,33 @@ endif()
 
 find_package(Threads REQUIRED)
 
+#compile flag sycl
+if (WHISPER_SYCL)
+set(CMAKE_CXX_STANDARD 17)
+else()
+set(CMAKE_CXX_STANDARD 11)
+endif()
+
+if (WHISPER_FFMPEG)
+# As of cmake 3.27, there is no official cmake support for FindFFmpeg.
+# Consequnelty we added a FindFFmpeg.cmake script the cmake subfolder:
+# whisper.cpp does not need the full ffmpeg libs, just AVFORMAT AVCODEC AVUTIL SWRESAMPLE
+# libswresample performs highly optimized audio resampling, rematrixing and sample format conversion operations
+# libavcodec provides a generic encoding/decoding framework and contains multiple decoders and encoders for audio, video and subtitle streams, and several bitstream filters.
+# libavformat provides a generic framework for multiplexing and demultiplexing (muxing and demuxing) audio, video and subtitle streams.
+find_package(FFmpeg REQUIRED)
+if (NOT ${FFMPEG_FOUND})
+message(FATAL_ERROR "Cannot find ffmpeg libs/headers")
+endif()
+message(STATUS "Found ffmpeg libs: ${FFMPEG_LIBRARIES}")
+message(STATUS "Found ffmpeg headers in: ${FFMPEG_INCLUDE_DIRS}")
+message(STATUS "ffmpeg definitions: ${FFMPEG_DEFINITIONS}")
+message(STATUS "Found avformat ${AVFORMAT_VERSION}")
+include_directories(${FFMPEG_INCLUDE_DIRS})
+add_compile_definitions(WHISPER_FFMPEG)
+set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} ${FFMPEG_LIBRARIES})
+endif()
+
 # on APPLE
 if (APPLE)
 # include Accelerate framework
@@ -107,9 +159,71 @@ if (APPLE)
 message(STATUS "Accelerate framework found")
 
 set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} ${ACCELERATE_FRAMEWORK})
-set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_USE_ACCELERATE)
+set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_USE_ACCELERATE -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64)
 else()
-message(WARNING "Accelerate framework not found")
+message(FATAL_ERROR "Accelerate framework not found")
+endif()
+endif()
+
+if (WHISPER_METAL)
+find_library(FOUNDATION_LIBRARY Foundation REQUIRED)
+find_library(METAL_FRAMEWORK Metal REQUIRED)
+find_library(METALKIT_FRAMEWORK MetalKit REQUIRED)
+
+if (METAL_FRAMEWORK)
+message(STATUS "Metal framework found")
+
+set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS}
+${FOUNDATION_LIBRARY}
+${METAL_FRAMEWORK}
+${METALKIT_FRAMEWORK}
+)
+set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_USE_METAL)
+
+if (WHISPER_METAL_NDEBUG)
+set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_METAL_NDEBUG)
+endif()
+else()
+message(FATAL_ERROR "Metal framework not found")
+endif()
+
+set(GGML_SOURCES_METAL ggml-metal.m ggml-metal.h)
+
+# copy ggml-common.h and ggml-metal.metal to bin directory
+configure_file(ggml-common.h bin/ggml-common.h COPYONLY)
+configure_file(ggml-metal.metal bin/ggml-metal.metal COPYONLY)
+
+if (WHISPER_METAL_EMBED_LIBRARY)
+enable_language(ASM)
+set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_METAL_EMBED_LIBRARY)
+
+set(METALLIB_SOURCE "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal.metal")
+set(COMMON_HEADER "${CMAKE_CURRENT_SOURCE_DIR}/ggml-common.h")
+
+file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/autogenerated")
+set(EMBED_METALLIB_ASSEMBLY "${CMAKE_BINARY_DIR}/autogenerated/ggml-embed-metallib.s")
+set(EMBED_METALLIB_SOURCE "${CMAKE_BINARY_DIR}/autogenerated/ggml-metal-combined.metal")
+
+add_custom_command(
+OUTPUT ${EMBED_METALLIB_SOURCE}
+COMMAND sed -e "/^#include \\\"ggml-common.h\\\"/r ${COMMON_HEADER}" -e "/^#include \\\"ggml-common.h\\\"/d" ${METALLIB_SOURCE} > ${EMBED_METALLIB_SOURCE}
+DEPENDS ${METALLIB_SOURCE} ${COMMON_HEADER}
+COMMENT "Generating combined Metal library for embedding"
+)
+
+add_custom_command(
+OUTPUT ${EMBED_METALLIB_ASSEMBLY}
+COMMAND echo ".section __DATA,__ggml_metallib" > ${EMBED_METALLIB_ASSEMBLY}
+COMMAND echo ".globl _ggml_metallib_start" >> ${EMBED_METALLIB_ASSEMBLY}
+COMMAND echo "_ggml_metallib_start:" >> ${EMBED_METALLIB_ASSEMBLY}
+COMMAND echo ".incbin \\\"${EMBED_METALLIB_SOURCE}\\\"" >> ${EMBED_METALLIB_ASSEMBLY}
+COMMAND echo ".globl _ggml_metallib_end" >> ${EMBED_METALLIB_ASSEMBLY}
+COMMAND echo "_ggml_metallib_end:" >> ${EMBED_METALLIB_ASSEMBLY}
+DEPENDS ${EMBED_METALLIB_SOURCE}
+COMMENT "Generate assembly for embedded Metal library"
+)
+
+set(GGML_SOURCES_METAL ${GGML_SOURCES_METAL} ${EMBED_METALLIB_ASSEMBLY})
+endif()
 endif()
 endif()
 
@@ -122,7 +236,7 @@ if (APPLE)
 
 set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DWHISPER_USE_COREML)
 else()
-message(WARNING "CoreML framework not found")
+message(FATAL_ERROR "CoreML framework not found")
 endif()
 
 if (WHISPER_COREML_ALLOW_FALLBACK)
@@ -134,40 +248,104 @@ endif()
 if (WHISPER_OPENBLAS)
 set(WHISPER_BLAS_VENDOR "OpenBLAS")
 set(WHISPER_BLAS ON)
+# BLA_PKGCONFIG_BLAS is supported since CMake 3.25.
+# FindBLAS.cmake pkg-config logic seems incomplete, because when
+# BLA_SIZEOF_INTEGER is 8, then it should search for blas64 instead of blas.
+# blas.pc/blas64.pc are not always provided, so let's be more specific
+# and go with openblas.pc/openblas64.pc if WHISPER_OPENBLAS is on.
+if (WHISPER_OPENBLAS_INTERFACE64)
+set(WHISPER_BLAS_LIB "openblas64")
+else ()
+set(WHISPER_BLAS_LIB "openblas")
+endif ()
+set(BLA_PKGCONFIG_BLAS ${WHISPER_BLAS_LIB})
+# OpenBLAS prebuilt libraries for Windows do not have "64" suffix in filename.
+# (But .pc file has "64" suffix in filename for USE_64BITINT=1 Windows build.)
+if (MSVC)
+set(WHISPER_BLAS_LIB "openblas")
+endif ()
 endif()
 
 if (WHISPER_BLAS)
-if (WIN32)
-if(DEFINED ENV{OPENBLAS_PATH})
-set(BLAS_LIBRARIES $ENV{OPENBLAS_PATH}/lib/libopenblas.dll.a)
-message(STATUS "Libraries ${BLAS_LIBRARIES}")
-set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_USE_OPENBLAS)
-include_directories($ENV{OPENBLAS_PATH}/include)
-set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} ${BLAS_LIBRARIES})
+if (NOT "$ENV{OPENBLAS_PATH}" STREQUAL "")
+if (WHISPER_STATIC)
+set(WHISPER_BLAS_LIB_PREFIX ${CMAKE_STATIC_LIBRARY_PREFIX})
+set(WHISPER_BLAS_LIB_SUFFIX ${CMAKE_STATIC_LIBRARY_SUFFIX})
 else ()
-message(WARNING "BLAS library was not found. Environment variable OPENBLAS_PATH not defined.")
+if (CMAKE_IMPORT_LIBRARY_SUFFIX)
+set(WHISPER_BLAS_LIB_PREFIX ${CMAKE_IMPORT_LIBRARY_PREFIX})
+set(WHISPER_BLAS_LIB_SUFFIX ${CMAKE_IMPORT_LIBRARY_SUFFIX})
+else ()
+set(WHISPER_BLAS_LIB_PREFIX ${CMAKE_SHARED_LIBRARY_PREFIX})
+set(WHISPER_BLAS_LIB_SUFFIX ${CMAKE_SHARED_LIBRARY_SUFFIX})
+endif ()
 endif ()
+# OpenBLAS prebuilt libraries hardcode "lib" prefix in filename even on Windows
+if (WHISPER_OPENBLAS)
+set(WHISPER_BLAS_LIB_PREFIX "lib")
+endif ()
+message(STATUS "BLAS compatible library path provided")
+set(BLAS_LIBRARIES "$ENV{OPENBLAS_PATH}/lib/${WHISPER_BLAS_LIB_PREFIX}${WHISPER_BLAS_LIB}${WHISPER_BLAS_LIB_SUFFIX}")
+message(STATUS "Libraries ${BLAS_LIBRARIES}")
+set(BLAS_INCLUDE_DIRS "$ENV{OPENBLAS_PATH}/include")
+message(STATUS "Include dirs ${BLAS_INCLUDE_DIRS}")
+if (NOT EXISTS "${BLAS_LIBRARIES}")
+message(FATAL_ERROR "BLAS library was not found. Environment variable OPENBLAS_PATH misdefined.")
+endif ()
+set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_USE_OPENBLAS)
+include_directories(${BLAS_INCLUDE_DIRS})
+set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} ${BLAS_LIBRARIES})
 else ()
-set(BLA_STATIC 1)
+if (WHISPER_STATIC)
+# FindBLAS.cmake pkg-config logic seems incomplete, because when
+# BLA_STATIC is on, then it should use pkg_check_modules_static
+# instead of pkg_check_modules.
+# Some manual variable overriding may be necessary if you don't
+# achieve desired results.
+set(BLA_STATIC 1)
+endif ()
 set(BLA_VENDOR ${WHISPER_BLAS_VENDOR})
-# set(BLA_PREFER_PKGCONFIG 1)
+if (WHISPER_OPENBLAS_INTERFACE64)
 set(BLA_SIZEOF_INTEGER 8)
+else ()
+set(BLA_SIZEOF_INTEGER 4)
+endif()
+set(BLA_PREFER_PKGCONFIG 1)
 find_package(BLAS)
 
 if(BLAS_FOUND)
 message(STATUS "BLAS compatible library found")
 message(STATUS "Libraries ${BLAS_LIBRARIES}")
-find_path(BLAS_INCLUDE_DIRS cblas.h /usr/include/openblas /usr/local/include/openblas $ENV{BLAS_HOME}/include)
+if (NOT DEFINED BLAS_INCLUDE_DIRS)
+if (PKGC_BLAS_FOUND)
+set(BLAS_INCLUDE_DIRS "${PKGC_BLAS_INCLUDE_DIRS}")
+else ()
+find_path(BLAS_INCLUDE_DIRS cblas.h /usr/include/openblas)
+endif()
+endif()
+message(STATUS "Include dirs ${BLAS_INCLUDE_DIRS}")
 set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_USE_OPENBLAS)
 include_directories(${BLAS_INCLUDE_DIRS})
 set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} ${BLAS_LIBRARIES})
 else()
-message(WARNING "BLAS library was not found")
+message(FATAL_ERROR "BLAS library was not found")
 endif()
 endif ()
 endif ()
 
+if (WHISPER_MKL)
+find_package(MKL CONFIG REQUIRED PATHS $ENV{MKLROOT})
+message(STATUS "Imported oneMKL targets: ${MKL_IMPORTED_TARGETS}")
+set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_USE_OPENBLAS)
+set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_BLAS_USE_MKL)
+endif()
+
 if (WHISPER_CUBLAS)
+message(WARNING "WHISPER_CUBLAS is deprecated and will be removed in the future.\nUse WHISPER_CUDA instead")
+set(WHISPER_CUDA ON)
+endif()
+
+if (WHISPER_CUDA)
 cmake_minimum_required(VERSION 3.17)
 
 find_package(CUDAToolkit)
@@ -177,18 +355,26 @@ if (WHISPER_CUBLAS)
 
 enable_language(CUDA)
 
-set(GGML_CUDA_SOURCES ggml-cuda.cu ggml-cuda.h)
+file(GLOB GGML_SOURCES_CUDA "ggml-cuda/*.cu")
+list(APPEND GGML_SOURCES_CUDA ggml-cuda.h)
+list(APPEND GGML_SOURCES_CUDA ggml-cuda.cu)
 
-add_compile_definitions(GGML_USE_CUBLAS)
+add_compile_definitions(GGML_USE_CUDA)
 
 if (WHISPER_STATIC)
-set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static)
+if (WIN32)
+# As of 12.3.1 CUDA Tookit for Windows does not offer a static cublas library
+set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas CUDA::cublasLt CUDA::cufft)
+else ()
+set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static CUDA::cufft_static)
+endif()
 else()
-set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt)
+set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt CUDA::cufft)
 endif()
 
+set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} CUDA::cuda_driver)
 else()
-message(WARNING "cuBLAS not found")
+message(FATAL_ERROR "cuBLAS not found")
 endif()
 endif()
 
@@ -208,18 +394,20 @@ if (WHISPER_HIPBLAS)
 
 if (${hipblas_FOUND} AND ${hip_FOUND})
 message(STATUS "HIP and hipBLAS found")
-add_compile_definitions(GGML_USE_HIPBLAS GGML_USE_CUBLAS)
-add_library(ggml-rocm OBJECT ggml-cuda.cu ggml-cuda.h)
-set_property(TARGET ggml-rocm PROPERTY POSITION_INDEPENDENT_CODE ON)
-set_source_files_properties(ggml-cuda.cu PROPERTIES LANGUAGE CXX)
-target_link_libraries(ggml-rocm PRIVATE hip::device PUBLIC hip::host roc::rocblas roc::hipblas)
+set(GGML_HEADERS_ROCM "ggml-cuda.h")
 
+file(GLOB GGML_SOURCES_ROCM "ggml-cuda/*.cu")
+list(APPEND GGML_SOURCES_ROCM "ggml-cuda.cu")
+
+add_compile_definitions(GGML_USE_HIPBLAS GGML_USE_CUDA)
+
+set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE CXX)
 if (WHISPER_STATIC)
 message(FATAL_ERROR "Static linking not supported for HIP/ROCm")
 endif()
-set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} ggml-rocm)
+set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} hip::device PUBLIC hip::host roc::rocblas roc::hipblas)
 else()
-message(WARNING "hipBLAS or HIP not found. Try setting CMAKE_PREFIX_PATH=/opt/rocm")
+message(FATAL_ERROR "hipBLAS or HIP not found. Try setting CMAKE_PREFIX_PATH=/opt/rocm")
 endif()
 endif()
 
@@ -228,13 +416,13 @@ if (WHISPER_CLBLAST)
 if (CLBlast_FOUND)
 message(STATUS "CLBlast found")
 
-set(GGML_OPENCL_SOURCES ggml-opencl.cpp ggml-opencl.h)
+set(GGML_SOURCES_OPENCL ggml-opencl.cpp ggml-opencl.h)
 
 add_compile_definitions(GGML_USE_CLBLAST)
 
 set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} clblast)
 else()
-message(WARNING "CLBlast not found")
+message(FATAL_ERROR "CLBlast not found")
 endif()
 endif()
 
@@ -242,6 +430,30 @@ if( WHISPER_OPENVINO )
 find_package(OpenVINO REQUIRED COMPONENTS Runtime)
 endif()
 
+if (WHISPER_SYCL)
+if ( NOT DEFINED ENV{ONEAPI_ROOT})
+message(FATAL_ERROR "Not detect ENV {ONEAPI_ROOT}, please install oneAPI & source it, like: source /opt/intel/oneapi/setvars.sh")
+endif()
+#todo: AOT
+
+find_package(IntelSYCL REQUIRED)
+if (WHISPER_SYCL_F16)
+add_compile_definitions(GGML_SYCL_F16)
+endif()
+add_compile_definitions(GGML_USE_SYCL)
+
+add_compile_options(-I./) #include DPCT
+add_compile_options(-I/${SYCL_INCLUDE_DIR})
+
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-narrowing")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsycl -L${MKLROOT}/lib")
+
+set(GGML_HEADERS_SYCL ggml-sycl.h)
+set(GGML_SOURCES_SYCL ggml-sycl.cpp)
+
+set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} sycl OpenCL mkl_core pthread m dl mkl_sycl_blas mkl_intel_ilp64 mkl_tbb_thread)
+endif()
 # compiler flags
 
 if (NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
@@ -273,7 +485,8 @@ if (WHISPER_ALL_WARNINGS)
 endif()
 
 if (NOT MSVC)
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror=vla")
+# TODO: temporary disabled until we figure out ggml-metal.m
+#set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror=vla")
 #set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-math-errno -ffinite-math-only -funsafe-math-optimizations")
 endif()
 
@@ -289,21 +502,35 @@ else()
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /utf-8")
 set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /utf-8")
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /utf-8")
-if(NOT WHISPER_NO_AVX2)
+if(NOT WHISPER_NO_AVX512)
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX512")
+set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /arch:AVX512")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /arch:AVX512")
+# MSVC has no compile-time flags enabling specific
+# AVX512 extensions, neither it defines the
+# macros corresponding to the extensions.
+# Do it manually.
+if (NOT WHISPER_NO_AVX512_VBMI)
+add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VBMI__>)
+add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VBMI__>)
+endif()
+if (NOT WHISPER_NO_AVX512_VNNI)
+add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VNNI__>)
+add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VNNI__>)
+endif()
+elseif(NOT WHISPER_NO_AVX2)
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX2")
 set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /arch:AVX2")
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /arch:AVX2")
-else()
-if(NOT WHISPER_NO_AVX)
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX")
-set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /arch:AVX")
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /arch:AVX")
-endif()
+elseif(NOT WHISPER_NO_AVX)
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX")
+set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /arch:AVX")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /arch:AVX")
 endif()
 else()
 if (EMSCRIPTEN)
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pthread")
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pthread -s TOTAL_STACK=5242880")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread -s TOTAL_STACK=5242880")
 else()
 if(NOT WHISPER_NO_AVX)
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx")
@@ -311,6 +538,15 @@ else()
 if(NOT WHISPER_NO_AVX2)
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx2")
 endif()
+if(NOT WHISPER_NO_AVX512)
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512f -mavx512cd -mavx512vl -mavx512dq -mavx512bw")
+if(NOT WHISPER_NO_AVX512_VBMI)
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512vbmi")
+endif()
+if(NOT WHISPER_NO_AVX512_VNNI)
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512vnni")
+endif()
+endif()
 if(NOT WHISPER_NO_FMA)
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfma")
 endif()
@@ -321,6 +557,53 @@ else()
 endif()
 endif()
 
+#
+# POSIX conformance
+#
+
+# clock_gettime came in POSIX.1b (1993)
+# CLOCK_MONOTONIC came in POSIX.1-2001 / SUSv3 as optional
+# posix_memalign came in POSIX.1-2001 / SUSv3
+# M_PI is an XSI extension since POSIX.1-2001 / SUSv3, came in XPG1 (1985)
+add_compile_definitions(_XOPEN_SOURCE=600)
+
+# Somehow in OpenBSD whenever POSIX conformance is specified
+# some string functions rely on locale_t availability,
+# which was introduced in POSIX.1-2008, forcing us to go higher
+if (CMAKE_SYSTEM_NAME MATCHES "OpenBSD")
+remove_definitions(-D_XOPEN_SOURCE=600)
+add_compile_definitions(_XOPEN_SOURCE=700)
+endif()
+
+# Data types, macros and functions related to controlling CPU affinity
+# are available on Linux through GNU extensions in libc
+if (CMAKE_SYSTEM_NAME MATCHES "Linux")
+add_compile_definitions(_GNU_SOURCE)
+endif()
+
+# RLIMIT_MEMLOCK came in BSD, is not specified in POSIX.1,
+# and on macOS its availability depends on enabling Darwin extensions
+# similarly on DragonFly, enabling BSD extensions is necessary
+if (CMAKE_SYSTEM_NAME MATCHES "Darwin")
+add_compile_definitions(_DARWIN_C_SOURCE)
+endif()
+if (CMAKE_SYSTEM_NAME MATCHES "DragonFly")
+add_compile_definitions(_DARWIN_C_SOURCE)
+endif()
+
+# alloca is a non-standard interface that is not visible on BSDs when
+# POSIX conformance is specified, but not all of them provide a clean way
+# to enable it in such cases
+if (CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
+add_compile_definitions(__BSD_VISIBLE)
+endif()
+if (CMAKE_SYSTEM_NAME MATCHES "NetBSD")
+add_compile_definitions(_NETBSD_SOURCE)
+endif()
+if (CMAKE_SYSTEM_NAME MATCHES "OpenBSD")
+add_compile_definitions(_BSD_SOURCE)
+endif()
+
 if (WHISPER_PERF)
 set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_PERF)
 endif()
@@ -350,6 +633,7 @@ if (WHISPER_COREML)
 set_target_properties(${TARGET} PROPERTIES
 COMPILE_FLAGS "-fobjc-arc"
 )
+set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
 endif()
 
 if (WHISPER_OPENVINO)
@@ -368,6 +652,7 @@ if (WHISPER_OPENVINO)
 set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DWHISPER_USE_OPENVINO)
 
 target_link_libraries(${TARGET} PRIVATE openvino::runtime)
+set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
 endif()
 
 #
@@ -379,12 +664,34 @@ set(TARGET whisper)
 add_library(${TARGET}
 ggml.h
 ggml.c
-${GGML_CUDA_SOURCES}
-${GGML_OPENCL_SOURCES}
+ggml-alloc.h
+ggml-alloc.c
+ggml-backend.h
+ggml-backend.c
+ggml-quants.h
+ggml-quants.c
+${GGML_SOURCES_METAL}
+${GGML_SOURCES_CUDA}
+${GGML_SOURCES_OPENCL}
+${GGML_SOURCES_SYCL} ${GGML_HEADERS_SYCL}
+${GGML_SOURCES_ROCM} ${GGML_HEADERS_ROCM}
 whisper.h
 whisper.cpp
 )
 
+if (WHISPER_CUDA)
+target_sources(${TARGET} PRIVATE whisper-mel-cuda.cu)
+endif()
+
+include_directories (
+.
+)
+# Set the version numbers
+set_target_properties(whisper PROPERTIES
+VERSION ${PROJECT_VERSION}
+SOVERSION ${SOVERSION}
+)
+
 include(DefaultTargetOptions)
 
 target_include_directories(${TARGET} PUBLIC
@@ -399,6 +706,10 @@ if (WHISPER_OPENVINO)
 target_link_libraries(${TARGET} PRIVATE whisper.openvino)
 endif()
 
+if (WHISPER_MKL)
+target_link_libraries(${TARGET} PUBLIC MKL::MKL)
+endif()
+
 if (MSVC)
 target_link_libraries(${TARGET} PRIVATE ${WHISPER_EXTRA_LIBS} ${CMAKE_THREAD_LIBS_INIT})
 
@@ -408,6 +719,7 @@ else()
 endif()
 
 if (BUILD_SHARED_LIBS)
+set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
 target_link_libraries(${TARGET} PUBLIC
 ${CMAKE_DL_LIBS}
 )
@@ -421,11 +733,23 @@ if (BUILD_SHARED_LIBS)
 WHISPER_BUILD
 GGML_BUILD
 )
+
+if (WHISPER_METAL)
+# TODO: I think this should make ggml-metal.m "see" the ggml-metal.metal file from the "bin" directory
+# but for some reason it does not work here like it does in llama.cpp
+set_target_properties(${TARGET} PROPERTIES RESOURCE "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal.metal")
+endif()
 endif()
 
-if (GGML_CUDA_SOURCES)
+if (GGML_SOURCES_CUDA)
 message(STATUS "GGML CUDA sources found, configuring CUDA architecture")
-set_property(TARGET whisper PROPERTY CUDA_ARCHITECTURES OFF)
+# Only configure gmml CUDA architectures is not globally set
+if (NOT DEFINED GGML_CUDA_ARCHITECTURES)
+# Not overriden by user, so set defaults
+set(GGML_CUDA_ARCHITECTURES 52 61 70)
+endif()
+message(STATUS "GGML Configuring CUDA architectures ${GGML_CUDA_ARCHITECTURES}")
+set_property(TARGET whisper PROPERTY CUDA_ARCHITECTURES ${GGML_CUDA_ARCHITECTURES})
 set_property(TARGET whisper PROPERTY CUDA_SELECT_NVCC_ARCH_FLAGS "Auto")
 endif()
 
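Note: the hunk above only defaults GGML_CUDA_ARCHITECTURES (to 52 61 70) when the variable is not already defined, so a build script can pin its own architecture list before whisper.cpp is configured. A minimal sketch under assumed values (the 75;86 GPU list and the vendor/whisper.cpp path are not part of the diff):

    # Hypothetical pre-seeding of the CUDA architectures before whisper.cpp is configured
    set(GGML_CUDA_ARCHITECTURES "75;86" CACHE STRING "CUDA archs for whisper" FORCE)
    set(WHISPER_CUDA ON CACHE BOOL "enable the CUDA backend" FORCE)
    add_subdirectory(vendor/whisper.cpp)   # assumed checkout location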
@@ -437,12 +761,16 @@ target_compile_definitions(${TARGET} PUBLIC
 ${WHISPER_EXTRA_FLAGS}
 )
 
-set_target_properties(${TARGET} PROPERTIES PUBLIC_HEADER "whisper.h")
+set_target_properties(${TARGET} PROPERTIES PUBLIC_HEADER "ggml.h;whisper.h")
+set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
+
+include(GNUInstallDirs)
 
 install(TARGETS ${TARGET}
 LIBRARY DESTINATION lib
 ARCHIVE DESTINATION lib/static
 RUNTIME DESTINATION bin
+RESOURCE DESTINATION bin
 PUBLIC_HEADER DESTINATION include
 )
 
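Note: with the install() rules above, the public headers (ggml.h, whisper.h) land in <prefix>/include and the library in <prefix>/lib, so an unrelated CMake project can pick up an installed copy roughly as sketched below (the project name and main.cpp are assumptions, not part of the diff):

    # Hypothetical downstream project linking an installed libwhisper
    cmake_minimum_required(VERSION 3.5)
    project(transcriber C CXX)

    find_path(WHISPER_INCLUDE_DIR whisper.h)   # <prefix>/include per the install() rules above
    find_library(WHISPER_LIBRARY whisper)      # <prefix>/lib

    add_executable(transcriber main.cpp)
    target_include_directories(transcriber PRIVATE ${WHISPER_INCLUDE_DIR})
    target_link_libraries(transcriber PRIVATE ${WHISPER_LIBRARY})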
LICENSE (2 changed lines)
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2023 Georgi Gerganov
+Copyright (c) 2023-2024 The ggml authors
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
Makefile (333 changed lines)
@@ -1,4 +1,4 @@
-default: main bench quantize
+default: main bench quantize server
 
 ifndef UNAME_S
 UNAME_S := $(shell uname -s)
@@ -18,7 +18,18 @@ ifndef NVCC_VERSION
 endif
 endif
 
-CCV := $(shell $(CC) --version | head -n 1)
+# In GNU make default CXX is g++ instead of c++. Let's fix that so that users
+# of non-gcc compilers don't have to provide g++ alias or wrapper.
+DEFCC := cc
+DEFCXX := c++
+ifeq ($(origin CC),default)
+CC := $(DEFCC)
+endif
+ifeq ($(origin CXX),default)
+CXX := $(DEFCXX)
+endif
+
+CCV := $(shell $(CC) --version | head -n 1)
 CXXV := $(shell $(CXX) --version | head -n 1)
 
 # Mac OS + Arm can report x86_64
@@ -42,18 +53,61 @@ CFLAGS = -I. -O3 -DNDEBUG -std=c11 -fPIC
 CXXFLAGS = -I. -I./examples -O3 -DNDEBUG -std=c++11 -fPIC
 LDFLAGS =
 
-# ref: https://github.com/ggerganov/whisper.cpp/issues/37
-ifneq ($(wildcard /usr/include/musl/*),)
-CFLAGS += -D_POSIX_SOURCE -D_GNU_SOURCE
-CXXFLAGS += -D_POSIX_SOURCE -D_GNU_SOURCE
+ifdef MACOSX_DEPLOYMENT_TARGET
+CFLAGS += -mmacosx-version-min=$(MACOSX_DEPLOYMENT_TARGET)
+CXXFLAGS += -mmacosx-version-min=$(MACOSX_DEPLOYMENT_TARGET)
+LDFLAGS += -mmacosx-version-min=$(MACOSX_DEPLOYMENT_TARGET)
+endif
+
+# clock_gettime came in POSIX.1b (1993)
+# CLOCK_MONOTONIC came in POSIX.1-2001 / SUSv3 as optional
+# posix_memalign came in POSIX.1-2001 / SUSv3
+# M_PI is an XSI extension since POSIX.1-2001 / SUSv3, came in XPG1 (1985)
+CFLAGS += -D_XOPEN_SOURCE=600
+CXXFLAGS += -D_XOPEN_SOURCE=600
+
+# Somehow in OpenBSD whenever POSIX conformance is specified
+# some string functions rely on locale_t availability,
+# which was introduced in POSIX.1-2008, forcing us to go higher
+ifeq ($(UNAME_S),OpenBSD)
+CFLAGS += -U_XOPEN_SOURCE -D_XOPEN_SOURCE=700
+CXXFLAGS += -U_XOPEN_SOURCE -D_XOPEN_SOURCE=700
+endif
+
+# Data types, macros and functions related to controlling CPU affinity
+# are available on Linux through GNU extensions in libc
+ifeq ($(UNAME_S),Linux)
+CFLAGS += -D_GNU_SOURCE
+CXXFLAGS += -D_GNU_SOURCE
 endif
 
 # RLIMIT_MEMLOCK came in BSD, is not specified in POSIX.1,
 # and on macOS its availability depends on enabling Darwin extensions
+# similarly on DragonFly, enabling BSD extensions is necessary
 ifeq ($(UNAME_S),Darwin)
 CFLAGS += -D_DARWIN_C_SOURCE
 CXXFLAGS += -D_DARWIN_C_SOURCE
 endif
+ifeq ($(UNAME_S),DragonFly)
+CFLAGS += -D__BSD_VISIBLE
+CXXFLAGS += -D__BSD_VISIBLE
+endif
+
+# alloca is a non-standard interface that is not visible on BSDs when
+# POSIX conformance is specified, but not all of them provide a clean way
+# to enable it in such cases
+ifeq ($(UNAME_S),FreeBSD)
+CFLAGS += -D__BSD_VISIBLE
+CXXFLAGS += -D__BSD_VISIBLE
+endif
+ifeq ($(UNAME_S),NetBSD)
+CFLAGS += -D_NETBSD_SOURCE
+CXXFLAGS += -D_NETBSD_SOURCE
+endif
+ifeq ($(UNAME_S),OpenBSD)
+CFLAGS += -D_BSD_SOURCE
+CXXFLAGS += -D_BSD_SOURCE
+endif
 
 # OS specific
 # TODO: support Windows
@@ -62,60 +116,97 @@ ifeq ($(filter $(UNAME_S),Linux Darwin DragonFly FreeBSD NetBSD OpenBSD Haiku),$
 CXXFLAGS += -pthread
 endif
 
+# detect Windows
+ifneq ($(findstring _NT,$(UNAME_S)),)
+_WIN32 := 1
+endif
+
+# Windows Sockets 2 (Winsock) for network-capable apps
+ifeq ($(_WIN32),1)
+LWINSOCK2 := -lws2_32
+endif
+
 # Architecture specific
 # TODO: probably these flags need to be tweaked on some architectures
 # feel free to update the Makefile for your architecture and send a pull request or issue
-ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686))
+ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686 amd64))
 ifeq ($(UNAME_S),Darwin)
-CPUINFO_CMD := sysctl machdep.cpu.features
+CPUINFO_CMD := sysctl machdep.cpu.features machdep.cpu.leaf7_features
 else ifeq ($(UNAME_S),Linux)
 CPUINFO_CMD := cat /proc/cpuinfo
-else ifneq (,$(filter MINGW32_NT% MINGW64_NT%,$(UNAME_S)))
+else ifneq (,$(filter MINGW32_NT% MINGW64_NT% MSYS_NT%,$(UNAME_S)))
 CPUINFO_CMD := cat /proc/cpuinfo
+else ifneq (,$(filter DragonFly FreeBSD,$(UNAME_S)))
+CPUINFO_CMD := grep Features /var/run/dmesg.boot
 else ifeq ($(UNAME_S),Haiku)
 CPUINFO_CMD := sysinfo -cpu
 endif
 
+# x86 ISA extensions (chronological order)
 ifdef CPUINFO_CMD
-AVX_M := $(shell $(CPUINFO_CMD) | grep -m 1 "avx ")
-ifneq (,$(findstring avx,$(AVX_M)))
-CFLAGS += -mavx
+SSE3_M := $(shell $(CPUINFO_CMD) | grep -iwE 'PNI|SSE3')
+SSSE3_M := $(shell $(CPUINFO_CMD) | grep -iw 'SSSE3')
+AVX_M := $(shell $(CPUINFO_CMD) | grep -iwE 'AVX|AVX1.0')
+F16C_M := $(shell $(CPUINFO_CMD) | grep -iw 'F16C')
+FMA_M := $(shell $(CPUINFO_CMD) | grep -iw 'FMA')
+AVX2_M := $(shell $(CPUINFO_CMD) | grep -iw 'AVX2')
+AVX512F_M := $(shell $(CPUINFO_CMD) | grep -iw 'AVX512F')
+AVX512VBMI_M := $(shell $(CPUINFO_CMD) | grep -iw 'AVX512VBMI')
+AVX512VNNI_M := $(shell $(CPUINFO_CMD) | grep -iwE 'AVX512_VNNI|AVX512VNNI')
+
+# AVX-512 has many subsets, so let's make it easy to disable them all
+ifneq ($(filter-out 0,$(WHISPER_NO_AVX512)),)
+AVX512F_M :=
+AVX512VBMI_M :=
+AVX512VNNI_M :=
 endif
 
-AVX2_M := $(shell $(CPUINFO_CMD) | grep -m 1 "avx2 ")
-ifneq (,$(findstring avx2,$(AVX2_M)))
-CFLAGS += -mavx2
+ifneq (,$(SSE3_M))
+CFLAGS += -msse3
+CXXFLAGS += -msse3
 endif
 
-FMA_M := $(shell $(CPUINFO_CMD) | grep -m 1 "fma ")
-ifneq (,$(findstring fma,$(FMA_M)))
-CFLAGS += -mfma
+ifneq (,$(SSSE3_M))
+CFLAGS += -mssse3
+CXXFLAGS += -mssse3
 endif
 
-F16C_M := $(shell $(CPUINFO_CMD) | grep -m 1 "f16c ")
-ifneq (,$(findstring f16c,$(F16C_M)))
-CFLAGS += -mf16c
+ifneq (,$(AVX_M))
+CFLAGS += -mavx
+CXXFLAGS += -mavx
 
-AVX1_M := $(shell $(CPUINFO_CMD) | grep -m 1 "avx ")
-ifneq (,$(findstring avx,$(AVX1_M)))
-CFLAGS += -mavx
-endif
 endif
 
-SSE3_M := $(shell $(CPUINFO_CMD) | grep -m 1 "sse3 ")
-ifneq (,$(findstring sse3,$(SSE3_M)))
-CFLAGS += -msse3
+ifneq (,$(F16C_M))
+CFLAGS += -mf16c
+CXXFLAGS += -mf16c
 endif
 
-SSSE3_M := $(shell $(CPUINFO_CMD) | grep -m 1 "ssse3 ")
-ifneq (,$(findstring ssse3,$(SSSE3_M)))
-CFLAGS += -mssse3
+ifneq (,$(FMA_M))
+CFLAGS += -mfma
+CXXFLAGS += -mfma
+endif
+
+ifneq (,$(AVX2_M))
+CFLAGS += -mavx2
+CXXFLAGS += -mavx2
+endif
+
+ifneq (,$(AVX512F_M))
+CFLAGS += -mavx512f -mavx512cd -mavx512vl -mavx512dq -mavx512bw
+CXXFLAGS += -mavx512f -mavx512cd -mavx512vl -mavx512dq -mavx512bw
+endif
+
+ifneq (,$(AVX512VBMI_M))
+CFLAGS += -mavx512vbmi
+CXXFLAGS += -mavx512vbmi
+endif
+
+ifneq (,$(AVX512VNNI_M))
+CFLAGS += -mavx512vnni
+CXXFLAGS += -mavx512vnni
 endif
 endif
 endif
-ifeq ($(UNAME_M),amd64)
-CFLAGS += -mavx -mavx2 -mfma -mf16c
-endif
 
 ifneq ($(filter ppc64%,$(UNAME_M)),)
 POWER9_M := $(shell grep "POWER9" /proc/cpuinfo)
@@ -132,6 +223,8 @@ ifndef WHISPER_NO_ACCELERATE
 # Mac M1 - include Accelerate framework
 ifeq ($(UNAME_S),Darwin)
 CFLAGS += -DGGML_USE_ACCELERATE
+CFLAGS += -DACCELERATE_NEW_LAPACK
+CFLAGS += -DACCELERATE_LAPACK_ILP64
 LDFLAGS += -framework Accelerate
 endif
 endif
@@ -145,26 +238,67 @@ ifdef WHISPER_COREML_ALLOW_FALLBACK
 endif
 endif
 
-ifdef WHISPER_OPENBLAS
-CFLAGS += -DGGML_USE_OPENBLAS -I/usr/local/include/openblas -I/usr/include/openblas
-LDFLAGS += -lopenblas
+ifndef WHISPER_NO_METAL
+ifeq ($(UNAME_S),Darwin)
+WHISPER_METAL := 1
+
+CFLAGS += -DGGML_USE_METAL
+CXXFLAGS += -DGGML_USE_METAL
+LDFLAGS += -framework Foundation -framework Metal -framework MetalKit
+endif
+endif
+
+ifneq ($(filter-out 0,$(WHISPER_OPENBLAS)),) # OpenBLAS
+WHISPER_OPENBLAS_INTERFACE64 ?= 0 # use 32-bit interface by default
+ifneq ($(filter-out 0,$(WHISPER_OPENBLAS_INTERFACE64)),)
+WHISPER_BLAS_LIB := openblas64
+else
+WHISPER_BLAS_LIB := openblas
+endif
+ifneq ($(OPENBLAS_PATH),)
+WHISPER_BLAS_CFLAGS := -I$(OPENBLAS_PATH)/include
+WHISPER_BLAS_LDFLAGS := -L$(OPENBLAS_PATH)/lib -l$(WHISPER_BLAS_LIB)
+else
+WHISPER_BLAS_LIB_PC_EXISTS := $(shell pkg-config --exists $(WHISPER_BLAS_LIB) && echo 1)
+ifneq ($(filter-out 0,$(WHISPER_BLAS_LIB_PC_EXISTS)),)
+WHISPER_BLAS_CFLAGS := $(shell pkg-config --cflags $(WHISPER_BLAS_LIB))
+WHISPER_BLAS_LDFLAGS := $(shell pkg-config --libs $(WHISPER_BLAS_LIB))
+else
+WHISPER_BLAS_CFLAGS := -I/usr/include/openblas
+WHISPER_BLAS_LDFLAGS := -l$(WHISPER_BLAS_LIB)
+endif
+endif
+CFLAGS += $(WHISPER_BLAS_CFLAGS) -DGGML_USE_OPENBLAS
+LDFLAGS += $(WHISPER_BLAS_LDFLAGS)
 endif
 
 ifdef WHISPER_CUBLAS
+# WHISPER_CUBLAS is deprecated and will be removed in the future
+WHISPER_CUDA := 1
+endif
+
+ifdef WHISPER_CUDA
 ifeq ($(shell expr $(NVCC_VERSION) \>= 11.6), 1)
-CUDA_ARCH_FLAG=native
+CUDA_ARCH_FLAG ?= native
 else
-CUDA_ARCH_FLAG=all
+CUDA_ARCH_FLAG ?= all
 endif
 
-CFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/$(UNAME_M)-linux/include
-CXXFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/$(UNAME_M)-linux/include
-LDFLAGS += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/$(UNAME_M)-linux/lib
-WHISPER_OBJ += ggml-cuda.o
+CFLAGS += -DGGML_USE_CUDA -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/$(UNAME_M)-linux/include
+CXXFLAGS += -DGGML_USE_CUDA -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/$(UNAME_M)-linux/include
+LDFLAGS += -lcuda -lcublas -lculibos -lcudart -lcublasLt -lcufft -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/$(UNAME_M)-linux/lib -L/usr/lib/wsl/lib
+WHISPER_OBJ += ggml-cuda.o whisper-mel-cuda.o
+WHISPER_OBJ += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/*.cu))
 NVCC = nvcc
 NVCCFLAGS = --forward-unknown-to-host-compiler -arch=$(CUDA_ARCH_FLAG)
 
-ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
+ggml-cuda/%.o: ggml-cuda/%.cu ggml-cuda/%.cuh ggml.h ggml-common.h ggml-cuda/common.cuh
+$(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -c $< -o $@
+
+ggml-cuda.o: ggml-cuda.cu ggml-cuda.h ggml.h ggml-backend.h ggml-backend-impl.h ggml-common.h $(wildcard ggml-cuda/*.cuh)
+$(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -Wno-pedantic -c $< -o $@
+
+whisper-mel-cuda.o: whisper-mel-cuda.cu whisper.h ggml.h ggml-backend.h whisper-mel.hpp whisper-mel-cuda.hpp
 $(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -Wno-pedantic -c $< -o $@
 endif
 
@@ -172,14 +306,18 @@ ifdef WHISPER_HIPBLAS
 ROCM_PATH ?= /opt/rocm
 HIPCC ?= $(ROCM_PATH)/bin/hipcc
 GPU_TARGETS ?= $(shell $(ROCM_PATH)/llvm/bin/amdgpu-arch)
-CFLAGS += -DGGML_USE_HIPBLAS -DGGML_USE_CUBLAS
-CXXFLAGS += -DGGML_USE_HIPBLAS -DGGML_USE_CUBLAS
+CFLAGS += -DGGML_USE_HIPBLAS -DGGML_USE_CUDA
+CXXFLAGS += -DGGML_USE_HIPBLAS -DGGML_USE_CUDA
 LDFLAGS += -L$(ROCM_PATH)/lib -Wl,-rpath=$(ROCM_PATH)/lib
 LDFLAGS += -lhipblas -lamdhip64 -lrocblas
 HIPFLAGS += $(addprefix --offload-arch=,$(GPU_TARGETS))
 WHISPER_OBJ += ggml-cuda.o
+WHISPER_OBJ += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/*.cu))
 
-ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
+ggml-cuda/%.o: ggml-cuda/%.cu ggml-cuda/%.cuh ggml.h ggml-common.h ggml-cuda/common.cuh
+$(HIPCC) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $<
+
+ggml-cuda.o: ggml-cuda.cu ggml-cuda.h ggml.h ggml-backend.h ggml-backend-impl.h ggml-common.h $(wildcard ggml-cuda/*.cuh)
 $(HIPCC) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $<
 endif
 
@@ -244,6 +382,13 @@ $(info I CC: $(CCV))
 $(info I CXX: $(CXXV))
 $(info )
 
+ifdef WHISPER_CUBLAS
+$(info !!!!)
+$(info WHISPER_CUBLAS is deprecated and will be removed in the future. Use WHISPER_CUDA instead.)
+$(info !!!!)
+$(info )
+endif
+
 #
 # Build library
 #
@@ -251,7 +396,18 @@ $(info )
 ggml.o: ggml.c ggml.h ggml-cuda.h
 $(CC) $(CFLAGS) -c $< -o $@
 
-whisper.o: whisper.cpp whisper.h ggml.h ggml-cuda.h
+ggml-alloc.o: ggml-alloc.c ggml.h ggml-alloc.h
+$(CC) $(CFLAGS) -c $< -o $@
+
+ggml-backend.o: ggml-backend.c ggml.h ggml-backend.h
+$(CC) $(CFLAGS) -c $< -o $@
+
+ggml-quants.o: ggml-quants.c ggml.h ggml-quants.h
+$(CC) $(CFLAGS) -c $< -o $@
+
+WHISPER_OBJ += ggml.o ggml-alloc.o ggml-backend.o ggml-quants.o
+
+whisper.o: whisper.cpp whisper.h whisper-mel.hpp ggml.h ggml-cuda.h
 $(CXX) $(CXXFLAGS) -c $< -o $@
 
 ifndef WHISPER_COREML
@@ -266,14 +422,41 @@ whisper-encoder-impl.o: coreml/whisper-encoder-impl.m coreml/whisper-encoder-imp
 WHISPER_OBJ += whisper.o whisper-encoder.o whisper-encoder-impl.o
 endif
 
-libwhisper.a: ggml.o $(WHISPER_OBJ)
-$(AR) rcs libwhisper.a ggml.o $(WHISPER_OBJ)
+ifdef WHISPER_METAL
+ggml-metal.o: ggml-metal.m ggml-metal.h
+$(CC) $(CFLAGS) -c $< -o $@
 
-libwhisper.so: ggml.o $(WHISPER_OBJ)
-$(CXX) $(CXXFLAGS) -shared -o libwhisper.so ggml.o $(WHISPER_OBJ) $(LDFLAGS)
+WHISPER_OBJ += ggml-metal.o
+
+ifdef WHISPER_METAL_EMBED_LIBRARY
+CFLAGS += -DGGML_METAL_EMBED_LIBRARY
+
+ggml-metal-embed.o: ggml-metal.metal ggml-common.h
+@echo "Embedding Metal library"
+$(eval TEMP_ASSEMBLY=$(shell mktemp))
+$(eval TEMP_METALLIB=$(shell mktemp))
+@sed "/^#include \"ggml-common.h\"/{r ggml-common.h"$$'\n'"d;}" ggml-metal.metal > $(TEMP_METALLIB)
+@echo ".section __DATA, __ggml_metallib" > $(TEMP_ASSEMBLY)
+@echo ".globl _ggml_metallib_start" >> $(TEMP_ASSEMBLY)
+@echo "_ggml_metallib_start:" >> $(TEMP_ASSEMBLY)
+@echo ".incbin \"$(TEMP_METALLIB)\"" >> $(TEMP_ASSEMBLY)
+@echo ".globl _ggml_metallib_end" >> $(TEMP_ASSEMBLY)
+@echo "_ggml_metallib_end:" >> $(TEMP_ASSEMBLY)
+@$(AS) $(TEMP_ASSEMBLY) -o $@
+@rm -f $(TEMP_ASSEMBLY) $(TEMP_METALLIB)
+
+WHISPER_OBJ += ggml-metal-embed.o
+endif
+endif
+
+libwhisper.a: $(WHISPER_OBJ)
+$(AR) rcs libwhisper.a $(WHISPER_OBJ)
+
+libwhisper.so: $(WHISPER_OBJ)
+$(CXX) $(CXXFLAGS) -shared -o libwhisper.so $(WHISPER_OBJ) $(LDFLAGS)
 
 clean:
-rm -f *.o main stream command talk talk-llama bench quantize lsp libwhisper.a libwhisper.so
+rm -f *.o main stream command talk talk-llama bench quantize server lsp libwhisper.a libwhisper.so
 
 #
 # Examples
@@ -281,33 +464,36 @@ clean:
 
 CC_SDL=`sdl2-config --cflags --libs`
 
-SRC_COMMON = examples/common.cpp examples/common-ggml.cpp
+SRC_COMMON = examples/common.cpp examples/common-ggml.cpp examples/grammar-parser.cpp
 SRC_COMMON_SDL = examples/common-sdl.cpp
 
-main: examples/main/main.cpp $(SRC_COMMON) ggml.o $(WHISPER_OBJ)
-$(CXX) $(CXXFLAGS) examples/main/main.cpp $(SRC_COMMON) ggml.o $(WHISPER_OBJ) -o main $(LDFLAGS)
+main: examples/main/main.cpp $(SRC_COMMON) $(WHISPER_OBJ)
+$(CXX) $(CXXFLAGS) examples/main/main.cpp $(SRC_COMMON) $(WHISPER_OBJ) -o main $(LDFLAGS)
 ./main -h
 
-bench: examples/bench/bench.cpp ggml.o $(WHISPER_OBJ)
-$(CXX) $(CXXFLAGS) examples/bench/bench.cpp ggml.o $(WHISPER_OBJ) -o bench $(LDFLAGS)
+bench: examples/bench/bench.cpp $(WHISPER_OBJ)
+$(CXX) $(CXXFLAGS) examples/bench/bench.cpp $(WHISPER_OBJ) -o bench $(LDFLAGS)
 
-quantize: examples/quantize/quantize.cpp ggml.o $(WHISPER_OBJ) $(SRC_COMMON)
-$(CXX) $(CXXFLAGS) examples/quantize/quantize.cpp $(SRC_COMMON) ggml.o $(WHISPER_OBJ) -o quantize $(LDFLAGS)
+quantize: examples/quantize/quantize.cpp $(WHISPER_OBJ) $(SRC_COMMON)
+$(CXX) $(CXXFLAGS) examples/quantize/quantize.cpp $(SRC_COMMON) $(WHISPER_OBJ) -o quantize $(LDFLAGS)
 
-stream: examples/stream/stream.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o $(WHISPER_OBJ)
-$(CXX) $(CXXFLAGS) examples/stream/stream.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o $(WHISPER_OBJ) -o stream $(CC_SDL) $(LDFLAGS)
+server: examples/server/server.cpp $(SRC_COMMON) $(WHISPER_OBJ)
+$(CXX) $(CXXFLAGS) examples/server/server.cpp $(SRC_COMMON) $(WHISPER_OBJ) -o server $(LDFLAGS) $(LWINSOCK2)
 
-command: examples/command/command.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o $(WHISPER_OBJ)
-$(CXX) $(CXXFLAGS) examples/command/command.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o $(WHISPER_OBJ) -o command $(CC_SDL) $(LDFLAGS)
+stream: examples/stream/stream.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ)
+$(CXX) $(CXXFLAGS) examples/stream/stream.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ) -o stream $(CC_SDL) $(LDFLAGS)
 
-lsp: examples/lsp/lsp.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o $(WHISPER_OBJ)
-$(CXX) $(CXXFLAGS) examples/lsp/lsp.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o $(WHISPER_OBJ) -o lsp $(CC_SDL) $(LDFLAGS)
+command: examples/command/command.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ)
+$(CXX) $(CXXFLAGS) examples/command/command.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ) -o command $(CC_SDL) $(LDFLAGS)
 
-talk: examples/talk/talk.cpp examples/talk/gpt-2.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o $(WHISPER_OBJ)
-$(CXX) $(CXXFLAGS) examples/talk/talk.cpp examples/talk/gpt-2.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o $(WHISPER_OBJ) -o talk $(CC_SDL) $(LDFLAGS)
+lsp: examples/lsp/lsp.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ)
+$(CXX) $(CXXFLAGS) examples/lsp/lsp.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ) -o lsp $(CC_SDL) $(LDFLAGS)
 
-talk-llama: examples/talk-llama/talk-llama.cpp examples/talk-llama/llama.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o $(WHISPER_OBJ)
-$(CXX) $(CXXFLAGS) examples/talk-llama/talk-llama.cpp examples/talk-llama/llama.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o $(WHISPER_OBJ) -o talk-llama $(CC_SDL) $(LDFLAGS)
+talk: examples/talk/talk.cpp examples/talk/gpt-2.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ)
+$(CXX) $(CXXFLAGS) examples/talk/talk.cpp examples/talk/gpt-2.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ) -o talk $(CC_SDL) $(LDFLAGS)
+
+talk-llama: examples/talk-llama/talk-llama.cpp examples/talk-llama/llama.cpp examples/talk-llama/unicode.cpp examples/talk-llama/unicode-data.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ)
+$(CXX) $(CXXFLAGS) examples/talk-llama/talk-llama.cpp examples/talk-llama/llama.cpp examples/talk-llama/unicode.cpp examples/talk-llama/unicode-data.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ) -o talk-llama $(CC_SDL) $(LDFLAGS)
 
 #
 # Audio samples
@@ -352,9 +538,10 @@ samples:
 .PHONY: medium.en
 .PHONY: medium
 .PHONY: large-v1
-.PHONY: large
+.PHONY: large-v2
+.PHONY: large-v3
 
-tiny.en tiny base.en base small.en small medium.en medium large-v1 large: main
+tiny.en tiny base.en base small.en small medium.en medium large-v1 large-v2 large-v3: main
 bash ./models/download-ggml-model.sh $@
||||||
@echo ""
|
@echo ""
|
||||||
@echo "==============================================="
|
@echo "==============================================="
|
||||||
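The `ggml-metal-embed.o` rule above assembles the pre-processed Metal shader source into a data section bounded by the `_ggml_metallib_start` and `_ggml_metallib_end` symbols. As a hedged illustration of how data embedded this way is typically consumed from C (the actual consumer in this repository is ggml-metal.m; the function below is a hypothetical sketch, not a copy of that code), note that on macOS the leading underscore from the assembly is dropped on the C side:

```c
// Illustrative sketch only: reading a blob embedded via `.incbin` between two
// assembler-defined symbols, as the ggml-metal-embed.o rule above sets up.
#include <stddef.h>
#include <stdio.h>

extern const char ggml_metallib_start[];
extern const char ggml_metallib_end[];

void report_embedded_metallib(void) {
    // The bytes between the two symbols are the pre-processed ggml-metal.metal source.
    const size_t size = (size_t)(ggml_metallib_end - ggml_metallib_start);
    printf("embedded Metal library: %zu bytes\n", size);
}
```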
Package.swift (new file, 61 lines added)
@@ -0,0 +1,61 @@
// swift-tools-version:5.5

import PackageDescription

let package = Package(
    name: "whisper",
    platforms: [
        .macOS(.v12),
        .iOS(.v14),
        .watchOS(.v4),
        .tvOS(.v14)
    ],
    products: [
        .library(name: "whisper", targets: ["whisper"]),
    ],
    targets: [
        .target(
            name: "whisper",
            path: ".",
            exclude: [
                "bindings",
                "cmake",
                "coreml",
                "examples",
                "extra",
                "models",
                "samples",
                "tests",
                "CMakeLists.txt",
                "ggml-cuda.cu",
                "ggml-cuda.h",
                "Makefile"
            ],
            sources: [
                "ggml.c",
                "whisper.cpp",
                "ggml-alloc.c",
                "ggml-backend.c",
                "ggml-quants.c",
                "ggml-metal.m"
            ],
            resources: [.process("ggml-metal.metal")],
            publicHeadersPath: "spm-headers",
            cSettings: [
                .unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]),
                .define("GGML_USE_ACCELERATE"),
                .unsafeFlags(["-fno-objc-arc"]),
                .define("GGML_USE_METAL")
                // NOTE: NEW_LAPACK will required iOS version 16.4+
                // We should consider add this in the future when we drop support for iOS 14
                // (ref: ref: https://developer.apple.com/documentation/accelerate/1513264-cblas_sgemm?language=objc)
                // .define("ACCELERATE_NEW_LAPACK"),
                // .define("ACCELERATE_LAPACK_ILP64")
            ],
            linkerSettings: [
                .linkedFramework("Accelerate")
            ]
        )
    ],
    cxxLanguageStandard: .cxx11
)
README.md (284 lines changed)
@@ -4,24 +4,23 @@

 [](https://github.com/ggerganov/whisper.cpp/actions)
 [](https://opensource.org/licenses/MIT)
+[](https://conan.io/center/whisper-cpp)
 [](https://www.npmjs.com/package/whisper.cpp/)

-Beta: [v1.4.2](https://github.com/ggerganov/whisper.cpp/releases/tag/v1.4.2) / Stable: [v1.2.1](https://github.com/ggerganov/whisper.cpp/releases/tag/v1.2.1) / [Roadmap | F.A.Q.](https://github.com/ggerganov/whisper.cpp/discussions/126)
+Stable: [v1.6.2](https://github.com/ggerganov/whisper.cpp/releases/tag/v1.6.0) / [Roadmap | F.A.Q.](https://github.com/ggerganov/whisper.cpp/discussions/126)

 High-performance inference of [OpenAI's Whisper](https://github.com/openai/whisper) automatic speech recognition (ASR) model:

 - Plain C/C++ implementation without dependencies
-- Apple silicon first-class citizen - optimized via ARM NEON, Accelerate framework and [Core ML](https://github.com/ggerganov/whisper.cpp#core-ml-support)
+- Apple Silicon first-class citizen - optimized via ARM NEON, Accelerate framework, Metal and [Core ML](https://github.com/ggerganov/whisper.cpp#core-ml-support)
 - AVX intrinsics support for x86 architectures
 - VSX intrinsics support for POWER architectures
 - Mixed F16 / F32 precision
 - [4-bit and 5-bit integer quantization support](https://github.com/ggerganov/whisper.cpp#quantization)
-- Low memory usage (Flash Attention)
 - Zero memory allocations at runtime
-- Runs on the CPU
-- [Partial GPU support for NVIDIA via cuBLAS](https://github.com/ggerganov/whisper.cpp#nvidia-gpu-support-via-cublas)
+- Support for CPU-only inference
+- [Efficient GPU support for NVIDIA](https://github.com/ggerganov/whisper.cpp#nvidia-gpu-support-via-cublas)
 - [Partial OpenCL GPU support via CLBlast](https://github.com/ggerganov/whisper.cpp#opencl-gpu-support-via-clblast)
-- [BLAS CPU support via OpenBLAS](https://github.com/ggerganov/whisper.cpp#blas-cpu-support-via-openblas)
 - [OpenVINO Support](https://github.com/ggerganov/whisper.cpp#openvino-support)
 - [C-style API](https://github.com/ggerganov/whisper.cpp/blob/master/whisper.h)

@@ -35,11 +34,10 @@ Supported platforms:
 - [x] [WebAssembly](examples/whisper.wasm)
 - [x] Windows ([MSVC](https://github.com/ggerganov/whisper.cpp/blob/master/.github/workflows/build.yml#L117-L144) and [MinGW](https://github.com/ggerganov/whisper.cpp/issues/168)]
 - [x] [Raspberry Pi](https://github.com/ggerganov/whisper.cpp/discussions/166)
+- [x] [docker](https://github.com/ggerganov/whisper.cpp/pkgs/container/whisper.cpp)

-The entire implementation of the model is contained in 2 source files:
-
-- Tensor operations: [ggml.h](ggml.h) / [ggml.c](ggml.c)
-- Transformer inference: [whisper.h](whisper.h) / [whisper.cpp](whisper.cpp)
+The entire high-level implementation of the model is contained in [whisper.h](whisper.h) and [whisper.cpp](whisper.cpp).
+The rest of the code is part of the [`ggml`](https://github.com/ggerganov/ggml) machine learning library.

 Having such a lightweight implementation of the model allows to easily integrate it in different platforms and applications.
 As an example, here is a video of running the model on an iPhone 13 device - fully offline, on-device: [whisper.objc](examples/whisper.objc)
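As a concrete picture of the high-level API referenced in the hunk above, here is a minimal, hedged sketch of loading a model and transcribing a PCM buffer with the C-style API declared in `whisper.h`. The function names are the ones the header exposes; obtaining 16 kHz mono float samples is left out and would come from your own audio-loading code.

```c
// Minimal transcription sketch against the C-style API in whisper.h.
// Assumes `pcm` already holds 16 kHz mono float PCM samples.
#include <stdio.h>
#include "whisper.h"

int transcribe(const char * model_path, const float * pcm, int n_samples) {
    struct whisper_context_params cparams = whisper_context_default_params();
    struct whisper_context * ctx = whisper_init_from_file_with_params(model_path, cparams);
    if (!ctx) {
        return 1;
    }

    struct whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
    wparams.print_progress = false;

    if (whisper_full(ctx, wparams, pcm, n_samples) != 0) {
        whisper_free(ctx);
        return 1;
    }

    // Print the decoded text segment by segment.
    const int n_segments = whisper_full_n_segments(ctx);
    for (int i = 0; i < n_segments; ++i) {
        printf("%s\n", whisper_full_get_segment_text(ctx, i));
    }

    whisper_free(ctx);
    return 0;
}
```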
@@ -50,6 +48,10 @@ You can also easily make your own offline voice assistant application: [command]

 https://user-images.githubusercontent.com/1991296/204038393-2f846eae-c255-4099-a76d-5735c25c49da.mp4

+On Apple Silicon, the inference runs fully on the GPU via Metal:
+
+https://github.com/ggerganov/whisper.cpp/assets/1991296/c82e8f86-60dc-49f2-b048-d2fdbd6b5225
+
 Or you can even run it straight in the browser: [talk.wasm](examples/talk.wasm)

 ## Implementation details
@@ -60,22 +62,22 @@ Or you can even run it straight in the browser: [talk.wasm](examples/talk.wasm)
 - Sample real-time audio transcription from the microphone is demonstrated in [stream.cpp](examples/stream)
 - Various other examples are available in the [examples](examples) folder

-The tensor operators are optimized heavily for Apple silicon CPUs. Depending on the computation size, Arm Neon SIMD
-intrinsics or CBLAS Accelerate framework routines are used. The latter are especially effective for bigger sizes since
-the Accelerate framework utilizes the special-purpose AMX coprocessor available in modern Apple products.
+The tensor operators are optimized heavily for Apple silicon CPUs. Depending on the computation size, Arm Neon SIMD intrinsics or CBLAS Accelerate framework routines are used. The latter are especially effective for bigger sizes since the Accelerate framework utilizes the special-purpose AMX coprocessor available in modern Apple products.

 ## Quick start

-First clone the repository.
+First clone the repository:

-Then, download one of the Whisper models converted in [ggml format](models). For example:
+```bash
+git clone https://github.com/ggerganov/whisper.cpp.git
+```
+
+Then, download one of the Whisper [models](models/README.md) converted in [`ggml` format](#ggml-format). For example:

 ```bash
 bash ./models/download-ggml-model.sh base.en
 ```

-If you wish to convert the Whisper models to ggml format yourself, instructions are in [models/README.md](models/README.md).
-
 Now build the [main](examples/main) example and transcribe an audio file like this:

 ```bash
@@ -90,7 +92,7 @@ make

 For a quick demo, simply run `make base.en`:

-```java
+```text
 $ make base.en

 cc -I. -O3 -std=c11 -pthread -DGGML_USE_ACCELERATE -c ggml.c -o ggml.o
@@ -109,30 +111,39 @@ options:
 -d N, --duration N [0 ] duration of audio to process in milliseconds
 -mc N, --max-context N [-1 ] maximum number of text context tokens to store
 -ml N, --max-len N [0 ] maximum segment length in characters
+-sow, --split-on-word [false ] split on word rather than on token
 -bo N, --best-of N [5 ] number of best candidates to keep
--bs N, --beam-size N [-1 ] beam size for beam search
+-bs N, --beam-size N [5 ] beam size for beam search
 -wt N, --word-thold N [0.01 ] word timestamp probability threshold
 -et N, --entropy-thold N [2.40 ] entropy threshold for decoder fail
 -lpt N, --logprob-thold N [-1.00 ] log probability threshold for decoder fail
--su, --speed-up [false ] speed up audio by x2 (reduced accuracy)
+-debug, --debug-mode [false ] enable debug mode (eg. dump log_mel)
 -tr, --translate [false ] translate from source language to english
--tdrz, --tinydiarize [false ] enable tinydiarize (requires a tdrz model)
 -di, --diarize [false ] stereo audio diarization
+-tdrz, --tinydiarize [false ] enable tinydiarize (requires a tdrz model)
 -nf, --no-fallback [false ] do not use temperature fallback while decoding
 -otxt, --output-txt [false ] output result in a text file
 -ovtt, --output-vtt [false ] output result in a vtt file
 -osrt, --output-srt [false ] output result in a srt file
+-olrc, --output-lrc [false ] output result in a lrc file
 -owts, --output-words [false ] output script for generating karaoke video
+-fp, --font-path [/System/Library/Fonts/Supplemental/Courier New Bold.ttf] path to a monospace font for karaoke video
 -ocsv, --output-csv [false ] output result in a CSV file
+-oj, --output-json [false ] output result in a JSON file
+-ojf, --output-json-full [false ] include more information in the JSON file
 -of FNAME, --output-file FNAME [ ] output file path (without file extension)
 -ps, --print-special [false ] print special tokens
 -pc, --print-colors [false ] print colors
 -pp, --print-progress [false ] print progress
--nt, --no-timestamps [true ] do not print timestamps
+-nt, --no-timestamps [false ] do not print timestamps
 -l LANG, --language LANG [en ] spoken language ('auto' for auto-detect)
+-dl, --detect-language [false ] exit after automatically detecting language
 --prompt PROMPT [ ] initial prompt
 -m FNAME, --model FNAME [models/ggml-base.en.bin] model path
 -f FNAME, --file FNAME [ ] input WAV file path
+-oved D, --ov-e-device DNAME [CPU ] the OpenVINO device used for encode inference
+-ls, --log-score [false ] log best decoder scores of tokens
+-ng, --no-gpu [false ] disable GPU


 bash ./models/download-ggml-model.sh base.en
@@ -197,7 +208,7 @@ For detailed usage instructions, run: `./main -h`
 Note that the [main](examples/main) example currently runs only with 16-bit WAV files, so make sure to convert your input before running the tool.
 For example, you can use `ffmpeg` like this:

-```java
+```bash
 ffmpeg -i input.mp3 -ar 16000 -ac 1 -c:a pcm_s16le output.wav
 ```

@@ -223,18 +234,19 @@ make small
 make medium.en
 make medium
 make large-v1
-make large
+make large-v2
+make large-v3
 ```

 ## Memory usage

-| Model | Disk | Mem | SHA |
-| --- | --- | --- | --- |
-| tiny | 75 MB | ~125 MB | `bd577a113a864445d4c299885e0cb97d4ba92b5f` |
-| base | 142 MB | ~210 MB | `465707469ff3a37a2b9b8d8f89f2f99de7299dac` |
-| small | 466 MB | ~600 MB | `55356645c2b361a969dfd0ef2c5a50d530afd8d5` |
-| medium | 1.5 GB | ~1.7 GB | `fd9727b6e1217c2f614f9b698455c4ffd82463b4` |
-| large | 2.9 GB | ~3.3 GB | `0f4c8e34f21cf1a914c59d8b3ce882345ad349d6` |
+| Model  | Disk    | Mem     |
+| ------ | ------- | ------- |
+| tiny   | 75 MiB  | ~273 MB |
+| base   | 142 MiB | ~388 MB |
+| small  | 466 MiB | ~852 MB |
+| medium | 1.5 GiB | ~2.1 GB |
+| large  | 2.9 GiB | ~3.9 GB |

 ## Quantization

@@ -267,7 +279,8 @@ speed-up - more than x3 faster compared with CPU-only execution. Here are the in

 - To ensure `coremltools` operates correctly, please confirm that [Xcode](https://developer.apple.com/xcode/) is installed and execute `xcode-select --install` to install the command-line tools.
 - Python 3.10 is recommended.
+- MacOS Sonoma (version 14) or newer is recommended, as older versions of MacOS might experience issues with transcription hallucination.
 - [OPTIONAL] It is recommended to utilize a Python version management system, such as [Miniconda](https://docs.conda.io/en/latest/miniconda.html) for this step:
   - To create an environment, use: `conda create -n py310-whisper python=3.10 -y`
   - To activate the environment, use: `conda activate py310-whisper`

@@ -287,14 +300,14 @@ speed-up - more than x3 faster compared with CPU-only execution. Here are the in
 WHISPER_COREML=1 make -j

 # using CMake
-cd build
-cmake -DWHISPER_COREML=1 ..
+cmake -B build -DWHISPER_COREML=1
+cmake --build build -j --config Release
 ```

 - Run the examples as usual. For example:

-```bash
-./main -m models/ggml-base.en.bin -f samples/jfk.wav
+```text
+$ ./main -m models/ggml-base.en.bin -f samples/jfk.wav

 ...

@@ -322,21 +335,23 @@ This can result in significant speedup in encoder performance. Here are the inst
 - First, setup python virtual env. and install python dependencies. Python 3.10 is recommended.

   Windows:
-  ```
+
+  ```powershell
   cd models
   python -m venv openvino_conv_env
   openvino_conv_env\Scripts\activate
   python -m pip install --upgrade pip
-  pip install -r openvino-conversion-requirements.txt
+  pip install -r requirements-openvino.txt
   ```

   Linux and macOS:
-  ```
+
+  ```bash
   cd models
   python3 -m venv openvino_conv_env
   source openvino_conv_env/bin/activate
   python -m pip install --upgrade pip
-  pip install -r openvino-conversion-requirements.txt
+  pip install -r requirements-openvino.txt
   ```

 - Generate an OpenVINO encoder model. For example, to generate a `base.en` model, use:
@@ -345,7 +360,7 @@ This can result in significant speedup in encoder performance. Here are the inst
   python convert-whisper-to-openvino.py --model base.en
   ```

-  This will produce ggml-base.en-encoder-openvino.xml/.bin IR model files. It's recommended to relocate these to the same folder as ggml models, as that
+  This will produce ggml-base.en-encoder-openvino.xml/.bin IR model files. It's recommended to relocate these to the same folder as `ggml` models, as that
   is the default location that the OpenVINO extension will search at runtime.

 - Build `whisper.cpp` with OpenVINO support:
@@ -355,24 +370,28 @@ This can result in significant speedup in encoder performance. Here are the inst
   After downloading & extracting package onto your development system, set up required environment by sourcing setupvars script. For example:

   Linux:

   ```bash
   source /path/to/l_openvino_toolkit_ubuntu22_2023.0.0.10926.b4452d56304_x86_64/setupvars.sh
   ```

   Windows (cmd):
-  ```
+
+  ```powershell
   C:\Path\To\w_openvino_toolkit_windows_2023.0.0.10926.b4452d56304_x86_64\setupvars.bat
   ```

   And then build the project using cmake:

   ```bash
-  cd build
-  cmake -DWHISPER_OPENVINO=1 ..
+  cmake -B build -DWHISPER_OPENVINO=1
+  cmake --build build -j --config Release
   ```

 - Run the examples as usual. For example:
-  ```bash
-  ./main -m models/ggml-base.en.bin -f samples/jfk.wav
+
+  ```text
+  $ ./main -m models/ggml-base.en.bin -f samples/jfk.wav

   ...

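The instructions above cover the bundled examples; for programmatic use, `whisper.h` also declares `whisper_ctx_init_openvino_encoder()`, which attaches the OpenVINO encoder to an existing context. A hedged sketch follows: passing NULL for the IR path and cache directory is assumed here to fall back to defaults derived from the ggml model path, so double-check the header comments before relying on that behavior.

```c
// Hedged sketch: enabling the OpenVINO encoder for an existing whisper context.
#include <stdio.h>
#include "whisper.h"

struct whisper_context * init_with_openvino(const char * model_path) {
    struct whisper_context * ctx =
        whisper_init_from_file_with_params(model_path, whisper_context_default_params());
    if (!ctx) {
        return NULL;
    }

    // Ask whisper.cpp to locate the ggml-*-encoder-openvino.xml/.bin IR files next to
    // the model and run the encoder on the selected OpenVINO device ("CPU", "GPU", ...).
    const int ret = whisper_ctx_init_openvino_encoder(ctx, NULL, "CPU", NULL);
    fprintf(stderr, "openvino encoder init returned %d\n", ret);

    return ctx;
}
```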
@@ -391,16 +410,16 @@ This can result in significant speedup in encoder performance. Here are the inst

 For more information about the Core ML implementation please refer to PR [#1037](https://github.com/ggerganov/whisper.cpp/pull/1037).

-## NVIDIA GPU support via cuBLAS
+## NVIDIA GPU support

-With NVIDIA cards the Encoder processing can to a large extent be offloaded to the GPU through cuBLAS.
+With NVIDIA cards the processing of the models is done efficiently on the GPU via cuBLAS and custom CUDA kernels.
 First, make sure you have installed `cuda`: https://developer.nvidia.com/cuda-downloads

-Now build `whisper.cpp` with cuBLAS support:
+Now build `whisper.cpp` with CUDA support:

 ```
 make clean
-WHISPER_CUBLAS=1 make -j
+WHISPER_CUDA=1 make -j
 ```

 ## OpenCL GPU support via CLBlast
@@ -418,14 +437,11 @@ make clean
 WHISPER_CLBLAST=1 make -j

 CMake:
-cd whisper.cpp ; mkdir build ; cd build
-cmake -DWHISPER_CLBLAST=ON ..
-make clean
-make -j
-cp bin/* ../
+cd whisper.cpp
+cmake -B build -DWHISPER_CLBLAST=ON
+cmake --build build -j --config Release
 ```


 Run all the examples as usual.

 ## BLAS CPU support via OpenBLAS
@@ -440,6 +456,63 @@ make clean
 WHISPER_OPENBLAS=1 make -j
 ```

+## BLAS CPU support via Intel MKL
+
+Encoder processing can be accelerated on the CPU via the BLAS compatible interface of Intel's Math Kernel Library.
+First, make sure you have installed Intel's MKL runtime and development packages: https://www.intel.com/content/www/us/en/developer/tools/oneapi/onemkl-download.html
+
+Now build `whisper.cpp` with Intel MKL BLAS support:
+
+```
+source /opt/intel/oneapi/setvars.sh
+mkdir build
+cd build
+cmake -DWHISPER_MKL=ON ..
+WHISPER_MKL=1 make -j
+```
+
+## Docker
+
+### Prerequisites
+
+- Docker must be installed and running on your system.
+- Create a folder to store big models & intermediate files (ex. /whisper/models)
+
+### Images
+
+We have two Docker images available for this project:
+
+1. `ghcr.io/ggerganov/whisper.cpp:main`: This image includes the main executable file as well as `curl` and `ffmpeg`. (platforms: `linux/amd64`, `linux/arm64`)
+2. `ghcr.io/ggerganov/whisper.cpp:main-cuda`: Same as `main` but compiled with CUDA support. (platforms: `linux/amd64`)
+
+### Usage
+
+```shell
+# download model and persist it in a local folder
+docker run -it --rm \
+  -v path/to/models:/models \
+  whisper.cpp:main "./models/download-ggml-model.sh base /models"
+# transcribe an audio file
+docker run -it --rm \
+  -v path/to/models:/models \
+  -v path/to/audios:/audios \
+  whisper.cpp:main "./main -m /models/ggml-base.bin -f /audios/jfk.wav"
+# transcribe an audio file in samples folder
+docker run -it --rm \
+  -v path/to/models:/models \
+  whisper.cpp:main "./main -m /models/ggml-base.bin -f ./samples/jfk.wav"
+```
+
+## Installing with Conan
+
+You can install pre-built binaries for whisper.cpp or build it from source using [Conan](https://conan.io/). Use the following command:
+
+```
+conan install --requires="whisper-cpp/[*]" --build=missing
+```
+
+For detailed instructions on how to use Conan, please refer to the [Conan documentation](https://docs.conan.io/2/).
+
 ## Limitations

 - Inference only
@@ -452,7 +525,7 @@ in about half a minute on a MacBook M1 Pro, using `medium.en` model:
 <details>
   <summary>Expand to see the result</summary>

-```java
+```text
 $ ./main -m models/ggml-medium.en.bin -f samples/gb1.wav -t 8

 whisper_init_from_file: loading model from 'models/ggml-medium.en.bin'
@@ -524,6 +597,7 @@ whisper_print_timings: encode time = 18665.10 ms / 9 runs ( 2073.90 ms per
 whisper_print_timings: decode time = 13090.93 ms / 549 runs ( 23.85 ms per run)
 whisper_print_timings: total time = 32733.52 ms
 ```
+
 </details>

 ## Real-time audio input example
@@ -532,7 +606,7 @@ This is a naive example of performing real-time inference on audio from your mic
 The [stream](examples/stream) tool samples the audio every half a second and runs the transcription continuously.
 More info is available in [issue #10](https://github.com/ggerganov/whisper.cpp/issues/10).

-```java
+```bash
 make stream
 ./stream -m ./models/ggml-base.en.bin -t 8 --step 500 --length 5000
 ```
@@ -544,7 +618,7 @@ https://user-images.githubusercontent.com/1991296/194935793-76afede7-cfa8-48d8-a
 Adding the `--print-colors` argument will print the transcribed text using an experimental color coding strategy
 to highlight words with high or low confidence:

-```java
+```bash
 ./main -m models/ggml-base.en.bin -f samples/gb0.wav --print-colors
 ```

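The color coding above is driven by per-token confidence, which is also available programmatically. A hedged sketch of reading it through the C API (function names are from `whisper.h`; the 0.5 cutoff is an arbitrary illustration, not the threshold used by the main example, and `ctx` is assumed to hold a finished `whisper_full()` run):

```c
// Hedged sketch: inspect per-token probabilities after whisper_full() has run.
#include <stdio.h>
#include "whisper.h"

void print_low_confidence_tokens(struct whisper_context * ctx) {
    const int n_segments = whisper_full_n_segments(ctx);
    for (int i = 0; i < n_segments; ++i) {
        const int n_tokens = whisper_full_n_tokens(ctx, i);
        for (int j = 0; j < n_tokens; ++j) {
            const float p = whisper_full_get_token_p(ctx, i, j);
            if (p < 0.5f) { // illustrative threshold only
                printf("low confidence (%.2f): %s\n", p, whisper_full_get_token_text(ctx, i, j));
            }
        }
    }
}
```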
@@ -554,8 +628,8 @@ to highlight words with high or low confidence:

 For example, to limit the line length to a maximum of 16 characters, simply add `-ml 16`:

-```java
-./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 16
+```text
+$ ./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 16

 whisper_model_load: loading model from './models/ggml-base.en.bin'
 ...
@@ -578,8 +652,8 @@ main: processing './samples/jfk.wav' (176000 samples, 11.0 sec), 4 threads, 1 pr

 The `--max-len` argument can be used to obtain word-level timestamps. Simply use `-ml 1`:

-```java
-./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 1
+```text
+$ ./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 1

 whisper_model_load: loading model from './models/ggml-base.en.bin'
 ...
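The `-ml 1` trick above maps onto a few `whisper_full_params` fields, so the same word-level timestamps can be obtained from the C API. A hedged sketch (field and function names are taken from `whisper.h`; the timestamp unit is assumed to be 10 ms ticks, which is how the main example prints them):

```c
// Hedged sketch: word-level timestamps through the C API instead of `-ml 1` on the CLI.
#include <stdint.h>
#include <stdio.h>
#include "whisper.h"

void transcribe_with_word_timestamps(struct whisper_context * ctx,
                                     const float * pcm, int n_samples) {
    struct whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
    wparams.token_timestamps = true; // enable token/word timestamps
    wparams.max_len          = 1;    // roughly the equivalent of `-ml 1`
    wparams.split_on_word    = true; // roughly the equivalent of `-sow`

    if (whisper_full(ctx, wparams, pcm, n_samples) != 0) {
        return;
    }

    for (int i = 0; i < whisper_full_n_segments(ctx); ++i) {
        const int64_t t0 = whisper_full_get_segment_t0(ctx, i); // assumed: 10 ms units
        const int64_t t1 = whisper_full_get_segment_t1(ctx, i);
        printf("[%6.2fs -> %6.2fs] %s\n", t0 / 100.0, t1 / 100.0,
               whisper_full_get_segment_text(ctx, i));
    }
}
```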
@@ -647,9 +721,9 @@ The [main](examples/main) example provides support for output of karaoke-style m
 currently pronounced word is highlighted. Use the `-wts` argument and run the generated bash script.
 This requires to have `ffmpeg` installed.

-Here are a few *"typical"* examples:
+Here are a few _"typical"_ examples:

-```java
+```bash
 ./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -owts
 source ./samples/jfk.wav.wts
 ffplay ./samples/jfk.wav.mp4
@@ -659,7 +733,7 @@ https://user-images.githubusercontent.com/1991296/199337465-dbee4b5e-9aeb-48a3-b

 ---

-```java
+```bash
 ./main -m ./models/ggml-base.en.bin -f ./samples/mm0.wav -owts
 source ./samples/mm0.wav.wts
 ffplay ./samples/mm0.wav.mp4
@@ -669,7 +743,7 @@ https://user-images.githubusercontent.com/1991296/199337504-cc8fd233-0cb7-4920-9

 ---

-```java
+```bash
 ./main -m ./models/ggml-base.en.bin -f ./samples/gb0.wav -owts
 source ./samples/gb0.wav.wts
 ffplay ./samples/gb0.wav.mp4
@@ -681,10 +755,10 @@ https://user-images.githubusercontent.com/1991296/199337538-b7b0c7a3-2753-4a88-a

 ## Video comparison of different models

-Use the [extra/bench-wts.sh](https://github.com/ggerganov/whisper.cpp/blob/master/extra/bench-wts.sh) script to generate a video in the following format:
+Use the [scripts/bench-wts.sh](https://github.com/ggerganov/whisper.cpp/blob/master/scripts/bench-wts.sh) script to generate a video in the following format:

-```java
-./extra/bench-wts.sh samples/jfk.wav
+```bash
+./scripts/bench-wts.sh samples/jfk.wav
 ffplay ./samples/jfk.wav.all.mp4
 ```

@@ -700,7 +774,19 @@ took to execute it. The results are summarized in the following Github issue:

 [Benchmark results](https://github.com/ggerganov/whisper.cpp/issues/89)

-## ggml format
+Additionally a script to run whisper.cpp with different models and audio files is provided [bench.py](bench.py).
+
+You can run it with the following command, by default it will run against any standard model in the models folder.
+
+```bash
+python3 scripts/bench.py -f samples/jfk.wav -t 2,4,8 -p 1,2
+```
+
+It is written in python with the intention of being easy to modify and extend for your benchmarking use case.
+
+It outputs a csv file with the results of the benchmarking.
+
+## `ggml` format

 The original models are converted to a custom binary format. This allows to pack everything needed into a single file:

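To make the "single file" point in the hunk above concrete, here is a hedged C sketch that peeks at the start of a whisper `ggml` model file. The magic value and the field order follow my reading of `models/convert-pt-to-ggml.py` and the loader in whisper.cpp, so treat the exact layout as an assumption rather than a documented contract.

```c
// Hedged sketch: dump the header of a whisper ggml model file.
// Field order is assumed from models/convert-pt-to-ggml.py; verify against
// the loader in whisper.cpp before relying on it.
#include <stdint.h>
#include <stdio.h>

int dump_ggml_header(const char * path) {
    FILE * f = fopen(path, "rb");
    if (!f) {
        return 1;
    }

    uint32_t magic = 0;
    int32_t  hparams[11] = {0}; // assumed order: n_vocab, n_audio_ctx, n_audio_state,
                                // n_audio_head, n_audio_layer, n_text_ctx, n_text_state,
                                // n_text_head, n_text_layer, n_mels, ftype

    if (fread(&magic, sizeof(magic), 1, f) != 1 ||
        fread(hparams, sizeof(hparams[0]), 11, f) != 11) {
        fclose(f);
        return 1;
    }
    fclose(f);

    printf("magic:   0x%08x (expected 0x67676d6c, i.e. \"ggml\")\n", magic);
    printf("n_vocab: %d, n_mels: %d, ftype: %d\n", hparams[0], hparams[9], hparams[10]);
    return 0;
}
```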
@@ -715,49 +801,51 @@ or manually from here:
 - https://huggingface.co/ggerganov/whisper.cpp
 - https://ggml.ggerganov.com

-For more details, see the conversion script [models/convert-pt-to-ggml.py](models/convert-pt-to-ggml.py) or the README
-in [models](models).
+For more details, see the conversion script [models/convert-pt-to-ggml.py](models/convert-pt-to-ggml.py) or [models/README.md](models/README.md).

 ## [Bindings](https://github.com/ggerganov/whisper.cpp/discussions/categories/bindings)

-- [X] Rust: [tazz4843/whisper-rs](https://github.com/tazz4843/whisper-rs) | [#310](https://github.com/ggerganov/whisper.cpp/discussions/310)
-- [X] Javascript: [bindings/javascript](bindings/javascript) | [#309](https://github.com/ggerganov/whisper.cpp/discussions/309)
+- [x] Rust: [tazz4843/whisper-rs](https://github.com/tazz4843/whisper-rs) | [#310](https://github.com/ggerganov/whisper.cpp/discussions/310)
+- [x] JavaScript: [bindings/javascript](bindings/javascript) | [#309](https://github.com/ggerganov/whisper.cpp/discussions/309)
   - React Native (iOS / Android): [whisper.rn](https://github.com/mybigday/whisper.rn)
-- [X] Go: [bindings/go](bindings/go) | [#312](https://github.com/ggerganov/whisper.cpp/discussions/312)
-- [X] Java:
+- [x] Go: [bindings/go](bindings/go) | [#312](https://github.com/ggerganov/whisper.cpp/discussions/312)
+- [x] Java:
   - [GiviMAD/whisper-jni](https://github.com/GiviMAD/whisper-jni)
-- [X] Ruby: [bindings/ruby](bindings/ruby) | [#507](https://github.com/ggerganov/whisper.cpp/discussions/507)
-- [X] Objective-C / Swift: [ggerganov/whisper.spm](https://github.com/ggerganov/whisper.spm) | [#313](https://github.com/ggerganov/whisper.cpp/discussions/313)
+- [x] Ruby: [bindings/ruby](bindings/ruby) | [#507](https://github.com/ggerganov/whisper.cpp/discussions/507)
+- [x] Objective-C / Swift: [ggerganov/whisper.spm](https://github.com/ggerganov/whisper.spm) | [#313](https://github.com/ggerganov/whisper.cpp/discussions/313)
   - [exPHAT/SwiftWhisper](https://github.com/exPHAT/SwiftWhisper)
-- [X] .NET: | [#422](https://github.com/ggerganov/whisper.cpp/discussions/422)
+- [x] .NET: | [#422](https://github.com/ggerganov/whisper.cpp/discussions/422)
   - [sandrohanea/whisper.net](https://github.com/sandrohanea/whisper.net)
   - [NickDarvey/whisper](https://github.com/NickDarvey/whisper)
-- [X] Python: | [#9](https://github.com/ggerganov/whisper.cpp/issues/9)
+- [x] Python: | [#9](https://github.com/ggerganov/whisper.cpp/issues/9)
   - [stlukey/whispercpp.py](https://github.com/stlukey/whispercpp.py) (Cython)
+  - [AIWintermuteAI/whispercpp](https://github.com/AIWintermuteAI/whispercpp) (Updated fork of aarnphm/whispercpp)
   - [aarnphm/whispercpp](https://github.com/aarnphm/whispercpp) (Pybind11)
-- [X] R: [bnosac/audio.whisper](https://github.com/bnosac/audio.whisper)
-- [X] Unity: [macoron/whisper.unity](https://github.com/Macoron/whisper.unity)
+- [x] R: [bnosac/audio.whisper](https://github.com/bnosac/audio.whisper)
+- [x] Unity: [macoron/whisper.unity](https://github.com/Macoron/whisper.unity)

 ## Examples

 There are various examples of using the library for different projects in the [examples](examples) folder.
 Some of the examples are even ported to run in the browser using WebAssembly. Check them out!

 | Example | Web | Description |
-| --- | --- | --- |
+| --------------------------------------------------- | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
 | [main](examples/main) | [whisper.wasm](examples/whisper.wasm) | Tool for translating and transcribing audio using Whisper |
 | [bench](examples/bench) | [bench.wasm](examples/bench.wasm) | Benchmark the performance of Whisper on your machine |
 | [stream](examples/stream) | [stream.wasm](examples/stream.wasm) | Real-time transcription of raw microphone capture |
 | [command](examples/command) | [command.wasm](examples/command.wasm) | Basic voice assistant example for receiving voice commands from the mic |
+| [wchess](examples/wchess) | [wchess.wasm](examples/wchess) | Voice-controlled chess |
 | [talk](examples/talk) | [talk.wasm](examples/talk.wasm) | Talk with a GPT-2 bot |
 | [talk-llama](examples/talk-llama) | | Talk with a LLaMA bot |
 | [whisper.objc](examples/whisper.objc) | | iOS mobile application using whisper.cpp |
 | [whisper.swiftui](examples/whisper.swiftui) | | SwiftUI iOS / macOS application using whisper.cpp |
 | [whisper.android](examples/whisper.android) | | Android mobile application using whisper.cpp |
 | [whisper.nvim](examples/whisper.nvim) | | Speech-to-text plugin for Neovim |
 | [generate-karaoke.sh](examples/generate-karaoke.sh) | | Helper script to easily [generate a karaoke video](https://youtu.be/uj7hVta4blM) of raw audio capture |
 | [livestream.sh](examples/livestream.sh) | | [Livestream audio transcription](https://github.com/ggerganov/whisper.cpp/issues/185) |
 | [yt-wsp.sh](examples/yt-wsp.sh) | | Download + transcribe and/or translate any VOD [(original)](https://gist.github.com/DaniruKun/96f763ec1a037cc92fe1a059b643b818) |
+| [server](examples/server) | | HTTP transcription server with OAI-like API |

 ## [Discussions](https://github.com/ggerganov/whisper.cpp/discussions)

README_sycl.md (new file, 249 lines added)
@@ -0,0 +1,249 @@
# whisper.cpp for SYCL

[Background](#background)

[OS](#os)

[Intel GPU](#intel-gpu)

[Linux](#linux)

[Environment Variable](#environment-variable)

[Known Issue](#known-issue)

[Todo](#todo)

## Background

SYCL is a higher-level programming model to improve programming productivity on various hardware accelerators, such as CPUs, GPUs, and FPGAs. It is a single-source embedded domain-specific language based on pure C++17.

oneAPI is a specification that is open and standards-based, supporting multiple architecture types including but not limited to GPU, CPU, and FPGA. The spec has both direct programming and API-based programming paradigms.

Intel uses the SYCL as direct programming language to support CPU, GPUs and FPGAs.

To avoid re-inventing the wheel, this code refers other code paths in llama.cpp (like OpenBLAS, cuBLAS, CLBlast). We use a open-source tool [SYCLomatic](https://github.com/oneapi-src/SYCLomatic) (Commercial release [Intel® DPC++ Compatibility Tool](https://www.intel.com/content/www/us/en/developer/tools/oneapi/dpc-compatibility-tool.html)) migrate to SYCL.

The whisper.cpp for SYCL is used to support Intel GPUs.

For Intel CPU, recommend to use whisper.cpp for X86 (Intel MKL build).

## OS

| OS      | Status  | Verified     |
| ------- | ------- | ------------ |
| Linux   | Support | Ubuntu 22.04 |
| Windows | Ongoing |              |

## Intel GPU

| Intel GPU                     | Status  | Verified Model                  |
| ----------------------------- | ------- | ------------------------------- |
| Intel Data Center Max Series  | Support | Max 1550                        |
| Intel Data Center Flex Series | Support | Flex 170                        |
| Intel Arc Series              | Support | Arc 770                         |
| Intel built-in Arc GPU        | Support | built-in Arc GPU in Meteor Lake |
| Intel iGPU                    | Support | iGPU in i5-1250P, i7-1165G7     |

## Linux

### Setup Environment

1. Install Intel GPU driver.

a. Please install Intel GPU driver by official guide: [Install GPU Drivers](https://dgpu-docs.intel.com/driver/installation.html).

Note: for iGPU, please install the client GPU driver.

b. Add user to group: video, render.

```
sudo usermod -aG render username
sudo usermod -aG video username
```

Note: re-login to enable it.

c. Check

```
sudo apt install clinfo
sudo clinfo -l
```

Output (example):

```
Platform #0: Intel(R) OpenCL Graphics
 `-- Device #0: Intel(R) Arc(TM) A770 Graphics

Platform #0: Intel(R) OpenCL HD Graphics
 `-- Device #0: Intel(R) Iris(R) Xe Graphics [0x9a49]
```

2. Install Intel® oneAPI Base toolkit.

a. Please follow the procedure in [Get the Intel® oneAPI Base Toolkit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit.html).

Recommend to install to default folder: **/opt/intel/oneapi**.

Following guide use the default folder as example. If you use other folder, please modify the following guide info with your folder.

b. Check

```
source /opt/intel/oneapi/setvars.sh

sycl-ls
```

There should be one or more level-zero devices. Like **[ext_oneapi_level_zero:gpu:0]**.

Output (example):
```
[opencl:acc:0] Intel(R) FPGA Emulation Platform for OpenCL(TM), Intel(R) FPGA Emulation Device OpenCL 1.2 [2023.16.10.0.17_160000]
[opencl:cpu:1] Intel(R) OpenCL, 13th Gen Intel(R) Core(TM) i7-13700K OpenCL 3.0 (Build 0) [2023.16.10.0.17_160000]
[opencl:gpu:2] Intel(R) OpenCL Graphics, Intel(R) Arc(TM) A770 Graphics OpenCL 3.0 NEO [23.30.26918.50]
[ext_oneapi_level_zero:gpu:0] Intel(R) Level-Zero, Intel(R) Arc(TM) A770 Graphics 1.3 [1.3.26918]
```

2. Build locally:

```
mkdir -p build
cd build
source /opt/intel/oneapi/setvars.sh

#for FP16
#cmake .. -DWHISPER_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DWHISPER_SYCL_F16=ON

#for FP32
cmake .. -DWHISPER_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx

#build example/main only
#cmake --build . --config Release --target main

#build all binary
cmake --build . --config Release -v
```

or

```
./examples/sycl/build.sh
```

Note:

- By default, it will build for all binary files. It will take more time. To reduce the time, we recommend to build for **example/main** only.

### Run

1. Put model file to folder **models**

2. Enable oneAPI running environment

```
source /opt/intel/oneapi/setvars.sh
```

3. List device ID

Run without parameter:

```
./build/bin/ls-sycl-device

or

./build/bin/main
```

Check the ID in startup log, like:

```
found 4 SYCL devices:
  Device 0: Intel(R) Arc(TM) A770 Graphics, compute capability 1.3,
    max compute_units 512, max work group size 1024, max sub group size 32, global mem size 16225243136
  Device 1: Intel(R) FPGA Emulation Device, compute capability 1.2,
    max compute_units 24, max work group size 67108864, max sub group size 64, global mem size 67065057280
  Device 2: 13th Gen Intel(R) Core(TM) i7-13700K, compute capability 3.0,
    max compute_units 24, max work group size 8192, max sub group size 64, global mem size 67065057280
  Device 3: Intel(R) Arc(TM) A770 Graphics, compute capability 3.0,
    max compute_units 512, max work group size 1024, max sub group size 32, global mem size 16225243136
```

| Attribute              | Note                                                       |
| ---------------------- | ---------------------------------------------------------- |
| compute capability 1.3 | Level-zero running time, recommended                       |
| compute capability 3.0 | OpenCL running time, slower than level-zero in most cases  |

4. Set device ID and execute whisper.cpp

Set device ID = 0 by **GGML_SYCL_DEVICE=0**

```
GGML_SYCL_DEVICE=0 ./build/bin/main -m models/ggml-base.en.bin -f samples/jfk.wav
```
or run by script:

```
./examples/sycl/run_whisper.sh
```

5. Check the device ID in output

Like:
```
Using device **0** (Intel(R) Arc(TM) A770 Graphics) as main device
```

## Environment Variable

#### Build

| Name               | Value          | Function                                                                           |
| ------------------ | -------------- | ---------------------------------------------------------------------------------- |
| WHISPER_SYCL       | ON (mandatory) | Enable build with SYCL code path. <br>For FP32/FP16, WHISPER_SYCL=ON is mandatory. |
| WHISPER_SYCL_F16   | ON (optional)  | Enable FP16 build with SYCL code path. For FP32, do not set it.                    |
| CMAKE_C_COMPILER   | icx            | Use icx compiler for SYCL code path                                                |
| CMAKE_CXX_COMPILER | icpx           | use icpx for SYCL code path                                                        |

#### Running

| Name             | Value            | Function                                                                |
| ---------------- | ---------------- | ----------------------------------------------------------------------- |
| GGML_SYCL_DEVICE | 0 (default) or 1 | Set the device id used. Check the device ids by default running output |
| GGML_SYCL_DEBUG  | 0 (default) or 1 | Enable log function by macro: GGML_SYCL_DEBUG                           |

## Known Issue

- Error: `error while loading shared libraries: libsycl.so.7: cannot open shared object file: No such file or directory`.

  Miss to enable oneAPI running environment.

  Install oneAPI base toolkit and enable it by: `source /opt/intel/oneapi/setvars.sh`.

- Hang during startup

  llama.cpp use mmap as default way to read model file and copy to GPU. In some system, memcpy will be abnormal and block.

  Solution: add **--no-mmap**.

## Todo

- Support to build in Windows.

- Support multiple cards.
@@ -1,9 +1,26 @@
+ifndef UNAME_S
+UNAME_S := $(shell uname -s)
+endif
+
+ifndef UNAME_P
+UNAME_P := $(shell uname -p)
+endif
+
+ifndef UNAME_M
+UNAME_M := $(shell uname -m)
+endif
+
+GGML_METAL_PATH_RESOURCES := $(abspath ../..)
 BUILD_DIR := build
 MODELS_DIR := models
 EXAMPLES_DIR := $(wildcard examples/*)
 INCLUDE_PATH := $(abspath ../..)
 LIBRARY_PATH := $(abspath ../..)

+ifeq ($(UNAME_S),Darwin)
+    EXT_LDFLAGS := -framework Foundation -framework Metal -framework MetalKit
+endif
+
 all: clean whisper examples

 whisper: mkdir
@@ -11,8 +28,13 @@ whisper: mkdir
     @${MAKE} -C ../.. libwhisper.a

 test: model-small whisper modtidy
+ifeq ($(UNAME_S),Darwin)
+    @C_INCLUDE_PATH=${INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} GGML_METAL_PATH_RESOURCES=${GGML_METAL_PATH_RESOURCES} go test -ldflags "-extldflags '$(EXT_LDFLAGS)'" -v .
+    @C_INCLUDE_PATH=${INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} GGML_METAL_PATH_RESOURCES=${GGML_METAL_PATH_RESOURCES} go test -ldflags "-extldflags '$(EXT_LDFLAGS)'" -v ./pkg/whisper/...
+else
     @C_INCLUDE_PATH=${INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} go test -v .
     @C_INCLUDE_PATH=${INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} go test -v ./pkg/whisper/...
+endif

 examples: $(EXAMPLES_DIR)

@@ -21,7 +43,11 @@ model-small: mkdir examples/go-model-download

 $(EXAMPLES_DIR): mkdir whisper modtidy
     @echo Build example $(notdir $@)
+ifeq ($(UNAME_S),Darwin)
+    @C_INCLUDE_PATH=${INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} GGML_METAL_PATH_RESOURCES=${GGML_METAL_PATH_RESOURCES} go build ${BUILD_FLAGS} -ldflags "-extldflags '$(EXT_LDFLAGS)'" -o ${BUILD_DIR}/$(notdir $@) ./$@
+else
     @C_INCLUDE_PATH=${INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} go build ${BUILD_FLAGS} -o ${BUILD_DIR}/$(notdir $@) ./$@
+endif

 mkdir:
     @echo Mkdir ${BUILD_DIR}
@@ -24,7 +24,7 @@ const (
 
 var (
 	// The models which will be downloaded, if no model is specified as an argument
-	modelNames = []string{"ggml-tiny.en", "ggml-tiny", "ggml-base.en", "ggml-base", "ggml-small.en", "ggml-small", "ggml-medium.en", "ggml-medium", "ggml-large-v1", "ggml-large"}
+	modelNames = []string{"ggml-tiny.en", "ggml-tiny", "ggml-base.en", "ggml-base", "ggml-small.en", "ggml-small", "ggml-medium.en", "ggml-medium", "ggml-large-v1", "ggml-large-v2", "ggml-large-v3"}
 )
 
 var (
@@ -68,10 +68,6 @@ func (flags *Flags) GetOut() string {
 	return strings.ToLower(flags.Lookup("out").Value.String())
 }
 
-func (flags *Flags) IsSpeedup() bool {
-	return flags.Lookup("speedup").Value.String() == "true"
-}
-
 func (flags *Flags) IsTokens() bool {
 	return flags.Lookup("tokens").Value.String() == "true"
 }
@@ -111,10 +107,6 @@ func (flags *Flags) SetParams(context whisper.Context) error {
 		fmt.Fprintf(flags.Output(), "Setting duration to %v\n", duration)
 		context.SetDuration(duration)
 	}
-	if flags.IsSpeedup() {
-		fmt.Fprintf(flags.Output(), "Setting speedup to true\n")
-		context.SetSpeedup(true)
-	}
 	if threads := flags.GetThreads(); threads != 0 {
 		fmt.Fprintf(flags.Output(), "Setting threads to %d\n", threads)
 		context.SetThreads(threads)
@@ -146,7 +138,6 @@ func registerFlags(flag *Flags) {
 	flag.Duration("offset", 0, "Time offset")
 	flag.Duration("duration", 0, "Duration of audio to process")
 	flag.Uint("threads", 0, "Number of threads to use")
-	flag.Bool("speedup", false, "Enable speedup")
 	flag.Uint("max-len", 0, "Maximum segment length in characters")
 	flag.Uint("max-tokens", 0, "Maximum tokens per segment")
 	flag.Float64("word-thold", 0, "Maximum segment score")
@@ -47,10 +47,6 @@ func (p *Params) SetPrintTimestamps(v bool) {
 	p.print_timestamps = toBool(v)
 }
 
-func (p *Params) SetSpeedup(v bool) {
-	p.speed_up = toBool(v)
-}
-
 // Set language id
 func (p *Params) SetLanguage(lang int) error {
 	if lang == -1 {
@@ -118,6 +114,16 @@ func (p *Params) SetMaxTokensPerSegment(n int) {
 	p.max_tokens = C.int(n)
 }
 
+// Set audio encoder context
+func (p *Params) SetAudioCtx(n int) {
+	p.audio_ctx = C.int(n)
+}
+
+// Set initial prompt
+func (p *Params) SetInitialPrompt(prompt string) {
+	p.initial_prompt = C.CString(prompt)
+}
+
 ///////////////////////////////////////////////////////////////////////////////
 // PRIVATE METHODS
 
@@ -141,6 +147,8 @@ func (p *Params) String() string {
 	str += fmt.Sprintf(" n_max_text_ctx=%d", p.n_max_text_ctx)
 	str += fmt.Sprintf(" offset_ms=%d", p.offset_ms)
 	str += fmt.Sprintf(" duration_ms=%d", p.duration_ms)
+	str += fmt.Sprintf(" audio_ctx=%d", p.audio_ctx)
+	str += fmt.Sprintf(" initial_prompt=%s", C.GoString(p.initial_prompt))
 	if p.translate {
 		str += " translate"
 	}
@@ -165,9 +173,6 @@ func (p *Params) String() string {
 	if p.token_timestamps {
 		str += " token_timestamps"
 	}
-	if p.speed_up {
-		str += " speed_up"
-	}
 
 	return str + ">"
 }
@@ -76,13 +76,8 @@ func (context *context) SetTranslate(v bool) {
 	context.params.SetTranslate(v)
 }
 
-// Set speedup flag
-func (context *context) SetSpeedup(v bool) {
-	context.params.SetSpeedup(v)
-}
-
 func (context *context) SetSplitOnWord(v bool) {
 	context.params.SetSplitOnWord(v)
 }
 
 // Set number of threads to use
@@ -125,6 +120,16 @@ func (context *context) SetMaxTokensPerSegment(n uint) {
 	context.params.SetMaxTokensPerSegment(int(n))
 }
 
+// Set audio encoder context
+func (context *context) SetAudioCtx(n uint) {
+	context.params.SetAudioCtx(int(n))
+}
+
+// Set initial prompt
+func (context *context) SetInitialPrompt(prompt string) {
+	context.params.SetInitialPrompt(prompt)
+}
+
 // ResetTimings resets the mode timings. Should be called before processing
 func (context *context) ResetTimings() {
 	context.model.ctx.Whisper_reset_timings()
@@ -38,16 +38,17 @@ type Context interface {
 	IsMultilingual() bool // Return true if the model is multilingual.
 	Language() string     // Get language
 
 	SetOffset(time.Duration)        // Set offset
 	SetDuration(time.Duration)      // Set duration
 	SetThreads(uint)                // Set number of threads to use
-	SetSpeedup(bool)                // Set speedup flag
 	SetSplitOnWord(bool)            // Set split on word flag
 	SetTokenThreshold(float32)      // Set timestamp token probability threshold
 	SetTokenSumThreshold(float32)   // Set timestamp token sum probability threshold
 	SetMaxSegmentLength(uint)       // Set max segment length in characters
 	SetTokenTimestamps(bool)        // Set token timestamps flag
 	SetMaxTokensPerSegment(uint)    // Set max tokens per segment (0 = no limit)
+	SetAudioCtx(uint)               // Set audio encoder context
+	SetInitialPrompt(prompt string) // Set initial prompt
 
 	// Process mono audio data and return any errors.
 	// If defined, newly generated segments are passed to the
@@ -10,7 +10,7 @@ import (
 
 /*
 #cgo LDFLAGS: -lwhisper -lm -lstdc++
-#cgo darwin LDFLAGS: -framework Accelerate
+#cgo darwin LDFLAGS: -framework Accelerate -framework Metal -framework Foundation -framework CoreGraphics
 #include <whisper.h>
 #include <stdlib.h>
 
@@ -83,7 +83,6 @@ const (
 	SampleRate = C.WHISPER_SAMPLE_RATE                 // Expected sample rate, samples per second
 	SampleBits = uint16(unsafe.Sizeof(C.float(0))) * 8 // Sample size in bits
 	NumFFT     = C.WHISPER_N_FFT
-	NumMEL     = C.WHISPER_N_MEL
 	HopLength  = C.WHISPER_HOP_LENGTH
 	ChunkSize  = C.WHISPER_CHUNK_SIZE
 )
@@ -103,7 +102,7 @@ var (
 func Whisper_init(path string) *Context {
 	cPath := C.CString(path)
 	defer C.free(unsafe.Pointer(cPath))
-	if ctx := C.whisper_init_from_file(cPath); ctx != nil {
+	if ctx := C.whisper_init_from_file_with_params(cPath, C.whisper_context_default_params()); ctx != nil {
 		return (*Context)(ctx)
 	} else {
 		return nil
Submodule bindings/ios updated: de46d9e781...a2085436c2
@@ -2,12 +2,14 @@ plugins {
     id 'java'
     id 'java-library'
     id 'maven-publish'
+    id 'signing'
 }
 
 archivesBaseName = 'whispercpp'
 group = 'io.github.ggerganov'
 version = '1.4.0'
 
 
 sourceCompatibility = 1.8
 targetCompatibility = 1.8
 
@@ -109,4 +111,23 @@ publishing {
             }
         }
     }
+
+    repositories {
+        maven {
+            def releasesRepoUrl = 'https://s01.oss.sonatype.org/service/local/staging/deploy/maven2/'
+            def snapshotsRepoUrl = 'https://s01.oss.sonatype.org/content/repositories/snapshots/'
+            url = version.endsWith('-SNAPSHOT') ? snapshotsRepoUrl : releasesRepoUrl
+            credentials {
+                username = System.getenv("MAVEN_USERNAME")
+                password = System.getenv("MAVEN_PASSWORD")
+            }
+        }
+    }
+
+signing {
+    def signingKey = System.getenv("PGP_SECRET")
+    def signingPassword = System.getenv("PGP_PASSPHRASE")
+    useInMemoryPgpKeys(signingKey, signingPassword)
+    sign publishing.publications.mavenJava
 }
@@ -4,6 +4,7 @@ import com.sun.jna.Structure;
 import com.sun.jna.ptr.PointerByReference;
 import io.github.ggerganov.whispercpp.ggml.GgmlType;
 import io.github.ggerganov.whispercpp.WhisperModel;
+import io.github.ggerganov.whispercpp.params.WhisperContextParams;
 
 import java.util.List;
 
@@ -23,8 +24,9 @@ public class WhisperContext extends Structure {
     public PointerByReference vocab;
     public PointerByReference state;
 
-    /** populated by whisper_init_from_file() */
+    /** populated by whisper_init_from_file_with_params() */
     String path_model;
+    WhisperContextParams params;
 
 //    public static class ByReference extends WhisperContext implements Structure.ByReference {
 //    }
@@ -2,12 +2,16 @@ package io.github.ggerganov.whispercpp;
 
 import com.sun.jna.Native;
 import com.sun.jna.Pointer;
+import io.github.ggerganov.whispercpp.bean.WhisperSegment;
+import io.github.ggerganov.whispercpp.params.WhisperContextParams;
 import io.github.ggerganov.whispercpp.params.WhisperFullParams;
 import io.github.ggerganov.whispercpp.params.WhisperSamplingStrategy;
 
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
 /**
  * Before calling most methods, you must call `initContext(modelPath)` to initialise the `ctx` Pointer.
@@ -15,8 +19,9 @@ import java.io.IOException;
 public class WhisperCpp implements AutoCloseable {
     private WhisperCppJnaLibrary lib = WhisperCppJnaLibrary.instance;
     private Pointer ctx = null;
-    private Pointer greedyPointer = null;
-    private Pointer beamPointer = null;
+    private Pointer paramsPointer = null;
+    private Pointer greedyParamsPointer = null;
+    private Pointer beamParamsPointer = null;
 
     public File modelDir() {
         String modelDirPath = System.getenv("XDG_CACHE_HOME");
@@ -31,6 +36,18 @@ public class WhisperCpp implements AutoCloseable {
      * @param modelPath - absolute path, or just the name (eg: "base", "base-en" or "base.en")
      */
     public void initContext(String modelPath) throws FileNotFoundException {
+        initContextImpl(modelPath, getContextDefaultParams());
+    }
+
+    /**
+     * @param modelPath - absolute path, or just the name (eg: "base", "base-en" or "base.en")
+     * @param params - params to use when initialising the context
+     */
+    public void initContext(String modelPath, WhisperContextParams params) throws FileNotFoundException {
+        initContextImpl(modelPath, params);
+    }
+
+    private void initContextImpl(String modelPath, WhisperContextParams params) throws FileNotFoundException {
         if (ctx != null) {
             lib.whisper_free(ctx);
         }
@@ -43,13 +60,26 @@ public class WhisperCpp implements AutoCloseable {
             modelPath = new File(modelDir(), modelPath).getAbsolutePath();
         }
 
-        ctx = lib.whisper_init_from_file(modelPath);
+        ctx = lib.whisper_init_from_file_with_params(modelPath, params);
 
         if (ctx == null) {
             throw new FileNotFoundException(modelPath);
         }
     }
 
+    /**
+     * Provides default params which can be used with `whisper_init_from_file_with_params()` etc.
+     * Because this function allocates memory for the params, the caller must call either:
+     * - call `whisper_free_context_params()`
+     * - `Native.free(Pointer.nativeValue(pointer));`
+     */
+    public WhisperContextParams getContextDefaultParams() {
+        paramsPointer = lib.whisper_context_default_params_by_ref();
+        WhisperContextParams params = new WhisperContextParams(paramsPointer);
+        params.read();
+        return params;
+    }
+
     /**
      * Provides default params which can be used with `whisper_full()` etc.
      * Because this function allocates memory for the params, the caller must call either:
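A minimal usage sketch of the context-params flow introduced above. It is illustrative only and not part of the commit: the model filename is a placeholder, and it assumes the `WhisperCpp` and `WhisperContextParams` classes shown in this diff, with the native pointer released later through `freeParams()`.

```java
import io.github.ggerganov.whispercpp.WhisperCpp;
import io.github.ggerganov.whispercpp.params.WhisperContextParams;

class InitWithParamsSketch {
    public static void main(String[] args) throws Exception {
        try (WhisperCpp whisper = new WhisperCpp()) {
            // Native defaults; the wrapper stores the underlying pointer in
            // paramsPointer and frees it in freeParams().
            WhisperContextParams params = whisper.getContextDefaultParams();
            params.useGpu(false); // the one field currently exposed (use_gpu)

            // Same as the one-argument overload, but with explicit params;
            // calls whisper_init_from_file_with_params() on the C side.
            whisper.initContext("ggml-base.en.bin", params); // placeholder model path
        }
    }
}
```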
@@ -63,15 +93,15 @@ public class WhisperCpp implements AutoCloseable {
 
         // whisper_full_default_params_by_ref allocates memory which we need to delete, so only create max 1 pointer for each strategy.
         if (strategy == WhisperSamplingStrategy.WHISPER_SAMPLING_GREEDY) {
-            if (greedyPointer == null) {
-                greedyPointer = lib.whisper_full_default_params_by_ref(strategy.ordinal());
+            if (greedyParamsPointer == null) {
+                greedyParamsPointer = lib.whisper_full_default_params_by_ref(strategy.ordinal());
             }
-            pointer = greedyPointer;
+            pointer = greedyParamsPointer;
         } else {
-            if (beamPointer == null) {
-                beamPointer = lib.whisper_full_default_params_by_ref(strategy.ordinal());
+            if (beamParamsPointer == null) {
+                beamParamsPointer = lib.whisper_full_default_params_by_ref(strategy.ordinal());
             }
-            pointer = beamPointer;
+            pointer = beamParamsPointer;
         }
 
         WhisperFullParams params = new WhisperFullParams(pointer);
@@ -93,13 +123,17 @@ public class WhisperCpp implements AutoCloseable {
     }
 
     private void freeParams() {
-        if (greedyPointer != null) {
-            Native.free(Pointer.nativeValue(greedyPointer));
-            greedyPointer = null;
+        if (paramsPointer != null) {
+            Native.free(Pointer.nativeValue(paramsPointer));
+            paramsPointer = null;
         }
-        if (beamPointer != null) {
-            Native.free(Pointer.nativeValue(beamPointer));
-            beamPointer = null;
+        if (greedyParamsPointer != null) {
+            Native.free(Pointer.nativeValue(greedyParamsPointer));
+            greedyParamsPointer = null;
+        }
+        if (beamParamsPointer != null) {
+            Native.free(Pointer.nativeValue(beamParamsPointer));
+            beamParamsPointer = null;
         }
     }
 
@@ -129,6 +163,28 @@ public class WhisperCpp implements AutoCloseable {
 
         return str.toString().trim();
     }
+
+    public List<WhisperSegment> fullTranscribeWithTime(WhisperFullParams whisperParams, float[] audioData) throws IOException {
+        if (ctx == null) {
+            throw new IllegalStateException("Model not initialised");
+        }
+
+        if (lib.whisper_full(ctx, whisperParams, audioData, audioData.length) != 0) {
+            throw new IOException("Failed to process audio");
+        }
+
+        int nSegments = lib.whisper_full_n_segments(ctx);
+        List<WhisperSegment> segments= new ArrayList<>(nSegments);
+
+        for (int i = 0; i < nSegments; i++) {
+            long t0 = lib.whisper_full_get_segment_t0(ctx, i);
+            String text = lib.whisper_full_get_segment_text(ctx, i);
+            long t1 = lib.whisper_full_get_segment_t1(ctx, i);
+            segments.add(new WhisperSegment(t0,t1,text));
+        }
+
+        return segments;
+    }
 
 //    public int getTextSegmentCount(Pointer ctx) {
 //        return lib.whisper_full_n_segments(ctx);
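Before moving on to the JNA interface changes, here is a hedged sketch of how the new `fullTranscribeWithTime()` method might be used. It is not part of the patch; it assumes an already-initialised `WhisperCpp` instance, mono 16 kHz PCM floats in `[-1, 1]`, and that segment bounds follow whisper.cpp's 10 ms tick convention for `whisper_full_get_segment_t0/t1`.

```java
import java.util.List;
import io.github.ggerganov.whispercpp.WhisperCpp;
import io.github.ggerganov.whispercpp.bean.WhisperSegment;
import io.github.ggerganov.whispercpp.params.WhisperFullParams;
import io.github.ggerganov.whispercpp.params.WhisperSamplingStrategy;

class TimedTranscriptionSketch {
    /** Prints one line per segment; 'audio' must be mono 16 kHz PCM in [-1, 1]. */
    static void printSegments(WhisperCpp whisper, float[] audio) throws Exception {
        WhisperFullParams params =
                whisper.getFullDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_GREEDY);

        List<WhisperSegment> segments = whisper.fullTranscribeWithTime(params, audio);
        for (WhisperSegment seg : segments) {
            // getStart()/getEnd() carry whisper's native 10 ms ticks; scale for display.
            System.out.printf("[%6d ms --> %6d ms] %s%n",
                    seg.getStart() * 10, seg.getEnd() * 10, seg.getSentence());
        }
    }
}
```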
@@ -5,6 +5,7 @@ import com.sun.jna.Native;
 import com.sun.jna.Pointer;
 import io.github.ggerganov.whispercpp.model.WhisperModelLoader;
 import io.github.ggerganov.whispercpp.model.WhisperTokenData;
+import io.github.ggerganov.whispercpp.params.WhisperContextParams;
 import io.github.ggerganov.whispercpp.params.WhisperFullParams;
 
 public interface WhisperCppJnaLibrary extends Library {
@@ -13,13 +14,32 @@ public interface WhisperCppJnaLibrary extends Library {
     String whisper_print_system_info();
 
     /**
-     * Allocate (almost) all memory needed for the model by loading from a file.
+     * DEPRECATED. Allocate (almost) all memory needed for the model by loading from a file.
      *
      * @param path_model Path to the model file
      * @return Whisper context on success, null on failure
      */
     Pointer whisper_init_from_file(String path_model);
 
+    /**
+     * Provides default params which can be used with `whisper_init_from_file_with_params()` etc.
+     * Because this function allocates memory for the params, the caller must call either:
+     * - call `whisper_free_context_params()`
+     * - `Native.free(Pointer.nativeValue(pointer));`
+     */
+    Pointer whisper_context_default_params_by_ref();
+
+    void whisper_free_context_params(Pointer params);
+
+    /**
+     * Allocate (almost) all memory needed for the model by loading from a file.
+     *
+     * @param path_model Path to the model file
+     * @param params Pointer to whisper_context_params
+     * @return Whisper context on success, null on failure
+     */
+    Pointer whisper_init_from_file_with_params(String path_model, WhisperContextParams params);
+
     /**
      * Allocate (almost) all memory needed for the model by loading from a buffer.
     *
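For readers working directly against the JNA interface rather than the `WhisperCpp` wrapper, the sketch below shows one way the new declarations could be combined. It is an assumption-laden illustration, not part of the commit: the model path is a placeholder, the package location of `WhisperCppJnaLibrary` is taken from this diff, and it mirrors the ownership rule spelled out in the Javadoc above (the by-ref params allocation must be freed by the caller).

```java
import com.sun.jna.Pointer;
import io.github.ggerganov.whispercpp.WhisperCppJnaLibrary;
import io.github.ggerganov.whispercpp.params.WhisperContextParams;

class RawJnaInitSketch {
    static Pointer openContext(String modelPath) {
        WhisperCppJnaLibrary lib = WhisperCppJnaLibrary.instance;

        // whisper_context_default_params_by_ref() allocates native memory...
        Pointer raw = lib.whisper_context_default_params_by_ref();
        WhisperContextParams params = new WhisperContextParams(raw);
        params.read(); // pull the native defaults into the Java fields

        Pointer ctx = lib.whisper_init_from_file_with_params(modelPath, params);

        // ...which the caller must release, per the Javadoc above.
        lib.whisper_free_context_params(raw);
        return ctx; // null if the model could not be loaded
    }
}
```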
@@ -284,14 +304,6 @@ public interface WhisperCppJnaLibrary extends Library {
     /** Language id associated with the provided state */
     int whisper_full_lang_id_from_state(Pointer state);
 
-    /**
-     * Convert RAW PCM audio to log mel spectrogram but applies a Phase Vocoder to speed up the audio x2.
-     * The resulting spectrogram is stored inside the default state of the provided whisper context.
-     * @return 0 on success
-     */
-    int whisper_pcm_to_mel_phase_vocoder(Pointer ctx, final float[] samples, int n_samples, int n_threads);
-
-    int whisper_pcm_to_mel_phase_vocoder_with_state(Pointer ctx, Pointer state, final float[] samples, int n_samples, int n_threads);
-
     /** Get the start time of the specified segment. */
     long whisper_full_get_segment_t0(Pointer ctx, int i_segment);
New file (47 lines): WhisperSegment.java
@@ -0,0 +1,47 @@
package io.github.ggerganov.whispercpp.bean;

/**
 * Created by litonglinux@qq.com on 10/21/2023_7:48 AM
 */
public class WhisperSegment {
    private long start, end;
    private String sentence;

    public WhisperSegment() {
    }

    public WhisperSegment(long start, long end, String sentence) {
        this.start = start;
        this.end = end;
        this.sentence = sentence;
    }

    public long getStart() {
        return start;
    }

    public long getEnd() {
        return end;
    }

    public String getSentence() {
        return sentence;
    }

    public void setStart(long start) {
        this.start = start;
    }

    public void setEnd(long end) {
        this.end = end;
    }

    public void setSentence(String sentence) {
        this.sentence = sentence;
    }

    @Override
    public String toString() {
        return "[" + start + " --> " + end + "]:" + sentence;
    }
}

New file (31 lines): WhisperContextParams.java
@@ -0,0 +1,31 @@
package io.github.ggerganov.whispercpp.params;

import com.sun.jna.*;

import java.util.Arrays;
import java.util.List;

/**
 * Parameters for the whisper_init_from_file_with_params() function.
 * If you change the order or add new parameters, make sure to update the default values in whisper.cpp:
 * whisper_context_default_params()
 */
public class WhisperContextParams extends Structure {

    public WhisperContextParams(Pointer p) {
        super(p);
    }

    /** Use GPU for inference Number (default = true) */
    public CBool use_gpu;

    /** Use GPU for inference Number (default = true) */
    public void useGpu(boolean enable) {
        use_gpu = enable ? CBool.TRUE : CBool.FALSE;
    }

    @Override
    protected List<String> getFieldOrder() {
        return Arrays.asList("use_gpu");
    }
}
@@ -58,6 +58,9 @@ public class WhisperFullParams extends Structure {
         no_context = enable ? CBool.FALSE : CBool.TRUE;
     }
 
+    /** Generate timestamps or not? */
+    public CBool no_timestamps;
+
     /** Flag to force single segment output (useful for streaming). (default = false) */
     public CBool single_segment;
 

@@ -126,14 +129,6 @@ public class WhisperFullParams extends Structure {
     /** Maximum tokens per segment (0, default = no limit) */
     public int max_tokens;
 
-    /** Flag to speed up the audio by 2x using Phase Vocoder. (default = false) */
-    public CBool speed_up;
-
-    /** Flag to speed up the audio by 2x using Phase Vocoder. (default = false) */
-    public void speedUp(boolean enable) {
-        speed_up = enable ? CBool.TRUE : CBool.FALSE;
-    }
-
     /** Overwrite the audio context size (0 = use default). */
     public int audio_ctx;
 

@@ -145,6 +140,9 @@ public class WhisperFullParams extends Structure {
         tdrz_enable = enable ? CBool.TRUE : CBool.FALSE;
     }
 
+    /** Regular expression matching tokens to suppress. */
+    public String suppress_regex;
+
     /** Tokens to provide to the whisper decoder as an initial prompt.
      * These are prepended to any existing text context from a previous call. */
     public String initial_prompt;

@@ -304,18 +302,25 @@ public class WhisperFullParams extends Structure {
         logits_filter_callback = CallbackReference.getFunctionPointer(callback);
     }
 
+    /** Grammar stuff */
+    public Pointer grammar_rules;
+    public long n_grammar_rules;
+    public long i_start_rule;
+    public float grammar_penalty;
+
     @Override
     protected List<String> getFieldOrder() {
         return Arrays.asList("strategy", "n_threads", "n_max_text_ctx", "offset_ms", "duration_ms", "translate",
-                "no_context", "single_segment",
+                "no_context", "single_segment", "no_timestamps",
                 "print_special", "print_progress", "print_realtime", "print_timestamps", "token_timestamps",
-                "thold_pt", "thold_ptsum", "max_len", "split_on_word", "max_tokens", "speed_up", "audio_ctx",
-                "tdrz_enable", "initial_prompt", "prompt_tokens", "prompt_n_tokens", "language", "detect_language",
+                "thold_pt", "thold_ptsum", "max_len", "split_on_word", "max_tokens", "audio_ctx",
+                "tdrz_enable", "suppress_regex", "initial_prompt", "prompt_tokens", "prompt_n_tokens", "language", "detect_language",
                 "suppress_blank", "suppress_non_speech_tokens", "temperature", "max_initial_ts", "length_penalty",
                 "temperature_inc", "entropy_thold", "logprob_thold", "no_speech_thold", "greedy", "beam_search",
                 "new_segment_callback", "new_segment_callback_user_data",
                 "progress_callback", "progress_callback_user_data",
                 "encoder_begin_callback", "encoder_begin_callback_user_data",
-                "logits_filter_callback", "logits_filter_callback_user_data");
+                "logits_filter_callback", "logits_filter_callback_user_data",
+                "grammar_rules", "n_grammar_rules", "i_start_rule", "grammar_penalty");
     }
 }
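To make the new `WhisperFullParams` fields concrete, here is a small illustrative method (not part of the commit). It assumes the classes and imports shown elsewhere in this diff, an already-initialised `WhisperCpp` instance, and the existing `fullTranscribe(...)` helper; the regex value is only an example.

```java
static String transcribeWithoutTimestamps(WhisperCpp whisper, float[] audio) throws Exception {
    WhisperFullParams params =
            whisper.getFullDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_GREEDY);

    // Fields added by this change:
    params.no_timestamps = CBool.TRUE;   // do not generate timestamps
    params.suppress_regex = "[0-9]+";    // example value: suppress purely numeric tokens
    // grammar_rules / n_grammar_rules / i_start_rule / grammar_penalty are left at
    // their defaults here; they expose whisper.cpp's grammar-constrained sampling.

    return whisper.fullTranscribe(params, audio); // existing helper in WhisperCpp
}
```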
@@ -2,6 +2,7 @@ package io.github.ggerganov.whispercpp;
 
 import static org.junit.jupiter.api.Assertions.*;
 
+import io.github.ggerganov.whispercpp.bean.WhisperSegment;
 import io.github.ggerganov.whispercpp.params.CBool;
 import io.github.ggerganov.whispercpp.params.WhisperFullParams;
 import io.github.ggerganov.whispercpp.params.WhisperSamplingStrategy;

@@ -11,6 +12,7 @@ import javax.sound.sampled.AudioInputStream;
 import javax.sound.sampled.AudioSystem;
 import java.io.File;
 import java.io.FileNotFoundException;
+import java.util.List;
 
 class WhisperCppTest {
     private static WhisperCpp whisper = new WhisperCpp();

@@ -20,11 +22,12 @@ class WhisperCppTest {
     static void init() throws FileNotFoundException {
         // By default, models are loaded from ~/.cache/whisper/ and are usually named "ggml-${name}.bin"
         // or you can provide the absolute path to the model file.
+        //String modelName = "../../models/ggml-tiny.bin";
         String modelName = "../../models/ggml-tiny.en.bin";
         try {
             whisper.initContext(modelName);
-            // whisper.getFullDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_GREEDY);
-            // whisper.getJavaDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_BEAM_SEARCH);
+            //whisper.getFullDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_GREEDY);
+            //whisper.getJavaDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_BEAM_SEARCH);
             modelInitialised = true;
         } catch (FileNotFoundException ex) {
             System.out.println("Model " + modelName + " not found");

@@ -42,7 +45,7 @@ class WhisperCppTest {
         assertEquals(16384, params.n_max_text_ctx);
         assertFalse(params.translate);
         assertEquals(0.01f, params.thold_pt);
-        assertEquals(2, params.beam_search.beam_size);
+        assertEquals(5, params.beam_search.beam_size);
         assertEquals(-1.0f, params.beam_search.patience);
     }
 

@@ -55,7 +58,7 @@ class WhisperCppTest {
         assertEquals(WhisperSamplingStrategy.WHISPER_SAMPLING_GREEDY.ordinal(), params.strategy);
         assertNotEquals(0, params.n_threads);
         assertEquals(16384, params.n_max_text_ctx);
-        assertEquals(2, params.greedy.best_of);
+        assertEquals(5, params.greedy.best_of);
     }
 
     @Test

@@ -72,11 +75,11 @@ class WhisperCppTest {
         byte[] b = new byte[audioInputStream.available()];
         float[] floats = new float[b.length / 2];
 
-        // WhisperFullParams params = whisper.getFullDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_GREEDY);
+        //WhisperFullParams params = whisper.getFullDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_GREEDY);
         WhisperFullParams params = whisper.getFullDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_BEAM_SEARCH);
         params.setProgressCallback((ctx, state, progress, user_data) -> System.out.println("progress: " + progress));
         params.print_progress = CBool.FALSE;
-        // params.initial_prompt = "and so my fellow Americans um, like";
+        //params.initial_prompt = "and so my fellow Americans um, like";
 
 
         try {

@@ -99,4 +102,43 @@ class WhisperCppTest {
             audioInputStream.close();
         }
     }
+
+    @Test
+    void testFullTranscribeWithTime() throws Exception {
+        if (!modelInitialised) {
+            System.out.println("Model not initialised, skipping test");
+            return;
+        }
+
+        // Given
+        File file = new File(System.getProperty("user.dir"), "../../samples/jfk.wav");
+        AudioInputStream audioInputStream = AudioSystem.getAudioInputStream(file);
+
+        byte[] b = new byte[audioInputStream.available()];
+        float[] floats = new float[b.length / 2];
+
+        //WhisperFullParams params = whisper.getFullDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_GREEDY);
+        WhisperFullParams params = whisper.getFullDefaultParams(WhisperSamplingStrategy.WHISPER_SAMPLING_BEAM_SEARCH);
+        params.setProgressCallback((ctx, state, progress, user_data) -> System.out.println("progress: " + progress));
+        params.print_progress = CBool.FALSE;
+        //params.initial_prompt = "and so my fellow Americans um, like";
+
+        try {
+            audioInputStream.read(b);
+
+            for (int i = 0, j = 0; i < b.length; i += 2, j++) {
+                int intSample = (int) (b[i + 1]) << 8 | (int) (b[i]) & 0xFF;
+                floats[j] = intSample / 32767.0f;
+            }
+
+            List<WhisperSegment> segments = whisper.fullTranscribeWithTime(params, floats);
+            assertTrue(segments.size() > 0, "The size of segments should be greater than 0");
+            for (WhisperSegment segment : segments) {
+                System.out.println(segment);
+            }
+        } finally {
+            audioInputStream.close();
+        }
+    }
+
 }
@@ -41,7 +41,7 @@ make publish-npm
 
 ## Sample run
 
-```java
+```text
 $ node --experimental-wasm-threads --experimental-wasm-simd ../tests/test-whisper.js
 
 whisper_model_load: loading model from 'whisper.bin'

@@ -20,7 +20,7 @@ struct whisper_context * g_context;
 EMSCRIPTEN_BINDINGS(whisper) {
     emscripten::function("init", emscripten::optional_override([](const std::string & path_model) {
         if (g_context == nullptr) {
-            g_context = whisper_init_from_file(path_model.c_str());
+            g_context = whisper_init_from_file_with_params(path_model.c_str(), whisper_context_default_params());
             if (g_context != nullptr) {
                 return true;
             } else {
@@ -1 +1 @@
|
"use strict";var Module={};var ENVIRONMENT_IS_NODE=typeof process=="object"&&typeof process.versions=="object"&&typeof process.versions.node=="string";if(ENVIRONMENT_IS_NODE){var nodeWorkerThreads=require("worker_threads");var parentPort=nodeWorkerThreads.parentPort;parentPort.on("message",data=>onmessage({data:data}));var fs=require("fs");Object.assign(global,{self:global,require:require,Module:Module,location:{href:__filename},Worker:nodeWorkerThreads.Worker,importScripts:f=>(0,eval)(fs.readFileSync(f,"utf8")+"//# sourceURL="+f),postMessage:msg=>parentPort.postMessage(msg),performance:global.performance||{now:Date.now}})}var initializedJS=false;function threadPrintErr(){var text=Array.prototype.slice.call(arguments).join(" ");if(ENVIRONMENT_IS_NODE){fs.writeSync(2,text+"\n");return}console.error(text)}function threadAlert(){var text=Array.prototype.slice.call(arguments).join(" ");postMessage({cmd:"alert",text:text,threadId:Module["_pthread_self"]()})}var err=threadPrintErr;self.alert=threadAlert;Module["instantiateWasm"]=(info,receiveInstance)=>{var module=Module["wasmModule"];Module["wasmModule"]=null;var instance=new WebAssembly.Instance(module,info);return receiveInstance(instance)};self.onunhandledrejection=e=>{throw e.reason||e};function handleMessage(e){try{if(e.data.cmd==="load"){let messageQueue=[];self.onmessage=e=>messageQueue.push(e);self.startWorker=instance=>{Module=instance;postMessage({"cmd":"loaded"});for(let msg of messageQueue){handleMessage(msg)}self.onmessage=handleMessage};Module["wasmModule"]=e.data.wasmModule;for(const handler of e.data.handlers){Module[handler]=(...args)=>{postMessage({cmd:"callHandler",handler:handler,args:args})}}Module["wasmMemory"]=e.data.wasmMemory;Module["buffer"]=Module["wasmMemory"].buffer;Module["ENVIRONMENT_IS_PTHREAD"]=true;if(typeof e.data.urlOrBlob=="string"){importScripts(e.data.urlOrBlob)}else{var objectUrl=URL.createObjectURL(e.data.urlOrBlob);importScripts(objectUrl);URL.revokeObjectURL(objectUrl)}whisper_factory(Module)}else if(e.data.cmd==="run"){Module["__emscripten_thread_init"](e.data.pthread_ptr,0,0,1);Module["__emscripten_thread_mailbox_await"](e.data.pthread_ptr);Module["establishStackSpace"]();Module["PThread"].receiveObjectTransfer(e.data);Module["PThread"].threadInitTLS();if(!initializedJS){Module["__embind_initialize_bindings"]();initializedJS=true}try{Module["invokeEntryPoint"](e.data.start_routine,e.data.arg)}catch(ex){if(ex!="unwind"){throw ex}}}else if(e.data.cmd==="cancel"){if(Module["_pthread_self"]()){Module["__emscripten_thread_exit"](-1)}}else if(e.data.target==="setimmediate"){}else if(e.data.cmd==="checkMailbox"){if(initializedJS){Module["checkMailbox"]()}}else if(e.data.cmd){err(`worker.js received unknown command ${e.data.cmd}`);err(e.data)}}catch(ex){if(Module["__emscripten_thread_crashed"]){Module["__emscripten_thread_crashed"]()}throw ex}}self.onmessage=handleMessage;
|
@@ -1,6 +1,6 @@
 {
   "name": "whisper.cpp",
-  "version": "1.4.2",
+  "version": "1.6.2",
   "description": "Whisper speech recognition",
   "main": "whisper.js",
   "scripts": {
File diff suppressed because one or more lines are too long
12
bindings/ruby/Rakefile
Normal file
12
bindings/ruby/Rakefile
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
require 'rake/clean'
|
||||||
|
require 'rubygems/package'
|
||||||
|
|
||||||
|
desc 'Build gem'
|
||||||
|
task :package do
|
||||||
|
spec_source = File.read File.join(File.dirname(__FILE__),'whispercpp.gemspec')
|
||||||
|
spec = nil
|
||||||
|
# see: http://gist.github.com/16215
|
||||||
|
Thread.new { spec = eval("#{spec_source}") }.join
|
||||||
|
spec.validate
|
||||||
|
Gem::Package.build(spec)
|
||||||
|
end
|
2
bindings/ruby/ext/.gitignore
vendored
2
bindings/ruby/ext/.gitignore
vendored
@ -1,6 +1,8 @@
|
|||||||
Makefile
|
Makefile
|
||||||
ggml.c
|
ggml.c
|
||||||
ggml.h
|
ggml.h
|
||||||
|
ggml-alloc.c
|
||||||
|
ggml-alloc.h
|
||||||
whisper.bundle
|
whisper.bundle
|
||||||
whisper.cpp
|
whisper.cpp
|
||||||
whisper.h
|
whisper.h
|
||||||
|
@ -1,8 +1,18 @@
|
|||||||
require 'mkmf'
|
require 'mkmf'
|
||||||
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','whisper.cpp')} .")
|
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','whisper.cpp')} .")
|
||||||
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','whisper.h')} .")
|
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','whisper.h')} .")
|
||||||
|
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','whisper-mel.hpp')} .")
|
||||||
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml.h')} .")
|
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml.h')} .")
|
||||||
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml.c')} .")
|
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml.c')} .")
|
||||||
|
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-impl.h')} .")
|
||||||
|
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-alloc.h')} .")
|
||||||
|
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-alloc.c')} .")
|
||||||
|
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-backend-impl.h')} .")
|
||||||
|
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-backend.h')} .")
|
||||||
|
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-backend.c')} .")
|
||||||
|
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-common.h')} .")
|
||||||
|
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-quants.h')} .")
|
||||||
|
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-quants.c')} .")
|
||||||
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','examples','dr_wav.h')} .")
|
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','examples','dr_wav.h')} .")
|
||||||
|
|
||||||
|
|
||||||
|
141
bindings/ruby/ext/ggml-backend-impl.h
Normal file
141
bindings/ruby/ext/ggml-backend-impl.h
Normal file
@ -0,0 +1,141 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
// ggml-backend internal header
|
||||||
|
|
||||||
|
#include "ggml-backend.h"
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
//
|
||||||
|
// Backend buffer
|
||||||
|
//
|
||||||
|
|
||||||
|
// buffer type
|
||||||
|
typedef void * ggml_backend_buffer_type_context_t;
|
||||||
|
|
||||||
|
struct ggml_backend_buffer_type_i {
|
||||||
|
const char * (*GGML_CALL get_name) (ggml_backend_buffer_type_t buft);
|
||||||
|
ggml_backend_buffer_t (*GGML_CALL alloc_buffer) (ggml_backend_buffer_type_t buft, size_t size);
|
||||||
|
size_t (*GGML_CALL get_alignment) (ggml_backend_buffer_type_t buft); // tensor alignment
|
||||||
|
size_t (*GGML_CALL get_max_size) (ggml_backend_buffer_type_t buft); // allocation max size
|
||||||
|
size_t (*GGML_CALL get_alloc_size) (ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor); // data size needed to allocate the tensor, including padding
|
||||||
|
bool (*GGML_CALL supports_backend)(ggml_backend_buffer_type_t buft, ggml_backend_t backend); // check if the buffer type is usable by the backend
|
||||||
|
// check if tensor data is in host memory
|
||||||
|
// should be equivalent to supports_backend(buft, ggml_backend_cpu_init())
|
||||||
|
bool (*GGML_CALL is_host) (ggml_backend_buffer_type_t buft);
|
||||||
|
};
|
||||||
|
|
||||||
|
struct ggml_backend_buffer_type {
|
||||||
|
struct ggml_backend_buffer_type_i iface;
|
||||||
|
ggml_backend_buffer_type_context_t context;
|
||||||
|
};
|
||||||
|
|
||||||
|
// buffer
|
||||||
|
typedef void * ggml_backend_buffer_context_t;
|
||||||
|
|
||||||
|
struct ggml_backend_buffer_i {
|
||||||
|
const char * (*GGML_CALL get_name) (ggml_backend_buffer_t buffer);
|
||||||
|
void (*GGML_CALL free_buffer)(ggml_backend_buffer_t buffer);
|
||||||
|
void * (*GGML_CALL get_base) (ggml_backend_buffer_t buffer);
|
||||||
|
void (*GGML_CALL init_tensor)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
|
||||||
|
    void (*GGML_CALL set_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
    void (*GGML_CALL get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
    bool (*GGML_CALL cpy_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst); // dst is in the buffer, src may be in any buffer
    void (*GGML_CALL clear)      (ggml_backend_buffer_t buffer, uint8_t value);
    void (*GGML_CALL reset)      (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
};

struct ggml_backend_buffer {
    struct ggml_backend_buffer_i  iface;
    ggml_backend_buffer_type_t    buft;
    ggml_backend_buffer_context_t context;
    size_t size;
    enum ggml_backend_buffer_usage usage;
};

GGML_CALL ggml_backend_buffer_t ggml_backend_buffer_init(
        ggml_backend_buffer_type_t    buft,
        struct ggml_backend_buffer_i  iface,
        ggml_backend_buffer_context_t context,
        size_t size);

// do not use directly, use ggml_backend_tensor_copy instead
bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst);

// buffer that contains a collection of buffers
GGML_CALL ggml_backend_buffer_t ggml_backend_multi_buffer_alloc_buffer(ggml_backend_buffer_t * buffers, size_t n_buffers);
GGML_CALL bool                  ggml_backend_buffer_is_multi_buffer(ggml_backend_buffer_t buffer);
GGML_CALL void                  ggml_backend_multi_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage);

//
// Backend
//

typedef void * ggml_backend_context_t;

struct ggml_backend_i {
    const char * (*GGML_CALL get_name)(ggml_backend_t backend);

    void (*GGML_CALL free)(ggml_backend_t backend);

    // buffer allocation
    ggml_backend_buffer_type_t (*GGML_CALL get_default_buffer_type)(ggml_backend_t backend);

    // (optional) asynchronous tensor data access
    void (*GGML_CALL set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
    void (*GGML_CALL get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
    bool (*GGML_CALL cpy_tensor_async)(ggml_backend_t backend_src, ggml_backend_t backend_dst, const struct ggml_tensor * src, struct ggml_tensor * dst);

    // (optional) complete all pending operations
    void (*GGML_CALL synchronize)(ggml_backend_t backend);

    // compute graph with a plan (not used currently)
    ggml_backend_graph_plan_t (*GGML_CALL graph_plan_create) (ggml_backend_t backend, const struct ggml_cgraph * cgraph);
    void                      (*GGML_CALL graph_plan_free)   (ggml_backend_t backend, ggml_backend_graph_plan_t plan);

    // compute graph with a plan
    enum ggml_status (*GGML_CALL graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
    // compute graph without a plan (async)
    enum ggml_status (*GGML_CALL graph_compute)     (ggml_backend_t backend, struct ggml_cgraph * cgraph);

    // check if the backend supports an operation
    bool (*GGML_CALL supports_op)(ggml_backend_t backend, const struct ggml_tensor * op);

    // check if the backend wants to run an operation, even if the weights are allocated in a CPU buffer
    // these should be expensive operations with large batch sizes that may benefit from running on this backend
    // even if the weight has to be copied from the CPU temporarily
    bool (*GGML_CALL offload_op)(ggml_backend_t backend, const struct ggml_tensor * op);

    // (optional) event synchronization
    ggml_backend_event_t (*GGML_CALL event_new)        (ggml_backend_t backend);
    void                 (*GGML_CALL event_free)       (ggml_backend_event_t event);
    void                 (*GGML_CALL event_record)     (ggml_backend_event_t event);
    void                 (*GGML_CALL event_wait)       (ggml_backend_t backend, ggml_backend_event_t event);
    void                 (*GGML_CALL event_synchronize)(ggml_backend_event_t event);
};

struct ggml_backend {
    ggml_guid_t guid;

    struct ggml_backend_i iface;
    ggml_backend_context_t context;
};

struct ggml_backend_event {
    ggml_backend_t backend;
    void * context;
};

//
// Backend registry
//

typedef ggml_backend_t (*GGML_CALL ggml_backend_init_fn)(const char * params, void * user_data);

GGML_CALL void ggml_backend_register(const char * name, ggml_backend_init_fn init_fn, ggml_backend_buffer_type_t default_buffer_type, void * user_data);

#ifdef __cplusplus
}
#endif
2095  bindings/ruby/ext/ggml-backend.c  Normal file
File diff suppressed because it is too large

233  bindings/ruby/ext/ggml-backend.h  Normal file
@@ -0,0 +1,233 @@
#pragma once

#include "ggml.h"
#include "ggml-alloc.h"

#ifdef __cplusplus
extern "C" {
#endif

typedef struct ggml_backend_buffer_type * ggml_backend_buffer_type_t;
typedef struct ggml_backend_buffer * ggml_backend_buffer_t;
typedef struct ggml_backend_event * ggml_backend_event_t;
typedef struct ggml_backend * ggml_backend_t;
typedef void * ggml_backend_graph_plan_t;

//
// Backend buffer
//

// buffer type
GGML_API           const char *          ggml_backend_buft_name            (ggml_backend_buffer_type_t buft);
GGML_API GGML_CALL ggml_backend_buffer_t ggml_backend_buft_alloc_buffer    (ggml_backend_buffer_type_t buft, size_t size);
GGML_API           size_t                ggml_backend_buft_get_alignment   (ggml_backend_buffer_type_t buft);
GGML_API           size_t                ggml_backend_buft_get_max_size    (ggml_backend_buffer_type_t buft);
GGML_API GGML_CALL size_t                ggml_backend_buft_get_alloc_size  (ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor);
GGML_API           bool                  ggml_backend_buft_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend);
GGML_API           bool                  ggml_backend_buft_is_host         (ggml_backend_buffer_type_t buft);

// buffer
enum ggml_backend_buffer_usage {
    GGML_BACKEND_BUFFER_USAGE_ANY = 0,
    GGML_BACKEND_BUFFER_USAGE_WEIGHTS = 1,
};

GGML_API           const char *               ggml_backend_buffer_name          (ggml_backend_buffer_t buffer);
GGML_API           void                       ggml_backend_buffer_free          (ggml_backend_buffer_t buffer);
GGML_API           void *                     ggml_backend_buffer_get_base      (ggml_backend_buffer_t buffer);
GGML_API           size_t                     ggml_backend_buffer_get_size      (ggml_backend_buffer_t buffer);
GGML_API GGML_CALL void                       ggml_backend_buffer_init_tensor   (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
GGML_API           size_t                     ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer);
GGML_API           size_t                     ggml_backend_buffer_get_max_size  (ggml_backend_buffer_t buffer);
GGML_API           size_t                     ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
GGML_API           void                       ggml_backend_buffer_clear         (ggml_backend_buffer_t buffer, uint8_t value);
GGML_API           bool                       ggml_backend_buffer_is_host       (ggml_backend_buffer_t buffer);
GGML_API           void                       ggml_backend_buffer_set_usage     (ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage);
GGML_API           ggml_backend_buffer_type_t ggml_backend_buffer_get_type      (ggml_backend_buffer_t buffer);
GGML_API           void                       ggml_backend_buffer_reset         (ggml_backend_buffer_t buffer);

//
// Backend
//

GGML_API ggml_guid_t  ggml_backend_guid(ggml_backend_t backend);
GGML_API const char * ggml_backend_name(ggml_backend_t backend);
GGML_API void         ggml_backend_free(ggml_backend_t backend);

GGML_API ggml_backend_buffer_type_t ggml_backend_get_default_buffer_type(ggml_backend_t backend);
GGML_API ggml_backend_buffer_t      ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size);
GGML_API size_t                     ggml_backend_get_alignment(ggml_backend_t backend);
GGML_API size_t                     ggml_backend_get_max_size(ggml_backend_t backend);

GGML_API void ggml_backend_tensor_set_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
GGML_API void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);

GGML_API GGML_CALL void ggml_backend_tensor_set(      struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
GGML_API GGML_CALL void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);

GGML_API void ggml_backend_synchronize(ggml_backend_t backend);

GGML_API ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph);
GGML_API void                      ggml_backend_graph_plan_free  (ggml_backend_t backend, ggml_backend_graph_plan_t plan);

GGML_API enum ggml_status ggml_backend_graph_plan_compute (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
GGML_API enum ggml_status ggml_backend_graph_compute      (ggml_backend_t backend, struct ggml_cgraph * cgraph);
GGML_API enum ggml_status ggml_backend_graph_compute_async(ggml_backend_t backend, struct ggml_cgraph * cgraph);
GGML_API bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op);
GGML_API bool ggml_backend_offload_op(ggml_backend_t backend, const struct ggml_tensor * op);

// tensor copy between different backends
GGML_API void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst);

// asynchronous copy
// the copy is performed after all the currently queued operations in backend_src
// backend_dst will wait for the copy to complete before performing other operations
// automatic fallback to sync copy if async is not supported
GGML_API void ggml_backend_tensor_copy_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, struct ggml_tensor * src, struct ggml_tensor * dst);
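The comment above spells out the async copy contract. A minimal caller-side sketch, assuming backend_src and backend_dst are already initialized and src/dst are same-shaped tensors allocated in buffers owned by those backends (hypothetical names):

    // queue the copy on backend_src; falls back to a blocking copy if async is unsupported
    ggml_backend_tensor_copy_async(backend_src, backend_dst, src, dst);
    // block until backend_dst has the data before reading it on the host
    ggml_backend_synchronize(backend_dst);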

// events
GGML_API ggml_backend_event_t ggml_backend_event_new        (ggml_backend_t backend);
GGML_API void                 ggml_backend_event_free       (ggml_backend_event_t event);
GGML_API void                 ggml_backend_event_record     (ggml_backend_event_t event);
GGML_API void                 ggml_backend_event_synchronize(ggml_backend_event_t event);
GGML_API void                 ggml_backend_event_wait       (ggml_backend_t backend, ggml_backend_event_t event); // wait async on event

//
// CPU backend
//

GGML_API ggml_backend_t ggml_backend_cpu_init(void);

GGML_API GGML_CALL bool ggml_backend_is_cpu              (ggml_backend_t backend);
GGML_API           void ggml_backend_cpu_set_n_threads   (ggml_backend_t backend_cpu, int n_threads);
GGML_API           void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_callback abort_callback, void * abort_callback_data);

// Create a backend buffer from an existing pointer
GGML_API GGML_CALL ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size);

GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void);

#ifdef GGML_USE_CPU_HBM
GGML_API ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void);
#endif
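A minimal sketch of driving the CPU backend directly, assuming graph is a struct ggml_cgraph * that was built and allocated elsewhere (hypothetical name):

    ggml_backend_t cpu = ggml_backend_cpu_init();
    ggml_backend_cpu_set_n_threads(cpu, 8); // use 8 compute threads
    enum ggml_status status = ggml_backend_graph_compute(cpu, graph);
    ggml_backend_free(cpu);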

//
// Backend registry
//

// The backend registry is a registry of all the available backends, and allows initializing backends in a generic way

GGML_API size_t                     ggml_backend_reg_get_count(void);
GGML_API size_t                     ggml_backend_reg_find_by_name(const char * name);
GGML_API ggml_backend_t             ggml_backend_reg_init_backend_from_str(const char * backend_str); // str is name[:params]
GGML_API const char *               ggml_backend_reg_get_name(size_t i);
GGML_API ggml_backend_t             ggml_backend_reg_init_backend(size_t i, const char * params); // params is backend-specific
GGML_API ggml_backend_buffer_type_t ggml_backend_reg_get_default_buffer_type(size_t i);
GGML_API ggml_backend_buffer_t      ggml_backend_reg_alloc_buffer(size_t i, size_t size);
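A minimal sketch of using the registry to enumerate backends and initialize one by name ("CPU" is expected to always be registered; GPU entries such as "CUDA0" depend on the build):

    #include <stdio.h>

    static void list_backends(void) {
        for (size_t i = 0; i < ggml_backend_reg_get_count(); ++i) {
            printf("backend %zu: %s\n", i, ggml_backend_reg_get_name(i));
        }
        ggml_backend_t backend = ggml_backend_reg_init_backend_from_str("CPU");
        ggml_backend_free(backend);
    }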

//
// Backend scheduler
//

// The backend scheduler allows for multiple backends to be used together
// Handles compute buffer allocation, assignment of tensors to backends, and copying of tensors between backends
// The backends are selected based on:
// - the backend that supports the operation
// - the location of the pre-allocated tensors (e.g. the weights)
/*
  Example usage:

    // operations that use tensors allocated in a buffer with USAGE_WEIGHTS will be assigned
    // preferably to run on the same backend as the buffer
    ggml_backend_buffer_set_usage(buf_weights, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);

    sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, GGML_DEFAULT_GRAPH_SIZE, false);

    // initialize buffers from a max size graph (optional)
    reserve_graph = build_graph(sched, max_batch_size);

    // manually assign nodes to a backend (optional, should not be needed in most cases)
    struct ggml_tensor * node = ggml_mul_mat(ctx, ...);
    ggml_backend_sched_set_tensor_backend(sched, node, backend_gpu);

    ggml_backend_sched_reserve(sched, reserve_graph);

    // compute
    graph = build_graph(sched);
    ggml_backend_sched_graph_compute(sched, graph);

    // if there are graph inputs:
    ggml_backend_sched_reset(sched);
    ggml_backend_sched_alloc_graph(sched, graph);
    ggml_backend_tensor_set(input_tensor, ...);
    ggml_backend_sched_graph_compute(sched, graph);
  }
*/

struct ggml_backend_sched;
typedef struct ggml_backend_sched * ggml_backend_sched_t;

// when ask == true, the scheduler wants to know if the user wants to observe this node
// this allows the scheduler to batch nodes together in order to evaluate them in a single call
//
// when ask == false, the scheduler is passing the node tensor to the user for observation
// if the user returns false, the scheduler will cancel the graph compute
//
typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * user_data);
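A minimal sketch of such a callback (hypothetical name) that observes only matrix multiplications and never aborts the compute:

    #include <stdio.h>

    static bool debug_eval_cb(struct ggml_tensor * t, bool ask, void * user_data) {
        (void) user_data;
        if (ask) {
            // ask phase: tell the scheduler which nodes we want to observe
            return t->op == GGML_OP_MUL_MAT;
        }
        // observe phase: the node has been computed and its data can be inspected here
        fprintf(stderr, "computed node '%s'\n", t->name);
        return true; // returning false would cancel the graph compute
    }

It would be registered with ggml_backend_sched_set_eval_callback(sched, debug_eval_cb, NULL), declared further below.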

// Initialize a backend scheduler
GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel);
GGML_API void                 ggml_backend_sched_free(ggml_backend_sched_t sched);

// Initialize backend buffers from a measure graph
GGML_API bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph);

// Get the number of splits of the last graph
GGML_API int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched);
GGML_API int ggml_backend_sched_get_n_copies(ggml_backend_sched_t sched);

GGML_API size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend);

GGML_API void           ggml_backend_sched_set_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend);
GGML_API ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node);

// Allocate and compute graph on the backend scheduler
GGML_API bool             ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
GGML_API enum ggml_status ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
GGML_API enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
GGML_API void             ggml_backend_sched_synchronize(ggml_backend_sched_t sched);

// Reset all assignments and allocators - must be called before changing the node backends
GGML_API void ggml_backend_sched_reset(ggml_backend_sched_t sched);

// Set a callback to be called for each resulting node during graph compute
GGML_API void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data);

//
// Utils
//

struct ggml_backend_graph_copy {
    ggml_backend_buffer_t buffer;
    struct ggml_context * ctx_allocated;
    struct ggml_context * ctx_unallocated;
    struct ggml_cgraph * graph;
};

// Copy a graph to a different backend
GGML_API struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, struct ggml_cgraph * graph);
GGML_API void                           ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy);

typedef bool (*GGML_CALL ggml_backend_eval_callback)(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data);

// Compare the output of two backends
GGML_API bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data);
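A minimal comparison callback sketch (hypothetical name); t1 and t2 hold the same graph node as computed by the two backends, and a real callback would read both back and compare their values:

    #include <stdio.h>

    static bool log_compare_cb(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data) {
        (void) t2; (void) user_data;
        fprintf(stderr, "comparing node %d ('%s')\n", node_index, t1->name);
        return true; // returning false stops the comparison early
    }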

// Tensor initialization
GGML_API void ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr);
GGML_API void ggml_backend_view_init(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);

#ifdef __cplusplus
}
#endif
1853  bindings/ruby/ext/ggml-common.h  Normal file
File diff suppressed because it is too large

43  bindings/ruby/ext/ggml-cuda.h  Normal file
@@ -0,0 +1,43 @@
#pragma once

#include "ggml.h"
#include "ggml-backend.h"

#ifdef GGML_USE_HIPBLAS
#define GGML_CUDA_NAME "ROCm"
#define GGML_CUBLAS_NAME "hipBLAS"
#else
#define GGML_CUDA_NAME "CUDA"
#define GGML_CUBLAS_NAME "cuBLAS"
#endif

#ifdef __cplusplus
extern "C" {
#endif

#define GGML_CUDA_MAX_DEVICES 16

// backend API
GGML_API GGML_CALL ggml_backend_t ggml_backend_cuda_init(int device);

GGML_API GGML_CALL bool ggml_backend_is_cuda(ggml_backend_t backend);

// device buffer
GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device);

// split tensor buffer that splits matrices by rows across multiple devices
GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(const float * tensor_split);

// pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type(void);

GGML_API GGML_CALL int  ggml_backend_cuda_get_device_count(void);
GGML_API GGML_CALL void ggml_backend_cuda_get_device_description(int device, char * description, size_t description_size);
GGML_API GGML_CALL void ggml_backend_cuda_get_device_memory(int device, size_t * free, size_t * total);

GGML_API GGML_CALL bool ggml_backend_cuda_register_host_buffer(void * buffer, size_t size);
GGML_API GGML_CALL void ggml_backend_cuda_unregister_host_buffer(void * buffer);
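A short sketch of the device-query API above, assuming at least one CUDA device is present (printf requires <stdio.h>):

    int n_dev = ggml_backend_cuda_get_device_count();
    for (int i = 0; i < n_dev; ++i) {
        char   desc[128];
        size_t free_mem, total_mem;
        ggml_backend_cuda_get_device_description(i, desc, sizeof(desc));
        ggml_backend_cuda_get_device_memory(i, &free_mem, &total_mem);
        printf("device %d: %s, %zu of %zu bytes free\n", i, desc, free_mem, total_mem);
    }
    ggml_backend_t cuda = ggml_backend_cuda_init(0); // device 0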

#ifdef __cplusplus
}
#endif
272  bindings/ruby/ext/ggml-impl.h  Normal file
@@ -0,0 +1,272 @@
#pragma once

#include "ggml.h"

// GGML internal header

#include <assert.h>
#include <stdlib.h> // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
#include <stddef.h>
#include <stdbool.h>
#include <string.h> // memcpy
#include <math.h>   // fabsf

#ifdef __cplusplus
extern "C" {
#endif

// static_assert should be a #define, but if it's not,
// fall back to the _Static_assert C11 keyword.
// if C99 - static_assert is noop
// ref: https://stackoverflow.com/a/53923785/4039976
#ifndef __cplusplus
#ifndef static_assert
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
#define static_assert(cond, msg) _Static_assert(cond, msg)
#else
#define static_assert(cond, msg) struct global_scope_noop_trick
#endif
#endif
#endif

// __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
#if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__))
#ifndef __FMA__
#define __FMA__
#endif
#ifndef __F16C__
#define __F16C__
#endif
#endif

// __SSE3__ and __SSSE3__ are not defined in MSVC, but SSE3/SSSE3 are present when AVX/AVX2/AVX512 are available
#if defined(_MSC_VER) && (defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__))
#ifndef __SSE3__
#define __SSE3__
#endif
#ifndef __SSSE3__
#define __SSSE3__
#endif
#endif

// 16-bit float
// on Arm, we use __fp16
// on x86, we use uint16_t
#if defined(__ARM_NEON) && !defined(_MSC_VER)

// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
//
//   $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
//
#include <arm_neon.h>

typedef __fp16 ggml_fp16_internal_t;

#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)

#define GGML_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)

static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    ggml_fp16_internal_t tmp;
    memcpy(&tmp, &h, sizeof(ggml_fp16_t));
    return (float)tmp;
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
    ggml_fp16_t res;
    ggml_fp16_internal_t tmp = f;
    memcpy(&res, &tmp, sizeof(ggml_fp16_t));
    return res;
}

#else

typedef uint16_t ggml_fp16_internal_t;

#ifdef __wasm_simd128__
#include <wasm_simd128.h>
#else
#ifdef __POWER9_VECTOR__
#include <altivec.h>
#undef bool
#define bool _Bool
#else
#if defined(_MSC_VER) || defined(__MINGW32__)
#include <intrin.h>
#else
#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
#if !defined(__riscv)
#include <immintrin.h>
#endif
#endif
#endif
#endif
#endif

#ifdef __riscv_v_intrinsic
#include <riscv_vector.h>
#endif

#ifdef __F16C__

#ifdef _MSC_VER
#define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
#define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
#else
#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
#endif

#elif defined(__POWER9_VECTOR__)

#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
/* the inline asm below is about 12% faster than the lookup method */
#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)

static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    register float f;
    register double d;
    __asm__(
        "mtfprd %0,%2\n"
        "xscvhpdp %0,%0\n"
        "frsp %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=f"(f):
        /* in */   "r"(h));
    return f;
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
    register double d;
    register ggml_fp16_t r;
    __asm__( /* xscvdphp can work on double or single precision */
        "xscvdphp %0,%2\n"
        "mffprd %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=r"(r):
        /* in */   "f"(f));
    return r;
}

#else

// FP16 <-> FP32
// ref: https://github.com/Maratyszcza/FP16

static inline float fp32_from_bits(uint32_t w) {
    union {
        uint32_t as_bits;
        float as_value;
    } fp32;
    fp32.as_bits = w;
    return fp32.as_value;
}

static inline uint32_t fp32_to_bits(float f) {
    union {
        float as_value;
        uint32_t as_bits;
    } fp32;
    fp32.as_value = f;
    return fp32.as_bits;
}

static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    const uint32_t w = (uint32_t) h << 16;
    const uint32_t sign = w & UINT32_C(0x80000000);
    const uint32_t two_w = w + w;

    const uint32_t exp_offset = UINT32_C(0xE0) << 23;
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float exp_scale = 0x1.0p-112f;
#else
    const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
#endif
    const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;

    const uint32_t magic_mask = UINT32_C(126) << 23;
    const float magic_bias = 0.5f;
    const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;

    const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
    const uint32_t result = sign |
        (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
    return fp32_from_bits(result);
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float scale_to_inf = 0x1.0p+112f;
    const float scale_to_zero = 0x1.0p-110f;
#else
    const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
    const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
#endif
    float base = (fabsf(f) * scale_to_inf) * scale_to_zero;

    const uint32_t w = fp32_to_bits(f);
    const uint32_t shl1_w = w + w;
    const uint32_t sign = w & UINT32_C(0x80000000);
    uint32_t bias = shl1_w & UINT32_C(0xFF000000);
    if (bias < UINT32_C(0x71000000)) {
        bias = UINT32_C(0x71000000);
    }

    base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
    const uint32_t bits = fp32_to_bits(base);
    const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
    const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
    const uint32_t nonsign = exp_bits + mantissa_bits;
    return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
}

#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)

#endif // __F16C__

#endif // __ARM_NEON

// precomputed f32 table for f16 (256 KB)
// defined in ggml.c, initialized in ggml_init()
extern float ggml_table_f32_f16[1 << 16];

// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32,
// so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON.
// This is also true for POWER9.
#if !defined(GGML_FP16_TO_FP32)
inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
    uint16_t s;
    memcpy(&s, &f, sizeof(uint16_t));
    return ggml_table_f32_f16[s];
}

#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x)
#endif

#if !defined(GGML_FP32_TO_FP16)
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
#endif
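A quick illustration of the resulting macros (a sketch; the table-based GGML_FP16_TO_FP32 path needs ggml_init() to have populated ggml_table_f32_f16 first):

    float x = 3.14159f;
    ggml_fp16_t h = GGML_FP32_TO_FP16(x); // rounds to the nearest representable half
    float y = GGML_FP16_TO_FP32(h);       // about 3.1406; half precision keeps roughly 3 decimal digits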

#define GGML_HASHTABLE_FULL ((size_t)-1)
#define GGML_HASHTABLE_ALREADY_EXISTS ((size_t)-2)

struct ggml_hash_set ggml_hash_set_new(size_t size);

bool ggml_hash_contains(const struct ggml_hash_set hash_set, struct ggml_tensor * key);

// returns GGML_HASHTABLE_FULL if table is full, otherwise the current index of the key or where it should be inserted
size_t ggml_hash_find(const struct ggml_hash_set hash_set, struct ggml_tensor * key);

// returns GGML_HASHTABLE_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
size_t ggml_hash_insert(struct ggml_hash_set hash_set, struct ggml_tensor * key);

// return index, asserts if table is full
size_t ggml_hash_find_or_insert(struct ggml_hash_set hash_set, struct ggml_tensor * key);
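A brief usage sketch, with a and b standing in for struct ggml_tensor pointers (hypothetical names; cleanup of the set's internal storage is omitted here):

    struct ggml_hash_set visited = ggml_hash_set_new(64);
    ggml_hash_insert(visited, a);
    if (ggml_hash_insert(visited, a) == GGML_HASHTABLE_ALREADY_EXISTS) {
        // a second insert of the same key is reported, not stored twice
    }
    bool has_b = ggml_hash_contains(visited, b); // false until b has been inserted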

#ifdef __cplusplus
}
#endif
46  bindings/ruby/ext/ggml-kompute.h  Normal file
@@ -0,0 +1,46 @@
#pragma once

#include "ggml.h"
#include "ggml-backend.h"

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#ifdef __cplusplus
extern "C" {
#endif

struct ggml_vk_device {
    int index;
    int type; // same as VkPhysicalDeviceType
    size_t heapSize;
    const char * name;
    const char * vendor;
    int subgroupSize;
    uint64_t bufferAlignment;
    uint64_t maxAlloc;
};

struct ggml_vk_device * ggml_vk_available_devices(size_t memoryRequired, size_t * count);
bool ggml_vk_get_device(struct ggml_vk_device * device, size_t memoryRequired, const char * name);
bool ggml_vk_has_vulkan(void);
bool ggml_vk_has_device(void);
struct ggml_vk_device ggml_vk_current_device(void);

//
// backend API
//

// forward declaration
typedef struct ggml_backend * ggml_backend_t;

GGML_API ggml_backend_t ggml_backend_kompute_init(int device);

GGML_API bool ggml_backend_is_kompute(ggml_backend_t backend);

GGML_API ggml_backend_buffer_type_t ggml_backend_kompute_buffer_type(int device);

#ifdef __cplusplus
}
#endif
66  bindings/ruby/ext/ggml-metal.h  Normal file
@@ -0,0 +1,66 @@
// An interface for computing a ggml_cgraph with Metal
//
// This is a fully functional interface that extends ggml with GPU support for Apple devices.
// A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, OpenCL, etc.)
//
// How it works:
//
// As long as your program can create and evaluate a ggml_cgraph on the CPU, you can use this
// interface to evaluate the same graph on the GPU. Instead of using ggml_graph_compute(), you
// use ggml_metal_graph_compute() (or ggml_vulkan_graph_compute(), etc.)
//
// You only need to make sure that all memory buffers that you used during the graph creation
// are mapped to the device memory with the ggml_metal_add_buffer() function. This mapping is
// used during the graph evaluation to determine the arguments of the compute kernels.
//
// Synchronization between device and host memory (for example for input and output tensors)
// is done with the ggml_metal_set_tensor() and ggml_metal_get_tensor() functions.
//

#pragma once

#include "ggml.h"
#include "ggml-backend.h"

#include <stddef.h>
#include <stdbool.h>

// max memory buffers that can be mapped to the device
#define GGML_METAL_MAX_BUFFERS 64

struct ggml_tensor;
struct ggml_cgraph;

#ifdef __cplusplus
extern "C" {
#endif

//
// backend API
// user-code should use only these functions
//

GGML_API void ggml_backend_metal_log_set_callback(ggml_log_callback log_callback, void * user_data);

GGML_API ggml_backend_t ggml_backend_metal_init(void);

GGML_API bool ggml_backend_is_metal(ggml_backend_t backend);

GGML_API GGML_CALL ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size);

GGML_API void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb);

GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void);

// helper to check if the device supports a specific family
// ideally, the user code should be doing these checks
// ref: https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
GGML_API bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family);

// capture all command buffers committed the next time `ggml_backend_graph_compute` is called
GGML_API void ggml_backend_metal_capture_next_compute(ggml_backend_t backend);
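A minimal usage sketch, assuming graph is a struct ggml_cgraph * whose buffers were allocated against ggml_backend_metal_buffer_type() (hypothetical name):

    ggml_backend_t metal = ggml_backend_metal_init();
    if (metal != NULL && ggml_backend_is_metal(metal)) {
        ggml_backend_metal_set_n_cb(metal, 1); // number of command buffers used for the graph
        ggml_backend_graph_compute(metal, graph);
        ggml_backend_free(metal);
    }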

#ifdef __cplusplus
}
#endif
36  bindings/ruby/ext/ggml-opencl.h  Normal file
@@ -0,0 +1,36 @@
#pragma once

#include "ggml.h"
#include "ggml-backend.h"

#ifdef __cplusplus
extern "C" {
#endif

GGML_API void ggml_cl_init(void);

GGML_API void   ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
GGML_API void   ggml_cl_add(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
GGML_API bool   ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, const struct ggml_tensor * dst);
GGML_API size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
GGML_API void   ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);

// GGML_API void * ggml_cl_host_malloc(size_t size);
// GGML_API void   ggml_cl_host_free(void * ptr);

GGML_API void ggml_cl_free_data(const struct ggml_tensor * tensor);

GGML_API void ggml_cl_transform_tensor(void * data, struct ggml_tensor * tensor);

// backend API

// GGML_API ggml_backend_t ggml_backend_opencl_init(void);

// GGML_API bool ggml_backend_is_opencl(ggml_backend_t backend);

GGML_API ggml_backend_buffer_type_t ggml_backend_opencl_buffer_type(void);
// GGML_API ggml_backend_buffer_type_t ggml_backend_opencl_host_buffer_type(void);

#ifdef __cplusplus
}
#endif
12678  bindings/ruby/ext/ggml-quants.c  Normal file
File diff suppressed because it is too large

133  bindings/ruby/ext/ggml-quants.h  Normal file
@@ -0,0 +1,133 @@
#pragma once

#define GGML_COMMON_DECL_C
#include "ggml-common.h"

#include "ggml.h"

// GGML internal header

#ifdef __cplusplus
extern "C" {
#endif

// Quantization
void quantize_row_q4_0_reference(const float * GGML_RESTRICT x, block_q4_0 * GGML_RESTRICT y, int64_t k);
void quantize_row_q4_1_reference(const float * GGML_RESTRICT x, block_q4_1 * GGML_RESTRICT y, int64_t k);
void quantize_row_q5_0_reference(const float * GGML_RESTRICT x, block_q5_0 * GGML_RESTRICT y, int64_t k);
void quantize_row_q5_1_reference(const float * GGML_RESTRICT x, block_q5_1 * GGML_RESTRICT y, int64_t k);
void quantize_row_q8_0_reference(const float * GGML_RESTRICT x, block_q8_0 * GGML_RESTRICT y, int64_t k);
void quantize_row_q8_1_reference(const float * GGML_RESTRICT x, block_q8_1 * GGML_RESTRICT y, int64_t k);

void quantize_row_q2_K_reference(const float * GGML_RESTRICT x, block_q2_K * GGML_RESTRICT y, int64_t k);
void quantize_row_q3_K_reference(const float * GGML_RESTRICT x, block_q3_K * GGML_RESTRICT y, int64_t k);
void quantize_row_q4_K_reference(const float * GGML_RESTRICT x, block_q4_K * GGML_RESTRICT y, int64_t k);
void quantize_row_q5_K_reference(const float * GGML_RESTRICT x, block_q5_K * GGML_RESTRICT y, int64_t k);
void quantize_row_q6_K_reference(const float * GGML_RESTRICT x, block_q6_K * GGML_RESTRICT y, int64_t k);
void quantize_row_q8_K_reference(const float * GGML_RESTRICT x, block_q8_K * GGML_RESTRICT y, int64_t k);

void quantize_row_iq3_xxs_reference(const float * GGML_RESTRICT x, block_iq3_xxs * GGML_RESTRICT y, int64_t k);
void quantize_row_iq4_nl_reference (const float * GGML_RESTRICT x, block_iq4_nl  * GGML_RESTRICT y, int64_t k);
void quantize_row_iq4_xs_reference (const float * GGML_RESTRICT x, block_iq4_xs  * GGML_RESTRICT y, int64_t k);
void quantize_row_iq3_s_reference  (const float * GGML_RESTRICT x, block_iq3_s   * GGML_RESTRICT y, int64_t k);
void quantize_row_iq2_s_reference  (const float * GGML_RESTRICT x, block_iq2_s   * GGML_RESTRICT y, int64_t k);

void quantize_row_q4_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
void quantize_row_q4_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
void quantize_row_q5_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
void quantize_row_q5_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);

void quantize_row_q2_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
void quantize_row_q3_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
void quantize_row_q4_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
void quantize_row_q5_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
void quantize_row_q6_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
void quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);

void quantize_row_iq3_xxs(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
void quantize_row_iq4_nl (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
void quantize_row_iq4_xs (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
void quantize_row_iq3_s  (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
void quantize_row_iq2_s  (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);

// Dequantization
void dequantize_row_q4_0(const block_q4_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void dequantize_row_q4_1(const block_q4_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void dequantize_row_q5_0(const block_q5_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void dequantize_row_q5_1(const block_q5_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void dequantize_row_q8_0(const block_q8_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
//void dequantize_row_q8_1(const block_q8_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);

void dequantize_row_q2_K(const block_q2_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void dequantize_row_q3_K(const block_q3_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void dequantize_row_q4_K(const block_q4_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void dequantize_row_q5_K(const block_q5_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void dequantize_row_q6_K(const block_q6_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void dequantize_row_q8_K(const block_q8_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);

void dequantize_row_iq2_xxs(const block_iq2_xxs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void dequantize_row_iq2_xs (const block_iq2_xs  * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void dequantize_row_iq2_s  (const block_iq2_s   * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void dequantize_row_iq3_xxs(const block_iq3_xxs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void dequantize_row_iq1_s  (const block_iq1_s   * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void dequantize_row_iq1_m  (const block_iq1_m   * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void dequantize_row_iq4_nl (const block_iq4_nl  * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void dequantize_row_iq4_xs (const block_iq4_xs  * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void dequantize_row_iq3_s  (const block_iq3_s   * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);

// Dot product
void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);

void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);

void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
void ggml_vec_dot_iq2_xs_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
void ggml_vec_dot_iq2_s_q8_K  (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
void ggml_vec_dot_iq1_s_q8_K  (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
void ggml_vec_dot_iq1_m_q8_K  (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
void ggml_vec_dot_iq4_nl_q8_0 (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
void ggml_vec_dot_iq4_xs_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
void ggml_vec_dot_iq3_s_q8_K  (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);

// Quantization utilizing an importance matrix (a.k.a. "Activation aWare Quantization")
size_t quantize_iq2_xxs(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
size_t quantize_iq2_xs (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
size_t quantize_iq2_s  (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
size_t quantize_iq3_xxs(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
size_t quantize_iq1_s  (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
size_t quantize_iq1_m  (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
size_t quantize_iq4_nl (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
size_t quantize_iq4_xs (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
size_t quantize_iq3_s  (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);

size_t quantize_q2_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
size_t quantize_q3_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
size_t quantize_q4_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
size_t quantize_q5_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
size_t quantize_q6_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
size_t quantize_q4_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
size_t quantize_q4_1(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
size_t quantize_q5_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
size_t quantize_q5_1(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
size_t quantize_q8_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
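A minimal row-level round-trip sketch, assuming QK8_0 and block_q8_0 from ggml-common.h and a row length that is a multiple of the block size:

    enum { K = 128 }; // must be a multiple of QK8_0 (32)
    float src[K], back[K];
    block_q8_0 q[K / QK8_0];
    for (int i = 0; i < K; ++i) src[i] = 0.01f * i;
    quantize_row_q8_0_reference(src, q, K); // one fp16 scale + 32 int8 values per block
    dequantize_row_q8_0(q, back, K);        // back[i] now approximates src[i]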

void iq2xs_init_impl(enum ggml_type type);
void iq2xs_free_impl(enum ggml_type type);
void iq3xs_init_impl(int grid_size);
void iq3xs_free_impl(int grid_size);

#ifdef __cplusplus
}
#endif
49  bindings/ruby/ext/ggml-sycl.h  Normal file
@@ -0,0 +1,49 @@
//
// MIT license
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: MIT
//

#pragma once

#include "ggml.h"
#include "ggml-backend.h"

#ifdef __cplusplus
extern "C" {
#endif

#define GGML_SYCL_MAX_DEVICES 48
#define GGML_SYCL_NAME "SYCL"

// backend API
GGML_API ggml_backend_t ggml_backend_sycl_init(int device);

// device buffer
GGML_API ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(int device);

// split tensor buffer that splits matrices by rows across multiple devices
GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_sycl_split_buffer_type(const float * tensor_split);

// pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
GGML_API ggml_backend_buffer_type_t ggml_backend_sycl_host_buffer_type(void);

GGML_API           void ggml_backend_sycl_print_sycl_devices(void);
GGML_API GGML_CALL void ggml_sycl_get_gpu_list(int * id_list, int max_len);
GGML_API GGML_CALL void ggml_sycl_get_device_description(int device, char * description, size_t description_size);
GGML_API GGML_CALL int  ggml_backend_sycl_get_device_count();
GGML_API GGML_CALL void ggml_backend_sycl_get_device_memory(int device, size_t * free, size_t * total);
GGML_API GGML_CALL int  ggml_backend_sycl_get_device_index(int device_id);

// TODO: these are temporary
// ref: https://github.com/ggerganov/llama.cpp/pull/6022#issuecomment-1992615670
GGML_API GGML_CALL int  ggml_backend_sycl_get_device_id(int device_index);
GGML_API GGML_CALL void ggml_backend_sycl_set_single_device_mode(int main_gpu_id);
GGML_API GGML_CALL void ggml_backend_sycl_set_mul_device_mode();

// SYCL doesn't support registering host memory, keep here for reference
// GGML_API GGML_CALL bool ggml_backend_sycl_register_host_buffer(void * buffer, size_t size);
// GGML_API GGML_CALL void ggml_backend_sycl_unregister_host_buffer(void * buffer);
#ifdef __cplusplus
}
#endif
29  bindings/ruby/ext/ggml-vulkan.h  Normal file
@@ -0,0 +1,29 @@
#pragma once

#include "ggml.h"
#include "ggml-backend.h"

#ifdef __cplusplus
extern "C" {
#endif

#define GGML_VK_NAME "Vulkan"
#define GGML_VK_MAX_DEVICES 16

GGML_API void ggml_vk_instance_init(void);

// backend API
GGML_API GGML_CALL ggml_backend_t ggml_backend_vk_init(size_t dev_num);

GGML_API GGML_CALL bool ggml_backend_is_vk(ggml_backend_t backend);
GGML_API GGML_CALL int  ggml_backend_vk_get_device_count(void);
GGML_API GGML_CALL void ggml_backend_vk_get_device_description(int device, char * description, size_t description_size);
GGML_API GGML_CALL void ggml_backend_vk_get_device_memory(int device, size_t * free, size_t * total);

GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t dev_num);
// pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_vk_host_buffer_type(void);

#ifdef __cplusplus
}
#endif
@@ -87,7 +87,7 @@ static VALUE ruby_whisper_initialize(int argc, VALUE *argv, VALUE self) {
   if (!rb_respond_to(whisper_model_file_path, rb_intern("to_s"))) {
     rb_raise(rb_eRuntimeError, "Expected file path to model to initialize Whisper::Context");
   }
-  rw->context = whisper_init_from_file(StringValueCStr(whisper_model_file_path));
+  rw->context = whisper_init_from_file_with_params(StringValueCStr(whisper_model_file_path), whisper_context_default_params());
   if (rw->context == nullptr) {
     rb_raise(rb_eRuntimeError, "error: failed to initialize whisper context");
   }
@@ -311,12 +311,6 @@ static VALUE ruby_whisper_params_get_split_on_word(VALUE self) {
 static VALUE ruby_whisper_params_set_split_on_word(VALUE self, VALUE value) {
   BOOL_PARAMS_SETTER(self, split_on_word, value)
 }
-static VALUE ruby_whisper_params_get_speed_up(VALUE self) {
-  BOOL_PARAMS_GETTER(self, speed_up)
-}
-static VALUE ruby_whisper_params_set_speed_up(VALUE self, VALUE value) {
-  BOOL_PARAMS_SETTER(self, speed_up, value)
-}
 static VALUE ruby_whisper_params_get_diarize(VALUE self) {
   ruby_whisper_params *rwp;
   Data_Get_Struct(self, ruby_whisper_params, rwp);
@@ -408,8 +402,6 @@ void Init_whisper() {
   rb_define_method(cParams, "token_timestamps=", ruby_whisper_params_set_token_timestamps, 1);
   rb_define_method(cParams, "split_on_word", ruby_whisper_params_get_split_on_word, 0);
   rb_define_method(cParams, "split_on_word=", ruby_whisper_params_set_split_on_word, 1);
-  rb_define_method(cParams, "speed_up", ruby_whisper_params_get_speed_up, 0);
-  rb_define_method(cParams, "speed_up=", ruby_whisper_params_set_speed_up, 1);
   rb_define_method(cParams, "diarize", ruby_whisper_params_get_diarize, 0);
   rb_define_method(cParams, "diarize=", ruby_whisper_params_set_diarize, 1);

@@ -117,13 +117,6 @@ class TestWhisper < Test::Unit::TestCase
     assert !@params.split_on_word
   end

-  def test_speed_up
-    @params.speed_up = true
-    assert @params.speed_up
-    @params.speed_up = false
-    assert !@params.speed_up
-  end
-
   def test_whisper
     @whisper = Whisper::Context.new(File.join(TOPDIR, '..', '..', 'models', 'ggml-base.en.bin'))
     params = Whisper::Params.new
28  bindings/ruby/whispercpp.gemspec  Normal file
@@ -0,0 +1,28 @@
Gem::Specification.new do |s|
|
||||||
|
s.name = "whispercpp"
|
||||||
|
s.authors = ["Georgi Gerganov", "Todd A. Fisher"]
|
||||||
|
s.version = '1.3.0'
|
||||||
|
s.date = '2024-05-14'
|
||||||
|
s.description = %q{High-performance inference of OpenAI's Whisper automatic speech recognition (ASR) model via Ruby}
|
||||||
|
s.email = 'todd.fisher@gmail.com'
|
||||||
|
s.extra_rdoc_files = ['LICENSE', 'README.md']
|
||||||
|
|
||||||
|
s.files = ["LICENSE", "README.md", "Rakefile", "ext/extconf.rb", "ext/ggml.c", "ext/ruby_whisper.cpp", "ext/whisper.cpp", "ext/dr_wav.h", "ext/ggml.h", "ext/ruby_whisper.h", "ext/whisper.h"]
|
||||||
|
|
||||||
|
#### Load-time details
|
||||||
|
s.require_paths = ['lib','ext']
|
||||||
|
s.summary = %q{Ruby whisper.cpp bindings}
|
||||||
|
s.test_files = ["tests/test_whisper.rb"]
|
||||||
|
|
||||||
|
s.extensions << 'ext/extconf.rb'
|
||||||
|
|
||||||
|
|
||||||
|
#### Documentation and testing.
|
||||||
|
s.homepage = 'https://github.com/ggerganov/whisper.cpp'
|
||||||
|
s.rdoc_options = ['--main', '../../README.md']
|
||||||
|
|
||||||
|
|
||||||
|
s.platform = Gem::Platform::RUBY
|
||||||
|
|
||||||
|
s.licenses = ['MIT']
|
||||||
|
end
|
cmake/FindFFmpeg.cmake (new file, 163 lines)
@@ -0,0 +1,163 @@
+# From
+# https://github.com/snikulov/cmake-modules/blob/master/FindFFmpeg.cmake
+#
+# vim: ts=2 sw=2
+# - Try to find the required ffmpeg components(default: AVFORMAT, AVUTIL, AVCODEC)
+#
+# Once done this will define
+#  FFMPEG_FOUND         - System has the all required components.
+#  FFMPEG_INCLUDE_DIRS  - Include directory necessary for using the required components headers.
+#  FFMPEG_LIBRARIES     - Link these to use the required ffmpeg components.
+#  FFMPEG_DEFINITIONS   - Compiler switches required for using the required ffmpeg components.
+#
+# For each of the components it will additionally set.
+#   - AVCODEC
+#   - AVDEVICE
+#   - AVFORMAT
+#   - AVFILTER
+#   - AVUTIL
+#   - POSTPROC
+#   - SWSCALE
+# the following variables will be defined
+#  <component>_FOUND        - System has <component>
+#  <component>_INCLUDE_DIRS - Include directory necessary for using the <component> headers
+#  <component>_LIBRARIES    - Link these to use <component>
+#  <component>_DEFINITIONS  - Compiler switches required for using <component>
+#  <component>_VERSION      - The components version
+#
+# Copyright (c) 2006, Matthias Kretz, <kretz@kde.org>
+# Copyright (c) 2008, Alexander Neundorf, <neundorf@kde.org>
+# Copyright (c) 2011, Michael Jansen, <kde@michael-jansen.biz>
+#
+# Redistribution and use is allowed according to the terms of the BSD license.
+# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
+
+include(FindPackageHandleStandardArgs)
+
+# The default components were taken from a survey over other FindFFMPEG.cmake files
+if (NOT FFmpeg_FIND_COMPONENTS)
+  set(FFmpeg_FIND_COMPONENTS AVFORMAT AVCODEC AVUTIL SWRESAMPLE)
+endif()
+
+#
+### Macro: set_component_found
+#
+# Marks the given component as found if both *_LIBRARIES AND *_INCLUDE_DIRS is present.
+#
+macro(set_component_found _component )
+  if (${_component}_LIBRARIES AND ${_component}_INCLUDE_DIRS)
+    message(DEBUG "  - ${_component} found.")
+    set(${_component}_FOUND TRUE)
+  else ()
+    message(DEBUG "  - ${_component} not found.")
+  endif ()
+endmacro()
+
+#
+### Macro: find_component
+#
+# Checks for the given component by invoking pkgconfig and then looking up the libraries and
+# include directories.
+#
+macro(find_component _component _pkgconfig _library _header)
+
+  if (NOT WIN32)
+    # use pkg-config to get the directories and then use these values
+    # in the FIND_PATH() and FIND_LIBRARY() calls
+    find_package(PkgConfig)
+    if (PKG_CONFIG_FOUND)
+      pkg_check_modules(PC_${_component} ${_pkgconfig})
+      message(STATUS "Pkgconfig found: ${PC_${_component}_INCLUDEDIR}")
+      message(STATUS "Pkgconfig found: ${PC_${_component}_INCLUDE_DIRS}")
+      message(STATUS "${PC_${_component}_CFLAGS}")
+    endif ()
+  endif (NOT WIN32)
+
+
+  find_path(${_component}_INCLUDE_DIRS ${_header}
+    HINTS
+      ${PC_${_component}_INCLUDEDIR}
+      ${PC_${_component}_INCLUDE_DIRS}
+    PATH_SUFFIXES
+      ffmpeg
+  )
+
+  # CMake's default is to search first for shared libraries and then for static libraries.
+  # Todo later: add option to prefer static libs over dynamic:
+  find_library(${_component}_LIBRARIES NAMES ${_library} lib${_library}.a
+    HINTS
+      ${PC_${_component}_LIBDIR}
+      ${PC_${_component}_LIBRARY_DIRS}
+  )
+
+  set(${_component}_DEFINITIONS ${PC_${_component}_CFLAGS_OTHER} CACHE STRING "The ${_component} CFLAGS.")
+  set(${_component}_VERSION     ${PC_${_component}_VERSION}      CACHE STRING "The ${_component} version number.")
+
+  set_component_found(${_component})
+
+  mark_as_advanced(
+    ${_component}_INCLUDE_DIRS
+    ${_component}_LIBRARIES
+    ${_component}_DEFINITIONS
+    ${_component}_VERSION)
+
+endmacro()
+
+
+# Check for cached results. If there are skip the costly part.
+if (NOT FFMPEG_LIBRARIES)
+
+  # Check for all possible component.
+  find_component(AVCODEC    libavcodec    avcodec    libavcodec/avcodec.h)
+  find_component(AVFORMAT   libavformat   avformat   libavformat/avformat.h)
+  find_component(AVDEVICE   libavdevice   avdevice   libavdevice/avdevice.h)
+  #find_component(AVRESAMPLE libavresample avresample libavresample/avresample.h) # old name for swresample
+  find_component(AVUTIL     libavutil     avutil     libavutil/avutil.h)
+  find_component(AVFILTER   libavfilter   avfilter   libavfilter/avfilter.h)
+  find_component(SWSCALE    libswscale    swscale    libswscale/swscale.h)
+  find_component(POSTPROC   libpostproc   postproc   libpostproc/postprocess.h)
+  find_component(SWRESAMPLE libswresample swresample libswresample/swresample.h)
+
+  # Check if the required components were found and add their stuff to the FFMPEG_* vars.
+  foreach (_component ${FFmpeg_FIND_COMPONENTS})
+    if (${_component}_FOUND)
+      # message(STATUS "Required component ${_component} present.")
+      set(FFMPEG_LIBRARIES   ${FFMPEG_LIBRARIES}   ${${_component}_LIBRARIES})
+      set(FFMPEG_DEFINITIONS ${FFMPEG_DEFINITIONS} ${${_component}_DEFINITIONS})
+      list(APPEND FFMPEG_INCLUDE_DIRS ${${_component}_INCLUDE_DIRS})
+    else ()
+      # message(STATUS "Required component ${_component} missing.")
+    endif ()
+  endforeach ()
+
+  # Build the include path with duplicates removed.
+  if (FFMPEG_INCLUDE_DIRS)
+    list(REMOVE_DUPLICATES FFMPEG_INCLUDE_DIRS)
+  endif ()
+
+  # cache the vars.
+  set(FFMPEG_INCLUDE_DIRS ${FFMPEG_INCLUDE_DIRS} CACHE STRING "The FFmpeg include directories." FORCE)
+  set(FFMPEG_LIBRARIES    ${FFMPEG_LIBRARIES}    CACHE STRING "The FFmpeg libraries." FORCE)
+  set(FFMPEG_DEFINITIONS  ${FFMPEG_DEFINITIONS}  CACHE STRING "The FFmpeg cflags." FORCE)
+
+  mark_as_advanced(FFMPEG_INCLUDE_DIRS
+                   FFMPEG_LIBRARIES
+                   FFMPEG_DEFINITIONS)
+
+endif ()
+
+# Now set the noncached _FOUND vars for the components.
+# whisper.cpp does not need SWSCALE
+foreach (_component AVCODEC AVDEVICE AVFORMAT AVRESAMPLE AVUTIL POSTPROCESS)
+  set_component_found(${_component})
+endforeach ()
+
+# Compile the list of required vars
+set(_FFmpeg_REQUIRED_VARS FFMPEG_LIBRARIES FFMPEG_INCLUDE_DIRS)
+foreach (_component ${FFmpeg_FIND_COMPONENTS})
+  list(APPEND _FFmpeg_REQUIRED_VARS ${_component}_LIBRARIES ${_component}_INCLUDE_DIRS)
+endforeach ()
+
+# Give a nice error message if some of the required vars are missing.
+find_package_handle_standard_args(FFmpeg DEFAULT_MSG ${_FFmpeg_REQUIRED_VARS})
@@ -123,7 +123,7 @@ API_AVAILABLE(macos(12.0), ios(15.0), watchos(8.0), tvos(15.0)) __attribute__((v
 
 /**
     Make a prediction using the convenience interface
-    @param logmel_data as 1 × 80 × 3000 3-dimensional array of floats:
+    @param logmel_data as 1 × n_mel × 3000 3-dimensional array of floats:
     @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
     @return the prediction as whisper_encoder_implOutput
 */
@@ -3,6 +3,8 @@
 // Code is derived from the work of Github user @wangchou
 // ref: https://github.com/wangchou/callCoreMLFromCpp
 
+#include <stdint.h>
+
 #if __cplusplus
 extern "C" {
 #endif
@@ -14,6 +16,8 @@ void whisper_coreml_free(struct whisper_coreml_context * ctx);
 
 void whisper_coreml_encode(
     const whisper_coreml_context * ctx,
+    int64_t n_ctx,
+    int64_t n_mel,
     float * mel,
     float * out);
 
@@ -22,7 +22,13 @@ struct whisper_coreml_context * whisper_coreml_init(const char * path_model) {
 
     NSURL * url_model = [NSURL fileURLWithPath: path_model_str];
 
-    const void * data = CFBridgingRetain([[whisper_encoder_impl alloc] initWithContentsOfURL:url_model error:nil]);
+    // select which device to run the Core ML model on
+    MLModelConfiguration *config = [[MLModelConfiguration alloc] init];
+    // config.computeUnits = MLComputeUnitsCPUAndGPU;
+    //config.computeUnits = MLComputeUnitsCPUAndNeuralEngine;
+    config.computeUnits = MLComputeUnitsAll;
+
+    const void * data = CFBridgingRetain([[whisper_encoder_impl alloc] initWithContentsOfURL:url_model configuration:config error:nil]);
 
     if (data == NULL) {
         return NULL;
@@ -42,13 +48,15 @@ void whisper_coreml_free(struct whisper_coreml_context * ctx) {
 
 void whisper_coreml_encode(
     const whisper_coreml_context * ctx,
+    int64_t n_ctx,
+    int64_t n_mel,
     float * mel,
     float * out) {
     MLMultiArray * inMultiArray = [
         [MLMultiArray alloc] initWithDataPointer: mel
-                shape: @[@1, @80, @3000]
+                shape: @[@1, @(n_mel), @(n_ctx)]
                 dataType: MLMultiArrayDataTypeFloat32
-                strides: @[@(240000), @(3000), @1]
+                strides: @[@(n_ctx*n_mel), @(n_ctx), @1]
                 deallocator: nil
                 error: nil
     ];
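Note: the Core ML path above stops hard-coding the 1 × 80 × 3000 mel shape; the encoder input is now sized from the loaded model (large-v3, for example, uses more mel bins than the historical 80). A minimal C++ sketch of querying that count from the public API, with a placeholder model path:

```cpp
#include "whisper.h"

#include <cstdio>

int main() {
    struct whisper_context * ctx = whisper_init_from_file_with_params(
            "models/ggml-base.en.bin", whisper_context_default_params()); // placeholder path
    if (ctx == nullptr) {
        return 1;
    }

    // number of mel bins the loaded model expects (80 for most models)
    const int n_mels = whisper_model_n_mels(ctx);
    printf("model expects %d mel bins\n", n_mels);

    whisper_free(ctx);
    return 0;
}
```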
@@ -14,15 +14,26 @@ if (WHISPER_SDL2)
     message(STATUS "SDL2_LIBRARIES = ${SDL2_LIBRARIES}")
 endif()
 
+if (WHISPER_CLBLAST)
+    find_package(CLBlast REQUIRED)
+endif()
+
 # common
 
 set(TARGET common)
 
+if (WHISPER_FFMPEG)
+    set(COMMON_SOURCES_FFMPEG ffmpeg-transcode.cpp)
+endif()
+
 add_library(${TARGET} STATIC
     common.h
     common.cpp
     common-ggml.h
     common-ggml.cpp
+    grammar-parser.h
+    grammar-parser.cpp
+    ${COMMON_SOURCES_FFMPEG}
     )
 
 include(DefaultTargetOptions)
@@ -30,6 +41,7 @@ include(DefaultTargetOptions)
 target_link_libraries(${TARGET} PRIVATE whisper)
 
 set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
+set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
 
 if (WHISPER_SDL2)
     # common-sdl
@@ -47,27 +59,63 @@ if (WHISPER_SDL2)
     target_link_libraries(${TARGET} PRIVATE ${SDL2_LIBRARIES})
 
     set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
+    set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
 endif()
 
+# add json lib
+add_library(json_cpp INTERFACE)
+target_include_directories(json_cpp INTERFACE ${CMAKE_CURRENT_SOURCE_DIR})
+
 # examples
 
 include_directories(${CMAKE_CURRENT_SOURCE_DIR})
 
 if (EMSCRIPTEN)
     add_subdirectory(whisper.wasm)
+    set_target_properties(libmain PROPERTIES FOLDER "libs")
     add_subdirectory(stream.wasm)
+    set_target_properties(libstream PROPERTIES FOLDER "libs")
     add_subdirectory(command.wasm)
+    set_target_properties(libcommand PROPERTIES FOLDER "libs")
     add_subdirectory(talk.wasm)
+    set_target_properties(libtalk PROPERTIES FOLDER "libs")
     add_subdirectory(bench.wasm)
+    set_target_properties(libbench PROPERTIES FOLDER "libs")
 elseif(CMAKE_JS_VERSION)
     add_subdirectory(addon.node)
+    set_target_properties(addon.node PROPERTIES FOLDER "examples")
 else()
     add_subdirectory(main)
+    set_target_properties(main PROPERTIES FOLDER "examples")
+    if (WHISPER_SDL2)
     add_subdirectory(stream)
+    set_target_properties(stream PROPERTIES FOLDER "examples")
+    endif (WHISPER_SDL2)
+    add_subdirectory(server)
+    set_target_properties(server PROPERTIES FOLDER "examples")
+    if (WHISPER_SDL2)
     add_subdirectory(command)
+    set_target_properties(command PROPERTIES FOLDER "examples")
+    endif (WHISPER_SDL2)
     add_subdirectory(bench)
+    set_target_properties(bench PROPERTIES FOLDER "examples")
     add_subdirectory(quantize)
+    set_target_properties(quantize PROPERTIES FOLDER "examples")
+    if (WHISPER_SDL2)
     add_subdirectory(talk)
+    set_target_properties(talk PROPERTIES FOLDER "examples")
     add_subdirectory(talk-llama)
+    set_target_properties(talk-llama PROPERTIES FOLDER "examples")
     add_subdirectory(lsp)
+    set_target_properties(lsp PROPERTIES FOLDER "examples")
+    if (LLAMA_SYCL)
+    add_subdirectory(sycl)
+    set_target_properties(sycl PROPERTIES FOLDER "examples")
+    endif()
+    endif (WHISPER_SDL2)
 endif()
+
+if (WHISPER_SDL2)
+    add_subdirectory(wchess)
+    set_target_properties(wchess PROPERTIES FOLDER "examples")
+endif (WHISPER_SDL2)
@@ -1,4 +1,4 @@
-set(TARGET whisper-addon)
+set(TARGET addon.node)
 
 # Base settings
 #==================================================================
@@ -14,14 +14,14 @@ npm install
 Make sure it is in the project root directory and compiled with make-js.
 
 ```shell
-npx cmake-js compile -T whisper-addon -B Release
+npx cmake-js compile -T addon.node -B Release
 ```
 
 For Electron addon and cmake-js options, you can see [cmake-js](https://github.com/cmake-js/cmake-js) and make very few configuration changes.
 
 > Such as appointing special cmake path:
 > ```shell
-> npx cmake-js compile -c 'xxx/cmake' -T whisper-addon -B Release
+> npx cmake-js compile -c 'xxx/cmake' -T addon.node -B Release
 > ```
 
 ## Run
@@ -1,7 +1,7 @@
 const path = require("path");
 const { whisper } = require(path.join(
   __dirname,
-  "../../../build/Release/whisper-addon"
+  "../../../build/Release/addon.node"
 ));
 const { promisify } = require("util");
 
@@ -11,6 +11,13 @@ const whisperParamsMock = {
   language: "en",
   model: path.join(__dirname, "../../../models/ggml-base.en.bin"),
   fname_inp: path.join(__dirname, "../../../samples/jfk.wav"),
+  use_gpu: true,
+  flash_attn: false,
+  no_prints: true,
+  comma_in_time: false,
+  translate: true,
+  no_timestamps: false,
+  audio_ctx: 0,
 };
 
 describe("Run whisper.node", () => {
@@ -19,12 +19,12 @@ struct whisper_params {
     int32_t max_len = 0;
     int32_t best_of = 5;
     int32_t beam_size = -1;
+    int32_t audio_ctx = 0;
 
     float word_thold = 0.01f;
     float entropy_thold = 2.4f;
     float logprob_thold = -1.0f;
 
-    bool speed_up = false;
     bool translate = false;
     bool diarize = false;
     bool output_txt = false;
@@ -36,6 +36,10 @@ struct whisper_params {
     bool print_colors = false;
     bool print_progress = false;
     bool no_timestamps = false;
+    bool no_prints = false;
+    bool use_gpu = true;
+    bool flash_attn = false;
+    bool comma_in_time = true;
 
     std::string language = "en";
     std::string prompt;
@@ -43,6 +47,8 @@ struct whisper_params {
 
     std::vector<std::string> fname_inp = {};
     std::vector<std::string> fname_out = {};
+
+    std::vector<float> pcmf32 = {}; // mono-channel F32 PCM
 };
 
 struct whisper_print_user_data {
@@ -51,27 +57,6 @@ struct whisper_print_user_data {
     const std::vector<std::vector<float>> * pcmf32s;
 };
 
-//  500 -> 00:05.000
-// 6000 -> 01:00.000
-std::string to_timestamp(int64_t t, bool comma = false) {
-    int64_t msec = t * 10;
-    int64_t hr = msec / (1000 * 60 * 60);
-    msec = msec - hr * (1000 * 60 * 60);
-    int64_t min = msec / (1000 * 60);
-    msec = msec - min * (1000 * 60);
-    int64_t sec = msec / 1000;
-    msec = msec - sec * 1000;
-
-    char buf[32];
-    snprintf(buf, sizeof(buf), "%02d:%02d:%02d%s%03d", (int) hr, (int) min, (int) sec, comma ? "," : ".", (int) msec);
-
-    return std::string(buf);
-}
-
-int timestamp_to_sample(int64_t t, int n_samples) {
-    return std::max(0, std::min((int) n_samples - 1, (int) ((t*WHISPER_SAMPLE_RATE)/100)));
-}
-
 void whisper_print_segment_callback(struct whisper_context * ctx, struct whisper_state * state, int n_new, void * user_data) {
     const auto & params = *((whisper_print_user_data *) user_data)->params;
     const auto & pcmf32s = *((whisper_print_user_data *) user_data)->pcmf32s;
@@ -103,8 +88,8 @@ void whisper_print_segment_callback(struct whisper_context * ctx, struct whisper
     if (params.diarize && pcmf32s.size() == 2) {
         const int64_t n_samples = pcmf32s[0].size();
 
-        const int64_t is0 = timestamp_to_sample(t0, n_samples);
-        const int64_t is1 = timestamp_to_sample(t1, n_samples);
+        const int64_t is0 = timestamp_to_sample(t0, n_samples, WHISPER_SAMPLE_RATE);
+        const int64_t is1 = timestamp_to_sample(t1, n_samples, WHISPER_SAMPLE_RATE);
 
         double energy0 = 0.0f;
         double energy1 = 0.0f;
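Note: the addon drops its private copies of `to_timestamp` and `timestamp_to_sample`; the call sites above now use shared helpers that take the comma flag and the sample rate explicitly. A hedged sketch of what those helpers look like, reconstructed from the removed code (the exact home of the shared versions in the `common` sources is assumed):

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <string>

// t is in units of 10 ms, as returned by whisper_full_get_segment_t0/t1
std::string to_timestamp(int64_t t, bool comma = false) {
    int64_t msec = t * 10;
    const int64_t hr  = msec / (1000 * 60 * 60); msec -= hr  * (1000 * 60 * 60);
    const int64_t min = msec / (1000 * 60);      msec -= min * (1000 * 60);
    const int64_t sec = msec / 1000;             msec -= sec * 1000;

    char buf[32];
    snprintf(buf, sizeof(buf), "%02d:%02d:%02d%s%03d",
             (int) hr, (int) min, (int) sec, comma ? "," : ".", (int) msec);
    return std::string(buf);
}

// maps a 10 ms timestamp to a clamped sample index for the given sample rate
int timestamp_to_sample(int64_t t, int n_samples, int sample_rate) {
    return std::max(0, std::min(n_samples - 1, (int) ((t * sample_rate) / 100)));
}
```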
@@ -140,9 +125,15 @@ void whisper_print_segment_callback(struct whisper_context * ctx, struct whisper
     }
 }
 
+void cb_log_disable(enum ggml_log_level, const char *, void *) {}
+
 int run(whisper_params &params, std::vector<std::vector<std::string>> &result) {
-    if (params.fname_inp.empty()) {
-        fprintf(stderr, "error: no input files specified\n");
+    if (params.no_prints) {
+        whisper_log_set(cb_log_disable, NULL);
+    }
+
+    if (params.fname_inp.empty() && params.pcmf32.empty()) {
+        fprintf(stderr, "error: no input files or audio buffer specified\n");
         return 2;
     }
 
@@ -153,13 +144,24 @@ int run(whisper_params &params, std::vector<std::vector<std::string>> &result) {
 
     // whisper init
 
-    struct whisper_context * ctx = whisper_init_from_file(params.model.c_str());
+    struct whisper_context_params cparams = whisper_context_default_params();
+    cparams.use_gpu = params.use_gpu;
+    cparams.flash_attn = params.flash_attn;
+    struct whisper_context * ctx = whisper_init_from_file_with_params(params.model.c_str(), cparams);
 
     if (ctx == nullptr) {
         fprintf(stderr, "error: failed to initialize whisper context\n");
         return 3;
     }
 
+    // if params.pcmf32 is provided, set params.fname_inp to "buffer"
+    // this is simpler than further modifications in the code
+    if (!params.pcmf32.empty()) {
+        fprintf(stderr, "info: using audio buffer as input\n");
+        params.fname_inp.clear();
+        params.fname_inp.emplace_back("buffer");
+    }
+
     for (int f = 0; f < (int) params.fname_inp.size(); ++f) {
         const auto fname_inp = params.fname_inp[f];
         const auto fname_out = f < (int)params.fname_out.size() && !params.fname_out[f].empty() ? params.fname_out[f] : params.fname_inp[f];
@@ -167,20 +169,25 @@ int run(whisper_params &params, std::vector<std::vector<std::string>> &result) {
         std::vector<float> pcmf32; // mono-channel F32 PCM
         std::vector<std::vector<float>> pcmf32s; // stereo-channel F32 PCM
 
-        if (!::read_wav(fname_inp, pcmf32, pcmf32s, params.diarize)) {
-            fprintf(stderr, "error: failed to read WAV file '%s'\n", fname_inp.c_str());
-            continue;
+        // read the input audio file if params.pcmf32 is not provided
+        if (params.pcmf32.empty()) {
+            if (!::read_wav(fname_inp, pcmf32, pcmf32s, params.diarize)) {
+                fprintf(stderr, "error: failed to read WAV file '%s'\n", fname_inp.c_str());
+                continue;
+            }
+        } else {
+            pcmf32 = params.pcmf32;
         }
 
         // print system information
-        {
+        if (!params.no_prints) {
            fprintf(stderr, "\n");
            fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                    params.n_threads*params.n_processors, std::thread::hardware_concurrency(), whisper_print_system_info());
        }
 
        // print some info about the processing
-       {
+       if (!params.no_prints) {
            fprintf(stderr, "\n");
            if (!whisper_is_multilingual(ctx)) {
                if (params.language != "en" || params.translate) {
@@ -189,12 +196,13 @@ int run(whisper_params &params, std::vector<std::vector<std::string>> &result) {
                    fprintf(stderr, "%s: WARNING: model is not multilingual, ignoring language and translation options\n", __func__);
                }
            }
-           fprintf(stderr, "%s: processing '%s' (%d samples, %.1f sec), %d threads, %d processors, lang = %s, task = %s, timestamps = %d ...\n",
+           fprintf(stderr, "%s: processing '%s' (%d samples, %.1f sec), %d threads, %d processors, lang = %s, task = %s, timestamps = %d, audio_ctx = %d ...\n",
                    __func__, fname_inp.c_str(), int(pcmf32.size()), float(pcmf32.size())/WHISPER_SAMPLE_RATE,
                    params.n_threads, params.n_processors,
                    params.language.c_str(),
                    params.translate ? "translate" : "transcribe",
-                   params.no_timestamps ? 0 : 1);
+                   params.no_timestamps ? 0 : 1,
+                   params.audio_ctx);
 
            fprintf(stderr, "\n");
        }
@@ -221,14 +229,15 @@ int run(whisper_params &params, std::vector<std::vector<std::string>> &result) {
        wparams.entropy_thold = params.entropy_thold;
        wparams.logprob_thold = params.logprob_thold;
        wparams.max_len = params.output_wts && params.max_len == 0 ? 60 : params.max_len;
+       wparams.audio_ctx = params.audio_ctx;
-       wparams.speed_up = params.speed_up;
 
        wparams.greedy.best_of = params.best_of;
        wparams.beam_search.beam_size = params.beam_size;
 
        wparams.initial_prompt = params.prompt.c_str();
 
+       wparams.no_timestamps = params.no_timestamps;
 
        whisper_print_user_data user_data = { &params, &pcmf32s };
 
        // this callback is called on each new segment
@@ -264,8 +273,8 @@ int run(whisper_params &params, std::vector<std::vector<std::string>> &result) {
            const int64_t t0 = whisper_full_get_segment_t0(ctx, i);
            const int64_t t1 = whisper_full_get_segment_t1(ctx, i);
 
-           result[i].emplace_back(to_timestamp(t0, true));
-           result[i].emplace_back(to_timestamp(t1, true));
+           result[i].emplace_back(to_timestamp(t0, params.comma_in_time));
+           result[i].emplace_back(to_timestamp(t1, params.comma_in_time));
            result[i].emplace_back(text);
        }
 
@@ -315,10 +324,34 @@ Napi::Value whisper(const Napi::CallbackInfo& info) {
    std::string language = whisper_params.Get("language").As<Napi::String>();
    std::string model = whisper_params.Get("model").As<Napi::String>();
    std::string input = whisper_params.Get("fname_inp").As<Napi::String>();
+   bool use_gpu = whisper_params.Get("use_gpu").As<Napi::Boolean>();
+   bool flash_attn = whisper_params.Get("flash_attn").As<Napi::Boolean>();
+   bool no_prints = whisper_params.Get("no_prints").As<Napi::Boolean>();
+   bool no_timestamps = whisper_params.Get("no_timestamps").As<Napi::Boolean>();
+   int32_t audio_ctx = whisper_params.Get("audio_ctx").As<Napi::Number>();
+   bool comma_in_time = whisper_params.Get("comma_in_time").As<Napi::Boolean>();
+
+   Napi::Value pcmf32Value = whisper_params.Get("pcmf32");
+   std::vector<float> pcmf32_vec;
+   if (pcmf32Value.IsTypedArray()) {
+       Napi::Float32Array pcmf32 = pcmf32Value.As<Napi::Float32Array>();
+       size_t length = pcmf32.ElementLength();
+       pcmf32_vec.reserve(length);
+       for (size_t i = 0; i < length; i++) {
+           pcmf32_vec.push_back(pcmf32[i]);
+       }
+   }
 
    params.language = language;
    params.model = model;
    params.fname_inp.emplace_back(input);
+   params.use_gpu = use_gpu;
+   params.flash_attn = flash_attn;
+   params.no_prints = no_prints;
+   params.no_timestamps = no_timestamps;
+   params.audio_ctx = audio_ctx;
+   params.pcmf32 = pcmf32_vec;
+   params.comma_in_time = comma_in_time;
 
    Napi::Function callback = info[1].As<Napi::Function>();
    Worker* worker = new Worker(callback, params);
@@ -1,7 +1,7 @@
 const path = require("path");
 const { whisper } = require(path.join(
   __dirname,
-  "../../build/Release/whisper-addon"
+  "../../build/Release/addon.node"
 ));
 const { promisify } = require("util");
 
@@ -10,14 +10,27 @@ const whisperAsync = promisify(whisper);
 const whisperParams = {
   language: "en",
   model: path.join(__dirname, "../../models/ggml-base.en.bin"),
-  fname_inp: "../../samples/jfk.wav",
+  fname_inp: path.join(__dirname, "../../samples/jfk.wav"),
+  use_gpu: true,
+  flash_attn: false,
+  no_prints: true,
+  comma_in_time: false,
+  translate: true,
+  no_timestamps: false,
+  audio_ctx: 0,
 };
 
 const arguments = process.argv.slice(2);
 const params = Object.fromEntries(
   arguments.reduce((pre, item) => {
     if (item.startsWith("--")) {
-      return [...pre, item.slice(2).split("=")];
+      const [key, value] = item.slice(2).split("=");
+      if (key === "audio_ctx") {
+        whisperParams[key] = parseInt(value);
+      } else {
+        whisperParams[key] = value;
+      }
+      return pre;
     }
     return pre;
   }, [])
@@ -32,5 +45,6 @@ for (const key in params) {
 console.log("whisperParams =", whisperParams);
 
 whisperAsync(whisperParams).then((result) => {
-  console.log(`Result from whisper: ${result}`);
+  console.log();
+  console.log(result);
 });
@@ -1,5 +1,5 @@
 {
-  "name": "whisper-addon",
+  "name": "addon.node",
   "version": "0.0.0",
   "description": "",
   "main": "index.js",
@@ -23,7 +23,9 @@ void bench_main(size_t index) {
 
     fprintf(stderr, "%s: running benchmark with %d threads - please wait...\n", __func__, n_threads);
 
-    if (int ret = whisper_set_mel(ctx, nullptr, 0, WHISPER_N_MEL)) {
+    const int n_mels = whisper_model_n_mels(ctx);
+
+    if (int ret = whisper_set_mel(ctx, nullptr, 0, n_mels)) {
         fprintf(stderr, "error: failed to set mel: %d\n", ret);
         return;
     }
@@ -57,7 +59,7 @@ EMSCRIPTEN_BINDINGS(bench) {
     emscripten::function("init", emscripten::optional_override([](const std::string & path_model) {
         for (size_t i = 0; i < g_contexts.size(); ++i) {
             if (g_contexts[i] == nullptr) {
-                g_contexts[i] = whisper_init_from_file(path_model.c_str());
+                g_contexts[i] = whisper_init_from_file_with_params(path_model.c_str(), whisper_context_default_params());
                 if (g_contexts[i] != nullptr) {
                     if (g_worker.joinable()) {
                         g_worker.join();
@@ -1,15 +1,19 @@
 #include "whisper.h"
 
 #include <cstdio>
+#include <cstring>
 #include <string>
 #include <thread>
 
 // command-line parameters
 struct whisper_params {
     int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
-    int32_t what = 0; // what to benchmark: 0 - whisper ecoder, 1 - memcpy, 2 - ggml_mul_mat
+    int32_t what = 0; // what to benchmark: 0 - whisper encoder, 1 - memcpy, 2 - ggml_mul_mat
 
     std::string model = "models/ggml-base.en.bin";
+
+    bool use_gpu = true;
+    bool flash_attn = false;
 };
 
 void whisper_print_usage(int argc, char ** argv, const whisper_params & params);
@@ -22,9 +26,11 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
             whisper_print_usage(argc, argv, params);
             exit(0);
         }
         else if (arg == "-t" || arg == "--threads") { params.n_threads = std::stoi(argv[++i]); }
         else if (arg == "-m" || arg == "--model") { params.model = argv[++i]; }
         else if (arg == "-w" || arg == "--what") { params.what = atoi(argv[++i]); }
+        else if (arg == "-ng" || arg == "--no-gpu") { params.use_gpu = false; }
+        else if (arg == "-fa" || arg == "--flash-attn") { params.flash_attn = true; }
         else {
             fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
             whisper_print_usage(argc, argv, params);
@@ -44,16 +50,23 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
     fprintf(stderr, " -t N, --threads N [%-7d] number of threads to use during computation\n", params.n_threads);
     fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str());
     fprintf(stderr, " -w N, --what N [%-7d] what to benchmark:\n", params.what);
-    fprintf(stderr, " %-7s 0 - whisper encoder\n", "");
+    fprintf(stderr, " -ng, --no-gpu [%-7s] disable GPU\n", params.use_gpu ? "false" : "true");
+    fprintf(stderr, " -fa, --flash-attn [%-7s] enable flash attention\n", params.flash_attn ? "true" : "false");
+    fprintf(stderr, " %-7s 0 - whisper\n", "");
     fprintf(stderr, " %-7s 1 - memcpy\n", "");
     fprintf(stderr, " %-7s 2 - ggml_mul_mat\n", "");
     fprintf(stderr, "\n");
 }
 
-int whisper_bench_encoder(const whisper_params & params) {
+int whisper_bench_full(const whisper_params & params) {
     // whisper init
 
-    struct whisper_context * ctx = whisper_init_from_file(params.model.c_str());
+    struct whisper_context_params cparams = whisper_context_default_params();
+
+    cparams.use_gpu = params.use_gpu;
+    cparams.flash_attn = params.flash_attn;
+
+    struct whisper_context * ctx = whisper_init_from_file_with_params(params.model.c_str(), cparams);
 
     {
         fprintf(stderr, "\n");
@@ -65,16 +78,65 @@ int whisper_bench_encoder(const whisper_params & params) {
         return 2;
     }
 
-    if (int ret = whisper_set_mel(ctx, nullptr, 0, WHISPER_N_MEL)) {
+    const int n_mels = whisper_model_n_mels(ctx);
+
+    if (int ret = whisper_set_mel(ctx, nullptr, 0, n_mels)) {
         fprintf(stderr, "error: failed to set mel: %d\n", ret);
         return 3;
     }
 
+    // heat encoder
     if (int ret = whisper_encode(ctx, 0, params.n_threads) != 0) {
-        fprintf(stderr, "error: failed to encode model: %d\n", ret);
+        fprintf(stderr, "error: failed to encode: %d\n", ret);
         return 4;
     }
 
+    whisper_token tokens[512];
+    memset(tokens, 0, sizeof(tokens));
+
+    // prompt heat
+    if (int ret = whisper_decode(ctx, tokens, 256, 0, params.n_threads) != 0) {
+        fprintf(stderr, "error: failed to decode: %d\n", ret);
+        return 4;
+    }
+
+    // text-generation heat
+    if (int ret = whisper_decode(ctx, tokens, 1, 256, params.n_threads) != 0) {
+        fprintf(stderr, "error: failed to decode: %d\n", ret);
+        return 4;
+    }
+
+    whisper_reset_timings(ctx);
+
+    // actual run
+    if (int ret = whisper_encode(ctx, 0, params.n_threads) != 0) {
+        fprintf(stderr, "error: failed to encode: %d\n", ret);
+        return 4;
+    }
+
+    // text-generation
+    for (int i = 0; i < 256; i++) {
+        if (int ret = whisper_decode(ctx, tokens, 1, i, params.n_threads) != 0) {
+            fprintf(stderr, "error: failed to decode: %d\n", ret);
+            return 4;
+        }
+    }
+
+    // batched decoding
+    for (int i = 0; i < 64; i++) {
+        if (int ret = whisper_decode(ctx, tokens, 5, 0, params.n_threads) != 0) {
+            fprintf(stderr, "error: failed to decode: %d\n", ret);
+            return 4;
+        }
+    }
+
+    // prompt processing
+    for (int i = 0; i < 16; i++) {
+        if (int ret = whisper_decode(ctx, tokens, 256, 0, params.n_threads) != 0) {
+            fprintf(stderr, "error: failed to decode: %d\n", ret);
            return 4;
+        }
+    }
+
     whisper_print_timings(ctx);
     whisper_free(ctx);
 
@@ -103,7 +165,7 @@ int main(int argc, char ** argv) {
     int ret = -1;
 
     switch (params.what) {
-        case 0: ret = whisper_bench_encoder(params); break;
+        case 0: ret = whisper_bench_full(params); break;
         case 1: ret = whisper_bench_memcpy(params.n_threads); break;
         case 2: ret = whisper_bench_ggml_mul_mat(params.n_threads); break;
         default: fprintf(stderr, "error: unknown benchmark: %d\n", params.what); break;
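Note: the renamed `whisper_bench_full` above follows a warm-up, reset, measure pattern. A condensed C++ sketch of that pattern in isolation (the helper name and loop sizes are illustrative, not part of the source):

```cpp
#include "whisper.h"

#include <cstdio>
#include <cstring>

// warm up once, reset the timers, then time only the runs that matter
int bench_sketch(struct whisper_context * ctx, int n_threads) {
    whisper_token tokens[512];
    memset(tokens, 0, sizeof(tokens));

    // warm-up passes (not measured)
    whisper_encode(ctx, 0, n_threads);
    whisper_decode(ctx, tokens, 256, 0, n_threads);

    whisper_reset_timings(ctx);

    // measured passes
    if (whisper_encode(ctx, 0, n_threads) != 0) {
        fprintf(stderr, "encode failed\n");
        return 1;
    }
    for (int i = 0; i < 256; i++) {
        whisper_decode(ctx, tokens, 1, i, n_threads); // single-token decode steps
    }

    whisper_print_timings(ctx);
    return 0;
}
```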
@@ -243,7 +243,7 @@ EMSCRIPTEN_BINDINGS(command) {
     emscripten::function("init", emscripten::optional_override([](const std::string & path_model) {
         for (size_t i = 0; i < g_contexts.size(); ++i) {
             if (g_contexts[i] == nullptr) {
-                g_contexts[i] = whisper_init_from_file(path_model.c_str());
+                g_contexts[i] = whisper_init_from_file_with_params(path_model.c_str(), whisper_context_default_params());
                 if (g_contexts[i] != nullptr) {
                     g_running = true;
                     if (g_worker.joinable()) {
@@ -37,9 +37,13 @@ https://user-images.githubusercontent.com/1991296/207435352-8fc4ed3f-bde5-4555-9
 The `command` tool depends on SDL2 library to capture audio from the microphone. You can build it like this:
 
 ```bash
-# Install SDL2 on Linux
+# Install SDL2
+# On Debian based linux distributions:
 sudo apt-get install libsdl2-dev
 
+# On Fedora Linux:
+sudo dnf install SDL2 SDL2-devel
+
 # Install SDL2 on Mac OS
 brew install sdl2
@@ -6,9 +6,10 @@
 // ref: https://github.com/ggerganov/whisper.cpp/issues/171
 //
 
-#include "common.h"
 #include "common-sdl.h"
+#include "common.h"
 #include "whisper.h"
+#include "grammar-parser.h"
 
 #include <sstream>
 #include <cassert>
@@ -30,20 +31,30 @@ struct whisper_params {
     int32_t max_tokens = 32;
     int32_t audio_ctx = 0;
 
     float vad_thold = 0.6f;
     float freq_thold = 100.0f;
 
-    bool speed_up = false;
+    float grammar_penalty = 100.0f;
+
+    grammar_parser::parse_state grammar_parsed;
+
     bool translate = false;
     bool print_special = false;
     bool print_energy = false;
     bool no_timestamps = true;
+    bool use_gpu = true;
+    bool flash_attn = false;
 
     std::string language = "en";
     std::string model = "models/ggml-base.en.bin";
     std::string fname_out;
     std::string commands;
     std::string prompt;
+    std::string context;
+    std::string grammar;
+
+    // A regular expression that matches tokens to suppress
+    std::string suppress_regex;
 };
 
 void whisper_print_usage(int argc, char ** argv, const whisper_params & params);
@@ -64,15 +75,20 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
         else if (arg == "-ac" || arg == "--audio-ctx") { params.audio_ctx = std::stoi(argv[++i]); }
         else if (arg == "-vth" || arg == "--vad-thold") { params.vad_thold = std::stof(argv[++i]); }
         else if (arg == "-fth" || arg == "--freq-thold") { params.freq_thold = std::stof(argv[++i]); }
-        else if (arg == "-su" || arg == "--speed-up") { params.speed_up = true; }
         else if (arg == "-tr" || arg == "--translate") { params.translate = true; }
         else if (arg == "-ps" || arg == "--print-special") { params.print_special = true; }
         else if (arg == "-pe" || arg == "--print-energy") { params.print_energy = true; }
+        else if (arg == "-ng" || arg == "--no-gpu") { params.use_gpu = false; }
+        else if (arg == "-fa" || arg == "--flash-attn") { params.flash_attn = true; }
         else if (arg == "-l" || arg == "--language") { params.language = argv[++i]; }
        else if (arg == "-m" || arg == "--model") { params.model = argv[++i]; }
        else if (arg == "-f" || arg == "--file") { params.fname_out = argv[++i]; }
        else if (arg == "-cmd" || arg == "--commands") { params.commands = argv[++i]; }
        else if (arg == "-p" || arg == "--prompt") { params.prompt = argv[++i]; }
+       else if (arg == "-ctx" || arg == "--context") { params.context = argv[++i]; }
+       else if ( arg == "--grammar") { params.grammar = argv[++i]; }
+       else if ( arg == "--grammar-penalty") { params.grammar_penalty = std::stof(argv[++i]); }
+       else if ( arg == "--suppress-regex") { params.suppress_regex = argv[++i]; }
        else {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            whisper_print_usage(argc, argv, params);
@@ -97,25 +113,41 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
    fprintf(stderr, " -ac N, --audio-ctx N [%-7d] audio context size (0 - all)\n", params.audio_ctx);
    fprintf(stderr, " -vth N, --vad-thold N [%-7.2f] voice activity detection threshold\n", params.vad_thold);
    fprintf(stderr, " -fth N, --freq-thold N [%-7.2f] high-pass frequency cutoff\n", params.freq_thold);
-   fprintf(stderr, " -su, --speed-up [%-7s] speed up audio by x2 (reduced accuracy)\n", params.speed_up ? "true" : "false");
    fprintf(stderr, " -tr, --translate [%-7s] translate from source language to english\n", params.translate ? "true" : "false");
    fprintf(stderr, " -ps, --print-special [%-7s] print special tokens\n", params.print_special ? "true" : "false");
    fprintf(stderr, " -pe, --print-energy [%-7s] print sound energy (for debugging)\n", params.print_energy ? "true" : "false");
+   fprintf(stderr, " -ng, --no-gpu [%-7s] disable GPU\n", params.use_gpu ? "false" : "true");
+   fprintf(stderr, " -fa, --flash-attn [%-7s] flash attention\n", params.flash_attn ? "true" : "false");
    fprintf(stderr, " -l LANG, --language LANG [%-7s] spoken language\n", params.language.c_str());
    fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str());
    fprintf(stderr, " -f FNAME, --file FNAME [%-7s] text output file name\n", params.fname_out.c_str());
    fprintf(stderr, " -cmd FNAME, --commands FNAME [%-7s] text file with allowed commands\n", params.commands.c_str());
    fprintf(stderr, " -p, --prompt [%-7s] the required activation prompt\n", params.prompt.c_str());
+   fprintf(stderr, " -ctx, --context [%-7s] sample text to help the transcription\n", params.context.c_str());
+   fprintf(stderr, " --grammar GRAMMAR [%-7s] GBNF grammar to guide decoding\n", params.grammar.c_str());
+   fprintf(stderr, " --grammar-penalty N [%-7.1f] scales down logits of nongrammar tokens\n", params.grammar_penalty);
+   fprintf(stderr, " --suppress-regex REGEX [%-7s] regular expression matching tokens to suppress\n", params.suppress_regex.c_str());
    fprintf(stderr, "\n");
 }
 
-std::string transcribe(whisper_context * ctx, const whisper_params & params, const std::vector<float> & pcmf32, float & prob, int64_t & t_ms) {
+std::string transcribe(
+        whisper_context * ctx,
+        const whisper_params & params,
+        const std::vector<float> & pcmf32,
+        const std::string & grammar_rule,
+        float & logprob_min,
+        float & logprob_sum,
+        int & n_tokens,
+        int64_t & t_ms) {
    const auto t_start = std::chrono::high_resolution_clock::now();
 
-   prob = 0.0f;
+   logprob_min = 0.0f;
+   logprob_sum = 0.0f;
+   n_tokens = 0;
    t_ms = 0;
 
-   whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
+   //whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
+   whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_BEAM_SEARCH);
 
    wparams.print_progress = false;
    wparams.print_special = params.print_special;
@@ -123,19 +155,42 @@ std::string transcribe(whisper_context * ctx, const whisper_params & params, con
    wparams.print_timestamps = !params.no_timestamps;
    wparams.translate = params.translate;
    wparams.no_context = true;
+   wparams.no_timestamps = params.no_timestamps;
    wparams.single_segment = true;
    wparams.max_tokens = params.max_tokens;
    wparams.language = params.language.c_str();
    wparams.n_threads = params.n_threads;
 
    wparams.audio_ctx = params.audio_ctx;
-   wparams.speed_up = params.speed_up;
+
+   wparams.temperature = 0.4f;
+   wparams.temperature_inc = 1.0f;
+   wparams.greedy.best_of = 5;
+
+   wparams.beam_search.beam_size = 5;
+
+   wparams.initial_prompt = params.context.data();
+
+   wparams.suppress_regex = params.suppress_regex.c_str();
+
+   const auto & grammar_parsed = params.grammar_parsed;
+   auto grammar_rules = grammar_parsed.c_rules();
+
+   if (!params.grammar_parsed.rules.empty() && !grammar_rule.empty()) {
+       if (grammar_parsed.symbol_ids.find(grammar_rule) == grammar_parsed.symbol_ids.end()) {
+           fprintf(stderr, "%s: warning: grammar rule '%s' not found - skipping grammar sampling\n", __func__, grammar_rule.c_str());
+       } else {
+           wparams.grammar_rules = grammar_rules.data();
+           wparams.n_grammar_rules = grammar_rules.size();
+           wparams.i_start_rule = grammar_parsed.symbol_ids.at(grammar_rule);
+           wparams.grammar_penalty = params.grammar_penalty;
+       }
+   }
 
    if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
        return "";
    }
 
-   int prob_n = 0;
    std::string result;
 
    const int n_segments = whisper_full_n_segments(ctx);
|
|||||||
|
|
||||||
result += text;
|
result += text;
|
||||||
|
|
||||||
const int n_tokens = whisper_full_n_tokens(ctx, i);
|
const int n = whisper_full_n_tokens(ctx, i);
|
||||||
for (int j = 0; j < n_tokens; ++j) {
|
for (int j = 0; j < n; ++j) {
|
||||||
const auto token = whisper_full_get_token_data(ctx, i, j);
|
const auto token = whisper_full_get_token_data(ctx, i, j);
|
||||||
|
|
||||||
prob += token.p;
|
if(token.plog > 0.0f) exit(0);
|
||||||
++prob_n;
|
logprob_min = std::min(logprob_min, token.plog);
|
||||||
|
logprob_sum += token.plog;
|
||||||
|
++n_tokens;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (prob_n > 0) {
|
|
||||||
prob /= prob_n;
|
|
||||||
}
|
|
||||||
|
|
||||||
const auto t_end = std::chrono::high_resolution_clock::now();
|
const auto t_end = std::chrono::high_resolution_clock::now();
|
||||||
t_ms = std::chrono::duration_cast<std::chrono::milliseconds>(t_end - t_start).count();
|
t_ms = std::chrono::duration_cast<std::chrono::milliseconds>(t_end - t_start).count();
|
||||||
|
|
||||||
@ -247,7 +300,7 @@ int process_command_list(struct whisper_context * ctx, audio_async &audio, const
|
|||||||
fprintf(stderr, " ]\n");
|
fprintf(stderr, " ]\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
std::string k_prompt = "select one from the available words: ";
|
std::string k_prompt = "select one from the available words: ";
|
||||||
for (int i = 0; i < (int) allowed_commands.size(); ++i) {
|
for (int i = 0; i < (int) allowed_commands.size(); ++i) {
|
||||||
if (i > 0) {
|
if (i > 0) {
|
||||||
k_prompt += ", ";
|
k_prompt += ", ";
|
||||||
@ -314,7 +367,6 @@ int process_command_list(struct whisper_context * ctx, audio_async &audio, const
|
|||||||
wparams.n_threads = params.n_threads;
|
wparams.n_threads = params.n_threads;
|
||||||
|
|
||||||
wparams.audio_ctx = params.audio_ctx;
|
wparams.audio_ctx = params.audio_ctx;
|
||||||
wparams.speed_up = params.speed_up;
|
|
||||||
|
|
||||||
wparams.prompt_tokens = k_tokens.data();
|
wparams.prompt_tokens = k_tokens.data();
|
||||||
wparams.prompt_n_tokens = k_tokens.size();
|
wparams.prompt_n_tokens = k_tokens.size();
|
||||||
@ -415,7 +467,9 @@ int always_prompt_transcription(struct whisper_context * ctx, audio_async & audi
|
|||||||
bool is_running = true;
|
bool is_running = true;
|
||||||
bool ask_prompt = true;
|
bool ask_prompt = true;
|
||||||
|
|
||||||
float prob = 0.0f;
|
float logprob_min = 0.0f;
|
||||||
|
float logprob_sum = 0.0f;
|
||||||
|
int n_tokens = 0;
|
||||||
|
|
||||||
std::vector<float> pcmf32_cur;
|
std::vector<float> pcmf32_cur;
|
||||||
|
|
||||||
@ -453,7 +507,7 @@ int always_prompt_transcription(struct whisper_context * ctx, audio_async & audi
|
|||||||
// detect the commands
|
// detect the commands
|
||||||
audio.get(params.command_ms, pcmf32_cur);
|
audio.get(params.command_ms, pcmf32_cur);
|
||||||
|
|
||||||
const auto txt = ::trim(::transcribe(ctx, params, pcmf32_cur, prob, t_ms));
|
const auto txt = ::trim(::transcribe(ctx, params, pcmf32_cur, "", logprob_min, logprob_sum, n_tokens, t_ms));
|
||||||
|
|
||||||
const auto words = get_words(txt);
|
const auto words = get_words(txt);
|
||||||
|
|
||||||
@ -489,18 +543,27 @@ int always_prompt_transcription(struct whisper_context * ctx, audio_async & audi
|
|||||||
|
|
||||||
// general-purpose mode
|
// general-purpose mode
|
||||||
// freely transcribe the voice into text
|
// freely transcribe the voice into text
|
||||||
int process_general_transcription(struct whisper_context * ctx, audio_async &audio, const whisper_params ¶ms) {
|
int process_general_transcription(struct whisper_context * ctx, audio_async & audio, const whisper_params & params) {
|
||||||
bool is_running = true;
|
bool is_running = true;
|
||||||
bool have_prompt = false;
|
bool have_prompt = false;
|
||||||
bool ask_prompt = true;
|
bool ask_prompt = true;
|
||||||
|
|
||||||
float prob0 = 0.0f;
|
float logprob_min0 = 0.0f;
|
||||||
float prob = 0.0f;
|
float logprob_min = 0.0f;
|
||||||
|
|
||||||
|
float logprob_sum0 = 0.0f;
|
||||||
|
float logprob_sum = 0.0f;
|
||||||
|
|
||||||
|
int n_tokens0 = 0;
|
||||||
|
int n_tokens = 0;
|
||||||
|
|
||||||
std::vector<float> pcmf32_cur;
|
std::vector<float> pcmf32_cur;
|
||||||
std::vector<float> pcmf32_prompt;
|
std::vector<float> pcmf32_prompt;
|
||||||
|
|
||||||
const std::string k_prompt = "Ok Whisper, start listening for commands.";
|
std::string k_prompt = "Ok Whisper, start listening for commands.";
|
||||||
|
if (!params.prompt.empty()) {
|
||||||
|
k_prompt = params.prompt;
|
||||||
|
}
|
||||||
|
|
||||||
fprintf(stderr, "\n");
|
fprintf(stderr, "\n");
|
||||||
fprintf(stderr, "%s: general-purpose mode\n", __func__);
|
fprintf(stderr, "%s: general-purpose mode\n", __func__);
|
||||||
@ -533,9 +596,11 @@ int process_general_transcription(struct whisper_context * ctx, audio_async &aud
|
|||||||
// wait for activation phrase
|
// wait for activation phrase
|
||||||
audio.get(params.prompt_ms, pcmf32_cur);
|
audio.get(params.prompt_ms, pcmf32_cur);
|
||||||
|
|
||||||
const auto txt = ::trim(::transcribe(ctx, params, pcmf32_cur, prob0, t_ms));
|
const auto txt = ::trim(::transcribe(ctx, params, pcmf32_cur, "prompt", logprob_min0, logprob_sum0, n_tokens0, t_ms));
|
||||||
|
|
||||||
fprintf(stdout, "%s: Heard '%s%s%s', (t = %d ms)\n", __func__, "\033[1m", txt.c_str(), "\033[0m", (int) t_ms);
|
const float p = 100.0f * std::exp(logprob_min0);
|
||||||
|
|
||||||
|
fprintf(stdout, "%s: Heard '%s%s%s', (t = %d ms, p = %.2f%%)\n", __func__, "\033[1m", txt.c_str(), "\033[0m", (int) t_ms, p);
|
||||||
|
|
||||||
const float sim = similarity(txt, k_prompt);
|
const float sim = similarity(txt, k_prompt);
|
||||||
|
|
||||||
@ -556,19 +621,30 @@ int process_general_transcription(struct whisper_context * ctx, audio_async &aud
|
|||||||
// we have heard the activation phrase, now detect the commands
|
// we have heard the activation phrase, now detect the commands
|
||||||
audio.get(params.command_ms, pcmf32_cur);
|
audio.get(params.command_ms, pcmf32_cur);
|
||||||
|
|
||||||
|
//printf("len prompt: %.4f\n", pcmf32_prompt.size() / (float) WHISPER_SAMPLE_RATE);
|
||||||
|
//printf("len command: %.4f\n", pcmf32_cur.size() / (float) WHISPER_SAMPLE_RATE);
|
||||||
|
|
||||||
|
// prepend 3 second of silence
|
||||||
|
pcmf32_cur.insert(pcmf32_cur.begin(), 3.0f*WHISPER_SAMPLE_RATE, 0.0f);
|
||||||
|
|
||||||
// prepend the prompt audio
|
// prepend the prompt audio
|
||||||
pcmf32_cur.insert(pcmf32_cur.begin(), pcmf32_prompt.begin(), pcmf32_prompt.end());
|
pcmf32_cur.insert(pcmf32_cur.begin(), pcmf32_prompt.begin(), pcmf32_prompt.end());
|
||||||
|
|
||||||
const auto txt = ::trim(::transcribe(ctx, params, pcmf32_cur, prob, t_ms));
|
const auto txt = ::trim(::transcribe(ctx, params, pcmf32_cur, "root", logprob_min, logprob_sum, n_tokens, t_ms));
|
||||||
|
|
||||||
prob = 100.0f*(prob - prob0);
|
//const float p = 100.0f * std::exp((logprob - logprob0) / (n_tokens - n_tokens0));
|
||||||
|
const float p = 100.0f * std::exp(logprob_min);
|
||||||
|
|
||||||
//fprintf(stdout, "%s: heard '%s'\n", __func__, txt.c_str());
|
//fprintf(stdout, "%s: heard '%s'\n", __func__, txt.c_str());
|
||||||
|
|
||||||
// find the prompt in the text
|
// find the prompt in the text
|
||||||
float best_sim = 0.0f;
|
float best_sim = 0.0f;
|
||||||
size_t best_len = 0;
|
size_t best_len = 0;
|
||||||
for (int n = 0.8*k_prompt.size(); n <= 1.2*k_prompt.size(); ++n) {
|
for (size_t n = 0.8*k_prompt.size(); n <= 1.2*k_prompt.size(); ++n) {
|
||||||
|
if (n >= txt.size()) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
const auto prompt = txt.substr(0, n);
|
const auto prompt = txt.substr(0, n);
|
||||||
|
|
||||||
const float sim = similarity(prompt, k_prompt);
|
const float sim = similarity(prompt, k_prompt);
|
||||||
@ -581,9 +657,16 @@ int process_general_transcription(struct whisper_context * ctx, audio_async &aud
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const std::string command = ::trim(txt.substr(best_len));
|
fprintf(stdout, "%s: DEBUG: txt = '%s', prob = %.2f%%\n", __func__, txt.c_str(), p);
|
||||||
|
if (best_len == 0) {
|
||||||
|
fprintf(stdout, "%s: WARNING: command not recognized, try again\n", __func__);
|
||||||
|
} else {
|
||||||
|
// cut the prompt from the decoded text
|
||||||
|
const std::string command = ::trim(txt.substr(best_len));
|
||||||
|
|
||||||
|
fprintf(stdout, "%s: Command '%s%s%s', (t = %d ms)\n", __func__, "\033[1m", command.c_str(), "\033[0m", (int) t_ms);
|
||||||
|
}
|
||||||
|
|
||||||
fprintf(stdout, "%s: Command '%s%s%s', (t = %d ms)\n", __func__, "\033[1m", command.c_str(), "\033[0m", (int) t_ms);
|
|
||||||
fprintf(stdout, "\n");
|
fprintf(stdout, "\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -610,7 +693,12 @@ int main(int argc, char ** argv) {
|
|||||||
|
|
||||||
// whisper init
|
// whisper init
|
||||||
|
|
||||||
struct whisper_context * ctx = whisper_init_from_file(params.model.c_str());
|
struct whisper_context_params cparams = whisper_context_default_params();
|
||||||
|
|
||||||
|
cparams.use_gpu = params.use_gpu;
|
||||||
|
cparams.flash_attn = params.flash_attn;
|
||||||
|
|
||||||
|
struct whisper_context * ctx = whisper_init_from_file_with_params(params.model.c_str(), cparams);
|
||||||
|
|
||||||
// print some info about the processing
|
// print some info about the processing
|
||||||
{
|
{
|
||||||
@ -648,12 +736,36 @@ int main(int argc, char ** argv) {
|
|||||||
|
|
||||||
int ret_val = 0;
|
int ret_val = 0;
|
||||||
|
|
||||||
if (!params.commands.empty()) {
|
if (!params.grammar.empty()) {
|
||||||
ret_val = process_command_list(ctx, audio, params);
|
auto & grammar = params.grammar_parsed;
|
||||||
} else if (!params.prompt.empty()) {
|
if (is_file_exist(params.grammar.c_str())) {
|
||||||
ret_val = always_prompt_transcription(ctx, audio, params);
|
// read grammar from file
|
||||||
} else {
|
std::ifstream ifs(params.grammar.c_str());
|
||||||
ret_val = process_general_transcription(ctx, audio, params);
|
const std::string txt = std::string((std::istreambuf_iterator<char>(ifs)), std::istreambuf_iterator<char>());
|
||||||
|
grammar = grammar_parser::parse(txt.c_str());
|
||||||
|
} else {
|
||||||
|
// read grammar from string
|
||||||
|
grammar = grammar_parser::parse(params.grammar.c_str());
|
||||||
|
}
|
||||||
|
|
||||||
|
// will be empty (default) if there are parse errors
|
||||||
|
if (grammar.rules.empty()) {
|
||||||
|
ret_val = 1;
|
||||||
|
} else {
|
||||||
|
fprintf(stderr, "%s: grammar:\n", __func__);
|
||||||
|
grammar_parser::print_grammar(stderr, grammar);
|
||||||
|
fprintf(stderr, "\n");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (ret_val == 0) {
|
||||||
|
if (!params.commands.empty()) {
|
||||||
|
ret_val = process_command_list(ctx, audio, params);
|
||||||
|
} else if (!params.prompt.empty() && params.grammar_parsed.rules.empty()) {
|
||||||
|
ret_val = always_prompt_transcription(ctx, audio, params);
|
||||||
|
} else {
|
||||||
|
ret_val = process_general_transcription(ctx, audio, params);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
audio.pause();
|
audio.pause();
|
||||||
|
@ -9,6 +9,11 @@ static const std::map<std::string, enum ggml_ftype> GGML_FTYPE_MAP = {
|
|||||||
{"q5_0", GGML_FTYPE_MOSTLY_Q5_0},
|
{"q5_0", GGML_FTYPE_MOSTLY_Q5_0},
|
||||||
{"q5_1", GGML_FTYPE_MOSTLY_Q5_1},
|
{"q5_1", GGML_FTYPE_MOSTLY_Q5_1},
|
||||||
{"q8_0", GGML_FTYPE_MOSTLY_Q8_0},
|
{"q8_0", GGML_FTYPE_MOSTLY_Q8_0},
|
||||||
|
{"q2_k", GGML_FTYPE_MOSTLY_Q2_K},
|
||||||
|
{"q3_k", GGML_FTYPE_MOSTLY_Q3_K},
|
||||||
|
{"q4_k", GGML_FTYPE_MOSTLY_Q4_K},
|
||||||
|
{"q5_k", GGML_FTYPE_MOSTLY_Q5_K},
|
||||||
|
{"q6_k", GGML_FTYPE_MOSTLY_Q6_K},
|
||||||
};
|
};
|
||||||
|
|
||||||
void ggml_print_ftypes(FILE * fp) {
|
void ggml_print_ftypes(FILE * fp) {
|
||||||
@ -48,15 +53,25 @@ bool ggml_common_quantize_0(
|
|||||||
case GGML_FTYPE_MOSTLY_Q5_0: qtype = GGML_TYPE_Q5_0; break;
|
case GGML_FTYPE_MOSTLY_Q5_0: qtype = GGML_TYPE_Q5_0; break;
|
||||||
case GGML_FTYPE_MOSTLY_Q5_1: qtype = GGML_TYPE_Q5_1; break;
|
case GGML_FTYPE_MOSTLY_Q5_1: qtype = GGML_TYPE_Q5_1; break;
|
||||||
case GGML_FTYPE_MOSTLY_Q8_0: qtype = GGML_TYPE_Q8_0; break;
|
case GGML_FTYPE_MOSTLY_Q8_0: qtype = GGML_TYPE_Q8_0; break;
|
||||||
|
case GGML_FTYPE_MOSTLY_Q2_K: qtype = GGML_TYPE_Q2_K; break;
|
||||||
|
case GGML_FTYPE_MOSTLY_Q3_K: qtype = GGML_TYPE_Q3_K; break;
|
||||||
|
case GGML_FTYPE_MOSTLY_Q4_K: qtype = GGML_TYPE_Q4_K; break;
|
||||||
|
case GGML_FTYPE_MOSTLY_Q5_K: qtype = GGML_TYPE_Q5_K; break;
|
||||||
|
case GGML_FTYPE_MOSTLY_Q6_K: qtype = GGML_TYPE_Q6_K; break;
|
||||||
case GGML_FTYPE_UNKNOWN:
|
case GGML_FTYPE_UNKNOWN:
|
||||||
case GGML_FTYPE_ALL_F32:
|
case GGML_FTYPE_ALL_F32:
|
||||||
case GGML_FTYPE_MOSTLY_F16:
|
case GGML_FTYPE_MOSTLY_F16:
|
||||||
case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16:
|
case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16:
|
||||||
case GGML_FTYPE_MOSTLY_Q2_K:
|
case GGML_FTYPE_MOSTLY_IQ2_XXS:
|
||||||
case GGML_FTYPE_MOSTLY_Q3_K:
|
case GGML_FTYPE_MOSTLY_IQ2_XS:
|
||||||
case GGML_FTYPE_MOSTLY_Q4_K:
|
case GGML_FTYPE_MOSTLY_IQ2_S:
|
||||||
case GGML_FTYPE_MOSTLY_Q5_K:
|
case GGML_FTYPE_MOSTLY_IQ3_XXS:
|
||||||
case GGML_FTYPE_MOSTLY_Q6_K:
|
case GGML_FTYPE_MOSTLY_IQ3_S:
|
||||||
|
case GGML_FTYPE_MOSTLY_IQ1_S:
|
||||||
|
case GGML_FTYPE_MOSTLY_IQ4_NL:
|
||||||
|
case GGML_FTYPE_MOSTLY_IQ4_XS:
|
||||||
|
case GGML_FTYPE_MOSTLY_IQ1_M:
|
||||||
|
case GGML_FTYPE_MOSTLY_BF16:
|
||||||
{
|
{
|
||||||
fprintf(stderr, "%s: invalid model type %d\n", __func__, ftype);
|
fprintf(stderr, "%s: invalid model type %d\n", __func__, ftype);
|
||||||
return false;
|
return false;
|
||||||
@ -77,8 +92,6 @@ bool ggml_common_quantize_0(
|
|||||||
std::vector<ggml_fp16_t> data_f16;
|
std::vector<ggml_fp16_t> data_f16;
|
||||||
std::vector<float> data_f32;
|
std::vector<float> data_f32;
|
||||||
|
|
||||||
std::vector<int64_t> hist_all(1 << 4, 0);
|
|
||||||
|
|
||||||
while (true) {
|
while (true) {
|
||||||
int32_t n_dims;
|
int32_t n_dims;
|
||||||
int32_t length;
|
int32_t length;
|
||||||
@ -163,41 +176,39 @@ bool ggml_common_quantize_0(
|
|||||||
work.resize(nelements); // for quantization
|
work.resize(nelements); // for quantization
|
||||||
|
|
||||||
size_t cur_size = 0;
|
size_t cur_size = 0;
|
||||||
std::vector<int64_t> hist_cur(1 << 4, 0);
|
|
||||||
|
|
||||||
switch ((ggml_type) ttype) {
|
switch ((ggml_type) ttype) {
|
||||||
case GGML_TYPE_Q4_0:
|
case GGML_TYPE_Q4_0:
|
||||||
{
|
|
||||||
cur_size = ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
|
|
||||||
} break;
|
|
||||||
case GGML_TYPE_Q4_1:
|
case GGML_TYPE_Q4_1:
|
||||||
{
|
|
||||||
cur_size = ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
|
|
||||||
} break;
|
|
||||||
case GGML_TYPE_Q5_0:
|
case GGML_TYPE_Q5_0:
|
||||||
{
|
|
||||||
cur_size = ggml_quantize_q5_0(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
|
|
||||||
} break;
|
|
||||||
case GGML_TYPE_Q5_1:
|
case GGML_TYPE_Q5_1:
|
||||||
{
|
|
||||||
cur_size = ggml_quantize_q5_1(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
|
|
||||||
} break;
|
|
||||||
case GGML_TYPE_Q8_0:
|
case GGML_TYPE_Q8_0:
|
||||||
|
case GGML_TYPE_Q2_K:
|
||||||
|
case GGML_TYPE_Q3_K:
|
||||||
|
case GGML_TYPE_Q4_K:
|
||||||
|
case GGML_TYPE_Q5_K:
|
||||||
|
case GGML_TYPE_Q6_K:
|
||||||
{
|
{
|
||||||
cur_size = ggml_quantize_q8_0(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
|
cur_size = ggml_quantize_chunk((ggml_type) ttype, data_f32.data(), work.data(), 0, nelements/ne[0], ne[0], nullptr);
|
||||||
} break;
|
} break;
|
||||||
case GGML_TYPE_F32:
|
case GGML_TYPE_F32:
|
||||||
case GGML_TYPE_F16:
|
case GGML_TYPE_F16:
|
||||||
case GGML_TYPE_I8:
|
case GGML_TYPE_I8:
|
||||||
case GGML_TYPE_I16:
|
case GGML_TYPE_I16:
|
||||||
case GGML_TYPE_I32:
|
case GGML_TYPE_I32:
|
||||||
|
case GGML_TYPE_I64:
|
||||||
|
case GGML_TYPE_F64:
|
||||||
case GGML_TYPE_Q8_1:
|
case GGML_TYPE_Q8_1:
|
||||||
case GGML_TYPE_Q2_K:
|
|
||||||
case GGML_TYPE_Q3_K:
|
|
||||||
case GGML_TYPE_Q4_K:
|
|
||||||
case GGML_TYPE_Q5_K:
|
|
||||||
case GGML_TYPE_Q6_K:
|
|
||||||
case GGML_TYPE_Q8_K:
|
case GGML_TYPE_Q8_K:
|
||||||
|
case GGML_TYPE_IQ2_XXS:
|
||||||
|
case GGML_TYPE_IQ2_XS:
|
||||||
|
case GGML_TYPE_IQ2_S:
|
||||||
|
case GGML_TYPE_IQ3_XXS:
|
||||||
|
case GGML_TYPE_IQ3_S:
|
||||||
|
case GGML_TYPE_IQ1_S:
|
||||||
|
case GGML_TYPE_IQ4_NL:
|
||||||
|
case GGML_TYPE_IQ4_XS:
|
||||||
|
case GGML_TYPE_IQ1_M:
|
||||||
|
case GGML_TYPE_BF16:
|
||||||
case GGML_TYPE_COUNT:
|
case GGML_TYPE_COUNT:
|
||||||
{
|
{
|
||||||
fprintf(stderr, "%s: unsupported quantization type %d (%s)\n", __func__, ttype, ggml_type_name((ggml_type) ttype));
|
fprintf(stderr, "%s: unsupported quantization type %d (%s)\n", __func__, ttype, ggml_type_name((ggml_type) ttype));
|
||||||
@ -208,15 +219,7 @@ bool ggml_common_quantize_0(
|
|||||||
fout.write(reinterpret_cast<char *>(work.data()), cur_size);
|
fout.write(reinterpret_cast<char *>(work.data()), cur_size);
|
||||||
total_size_new += cur_size;
|
total_size_new += cur_size;
|
||||||
|
|
||||||
printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0);
|
printf("size = %8.2f MB -> %8.2f MB\n", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0);
|
||||||
for (int i = 0; i < (int) hist_cur.size(); ++i) {
|
|
||||||
hist_all[i] += hist_cur[i];
|
|
||||||
}
|
|
||||||
|
|
||||||
for (int i = 0; i < (int) hist_cur.size(); ++i) {
|
|
||||||
printf("%5.3f ", hist_cur[i] / (float)nelements);
|
|
||||||
}
|
|
||||||
printf("\n");
|
|
||||||
} else {
|
} else {
|
||||||
printf("size = %8.3f MB\n", data_u8.size()/1024.0/1024.0);
|
printf("size = %8.3f MB\n", data_u8.size()/1024.0/1024.0);
|
||||||
fout.write(reinterpret_cast<char *>(data_u8.data()), data_u8.size());
|
fout.write(reinterpret_cast<char *>(data_u8.data()), data_u8.size());
|
||||||
@ -229,18 +232,5 @@ bool ggml_common_quantize_0(
|
|||||||
printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
|
printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
|
||||||
printf("%s: quant size = %8.2f MB | ftype = %d (%s)\n", __func__, total_size_new/1024.0/1024.0, ftype, ggml_type_name(qtype));
|
printf("%s: quant size = %8.2f MB | ftype = %d (%s)\n", __func__, total_size_new/1024.0/1024.0, ftype, ggml_type_name(qtype));
|
||||||
|
|
||||||
{
|
|
||||||
int64_t sum_all = 0;
|
|
||||||
for (int i = 0; i < (int) hist_all.size(); ++i) {
|
|
||||||
sum_all += hist_all[i];
|
|
||||||
}
|
|
||||||
|
|
||||||
printf("%s: hist: ", __func__);
|
|
||||||
for (int i = 0; i < (int) hist_all.size(); ++i) {
|
|
||||||
printf("%5.3f ", hist_all[i] / (float)sum_all);
|
|
||||||
}
|
|
||||||
printf("\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -139,10 +139,13 @@ void audio_async::callback(uint8_t * stream, int len) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const size_t n_samples = len / sizeof(float);
|
size_t n_samples = len / sizeof(float);
|
||||||
|
|
||||||
m_audio_new.resize(n_samples);
|
if (n_samples > m_audio.size()) {
|
||||||
memcpy(m_audio_new.data(), stream, n_samples * sizeof(float));
|
n_samples = m_audio.size();
|
||||||
|
|
||||||
|
stream += (len - (n_samples * sizeof(float)));
|
||||||
|
}
|
||||||
|
|
||||||
//fprintf(stderr, "%s: %zu samples, pos %zu, len %zu\n", __func__, n_samples, m_audio_pos, m_audio_len);
|
//fprintf(stderr, "%s: %zu samples, pos %zu, len %zu\n", __func__, n_samples, m_audio_pos, m_audio_len);
|
||||||
|
|
||||||
@ -153,7 +156,7 @@ void audio_async::callback(uint8_t * stream, int len) {
|
|||||||
const size_t n0 = m_audio.size() - m_audio_pos;
|
const size_t n0 = m_audio.size() - m_audio_pos;
|
||||||
|
|
||||||
memcpy(&m_audio[m_audio_pos], stream, n0 * sizeof(float));
|
memcpy(&m_audio[m_audio_pos], stream, n0 * sizeof(float));
|
||||||
memcpy(&m_audio[0], &stream[n0], (n_samples - n0) * sizeof(float));
|
memcpy(&m_audio[0], stream + n0 * sizeof(float), (n_samples - n0) * sizeof(float));
|
||||||
|
|
||||||
m_audio_pos = (m_audio_pos + n_samples) % m_audio.size();
|
m_audio_pos = (m_audio_pos + n_samples) % m_audio.size();
|
||||||
m_audio_len = m_audio.size();
|
m_audio_len = m_audio.size();
|
||||||
|
@ -41,7 +41,6 @@ private:
|
|||||||
std::mutex m_mutex;
|
std::mutex m_mutex;
|
||||||
|
|
||||||
std::vector<float> m_audio;
|
std::vector<float> m_audio;
|
||||||
std::vector<float> m_audio_new;
|
|
||||||
size_t m_audio_pos = 0;
|
size_t m_audio_pos = 0;
|
||||||
size_t m_audio_len = 0;
|
size_t m_audio_len = 0;
|
||||||
};
|
};
|
||||||
|
@ -1,3 +1,5 @@
|
|||||||
|
#define _USE_MATH_DEFINES // for M_PI
|
||||||
|
|
||||||
#include "common.h"
|
#include "common.h"
|
||||||
|
|
||||||
// third-party utilities
|
// third-party utilities
|
||||||
@ -13,53 +15,75 @@
|
|||||||
#include <codecvt>
|
#include <codecvt>
|
||||||
#include <sstream>
|
#include <sstream>
|
||||||
|
|
||||||
#ifndef M_PI
|
|
||||||
#define M_PI 3.14159265358979323846
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if defined(_MSC_VER)
|
#if defined(_MSC_VER)
|
||||||
#pragma warning(disable: 4244 4267) // possible loss of data
|
#pragma warning(disable: 4244 4267) // possible loss of data
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#ifdef _WIN32
|
||||||
|
#include <fcntl.h>
|
||||||
|
#include <io.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef WHISPER_FFMPEG
|
||||||
|
// as implemented in ffmpeg_trancode.cpp only embedded in common lib if whisper built with ffmpeg support
|
||||||
|
extern bool ffmpeg_decode_audio(const std::string & ifname, std::vector<uint8_t> & wav_data);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Function to check if the next argument exists
|
||||||
|
std::string get_next_arg(int& i, int argc, char** argv, const std::string& flag, gpt_params& params) {
|
||||||
|
if (i + 1 < argc && argv[i + 1][0] != '-') {
|
||||||
|
return argv[++i];
|
||||||
|
} else {
|
||||||
|
fprintf(stderr, "error: %s requires one argument.\n", flag.c_str());
|
||||||
|
gpt_print_usage(argc, argv, params);
|
||||||
|
exit(0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
|
bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
|
||||||
for (int i = 1; i < argc; i++) {
|
for (int i = 1; i < argc; i++) {
|
||||||
std::string arg = argv[i];
|
std::string arg = argv[i];
|
||||||
|
|
||||||
if (arg == "-s" || arg == "--seed") {
|
if (arg == "-s" || arg == "--seed") {
|
||||||
params.seed = std::stoi(argv[++i]);
|
params.seed = std::stoi(get_next_arg(i, argc, argv, arg, params));
|
||||||
} else if (arg == "-t" || arg == "--threads") {
|
} else if (arg == "-t" || arg == "--threads") {
|
||||||
params.n_threads = std::stoi(argv[++i]);
|
params.n_threads = std::stoi(get_next_arg(i, argc, argv, arg, params));
|
||||||
} else if (arg == "-p" || arg == "--prompt") {
|
} else if (arg == "-p" || arg == "--prompt") {
|
||||||
params.prompt = argv[++i];
|
params.prompt = get_next_arg(i, argc, argv, arg, params);
|
||||||
} else if (arg == "-n" || arg == "--n_predict") {
|
} else if (arg == "-n" || arg == "--n_predict") {
|
||||||
params.n_predict = std::stoi(argv[++i]);
|
params.n_predict = std::stoi(get_next_arg(i, argc, argv, arg, params));
|
||||||
|
} else if (arg == "-np" || arg == "--n_parallel") {
|
||||||
|
params.n_parallel = std::stoi(get_next_arg(i, argc, argv, arg, params));
|
||||||
} else if (arg == "--top_k") {
|
} else if (arg == "--top_k") {
|
||||||
params.top_k = std::max(1, std::stoi(argv[++i]));
|
params.top_k = std::stoi(get_next_arg(i, argc, argv, arg, params));
|
||||||
} else if (arg == "--top_p") {
|
} else if (arg == "--top_p") {
|
||||||
params.top_p = std::stof(argv[++i]);
|
params.top_p = std::stof(get_next_arg(i, argc, argv, arg, params));
|
||||||
} else if (arg == "--temp") {
|
} else if (arg == "--temp") {
|
||||||
params.temp = std::stof(argv[++i]);
|
params.temp = std::stof(get_next_arg(i, argc, argv, arg, params));
|
||||||
} else if (arg == "--repeat-last-n") {
|
} else if (arg == "--repeat-last-n") {
|
||||||
params.repeat_last_n = std::stof(argv[++i]);
|
params.repeat_last_n = std::stoi(get_next_arg(i, argc, argv, arg, params));
|
||||||
} else if (arg == "--repeat-penalty") {
|
} else if (arg == "--repeat-penalty") {
|
||||||
params.repeat_penalty = std::stof(argv[++i]);
|
params.repeat_penalty = std::stof(get_next_arg(i, argc, argv, arg, params));
|
||||||
} else if (arg == "-b" || arg == "--batch_size") {
|
} else if (arg == "-b" || arg == "--batch_size") {
|
||||||
params.n_batch = std::stoi(argv[++i]);
|
params.n_batch= std::stoi(get_next_arg(i, argc, argv, arg, params));
|
||||||
|
} else if (arg == "-c" || arg == "--context") {
|
||||||
|
params.n_ctx= std::stoi(get_next_arg(i, argc, argv, arg, params));
|
||||||
|
} else if (arg == "-ngl" || arg == "--gpu-layers" || arg == "--n-gpu-layers") {
|
||||||
|
params.n_gpu_layers = std::stoi(get_next_arg(i, argc, argv, arg, params));
|
||||||
|
} else if (arg == "--ignore-eos") {
|
||||||
|
params.ignore_eos = true;
|
||||||
} else if (arg == "-m" || arg == "--model") {
|
} else if (arg == "-m" || arg == "--model") {
|
||||||
params.model = argv[++i];
|
params.model = get_next_arg(i, argc, argv, arg, params);
|
||||||
} else if (arg == "-i" || arg == "--interactive") {
|
} else if (arg == "-i" || arg == "--interactive") {
|
||||||
params.interactive = true;
|
params.interactive = true;
|
||||||
} else if (arg == "-ip" || arg == "--interactive-port") {
|
} else if (arg == "-ip" || arg == "--interactive-port") {
|
||||||
params.interactive = true;
|
params.interactive = true;
|
||||||
params.interactive_port = std::stoi(argv[++i]);
|
params.interactive_port = std::stoi(get_next_arg(i, argc, argv, arg, params));
|
||||||
} else if (arg == "-h" || arg == "--help") {
|
} else if (arg == "-h" || arg == "--help") {
|
||||||
gpt_print_usage(argc, argv, params);
|
gpt_print_usage(argc, argv, params);
|
||||||
exit(0);
|
exit(0);
|
||||||
} else if (arg == "-f" || arg == "--file") {
|
} else if (arg == "-f" || arg == "--file") {
|
||||||
if (++i > argc) {
|
get_next_arg(i, argc, argv, arg, params);
|
||||||
fprintf(stderr, "Invalid file param");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
std::ifstream file(argv[i]);
|
std::ifstream file(argv[i]);
|
||||||
if (!file) {
|
if (!file) {
|
||||||
fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
|
fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
|
||||||
@ -70,7 +94,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
|
|||||||
params.prompt.pop_back();
|
params.prompt.pop_back();
|
||||||
}
|
}
|
||||||
} else if (arg == "-tt" || arg == "--token_test") {
|
} else if (arg == "-tt" || arg == "--token_test") {
|
||||||
params.token_test = argv[++i];
|
params.token_test = get_next_arg(i, argc, argv, arg, params);
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
|
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
|
||||||
@ -102,6 +126,9 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
|
|||||||
fprintf(stderr, " --repeat-last-n N last n tokens to consider for penalize (default: %d, 0 = disabled)\n", params.repeat_last_n);
|
fprintf(stderr, " --repeat-last-n N last n tokens to consider for penalize (default: %d, 0 = disabled)\n", params.repeat_last_n);
|
||||||
fprintf(stderr, " --repeat-penalty N penalize repeat sequence of tokens (default: %.2f, 1.0 = disabled)\n", (double)params.repeat_penalty);
|
fprintf(stderr, " --repeat-penalty N penalize repeat sequence of tokens (default: %.2f, 1.0 = disabled)\n", (double)params.repeat_penalty);
|
||||||
fprintf(stderr, " -b N, --batch_size N batch size for prompt processing (default: %d)\n", params.n_batch);
|
fprintf(stderr, " -b N, --batch_size N batch size for prompt processing (default: %d)\n", params.n_batch);
|
||||||
|
fprintf(stderr, " -c N, --context N context / KV cache size (default: %d)\n", params.n_ctx);
|
||||||
|
fprintf(stderr, " --ignore-eos ignore EOS token during generation\n");
|
||||||
|
fprintf(stderr, " -ngl N, --gpu-layers N number of layers to offload to GPU on supported models (default: %d)\n", params.n_gpu_layers);
|
||||||
fprintf(stderr, " -m FNAME, --model FNAME\n");
|
fprintf(stderr, " -m FNAME, --model FNAME\n");
|
||||||
fprintf(stderr, " model path (default: %s)\n", params.model.c_str());
|
fprintf(stderr, " model path (default: %s)\n", params.model.c_str());
|
||||||
fprintf(stderr, "\n");
|
fprintf(stderr, "\n");
|
||||||
@ -598,12 +625,31 @@ gpt_vocab::id gpt_sample_top_k_top_p_repeat(
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool is_wav_buffer(const std::string buf) {
|
||||||
|
// RIFF ref: https://en.wikipedia.org/wiki/Resource_Interchange_File_Format
|
||||||
|
// WAV ref: https://www.mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html
|
||||||
|
if (buf.size() < 12 || buf.substr(0, 4) != "RIFF" || buf.substr(8, 4) != "WAVE") {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
uint32_t chunk_size = *reinterpret_cast<const uint32_t*>(buf.data() + 4);
|
||||||
|
if (chunk_size + 8 != buf.size()) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
bool read_wav(const std::string & fname, std::vector<float>& pcmf32, std::vector<std::vector<float>>& pcmf32s, bool stereo) {
|
bool read_wav(const std::string & fname, std::vector<float>& pcmf32, std::vector<std::vector<float>>& pcmf32s, bool stereo) {
|
||||||
drwav wav;
|
drwav wav;
|
||||||
std::vector<uint8_t> wav_data; // used for pipe input from stdin
|
std::vector<uint8_t> wav_data; // used for pipe input from stdin or ffmpeg decoding output
|
||||||
|
|
||||||
if (fname == "-") {
|
if (fname == "-") {
|
||||||
{
|
{
|
||||||
|
#ifdef _WIN32
|
||||||
|
_setmode(_fileno(stdin), _O_BINARY);
|
||||||
|
#endif
|
||||||
|
|
||||||
uint8_t buf[1024];
|
uint8_t buf[1024];
|
||||||
while (true)
|
while (true)
|
||||||
{
|
{
|
||||||
@ -622,28 +668,49 @@ bool read_wav(const std::string & fname, std::vector<float>& pcmf32, std::vector
|
|||||||
|
|
||||||
fprintf(stderr, "%s: read %zu bytes from stdin\n", __func__, wav_data.size());
|
fprintf(stderr, "%s: read %zu bytes from stdin\n", __func__, wav_data.size());
|
||||||
}
|
}
|
||||||
|
else if (is_wav_buffer(fname)) {
|
||||||
|
if (drwav_init_memory(&wav, fname.c_str(), fname.size(), nullptr) == false) {
|
||||||
|
fprintf(stderr, "error: failed to open WAV file from fname buffer\n");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
else if (drwav_init_file(&wav, fname.c_str(), nullptr) == false) {
|
else if (drwav_init_file(&wav, fname.c_str(), nullptr) == false) {
|
||||||
|
#if defined(WHISPER_FFMPEG)
|
||||||
|
if (ffmpeg_decode_audio(fname, wav_data) != 0) {
|
||||||
|
fprintf(stderr, "error: failed to ffmpeg decode '%s' \n", fname.c_str());
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (drwav_init_memory(&wav, wav_data.data(), wav_data.size(), nullptr) == false) {
|
||||||
|
fprintf(stderr, "error: failed to read wav data as wav \n");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
#else
|
||||||
fprintf(stderr, "error: failed to open '%s' as WAV file\n", fname.c_str());
|
fprintf(stderr, "error: failed to open '%s' as WAV file\n", fname.c_str());
|
||||||
return false;
|
return false;
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
if (wav.channels != 1 && wav.channels != 2) {
|
if (wav.channels != 1 && wav.channels != 2) {
|
||||||
fprintf(stderr, "%s: WAV file '%s' must be mono or stereo\n", __func__, fname.c_str());
|
fprintf(stderr, "%s: WAV file '%s' must be mono or stereo\n", __func__, fname.c_str());
|
||||||
|
drwav_uninit(&wav);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (stereo && wav.channels != 2) {
|
if (stereo && wav.channels != 2) {
|
||||||
fprintf(stderr, "%s: WAV file '%s' must be stereo for diarization\n", __func__, fname.c_str());
|
fprintf(stderr, "%s: WAV file '%s' must be stereo for diarization\n", __func__, fname.c_str());
|
||||||
|
drwav_uninit(&wav);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (wav.sampleRate != COMMON_SAMPLE_RATE) {
|
if (wav.sampleRate != COMMON_SAMPLE_RATE) {
|
||||||
fprintf(stderr, "%s: WAV file '%s' must be %i kHz\n", __func__, fname.c_str(), COMMON_SAMPLE_RATE/1000);
|
fprintf(stderr, "%s: WAV file '%s' must be %i kHz\n", __func__, fname.c_str(), COMMON_SAMPLE_RATE/1000);
|
||||||
|
drwav_uninit(&wav);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (wav.bitsPerSample != 16) {
|
if (wav.bitsPerSample != 16) {
|
||||||
fprintf(stderr, "%s: WAV file '%s' must be 16-bit\n", __func__, fname.c_str());
|
fprintf(stderr, "%s: WAV file '%s' must be 16-bit\n", __func__, fname.c_str());
|
||||||
|
drwav_uninit(&wav);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -755,3 +822,91 @@ float similarity(const std::string & s0, const std::string & s1) {
|
|||||||
|
|
||||||
return 1.0f - (dist / std::max(s0.size(), s1.size()));
|
return 1.0f - (dist / std::max(s0.size(), s1.size()));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool sam_params_parse(int argc, char ** argv, sam_params & params) {
|
||||||
|
for (int i = 1; i < argc; i++) {
|
||||||
|
std::string arg = argv[i];
|
||||||
|
|
||||||
|
if (arg == "-s" || arg == "--seed") {
|
||||||
|
params.seed = std::stoi(argv[++i]);
|
||||||
|
} else if (arg == "-t" || arg == "--threads") {
|
||||||
|
params.n_threads = std::stoi(argv[++i]);
|
||||||
|
} else if (arg == "-m" || arg == "--model") {
|
||||||
|
params.model = argv[++i];
|
||||||
|
} else if (arg == "-i" || arg == "--inp") {
|
||||||
|
params.fname_inp = argv[++i];
|
||||||
|
} else if (arg == "-o" || arg == "--out") {
|
||||||
|
params.fname_out = argv[++i];
|
||||||
|
} else if (arg == "-h" || arg == "--help") {
|
||||||
|
sam_print_usage(argc, argv, params);
|
||||||
|
exit(0);
|
||||||
|
} else {
|
||||||
|
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
|
||||||
|
sam_print_usage(argc, argv, params);
|
||||||
|
exit(0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
void sam_print_usage(int /*argc*/, char ** argv, const sam_params & params) {
|
||||||
|
fprintf(stderr, "usage: %s [options]\n", argv[0]);
|
||||||
|
fprintf(stderr, "\n");
|
||||||
|
fprintf(stderr, "options:\n");
|
||||||
|
fprintf(stderr, " -h, --help show this help message and exit\n");
|
||||||
|
fprintf(stderr, " -s SEED, --seed SEED RNG seed (default: -1)\n");
|
||||||
|
fprintf(stderr, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads);
|
||||||
|
fprintf(stderr, " -m FNAME, --model FNAME\n");
|
||||||
|
fprintf(stderr, " model path (default: %s)\n", params.model.c_str());
|
||||||
|
fprintf(stderr, " -i FNAME, --inp FNAME\n");
|
||||||
|
fprintf(stderr, " input file (default: %s)\n", params.fname_inp.c_str());
|
||||||
|
fprintf(stderr, " -o FNAME, --out FNAME\n");
|
||||||
|
fprintf(stderr, " output file (default: %s)\n", params.fname_out.c_str());
|
||||||
|
fprintf(stderr, "\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
// 500 -> 00:05.000
|
||||||
|
// 6000 -> 01:00.000
|
||||||
|
std::string to_timestamp(int64_t t, bool comma) {
|
||||||
|
int64_t msec = t * 10;
|
||||||
|
int64_t hr = msec / (1000 * 60 * 60);
|
||||||
|
msec = msec - hr * (1000 * 60 * 60);
|
||||||
|
int64_t min = msec / (1000 * 60);
|
||||||
|
msec = msec - min * (1000 * 60);
|
||||||
|
int64_t sec = msec / 1000;
|
||||||
|
msec = msec - sec * 1000;
|
||||||
|
|
||||||
|
char buf[32];
|
||||||
|
snprintf(buf, sizeof(buf), "%02d:%02d:%02d%s%03d", (int) hr, (int) min, (int) sec, comma ? "," : ".", (int) msec);
|
||||||
|
|
||||||
|
return std::string(buf);
|
||||||
|
}
|
||||||
|
|
||||||
|
int timestamp_to_sample(int64_t t, int n_samples, int whisper_sample_rate) {
|
||||||
|
return std::max(0, std::min((int) n_samples - 1, (int) ((t*whisper_sample_rate)/100)));
|
||||||
|
}
|
||||||
|
|
||||||
|
bool is_file_exist(const char *fileName)
|
||||||
|
{
|
||||||
|
std::ifstream infile(fileName);
|
||||||
|
return infile.good();
|
||||||
|
}
|
||||||
|
|
||||||
|
bool speak_with_file(const std::string & command, const std::string & text, const std::string & path, int voice_id)
|
||||||
|
{
|
||||||
|
std::ofstream speak_file(path.c_str());
|
||||||
|
if (speak_file.fail()) {
|
||||||
|
fprintf(stderr, "%s: failed to open speak_file\n", __func__);
|
||||||
|
return false;
|
||||||
|
} else {
|
||||||
|
speak_file.write(text.c_str(), text.size());
|
||||||
|
speak_file.close();
|
||||||
|
int ret = system((command + " " + std::to_string(voice_id) + " " + path).c_str());
|
||||||
|
if (ret != 0) {
|
||||||
|
fprintf(stderr, "%s: failed to speak\n", __func__);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
@ -7,18 +7,25 @@
|
|||||||
#include <vector>
|
#include <vector>
|
||||||
#include <random>
|
#include <random>
|
||||||
#include <thread>
|
#include <thread>
|
||||||
|
#include <ctime>
|
||||||
|
#include <fstream>
|
||||||
|
|
||||||
#define COMMON_SAMPLE_RATE 16000
|
#define COMMON_SAMPLE_RATE 16000
|
||||||
|
|
||||||
//
|
//
|
||||||
// CLI argument parsing
|
// GPT CLI argument parsing
|
||||||
//
|
//
|
||||||
|
|
||||||
struct gpt_params {
|
struct gpt_params {
|
||||||
int32_t seed = -1; // RNG seed
|
int32_t seed = -1; // RNG seed
|
||||||
int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
|
int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
|
||||||
int32_t n_predict = 200; // new tokens to predict
|
int32_t n_predict = 200; // new tokens to predict
|
||||||
int32_t n_batch = 8; // batch size for prompt processing
|
int32_t n_parallel = 1; // number of parallel streams
|
||||||
|
int32_t n_batch = 8; // batch size for prompt processing
|
||||||
|
int32_t n_ctx = 2048; // context size (this is the KV cache max size)
|
||||||
|
int32_t n_gpu_layers = 0; // number of layers to offlload to the GPU
|
||||||
|
|
||||||
|
bool ignore_eos = false; // ignore EOS token when generating text
|
||||||
|
|
||||||
// sampling parameters
|
// sampling parameters
|
||||||
int32_t top_k = 40;
|
int32_t top_k = 40;
|
||||||
@ -128,7 +135,11 @@ gpt_vocab::id gpt_sample_top_k_top_p_repeat(
|
|||||||
// Audio utils
|
// Audio utils
|
||||||
//
|
//
|
||||||
|
|
||||||
|
// Check if a buffer is a WAV audio file
|
||||||
|
bool is_wav_buffer(const std::string buf);
|
||||||
|
|
||||||
// Read WAV audio file and store the PCM data into pcmf32
|
// Read WAV audio file and store the PCM data into pcmf32
|
||||||
|
// fname can be a buffer of WAV data instead of a filename
|
||||||
// The sample rate of the audio must be equal to COMMON_SAMPLE_RATE
|
// The sample rate of the audio must be equal to COMMON_SAMPLE_RATE
|
||||||
// If stereo flag is set and the audio has 2 channels, the pcmf32s will contain 2 channel PCM
|
// If stereo flag is set and the audio has 2 channels, the pcmf32s will contain 2 channel PCM
|
||||||
bool read_wav(
|
bool read_wav(
|
||||||
@ -137,6 +148,104 @@ bool read_wav(
|
|||||||
std::vector<std::vector<float>> & pcmf32s,
|
std::vector<std::vector<float>> & pcmf32s,
|
||||||
bool stereo);
|
bool stereo);
|
||||||
|
|
||||||
|
// Write PCM data into WAV audio file
|
||||||
|
class wav_writer {
|
||||||
|
private:
|
||||||
|
std::ofstream file;
|
||||||
|
uint32_t dataSize = 0;
|
||||||
|
std::string wav_filename;
|
||||||
|
|
||||||
|
bool write_header(const uint32_t sample_rate,
|
||||||
|
const uint16_t bits_per_sample,
|
||||||
|
const uint16_t channels) {
|
||||||
|
|
||||||
|
file.write("RIFF", 4);
|
||||||
|
file.write("\0\0\0\0", 4); // Placeholder for file size
|
||||||
|
file.write("WAVE", 4);
|
||||||
|
file.write("fmt ", 4);
|
||||||
|
|
||||||
|
const uint32_t sub_chunk_size = 16;
|
||||||
|
const uint16_t audio_format = 1; // PCM format
|
||||||
|
const uint32_t byte_rate = sample_rate * channels * bits_per_sample / 8;
|
||||||
|
const uint16_t block_align = channels * bits_per_sample / 8;
|
||||||
|
|
||||||
|
file.write(reinterpret_cast<const char *>(&sub_chunk_size), 4);
|
||||||
|
file.write(reinterpret_cast<const char *>(&audio_format), 2);
|
||||||
|
file.write(reinterpret_cast<const char *>(&channels), 2);
|
||||||
|
file.write(reinterpret_cast<const char *>(&sample_rate), 4);
|
||||||
|
file.write(reinterpret_cast<const char *>(&byte_rate), 4);
|
||||||
|
file.write(reinterpret_cast<const char *>(&block_align), 2);
|
||||||
|
file.write(reinterpret_cast<const char *>(&bits_per_sample), 2);
|
||||||
|
file.write("data", 4);
|
||||||
|
file.write("\0\0\0\0", 4); // Placeholder for data size
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// It is assumed that PCM data is normalized to a range from -1 to 1
|
||||||
|
bool write_audio(const float * data, size_t length) {
|
||||||
|
for (size_t i = 0; i < length; ++i) {
|
||||||
|
const int16_t intSample = int16_t(data[i] * 32767);
|
||||||
|
file.write(reinterpret_cast<const char *>(&intSample), sizeof(int16_t));
|
||||||
|
dataSize += sizeof(int16_t);
|
||||||
|
}
|
||||||
|
if (file.is_open()) {
|
||||||
|
file.seekp(4, std::ios::beg);
|
||||||
|
uint32_t fileSize = 36 + dataSize;
|
||||||
|
file.write(reinterpret_cast<char *>(&fileSize), 4);
|
||||||
|
file.seekp(40, std::ios::beg);
|
||||||
|
file.write(reinterpret_cast<char *>(&dataSize), 4);
|
||||||
|
file.seekp(0, std::ios::end);
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool open_wav(const std::string & filename) {
|
||||||
|
if (filename != wav_filename) {
|
||||||
|
if (file.is_open()) {
|
||||||
|
file.close();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (!file.is_open()) {
|
||||||
|
file.open(filename, std::ios::binary);
|
||||||
|
wav_filename = filename;
|
||||||
|
dataSize = 0;
|
||||||
|
}
|
||||||
|
return file.is_open();
|
||||||
|
}
|
||||||
|
|
||||||
|
public:
|
||||||
|
bool open(const std::string & filename,
|
||||||
|
const uint32_t sample_rate,
|
||||||
|
const uint16_t bits_per_sample,
|
||||||
|
const uint16_t channels) {
|
||||||
|
|
||||||
|
if (open_wav(filename)) {
|
||||||
|
write_header(sample_rate, bits_per_sample, channels);
|
||||||
|
} else {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool close() {
|
||||||
|
file.close();
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool write(const float * data, size_t length) {
|
||||||
|
return write_audio(data, length);
|
||||||
|
}
|
||||||
|
|
||||||
|
~wav_writer() {
|
||||||
|
if (file.is_open()) {
|
||||||
|
file.close();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
// Apply a high-pass frequency filter to PCM audio
|
// Apply a high-pass frequency filter to PCM audio
|
||||||
// Suppresses frequencies below cutoff Hz
|
// Suppresses frequencies below cutoff Hz
|
||||||
void high_pass_filter(
|
void high_pass_filter(
|
||||||
@ -155,3 +264,48 @@ bool vad_simple(
|
|||||||
|
|
||||||
// compute similarity between two strings using Levenshtein distance
|
// compute similarity between two strings using Levenshtein distance
|
||||||
float similarity(const std::string & s0, const std::string & s1);
|
float similarity(const std::string & s0, const std::string & s1);
|
||||||
|
|
||||||
|
//
|
||||||
|
// SAM argument parsing
|
||||||
|
//
|
||||||
|
|
||||||
|
struct sam_params {
|
||||||
|
int32_t seed = -1; // RNG seed
|
||||||
|
int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
|
||||||
|
|
||||||
|
std::string model = "models/sam-vit-b/ggml-model-f16.bin"; // model path
|
||||||
|
std::string fname_inp = "img.jpg";
|
||||||
|
std::string fname_out = "img.out";
|
||||||
|
};
|
||||||
|
|
||||||
|
bool sam_params_parse(int argc, char ** argv, sam_params & params);
|
||||||
|
|
||||||
|
void sam_print_usage(int argc, char ** argv, const sam_params & params);
|
||||||
|
|
||||||
|
//
|
||||||
|
// Terminal utils
|
||||||
|
//
|
||||||
|
|
||||||
|
|
||||||
|
// Terminal color map. 10 colors grouped in ranges [0.0, 0.1, ..., 0.9]
|
||||||
|
// Lowest is red, middle is yellow, highest is green.
|
||||||
|
const std::vector<std::string> k_colors = {
|
||||||
|
"\033[38;5;196m", "\033[38;5;202m", "\033[38;5;208m", "\033[38;5;214m", "\033[38;5;220m",
|
||||||
|
"\033[38;5;226m", "\033[38;5;190m", "\033[38;5;154m", "\033[38;5;118m", "\033[38;5;82m",
|
||||||
|
};
|
||||||
|
|
||||||
|
//
|
||||||
|
// Other utils
|
||||||
|
//
|
||||||
|
|
||||||
|
// convert timestamp to string, 6000 -> 01:00.000
|
||||||
|
std::string to_timestamp(int64_t t, bool comma = false);
|
||||||
|
|
||||||
|
// given a timestamp get the sample
|
||||||
|
int timestamp_to_sample(int64_t t, int n_samples, int whisper_sample_rate);
|
||||||
|
|
||||||
|
// check if file exists using ifstream
|
||||||
|
bool is_file_exist(const char *fileName);
|
||||||
|
|
||||||
|
// write text to file, and call system("command voice_id file")
|
||||||
|
bool speak_with_file(const std::string & command, const std::string & text, const std::string & path, int voice_id);
|
||||||
|
350
examples/ffmpeg-transcode.cpp
Normal file
350
examples/ffmpeg-transcode.cpp
Normal file
@ -0,0 +1,350 @@
|
|||||||
|
/* SPDX-License-Identifier: GPL-2.0 */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* transcode.c - convert audio file to WAVE
|
||||||
|
*
|
||||||
|
* Copyright (C) 2019 Andrew Clayton <andrew@digital-domain.net>
|
||||||
|
* Copyright (C) 2024 William Tambellini <william.tambellini@gmail.com>
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Just for conveninent C++ API
|
||||||
|
#include <vector>
|
||||||
|
#include <string>
|
||||||
|
|
||||||
|
// C
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include <stdbool.h>
|
||||||
|
#include <stdint.h>
|
||||||
|
#include <sys/types.h>
|
||||||
|
#include <sys/stat.h>
|
||||||
|
#include <fcntl.h>
|
||||||
|
#include <unistd.h>
|
||||||
|
#include <sys/mman.h>
|
||||||
|
|
||||||
|
extern "C" {
|
||||||
|
#include <libavutil/opt.h>
|
||||||
|
#include <libavcodec/avcodec.h>
|
||||||
|
#include <libavformat/avformat.h>
|
||||||
|
#include <libswresample/swresample.h>
|
||||||
|
}
|
||||||
|
|
||||||
|
typedef uint64_t u64;
|
||||||
|
typedef int64_t s64;
|
||||||
|
typedef uint32_t u32;
|
||||||
|
typedef int32_t s32;
|
||||||
|
typedef uint16_t u16;
|
||||||
|
typedef int16_t s16;
|
||||||
|
typedef uint8_t u8;
|
||||||
|
typedef int8_t s8;
|
||||||
|
|
||||||
|
#define WAVE_SAMPLE_RATE 16000
|
||||||
|
#define AVIO_CTX_BUF_SZ 4096
|
||||||
|
|
||||||
|
static const char* ffmpegLog = getenv("FFMPEG_LOG");
|
||||||
|
// Todo: add __FILE__ __LINE__
|
||||||
|
#define LOG(...) \
|
||||||
|
do { if (ffmpegLog) fprintf(stderr, __VA_ARGS__); } while(0) // C99
|
||||||
|
|
||||||
|
/*
|
||||||
|
* WAVE file header based on definition from
|
||||||
|
* https://gist.github.com/Jon-Schneider/8b7c53d27a7a13346a643dac9c19d34f
|
||||||
|
*
|
||||||
|
* We must ensure this structure doesn't have any holes or
|
||||||
|
* padding so we can just map it straight to the WAVE data.
|
||||||
|
*/
|
||||||
|
struct wave_hdr {
|
||||||
|
/* RIFF Header: "RIFF" */
|
||||||
|
char riff_header[4];
|
||||||
|
/* size of audio data + sizeof(struct wave_hdr) - 8 */
|
||||||
|
int wav_size;
|
||||||
|
/* "WAVE" */
|
||||||
|
char wav_header[4];
|
||||||
|
|
||||||
|
/* Format Header */
|
||||||
|
/* "fmt " (includes trailing space) */
|
||||||
|
char fmt_header[4];
|
||||||
|
/* Should be 16 for PCM */
|
||||||
|
int fmt_chunk_size;
|
||||||
|
/* Should be 1 for PCM. 3 for IEEE Float */
|
||||||
|
s16 audio_format;
|
||||||
|
s16 num_channels;
|
||||||
|
int sample_rate;
|
||||||
|
/*
|
||||||
|
* Number of bytes per second
|
||||||
|
* sample_rate * num_channels * bit_depth/8
|
||||||
|
*/
|
||||||
|
int byte_rate;
|
||||||
|
/* num_channels * bytes per sample */
|
||||||
|
s16 sample_alignment;
|
||||||
|
/* bits per sample */
|
||||||
|
s16 bit_depth;
|
||||||
|
|
||||||
|
/* Data Header */
|
||||||
|
/* "data" */
|
||||||
|
char data_header[4];
|
||||||
|
/*
|
||||||
|
* size of audio
|
||||||
|
* number of samples * num_channels * bit_depth/8
|
||||||
|
*/
|
||||||
|
int data_bytes;
|
||||||
|
} __attribute__((__packed__));
|
||||||
|
|
||||||
|
struct audio_buffer {
|
||||||
|
u8 *ptr;
|
||||||
|
int size; /* size left in the buffer */
|
||||||
|
};
|
||||||
|
|
||||||
|
static void set_wave_hdr(wave_hdr& wh, size_t size) {
|
||||||
|
memcpy(&wh.riff_header, "RIFF", 4);
|
||||||
|
wh.wav_size = size + sizeof(struct wave_hdr) - 8;
|
||||||
|
memcpy(&wh.wav_header, "WAVE", 4);
|
||||||
|
memcpy(&wh.fmt_header, "fmt ", 4);
|
||||||
|
wh.fmt_chunk_size = 16;
|
||||||
|
wh.audio_format = 1;
|
||||||
|
wh.num_channels = 1;
|
||||||
|
wh.sample_rate = WAVE_SAMPLE_RATE;
|
||||||
|
wh.sample_alignment = 2;
|
||||||
|
wh.bit_depth = 16;
|
||||||
|
wh.byte_rate = wh.sample_rate * wh.sample_alignment;
|
||||||
|
memcpy(&wh.data_header, "data", 4);
|
||||||
|
wh.data_bytes = size;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void write_wave_hdr(int fd, size_t size) {
|
||||||
|
struct wave_hdr wh;
|
||||||
|
set_wave_hdr(wh, size);
|
||||||
|
write(fd, &wh, sizeof(struct wave_hdr));
|
||||||
|
}
|
||||||
|
|
||||||
|
static int map_file(int fd, u8 **ptr, size_t *size)
|
||||||
|
{
|
||||||
|
struct stat sb;
|
||||||
|
|
||||||
|
fstat(fd, &sb);
|
||||||
|
*size = sb.st_size;
|
||||||
|
|
||||||
|
*ptr = (u8*)mmap(NULL, *size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
|
||||||
|
if (*ptr == MAP_FAILED) {
|
||||||
|
perror("mmap");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int read_packet(void *opaque, u8 *buf, int buf_size)
|
||||||
|
{
|
||||||
|
struct audio_buffer *audio_buf = (audio_buffer*)opaque;
|
||||||
|
|
||||||
|
buf_size = FFMIN(buf_size, audio_buf->size);
|
||||||
|
|
||||||
|
/* copy internal buffer data to buf */
|
||||||
|
memcpy(buf, audio_buf->ptr, buf_size);
|
||||||
|
audio_buf->ptr += buf_size;
|
||||||
|
audio_buf->size -= buf_size;
|
||||||
|
|
||||||
|
return buf_size;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void convert_frame(struct SwrContext *swr, AVCodecContext *codec,
|
||||||
|
AVFrame *frame, s16 **data, int *size, bool flush)
|
||||||
|
{
|
||||||
|
int nr_samples;
|
||||||
|
s64 delay;
|
||||||
|
u8 *buffer;
|
||||||
|
|
||||||
|
delay = swr_get_delay(swr, codec->sample_rate);
|
||||||
|
nr_samples = av_rescale_rnd(delay + frame->nb_samples,
|
||||||
|
WAVE_SAMPLE_RATE, codec->sample_rate,
|
||||||
|
AV_ROUND_UP);
|
||||||
|
av_samples_alloc(&buffer, NULL, 1, nr_samples, AV_SAMPLE_FMT_S16, 0);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* !flush is used to check if we are flushing any remaining
|
||||||
|
* conversion buffers...
|
||||||
|
*/
|
||||||
|
nr_samples = swr_convert(swr, &buffer, nr_samples,
|
||||||
|
!flush ? (const u8 **)frame->data : NULL,
|
||||||
|
!flush ? frame->nb_samples : 0);
|
||||||
|
|
||||||
|
    *data = (s16*)realloc(*data, (*size + nr_samples) * sizeof(s16));
    memcpy(*data + *size, buffer, nr_samples * sizeof(s16));
    *size += nr_samples;
    av_freep(&buffer);
}

static bool is_audio_stream(const AVStream *stream)
{
    if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
        return true;

    return false;
}

// Return non-zero on error, 0 on success
// audio_buffer: input memory
// data: decoded output audio data (wav file)
// size: size of output data
static int decode_audio(struct audio_buffer *audio_buf, s16 **data, int *size)
{
    LOG("decode_audio: input size: %d\n", audio_buf->size);
    AVFormatContext *fmt_ctx;
    AVIOContext *avio_ctx;
    AVStream *stream;
    AVCodecContext *codec;
    AVPacket packet;
    AVFrame *frame;
    struct SwrContext *swr;
    u8 *avio_ctx_buffer;
    unsigned int i;
    int stream_index = -1;
    int err;
    const size_t errbuffsize = 1024;
    char errbuff[errbuffsize];

    av_register_all(); // from avformat. Still a must-have call for ffmpeg v3! (can be skipped for later versions)

    fmt_ctx = avformat_alloc_context();
    avio_ctx_buffer = (u8*)av_malloc(AVIO_CTX_BUF_SZ);
    LOG("Creating an avio context: AVIO_CTX_BUF_SZ=%d\n", AVIO_CTX_BUF_SZ);
    avio_ctx = avio_alloc_context(avio_ctx_buffer, AVIO_CTX_BUF_SZ, 0, audio_buf, &read_packet, NULL, NULL);
    fmt_ctx->pb = avio_ctx;

    // open the input stream and read header
    err = avformat_open_input(&fmt_ctx, NULL, NULL, NULL);
    if (err) {
        LOG("Could not read audio buffer: %d: %s\n", err, av_make_error_string(errbuff, errbuffsize, err));
        return err;
    }

    err = avformat_find_stream_info(fmt_ctx, NULL);
    if (err < 0) {
        LOG("Could not retrieve stream info from audio buffer: %d\n", err);
        return err;
    }

    for (i = 0; i < fmt_ctx->nb_streams; i++) {
        if (is_audio_stream(fmt_ctx->streams[i])) {
            stream_index = i;
            break;
        }
    }

    if (stream_index == -1) {
        LOG("Could not retrieve audio stream from buffer\n");
        return -1;
    }

    stream = fmt_ctx->streams[stream_index];
    codec = avcodec_alloc_context3(
            avcodec_find_decoder(stream->codecpar->codec_id));
    avcodec_parameters_to_context(codec, stream->codecpar);
    err = avcodec_open2(codec, avcodec_find_decoder(codec->codec_id),
            NULL);
    if (err) {
        LOG("Failed to open decoder for stream #%d in audio buffer\n", stream_index);
        return err;
    }

    /* prepare resampler */
    swr = swr_alloc();

    av_opt_set_int(swr, "in_channel_count", codec->channels, 0);
    av_opt_set_int(swr, "out_channel_count", 1, 0);
    av_opt_set_int(swr, "in_channel_layout", codec->channel_layout, 0);
    av_opt_set_int(swr, "out_channel_layout", AV_CH_LAYOUT_MONO, 0);
    av_opt_set_int(swr, "in_sample_rate", codec->sample_rate, 0);
    av_opt_set_int(swr, "out_sample_rate", WAVE_SAMPLE_RATE, 0);
    av_opt_set_sample_fmt(swr, "in_sample_fmt", codec->sample_fmt, 0);
    av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);

    swr_init(swr);
    if (!swr_is_initialized(swr)) {
        LOG("Resampler has not been properly initialized\n");
        return -1;
    }

    av_init_packet(&packet);
    frame = av_frame_alloc();
    if (!frame) {
        LOG("Error allocating the frame\n");
        return -1;
    }

    /* iterate through frames */
    *data = NULL;
    *size = 0;
    while (av_read_frame(fmt_ctx, &packet) >= 0) {
        avcodec_send_packet(codec, &packet);

        err = avcodec_receive_frame(codec, frame);
        if (err == AVERROR(EAGAIN))
            continue;

        convert_frame(swr, codec, frame, data, size, false);
    }
    /* Flush any remaining conversion buffers... */
    convert_frame(swr, codec, frame, data, size, true);

    av_frame_free(&frame);
    swr_free(&swr);
    //avio_context_free(); // todo?
    avcodec_close(codec);
    avformat_close_input(&fmt_ctx);
    avformat_free_context(fmt_ctx);

    if (avio_ctx) {
        av_freep(&avio_ctx->buffer);
        av_freep(&avio_ctx);
    }

    return 0;
}

// in-memory decoding/conversion/resampling:
// ifname: input file path
// owav_data: in-memory wav file. Can be forwarded as-is to whisper/drwav
// return 0 on success
int ffmpeg_decode_audio(const std::string &ifname, std::vector<uint8_t>& owav_data) {
    LOG("ffmpeg_decode_audio: %s\n", ifname.c_str());
    int ifd = open(ifname.c_str(), O_RDONLY);
    if (ifd == -1) {
        fprintf(stderr, "Couldn't open input file %s\n", ifname.c_str());
        return -1;
    }
    u8 *ibuf = NULL;
    size_t ibuf_size;
    int err = map_file(ifd, &ibuf, &ibuf_size);
    if (err) {
        LOG("Couldn't map input file %s\n", ifname.c_str());
        return err;
    }
    LOG("Mapped input file: %x size: %d\n", ibuf, ibuf_size);
    struct audio_buffer inaudio_buf;
    inaudio_buf.ptr = ibuf;
    inaudio_buf.size = ibuf_size;

    s16 *odata = NULL;
    int osize = 0;

    err = decode_audio(&inaudio_buf, &odata, &osize);
    LOG("decode_audio returned %d \n", err);
    if (err != 0) {
        LOG("decode_audio failed\n");
        return err;
    }
    LOG("decode_audio output size: %d\n", osize);

    wave_hdr wh;
    const size_t outdatasize = osize * sizeof(s16);
    set_wave_hdr(wh, outdatasize);
    owav_data.resize(sizeof(wave_hdr) + outdatasize);
    // header:
    memcpy(owav_data.data(), &wh, sizeof(wave_hdr));
    // the data:
    memcpy(owav_data.data() + sizeof(wave_hdr), odata, osize * sizeof(s16));

    return 0;
}
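To show how the decoder above might be driven end to end, here is a minimal, hypothetical driver sketch. `ffmpeg_decode_audio` is the function defined above; the downstream handling (parsing the in-memory WAV, e.g. with dr_wav, and feeding the samples to `whisper_full`) is only described in comments, since that wiring is an assumption about the caller rather than part of this file.

```cpp
// Hypothetical driver for the in-memory decoder above (not part of the patch).
// It decodes any ffmpeg-supported input into a 16 kHz mono s16 WAV buffer and
// leaves the whisper side as a comment, since that wiring lives elsewhere.
#include <cstdio>
#include <string>
#include <vector>

int ffmpeg_decode_audio(const std::string & ifname, std::vector<uint8_t> & owav_data); // defined above

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <audio-file>\n", argv[0]);
        return 1;
    }

    std::vector<uint8_t> wav_data;
    if (ffmpeg_decode_audio(argv[1], wav_data) != 0) {
        fprintf(stderr, "failed to decode %s\n", argv[1]);
        return 1;
    }

    // wav_data now holds a complete WAV file in memory; it can be parsed like
    // an on-disk file (e.g. with dr_wav) and the float samples passed to
    // whisper_full(), as the other examples do.
    fprintf(stdout, "decoded %zu bytes of WAV data\n", wav_data.size());
    return 0;
}
```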
examples/grammar-parser.cpp (new file, 423 lines)
@@ -0,0 +1,423 @@
#include "grammar-parser.h"
#include <cstdint>
#include <cwchar>
#include <string>
#include <utility>
#include <stdexcept>
#include <exception>

namespace grammar_parser {
    // NOTE: assumes valid utf8 (but checks for overrun)
    // copied from whisper.cpp
    std::pair<uint32_t, const char *> decode_utf8(const char * src) {
        static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
        uint8_t first_byte = static_cast<uint8_t>(*src);
        uint8_t highbits = first_byte >> 4;
        int len = lookup[highbits];
        uint8_t mask = (1 << (8 - len)) - 1;
        uint32_t value = first_byte & mask;
        const char * end = src + len; // may overrun!
        const char * pos = src + 1;
        for ( ; pos < end && *pos; pos++) {
            value = (value << 6) + (static_cast<uint8_t>(*pos) & 0x3F);
        }
        return std::make_pair(value, pos);
    }

    uint32_t get_symbol_id(parse_state & state, const char * src, size_t len) {
        uint32_t next_id = static_cast<uint32_t>(state.symbol_ids.size());
        auto result = state.symbol_ids.insert(std::make_pair(std::string(src, len), next_id));
        return result.first->second;
    }

    uint32_t generate_symbol_id(parse_state & state, const std::string & base_name) {
        uint32_t next_id = static_cast<uint32_t>(state.symbol_ids.size());
        state.symbol_ids[base_name + '_' + std::to_string(next_id)] = next_id;
        return next_id;
    }

    void add_rule(
            parse_state & state,
            uint32_t rule_id,
            const std::vector<whisper_grammar_element> & rule) {
        if (state.rules.size() <= rule_id) {
            state.rules.resize(rule_id + 1);
        }
        state.rules[rule_id] = rule;
    }

    bool is_word_char(char c) {
        return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '-' || ('0' <= c && c <= '9');
    }

    std::pair<uint32_t, const char *> parse_hex(const char * src, int size) {
        const char * pos = src;
        const char * end = src + size;
        uint32_t value = 0;
        for ( ; pos < end && *pos; pos++) {
            value <<= 4;
            char c = *pos;
            if ('a' <= c && c <= 'f') {
                value += c - 'a' + 10;
            } else if ('A' <= c && c <= 'F') {
                value += c - 'A' + 10;
            } else if ('0' <= c && c <= '9') {
                value += c - '0';
            } else {
                break;
            }
        }
        if (pos != end) {
            throw std::runtime_error("expecting " + std::to_string(size) + " hex chars at " + src);
        }
        return std::make_pair(value, pos);
    }

    const char * parse_space(const char * src, bool newline_ok) {
        const char * pos = src;
        while (*pos == ' ' || *pos == '\t' || *pos == '#' ||
                (newline_ok && (*pos == '\r' || *pos == '\n'))) {
            if (*pos == '#') {
                while (*pos && *pos != '\r' && *pos != '\n') {
                    pos++;
                }
            } else {
                pos++;
            }
        }
        return pos;
    }

    const char * parse_name(const char * src) {
        const char * pos = src;
        while (is_word_char(*pos)) {
            pos++;
        }
        if (pos == src) {
            throw std::runtime_error(std::string("expecting name at ") + src);
        }
        return pos;
    }

    std::pair<uint32_t, const char *> parse_char(const char * src) {
        if (*src == '\\') {
            switch (src[1]) {
                case 'x': return parse_hex(src + 2, 2);
                case 'u': return parse_hex(src + 2, 4);
                case 'U': return parse_hex(src + 2, 8);
                case 't': return std::make_pair('\t', src + 2);
                case 'r': return std::make_pair('\r', src + 2);
                case 'n': return std::make_pair('\n', src + 2);
                case '\\':
                case '"':
                case '[':
                case ']':
                    return std::make_pair(src[1], src + 2);
                default:
                    throw std::runtime_error(std::string("unknown escape at ") + src);
            }
        } else if (*src) {
            return decode_utf8(src);
        }
        throw std::runtime_error("unexpected end of input");
    }

    const char * parse_alternates(
            parse_state & state,
            const char * src,
            const std::string & rule_name,
            uint32_t rule_id,
            bool is_nested);

    const char * parse_sequence(
            parse_state & state,
            const char * src,
            const std::string & rule_name,
            std::vector<whisper_grammar_element> & out_elements,
            bool is_nested) {
        size_t last_sym_start = out_elements.size();
        const char * pos = src;
        while (*pos) {
            if (*pos == '"') { // literal string
                pos++;
                last_sym_start = out_elements.size();
                while (*pos != '"') {
                    auto char_pair = parse_char(pos);
                    pos = char_pair.second;
                    out_elements.push_back({WHISPER_GRETYPE_CHAR, char_pair.first});
                }
                pos = parse_space(pos + 1, is_nested);
            } else if (*pos == '[') { // char range(s)
                pos++;
                enum whisper_gretype start_type = WHISPER_GRETYPE_CHAR;
                if (*pos == '^') {
                    pos++;
                    start_type = WHISPER_GRETYPE_CHAR_NOT;
                }
                last_sym_start = out_elements.size();
                while (*pos != ']') {
                    auto char_pair = parse_char(pos);
                    pos = char_pair.second;
                    enum whisper_gretype type = last_sym_start < out_elements.size()
                        ? WHISPER_GRETYPE_CHAR_ALT
                        : start_type;

                    out_elements.push_back({type, char_pair.first});
                    if (pos[0] == '-' && pos[1] != ']') {
                        auto endchar_pair = parse_char(pos + 1);
                        pos = endchar_pair.second;
                        out_elements.push_back({WHISPER_GRETYPE_CHAR_RNG_UPPER, endchar_pair.first});
                    }
                }
                pos = parse_space(pos + 1, is_nested);
            } else if (is_word_char(*pos)) { // rule reference
                const char * name_end = parse_name(pos);
                uint32_t ref_rule_id = get_symbol_id(state, pos, name_end - pos);
                pos = parse_space(name_end, is_nested);
                last_sym_start = out_elements.size();
                out_elements.push_back({WHISPER_GRETYPE_RULE_REF, ref_rule_id});
            } else if (*pos == '(') { // grouping
                // parse nested alternates into synthesized rule
                pos = parse_space(pos + 1, true);
                uint32_t sub_rule_id = generate_symbol_id(state, rule_name);
                pos = parse_alternates(state, pos, rule_name, sub_rule_id, true);
                last_sym_start = out_elements.size();
                // output reference to synthesized rule
                out_elements.push_back({WHISPER_GRETYPE_RULE_REF, sub_rule_id});
                if (*pos != ')') {
                    throw std::runtime_error(std::string("expecting ')' at ") + pos);
                }
                pos = parse_space(pos + 1, is_nested);
            } else if (*pos == '*' || *pos == '+' || *pos == '?') { // repetition operator
                if (last_sym_start == out_elements.size()) {
                    throw std::runtime_error(std::string("expecting preceding item to */+/? at ") + pos);
                }

                // apply transformation to previous symbol (last_sym_start to end) according to
                // rewrite rules:
                // S* --> S' ::= S S' |
                // S+ --> S' ::= S S' | S
                // S? --> S' ::= S |
                uint32_t sub_rule_id = generate_symbol_id(state, rule_name);
                std::vector<whisper_grammar_element> sub_rule;
                // add preceding symbol to generated rule
                sub_rule.insert(
                    sub_rule.end(), out_elements.begin() + last_sym_start, out_elements.end());
                if (*pos == '*' || *pos == '+') {
                    // cause generated rule to recurse
                    sub_rule.push_back({WHISPER_GRETYPE_RULE_REF, sub_rule_id});
                }
                // mark start of alternate def
                sub_rule.push_back({WHISPER_GRETYPE_ALT, 0});
                if (*pos == '+') {
                    // add preceding symbol as alternate only for '+' (otherwise empty)
                    sub_rule.insert(
                        sub_rule.end(), out_elements.begin() + last_sym_start, out_elements.end());
                }
                sub_rule.push_back({WHISPER_GRETYPE_END, 0});
                add_rule(state, sub_rule_id, sub_rule);

                // in original rule, replace previous symbol with reference to generated rule
                out_elements.resize(last_sym_start);
                out_elements.push_back({WHISPER_GRETYPE_RULE_REF, sub_rule_id});

                pos = parse_space(pos + 1, is_nested);
            } else {
                break;
            }
        }
        return pos;
    }

    const char * parse_alternates(
            parse_state & state,
            const char * src,
            const std::string & rule_name,
            uint32_t rule_id,
            bool is_nested) {
        std::vector<whisper_grammar_element> rule;
        const char * pos = parse_sequence(state, src, rule_name, rule, is_nested);
        while (*pos == '|') {
            rule.push_back({WHISPER_GRETYPE_ALT, 0});
            pos = parse_space(pos + 1, true);
            pos = parse_sequence(state, pos, rule_name, rule, is_nested);
        }
        rule.push_back({WHISPER_GRETYPE_END, 0});
        add_rule(state, rule_id, rule);
        return pos;
    }

    const char * parse_rule(parse_state & state, const char * src) {
        const char * name_end = parse_name(src);
        const char * pos = parse_space(name_end, false);
        size_t name_len = name_end - src;
        uint32_t rule_id = get_symbol_id(state, src, name_len);
        const std::string name(src, name_len);

        if (!(pos[0] == ':' && pos[1] == ':' && pos[2] == '=')) {
            throw std::runtime_error(std::string("expecting ::= at ") + pos);
        }
        pos = parse_space(pos + 3, true);

        pos = parse_alternates(state, pos, name, rule_id, false);

        if (*pos == '\r') {
            pos += pos[1] == '\n' ? 2 : 1;
        } else if (*pos == '\n') {
            pos++;
        } else if (*pos) {
            throw std::runtime_error(std::string("expecting newline or end at ") + pos);
        }
        return parse_space(pos, true);
    }

    parse_state parse(const char * src) {
        try {
            parse_state state;
            const char * pos = parse_space(src, true);
            while (*pos) {
                pos = parse_rule(state, pos);
            }
            return state;
        } catch (const std::exception & err) {
            fprintf(stderr, "%s: error parsing grammar: %s\n", __func__, err.what());
            return parse_state();
        }
    }

    void print_grammar_char(FILE * file, uint32_t c) {
        if (0x20 <= c && c <= 0x7f) {
            fprintf(file, "%c", static_cast<char>(c));
        } else {
            // cop out of encoding UTF-8
            fprintf(file, "<U+%04X>", c);
        }
    }

    bool is_char_element(whisper_grammar_element elem) {
        switch (elem.type) {
            case WHISPER_GRETYPE_CHAR:           return true;
            case WHISPER_GRETYPE_CHAR_NOT:       return true;
            case WHISPER_GRETYPE_CHAR_ALT:       return true;
            case WHISPER_GRETYPE_CHAR_RNG_UPPER: return true;
            default:                             return false;
        }
    }

    void print_rule_binary(FILE * file, const std::vector<whisper_grammar_element> & rule) {
        for (auto elem : rule) {
            switch (elem.type) {
                case WHISPER_GRETYPE_END:            fprintf(file, "END");            break;
                case WHISPER_GRETYPE_ALT:            fprintf(file, "ALT");            break;
                case WHISPER_GRETYPE_RULE_REF:       fprintf(file, "RULE_REF");       break;
                case WHISPER_GRETYPE_CHAR:           fprintf(file, "CHAR");           break;
                case WHISPER_GRETYPE_CHAR_NOT:       fprintf(file, "CHAR_NOT");       break;
                case WHISPER_GRETYPE_CHAR_RNG_UPPER: fprintf(file, "CHAR_RNG_UPPER"); break;
                case WHISPER_GRETYPE_CHAR_ALT:       fprintf(file, "CHAR_ALT");       break;
            }
            switch (elem.type) {
                case WHISPER_GRETYPE_END:
                case WHISPER_GRETYPE_ALT:
                case WHISPER_GRETYPE_RULE_REF:
                    fprintf(file, "(%u) ", elem.value);
                    break;
                case WHISPER_GRETYPE_CHAR:
                case WHISPER_GRETYPE_CHAR_NOT:
                case WHISPER_GRETYPE_CHAR_RNG_UPPER:
                case WHISPER_GRETYPE_CHAR_ALT:
                    fprintf(file, "(\"");
                    print_grammar_char(file, elem.value);
                    fprintf(file, "\") ");
                    break;
            }
        }
        fprintf(file, "\n");
    }

    void print_rule(
            FILE * file,
            uint32_t rule_id,
            const std::vector<whisper_grammar_element> & rule,
            const std::map<uint32_t, std::string> & symbol_id_names) {
        if (rule.empty() || rule.back().type != WHISPER_GRETYPE_END) {
            throw std::runtime_error(
                "malformed rule, does not end with WHISPER_GRETYPE_END: " + std::to_string(rule_id));
        }
        fprintf(file, "%s ::= ", symbol_id_names.at(rule_id).c_str());
        for (size_t i = 0, end = rule.size() - 1; i < end; i++) {
            whisper_grammar_element elem = rule[i];
            switch (elem.type) {
                case WHISPER_GRETYPE_END:
                    throw std::runtime_error(
                        "unexpected end of rule: " + std::to_string(rule_id) + "," +
                        std::to_string(i));
                case WHISPER_GRETYPE_ALT:
                    fprintf(file, "| ");
                    break;
                case WHISPER_GRETYPE_RULE_REF:
                    fprintf(file, "%s ", symbol_id_names.at(elem.value).c_str());
                    break;
                case WHISPER_GRETYPE_CHAR:
                    fprintf(file, "[");
                    print_grammar_char(file, elem.value);
                    break;
                case WHISPER_GRETYPE_CHAR_NOT:
                    fprintf(file, "[^");
                    print_grammar_char(file, elem.value);
                    break;
                case WHISPER_GRETYPE_CHAR_RNG_UPPER:
                    if (i == 0 || !is_char_element(rule[i - 1])) {
                        throw std::runtime_error(
                            "WHISPER_GRETYPE_CHAR_RNG_UPPER without preceding char: " +
                            std::to_string(rule_id) + "," + std::to_string(i));
                    }
                    fprintf(file, "-");
                    print_grammar_char(file, elem.value);
                    break;
                case WHISPER_GRETYPE_CHAR_ALT:
                    if (i == 0 || !is_char_element(rule[i - 1])) {
                        throw std::runtime_error(
                            "WHISPER_GRETYPE_CHAR_ALT without preceding char: " +
                            std::to_string(rule_id) + "," + std::to_string(i));
                    }
                    print_grammar_char(file, elem.value);
                    break;
            }
            if (is_char_element(elem)) {
                switch (rule[i + 1].type) {
                    case WHISPER_GRETYPE_CHAR_ALT:
                    case WHISPER_GRETYPE_CHAR_RNG_UPPER:
                        break;
                    default:
                        fprintf(file, "] ");
                }
            }
        }
        fprintf(file, "\n");
    }

    void print_grammar(FILE * file, const parse_state & state) {
        try {
            std::map<uint32_t, std::string> symbol_id_names;
            for (auto kv : state.symbol_ids) {
                symbol_id_names[kv.second] = kv.first;
            }
            for (size_t i = 0, end = state.rules.size(); i < end; i++) {
                // fprintf(file, "%zu: ", i);
                // print_rule_binary(file, state.rules[i]);
                print_rule(file, uint32_t(i), state.rules[i], symbol_id_names);
                // fprintf(file, "\n");
            }
        } catch (const std::exception & err) {
            fprintf(stderr, "\n%s: error printing grammar: %s\n", __func__, err.what());
        }
    }

    std::vector<const whisper_grammar_element *> parse_state::c_rules() const{
        std::vector<const whisper_grammar_element *> ret;
        for (const auto & rule : rules) {
            ret.push_back(rule.data());
        }
        return ret;
    }
}
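To make the repetition rewrite in `parse_sequence` above concrete: a rule that uses `+` is replaced by a reference to a synthesized recursive rule. For a hypothetical input rule, the effective expansion looks roughly like this (the synthesized name `root_1` is illustrative; real names come from `generate_symbol_id`):

```
# hypothetical input rule
root ::= [a-z]+

# effective result of the S+ rewrite (S+ --> S' ::= S S' | S)
root   ::= root_1
root_1 ::= [a-z] root_1 | [a-z]
```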
examples/grammar-parser.h (new file, 29 lines)
@@ -0,0 +1,29 @@
// Implements a parser for an extended Backus-Naur form (BNF), producing the
// binary context-free grammar format specified by whisper.h. Supports character
// ranges, grouping, and repetition operators. As an example, a grammar for
// arithmetic might look like:
//
// root  ::= expr
// expr  ::= term ([-+*/] term)*
// term  ::= num | "(" space expr ")" space
// num   ::= [0-9]+ space
// space ::= [ \t\n]*

#pragma once
#include "whisper.h"
#include <vector>
#include <map>
#include <cstdint>
#include <string>

namespace grammar_parser {
    struct parse_state {
        std::map<std::string, uint32_t> symbol_ids;
        std::vector<std::vector<whisper_grammar_element>> rules;

        std::vector<const whisper_grammar_element *> c_rules() const;
    };

    parse_state parse(const char * src);
    void print_grammar(FILE * file, const parse_state & state);
}
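As a rough usage sketch (not taken from the repository), the header can be exercised on its own with the arithmetic grammar from the comment above:

```cpp
// Minimal sketch: parse the arithmetic grammar from the header comment and
// print the reconstructed rules. Only the declarations above are used.
#include "grammar-parser.h"

#include <cstdio>

int main() {
    const char * grammar =
        "root  ::= expr\n"
        "expr  ::= term ([-+*/] term)*\n"
        "term  ::= num | \"(\" space expr \")\" space\n"
        "num   ::= [0-9]+ space\n"
        "space ::= [ \\t\\n]*\n";

    grammar_parser::parse_state state = grammar_parser::parse(grammar);
    if (state.rules.empty()) {
        // parse() reports the error to stderr and returns an empty state
        fprintf(stderr, "failed to parse grammar\n");
        return 1;
    }

    // human-readable dump of the parsed rules
    grammar_parser::print_grammar(stdout, state);

    // c_rules() exposes the rules as raw pointers, the shape expected by the
    // whisper grammar sampling API
    fprintf(stdout, "parsed %zu rules\n", state.c_rules().size());
    return 0;
}
```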
@@ -22,6 +22,7 @@ var printTextarea = (function() {
 async function clearCache() {
     if (confirm('Are you sure you want to clear the cache?\nAll the models will be downloaded again.')) {
         indexedDB.deleteDatabase(dbName);
+        location.reload();
     }
 }

@@ -33,9 +34,6 @@ async function fetchRemote(url, cbProgress, cbPrint) {
         url,
         {
             method: 'GET',
-            headers: {
-                'Content-Type': 'application/octet-stream',
-            },
         }
     );

@@ -48,7 +48,7 @@ if [ -n "$3" ]; then
 fi

 # Whisper models
-models=( "tiny.en" "tiny" "base.en" "base" "small.en" "small" "medium.en" "medium" "large-v1" "large" )
+models=( "tiny.en" "tiny" "base.en" "base" "small.en" "small" "medium.en" "medium" "large-v1" "large-v2" "large-v3" )

 # list available models
 function list_models {

@@ -5,5 +5,5 @@ if (WHISPER_SDL2)

     include(DefaultTargetOptions)

-    target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${CMAKE_THREAD_LIBS_INIT})
+    target_link_libraries(${TARGET} PRIVATE common json_cpp common-sdl whisper ${CMAKE_THREAD_LIBS_INIT})
 endif ()
@@ -26,10 +26,11 @@ struct whisper_params {
     float vad_thold = 0.6f;
     float freq_thold = 100.0f;

-    bool speed_up = false;
     bool translate = false;
     bool print_special = false;
     bool print_energy = false;
+    bool use_gpu = true;
+    bool flash_attn = false;

     std::string language = "en";
     std::string model = "models/ggml-base.en.bin";

@@ -68,10 +69,11 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
         else if (arg == "-ac" || arg == "--audio-ctx") { params.audio_ctx = std::stoi(argv[++i]); }
         else if (arg == "-vth" || arg == "--vad-thold") { params.vad_thold = std::stof(argv[++i]); }
         else if (arg == "-fth" || arg == "--freq-thold") { params.freq_thold = std::stof(argv[++i]); }
-        else if (arg == "-su" || arg == "--speed-up") { params.speed_up = true; }
         else if (arg == "-tr" || arg == "--translate") { params.translate = true; }
         else if (arg == "-ps" || arg == "--print-special") { params.print_special = true; }
         else if (arg == "-pe" || arg == "--print-energy") { params.print_energy = true; }
+        else if (arg == "-ng" || arg == "--no-gpu") { params.use_gpu = false; }
+        else if (arg == "-fa" || arg == "--flash-attn") { params.flash_attn = true; }
         else if (arg == "-l" || arg == "--language") { params.language = argv[++i]; }
         else if (arg == "-m" || arg == "--model") { params.model = argv[++i]; }
         else {

@@ -98,10 +100,11 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
     fprintf(stderr, " -ac N, --audio-ctx N [%-7d] audio context size (0 - all)\n", params.audio_ctx);
     fprintf(stderr, " -vth N, --vad-thold N [%-7.2f] voice activity detection threshold\n", params.vad_thold);
     fprintf(stderr, " -fth N, --freq-thold N [%-7.2f] high-pass frequency cutoff\n", params.freq_thold);
-    fprintf(stderr, " -su, --speed-up [%-7s] speed up audio by x2 (reduced accuracy)\n", params.speed_up ? "true" : "false");
     fprintf(stderr, " -tr, --translate [%-7s] translate from source language to english\n", params.translate ? "true" : "false");
     fprintf(stderr, " -ps, --print-special [%-7s] print special tokens\n", params.print_special ? "true" : "false");
     fprintf(stderr, " -pe, --print-energy [%-7s] print sound energy (for debugging)\n", params.print_energy ? "true" : "false");
+    fprintf(stderr, " -ng, --no-gpu [%-7s] disable GPU\n", params.use_gpu ? "false" : "true");
+    fprintf(stderr, " -fa, --flash-attn [%-7s] flash attention\n", params.flash_attn ? "true" : "false");
     fprintf(stderr, " -l LANG, --language LANG [%-7s] spoken language\n", params.language.c_str());
     fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str());
     fprintf(stderr, "\n");

@@ -178,7 +181,6 @@ json unguided_transcription(struct whisper_context * ctx, audio_async &audio, js
     wparams.n_threads = params.n_threads;

     wparams.audio_ctx = params.audio_ctx;
-    wparams.speed_up = params.speed_up;
     wparams.suppress_non_speech_tokens = true;
     // run the transformer and a single decoding pass
     if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {

@@ -217,7 +219,6 @@ json guided_transcription(struct whisper_context * ctx, audio_async &audio, cons
     wparams.n_threads = params.n_threads;

     wparams.audio_ctx = params.audio_ctx;
-    wparams.speed_up = params.speed_up;

     // TODO: Do some time testing. Does an overly long prompt slow down processing?
     // Set up command sets/precompute prompts

@@ -324,12 +325,12 @@ json register_commandset(struct whisper_context * ctx, json jparams, std::vector
     commandset_list.push_back(cs);
     return json{{"index",index}};
 }
-json seek(struct whisper_context * ctx, audio_async &audio, json params) {
+json seek(struct whisper_context * /*ctx*/, audio_async & /*audio*/, json /*params*/) {
     // whisper_state has the pertinent offsets, but there also seem to be a large
     // number of scratch buffers that would prevent rewinding context in a manner similar to llama
     // I'll give this a another pass once everything else is implemented,
     // but for now, it's unsupported
-    throw json{
+    throw json {
         {"code", -32601},
         {"message", "Seeking is not yet supported."}
     };

@@ -412,7 +413,7 @@ void process_loop(struct whisper_context * ctx, audio_async &audio, const whispe
         jobqueue.pop_front();
         // send response
         std::string data = resp.dump(-1, ' ', false, json::error_handler_t::replace);
-        fprintf(stdout, "Content-Length: %d\r\n\r\n%s\n", data.length()+1, data.c_str());
+        fprintf(stdout, "Content-Length: %d\r\n\r\n%s\n", (int)data.length()+1, data.c_str());
         std::cout.flush();

     }

@@ -432,7 +433,12 @@ int main(int argc, char ** argv) {
     }

     // whisper init
-    struct whisper_context * ctx = whisper_init_from_file(params.model.c_str());
+    struct whisper_context_params cparams = whisper_context_default_params();
+
+    cparams.use_gpu = params.use_gpu;
+    cparams.flash_attn = params.flash_attn;
+
+    struct whisper_context * ctx = whisper_init_from_file_with_params(params.model.c_str(), cparams);
     // init audio

     audio_async audio(30*1000);
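The init change above follows the pattern shared by the examples after this series: build the default context params, apply the new `-ng` / `-fa` toggles, and create the context with `whisper_init_from_file_with_params`. A condensed sketch of that pattern (the model path is a placeholder):

```cpp
#include "whisper.h"

#include <cstdio>

// Condensed version of the init pattern introduced above; the model path is a
// placeholder and error handling is reduced to a single check.
static struct whisper_context * init_ctx(bool use_gpu, bool flash_attn) {
    struct whisper_context_params cparams = whisper_context_default_params();

    cparams.use_gpu    = use_gpu;     // cleared by -ng / --no-gpu
    cparams.flash_attn = flash_attn;  // set by -fa / --flash-attn

    struct whisper_context * ctx = whisper_init_from_file_with_params("models/ggml-base.en.bin", cparams);
    if (ctx == nullptr) {
        fprintf(stderr, "failed to initialize whisper context\n");
    }
    return ctx;
}
```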
@@ -3,4 +3,4 @@ add_executable(${TARGET} main.cpp)

 include(DefaultTargetOptions)

-target_link_libraries(${TARGET} PRIVATE common whisper ${CMAKE_THREAD_LIBS_INIT})
+target_link_libraries(${TARGET} PRIVATE common whisper ${FFMPEG_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
@@ -17,28 +17,37 @@ options:
   -d N, --duration N [0 ] duration of audio to process in milliseconds
   -mc N, --max-context N [-1 ] maximum number of text context tokens to store
   -ml N, --max-len N [0 ] maximum segment length in characters
+  -sow, --split-on-word [false ] split on word rather than on token
   -bo N, --best-of N [5 ] number of best candidates to keep
-  -bs N, --beam-size N [-1 ] beam size for beam search
+  -bs N, --beam-size N [5 ] beam size for beam search
   -wt N, --word-thold N [0.01 ] word timestamp probability threshold
   -et N, --entropy-thold N [2.40 ] entropy threshold for decoder fail
   -lpt N, --logprob-thold N [-1.00 ] log probability threshold for decoder fail
-  -su, --speed-up [false ] speed up audio by x2 (reduced accuracy)
+  -debug, --debug-mode [false ] enable debug mode (eg. dump log_mel)
   -tr, --translate [false ] translate from source language to english
   -di, --diarize [false ] stereo audio diarization
+  -tdrz, --tinydiarize [false ] enable tinydiarize (requires a tdrz model)
   -nf, --no-fallback [false ] do not use temperature fallback while decoding
   -otxt, --output-txt [false ] output result in a text file
   -ovtt, --output-vtt [false ] output result in a vtt file
   -osrt, --output-srt [false ] output result in a srt file
+  -olrc, --output-lrc [false ] output result in a lrc file
   -owts, --output-words [false ] output script for generating karaoke video
+  -fp, --font-path [/System/Library/Fonts/Supplemental/Courier New Bold.ttf] path to a monospace font for karaoke video
   -ocsv, --output-csv [false ] output result in a CSV file
   -oj, --output-json [false ] output result in a JSON file
+  -ojf, --output-json-full [false ] include more information in the JSON file
   -of FNAME, --output-file FNAME [ ] output file path (without file extension)
   -ps, --print-special [false ] print special tokens
   -pc, --print-colors [false ] print colors
   -pp, --print-progress [false ] print progress
-  -nt, --no-timestamps [true ] do not print timestamps
+  -nt, --no-timestamps [false ] do not print timestamps
   -l LANG, --language LANG [en ] spoken language ('auto' for auto-detect)
+  -dl, --detect-language [false ] exit after automatically detecting language
   --prompt PROMPT [ ] initial prompt
   -m FNAME, --model FNAME [models/ggml-base.en.bin] model path
   -f FNAME, --file FNAME [ ] input WAV file path
+  -oved D, --ov-e-device DNAME [CPU ] the OpenVINO device used for encode inference
+  -ls, --log-score [false ] log best decoder scores of tokens
+  -ng, --no-gpu [false ] disable GPU
 ```
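For instance, the new JSON and GPU flags listed above could be combined with a typical invocation like the following (model and input paths are placeholders):

```
./main -m models/ggml-base.en.bin -f samples/jfk.wav -oj -ojf -ng
```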
@ -1,10 +1,12 @@
|
|||||||
#include "common.h"
|
#include "common.h"
|
||||||
|
|
||||||
#include "whisper.h"
|
#include "whisper.h"
|
||||||
|
#include "grammar-parser.h"
|
||||||
|
|
||||||
#include <cmath>
|
#include <cmath>
|
||||||
#include <fstream>
|
#include <fstream>
|
||||||
#include <cstdio>
|
#include <cstdio>
|
||||||
|
#include <regex>
|
||||||
#include <string>
|
#include <string>
|
||||||
#include <thread>
|
#include <thread>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
@ -14,34 +16,6 @@
|
|||||||
#pragma warning(disable: 4244 4267) // possible loss of data
|
#pragma warning(disable: 4244 4267) // possible loss of data
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
// Terminal color map. 10 colors grouped in ranges [0.0, 0.1, ..., 0.9]
|
|
||||||
// Lowest is red, middle is yellow, highest is green.
|
|
||||||
const std::vector<std::string> k_colors = {
|
|
||||||
"\033[38;5;196m", "\033[38;5;202m", "\033[38;5;208m", "\033[38;5;214m", "\033[38;5;220m",
|
|
||||||
"\033[38;5;226m", "\033[38;5;190m", "\033[38;5;154m", "\033[38;5;118m", "\033[38;5;82m",
|
|
||||||
};
|
|
||||||
|
|
||||||
// 500 -> 00:05.000
|
|
||||||
// 6000 -> 01:00.000
|
|
||||||
std::string to_timestamp(int64_t t, bool comma = false) {
|
|
||||||
int64_t msec = t * 10;
|
|
||||||
int64_t hr = msec / (1000 * 60 * 60);
|
|
||||||
msec = msec - hr * (1000 * 60 * 60);
|
|
||||||
int64_t min = msec / (1000 * 60);
|
|
||||||
msec = msec - min * (1000 * 60);
|
|
||||||
int64_t sec = msec / 1000;
|
|
||||||
msec = msec - sec * 1000;
|
|
||||||
|
|
||||||
char buf[32];
|
|
||||||
snprintf(buf, sizeof(buf), "%02d:%02d:%02d%s%03d", (int) hr, (int) min, (int) sec, comma ? "," : ".", (int) msec);
|
|
||||||
|
|
||||||
return std::string(buf);
|
|
||||||
}
|
|
||||||
|
|
||||||
int timestamp_to_sample(int64_t t, int n_samples) {
|
|
||||||
return std::max(0, std::min((int) n_samples - 1, (int) ((t*WHISPER_SAMPLE_RATE)/100)));
|
|
||||||
}
|
|
||||||
|
|
||||||
// helper function to replace substrings
|
// helper function to replace substrings
|
||||||
void replace_all(std::string & s, const std::string & search, const std::string & replace) {
|
void replace_all(std::string & s, const std::string & search, const std::string & replace) {
|
||||||
for (size_t pos = 0; ; pos += replace.length()) {
|
for (size_t pos = 0; ; pos += replace.length()) {
|
||||||
@ -54,22 +28,25 @@ void replace_all(std::string & s, const std::string & search, const std::string
|
|||||||
|
|
||||||
// command-line parameters
|
// command-line parameters
|
||||||
struct whisper_params {
|
struct whisper_params {
|
||||||
int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
|
int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
|
||||||
int32_t n_processors = 1;
|
int32_t n_processors = 1;
|
||||||
int32_t offset_t_ms = 0;
|
int32_t offset_t_ms = 0;
|
||||||
int32_t offset_n = 0;
|
int32_t offset_n = 0;
|
||||||
int32_t duration_ms = 0;
|
int32_t duration_ms = 0;
|
||||||
int32_t progress_step = 5;
|
int32_t progress_step = 5;
|
||||||
int32_t max_context = -1;
|
int32_t max_context = -1;
|
||||||
int32_t max_len = 0;
|
int32_t max_len = 0;
|
||||||
int32_t best_of = 2;
|
int32_t best_of = whisper_full_default_params(WHISPER_SAMPLING_GREEDY).greedy.best_of;
|
||||||
int32_t beam_size = -1;
|
int32_t beam_size = whisper_full_default_params(WHISPER_SAMPLING_BEAM_SEARCH).beam_search.beam_size;
|
||||||
|
int32_t audio_ctx = 0;
|
||||||
|
|
||||||
float word_thold = 0.01f;
|
float word_thold = 0.01f;
|
||||||
float entropy_thold = 2.40f;
|
float entropy_thold = 2.40f;
|
||||||
float logprob_thold = -1.00f;
|
float logprob_thold = -1.00f;
|
||||||
|
float grammar_penalty = 100.0f;
|
||||||
|
float temperature = 0.0f;
|
||||||
|
float temperature_inc = 0.2f;
|
||||||
|
|
||||||
bool speed_up = false;
|
|
||||||
bool debug_mode = false;
|
bool debug_mode = false;
|
||||||
bool translate = false;
|
bool translate = false;
|
||||||
bool detect_language = false;
|
bool detect_language = false;
|
||||||
@ -83,29 +60,50 @@ struct whisper_params {
|
|||||||
bool output_wts = false;
|
bool output_wts = false;
|
||||||
bool output_csv = false;
|
bool output_csv = false;
|
||||||
bool output_jsn = false;
|
bool output_jsn = false;
|
||||||
|
bool output_jsn_full = false;
|
||||||
bool output_lrc = false;
|
bool output_lrc = false;
|
||||||
|
bool no_prints = false;
|
||||||
bool print_special = false;
|
bool print_special = false;
|
||||||
bool print_colors = false;
|
bool print_colors = false;
|
||||||
bool print_progress = false;
|
bool print_progress = false;
|
||||||
bool no_timestamps = false;
|
bool no_timestamps = false;
|
||||||
bool log_score = false;
|
bool log_score = false;
|
||||||
|
bool use_gpu = true;
|
||||||
|
bool flash_attn = false;
|
||||||
|
|
||||||
std::string language = "en";
|
std::string language = "en";
|
||||||
std::string prompt;
|
std::string prompt;
|
||||||
std::string font_path = "/System/Library/Fonts/Supplemental/Courier New Bold.ttf";
|
std::string font_path = "/System/Library/Fonts/Supplemental/Courier New Bold.ttf";
|
||||||
std::string model = "models/ggml-base.en.bin";
|
std::string model = "models/ggml-base.en.bin";
|
||||||
|
std::string grammar;
|
||||||
|
std::string grammar_rule;
|
||||||
|
|
||||||
// [TDRZ] speaker turn string
|
// [TDRZ] speaker turn string
|
||||||
std::string tdrz_speaker_turn = " [SPEAKER_TURN]"; // TODO: set from command line
|
std::string tdrz_speaker_turn = " [SPEAKER_TURN]"; // TODO: set from command line
|
||||||
|
|
||||||
|
// A regular expression that matches tokens to suppress
|
||||||
|
std::string suppress_regex;
|
||||||
|
|
||||||
std::string openvino_encode_device = "CPU";
|
std::string openvino_encode_device = "CPU";
|
||||||
|
|
||||||
|
std::string dtw = "";
|
||||||
|
|
||||||
std::vector<std::string> fname_inp = {};
|
std::vector<std::string> fname_inp = {};
|
||||||
std::vector<std::string> fname_out = {};
|
std::vector<std::string> fname_out = {};
|
||||||
|
|
||||||
|
grammar_parser::parse_state grammar_parsed;
|
||||||
};
|
};
|
||||||
|
|
||||||
void whisper_print_usage(int argc, char ** argv, const whisper_params & params);
|
void whisper_print_usage(int argc, char ** argv, const whisper_params & params);
|
||||||
|
|
||||||
|
char* whisper_param_turn_lowercase(char* in){
|
||||||
|
int string_len = strlen(in);
|
||||||
|
for(int i = 0; i < string_len; i++){
|
||||||
|
*(in+i) = tolower((unsigned char)*(in+i));
|
||||||
|
}
|
||||||
|
return in;
|
||||||
|
}
|
||||||
|
|
||||||
bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
|
bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
|
||||||
for (int i = 1; i < argc; i++) {
|
for (int i = 1; i < argc; i++) {
|
||||||
std::string arg = argv[i];
|
std::string arg = argv[i];
|
||||||
@ -133,10 +131,12 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
|
|||||||
else if (arg == "-ml" || arg == "--max-len") { params.max_len = std::stoi(argv[++i]); }
|
else if (arg == "-ml" || arg == "--max-len") { params.max_len = std::stoi(argv[++i]); }
|
||||||
else if (arg == "-bo" || arg == "--best-of") { params.best_of = std::stoi(argv[++i]); }
|
else if (arg == "-bo" || arg == "--best-of") { params.best_of = std::stoi(argv[++i]); }
|
||||||
else if (arg == "-bs" || arg == "--beam-size") { params.beam_size = std::stoi(argv[++i]); }
|
else if (arg == "-bs" || arg == "--beam-size") { params.beam_size = std::stoi(argv[++i]); }
|
||||||
|
else if (arg == "-ac" || arg == "--audio-ctx") { params.audio_ctx = std::stoi(argv[++i]); }
|
||||||
else if (arg == "-wt" || arg == "--word-thold") { params.word_thold = std::stof(argv[++i]); }
|
else if (arg == "-wt" || arg == "--word-thold") { params.word_thold = std::stof(argv[++i]); }
|
||||||
else if (arg == "-et" || arg == "--entropy-thold") { params.entropy_thold = std::stof(argv[++i]); }
|
else if (arg == "-et" || arg == "--entropy-thold") { params.entropy_thold = std::stof(argv[++i]); }
|
||||||
else if (arg == "-lpt" || arg == "--logprob-thold") { params.logprob_thold = std::stof(argv[++i]); }
|
else if (arg == "-lpt" || arg == "--logprob-thold") { params.logprob_thold = std::stof(argv[++i]); }
|
||||||
// else if (arg == "-su" || arg == "--speed-up") { params.speed_up = true; }
|
else if (arg == "-tp" || arg == "--temperature") { params.temperature = std::stof(argv[++i]); }
|
||||||
|
else if (arg == "-tpi" || arg == "--temperature-inc") { params.temperature_inc = std::stof(argv[++i]); }
|
||||||
else if (arg == "-debug"|| arg == "--debug-mode") { params.debug_mode = true; }
|
else if (arg == "-debug"|| arg == "--debug-mode") { params.debug_mode = true; }
|
||||||
else if (arg == "-tr" || arg == "--translate") { params.translate = true; }
|
else if (arg == "-tr" || arg == "--translate") { params.translate = true; }
|
||||||
else if (arg == "-di" || arg == "--diarize") { params.diarize = true; }
|
else if (arg == "-di" || arg == "--diarize") { params.diarize = true; }
|
||||||
@ -151,18 +151,27 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
|
|||||||
else if (arg == "-fp" || arg == "--font-path") { params.font_path = argv[++i]; }
|
else if (arg == "-fp" || arg == "--font-path") { params.font_path = argv[++i]; }
|
||||||
else if (arg == "-ocsv" || arg == "--output-csv") { params.output_csv = true; }
|
else if (arg == "-ocsv" || arg == "--output-csv") { params.output_csv = true; }
|
||||||
else if (arg == "-oj" || arg == "--output-json") { params.output_jsn = true; }
|
else if (arg == "-oj" || arg == "--output-json") { params.output_jsn = true; }
|
||||||
|
else if (arg == "-ojf" || arg == "--output-json-full"){ params.output_jsn_full = params.output_jsn = true; }
|
||||||
else if (arg == "-of" || arg == "--output-file") { params.fname_out.emplace_back(argv[++i]); }
|
else if (arg == "-of" || arg == "--output-file") { params.fname_out.emplace_back(argv[++i]); }
|
||||||
|
else if (arg == "-np" || arg == "--no-prints") { params.no_prints = true; }
|
||||||
else if (arg == "-ps" || arg == "--print-special") { params.print_special = true; }
|
else if (arg == "-ps" || arg == "--print-special") { params.print_special = true; }
|
||||||
else if (arg == "-pc" || arg == "--print-colors") { params.print_colors = true; }
|
else if (arg == "-pc" || arg == "--print-colors") { params.print_colors = true; }
|
||||||
else if (arg == "-pp" || arg == "--print-progress") { params.print_progress = true; }
|
else if (arg == "-pp" || arg == "--print-progress") { params.print_progress = true; }
|
||||||
else if (arg == "-nt" || arg == "--no-timestamps") { params.no_timestamps = true; }
|
else if (arg == "-nt" || arg == "--no-timestamps") { params.no_timestamps = true; }
|
||||||
else if (arg == "-l" || arg == "--language") { params.language = argv[++i]; }
|
else if (arg == "-l" || arg == "--language") { params.language = whisper_param_turn_lowercase(argv[++i]); }
|
||||||
else if (arg == "-dl" || arg == "--detect-language") { params.detect_language = true; }
|
else if (arg == "-dl" || arg == "--detect-language") { params.detect_language = true; }
|
||||||
else if ( arg == "--prompt") { params.prompt = argv[++i]; }
|
else if ( arg == "--prompt") { params.prompt = argv[++i]; }
|
||||||
else if (arg == "-m" || arg == "--model") { params.model = argv[++i]; }
|
else if (arg == "-m" || arg == "--model") { params.model = argv[++i]; }
|
||||||
else if (arg == "-f" || arg == "--file") { params.fname_inp.emplace_back(argv[++i]); }
|
else if (arg == "-f" || arg == "--file") { params.fname_inp.emplace_back(argv[++i]); }
|
||||||
else if (arg == "-oved" || arg == "--ov-e-device") { params.openvino_encode_device = argv[++i]; }
|
else if (arg == "-oved" || arg == "--ov-e-device") { params.openvino_encode_device = argv[++i]; }
|
||||||
else if (arg == "-ls" || arg == "--log-score") { params.log_score = true; }
|
else if (arg == "-dtw" || arg == "--dtw") { params.dtw = argv[++i]; }
|
||||||
|
else if (arg == "-ls" || arg == "--log-score") { params.log_score = true; }
|
||||||
|
else if (arg == "-ng" || arg == "--no-gpu") { params.use_gpu = false; }
|
||||||
|
else if (arg == "-fa" || arg == "--flash-attn") { params.flash_attn = true; }
|
||||||
|
else if ( arg == "--suppress-regex") { params.suppress_regex = argv[++i]; }
|
||||||
|
else if ( arg == "--grammar") { params.grammar = argv[++i]; }
|
||||||
|
else if ( arg == "--grammar-rule") { params.grammar_rule = argv[++i]; }
|
||||||
|
else if ( arg == "--grammar-penalty") { params.grammar_penalty = std::stof(argv[++i]); }
|
||||||
else {
|
else {
|
||||||
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
|
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
|
||||||
whisper_print_usage(argc, argv, params);
|
whisper_print_usage(argc, argv, params);
|
||||||
@ -189,10 +198,12 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
|
|||||||
fprintf(stderr, " -sow, --split-on-word [%-7s] split on word rather than on token\n", params.split_on_word ? "true" : "false");
|
fprintf(stderr, " -sow, --split-on-word [%-7s] split on word rather than on token\n", params.split_on_word ? "true" : "false");
|
||||||
fprintf(stderr, " -bo N, --best-of N [%-7d] number of best candidates to keep\n", params.best_of);
|
fprintf(stderr, " -bo N, --best-of N [%-7d] number of best candidates to keep\n", params.best_of);
|
||||||
fprintf(stderr, " -bs N, --beam-size N [%-7d] beam size for beam search\n", params.beam_size);
|
fprintf(stderr, " -bs N, --beam-size N [%-7d] beam size for beam search\n", params.beam_size);
|
||||||
|
fprintf(stderr, " -ac N, --audio-ctx N [%-7d] audio context size (0 - all)\n", params.audio_ctx);
|
||||||
fprintf(stderr, " -wt N, --word-thold N [%-7.2f] word timestamp probability threshold\n", params.word_thold);
|
fprintf(stderr, " -wt N, --word-thold N [%-7.2f] word timestamp probability threshold\n", params.word_thold);
|
||||||
fprintf(stderr, " -et N, --entropy-thold N [%-7.2f] entropy threshold for decoder fail\n", params.entropy_thold);
|
fprintf(stderr, " -et N, --entropy-thold N [%-7.2f] entropy threshold for decoder fail\n", params.entropy_thold);
|
||||||
fprintf(stderr, " -lpt N, --logprob-thold N [%-7.2f] log probability threshold for decoder fail\n", params.logprob_thold);
|
fprintf(stderr, " -lpt N, --logprob-thold N [%-7.2f] log probability threshold for decoder fail\n", params.logprob_thold);
|
||||||
// fprintf(stderr, " -su, --speed-up [%-7s] speed up audio by x2 (reduced accuracy)\n", params.speed_up ? "true" : "false");
|
fprintf(stderr, " -tp, --temperature N [%-7.2f] The sampling temperature, between 0 and 1\n", params.temperature);
|
||||||
|
fprintf(stderr, " -tpi, --temperature-inc N [%-7.2f] The increment of temperature, between 0 and 1\n",params.temperature_inc);
|
||||||
fprintf(stderr, " -debug, --debug-mode [%-7s] enable debug mode (eg. dump log_mel)\n", params.debug_mode ? "true" : "false");
|
fprintf(stderr, " -debug, --debug-mode [%-7s] enable debug mode (eg. dump log_mel)\n", params.debug_mode ? "true" : "false");
|
||||||
fprintf(stderr, " -tr, --translate [%-7s] translate from source language to english\n", params.translate ? "true" : "false");
|
fprintf(stderr, " -tr, --translate [%-7s] translate from source language to english\n", params.translate ? "true" : "false");
|
||||||
fprintf(stderr, " -di, --diarize [%-7s] stereo audio diarization\n", params.diarize ? "true" : "false");
|
fprintf(stderr, " -di, --diarize [%-7s] stereo audio diarization\n", params.diarize ? "true" : "false");
|
||||||
@ -206,18 +217,27 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
|
|||||||
fprintf(stderr, " -fp, --font-path [%-7s] path to a monospace font for karaoke video\n", params.font_path.c_str());
|
fprintf(stderr, " -fp, --font-path [%-7s] path to a monospace font for karaoke video\n", params.font_path.c_str());
|
||||||
fprintf(stderr, " -ocsv, --output-csv [%-7s] output result in a CSV file\n", params.output_csv ? "true" : "false");
|
fprintf(stderr, " -ocsv, --output-csv [%-7s] output result in a CSV file\n", params.output_csv ? "true" : "false");
|
||||||
fprintf(stderr, " -oj, --output-json [%-7s] output result in a JSON file\n", params.output_jsn ? "true" : "false");
|
fprintf(stderr, " -oj, --output-json [%-7s] output result in a JSON file\n", params.output_jsn ? "true" : "false");
|
||||||
|
fprintf(stderr, " -ojf, --output-json-full [%-7s] include more information in the JSON file\n", params.output_jsn_full ? "true" : "false");
|
||||||
fprintf(stderr, " -of FNAME, --output-file FNAME [%-7s] output file path (without file extension)\n", "");
|
fprintf(stderr, " -of FNAME, --output-file FNAME [%-7s] output file path (without file extension)\n", "");
|
||||||
|
fprintf(stderr, " -np, --no-prints [%-7s] do not print anything other than the results\n", params.no_prints ? "true" : "false");
|
||||||
fprintf(stderr, " -ps, --print-special [%-7s] print special tokens\n", params.print_special ? "true" : "false");
|
fprintf(stderr, " -ps, --print-special [%-7s] print special tokens\n", params.print_special ? "true" : "false");
|
||||||
fprintf(stderr, " -pc, --print-colors [%-7s] print colors\n", params.print_colors ? "true" : "false");
|
fprintf(stderr, " -pc, --print-colors [%-7s] print colors\n", params.print_colors ? "true" : "false");
|
||||||
fprintf(stderr, " -pp, --print-progress [%-7s] print progress\n", params.print_progress ? "true" : "false");
|
fprintf(stderr, " -pp, --print-progress [%-7s] print progress\n", params.print_progress ? "true" : "false");
|
||||||
fprintf(stderr, " -nt, --no-timestamps [%-7s] do not print timestamps\n", params.no_timestamps ? "true" : "false");
|
fprintf(stderr, " -nt, --no-timestamps [%-7s] do not print timestamps\n", params.no_timestamps ? "true" : "false");
|
||||||
fprintf(stderr, " -l LANG, --language LANG [%-7s] spoken language ('auto' for auto-detect)\n", params.language.c_str());
|
fprintf(stderr, " -l LANG, --language LANG [%-7s] spoken language ('auto' for auto-detect)\n", params.language.c_str());
|
||||||
fprintf(stderr, " -dl, --detect-language [%-7s] exit after automatically detecting language\n", params.detect_language ? "true" : "false");
|
fprintf(stderr, " -dl, --detect-language [%-7s] exit after automatically detecting language\n", params.detect_language ? "true" : "false");
|
||||||
fprintf(stderr, " --prompt PROMPT [%-7s] initial prompt\n", params.prompt.c_str());
|
fprintf(stderr, " --prompt PROMPT [%-7s] initial prompt (max n_text_ctx/2 tokens)\n", params.prompt.c_str());
|
||||||
fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str());
|
fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str());
|
||||||
fprintf(stderr, " -f FNAME, --file FNAME [%-7s] input WAV file path\n", "");
|
fprintf(stderr, " -f FNAME, --file FNAME [%-7s] input WAV file path\n", "");
|
||||||
fprintf(stderr, " -oved D, --ov-e-device DNAME [%-7s] the OpenVINO device used for encode inference\n", params.openvino_encode_device.c_str());
|
fprintf(stderr, " -oved D, --ov-e-device DNAME [%-7s] the OpenVINO device used for encode inference\n", params.openvino_encode_device.c_str());
|
||||||
|
fprintf(stderr, " -dtw MODEL --dtw MODEL [%-7s] compute token-level timestamps\n", params.dtw.c_str());
|
||||||
fprintf(stderr, " -ls, --log-score [%-7s] log best decoder scores of tokens\n", params.log_score?"true":"false");
|
fprintf(stderr, " -ls, --log-score [%-7s] log best decoder scores of tokens\n", params.log_score?"true":"false");
|
||||||
|
fprintf(stderr, " -ng, --no-gpu [%-7s] disable GPU\n", params.use_gpu ? "false" : "true");
|
||||||
|
fprintf(stderr, " -fa, --flash-attn [%-7s] flash attention\n", params.flash_attn ? "true" : "false");
|
||||||
|
fprintf(stderr, " --suppress-regex REGEX [%-7s] regular expression matching tokens to suppress\n", params.suppress_regex.c_str());
|
||||||
|
fprintf(stderr, " --grammar GRAMMAR [%-7s] GBNF grammar to guide decoding\n", params.grammar.c_str());
|
||||||
|
fprintf(stderr, " --grammar-rule RULE [%-7s] top-level GBNF grammar rule name\n", params.grammar_rule.c_str());
|
||||||
|
fprintf(stderr, " --grammar-penalty N [%-7.1f] scales down logits of nongrammar tokens\n", params.grammar_penalty);
|
||||||
fprintf(stderr, "\n");
|
fprintf(stderr, "\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -232,8 +252,8 @@ std::string estimate_diarization_speaker(std::vector<std::vector<float>> pcmf32s
     std::string speaker = "";
     const int64_t n_samples = pcmf32s[0].size();

-    const int64_t is0 = timestamp_to_sample(t0, n_samples);
-    const int64_t is1 = timestamp_to_sample(t1, n_samples);
+    const int64_t is0 = timestamp_to_sample(t0, n_samples, WHISPER_SAMPLE_RATE);
+    const int64_t is1 = timestamp_to_sample(t1, n_samples, WHISPER_SAMPLE_RATE);

     double energy0 = 0.0f;
     double energy1 = 0.0f;
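The two calls above now pass the sample rate explicitly. As a rough, hypothetical sketch (not the repository's actual helper, whose clamping details may differ), a timestamp_to_sample along these lines converts whisper's 10 ms timestamp units into a bounded sample index:

```cpp
#include <algorithm>
#include <cstdint>

// Hypothetical sketch of the helper used above: t is in 10 ms units
// (the unit returned by whisper_full_get_segment_t0/t1).
static int64_t timestamp_to_sample(int64_t t, int64_t n_samples, int64_t sample_rate) {
    return std::max<int64_t>(0, std::min<int64_t>(n_samples - 1, (t*sample_rate)/100));
}
// Example: t = 150 (1.5 s) at 16000 Hz gives sample index 24000.
```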
@@ -260,7 +280,7 @@ std::string estimate_diarization_speaker(std::vector<std::vector<float>> pcmf32s

     return speaker;
 }
-void whisper_print_progress_callback(struct whisper_context * ctx, struct whisper_state * /*state*/, int progress, void * user_data) {
+void whisper_print_progress_callback(struct whisper_context * /*ctx*/, struct whisper_state * /*state*/, int progress, void * user_data) {
     int progress_step = ((whisper_print_user_data *) user_data)->params->progress_step;
     int * progress_prev = &(((whisper_print_user_data *) user_data)->progress_prev);
     if (progress >= *progress_prev + progress_step) {
@@ -457,6 +477,38 @@ char *escape_double_quotes_and_backslashes(const char *str) {
     return escaped;
 }

+// double quote should be escaped by another double quote. (rfc4180)
+char *escape_double_quotes_in_csv(const char *str) {
+    if (str == NULL) {
+        return NULL;
+    }
+
+    size_t escaped_length = strlen(str) + 1;
+
+    for (size_t i = 0; str[i] != '\0'; i++) {
+        if (str[i] == '"') {
+            escaped_length++;
+        }
+    }
+
+    char *escaped = (char *)calloc(escaped_length, 1); // pre-zeroed
+    if (escaped == NULL) {
+        return NULL;
+    }
+
+    size_t pos = 0;
+    for (size_t i = 0; str[i] != '\0'; i++) {
+        if (str[i] == '"') {
+            escaped[pos++] = '"';
+        }
+        escaped[pos++] = str[i];
+    }
+
+    // no need to set zero due to calloc() being used prior
+
+    return escaped;
+}
+
 bool output_csv(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
     std::ofstream fout(fname);
     if (!fout.is_open()) {
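As a quick sanity check of the helper added above, here is a minimal, assumed usage sketch (not taken from the project's tests): per RFC 4180 double quotes are doubled, and, unlike escape_double_quotes_and_backslashes, backslashes are left untouched.

```cpp
#include <cassert>
#include <cstdlib>
#include <cstring>

// Declared earlier in this file (see the function added above).
char *escape_double_quotes_in_csv(const char *str);

static void check_csv_escaping() {
    char *a = escape_double_quotes_in_csv("he said \"hi\"");
    assert(strcmp(a, "he said \"\"hi\"\"") == 0); // quotes are doubled
    char *b = escape_double_quotes_in_csv("back\\slash");
    assert(strcmp(b, "back\\slash") == 0);        // backslashes are not escaped
    free(a);
    free(b);
}
```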
@@ -478,7 +530,7 @@ bool output_csv(struct whisper_context * ctx, const char * fname, const whisper_
         const char * text = whisper_full_get_segment_text(ctx, i);
         const int64_t t0 = whisper_full_get_segment_t0(ctx, i);
         const int64_t t1 = whisper_full_get_segment_t1(ctx, i);
-        char * text_escaped = escape_double_quotes_and_backslashes(text);
+        char * text_escaped = escape_double_quotes_in_csv(text);

         //need to multiply times returned from whisper_full_get_segment_t{0,1}() by 10 to get milliseconds.
         fout << 10 * t0 << "," << 10 * t1 << ",";
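To make the conversion comment concrete: segment timestamps are reported in 10 ms units, so t0 = 123 and t1 = 456 become 1230 and 4560 milliseconds in the CSV. A minimal sketch of the same arithmetic:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t t0 = 123, t1 = 456; // whisper timestamps, in 10 ms units
    // prints "1230,4560" - the millisecond values written to the CSV
    std::printf("%lld,%lld\n", (long long)(10 * t0), (long long)(10 * t1));
    return 0;
}
```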
@@ -492,7 +544,7 @@ bool output_csv(struct whisper_context * ctx, const char * fname, const whisper_
     return true;
 }

-bool output_score(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
+bool output_score(struct whisper_context * ctx, const char * fname, const whisper_params & /*params*/, std::vector<std::vector<float>> /*pcmf32s*/) {
     std::ofstream fout(fname);
     fprintf(stderr, "%s: saving output to '%s'\n", __func__, fname);

@@ -511,7 +563,12 @@ bool output_score(struct whisper_context * ctx, const char * fname, const whispe
     return true;
 }

-bool output_json(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
+bool output_json(
+    struct whisper_context * ctx,
+    const char * fname,
+    const whisper_params & params,
+    std::vector<std::vector<float>> pcmf32s,
+    bool full) {
     std::ofstream fout(fname);
     int indent = 0;

@@ -528,7 +585,7 @@ bool output_json(struct whisper_context * ctx, const char * fname, const whisper
     auto end_arr = [&](bool end) {
         indent--;
         doindent();
-        fout << (end ? "]\n" : "},\n");
+        fout << (end ? "]\n" : "],\n");
     };

     auto start_obj = [&](const char *name) {
@@ -569,12 +626,29 @@ bool output_json(struct whisper_context * ctx, const char * fname, const whisper
         end_value(end);
     };

+    auto value_f = [&](const char *name, const float val, bool end) {
+        start_value(name);
+        fout << val;
+        end_value(end);
+    };
+
     auto value_b = [&](const char *name, const bool val, bool end) {
         start_value(name);
         fout << (val ? "true" : "false");
         end_value(end);
     };

+    auto times_o = [&](int64_t t0, int64_t t1, bool end) {
+        start_obj("timestamps");
+        value_s("from", to_timestamp(t0, true).c_str(), false);
+        value_s("to", to_timestamp(t1, true).c_str(), true);
+        end_obj(false);
+        start_obj("offsets");
+        value_i("from", t0 * 10, false);
+        value_i("to", t1 * 10, true);
+        end_obj(end);
+    };
+
     if (!fout.is_open()) {
         fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname);
         return false;
@@ -620,15 +694,27 @@ bool output_json(struct whisper_context * ctx, const char * fname, const whisper
             const int64_t t1 = whisper_full_get_segment_t1(ctx, i);

             start_obj(nullptr);
-            start_obj("timestamps");
-            value_s("from", to_timestamp(t0, true).c_str(), false);
-            value_s("to", to_timestamp(t1, true).c_str(), true);
-            end_obj(false);
-            start_obj("offsets");
-            value_i("from", t0 * 10, false);
-            value_i("to", t1 * 10, true);
-            end_obj(false);
-            value_s("text", text, !params.diarize && !params.tinydiarize);
+            times_o(t0, t1, false);
+            value_s("text", text, !params.diarize && !params.tinydiarize && !full);
+
+            if (full) {
+                start_arr("tokens");
+                const int n = whisper_full_n_tokens(ctx, i);
+                for (int j = 0; j < n; ++j) {
+                    auto token = whisper_full_get_token_data(ctx, i, j);
+                    start_obj(nullptr);
+                    value_s("text", whisper_token_to_str(ctx, token.id), false);
+                    if(token.t0 > -1 && token.t1 > -1) {
+                        // If we have per-token timestamps, write them out
+                        times_o(token.t0, token.t1, false);
+                    }
+                    value_i("id", token.id, false);
+                    value_f("p", token.p, false);
+                    value_f("t_dtw", token.t_dtw, true);
+                    end_obj(j == (n - 1));
+                }
+                end_arr(!params.diarize && !params.tinydiarize);
+            }

             if (params.diarize && pcmf32s.size() == 2) {
                 value_s("speaker", estimate_diarization_speaker(pcmf32s, t0, t1, true).c_str(), true);
@@ -813,14 +899,59 @@ bool output_lrc(struct whisper_context * ctx, const char * fname, const whisper_
     return true;
 }

+
+void cb_log_disable(enum ggml_log_level , const char * , void * ) { }
+
 int main(int argc, char ** argv) {
     whisper_params params;

+    // If the only argument starts with "@", read arguments line-by-line
+    // from the given file.
+    std::vector<std::string> vec_args;
+    if (argc == 2 && argv != nullptr && argv[1] != nullptr && argv[1][0] == '@') {
+        // Save the name of the executable.
+        vec_args.push_back(argv[0]);
+
+        // Open the response file.
+        char const * rspfile = argv[1] + sizeof(char);
+        std::ifstream fin(rspfile);
+        if (fin.is_open() == false) {
+            fprintf(stderr, "error: response file '%s' not found\n", rspfile);
+            return 1;
+        }
+
+        // Read the entire response file.
+        std::string line;
+        while (std::getline(fin, line)) {
+            vec_args.push_back(line);
+        }
+
+        // Use the contents of the response file as the command-line arguments.
+        argc = static_cast<int>(vec_args.size());
+        argv = static_cast<char **>(alloca(argc * sizeof (char *)));
+        for (int i = 0; i < argc; ++i) {
+            argv[i] = const_cast<char *>(vec_args[i].c_str());
+        }
+    }
+
     if (whisper_params_parse(argc, argv, params) == false) {
         whisper_print_usage(argc, argv, params);
         return 1;
     }

+    // remove non-existent files
+    for (auto it = params.fname_inp.begin(); it != params.fname_inp.end();) {
+        const auto fname_inp = it->c_str();
+
+        if (*it != "-" && !is_file_exist(fname_inp)) {
+            fprintf(stderr, "error: input file not found '%s'\n", fname_inp);
+            it = params.fname_inp.erase(it);
+            continue;
+        }
+
+        it++;
+    }
+
     if (params.fname_inp.empty()) {
         fprintf(stderr, "error: no input files specified\n");
         whisper_print_usage(argc, argv, params);
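The alloca-based rebuild of argv above is fine because vec_args stays alive for the rest of main. As a hedged aside rather than part of the change, the same effect can be had without alloca by keeping a vector of pointers in scope:

```cpp
#include <string>
#include <vector>

// Sketch: build a char** view over a std::vector<std::string>.
// The pointers stay valid only while `vec_args` is neither resized nor destroyed.
static std::vector<char *> make_argv(const std::vector<std::string> & vec_args) {
    std::vector<char *> argv_vec;
    argv_vec.reserve(vec_args.size());
    for (const auto & s : vec_args) {
        argv_vec.push_back(const_cast<char *>(s.c_str()));
    }
    return argv_vec;
}
// Usage: auto argv_vec = make_argv(vec_args);
//        argc = (int) argv_vec.size(); argv = argv_vec.data();
```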
@@ -839,9 +970,40 @@ int main(int argc, char ** argv) {
         exit(0);
     }

+    if (params.no_prints) {
+        whisper_log_set(cb_log_disable, NULL);
+    }
+
     // whisper init

-    struct whisper_context * ctx = whisper_init_from_file(params.model.c_str());
+    struct whisper_context_params cparams = whisper_context_default_params();
+
+    cparams.use_gpu = params.use_gpu;
+    cparams.flash_attn = params.flash_attn;
+
+    if (!params.dtw.empty()) {
+        cparams.dtw_token_timestamps = true;
+        cparams.dtw_aheads_preset = WHISPER_AHEADS_NONE;
+
+        if (params.dtw == "tiny")      cparams.dtw_aheads_preset = WHISPER_AHEADS_TINY;
+        if (params.dtw == "tiny.en")   cparams.dtw_aheads_preset = WHISPER_AHEADS_TINY_EN;
+        if (params.dtw == "base")      cparams.dtw_aheads_preset = WHISPER_AHEADS_BASE;
+        if (params.dtw == "base.en")   cparams.dtw_aheads_preset = WHISPER_AHEADS_BASE_EN;
+        if (params.dtw == "small")     cparams.dtw_aheads_preset = WHISPER_AHEADS_SMALL;
+        if (params.dtw == "small.en")  cparams.dtw_aheads_preset = WHISPER_AHEADS_SMALL_EN;
+        if (params.dtw == "medium")    cparams.dtw_aheads_preset = WHISPER_AHEADS_MEDIUM;
+        if (params.dtw == "medium.en") cparams.dtw_aheads_preset = WHISPER_AHEADS_MEDIUM_EN;
+        if (params.dtw == "large.v1")  cparams.dtw_aheads_preset = WHISPER_AHEADS_LARGE_V1;
+        if (params.dtw == "large.v2")  cparams.dtw_aheads_preset = WHISPER_AHEADS_LARGE_V2;
+        if (params.dtw == "large.v3")  cparams.dtw_aheads_preset = WHISPER_AHEADS_LARGE_V3;
+
+        if (cparams.dtw_aheads_preset == WHISPER_AHEADS_NONE) {
+            fprintf(stderr, "error: unknown DTW preset '%s'\n", params.dtw.c_str());
+            return 3;
+        }
+    }
+
+    struct whisper_context * ctx = whisper_init_from_file_with_params(params.model.c_str(), cparams);
+
     if (ctx == nullptr) {
         fprintf(stderr, "error: failed to initialize whisper context\n");
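The chain of string comparisons above is easy to scan; an equivalent table-driven variant is sketched below. It assumes the preset enum in whisper.h is named whisper_alignment_heads_preset, which the WHISPER_AHEADS_* values used by the change suggest but which is not shown in this diff.

```cpp
#include <map>
#include <string>

#include "whisper.h"

// Sketch: map the --dtw model name to the corresponding alignment-heads preset.
// Assumes the enum type name from whisper.h; returns WHISPER_AHEADS_NONE for unknown names.
static enum whisper_alignment_heads_preset dtw_preset_from_name(const std::string & name) {
    static const std::map<std::string, enum whisper_alignment_heads_preset> presets = {
        { "tiny",     WHISPER_AHEADS_TINY     }, { "tiny.en",   WHISPER_AHEADS_TINY_EN   },
        { "base",     WHISPER_AHEADS_BASE     }, { "base.en",   WHISPER_AHEADS_BASE_EN   },
        { "small",    WHISPER_AHEADS_SMALL    }, { "small.en",  WHISPER_AHEADS_SMALL_EN  },
        { "medium",   WHISPER_AHEADS_MEDIUM   }, { "medium.en", WHISPER_AHEADS_MEDIUM_EN },
        { "large.v1", WHISPER_AHEADS_LARGE_V1 }, { "large.v2",  WHISPER_AHEADS_LARGE_V2  },
        { "large.v3", WHISPER_AHEADS_LARGE_V3 },
    };
    const auto it = presets.find(name);
    return it == presets.end() ? WHISPER_AHEADS_NONE : it->second;
}
```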
@@ -851,6 +1013,29 @@ int main(int argc, char ** argv) {
     // initialize openvino encoder. this has no effect on whisper.cpp builds that don't have OpenVINO configured
     whisper_ctx_init_openvino_encoder(ctx, nullptr, params.openvino_encode_device.c_str(), nullptr);

+    if (!params.grammar.empty()) {
+        auto & grammar = params.grammar_parsed;
+        if (is_file_exist(params.grammar.c_str())) {
+            // read grammar from file
+            std::ifstream ifs(params.grammar.c_str());
+            const std::string txt = std::string((std::istreambuf_iterator<char>(ifs)), std::istreambuf_iterator<char>());
+            grammar = grammar_parser::parse(txt.c_str());
+        } else {
+            // read grammar from string
+            grammar = grammar_parser::parse(params.grammar.c_str());
+        }
+
+        // will be empty (default) if there are parse errors
+        if (grammar.rules.empty()) {
+            fprintf(stderr, "error: failed to parse grammar \"%s\"\n", params.grammar.c_str());
+            return 4;
+        } else {
+            fprintf(stderr, "%s: grammar:\n", __func__);
+            grammar_parser::print_grammar(stderr, grammar);
+            fprintf(stderr, "\n");
+        }
+    }
+
     for (int f = 0; f < (int) params.fname_inp.size(); ++f) {
         const auto fname_inp = params.fname_inp[f];
         const auto fname_out = f < (int) params.fname_out.size() && !params.fname_out[f].empty() ? params.fname_out[f] : params.fname_inp[f];
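For orientation only: --grammar takes either a path to a grammar file or an inline grammar string. The snippet below is a made-up example in llama.cpp-style GBNF (the exact syntax accepted by grammar_parser::parse is an assumption, not quoted from the sources); it would be paired with --grammar-rule root.

```cpp
// Hypothetical GBNF grammar that could be passed via --grammar,
// with --grammar-rule root selecting the start rule.
static const char * k_yes_no_grammar =
    "root   ::= answer\n"
    "answer ::= \"yes\" | \"no\"\n";
```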
@@ -863,29 +1048,28 @@ int main(int argc, char ** argv) {
             continue;
         }

-        // print system information
-        {
+        if (!whisper_is_multilingual(ctx)) {
+            if (params.language != "en" || params.translate) {
+                params.language = "en";
+                params.translate = false;
+                fprintf(stderr, "%s: WARNING: model is not multilingual, ignoring language and translation options\n", __func__);
+            }
+        }
+        if (params.detect_language) {
+            params.language = "auto";
+        }
+
+        if (!params.no_prints) {
+            // print system information
             fprintf(stderr, "\n");
             fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                     params.n_threads*params.n_processors, std::thread::hardware_concurrency(), whisper_print_system_info());
-        }

             // print some info about the processing
-        {
             fprintf(stderr, "\n");
-            if (!whisper_is_multilingual(ctx)) {
-                if (params.language != "en" || params.translate) {
-                    params.language = "en";
-                    params.translate = false;
-                    fprintf(stderr, "%s: WARNING: model is not multilingual, ignoring language and translation options\n", __func__);
-                }
-            }
-            if (params.detect_language) {
-                params.language = "auto";
-            }
-            fprintf(stderr, "%s: processing '%s' (%d samples, %.1f sec), %d threads, %d processors, lang = %s, task = %s, %stimestamps = %d ...\n",
+            fprintf(stderr, "%s: processing '%s' (%d samples, %.1f sec), %d threads, %d processors, %d beams + best of %d, lang = %s, task = %s, %stimestamps = %d ...\n",
                     __func__, fname_inp.c_str(), int(pcmf32.size()), float(pcmf32.size())/WHISPER_SAMPLE_RATE,
-                    params.n_threads, params.n_processors,
+                    params.n_threads, params.n_processors, params.beam_size, params.best_of,
                     params.language.c_str(),
                     params.translate ? "translate" : "transcribe",
                     params.tinydiarize ? "tdrz = 1, " : "",
@@ -898,7 +1082,8 @@ int main(int argc, char ** argv) {
         {
             whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);

-            wparams.strategy = params.beam_size > 1 ? WHISPER_SAMPLING_BEAM_SEARCH : WHISPER_SAMPLING_GREEDY;
+            const bool use_grammar = (!params.grammar_parsed.rules.empty() && !params.grammar_rule.empty());
+            wparams.strategy = (params.beam_size > 1 || use_grammar) ? WHISPER_SAMPLING_BEAM_SEARCH : WHISPER_SAMPLING_GREEDY;

             wparams.print_realtime = false;
             wparams.print_progress = params.print_progress;
@@ -912,27 +1097,47 @@ int main(int argc, char ** argv) {
             wparams.offset_ms = params.offset_t_ms;
             wparams.duration_ms = params.duration_ms;

-            wparams.token_timestamps = params.output_wts || params.max_len > 0;
+            wparams.token_timestamps = params.output_wts || params.output_jsn_full || params.max_len > 0;
             wparams.thold_pt = params.word_thold;
             wparams.max_len = params.output_wts && params.max_len == 0 ? 60 : params.max_len;
             wparams.split_on_word = params.split_on_word;
+            wparams.audio_ctx = params.audio_ctx;

-            wparams.speed_up = params.speed_up;
             wparams.debug_mode = params.debug_mode;

             wparams.tdrz_enable = params.tinydiarize; // [TDRZ]

+            wparams.suppress_regex = params.suppress_regex.empty() ? nullptr : params.suppress_regex.c_str();
+
             wparams.initial_prompt = params.prompt.c_str();

             wparams.greedy.best_of = params.best_of;
             wparams.beam_search.beam_size = params.beam_size;

-            wparams.temperature_inc = params.no_fallback ? 0.0f : wparams.temperature_inc;
+            wparams.temperature_inc = params.no_fallback ? 0.0f : params.temperature_inc;
+            wparams.temperature = params.temperature;
+
             wparams.entropy_thold = params.entropy_thold;
             wparams.logprob_thold = params.logprob_thold;

+            wparams.no_timestamps = params.no_timestamps;
+
             whisper_print_user_data user_data = { &params, &pcmf32s, 0 };

+            const auto & grammar_parsed = params.grammar_parsed;
+            auto grammar_rules = grammar_parsed.c_rules();
+
+            if (use_grammar) {
+                if (grammar_parsed.symbol_ids.find(params.grammar_rule) == grammar_parsed.symbol_ids.end()) {
+                    fprintf(stderr, "%s: warning: grammar rule '%s' not found - skipping grammar sampling\n", __func__, params.grammar_rule.c_str());
+                } else {
+                    wparams.grammar_rules = grammar_rules.data();
+                    wparams.n_grammar_rules = grammar_rules.size();
+                    wparams.i_start_rule = grammar_parsed.symbol_ids.at(params.grammar_rule);
+                    wparams.grammar_penalty = params.grammar_penalty;
+                }
+            }
+
             // this callback is called on each new segment
             if (!wparams.print_realtime) {
                 wparams.new_segment_callback = whisper_print_segment_callback;
@@ -944,8 +1149,9 @@ int main(int argc, char ** argv) {
                 wparams.progress_callback_user_data = &user_data;
             }

-            // example for abort mechanism
-            // in this example, we do not abort the processing, but we could if the flag is set to true
+            // examples for abort mechanism
+            // in examples below, we do not abort the processing, but we could if the flag is set to true

             // the callback is called before every encoder run - if it returns false, the processing is aborted
             {
                 static bool is_aborted = false; // NOTE: this should be atomic to avoid data race
@@ -957,6 +1163,17 @@ int main(int argc, char ** argv) {
                 wparams.encoder_begin_callback_user_data = &is_aborted;
             }

+            // the callback is called before every computation - if it returns true, the computation is aborted
+            {
+                static bool is_aborted = false; // NOTE: this should be atomic to avoid data race
+
+                wparams.abort_callback = [](void * user_data) {
+                    bool is_aborted = *(bool*)user_data;
+                    return is_aborted;
+                };
+                wparams.abort_callback_user_data = &is_aborted;
+            }
+
             if (whisper_full_parallel(ctx, wparams, pcmf32.data(), pcmf32.size(), params.n_processors) != 0) {
                 fprintf(stderr, "%s: failed to process audio\n", argv[0]);
                 return 10;
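Both abort hooks above keep a static bool together with a NOTE that it should really be atomic. A hedged sketch of that variant, wiring the same abort_callback the change introduces but backed by std::atomic<bool>:

```cpp
#include <atomic>

#include "whisper.h"

// Sketch: an std::atomic<bool> avoids the data race the NOTE above warns about.
// Another thread (for example a signal or UI handler) can call request_abort() safely.
static std::atomic<bool> g_is_aborted{false};

static void request_abort() { g_is_aborted.store(true); }

static void install_abort_callback(whisper_full_params & wparams) {
    wparams.abort_callback = [](void * /*user_data*/) {
        return g_is_aborted.load(); // returning true aborts the computation
    };
    wparams.abort_callback_user_data = nullptr;
}
```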
@@ -1000,7 +1217,7 @@ int main(int argc, char ** argv) {
             // output to JSON file
             if (params.output_jsn) {
                 const auto fname_jsn = fname_out + ".json";
-                output_json(ctx, fname_jsn.c_str(), params, pcmf32s);
+                output_json(ctx, fname_jsn.c_str(), params, pcmf32s, params.output_jsn_full);
             }

             // output to LRC file
@@ -1017,7 +1234,9 @@ int main(int argc, char ** argv) {
         }
     }

-    whisper_print_timings(ctx);
+    if (!params.no_prints) {
+        whisper_print_timings(ctx);
+    }
     whisper_free(ctx);

     return 0;
examples/python/test_whisper_processor.py (new file, 7 lines)
@@ -0,0 +1,7 @@
+import whisper_processor
+
+try:
+    result = whisper_processor.process_audio("./audio/wake_word_detected16k.wav", "base.en")
+    print(result)
+except Exception as e:
+    print(f"Error: {e}")

examples/python/whisper_processor.py (new file, 54 lines)
@@ -0,0 +1,54 @@
+import subprocess
+import sys
+import os
+
+def process_audio(wav_file, model_name="base.en"):
+    """
+    Processes an audio file using a specified model and returns the processed string.
+
+    :param wav_file: Path to the WAV file
+    :param model_name: Name of the model to use
+    :return: Processed string output from the audio processing
+    :raises: Exception if an error occurs during processing
+    """
+
+    model = f"./models/ggml-{model_name}.bin"
+
+    # Check if the file exists
+    if not os.path.exists(model):
+        raise FileNotFoundError(f"Model file not found: {model} \n\nDownload a model with this command:\n\n> bash ./models/download-ggml-model.sh {model_name}\n\n")
+
+    if not os.path.exists(wav_file):
+        raise FileNotFoundError(f"WAV file not found: {wav_file}")
+
+    full_command = f"./main -m {model} -f {wav_file} -np -nt"
+
+    # Execute the command
+    process = subprocess.Popen(full_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+    # Get the output and error (if any)
+    output, error = process.communicate()
+
+    if error:
+        raise Exception(f"Error processing audio: {error.decode('utf-8')}")
+
+    # Process and return the output string
+    decoded_str = output.decode('utf-8').strip()
+    processed_str = decoded_str.replace('[BLANK_AUDIO]', '').strip()
+
+    return processed_str
+
+def main():
+    if len(sys.argv) >= 2:
+        wav_file = sys.argv[1]
+        model_name = sys.argv[2] if len(sys.argv) == 3 else "base.en"
+        try:
+            result = process_audio(wav_file, model_name)
+            print(result)
+        except Exception as e:
+            print(f"Error: {e}")
+    else:
+        print("Usage: python whisper_processor.py <wav_file> [<model_name>]")
+
+if __name__ == "__main__":
+    main()
examples/server/CMakeLists.txt (new file, 10 lines)
@@ -0,0 +1,10 @@
+set(TARGET server)
+add_executable(${TARGET} server.cpp httplib.h)
+
+include(DefaultTargetOptions)
+
+target_link_libraries(${TARGET} PRIVATE common json_cpp whisper ${CMAKE_THREAD_LIBS_INIT})
+
+if (WIN32)
+    target_link_libraries(${TARGET} PRIVATE ws2_32)
+endif()
examples/server/README.md (new file, 69 lines)
@@ -0,0 +1,69 @@
+# whisper.cpp http server
+
+Simple http server. WAV Files are passed to the inference model via http requests.
+
+https://github.com/ggerganov/whisper.cpp/assets/1991296/e983ee53-8741-4eb5-9048-afe5e4594b8f
+
+## Usage
+
+```
+./server -h
+
+usage: ./bin/server [options]
+
+options:
+  -h,        --help              [default] show this help message and exit
+  -t N,      --threads N         [4      ] number of threads to use during computation
+  -p N,      --processors N      [1      ] number of processors to use during computation
+  -ot N,     --offset-t N        [0      ] time offset in milliseconds
+  -on N,     --offset-n N        [0      ] segment index offset
+  -d N,      --duration N        [0      ] duration of audio to process in milliseconds
+  -mc N,     --max-context N     [-1     ] maximum number of text context tokens to store
+  -ml N,     --max-len N         [0      ] maximum segment length in characters
+  -sow,      --split-on-word     [false  ] split on word rather than on token
+  -bo N,     --best-of N         [2      ] number of best candidates to keep
+  -bs N,     --beam-size N       [-1     ] beam size for beam search
+  -wt N,     --word-thold N      [0.01   ] word timestamp probability threshold
+  -et N,     --entropy-thold N   [2.40   ] entropy threshold for decoder fail
+  -lpt N,    --logprob-thold N   [-1.00  ] log probability threshold for decoder fail
+  -debug,    --debug-mode        [false  ] enable debug mode (eg. dump log_mel)
+  -tr,       --translate         [false  ] translate from source language to english
+  -di,       --diarize           [false  ] stereo audio diarization
+  -tdrz,     --tinydiarize       [false  ] enable tinydiarize (requires a tdrz model)
+  -nf,       --no-fallback       [false  ] do not use temperature fallback while decoding
+  -ps,       --print-special     [false  ] print special tokens
+  -pc,       --print-colors      [false  ] print colors
+  -pr,       --print-realtime    [false  ] print output in realtime
+  -pp,       --print-progress    [false  ] print progress
+  -nt,       --no-timestamps     [false  ] do not print timestamps
+  -l LANG,   --language LANG     [en     ] spoken language ('auto' for auto-detect)
+  -dl,       --detect-language   [false  ] exit after automatically detecting language
+             --prompt PROMPT     [       ] initial prompt
+  -m FNAME,  --model FNAME       [models/ggml-base.en.bin] model path
+  -oved D,   --ov-e-device DNAME [CPU    ] the OpenVINO device used for encode inference
+  --host HOST,                   [127.0.0.1] Hostname/IP address for the server
+  --port PORT,                   [8080   ] Port number for the server
+  --convert,                     [false  ] Convert audio to WAV, requires ffmpeg on the server
+```
+
+> [!WARNING]
+> **Do not run the server example with administrative privileges and ensure it's operated in a sandbox environment, especially since it involves risky operations like accepting user file uploads and using ffmpeg for format conversions. Always validate and sanitize inputs to guard against potential security threats.**
+
+## request examples
+
+**/inference**
+```
+curl 127.0.0.1:8080/inference \
+  -H "Content-Type: multipart/form-data" \
+  -F file="@<file-path>" \
+  -F temperature="0.0" \
+  -F temperature_inc="0.2" \
+  -F response_format="json"
+```
+
+**/load**
+```
+curl 127.0.0.1:8080/load \
+  -H "Content-Type: multipart/form-data" \
+  -F model="<path-to-model-file>"
+```
examples/server/httplib.h (new file, 9262 lines; diff suppressed because it is too large)
examples/server/server.cpp (new file, 1037 lines; diff suppressed because it is too large)
@@ -103,11 +103,11 @@ void stream_main(size_t index) {

        {
            const int n_segments = whisper_full_n_segments(ctx);
-           for (int i = n_segments - 1; i < n_segments; ++i) {
-               const char * text = whisper_full_get_segment_text(ctx, i);
+           if (n_segments > 0) {
+               const char * text = whisper_full_get_segment_text(ctx, n_segments - 1);

-               const int64_t t0 = whisper_full_get_segment_t0(ctx, i);
-               const int64_t t1 = whisper_full_get_segment_t1(ctx, i);
+               const int64_t t0 = whisper_full_get_segment_t0(ctx, n_segments - 1);
+               const int64_t t1 = whisper_full_get_segment_t1(ctx, n_segments - 1);

                printf("transcribed: %s\n", text);

@@ -132,7 +132,7 @@ EMSCRIPTEN_BINDINGS(stream) {
    emscripten::function("init", emscripten::optional_override([](const std::string & path_model) {
        for (size_t i = 0; i < g_contexts.size(); ++i) {
            if (g_contexts[i] == nullptr) {
-               g_contexts[i] = whisper_init_from_file(path_model.c_str());
+               g_contexts[i] = whisper_init_from_file_with_params(path_model.c_str(), whisper_context_default_params());
                if (g_contexts[i] != nullptr) {
                    g_running = true;
                    if (g_worker.joinable()) {
|
@ -4,7 +4,7 @@ This is a naive example of performing real-time inference on audio from your mic
|
|||||||
The `stream` tool samples the audio every half a second and runs the transcription continously.
|
The `stream` tool samples the audio every half a second and runs the transcription continously.
|
||||||
More info is available in [issue #10](https://github.com/ggerganov/whisper.cpp/issues/10).
|
More info is available in [issue #10](https://github.com/ggerganov/whisper.cpp/issues/10).
|
||||||
|
|
||||||
```java
|
```bash
|
||||||
./stream -m ./models/ggml-base.en.bin -t 8 --step 500 --length 5000
|
./stream -m ./models/ggml-base.en.bin -t 8 --step 500 --length 5000
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -14,7 +14,7 @@ https://user-images.githubusercontent.com/1991296/194935793-76afede7-cfa8-48d8-a
|
|||||||
|
|
||||||
Setting the `--step` argument to `0` enables the sliding window mode:
|
Setting the `--step` argument to `0` enables the sliding window mode:
|
||||||
|
|
||||||
```java
|
```bash
|
||||||
./stream -m ./models/ggml-small.en.bin -t 6 --step 0 --length 30000 -vth 0.6
|
./stream -m ./models/ggml-small.en.bin -t 6 --step 0 --length 30000 -vth 0.6
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -30,15 +30,33 @@ a transcription block that is suitable for parsing.
|
|||||||
The `stream` tool depends on SDL2 library to capture audio from the microphone. You can build it like this:
|
The `stream` tool depends on SDL2 library to capture audio from the microphone. You can build it like this:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Install SDL2 on Linux
|
# Install SDL2
|
||||||
|
# On Debian based linux distributions:
|
||||||
sudo apt-get install libsdl2-dev
|
sudo apt-get install libsdl2-dev
|
||||||
|
|
||||||
|
# On Fedora Linux:
|
||||||
|
sudo dnf install SDL2 SDL2-devel
|
||||||
|
|
||||||
# Install SDL2 on Mac OS
|
# Install SDL2 on Mac OS
|
||||||
brew install sdl2
|
brew install sdl2
|
||||||
|
|
||||||
make stream
|
make stream
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Ensure you are at the root of the repo when running `make stream`. Not within the `examples/stream` dir
|
||||||
|
as the libraries needed like `common-sdl.h` are located within `examples`. Attempting to compile within
|
||||||
|
`examples/steam` means your compiler cannot find them and it gives an error it cannot find the file.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
whisper.cpp/examples/stream$ make stream
|
||||||
|
g++ stream.cpp -o stream
|
||||||
|
stream.cpp:6:10: fatal error: common/sdl.h: No such file or directory
|
||||||
|
6 | #include "common/sdl.h"
|
||||||
|
| ^~~~~~~~~~~~~~
|
||||||
|
compilation terminated.
|
||||||
|
make: *** [<builtin>: stream] Error 1
|
||||||
|
```
|
||||||
|
|
||||||
## Web version
|
## Web version
|
||||||
|
|
||||||
This tool can also run in the browser: [examples/stream.wasm](/examples/stream.wasm)
|
This tool can also run in the browser: [examples/stream.wasm](/examples/stream.wasm)
|
||||||
|
@ -2,9 +2,8 @@
|
|||||||
//
|
//
|
||||||
// A very quick-n-dirty implementation serving mainly as a proof of concept.
|
// A very quick-n-dirty implementation serving mainly as a proof of concept.
|
||||||
//
|
//
|
||||||
|
|
||||||
#include "common.h"
|
|
||||||
#include "common-sdl.h"
|
#include "common-sdl.h"
|
||||||
|
#include "common.h"
|
||||||
#include "whisper.h"
|
#include "whisper.h"
|
||||||
|
|
||||||
#include <cassert>
|
#include <cassert>
|
||||||
@ -14,19 +13,6 @@
|
|||||||
#include <vector>
|
#include <vector>
|
||||||
#include <fstream>
|
#include <fstream>
|
||||||
|
|
||||||
// 500 -> 00:05.000
|
|
||||||
// 6000 -> 01:00.000
|
|
||||||
std::string to_timestamp(int64_t t) {
|
|
||||||
int64_t sec = t/100;
|
|
||||||
int64_t msec = t - sec*100;
|
|
||||||
int64_t min = sec/60;
|
|
||||||
sec = sec - min*60;
|
|
||||||
|
|
||||||
char buf[32];
|
|
||||||
snprintf(buf, sizeof(buf), "%02d:%02d.%03d", (int) min, (int) sec, (int) msec);
|
|
||||||
|
|
||||||
return std::string(buf);
|
|
||||||
}
|
|
||||||
|
|
||||||
// command-line parameters
|
// command-line parameters
|
||||||
struct whisper_params {
|
struct whisper_params {
|
||||||
@@ -41,13 +27,15 @@ struct whisper_params {
    float vad_thold = 0.6f;
    float freq_thold = 100.0f;

-   bool speed_up = false;
    bool translate = false;
    bool no_fallback = false;
    bool print_special = false;
    bool no_context = true;
    bool no_timestamps = false;
    bool tinydiarize = false;
+   bool save_audio = false; // save audio to wav file
+   bool use_gpu = true;
+   bool flash_attn = false;

    std::string language = "en";
    std::string model = "models/ggml-base.en.bin";
@@ -64,24 +52,26 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
        whisper_print_usage(argc, argv, params);
        exit(0);
    }
    else if (arg == "-t" || arg == "--threads") { params.n_threads = std::stoi(argv[++i]); }
    else if ( arg == "--step") { params.step_ms = std::stoi(argv[++i]); }
    else if ( arg == "--length") { params.length_ms = std::stoi(argv[++i]); }
    else if ( arg == "--keep") { params.keep_ms = std::stoi(argv[++i]); }
    else if (arg == "-c" || arg == "--capture") { params.capture_id = std::stoi(argv[++i]); }
    else if (arg == "-mt" || arg == "--max-tokens") { params.max_tokens = std::stoi(argv[++i]); }
    else if (arg == "-ac" || arg == "--audio-ctx") { params.audio_ctx = std::stoi(argv[++i]); }
    else if (arg == "-vth" || arg == "--vad-thold") { params.vad_thold = std::stof(argv[++i]); }
    else if (arg == "-fth" || arg == "--freq-thold") { params.freq_thold = std::stof(argv[++i]); }
-   else if (arg == "-su" || arg == "--speed-up") { params.speed_up = true; }
    else if (arg == "-tr" || arg == "--translate") { params.translate = true; }
    else if (arg == "-nf" || arg == "--no-fallback") { params.no_fallback = true; }
    else if (arg == "-ps" || arg == "--print-special") { params.print_special = true; }
    else if (arg == "-kc" || arg == "--keep-context") { params.no_context = false; }
    else if (arg == "-l" || arg == "--language") { params.language = argv[++i]; }
    else if (arg == "-m" || arg == "--model") { params.model = argv[++i]; }
    else if (arg == "-f" || arg == "--file") { params.fname_out = argv[++i]; }
    else if (arg == "-tdrz" || arg == "--tinydiarize") { params.tinydiarize = true; }
+   else if (arg == "-sa" || arg == "--save-audio") { params.save_audio = true; }
+   else if (arg == "-ng" || arg == "--no-gpu") { params.use_gpu = false; }
+   else if (arg == "-fa" || arg == "--flash-attn") { params.flash_attn = true; }

    else {
        fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
@@ -108,7 +98,6 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
    fprintf(stderr, " -ac N, --audio-ctx N [%-7d] audio context size (0 - all)\n", params.audio_ctx);
    fprintf(stderr, " -vth N, --vad-thold N [%-7.2f] voice activity detection threshold\n", params.vad_thold);
    fprintf(stderr, " -fth N, --freq-thold N [%-7.2f] high-pass frequency cutoff\n", params.freq_thold);
-   fprintf(stderr, " -su, --speed-up [%-7s] speed up audio by x2 (reduced accuracy)\n", params.speed_up ? "true" : "false");
    fprintf(stderr, " -tr, --translate [%-7s] translate from source language to english\n", params.translate ? "true" : "false");
    fprintf(stderr, " -nf, --no-fallback [%-7s] do not use temperature fallback while decoding\n", params.no_fallback ? "true" : "false");
    fprintf(stderr, " -ps, --print-special [%-7s] print special tokens\n", params.print_special ? "true" : "false");
@@ -116,7 +105,10 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
    fprintf(stderr, " -l LANG, --language LANG [%-7s] spoken language\n", params.language.c_str());
    fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str());
    fprintf(stderr, " -f FNAME, --file FNAME [%-7s] text output file name\n", params.fname_out.c_str());
    fprintf(stderr, " -tdrz, --tinydiarize [%-7s] enable tinydiarize (requires a tdrz model)\n", params.tinydiarize ? "true" : "false");
+   fprintf(stderr, " -sa, --save-audio [%-7s] save the recorded audio to a file\n", params.save_audio ? "true" : "false");
+   fprintf(stderr, " -ng, --no-gpu [%-7s] disable GPU inference\n", params.use_gpu ? "false" : "true");
+   fprintf(stderr, " -fa, --flash-attn [%-7s] flash attention during inference\n", params.flash_attn ? "true" : "false");
    fprintf(stderr, "\n");
}

@@ -154,14 +146,18 @@ int main(int argc, char ** argv) {
    audio.resume();

    // whisper init

    if (params.language != "auto" && whisper_lang_id(params.language.c_str()) == -1){
        fprintf(stderr, "error: unknown language '%s'\n", params.language.c_str());
        whisper_print_usage(argc, argv, params);
        exit(0);
    }

-   struct whisper_context * ctx = whisper_init_from_file(params.model.c_str());
+   struct whisper_context_params cparams = whisper_context_default_params();
+
+   cparams.use_gpu = params.use_gpu;
+   cparams.flash_attn = params.flash_attn;
+
+   struct whisper_context * ctx = whisper_init_from_file_with_params(params.model.c_str(), cparams);

    std::vector<float> pcmf32 (n_samples_30s, 0.0f);
    std::vector<float> pcmf32_old;
@@ -212,14 +208,28 @@ int main(int argc, char ** argv) {
        }
    }

-   printf("[Start speaking]");
+   wav_writer wavWriter;
+   // save wav file
+   if (params.save_audio) {
+       // Get current date/time for filename
+       time_t now = time(0);
+       char buffer[80];
+       strftime(buffer, sizeof(buffer), "%Y%m%d%H%M%S", localtime(&now));
+       std::string filename = std::string(buffer) + ".wav";
+
+       wavWriter.open(filename, WHISPER_SAMPLE_RATE, 16, 1);
+   }
+   printf("[Start speaking]\n");
    fflush(stdout);

    auto t_last = std::chrono::high_resolution_clock::now();
    const auto t_start = t_last;

    // main audio loop
    while (is_running) {
+       if (params.save_audio) {
+           wavWriter.write(pcmf32_new.data(), pcmf32_new.size());
+       }
        // handle Ctrl + C
        is_running = sdl_poll_events();

@@ -301,7 +311,6 @@ int main(int argc, char ** argv) {
            wparams.n_threads = params.n_threads;

            wparams.audio_ctx = params.audio_ctx;
-           wparams.speed_up = params.speed_up;

            wparams.tdrz_enable = params.tinydiarize; // [TDRZ]

@@ -350,7 +359,7 @@ int main(int argc, char ** argv) {
                const int64_t t0 = whisper_full_get_segment_t0(ctx, i);
                const int64_t t1 = whisper_full_get_segment_t1(ctx, i);

-               std::string output = "[" + to_timestamp(t0) + " --> " + to_timestamp(t1) + "] " + text;
+               std::string output = "[" + to_timestamp(t0, false) + " --> " + to_timestamp(t1, false) + "] " + text;

                if (whisper_full_get_segment_speaker_turn_next(ctx, i)) {
                    output += " [SPEAKER_TURN]";
@@ -371,7 +380,7 @@ int main(int argc, char ** argv) {
                fout << std::endl;
            }

-           if (use_vad){
+           if (use_vad) {
                printf("\n");
                printf("### Transcription %d END\n", n_iter);
            }
|
9
examples/sycl/CMakeLists.txt
Normal file
9
examples/sycl/CMakeLists.txt
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
# MIT license
|
||||||
|
# Copyright (C) 2024 Intel Corporation
|
||||||
|
# SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
|
set(TARGET ls-sycl-device)
|
||||||
|
add_executable(${TARGET} ls-sycl-device.cpp)
|
||||||
|
install(TARGETS ${TARGET} RUNTIME)
|
||||||
|
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||||
|
target_compile_features(${TARGET} PRIVATE cxx_std_17)
|
examples/sycl/README.md (new file, 47 lines)
@@ -0,0 +1,47 @@
+# llama.cpp/example/sycl
+
+This example program provides the tools for llama.cpp for SYCL on Intel GPU.
+
+## Tool
+
+|Tool Name| Function|Status|
+|-|-|-|
+|ls-sycl-device| List all SYCL devices with ID, compute capability, max work group size, etc.|Support|
+
+### ls-sycl-device
+
+List all SYCL devices with ID, compute capability, max work group size, etc.
+
+1. Build the llama.cpp for SYCL for all targets.
+
+2. Enable oneAPI running environment
+
+```
+source /opt/intel/oneapi/setvars.sh
+```
+
+3. Execute
+
+```
+./build/bin/ls-sycl-device
+```
+
+Check the ID in startup log, like:
+
+```
+found 4 SYCL devices:
+  Device 0: Intel(R) Arc(TM) A770 Graphics, compute capability 1.3,
+    max compute_units 512, max work group size 1024, max sub group size 32, global mem size 16225243136
+  Device 1: Intel(R) FPGA Emulation Device, compute capability 1.2,
+    max compute_units 24, max work group size 67108864, max sub group size 64, global mem size 67065057280
+  Device 2: 13th Gen Intel(R) Core(TM) i7-13700K, compute capability 3.0,
+    max compute_units 24, max work group size 8192, max sub group size 64, global mem size 67065057280
+  Device 3: Intel(R) Arc(TM) A770 Graphics, compute capability 3.0,
+    max compute_units 512, max work group size 1024, max sub group size 32, global mem size 16225243136

+```
+
+|Attribute|Note|
+|-|-|
+|compute capability 1.3|Level-zero running time, recommended |
+|compute capability 3.0|OpenCL running time, slower than level-zero in most cases|
examples/sycl/build.sh (new file, 19 lines)
@@ -0,0 +1,19 @@
+# MIT license
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: MIT
+
+mkdir -p build
+cd build
+source /opt/intel/oneapi/setvars.sh
+
+#for FP16
+#cmake .. -DWHISPER_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DWHISPER_SYCL_F16=ON # faster for long-prompt inference
+
+#for FP32
+cmake .. -DWHISPER_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
+
+#build example/main only
+#cmake --build . --config Release --target main
+
+#build all binary
+cmake --build . --config Release -v
examples/sycl/ls-sycl-device.cpp (new file, 11 lines)
@@ -0,0 +1,11 @@
+/*MIT license
+Copyright (C) 2024 Intel Corporation
+SPDX-License-Identifier: MIT
+*/
+
+#include "ggml-sycl.h"
+
+int main(int argc, char ** argv) {
+    ggml_backend_sycl_print_sycl_devices();
+    return 0;
+}
examples/sycl/run-whisper.sh (new file, 17 lines)
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# MIT license
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: MIT
+
+INPUT2="Building a website can be done in 10 simple steps:\nStep 1:"
+source /opt/intel/oneapi/setvars.sh
+
+if [ $# -gt 0 ]; then
+    export GGML_SYCL_DEVICE=$1
+else
+    export GGML_SYCL_DEVICE=0
+fi
+echo GGML_SYCL_DEVICE=$GGML_SYCL_DEVICE
+#export GGML_SYCL_DEBUG=1
+./build/bin/main -m models/ggml-base.en.bin -f samples/jfk.wav
Some files were not shown because too many files have changed in this diff.