Mirror of https://github.com/ggerganov/whisper.cpp.git (synced 2025-06-26 18:03:21 +00:00)

Compare commits (1036 commits)
@@ -12,7 +12,7 @@ FROM ${BASE_CUDA_DEV_CONTAINER} as build
 ARG CUDA_DOCKER_ARCH=all
 
 RUN apt-get update && \
-    apt-get install -y build-essential git cmake
+    apt-get install -y build-essential git cmake libsdl2-dev
 
 WORKDIR /app
 
@@ -21,7 +21,7 @@ COPY . .
 # Set nvcc architecture
 ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
 # Enable cuBLAS
-ENV WHISPER_CUBLAS=1
+ENV GGML_CUDA=1
 
 RUN make
 
.devops/main-cuda.Dockerfile (new file, 40 lines)

ARG UBUNTU_VERSION=22.04
# This needs to generally match the container host's environment.
ARG CUDA_VERSION=12.3.1
# Target the CUDA build image
ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
# Target the CUDA runtime image
ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}

FROM ${BASE_CUDA_DEV_CONTAINER} AS build
WORKDIR /app

# Unless otherwise specified, we make a fat build.
ARG CUDA_DOCKER_ARCH=all
# Set nvcc architecture
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
# Enable cuBLAS
ENV GGML_CUDA=1

RUN apt-get update && \
    apt-get install -y build-essential libsdl2-dev \
    && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*

# Ref: https://stackoverflow.com/a/53464012
ENV CUDA_MAIN_VERSION=12.3
ENV LD_LIBRARY_PATH /usr/local/cuda-${CUDA_MAIN_VERSION}/compat:$LD_LIBRARY_PATH

COPY .. .
RUN make

FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime
ENV CUDA_MAIN_VERSION=12.3
ENV LD_LIBRARY_PATH /usr/local/cuda-${CUDA_MAIN_VERSION}/compat:$LD_LIBRARY_PATH
WORKDIR /app

RUN apt-get update && \
    apt-get install -y curl ffmpeg \
    && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*

COPY --from=build /app /app
ENTRYPOINT [ "bash", "-c" ]
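For reference, the new CUDA image can be exercised locally along these lines; this is a minimal sketch, assuming the image is built from the repository root, that the NVIDIA container toolkit is installed on the host, and that a model and audio file are available to mount in (the tag, mount paths, and model name are illustrative, not taken from the diff):

# build the CUDA image defined above (tag is illustrative)
docker build -f .devops/main-cuda.Dockerfile -t whisper.cpp-cuda .

# the ENTRYPOINT is ["bash", "-c"], so the command is passed as a single string;
# --gpus all assumes the NVIDIA container toolkit is available
docker run --gpus all \
  -v "$(pwd)/models:/app/models" \
  -v "$(pwd)/samples:/app/samples" \
  whisper.cpp-cuda "./main -m models/ggml-base.en.bin -f samples/jfk.wav"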
.devops/main.Dockerfile (new file, 19 lines)

FROM ubuntu:22.04 AS build
WORKDIR /app

RUN apt-get update && \
    apt-get install -y build-essential \
    && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*

COPY .. .
RUN make

FROM ubuntu:22.04 AS runtime
WORKDIR /app

RUN apt-get update && \
    apt-get install -y curl ffmpeg libsdl2-dev \
    && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*

COPY --from=build /app /app
ENTRYPOINT [ "bash", "-c" ]
.github/workflows/bindings-go.yml (6 changed lines)

@@ -13,10 +13,10 @@ jobs:
   ubuntu-latest:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@v5
         with:
-          go-version: '^1.19'
+          go-version: '^1.23'
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v4
       - run: |
           cd bindings/go
           make test
.github/workflows/bindings-ruby.yml (65 changed lines)

@@ -3,20 +3,73 @@ on:
   push:
     paths:
       - bindings/ruby/**
-      - whisper.h
+      - src/whisper.cpp
+      - include/whisper.h
+      - ggml/src/ggml.c
+      - ggml/src/ggml-impl.h
+      - ggml/src/ggml-aarch64.h
+      - ggml/src/ggml-aarch64.c
+      - ggml/src/ggml-alloc.c
+      - ggml/src/ggml-backend-impl.h
+      - ggml/src/ggml-backend.cpp
+      - ggml/src/ggml-common.h
+      - ggml/src/ggml-quants.h
+      - ggml/src/ggml-quants.c
+      - ggml/src/ggml-cpu-impl.h
+      - ggml/src/ggml-metal.m
+      - ggml/src/ggml-metal.metal
+      - ggml/src/ggml-blas.cpp
+      - ggml/include/ggml.h
+      - ggml/include/ggml-alloc.h
+      - ggml/include/ggml-backend.h
+      - ggml/include/ggml-cuda.h
+      - ggml/include/ggml-kompute.h
+      - ggml/include/ggml-metal.h
+      - ggml/include/ggml-sycl.h
+      - ggml/include/ggml-vulkan.h
+      - ggml/include/ggml-blas.h
+      - scripts/get-flags.mk
+      - examples/dr_wav.h
   pull_request:
     paths:
       - bindings/ruby/**
-      - whisper.h
+      - src/whisper.cpp
+      - include/whisper.h
+      - ggml/src/ggml.c
+      - ggml/src/ggml-impl.h
+      - ggml/src/ggml-aarch64.h
+      - ggml/src/ggml-aarch64.c
+      - ggml/src/ggml-alloc.c
+      - ggml/src/ggml-backend-impl.h
+      - ggml/src/ggml-backend.cpp
+      - ggml/src/ggml-common.h
+      - ggml/src/ggml-quants.h
+      - ggml/src/ggml-quants.c
+      - ggml/src/ggml-cpu-impl.h
+      - ggml/src/ggml-metal.m
+      - ggml/src/ggml-metal.metal
+      - ggml/src/ggml-blas.cpp
+      - ggml/include/ggml.h
+      - ggml/include/ggml-alloc.h
+      - ggml/include/ggml-backend.h
+      - ggml/include/ggml-cuda.h
+      - ggml/include/ggml-kompute.h
+      - ggml/include/ggml-metal.h
+      - ggml/include/ggml-sycl.h
+      - ggml/include/ggml-vulkan.h
+      - ggml/include/ggml-blas.h
+      - scripts/get-flags.mk
+      - examples/dr_wav.h
 
 jobs:
   ubuntu-latest:
     runs-on: ubuntu-latest
+    defaults:
+      run:
+        working-directory: bindings/ruby
     steps:
       - uses: ruby/setup-ruby@v1
         with:
           ruby-version: '3.0'
-      - uses: actions/checkout@v1
-      - run: |
-          cd bindings/ruby/ext
-          ruby extconf.rb && make
+      - uses: actions/checkout@v4
+      - run: rake test
.github/workflows/build.yml (410 changed lines)

@@ -3,6 +3,7 @@ on: [push, pull_request]
 
 env:
   ubuntu_image: "ubuntu:22.04"
+  VCPKG_BINARY_SOURCES: "clear;x-gha,readwrite"
 
 jobs:
   ubuntu-latest:
@@ -15,10 +16,10 @@ jobs:
 
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3
 
       - name: Build ${{ matrix.arch }}
         run: |
@@ -36,7 +37,7 @@ jobs:
 
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Dependencies
         run: |
@@ -53,13 +54,13 @@ jobs:
 
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Build
-        uses: cross-platform-actions/action@v0.15.0
+        uses: cross-platform-actions/action@v0.24.0
         with:
           operating_system: freebsd
-          version: '13.2'
+          version: '13.3'
           run: |
             sudo pkg update
             sudo pkg install -y gmake sdl2
@@ -77,10 +78,10 @@ jobs:
 
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3
 
       - name: Build ${{ matrix.arch }}
         run: |
@@ -101,14 +102,17 @@ jobs:
       fail-fast: false
       matrix:
         build: [Debug, Release]
-        arch: [linux/amd64, linux/arm64, linux/arm/v7, linux/ppc64le]
+        #arch: [linux/amd64, linux/arm64, linux/arm/v7, linux/ppc64le]
+        # TODO: arm/v7 disabled due to clang bug
+        # https://github.com/ggerganov/whisper.cpp/actions/runs/9657764109/job/26637633042?pr=2256#step:4:1990
+        arch: [linux/amd64, linux/arm64, linux/ppc64le]
 
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3
 
       - name: Build ${{ matrix.arch }}
         run: |
@@ -117,7 +121,6 @@ jobs:
             -w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
             set -e
             apt update
-            apt install -y clang
             apt install -y clang build-essential cmake libsdl2-dev
             cmake . -DWHISPER_SDL2=ON -DCMAKE_BUILD_TYPE=${{ matrix.build }} -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang
             make
@@ -134,10 +137,10 @@ jobs:
 
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3
 
       - name: Build ${{ matrix.arch }}
         run: |
@@ -151,6 +154,164 @@ jobs:
             make
             ctest -L gh --output-on-failure'
 
+  ubuntu-22-cmake-sycl:
+    runs-on: ubuntu-22.04
+
+    strategy:
+      fail-fast: false
+      matrix:
+        dwhisper_sycl: [ON]
+        dcmake_c_compiler: [icx]
+        dcmake_cxx_compiler: [icpx]
+        arch: [linux/amd64, linux/arm64, linux/arm/v7, linux/ppc64le]
+
+    continue-on-error: true
+
+    steps:
+      - name: Clone
+        uses: actions/checkout@v4
+
+      - name: add oneAPI to apt
+        shell: bash
+        run: |
+          cd /tmp
+          wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
+          sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
+          rm GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
+          sudo add-apt-repository "deb https://apt.repos.intel.com/oneapi all main"
+
+      - name: install oneAPI dpcpp compiler
+        shell: bash
+        run: |
+          sudo apt update
+          sudo apt install intel-oneapi-compiler-dpcpp-cpp
+
+      - name: install oneAPI MKL library
+        shell: bash
+        run: |
+          sudo apt install intel-oneapi-mkl-devel
+
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Build
+        id: cmake_build
+        run: |
+          source /opt/intel/oneapi/setvars.sh
+          mkdir build
+          cd build
+          cmake -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ..
+          cmake --build . --config Release -j $(nproc)
+
+  ubuntu-22-cmake-sycl-fp16:
+    runs-on: ubuntu-22.04
+
+    strategy:
+      fail-fast: false
+      matrix:
+        dwhisper_sycl: [ON]
+        dcmake_c_compiler: [icx]
+        dcmake_cxx_compiler: [icpx]
+        arch: [linux/amd64, linux/arm64, linux/arm/v7, linux/ppc64le]
+
+    continue-on-error: true
+
+    steps:
+      - name: Clone
+        uses: actions/checkout@v4
+
+      - name: add oneAPI to apt
+        shell: bash
+        run: |
+          cd /tmp
+          wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
+          sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
+          rm GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
+          sudo add-apt-repository "deb https://apt.repos.intel.com/oneapi all main"
+
+      - name: install oneAPI dpcpp compiler
+        shell: bash
+        run: |
+          sudo apt update
+          sudo apt install intel-oneapi-compiler-dpcpp-cpp
+
+      - name: install oneAPI MKL library
+        shell: bash
+        run: |
+          sudo apt install intel-oneapi-mkl-devel
+
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Build
+        id: cmake_build
+        run: |
+          source /opt/intel/oneapi/setvars.sh
+          mkdir build
+          cd build
+          cmake -DGGML_SYCL_F16=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ..
+          cmake --build . --config Release -j $(nproc)
+
+  windows-msys2:
+    runs-on: windows-latest
+
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - { sys: UCRT64,  env: ucrt-x86_64,  build: Release }
+          - { sys: CLANG64, env: clang-x86_64, build: Release }
+
+    steps:
+      - name: Clone
+        uses: actions/checkout@v4
+
+      - name: Setup ${{ matrix.sys }}
+        uses: msys2/setup-msys2@v2
+        with:
+          update: true
+          msystem: ${{matrix.sys}}
+          install: >-
+            base-devel
+            mingw-w64-${{matrix.env}}-toolchain
+            mingw-w64-${{matrix.env}}-cmake
+            mingw-w64-${{matrix.env}}-SDL2
+            mingw-w64-${{matrix.env}}-openblas
+
+      - name: Build using make
+        shell: msys2 {0}
+        run: |
+          make -j $(nproc)
+
+      - name: Clean after building using make
+        shell: msys2 {0}
+        run: |
+          make clean
+
+      - name: Build using make w/ OpenBLAS
+        shell: msys2 {0}
+        run: |
+          make GGML_OPENBLAS=1 -j $(nproc)
+
+      - name: Build using CMake
+        shell: msys2 {0}
+        run: |
+          cmake -B build
+          cmake --build build --config ${{ matrix.build }} -j $(nproc)
+
+      - name: Clean after building using CMake
+        shell: msys2 {0}
+        run: |
+          rm -rf build
+
+      - name: Build using CMake w/ OpenBLAS
+        shell: msys2 {0}
+        run: |
+          cmake -B build -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
+          cmake --build build --config ${{ matrix.build }} -j $(nproc)
+
   windows:
     runs-on: windows-latest
@@ -167,14 +328,14 @@ jobs:
           s2arc: x64
           jnaPath: win32-x86-64
         - sdl2: ON
-          s2ver: 2.26.0
+          s2ver: 2.28.5
 
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Add msbuild to PATH
-        uses: microsoft/setup-msbuild@v1
+        uses: microsoft/setup-msbuild@v2
 
       - name: Fetch SDL2 and set SDL2_DIR
         if: matrix.sdl2 == 'ON'
@@ -199,14 +360,14 @@ jobs:
         run: copy "$env:SDL2_DIR/../lib/${{ matrix.s2arc }}/SDL2.dll" build/bin/${{ matrix.build }}
 
       - name: Upload dll
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: ${{ matrix.jnaPath }}_whisper.dll
           path: build/bin/${{ matrix.build }}/whisper.dll
 
       - name: Upload binaries
         if: matrix.sdl2 == 'ON'
-        uses: actions/upload-artifact@v1
+        uses: actions/upload-artifact@v4
         with:
           name: whisper-bin-${{ matrix.arch }}
           path: build/bin/${{ matrix.build }}
@@ -222,29 +383,31 @@ jobs:
         sdl2: [ON]
         include:
           - arch: Win32
-            obzip: https://github.com/OpenMathLib/OpenBLAS/releases/download/v0.3.25/OpenBLAS-0.3.25-x86.zip
             s2arc: x86
           - arch: x64
-            obzip: https://github.com/OpenMathLib/OpenBLAS/releases/download/v0.3.25/OpenBLAS-0.3.25-x64.zip
             s2arc: x64
           - sdl2: ON
-            s2ver: 2.26.0
+            s2ver: 2.28.5
 
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
+      - name: Export GitHub Actions cache environment variables
+        uses: actions/github-script@v7
+        with:
+          script: |
+            core.exportVariable('ACTIONS_CACHE_URL', process.env.ACTIONS_CACHE_URL || '');
+            core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
+
       - name: Add msbuild to PATH
-        uses: microsoft/setup-msbuild@v1
+        uses: microsoft/setup-msbuild@v2
 
-      - name: Fetch OpenBLAS
+      - name: Install OpenBLAS and pkgconfiglite
         if: matrix.blas == 'ON'
         run: |
-          C:/msys64/usr/bin/wget.exe -qO blas.zip ${{ matrix.obzip }}
-          7z x blas.zip -oblas -y
-          copy blas/include/cblas.h .
-          copy blas/include/openblas_config.h .
-          echo "OPENBLAS_PATH=$env:GITHUB_WORKSPACE/blas" >> $env:GITHUB_ENV
+          vcpkg install --triplet=${{ matrix.s2arc }}-windows openblas
+          choco install pkgconfiglite
 
       - name: Fetch SDL2 and set SDL2_DIR
         if: matrix.sdl2 == 'ON'
@@ -256,9 +419,10 @@ jobs:
       - name: Configure
         run: >
           cmake -S . -B ./build -A ${{ matrix.arch }}
+          -DCMAKE_TOOLCHAIN_FILE="$env:VCPKG_INSTALLATION_ROOT/scripts/buildsystems/vcpkg.cmake"
           -DCMAKE_BUILD_TYPE=${{ matrix.build }}
-          -DWHISPER_OPENBLAS=${{ matrix.blas }}
+          -DGGML_BLAS=${{ matrix.blas }}
-          -DCMAKE_LIBRARY_PATH="$env:OPENBLAS_PATH/lib"
+          -DGGML_BLAS_VENDOR=OpenBLAS
           -DWHISPER_SDL2=${{ matrix.sdl2 }}
 
       - name: Build
@@ -266,9 +430,9 @@ jobs:
           cd ./build
           msbuild ALL_BUILD.vcxproj -t:build -p:configuration=${{ matrix.build }} -p:platform=${{ matrix.arch }}
 
-      - name: Copy libopenblas.dll
+      - name: Copy openblas.dll
         if: matrix.blas == 'ON'
-        run: copy "$env:OPENBLAS_PATH/bin/libopenblas.dll" build/bin/${{ matrix.build }}
+        run: copy "C:/vcpkg/packages/openblas_${{ matrix.s2arc }}-windows/bin/openblas.dll" build/bin/${{ matrix.build }}
 
       - name: Copy SDL2.dll
         if: matrix.sdl2 == 'ON'
@@ -276,13 +440,13 @@ jobs:
 
       - name: Upload binaries
         if: matrix.blas == 'ON' && matrix.sdl2 == 'ON'
-        uses: actions/upload-artifact@v1
+        uses: actions/upload-artifact@v4
         with:
           name: whisper-blas-bin-${{ matrix.arch }}
           path: build/bin/${{ matrix.build }}
 
   windows-cublas:
-    runs-on: windows-latest
+    runs-on: windows-2019
 
     strategy:
       matrix:
@@ -295,18 +459,18 @@ jobs:
         - arch: x64
           s2arc: x64
         - sdl2: ON
-          s2ver: 2.26.0
+          s2ver: 2.28.5
 
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Add msbuild to PATH
-        uses: microsoft/setup-msbuild@v1
+        uses: microsoft/setup-msbuild@v2
 
       - name: Install CUDA Toolkit
         id: cuda-toolkit
-        uses: Jimver/cuda-toolkit@v0.2.11
+        uses: Jimver/cuda-toolkit@v0.2.15
         with:
           cuda: '${{ matrix.cuda-toolkit }}'
 
@@ -321,7 +485,8 @@ jobs:
         run: >
           cmake -S . -B ./build -A ${{ matrix.arch }}
           -DCMAKE_BUILD_TYPE=${{ matrix.build }}
-          -DWHISPER_CUBLAS=1
+          -DGGML_CUDA=${{ matrix.cublas }}
+          -DWHISPER_SDL2=${{ matrix.sdl2 }}
 
       - name: Build ${{ matrix.cuda-toolkit }}
         run: |
@@ -341,7 +506,7 @@ jobs:
 
       - name: Upload binaries
         if: matrix.sdl2 == 'ON'
-        uses: actions/upload-artifact@v1
+        uses: actions/upload-artifact@v4
         with:
           name: whisper-cublas-${{ matrix.cuda-toolkit }}-bin-${{ matrix.arch }}
           path: build/bin/${{ matrix.build }}
@@ -355,10 +520,10 @@ jobs:
 
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
      - name: Setup emsdk
-        uses: mymindstorm/setup-emsdk@v12
+        uses: mymindstorm/setup-emsdk@v14
 
       - name: Verify
         run: emcc -v
@@ -377,15 +542,16 @@ jobs:
 
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Configure
         run: |
           cp models/for-tests-ggml-base.en.bin models/ggml-base.en.bin
           mkdir models/ggml-base.en-encoder.mlmodelc
 
-      - name: Build objc example
-        run: xcodebuild -project examples/whisper.objc/whisper.objc.xcodeproj -scheme whisper.objc -configuration ${{ matrix.build }} -sdk iphonesimulator build
+      # TODO: disabled because it fails for some reason with Github Actions
+      # - name: Build objc example
+      #   run: xcodebuild -project examples/whisper.objc/whisper.objc.xcodeproj -scheme whisper.objc -configuration ${{ matrix.build }} -sdk iphonesimulator build
 
       - name: Build swiftui example
         run: xcodebuild -project examples/whisper.swiftui/whisper.swiftui.xcodeproj -scheme WhisperCppDemo -configuration ${{ matrix.build }} -sdk iphonesimulator build
@@ -395,96 +561,106 @@ jobs:
 
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
+        with:
+          path: whisper
 
       - name: Install Java
-        uses: actions/setup-java@v3
+        uses: actions/setup-java@v4
         with:
           distribution: zulu
-          java-version: 17
+          java-version: 21
 
       - name: Setup Android SDK
-        uses: android-actions/setup-android@v2
+        uses: android-actions/setup-android@v3
 
       - name: Build
         run: |
-          cd examples/whisper.android
+          cd whisper/examples/whisper.android
           ./gradlew assembleRelease --no-daemon
 
-  android_java:
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Clone
-        uses: actions/checkout@v3
-
-      - name: set up JDK 11
-        uses: actions/setup-java@v3
-        with:
-          java-version: '11'
-          distribution: 'temurin'
-          cache: gradle
-
-      - name: Setup Android SDK
-        uses: android-actions/setup-android@v2
-        with:
-          api-level: 30
-          build-tools-version: 30.0.3
-
-      - name: Build
-        run: |
-          cd examples/whisper.android.java
-          chmod +x ./gradlew
-          ./gradlew assembleRelease
-
-  java:
-    needs: [ 'windows' ]
-    runs-on: windows-latest
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Install Java
-        uses: actions/setup-java@v1
-        with:
-          java-version: 17
-
-      - name: Download Windows lib
-        uses: actions/download-artifact@v3
-        with:
-          name: win32-x86-64_whisper.dll
-          path: bindings/java/build/generated/resources/main/win32-x86-64
-
-      - name: Build
-        run: |
-          models\download-ggml-model.cmd tiny.en
-          cd bindings/java
-          chmod +x ./gradlew
-          ./gradlew build
-
-      - name: Upload jar
-        uses: actions/upload-artifact@v3
-        with:
-          name: whispercpp.jar
-          path: bindings/java/build/libs/whispercpp-*.jar
-
-      - name: Publish package
-        if: ${{ github.ref == 'refs/heads/master' }}
-        uses: gradle/gradle-build-action@v2.4.2
-        with:
-          arguments: publish
-          build-root-directory: bindings/java
-        env:
-          MAVEN_USERNAME: ${{ secrets.JIRA_USER }}
-          MAVEN_PASSWORD: ${{ secrets.JIRA_PASS }}
-          PGP_SECRET: ${{ secrets.GPG_PRIVATE_KEY }}
-          PGP_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
+      - name: Build with external ggml
+        run: |
+          export PATH_TO_GGML=$PWD/ggml
+          cd whisper/examples/whisper.android
+          ./gradlew assembleRelease --no-daemon
+
+  # TODO: disable because of following fail: https://github.com/ggerganov/whisper.cpp/actions/runs/11019444420/job/30627193602
+  # android_java:
+  #   runs-on: ubuntu-latest
+  #
+  #   steps:
+  #     - name: Clone
+  #       uses: actions/checkout@v4
+  #
+  #     - name: set up JDK 11
+  #       uses: actions/setup-java@v4
+  #       with:
+  #         java-version: '11'
+  #         distribution: 'temurin'
+  #         cache: gradle
+  #
+  #     - name: Setup Android SDK
+  #       uses: android-actions/setup-android@v3
+  #       with:
+  #         cmdline-tools-version: 9.0
+  #
+  #     - name: Build
+  #       run: |
+  #         cd examples/whisper.android.java
+  #         chmod +x ./gradlew
+  #         ./gradlew assembleRelease
+
+  # TODO: disabled because of following fail: https://github.com/ggerganov/whisper.cpp/actions/runs/9686220096/job/26735899598
+  # java:
+  #   needs: [ 'windows' ]
+  #   runs-on: windows-latest
+  #   steps:
+  #     - uses: actions/checkout@v4
+  #
+  #     - name: Install Java
+  #       uses: actions/setup-java@v4
+  #       with:
+  #         distribution: zulu
+  #         java-version: 20
+  #
+  #     - name: Download Windows lib
+  #       uses: actions/download-artifact@v4
+  #       with:
+  #         name: win32-x86-64_whisper.dll
+  #         path: bindings/java/build/generated/resources/main/win32-x86-64
+  #
+  #     - name: Build
+  #       run: |
+  #         models\download-ggml-model.cmd tiny.en
+  #         cd bindings/java
+  #         chmod +x ./gradlew
+  #         ./gradlew build
+  #
+  #     - name: Upload jar
+  #       uses: actions/upload-artifact@v4
+  #       with:
+  #         name: whispercpp.jar
+  #         path: bindings/java/build/libs/whispercpp-*.jar
+  #
+  #     - name: Publish package
+  #       if: ${{ github.ref == 'refs/heads/master' }}
+  #       uses: gradle/gradle-build-action@v2.4.2
+  #       with:
+  #         arguments: publish
+  #         build-root-directory: bindings/java
+  #       env:
+  #         MAVEN_USERNAME: ${{ secrets.JIRA_USER }}
+  #         MAVEN_PASSWORD: ${{ secrets.JIRA_PASS }}
+  #         PGP_SECRET: ${{ secrets.GPG_PRIVATE_KEY }}
+  #         PGP_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
 
   quantize:
     runs-on: ubuntu-latest
 
     steps:
       - name: Clone
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Test quantize
         run: |
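The SYCL jobs added above can also be reproduced outside of CI; the following is a sketch that simply mirrors the workflow's build step, assuming the Intel oneAPI DPC++ compiler and MKL are already installed locally:

# mirror of the ubuntu-22-cmake-sycl build step above
source /opt/intel/oneapi/setvars.sh
mkdir build && cd build
cmake -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ..
cmake --build . --config Release -j "$(nproc)"

# FP16 variant, as in the ubuntu-22-cmake-sycl-fp16 job:
# cmake -DGGML_SYCL_F16=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ..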
.github/workflows/docker.yml (new file, 59 lines)

name: Publish Docker image

on:
  pull_request:
  push:
    branches:
      - master

jobs:
  push_to_registry:
    name: Push Docker image to Docker Hub
    if: github.event.pull_request.draft == false

    runs-on: ubuntu-latest
    env:
      COMMIT_SHA: ${{ github.sha }}
    strategy:
      matrix:
        config:
          - { tag: "main", dockerfile: ".devops/main.Dockerfile", platform: "linux/amd64,linux/arm64" }
          #TODO: the cuda image keeps failing - disable for now
          # https://github.com/ggerganov/whisper.cpp/actions/runs/11019444428/job/30602020339
          #- { tag: "main-cuda", dockerfile: ".devops/main-cuda.Dockerfile", platform: "linux/amd64" }

    steps:
      - name: Check out the repo
        uses: actions/checkout@v3

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push Docker image (versioned)
        if: github.event_name == 'push'
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          platforms: ${{ matrix.config.platform }}
          tags: "ghcr.io/${{ github.repository }}:${{ matrix.config.tag }}-${{ env.COMMIT_SHA }}"
          file: ${{ matrix.config.dockerfile }}

      - name: Build and push Docker image (tagged)
        uses: docker/build-push-action@v4
        with:
          context: .
          push: ${{ github.event_name == 'push' }}
          platforms: ${{ matrix.config.platform }}
          tags: "ghcr.io/${{ github.repository }}:${{ matrix.config.tag }}"
          file: ${{ matrix.config.dockerfile }}
.github/workflows/examples.yml (2 changed lines)

@@ -37,7 +37,7 @@ jobs:
         run: npm install
 
       - name: Compile addon.node
-        run: npx cmake-js compile -T whisper-addon -B Release
+        run: npx cmake-js compile -T addon.node -B Release
 
       - name: Download test model
         run: |
.gitignore (21 changed lines)

@@ -1,29 +1,28 @@
 *.o
 *.a
+*.d
 .cache/
 .coreml/
 .test/
+.venv/
 .vs/
 .vscode/
 .DS_Store
+.vimspector.json
+/CMakeSettings.json
+/talk-llama.dSYM/
 
 build/
-build-coreml/
-build-em/
-build-debug/
-build-release/
-build-rwdi/
-build-static/
-build-cublas/
-build-no-accel/
-build-sanitize-addr/
-build-sanitize-thread/
+build-*/
 
 # SPM
 .build/
 .swiftpm
 *.metallib
 
+ggml-metal-embed.metal
+ggml-metal-embed.metal.tmp
+
 /main
 /stream
 /command
@@ -58,4 +57,4 @@ benchmark_results.csv
 cmake-build-debug/
 .cxx/
 .gradle/
 local.properties
.gitmodules (3 changed lines)

@@ -1,3 +0,0 @@
-[submodule "bindings/ios"]
-    path = bindings/ios
-    url = https://github.com/ggerganov/whisper.spm
AUTHORS (new file, 301 lines)

# date: Tue Apr 9 20:27:03 EEST 2024
# this file is auto-generated by scripts/gen-authors.sh

0/0 <zero@imaskeleton.me>
0cc4m <picard12@live.de>
0xsourcecode <134374803+0xsourcecode@users.noreply.github.com>
AT <manyoso@users.noreply.github.com>
Aarni Koskela <akx@iki.fi>
Aaron Pham <29749331+aarnphm@users.noreply.github.com>
Aaron Taylor <aaron@exphat.com>
Abhilash Majumder <30946547+abhilash1910@users.noreply.github.com>
Abitofevrything <54505189+abitofevrything@users.noreply.github.com>
AfryMask <AfryMask@163.com>
Ahmad Bilal <ahmad.bilal@empglabs.com>
AidanBeltonS <87009434+AidanBeltonS@users.noreply.github.com>
Akash Mahajan <akash7190@gmail.com>
Akash Mahajan <akashmjn@stanford.edu>
Al Hoang <3811822-hoanga@users.noreply.gitlab.com>
Alan <unknown>
Aleksander Andrzejewski <18704749+aleksanderandrzejewski@users.noreply.github.com>
Alex Azarov <alex@azarov.by>
Alex Bacart <13940752+alex-bacart@users.noreply.github.com>
Alex Evgrashin <aevgrashin@yandex.ru>
Alexandr Graschenkov <alexandr.graschenkov91@gmail.com>
Alexandru Mariuti <alex@mariuti.com>
Alexey Kharlamov <alexey@kharlamov.biz>
Alfredo Montesinos <alfredo.montesinos@g.austincc.edu>
Ali Alameh <ali.alameh@isae.edu.lb>
Ananta Bastola <anantarajbastola@gmail.com>
Andreu Huguet <andreuhuguet@gmail.com>
Andrew Huynh <a5thuynh@gmail.com>
Andrew S <andrews54757@gmail.com>
Andy Maloney <asmaloney@gmail.com>
Anton Kostin <masguit42@users.noreply.github.com>
Artyom Mezin <psycho.fading@gmail.com>
Asad Memon <asad.lionpk@gmail.com>
Ashraful Islam <ashraful.meche@gmail.com>
AsukaMinato <asukaminato@nyan.eu.org>
AustinMroz <austinmroz@utexas.edu>
Avik Sengupta <avik@sengupta.net>
Bader-eddine Ouaich <49657842+baderouaich@users.noreply.github.com>
Baffin Lee <baffinlee@gmail.com>
Ben Nortier <bjnortier@gmail.com>
Benjamin Heiniger <benjamin.heiniger@bluewin.ch>
Bo-Yi Wu <appleboy.tw@gmail.com>
Boris Bliznioukov <blib@mail.com>
Borislav Stanimirov <b.stanimirov@abv.bg>
Brad Murray <59848399+bradmurray-dt@users.noreply.github.com>
Brian Murray <brian@bmurray.ca>
CRD716 <crd716@gmail.com>
Canis Lupus <Canis-UK@users.noreply.github.com>
Carolinabanana <140120812+Carolinabanana@users.noreply.github.com>
ChangSeok Oh <shivamidow@users.noreply.github.com>
Chaoqun <27287694+OpenWaygate@users.noreply.github.com>
Chia-Hsiang Cheng <88014292+garychia@users.noreply.github.com>
Chidi Williams <williamschidi1@gmail.com>
Christian <12550267+iceychris@users.noreply.github.com>
Clifford Heath <clifford.heath@gmail.com>
Colin <github@whoisc.cc>
DGdev91 <DGdev91@users.noreply.github.com>
Damian Czaja <trojan295@protonmail.com>
Daniel Bevenius <daniel.bevenius@gmail.com>
David <dnhkng@gmail.com>
David Thorpe <djt@mutablelogic.com>
Davidson Francis <davidsondfgl@gmail.com>
Dener Stassun <denerstassun@gmail.com>
Didzis Gosko <didzis@users.noreply.github.com>
Digipom <admin@digipom.com>
Dimo <dimo@ieee.org>
Dody Suria Wijaya <dodysw@gmail.com>
Dr. Tom Murphy VII Ph.D <499244+tom7@users.noreply.github.com>
Duncan McConnell <ddmcconnell4@gmail.com>
Egor Egorov <me@egorfine.com>
Elkana Bardugo <ttv200@gmail.com>
Emmanuel Schmidbauer <eschmidbauer@gmail.com>
Engininja2 <139037756+Engininja2@users.noreply.github.com>
Eric Swanson <eswanson@alloscomp.com>
Eric Tendian <erictendian@gmail.com>
Erik Scholz <Green-Sky@users.noreply.github.com>
Evan Jones <evan.q.jones@gmail.com>
Evan Martin <evan.martin@gmail.com>
Eve <139727413+netrunnereve@users.noreply.github.com>
Evgeny Kuznetsov <evgeny@kuznetsov.md>
F1L1P <78918286+F1L1Pv2@users.noreply.github.com>
Fangjun Kuang <csukuangfj@gmail.com>
Felix <stenbackfelix@gmail.com>
Finn Voorhees <finnvoorhees@gmail.com>
FlippFuzz <41221030+FlippFuzz@users.noreply.github.com>
Gang Chen <goncha@gmail.com>
Gavin Cai <gavin1818@hotmail.com>
George Hindle <george@georgehindle.com>
Georgi Gerganov <ggerganov@gmail.com>
GitAritron <103900385+GitAritron@users.noreply.github.com>
GiviMAD <GiviMAD@users.noreply.github.com>
Gleicon Moraes <gleicon@gmail.com>
Gregor Jasny <gjasny@googlemail.com>
Guillaume Wenzek <gwenzek@users.noreply.github.com>
HY. Kelvin Lee <34256578+hykelvinlee42@users.noreply.github.com>
Halalaluyafail3 <55773281+Halalaluyafail3@users.noreply.github.com>
Hang <bebound@gmail.com>
Herman Semenov <GermanAizek@yandex.ru>
Hrishikesh Barman <geekodour@users.noreply.github.com>
Ian Bicking <ian@ianbicking.org>
Ian Bull <irbull@eclipsesource.com>
Ikko Ashimine <eltociear@gmail.com>
InconsolableCellist <23345188+InconsolableCellist@users.noreply.github.com>
Ismatulla Mansurov <47342870+sapoepsilon@users.noreply.github.com>
Ivan Gorin <ivangorin21@gmail.com>
JJ <103335846+computerscienceiscool@users.noreply.github.com>
Jack Mousseau <jmousseau@users.noreply.github.com>
JacobLinCool <jacoblincool@gmail.com>
Jakub Ráček <blizzcz@gmail.com>
Jared Van Bortel <jared@nomic.ai>
Jay Binks <jaybinks@gmail.com>
Jhen-Jie Hong <developer@jhen.me>
Jhen-Jie Hong <iainst0409@gmail.com>
JidongZhang-THU <1119708529@qq.com>
Jo Liss <joliss42@gmail.com>
Johan <jr.raffin@gmail.com>
Johannes Gäßler <johannesg@5d6.de>
John Balis <phobossystems@gmail.com>
Jonathan Soo <jcsoo@agora.com>
Jonno <1160532+razodactyl@users.noreply.github.com>
Joonas Pihlajamaa <joonas.pihlajamaa@iki.fi>
Jose <34888496+Jerry-Master@users.noreply.github.com>
Josh Bleecher Snyder <josharian@gmail.com>
Judd <foldl@users.noreply.github.com>
Jumper775 <78500318+jumpers775@users.noreply.github.com>
Justine Tunney <jtunney@gmail.com>
KP Kaiser <kirk@zothcorp.com>
Kamilake <exjang0@gmail.com>
Kartik Saranathan <278928+Kartiku@users.noreply.github.com>
Kasumi <90275229+kasumi-1@users.noreply.github.com>
Kawrakow <48489457+ikawrakow@users.noreply.github.com>
Kevin Brothaler <admin@digipom.com>
Konstantin Zhuravlyov <konstantin.zhuravlyov@amd.com>
Kreijstal <rainb@tfwno.gf>
Kylin <56434533+KyL0N@users.noreply.github.com>
LBlue <153975653+lbluep@users.noreply.github.com>
Larry Battle <larry.battle.tech@gmail.com>
Laytan Laats <laytanlaats@hotmail.com>
Leo Moll <leo.moll@yeasoft.com>
Lexevolution <31176843+Lexevolution@users.noreply.github.com>
LittleLoli <26589867+WhichWho@users.noreply.github.com>
Lucas Zanek <57494138+LucasZNK@users.noreply.github.com>
Luis Herrera <herrera-luis@users.noreply.github.com>
Lukas Rist <glaslos@gmail.com>
M. A. Ali <73258591+MightyStud@users.noreply.github.com>
M. Eren Akbiyik <erenakbiyik@gmail.com>
Maciek <maciek.mab122@gmail.com>
|
||||||
|
Marcin Mielniczuk <marmistrz.dev@zoho.eu>
|
||||||
|
Martin Warnaar <martinwarnaar@gmail.com>
|
||||||
|
Matheus de Sousa <23645013+keyehzy@users.noreply.github.com>
|
||||||
|
Mathijs de Bruin <mathijs@mathijsfietst.nl>
|
||||||
|
Matija Pevec <mightymatth@users.noreply.github.com>
|
||||||
|
Maximiliano Levi <8160966+maxilevi@users.noreply.github.com>
|
||||||
|
Meng, Hengyu <hengyu.meng@intel.com>
|
||||||
|
Michael Podvitskiy <podvitskiymichael@gmail.com>
|
||||||
|
Michael Rienstra <mrienstra@gmail.com>
|
||||||
|
Mikhail Grigorev <sleuthhound@gmail.com>
|
||||||
|
Mohammadreza Hendiani <hendiani.mohammadreza@gmail.com>
|
||||||
|
Mohit Agarwal <mohit@sdf.org>
|
||||||
|
Murilo Santana <mvrilo@gmail.com>
|
||||||
|
Neil Chudleigh <nchudleigh@users.noreply.github.com>
|
||||||
|
Neo Zhang Jianyu <jianyu.zhang@intel.com>
|
||||||
|
Neuman Vong <neuman.vong@gmail.com>
|
||||||
|
Nicholas Albion <nalbion@yahoo.com>
|
||||||
|
Niels Mayer <Niels.Mayer@gmail.com>
|
||||||
|
Okabintaro <103938900+Okabintaro@users.noreply.github.com>
|
||||||
|
Oleg Sidorov <me@whitebox.io>
|
||||||
|
Oleg Sidorov <oleg@sidorov.nl>
|
||||||
|
Ondrej Kokes <ondrej.kokes@gmail.com>
|
||||||
|
Ouadie EL FAROUKI <ouadie.elfarouki@codeplay.com>
|
||||||
|
Paul Tsochantaris <ptsochantaris@icloud.com>
|
||||||
|
Philipp Zabel <philipp.zabel@gmail.com>
|
||||||
|
Philippe Normand <phil@base-art.net>
|
||||||
|
Przemysław Pawełczyk <przemoc@gmail.com>
|
||||||
|
Qianhe Chen <54462604+chenqianhe@users.noreply.github.com>
|
||||||
|
Radosław Gryta <radek.gryta@gmail.com>
|
||||||
|
Reinforce-II <fate@eastal.com>
|
||||||
|
Reinis Muiznieks <muiznieks.reinis@gmail.com>
|
||||||
|
RelatedTitle <r3latedtitle@gmail.com>
|
||||||
|
RhinoDevel <RhinoDevel@users.noreply.github.com>
|
||||||
|
Rich Jones <miserlou@gmail.com>
|
||||||
|
Robin <robin.xw@hotmail.com>
|
||||||
|
Roddur Dasgupta <roddurd@gmail.com>
|
||||||
|
Roland Rabien <figbug@gmail.com>
|
||||||
|
Rotem Dan <rotemdan@gmail.com>
|
||||||
|
Ryan Hitchman <hitchmanr@gmail.com>
|
||||||
|
Ryan Metcalfe <107415876+RyanMetcalfeInt8@users.noreply.github.com>
|
||||||
|
RyanChang <ftes90015@gmail.com>
|
||||||
|
Sam <49637763+Onlyartist9@users.noreply.github.com>
|
||||||
|
Sam Pullara <spullara@gmail.com>
|
||||||
|
Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com>
|
||||||
|
Sergio López <slp@sinrega.org>
|
||||||
|
Siddharth Ramakrishnan <srr2141@columbia.edu>
|
||||||
|
Simon Moisselin <simon.moisstoll@gmail.com>
|
||||||
|
Sindre Sorhus <sindresorhus@gmail.com>
|
||||||
|
Slava Primenko <primenko.s@gmail.com>
|
||||||
|
Syahmi Azhar <prsyahmi@gmail.com>
|
||||||
|
Syed Jafri <syedjafri97@gmail.com>
|
||||||
|
Sơn Phan Trung <phantrungson17@gmail.com>
|
||||||
|
Taisei Mima <bhbstar.me@gmail.com>
|
||||||
|
Takeshi Inoue <inoue.takeshi@gmail.com>
|
||||||
|
Tamotsu Takahashi <ttakah+github@gmail.com>
|
||||||
|
Taras Glek <taras@thegp.com>
|
||||||
|
Tauseef Mohiuddin <35351464+tauseefmohammed2@users.noreply.github.com>
|
||||||
|
Thijs Raymakers <thijs@raymakers.nl>
|
||||||
|
Thomas Fitzsimmons <fitzsim@fitzsim.org>
|
||||||
|
Tiago Fassoni <tiagofassoni@users.noreply.github.com>
|
||||||
|
Tienshiao Ma <tienshiao@tienshiao.org>
|
||||||
|
Timothy Cronin <40186632+4imothy@users.noreply.github.com>
|
||||||
|
Tobrun <tobrun.van.nuland@gmail.com>
|
||||||
|
Todd <taf2@users.noreply.github.com>
|
||||||
|
Tong Li <31761981+litongjava@users.noreply.github.com>
|
||||||
|
Topping1 <78745143+Topping1@users.noreply.github.com>
|
||||||
|
Travis Cline <travis.cline@gmail.com>
|
||||||
|
UEXTM.com <84163508+uextm@users.noreply.github.com>
|
||||||
|
Vadim Peretokin <vperetokin@hey.com>
|
||||||
|
Valentin Gosu <1454649+valenting@users.noreply.github.com>
|
||||||
|
Vulcan <93451215+trholding@users.noreply.github.com>
|
||||||
|
WhiteOlivierus <36532695+WhiteOlivierus@users.noreply.github.com>
|
||||||
|
Xiang (Kevin) Li <kevinli020508@gmail.com>
|
||||||
|
Xiao-Yong Jin <jinxiaoyong@gmail.com>
|
||||||
|
XiaotaoChen <chenxiaotao1234@gmail.com>
|
||||||
|
Yajing Tang <phillis@google.com>
|
||||||
|
Yang Shen <aplshenyang@gmail.com>
|
||||||
|
Yunès <jean.baptiste.yunes@free.fr>
|
||||||
|
ZaBlazzingZephyrus <119159668+blazingzephyr@users.noreply.github.com>
|
||||||
|
Zigfrid Zvezdin <ziggerZZ@gmail.com>
|
||||||
|
Zollner <24618122+Zolliner@users.noreply.github.com>
|
||||||
|
ai-at-home <149282006+ai-at-home@users.noreply.github.com>
|
||||||
|
alonfaraj <alonfaraj@gmail.com>
|
||||||
|
andypayne <apayne@gmail.com>
|
||||||
|
ardfork <134447697+ardfork@users.noreply.github.com>
|
||||||
|
automaticcat <daogiatuank54@gmail.com>
|
||||||
|
be-next <jerome.ramette@gmail.com>
|
||||||
|
bert hubert <bert@hubertnet.nl>
|
||||||
|
bmwl <brian.marshall@tolko.com>
|
||||||
|
bobqianic <129547291+bobqianic@users.noreply.github.com>
|
||||||
|
bocytko <bocytko+github@gmail.com>
|
||||||
|
boolemancer <48014766+boolemancer@users.noreply.github.com>
|
||||||
|
boolemancer <boolemancer@gmail.com>
|
||||||
|
bradmit <151883577+bradmit@users.noreply.github.com>
|
||||||
|
brunofaustino <b.fa.amorim@gmail.com>
|
||||||
|
bssrdf <merlintiger@hotmail.com>
|
||||||
|
byte-6174 <88070277+byte-6174@users.noreply.github.com>
|
||||||
|
cdosoftei <ciprian.dosoftei@gmail.com>
|
||||||
|
clach04 <Chris.Clark@actian.com>
|
||||||
|
compilade <113953597+compilade@users.noreply.github.com>
|
||||||
|
conradg <conradjgodfrey@gmail.com>
|
||||||
|
ddpasa <112642920+ddpasa@users.noreply.github.com>
|
||||||
|
denersc <denerstassun@gmail.com>
|
||||||
|
dscripka <dscripka@users.noreply.github.com>
|
||||||
|
duthils <duthils@duthils.net>
|
||||||
|
ecneladis <ecneladis@users.noreply.github.com>
|
||||||
|
faker <nspyia2002@gmail.com>
|
||||||
|
fitzsim <fitzsim@fitzsim.org>
|
||||||
|
fraxy-v <65565042+fraxy-v@users.noreply.github.com>
|
||||||
|
genevera (she/her) <genevera@users.noreply.github.com>
|
||||||
|
geniusnut <geniusnut@gmail.com>
|
||||||
|
greeshmay <greeshmay@gmail.com>
|
||||||
|
hydai <z54981220@gmail.com>
|
||||||
|
iamthad <thadeus.j.fleming@gmail.com>
|
||||||
|
james wolf <contractorwolf@hotmail.com>
|
||||||
|
joecryptotoo <80373433+joecryptotoo@users.noreply.github.com>
|
||||||
|
jorismertz <35079666+jorismertz@users.noreply.github.com>
|
||||||
|
junkfood <69683722+JunkFood02@users.noreply.github.com>
|
||||||
|
jwijffels <jwijffels@bnosac.be>
|
||||||
|
kamranjon <kamranjon@gmail.com>
|
||||||
|
katsu560 <katsu560oo-@docomo.ne.jp>
|
||||||
|
kennethge <57784063+kenneth-ge@users.noreply.github.com>
|
||||||
|
keyehzy <msamuel@aluno.puc-rio.br>
|
||||||
|
leejet <leejet714@gmail.com>
|
||||||
|
litong <31761981+litongjava@users.noreply.github.com>
|
||||||
|
lnyan <lkwq007@gmail.com>
|
||||||
|
m.bell <m.bell@techsmith.com>
|
||||||
|
mkiol <mkiol@users.noreply.github.com>
|
||||||
|
novag <7754358+novag@users.noreply.github.com>
|
||||||
|
pajowu <pajowu@pajowu.de>
|
||||||
|
polarmoon <90010972+polarmoon@users.noreply.github.com>
|
||||||
|
rlapray <lapray.romain@gmail.com>
|
||||||
|
sandrohanea <40202887+sandrohanea@users.noreply.github.com>
|
||||||
|
semiformal-net <84111142+semiformal-net@users.noreply.github.com>
|
||||||
|
shibukazu <61775791+shibukazu@users.noreply.github.com>
|
||||||
|
shikokuchuo <53399081+shikokuchuo@users.noreply.github.com>
|
||||||
|
slaren <slarengh@gmail.com>
|
||||||
|
slashlib <slashlib@users.noreply.github.com>
|
||||||
|
snadampal <87143774+snadampal@users.noreply.github.com>
|
||||||
|
st-gr <38470677+st-gr@users.noreply.github.com>
|
||||||
|
texmex76 <40733439+texmex76@users.noreply.github.com>
|
||||||
|
thefinaldegree <thefinaldegree@gmail.com>
|
||||||
|
trixirt <trix@redhat.com>
|
||||||
|
ulatekh <ulatekh@yahoo.com>
|
||||||
|
undef <undefdev@gmail.com>
|
||||||
|
venkr <venkateshrameshkumar+1@gmail.com>
|
||||||
|
vicalloy <zbirder@gmail.com>
|
||||||
|
xdrudis <xavierdrudis@yahoo.es>
|
||||||
|
zhouwg <6889919+zhouwg@users.noreply.github.com>
|
||||||
|
布客飞龙 <562826179@qq.com>
|
||||||
|
Артём Земляк <azemlyak@smart-consulting.ru>
|
CMakeLists.txt (624 lines changed)

@@ -1,21 +1,31 @@
-cmake_minimum_required (VERSION 3.5)
+cmake_minimum_required(VERSION 3.5) # for add_link_options and implicit target directories.
+project("whisper.cpp" C CXX)
+project("whisper.cpp" VERSION 1.7.2)
+include(CheckIncludeFileCXX)

-project(whisper.cpp VERSION 1.5.2)
+set(SOVERSION 1)
+
+#set(CMAKE_WARN_DEPRECATED YES)
+set(CMAKE_WARN_UNUSED_CLI YES)
+
+set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
+
+if (NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE)
+    set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
+    set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo")
+endif()

 # Add path to modules
 list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")

 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)

-if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
+if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
     set(WHISPER_STANDALONE ON)
-    include(GitVars)
-    include(BuildTypes)
+
+    include(git-vars)

     # configure project version
-    if (EXISTS "${CMAKE_SOURCE_DIR}/bindings/ios/Makefile-tmpl")
-        configure_file(${CMAKE_SOURCE_DIR}/bindings/ios/Makefile-tmpl ${CMAKE_SOURCE_DIR}/bindings/ios/Makefile @ONLY)
-    endif()
     configure_file(${CMAKE_SOURCE_DIR}/bindings/javascript/package-tmpl.json ${CMAKE_SOURCE_DIR}/bindings/javascript/package.json @ONLY)
 else()
     set(WHISPER_STANDALONE OFF)

@@ -25,6 +35,11 @@ if (EMSCRIPTEN)
     set(BUILD_SHARED_LIBS_DEFAULT OFF)

     option(WHISPER_WASM_SINGLE_FILE "whisper: embed WASM inside the generated whisper.js" ON)
+
+    # TODO: without these, we get the following error:
+    # wasm-ld: error: --shared-memory is disallowed by whisper.cpp.o because it was not compiled with 'atomics' or 'bulk-memory' features.
+    set(CMAKE_C_FLAGS   "${CMAKE_C_FLAGS}   -pthread -s TOTAL_STACK=5242880")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread -s TOTAL_STACK=5242880")
 else()
     if (MINGW)
         set(BUILD_SHARED_LIBS_DEFAULT OFF)
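The Emscripten branch above only adds `-pthread` and a larger `TOTAL_STACK`; the configure step itself is unchanged. A hedged sketch of a WebAssembly configure run, assuming the Emscripten SDK's usual `emcmake` wrapper (the build directory name is illustrative and not taken from this diff):

```bash
# hypothetical invocation; assumes the Emscripten SDK is installed and activated
emcmake cmake -B build-em
cmake --build build-em -j --config Release
```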
@@ -33,531 +48,136 @@ else()
         endif()
     endif()
 endif()

-# options
-option(BUILD_SHARED_LIBS "whisper: build shared libs" ${BUILD_SHARED_LIBS_DEFAULT})
-if (APPLE)
-    set(WHISPER_METAL_DEFAULT ON)
-else()
-    set(WHISPER_METAL_DEFAULT OFF)
-endif()
-option(WHISPER_SANITIZE_THREAD "whisper: enable thread sanitizer" OFF)
-option(WHISPER_SANITIZE_ADDRESS "whisper: enable address sanitizer" OFF)
-option(WHISPER_SANITIZE_UNDEFINED "whisper: enable undefined sanitizer" OFF)
-option(WHISPER_BUILD_TESTS "whisper: build tests" ${WHISPER_STANDALONE})
-option(WHISPER_BUILD_EXAMPLES "whisper: build examples" ${WHISPER_STANDALONE})
-option(WHISPER_SDL2 "whisper: support for libSDL2" OFF)
-option(WHISPER_NO_AVX "whisper: disable AVX" OFF)
-option(WHISPER_NO_AVX2 "whisper: disable AVX2" OFF)
-option(WHISPER_NO_FMA "whisper: disable FMA" OFF)
-option(WHISPER_NO_F16C "whisper: disable F16c" OFF)
-option(WHISPER_OPENVINO "whisper: support for OpenVINO" OFF)
-if (APPLE)
-    option(WHISPER_NO_ACCELERATE "whisper: disable Accelerate framework" OFF)
-    option(WHISPER_METAL "whisper: use Metal" ${WHISPER_METAL_DEFAULT})
-    option(WHISPER_METAL_NDEBUG "whisper: disable Metal debugging" OFF)
-    option(WHISPER_COREML "whisper: enable Core ML framework" OFF)
-    option(WHISPER_COREML_ALLOW_FALLBACK "whisper: allow non-CoreML fallback" OFF)
-else()
-    option(WHISPER_BLAS "whisper: use BLAS libraries" OFF)
-    option(WHISPER_BLAS_VENDOR "whisper: BLAS library vendor" Generic)
-    option(WHISPER_OPENBLAS "whisper: prefer OpenBLAS" OFF)
-    option(WHISPER_CUBLAS "whisper: support for cuBLAS" OFF)
-    option(WHISPER_HIPBLAS "whisper: support for hipBLAS" OFF)
-    option(WHISPER_CLBLAST "whisper: use CLBlast" OFF)
-endif()
-option(WHISPER_PERF "whisper: enable perf timings" OFF)
-# dependencies
-find_package(Threads REQUIRED)
-# on APPLE
-find_library(ACCELERATE_FRAMEWORK Accelerate)
-find_library(METAL_FRAMEWORK Metal REQUIRED)
-find_library(COREML_FRAMEWORK CoreML)
-find_package(BLAS)
-find_package(CUDAToolkit)
-find_package(hip)
-find_package(hipblas)
-find_package(rocblas)
-find_package(CLBlast)
-find_package(OpenVINO REQUIRED COMPONENTS Runtime)
-# compiler flags
-# POSIX conformance
-add_compile_definitions(_XOPEN_SOURCE=600)
-add_compile_definitions(_GNU_SOURCE)
-# whisper.coreml - Core ML support
-set(TARGET whisper.coreml)
-set(TARGET whisper.openvino)
-# whisper - this is the main library of the project
-set(TARGET whisper)
-add_library(${TARGET}
-    ggml.h
-    ggml.c
-    ggml-alloc.h
-    ggml-alloc.c
-    ggml-backend.h
-    ggml-backend.c
-    ggml-quants.h
-    ggml-quants.c
-    ${GGML_SOURCES_METAL}
-    ${GGML_SOURCES_CUDA}
-    ${GGML_SOURCES_OPENCL}
-    whisper.h
-    whisper.cpp
-)
-install(TARGETS ${TARGET}
-    LIBRARY DESTINATION lib
-    ARCHIVE DESTINATION lib/static
-    RUNTIME DESTINATION bin
-    RESOURCE DESTINATION bin
-    PUBLIC_HEADER DESTINATION include
-)
-# bindings
-add_subdirectory(bindings)

+#
+# option list
+#

+# general
+option(WHISPER_CCACHE "whisper: use ccache if available" ON)

+# debug
 option(WHISPER_ALL_WARNINGS "whisper: enable all compiler warnings" ON)
 option(WHISPER_ALL_WARNINGS_3RD_PARTY "whisper: enable all compiler warnings in 3rd party libs" OFF)

+# build
+option(WHISPER_FATAL_WARNINGS "whisper: enable -Werror flag" OFF)

 # sanitizers
+option(WHISPER_SANITIZE_THREAD "whisper: enable thread sanitizer" OFF)
+option(WHISPER_SANITIZE_ADDRESS "whisper: enable address sanitizer" OFF)
+option(WHISPER_SANITIZE_UNDEFINED "whisper: enable undefined sanitizer" OFF)

+# extra artifacts
+option(WHISPER_BUILD_TESTS "whisper: build tests" ${WHISPER_STANDALONE})
+option(WHISPER_BUILD_EXAMPLES "whisper: build examples" ${WHISPER_STANDALONE})
+option(WHISPER_BUILD_SERVER "whisper: build server example" ${WHISPER_STANDALONE})

+# 3rd party libs
+option(WHISPER_CURL "whisper: use libcurl to download model from an URL" OFF)
+option(WHISPER_SDL2 "whisper: support for libSDL2" OFF)

 if (CMAKE_SYSTEM_NAME MATCHES "Linux")
+    option(WHISPER_FFMPEG "whisper: support building and linking with ffmpeg libs (avcodec, swresample, ...)" OFF)
 endif()

+option(WHISPER_COREML "whisper: enable Core ML framework" OFF)
+option(WHISPER_COREML_ALLOW_FALLBACK "whisper: allow non-CoreML fallback" OFF)
+option(WHISPER_OPENVINO "whisper: support for OpenVINO" OFF)

+# Required for relocatable CMake package
+include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/build-info.cmake)

+# override ggml options
+set(GGML_CCACHE ${WHISPER_CCACHE})
+set(GGML_SANITIZE_THREAD ${WHISPER_SANITIZE_THREAD})
+set(GGML_SANITIZE_ADDRESS ${WHISPER_SANITIZE_ADDRESS})
+set(GGML_SANITIZE_UNDEFINED ${WHISPER_SANITIZE_UNDEFINED})
+set(GGML_ALL_WARNINGS ${WHISPER_ALL_WARNINGS})
+set(GGML_FATAL_WARNINGS ${WHISPER_FATAL_WARNINGS})

+# transition helpers
+function (whisper_option_depr TYPE OLD NEW)
+    if (${OLD})
+        message(${TYPE} "${OLD} is deprecated and will be removed in the future.\nUse ${NEW} instead\n")
+        set(${NEW} ON)
+    endif()
+endfunction()

+whisper_option_depr(FATAL_ERROR WHISPER_CUBLAS              GGML_CUDA)
+whisper_option_depr(WARNING     WHISPER_CUDA                GGML_CUDA)
+whisper_option_depr(WARNING     WHISPER_KOMPUTE             GGML_KOMPUTE)
+whisper_option_depr(WARNING     WHISPER_METAL               GGML_METAL)
+whisper_option_depr(WARNING     WHISPER_METAL_EMBED_LIBRARY GGML_METAL_EMBED_LIBRARY)
+whisper_option_depr(WARNING     WHISPER_NATIVE              GGML_NATIVE)
+whisper_option_depr(WARNING     WHISPER_OPENMP              GGML_OPENMP)
+whisper_option_depr(WARNING     WHISPER_RPC                 GGML_RPC)
+whisper_option_depr(WARNING     WHISPER_SYCL                GGML_SYCL)
+whisper_option_depr(WARNING     WHISPER_SYCL_F16            GGML_SYCL_F16)

+#
+# build the library
+#

+if (NOT TARGET ggml)
+    add_subdirectory(ggml)
+    # ... otherwise assume ggml is added by a parent CMakeLists.txt
+endif()
+add_subdirectory(src)

+#
+# install
+#

 include(GNUInstallDirs)
+include(CMakePackageConfigHelpers)

+set(WHISPER_BUILD_NUMBER ${BUILD_NUMBER})
+set(WHISPER_BUILD_COMMIT ${BUILD_COMMIT})
+set(WHISPER_INSTALL_VERSION ${CMAKE_PROJECT_VERSION})

+set(WHISPER_INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE PATH "Location of header files")
+set(WHISPER_LIB_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR} CACHE PATH "Location of library files")
+set(WHISPER_BIN_INSTALL_DIR ${CMAKE_INSTALL_BINDIR} CACHE PATH "Location of binary files")

+get_directory_property(WHISPER_TRANSIENT_DEFINES COMPILE_DEFINITIONS)

+set_target_properties(whisper PROPERTIES PUBLIC_HEADER ${CMAKE_CURRENT_SOURCE_DIR}/include/whisper.h)
+install(TARGETS whisper LIBRARY PUBLIC_HEADER)

+configure_package_config_file(
+        ${CMAKE_CURRENT_SOURCE_DIR}/cmake/whisper-config.cmake.in
+        ${CMAKE_CURRENT_BINARY_DIR}/whisper-config.cmake
+    INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/whisper
+    PATH_VARS
+    WHISPER_INCLUDE_INSTALL_DIR
+    WHISPER_LIB_INSTALL_DIR
+    WHISPER_BIN_INSTALL_DIR )

+write_basic_package_version_file(
+        ${CMAKE_CURRENT_BINARY_DIR}/whisper-version.cmake
+    VERSION ${WHISPER_INSTALL_VERSION}
+    COMPATIBILITY SameMajorVersion)

+install(FILES ${CMAKE_CURRENT_BINARY_DIR}/whisper-config.cmake
+              ${CMAKE_CURRENT_BINARY_DIR}/whisper-version.cmake
+        DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/whisper)

+configure_file(cmake/whisper.pc.in
+        "${CMAKE_CURRENT_BINARY_DIR}/whisper.pc"
+        @ONLY)

+install(FILES "${CMAKE_CURRENT_BINARY_DIR}/whisper.pc"
+        DESTINATION lib/pkgconfig)

 #
 # programs, examples and tests
 #

 if (WHISPER_BUILD_TESTS AND NOT CMAKE_JS_VERSION)
-    enable_testing()
-    add_subdirectory(tests)
+    #include(CTest)
+    #add_subdirectory(tests)
 endif ()

 if (WHISPER_BUILD_EXAMPLES)
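The new top-level script mostly forwards settings to `ggml` and installs a CMake package plus a `whisper.pc` file, while the deprecation helper maps stale `WHISPER_CUBLAS`-style flags onto the corresponding `GGML_*` options instead of silently ignoring them. As a rough sketch of how these pieces are exercised (the option names come from the listing above; the pkg-config module name is assumed to match the installed `whisper.pc`):

```bash
# configure and build with a couple of the WHISPER_* options shown above
cmake -B build -DWHISPER_SDL2=ON -DWHISPER_BUILD_EXAMPLES=ON
cmake --build build -j --config Release

# optional: install, then query the flags provided by the whisper.pc installed above
sudo cmake --install build
pkg-config --cflags --libs whisper
```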
LICENSE (2 lines changed)

@@ -1,6 +1,6 @@
 MIT License

-Copyright (c) 2023 Georgi Gerganov
+Copyright (c) 2023-2024 The ggml authors

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
Package.swift

@@ -18,33 +18,40 @@ let package = Package(
         name: "whisper",
         path: ".",
         exclude: [
+            "build",
             "bindings",
             "cmake",
-            "coreml",
             "examples",
-            "extra",
+            "scripts",
             "models",
             "samples",
             "tests",
             "CMakeLists.txt",
-            "ggml-cuda.cu",
-            "ggml-cuda.h",
-            "Makefile"
+            "Makefile",
+            "ggml/src/ggml-metal/ggml-metal-embed.metal"
         ],
         sources: [
-            "ggml.c",
-            "whisper.cpp",
-            "ggml-alloc.c",
-            "ggml-backend.c",
-            "ggml-quants.c",
-            "ggml-metal.m"
+            "ggml/src/ggml.c",
+            "src/whisper.cpp",
+            "ggml/src/ggml-aarch64.c",
+            "ggml/src/ggml-alloc.c",
+            "ggml/src/ggml-backend.cpp",
+            "ggml/src/ggml-backend-reg.cpp",
+            "ggml/src/ggml-cpu/ggml-cpu.c",
+            "ggml/src/ggml-cpu/ggml-cpu.cpp",
+            "ggml/src/ggml-cpu/ggml-cpu-aarch64.c",
+            "ggml/src/ggml-cpu/ggml-cpu-quants.c",
+            "ggml/src/ggml-quants.c",
+            "ggml/src/ggml-threading.cpp",
+            "ggml/src/ggml-metal/ggml-metal.m"
         ],
-        resources: [.process("ggml-metal.metal")],
+        resources: [.process("ggml/src/ggml-metal/ggml-metal.metal")],
         publicHeadersPath: "spm-headers",
         cSettings: [
             .unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]),
-            .define("GGML_USE_ACCELERATE"),
             .unsafeFlags(["-fno-objc-arc"]),
+            .headerSearchPath("ggml/src"),
+            .define("GGML_USE_ACCELERATE"),
             .define("GGML_USE_METAL")
             // NOTE: NEW_LAPACK will required iOS version 16.4+
             // We should consider add this in the future when we drop support for iOS 14
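The manifest now pulls the relocated `ggml` sources from `ggml/src`. A hedged sketch of consuming it with SwiftPM on macOS (assuming the root package still builds standalone, which is not shown on this page):

```bash
# from the repository root, with the Xcode command-line tools installed
swift build -c release
```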
README.md (335 lines changed)

@@ -4,24 +4,26 @@
 [](https://github.com/ggerganov/whisper.cpp/actions)
 [](https://opensource.org/licenses/MIT)
+[](https://conan.io/center/whisper-cpp)
 [](https://www.npmjs.com/package/whisper.cpp/)

-Stable: [v1.5.2](https://github.com/ggerganov/whisper.cpp/releases/tag/v1.5.2) / [Roadmap | F.A.Q.](https://github.com/ggerganov/whisper.cpp/discussions/126)
+Stable: [v1.7.2](https://github.com/ggerganov/whisper.cpp/releases/tag/v1.7.2) / [Roadmap | F.A.Q.](https://github.com/ggerganov/whisper.cpp/discussions/126)

 High-performance inference of [OpenAI's Whisper](https://github.com/openai/whisper) automatic speech recognition (ASR) model:

 - Plain C/C++ implementation without dependencies
-- Apple Silicon first-class citizen - optimized via ARM NEON, Accelerate framework, Metal and [Core ML](https://github.com/ggerganov/whisper.cpp#core-ml-support)
+- Apple Silicon first-class citizen - optimized via ARM NEON, Accelerate framework, Metal and [Core ML](#core-ml-support)
 - AVX intrinsics support for x86 architectures
 - VSX intrinsics support for POWER architectures
 - Mixed F16 / F32 precision
-- [4-bit and 5-bit integer quantization support](https://github.com/ggerganov/whisper.cpp#quantization)
+- [Integer quantization support](#quantization)
 - Zero memory allocations at runtime
+- [Vulkan support](#vulkan-gpu-support)
 - Support for CPU-only inference
-- [Efficient GPU support for NVIDIA](https://github.com/ggerganov/whisper.cpp#nvidia-gpu-support-via-cublas)
-- [Partial OpenCL GPU support via CLBlast](https://github.com/ggerganov/whisper.cpp#opencl-gpu-support-via-clblast)
-- [OpenVINO Support](https://github.com/ggerganov/whisper.cpp#openvino-support)
-- [C-style API](https://github.com/ggerganov/whisper.cpp/blob/master/whisper.h)
+- [Efficient GPU support for NVIDIA](#nvidia-gpu-support)
+- [OpenVINO Support](#openvino-support)
+- [Ascend NPU Support](#ascend-npu-support)
+- [C-style API](https://github.com/ggerganov/whisper.cpp/blob/master/include/whisper.h)

 Supported platforms:

@@ -33,9 +35,10 @@ Supported platforms:
 - [x] [WebAssembly](examples/whisper.wasm)
 - [x] Windows ([MSVC](https://github.com/ggerganov/whisper.cpp/blob/master/.github/workflows/build.yml#L117-L144) and [MinGW](https://github.com/ggerganov/whisper.cpp/issues/168)]
 - [x] [Raspberry Pi](https://github.com/ggerganov/whisper.cpp/discussions/166)
+- [x] [Docker](https://github.com/ggerganov/whisper.cpp/pkgs/container/whisper.cpp)

-The entire high-level implementation of the model is contained in [whisper.h](whisper.h) and [whisper.cpp](whisper.cpp).
-The rest of the code is part of the [ggml](https://github.com/ggerganov/ggml) machine learning library.
+The entire high-level implementation of the model is contained in [whisper.h](include/whisper.h) and [whisper.cpp](src/whisper.cpp).
+The rest of the code is part of the [`ggml`](https://github.com/ggerganov/ggml) machine learning library.

 Having such a lightweight implementation of the model allows to easily integrate it in different platforms and applications.
 As an example, here is a video of running the model on an iPhone 13 device - fully offline, on-device: [whisper.objc](examples/whisper.objc)

@@ -54,33 +57,39 @@ Or you can even run it straight in the browser: [talk.wasm](examples/talk.wasm)

 ## Implementation details

-- The core tensor operations are implemented in C ([ggml.h](ggml.h) / [ggml.c](ggml.c))
-- The transformer model and the high-level C-style API are implemented in C++ ([whisper.h](whisper.h) / [whisper.cpp](whisper.cpp))
+- The core tensor operations are implemented in C ([ggml.h](ggml/include/ggml.h) / [ggml.c](ggml/src/ggml.c))
+- The transformer model and the high-level C-style API are implemented in C++ ([whisper.h](include/whisper.h) / [whisper.cpp](src/whisper.cpp))
 - Sample usage is demonstrated in [main.cpp](examples/main)
 - Sample real-time audio transcription from the microphone is demonstrated in [stream.cpp](examples/stream)
 - Various other examples are available in the [examples](examples) folder

-The tensor operators are optimized heavily for Apple silicon CPUs. Depending on the computation size, Arm Neon SIMD
-intrinsics or CBLAS Accelerate framework routines are used. The latter are especially effective for bigger sizes since
-the Accelerate framework utilizes the special-purpose AMX coprocessor available in modern Apple products.
+The tensor operators are optimized heavily for Apple silicon CPUs. Depending on the computation size, Arm Neon SIMD intrinsics or CBLAS Accelerate framework routines are used. The latter are especially effective for bigger sizes since the Accelerate framework utilizes the special-purpose AMX coprocessor available in modern Apple products.

 ## Quick start

-First clone the repository.
+First clone the repository:

-Then, download one of the Whisper models converted in [ggml format](models). For example:
-
 ```bash
-bash ./models/download-ggml-model.sh base.en
+git clone https://github.com/ggerganov/whisper.cpp.git
 ```

-If you wish to convert the Whisper models to ggml format yourself, instructions are in [models/README.md](models/README.md).
+Navigate into the directory:
+
+```
+cd whisper.cpp
+```
+
+Then, download one of the Whisper [models](models/README.md) converted in [`ggml` format](#ggml-format). For example:
+
+```bash
+sh ./models/download-ggml-model.sh base.en
+```

 Now build the [main](examples/main) example and transcribe an audio file like this:

 ```bash
 # build the main example
-make
+make -j

 # transcribe an audio file
 ./main -f samples/jfk.wav
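The quick start above uses the bundled `samples/jfk.wav`; for your own audio the model is selected with `-m` and extra output formats are available (see `./main -h` for the full list). A sketch, with a hypothetical input file and the `-otxt` flag assumed from the tool's help output:

```bash
# transcribe a custom 16-bit WAV and also write the transcript to a .txt file
./main -m models/ggml-base.en.bin -f my-recording.wav -otxt
```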
@@ -90,8 +99,8 @@ make

 For a quick demo, simply run `make base.en`:

-```java
-$ make base.en
+```text
+$ make -j base.en

 cc  -I.              -O3 -std=c11   -pthread -DGGML_USE_ACCELERATE   -c ggml.c -o ggml.o
 c++ -I. -I./examples -O3 -std=c++11 -pthread -c whisper.cpp -o whisper.o

@@ -144,7 +153,7 @@ options:
   -ng,       --no-gpu        [false  ] disable GPU

-bash ./models/download-ggml-model.sh base.en
+sh ./models/download-ggml-model.sh base.en
 Downloading ggml model base.en ...
 ggml-base.en.bin               100%[========================>] 141.11M  6.34MB/s    in 24s
 Done! Model 'base.en' saved in 'models/ggml-base.en.bin'

@@ -206,7 +215,7 @@ For detailed usage instructions, run: `./main -h`
 Note that the [main](examples/main) example currently runs only with 16-bit WAV files, so make sure to convert your input before running the tool.
 For example, you can use `ffmpeg` like this:

-```java
+```bash
 ffmpeg -i input.mp3 -ar 16000 -ac 1 -c:a pcm_s16le output.wav
 ```

@@ -215,7 +224,7 @@ ffmpeg -i input.mp3 -ar 16000 -ac 1 -c:a pcm_s16le output.wav
 If you want some extra audio samples to play with, simply run:

 ```
-make samples
+make -j samples
 ```

 This will download a few more audio files from Wikipedia and convert them to 16-bit WAV format via `ffmpeg`.

@@ -223,24 +232,25 @@ This will download a few more audio files from Wikipedia and convert them to 16-
 You can download and run the other models as follows:

 ```
-make tiny.en
-make tiny
-make base.en
-make base
-make small.en
-make small
-make medium.en
-make medium
-make large-v1
-make large-v2
-make large-v3
+make -j tiny.en
+make -j tiny
+make -j base.en
+make -j base
+make -j small.en
+make -j small
+make -j medium.en
+make -j medium
+make -j large-v1
+make -j large-v2
+make -j large-v3
+make -j large-v3-turbo
 ```

 ## Memory usage

 | Model  | Disk    | Mem     |
-| --- | --- | --- |
+| ------ | ------- | ------- |
 | tiny   | 75 MiB  | ~273 MB |
 | base   | 142 MiB | ~388 MB |
 | small  | 466 MiB | ~852 MB |
 | medium | 1.5 GiB | ~2.1 GB |
@@ -255,7 +265,7 @@ Here are the steps for creating and using a quantized model:

 ```bash
 # quantize a model with Q5_0 method
-make quantize
+make -j quantize
 ./quantize models/ggml-base.en.bin models/ggml-base.en-q5_0.bin q5_0

 # run the examples as usual, specifying the quantized model file
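Following the comment at the end of the block above, the quantized file is then passed to the examples in place of the full-precision model, e.g.:

```bash
./main -m models/ggml-base.en-q5_0.bin -f samples/jfk.wav
```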
@@ -277,7 +287,8 @@ speed-up - more than x3 faster compared with CPU-only execution. Here are the in

 - To ensure `coremltools` operates correctly, please confirm that [Xcode](https://developer.apple.com/xcode/) is installed and execute `xcode-select --install` to install the command-line tools.
 - Python 3.10 is recommended.
+- MacOS Sonoma (version 14) or newer is recommended, as older versions of MacOS might experience issues with transcription hallucination.
 - [OPTIONAL] It is recommended to utilize a Python version management system, such as [Miniconda](https://docs.conda.io/en/latest/miniconda.html) for this step:
   - To create an environment, use: `conda create -n py310-whisper python=3.10 -y`
   - To activate the environment, use: `conda activate py310-whisper`

@@ -303,8 +314,8 @@ speed-up - more than x3 faster compared with CPU-only execution. Here are the in

 - Run the examples as usual. For example:

-  ```bash
-  ./main -m models/ggml-base.en.bin -f samples/jfk.wav
+  ```text
+  $ ./main -m models/ggml-base.en.bin -f samples/jfk.wav

   ...
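The Core ML instructions continue past this capture; enabling the feature at build time presumably goes through the `WHISPER_COREML` option from the CMakeLists.txt diff above, mirroring the OpenVINO build shown further down (a sketch, not taken from this page):

```bash
cmake -B build -DWHISPER_COREML=1
cmake --build build -j --config Release
```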
This can result in significant speedup in encoder performance. Here are the instructions:

- First, set up a Python virtual environment and install the Python dependencies. Python 3.10 is recommended.

  Windows:

  ```powershell
  cd models
  python -m venv openvino_conv_env
  openvino_conv_env\Scripts\activate
  python -m pip install --upgrade pip
  pip install -r requirements-openvino.txt
  ```

  Linux and macOS:

  ```bash
  cd models
  python3 -m venv openvino_conv_env
  source openvino_conv_env/bin/activate
  python -m pip install --upgrade pip
  pip install -r requirements-openvino.txt
  ```

- Generate an OpenVINO encoder model. For example, to generate a `base.en` model, use:

  ```
  python convert-whisper-to-openvino.py --model base.en
  ```

  This will produce ggml-base.en-encoder-openvino.xml/.bin IR model files. It's recommended to relocate these to the same folder as `ggml` models, as that
  is the default location that the OpenVINO extension will search at runtime.

- Build `whisper.cpp` with OpenVINO support:

  After downloading and extracting the package onto your development system, set up the required environment by sourcing the `setupvars` script. For example:

  Linux:

  ```bash
  source /path/to/l_openvino_toolkit_ubuntu22_2023.0.0.10926.b4452d56304_x86_64/setupvars.sh
  ```

  Windows (cmd):

  ```powershell
  C:\Path\To\w_openvino_toolkit_windows_2023.0.0.10926.b4452d56304_x86_64\setupvars.bat
  ```

  And then build the project using cmake:

  ```bash
  cmake -B build -DWHISPER_OPENVINO=1
  cmake --build build -j --config Release
  ```

- Run the examples as usual. For example:

  ```text
  $ ./main -m models/ggml-base.en.bin -f samples/jfk.wav

  ...
  ```

With NVIDIA cards the processing of the models is done efficiently on the GPU via cuBLAS and custom CUDA kernels.
First, make sure you have installed `cuda`: https://developer.nvidia.com/cuda-downloads

Now build `whisper.cpp` with CUDA support:

```
make clean
GGML_CUDA=1 make -j
```
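On multi-GPU machines the standard `CUDA_VISIBLE_DEVICES` environment variable can be used to pin the run to one card (a sketch; the model and sample paths are the usual defaults):

```bash
# run on GPU 0 only
CUDA_VISIBLE_DEVICES=0 ./main -m models/ggml-base.en.bin -f samples/jfk.wav
```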
## Vulkan GPU support

Vulkan is a cross-vendor solution that lets you accelerate the workload on your GPU.
First, make sure your graphics card driver provides support for the Vulkan API.

Now build `whisper.cpp` with Vulkan support:

```
make clean
make GGML_VULKAN=1 -j
```
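Before building, you can confirm that the driver actually exposes a Vulkan device with the `vulkaninfo` utility (an assumption: it usually ships with the `vulkan-tools` package):

```bash
# prints the available Vulkan devices and driver details
vulkaninfo
```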
## BLAS CPU support via OpenBLAS

Encoder processing can be accelerated on the CPU via OpenBLAS.

Now build `whisper.cpp` with OpenBLAS support:

```
make clean
GGML_OPENBLAS=1 make -j
```
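If OpenBLAS is not yet present, on Debian or Ubuntu it can typically be installed from the distribution packages (an assumption; the package name varies between distros):

```bash
sudo apt install libopenblas-dev
```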

## BLAS CPU support via Intel MKL

Encoder processing can be accelerated on the CPU via the BLAS compatible interface of Intel's Math Kernel Library.
First, make sure you have installed Intel's MKL runtime and development packages: https://www.intel.com/content/www/us/en/developer/tools/oneapi/onemkl-download.html

Now build `whisper.cpp` with Intel MKL BLAS support:

```
source /opt/intel/oneapi/setvars.sh
mkdir build
cd build
cmake -DWHISPER_MKL=ON ..
WHISPER_MKL=1 make -j
```

## Ascend NPU support

Ascend NPU provides inference acceleration via [`CANN`](https://www.hiascend.com/en/software/cann) and AI cores.

First, check if your Ascend NPU device is supported:

**Verified devices**

| Ascend NPU    | Status  |
| :-----------: | :-----: |
| Atlas 300T A2 | Support |

Then, make sure you have installed the [`CANN toolkit`](https://www.hiascend.com/en/software/cann/community). The latest version of CANN is recommended.

Now build `whisper.cpp` with CANN support:

```
mkdir build
cd build
cmake .. -D GGML_CANN=on
make -j
```

Run the inference examples as usual, for example:

```
./build/bin/main -f samples/jfk.wav -m models/ggml-base.en.bin -t 8
```

*Notes:*

- If you have trouble with your Ascend NPU device, please create an issue with the **[CANN]** prefix/tag.
- If you run successfully with your Ascend NPU device, please help update the `Verified devices` table.

## Docker

### Prerequisites

- Docker must be installed and running on your system.
- Create a folder to store big models & intermediate files (ex. /whisper/models)

### Images

We have two Docker images available for this project:

1. `ghcr.io/ggerganov/whisper.cpp:main`: This image includes the main executable file as well as `curl` and `ffmpeg`. (platforms: `linux/amd64`, `linux/arm64`)
2. `ghcr.io/ggerganov/whisper.cpp:main-cuda`: Same as `main` but compiled with CUDA support. (platforms: `linux/amd64`)

### Usage

```shell
# download model and persist it in a local folder
docker run -it --rm \
  -v path/to/models:/models \
  whisper.cpp:main "./models/download-ggml-model.sh base /models"

# transcribe an audio file
docker run -it --rm \
  -v path/to/models:/models \
  -v path/to/audios:/audios \
  whisper.cpp:main "./main -m /models/ggml-base.bin -f /audios/jfk.wav"

# transcribe an audio file in samples folder
docker run -it --rm \
  -v path/to/models:/models \
  whisper.cpp:main "./main -m /models/ggml-base.bin -f ./samples/jfk.wav"
```
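For the `main-cuda` image the GPU additionally has to be passed into the container; a sketch using Docker's standard `--gpus` flag (assumes the NVIDIA Container Toolkit is set up on the host):

```bash
docker run -it --rm --gpus all \
  -v path/to/models:/models \
  whisper.cpp:main-cuda "./main -m /models/ggml-base.bin -f ./samples/jfk.wav"
```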

## Installing with Conan

You can install pre-built binaries for whisper.cpp or build it from source using [Conan](https://conan.io/). Use the following command:

```
conan install --requires="whisper-cpp/[*]" --build=missing
```

For detailed instructions on how to use Conan, please refer to the [Conan documentation](https://docs.conan.io/2/).

## Limitations

- Inference only
in about half a minute on a MacBook M1 Pro, using `medium.en` model:

<details>
  <summary>Expand to see the result</summary>

```text
$ ./main -m models/ggml-medium.en.bin -f samples/gb1.wav -t 8

whisper_init_from_file: loading model from 'models/ggml-medium.en.bin'

...

whisper_print_timings: encode time = 18665.10 ms / 9 runs ( 2073.90 ms per run)
whisper_print_timings: decode time = 13090.93 ms / 549 runs ( 23.85 ms per run)
whisper_print_timings: total time = 32733.52 ms
```

</details>

## Real-time audio input example

This is a naive example of performing real-time inference on audio from your microphone.
The [stream](examples/stream) tool samples the audio every half a second and runs the transcription continuously.
More info is available in [issue #10](https://github.com/ggerganov/whisper.cpp/issues/10).

```bash
make stream -j
./stream -m ./models/ggml-base.en.bin -t 8 --step 500 --length 5000
```
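The `stream` tool captures microphone audio via SDL2, so the SDL2 development package needs to be installed before building it; a sketch for Debian/Ubuntu (package names differ on other distros):

```bash
sudo apt install libsdl2-dev
```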
Adding the `--print-colors` argument will print the transcribed text using an experimental color coding strategy
to highlight words with high or low confidence:

```bash
./main -m models/ggml-base.en.bin -f samples/gb0.wav --print-colors
```

For example, to limit the line length to a maximum of 16 characters, simply add `-ml 16`:

```text
$ ./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 16

whisper_model_load: loading model from './models/ggml-base.en.bin'
...
```

The `--max-len` argument can be used to obtain word-level timestamps. Simply use `-ml 1`:

```text
$ ./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 1

whisper_model_load: loading model from './models/ggml-base.en.bin'
...
```
The [main](examples/main) example provides support for output of karaoke-style movies, where the
currently pronounced word is highlighted. Use the `-wts` argument and run the generated bash script.
This requires `ffmpeg` to be installed.

Here are a few _"typical"_ examples:

```bash
./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -owts
source ./samples/jfk.wav.wts
ffplay ./samples/jfk.wav.mp4
```

---

```bash
./main -m ./models/ggml-base.en.bin -f ./samples/mm0.wav -owts
source ./samples/mm0.wav.wts
ffplay ./samples/mm0.wav.mp4
```

---

```bash
./main -m ./models/ggml-base.en.bin -f ./samples/gb0.wav -owts
source ./samples/gb0.wav.wts
ffplay ./samples/gb0.wav.mp4
```
## Video comparison of different models

Use the [scripts/bench-wts.sh](https://github.com/ggerganov/whisper.cpp/blob/master/scripts/bench-wts.sh) script to generate a video in the following format:

```bash
./scripts/bench-wts.sh samples/jfk.wav
ffplay ./samples/jfk.wav.all.mp4
```

took to execute it. The results are summarized in the following GitHub issue:

[Benchmark results](https://github.com/ggerganov/whisper.cpp/issues/89)

Additionally, a script to run whisper.cpp with different models and audio files is provided: [bench.py](scripts/bench.py).

You can run it with the following command; by default it will run against any standard model in the models folder.

```bash
python3 scripts/bench.py -f samples/jfk.wav -t 2,4,8 -p 1,2
```

It is written in Python with the intention of being easy to modify and extend for your benchmarking use case.

It outputs a CSV file with the results of the benchmarking.

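For a quick single-configuration measurement you can also build and run the [bench](examples/bench) example directly (a sketch; the `-m` and `-t` flags mirror the ones used by `main`):

```bash
make -j bench
./bench -m models/ggml-base.en.bin -t 4
```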
## `ggml` format

The original models are converted to a custom binary format. This allows everything needed to be packed into a single file:

or manually from here:

- https://huggingface.co/ggerganov/whisper.cpp
- https://ggml.ggerganov.com

For more details, see the conversion script [models/convert-pt-to-ggml.py](models/convert-pt-to-ggml.py) or [models/README.md](models/README.md).

## [Bindings](https://github.com/ggerganov/whisper.cpp/discussions/categories/bindings)

- [x] Rust: [tazz4843/whisper-rs](https://github.com/tazz4843/whisper-rs) | [#310](https://github.com/ggerganov/whisper.cpp/discussions/310)
- [x] JavaScript: [bindings/javascript](bindings/javascript) | [#309](https://github.com/ggerganov/whisper.cpp/discussions/309)
  - React Native (iOS / Android): [whisper.rn](https://github.com/mybigday/whisper.rn)
- [x] Go: [bindings/go](bindings/go) | [#312](https://github.com/ggerganov/whisper.cpp/discussions/312)
- [x] Java:
  - [GiviMAD/whisper-jni](https://github.com/GiviMAD/whisper-jni)
- [x] Ruby: [bindings/ruby](bindings/ruby) | [#507](https://github.com/ggerganov/whisper.cpp/discussions/507)
- [x] Objective-C / Swift: [ggerganov/whisper.spm](https://github.com/ggerganov/whisper.spm) | [#313](https://github.com/ggerganov/whisper.cpp/discussions/313)
  - [exPHAT/SwiftWhisper](https://github.com/exPHAT/SwiftWhisper)
- [x] .NET: | [#422](https://github.com/ggerganov/whisper.cpp/discussions/422)
  - [sandrohanea/whisper.net](https://github.com/sandrohanea/whisper.net)
  - [NickDarvey/whisper](https://github.com/NickDarvey/whisper)
- [x] Python: | [#9](https://github.com/ggerganov/whisper.cpp/issues/9)
  - [stlukey/whispercpp.py](https://github.com/stlukey/whispercpp.py) (Cython)
  - [AIWintermuteAI/whispercpp](https://github.com/AIWintermuteAI/whispercpp) (Updated fork of aarnphm/whispercpp)
  - [aarnphm/whispercpp](https://github.com/aarnphm/whispercpp) (Pybind11)
  - [abdeladim-s/pywhispercpp](https://github.com/abdeladim-s/pywhispercpp) (Pybind11)
- [x] R: [bnosac/audio.whisper](https://github.com/bnosac/audio.whisper)
- [x] Unity: [macoron/whisper.unity](https://github.com/Macoron/whisper.unity)

## Examples

There are various examples of using the library for different projects in the [examples](examples) folder.
Some of the examples are even ported to run in the browser using WebAssembly. Check them out!

| Example | Web | Description |
| --- | --- | --- |
| [main](examples/main) | [whisper.wasm](examples/whisper.wasm) | Tool for translating and transcribing audio using Whisper |
| [bench](examples/bench) | [bench.wasm](examples/bench.wasm) | Benchmark the performance of Whisper on your machine |
| [stream](examples/stream) | [stream.wasm](examples/stream.wasm) | Real-time transcription of raw microphone capture |
| [command](examples/command) | [command.wasm](examples/command.wasm) | Basic voice assistant example for receiving voice commands from the mic |
| [wchess](examples/wchess) | [wchess.wasm](examples/wchess) | Voice-controlled chess |
| [talk](examples/talk) | [talk.wasm](examples/talk.wasm) | Talk with a GPT-2 bot |
| [talk-llama](examples/talk-llama) | | Talk with a LLaMA bot |
| [whisper.objc](examples/whisper.objc) | | iOS mobile application using whisper.cpp |
| [whisper.swiftui](examples/whisper.swiftui) | | SwiftUI iOS / macOS application using whisper.cpp |
| [whisper.android](examples/whisper.android) | | Android mobile application using whisper.cpp |
| [whisper.nvim](examples/whisper.nvim) | | Speech-to-text plugin for Neovim |
| [generate-karaoke.sh](examples/generate-karaoke.sh) | | Helper script to easily [generate a karaoke video](https://youtu.be/uj7hVta4blM) of raw audio capture |
| [livestream.sh](examples/livestream.sh) | | [Livestream audio transcription](https://github.com/ggerganov/whisper.cpp/issues/185) |
| [yt-wsp.sh](examples/yt-wsp.sh) | | Download + transcribe and/or translate any VOD [(original)](https://gist.github.com/DaniruKun/96f763ec1a037cc92fe1a059b643b818) |
| [server](examples/server) | | HTTP transcription server with OAI-like API |

## [Discussions](https://github.com/ggerganov/whisper.cpp/discussions)

---

**README_sycl.md** (new file):

# whisper.cpp for SYCL

[Background](#background)

[OS](#os)

[Intel GPU](#intel-gpu)

[Linux](#linux)

[Environment Variable](#environment-variable)

[Known Issue](#known-issue)

[Todo](#todo)

## Background

SYCL is a higher-level programming model to improve programming productivity on various hardware accelerators, such as CPUs, GPUs, and FPGAs. It is a single-source embedded domain-specific language based on pure C++17.

oneAPI is a specification that is open and standards-based, supporting multiple architecture types including but not limited to GPU, CPU, and FPGA. The spec has both direct programming and API-based programming paradigms.

Intel uses SYCL as the direct programming language to support CPUs, GPUs and FPGAs.

To avoid re-inventing the wheel, this code refers to other code paths in llama.cpp (like OpenBLAS, cuBLAS, CLBlast). We use the open-source tool [SYCLomatic](https://github.com/oneapi-src/SYCLomatic) (commercial release: [Intel DPC++ Compatibility Tool](https://www.intel.com/content/www/us/en/developer/tools/oneapi/dpc-compatibility-tool.html)) to migrate to SYCL.

The whisper.cpp for SYCL is used to support Intel GPUs.

For Intel CPUs, it is recommended to use whisper.cpp for X86 (Intel MKL build).

## OS

| OS      | Status  | Verified     |
| ------- | ------- | ------------ |
| Linux   | Support | Ubuntu 22.04 |
| Windows | Ongoing |              |

## Intel GPU

| Intel GPU                     | Status  | Verified Model                  |
| ----------------------------- | ------- | ------------------------------- |
| Intel Data Center Max Series  | Support | Max 1550                        |
| Intel Data Center Flex Series | Support | Flex 170                        |
| Intel Arc Series              | Support | Arc 770                         |
| Intel built-in Arc GPU        | Support | built-in Arc GPU in Meteor Lake |
| Intel iGPU                    | Support | iGPU in i5-1250P, i7-1165G7     |

## Linux

### Setup Environment

1. Install the Intel GPU driver.

   a. Please install the Intel GPU driver by the official guide: [Install GPU Drivers](https://dgpu-docs.intel.com/driver/installation.html).

   Note: for an iGPU, please install the client GPU driver.

   b. Add the user to the video and render groups:

   ```
   sudo usermod -aG render username
   sudo usermod -aG video username
   ```

   Note: re-login to enable it.

   c. Check:

   ```
   sudo apt install clinfo
   sudo clinfo -l
   ```

   Output (example):

   ```
   Platform #0: Intel(R) OpenCL Graphics
    `-- Device #0: Intel(R) Arc(TM) A770 Graphics

   Platform #0: Intel(R) OpenCL HD Graphics
    `-- Device #0: Intel(R) Iris(R) Xe Graphics [0x9a49]
   ```

2. Install the Intel oneAPI Base Toolkit.

   a. Please follow the procedure in [Get the Intel oneAPI Base Toolkit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit.html).

   It is recommended to install to the default folder: **/opt/intel/oneapi**.

   The following guide uses the default folder as an example. If you use another folder, please adjust the paths accordingly.

   b. Check:

   ```
   source /opt/intel/oneapi/setvars.sh

   sycl-ls
   ```

   There should be one or more level-zero devices, like **[ext_oneapi_level_zero:gpu:0]**.

   Output (example):

   ```
   [opencl:acc:0] Intel(R) FPGA Emulation Platform for OpenCL(TM), Intel(R) FPGA Emulation Device OpenCL 1.2 [2023.16.10.0.17_160000]
   [opencl:cpu:1] Intel(R) OpenCL, 13th Gen Intel(R) Core(TM) i7-13700K OpenCL 3.0 (Build 0) [2023.16.10.0.17_160000]
   [opencl:gpu:2] Intel(R) OpenCL Graphics, Intel(R) Arc(TM) A770 Graphics OpenCL 3.0 NEO [23.30.26918.50]
   [ext_oneapi_level_zero:gpu:0] Intel(R) Level-Zero, Intel(R) Arc(TM) A770 Graphics 1.3 [1.3.26918]
   ```

3. Build locally:

   ```
   mkdir -p build
   cd build
   source /opt/intel/oneapi/setvars.sh

   # for FP16
   # cmake .. -DWHISPER_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DWHISPER_SYCL_F16=ON

   # for FP32
   cmake .. -DWHISPER_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx

   # build example/main only
   # cmake --build . --config Release --target main

   # build all binaries
   cmake --build . --config Release -v
   ```

   or

   ```
   ./examples/sycl/build.sh
   ```

   Note:

   - By default, all binaries are built, which takes more time. To reduce the time, we recommend building **example/main** only.

### Run

1. Put the model file into the **models** folder.

2. Enable the oneAPI running environment:

   ```
   source /opt/intel/oneapi/setvars.sh
   ```

3. List the device IDs.

   Run without parameters:

   ```
   ./build/bin/ls-sycl-device

   or

   ./build/bin/main
   ```

   Check the IDs in the startup log, like:

   ```
   found 4 SYCL devices:
     Device 0: Intel(R) Arc(TM) A770 Graphics, compute capability 1.3,
       max compute_units 512, max work group size 1024, max sub group size 32, global mem size 16225243136
     Device 1: Intel(R) FPGA Emulation Device, compute capability 1.2,
       max compute_units 24, max work group size 67108864, max sub group size 64, global mem size 67065057280
     Device 2: 13th Gen Intel(R) Core(TM) i7-13700K, compute capability 3.0,
       max compute_units 24, max work group size 8192, max sub group size 64, global mem size 67065057280
     Device 3: Intel(R) Arc(TM) A770 Graphics, compute capability 3.0,
       max compute_units 512, max work group size 1024, max sub group size 32, global mem size 16225243136
   ```

   | Attribute              | Note                                                 |
   | ---------------------- | ---------------------------------------------------- |
   | compute capability 1.3 | Level-zero runtime, recommended                      |
   | compute capability 3.0 | OpenCL runtime, slower than level-zero in most cases |

4. Set the device ID and execute whisper.cpp.

   Set device ID = 0 by **GGML_SYCL_DEVICE=0**:

   ```
   GGML_SYCL_DEVICE=0 ./build/bin/main -m models/ggml-base.en.bin -f samples/jfk.wav
   ```

   or run by script:

   ```
   ./examples/sycl/run_whisper.sh
   ```

5. Check the device ID in the output, like:

   ```
   Using device **0** (Intel(R) Arc(TM) A770 Graphics) as main device
   ```

## Environment Variable

#### Build

| Name               | Value          | Function                                                                |
| ------------------ | -------------- | ----------------------------------------------------------------------- |
| WHISPER_SYCL       | ON (mandatory) | Enable the SYCL code path. For FP32/FP16, WHISPER_SYCL=ON is mandatory. |
| WHISPER_SYCL_F16   | ON (optional)  | Enable the FP16 build with the SYCL code path. For FP32, do not set it. |
| CMAKE_C_COMPILER   | icx            | Use the icx compiler for the SYCL code path                             |
| CMAKE_CXX_COMPILER | icpx           | Use the icpx compiler for the SYCL code path                            |

#### Running

| Name             | Value            | Function                                                                      |
| ---------------- | ---------------- | ----------------------------------------------------------------------------- |
| GGML_SYCL_DEVICE | 0 (default) or 1 | Set the device ID to use. Check the device IDs in the default running output |
| GGML_SYCL_DEBUG  | 0 (default) or 1 | Enable debug logging via the GGML_SYCL_DEBUG macro                            |
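The two variables can also be combined on a single command line, for example (a sketch reusing the binary and model paths from above):

```bash
GGML_SYCL_DEBUG=1 GGML_SYCL_DEVICE=0 ./build/bin/main -m models/ggml-base.en.bin -f samples/jfk.wav
```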

## Known Issue

- Error: `error while loading shared libraries: libsycl.so.7: cannot open shared object file: No such file or directory`.

  The oneAPI running environment has not been enabled.

  Install the oneAPI Base Toolkit and enable it with: `source /opt/intel/oneapi/setvars.sh`.

- Hang during startup

  llama.cpp uses mmap as the default way to read the model file and copy it to the GPU. On some systems, the memcpy can misbehave and block.

  Solution: add **--no-mmap**.

## Todo

- Support building on Windows.

- Support multiple cards.
@@ -14,9 +14,14 @@ GGML_METAL_PATH_RESOURCES := $(abspath ../..)
BUILD_DIR := build
MODELS_DIR := models
EXAMPLES_DIR := $(wildcard examples/*)
INCLUDE_PATH := $(abspath ../../include):$(abspath ../../ggml/include)
LIBRARY_PATH := $(abspath ../..)

ifeq ($(GGML_CUDA),1)
	LIBRARY_PATH := $(LIBRARY_PATH):$(CUDA_PATH)/targets/$(UNAME_M)-linux/lib/
	BUILD_FLAGS := -ldflags "-extldflags '-lcudart -lcuda -lcublas'"
endif

ifeq ($(UNAME_S),Darwin)
	EXT_LDFLAGS := -framework Foundation -framework Metal -framework MetalKit
endif
This will compile a static `libwhisper.a` in a `build` folder, download a model ...

```
make examples
```

To build with CUDA support, add `GGML_CUDA=1`:

```bash
GGML_CUDA=1 make examples
```

The examples are placed in the `build` directory. Once built, you can download all the models with the following command:
@@ -24,7 +24,7 @@ const (

var (
	// The models which will be downloaded, if no model is specified as an argument
	modelNames = []string{"ggml-tiny.en", "ggml-tiny", "ggml-base.en", "ggml-base", "ggml-small.en", "ggml-small", "ggml-medium.en", "ggml-medium", "ggml-large-v1", "ggml-large-v2", "ggml-large-v3", "large-v3-turbo"}
)

var (

@@ -68,10 +68,6 @@ func (flags *Flags) GetOut() string {
	return strings.ToLower(flags.Lookup("out").Value.String())
}

func (flags *Flags) IsTokens() bool {
	return flags.Lookup("tokens").Value.String() == "true"
}
@@ -111,10 +107,6 @@ func (flags *Flags) SetParams(context whisper.Context) error {
		fmt.Fprintf(flags.Output(), "Setting duration to %v\n", duration)
		context.SetDuration(duration)
	}
	if threads := flags.GetThreads(); threads != 0 {
		fmt.Fprintf(flags.Output(), "Setting threads to %d\n", threads)
		context.SetThreads(threads)

@@ -146,7 +138,6 @@ func registerFlags(flag *Flags) {
	flag.Duration("offset", 0, "Time offset")
	flag.Duration("duration", 0, "Duration of audio to process")
	flag.Uint("threads", 0, "Number of threads to use")
	flag.Uint("max-len", 0, "Maximum segment length in characters")
	flag.Uint("max-tokens", 0, "Maximum tokens per segment")
	flag.Float64("word-thold", 0, "Maximum segment score")
@@ -1,10 +1,10 @@
module github.com/ggerganov/whisper.cpp/bindings/go

go 1.23

require (
	github.com/go-audio/wav v1.1.0
	github.com/stretchr/testify v1.9.0
)

require (
@@ -1,4 +1,3 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-audio/audio v1.0.0 h1:zS9vebldgbQqktK4H0lUqWrG8P0NxCJVqcj7ZpNnwd4=
@@ -9,15 +8,9 @@ github.com/go-audio/wav v1.1.0 h1:jQgLtbqBzY7G+BM8fXF7AHUk1uHUviWS4X39d5rsL2g=
github.com/go-audio/wav v1.1.0/go.mod h1:mpe9qfwbScEbkd8uybLuIpTgHyrISw/OTuvjUW2iGtE=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -47,10 +47,6 @@ func (p *Params) SetPrintTimestamps(v bool) {
	p.print_timestamps = toBool(v)
}

// Set language id
func (p *Params) SetLanguage(lang int) error {
	if lang == -1 {

@@ -123,6 +119,33 @@ func (p *Params) SetAudioCtx(n int) {
	p.audio_ctx = C.int(n)
}

func (p *Params) SetMaxContext(n int) {
	p.n_max_text_ctx = C.int(n)
}

func (p *Params) SetBeamSize(n int) {
	p.beam_search.beam_size = C.int(n)
}

func (p *Params) SetEntropyThold(t float32) {
	p.entropy_thold = C.float(t)
}

func (p *Params) SetTemperature(t float32) {
	p.temperature = C.float(t)
}

// Sets the fallback temperature incrementation
// Pass -1.0 to disable this feature
func (p *Params) SetTemperatureFallback(t float32) {
	p.temperature_inc = C.float(t)
}

// Set initial prompt
func (p *Params) SetInitialPrompt(prompt string) {
	p.initial_prompt = C.CString(prompt)
}

///////////////////////////////////////////////////////////////////////////////
// PRIVATE METHODS

@@ -147,6 +170,11 @@ func (p *Params) String() string {
	str += fmt.Sprintf(" offset_ms=%d", p.offset_ms)
	str += fmt.Sprintf(" duration_ms=%d", p.duration_ms)
	str += fmt.Sprintf(" audio_ctx=%d", p.audio_ctx)
	str += fmt.Sprintf(" initial_prompt=%s", C.GoString(p.initial_prompt))
	str += fmt.Sprintf(" entropy_thold=%f", p.entropy_thold)
	str += fmt.Sprintf(" temperature=%f", p.temperature)
	str += fmt.Sprintf(" temperature_inc=%f", p.temperature_inc)
	str += fmt.Sprintf(" beam_size=%d", p.beam_search.beam_size)
	if p.translate {
		str += " translate"
	}

@@ -171,9 +199,6 @@ func (p *Params) String() string {
	if p.token_timestamps {
		str += " token_timestamps"
	}

	return str + ">"
}
@@ -76,11 +76,6 @@ func (context *context) SetTranslate(v bool) {
	context.params.SetTranslate(v)
}

func (context *context) SetSplitOnWord(v bool) {
	context.params.SetSplitOnWord(v)
}

@@ -130,6 +125,37 @@ func (context *context) SetAudioCtx(n uint) {
	context.params.SetAudioCtx(int(n))
}

// Set maximum number of text context tokens to store
func (context *context) SetMaxContext(n int) {
	context.params.SetMaxContext(n)
}

// Set Beam Size
func (context *context) SetBeamSize(n int) {
	context.params.SetBeamSize(n)
}

// Set Entropy threshold
func (context *context) SetEntropyThold(t float32) {
	context.params.SetEntropyThold(t)
}

// Set Temperature
func (context *context) SetTemperature(t float32) {
	context.params.SetTemperature(t)
}

// Set the fallback temperature incrementation
// Pass -1.0 to disable this feature
func (context *context) SetTemperatureFallback(t float32) {
	context.params.SetTemperatureFallback(t)
}

// Set initial prompt
func (context *context) SetInitialPrompt(prompt string) {
	context.params.SetInitialPrompt(prompt)
}

// ResetTimings resets the mode timings. Should be called before processing
func (context *context) ResetTimings() {
	context.model.ctx.Whisper_reset_timings()
@@ -4,52 +4,90 @@ import (
	"os"
	"testing"

	"github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper"
	"github.com/go-audio/wav"
	assert "github.com/stretchr/testify/assert"
)

func TestSetLanguage(t *testing.T) {
	assert := assert.New(t)

	model, err := whisper.New(ModelPath)
	assert.NoError(err)
	assert.NotNil(model)
	defer model.Close()

	context, err := model.NewContext()
	assert.NoError(err)

	// This returns an error since
	// the model 'models/ggml-small.en.bin'
	// that is loaded is not multilingual
	err = context.SetLanguage("en")
	assert.Error(err)
}

func TestContextModelIsMultilingual(t *testing.T) {
	assert := assert.New(t)

	model, err := whisper.New(ModelPath)
	assert.NoError(err)
	assert.NotNil(model)
	defer model.Close()

	context, err := model.NewContext()
	assert.NoError(err)

	isMultilingual := context.IsMultilingual()

	// This returns false since
	// the model 'models/ggml-small.en.bin'
	// that is loaded is not multilingual
	assert.False(isMultilingual)
}

func TestLanguage(t *testing.T) {
	assert := assert.New(t)

	model, err := whisper.New(ModelPath)
	assert.NoError(err)
	assert.NotNil(model)
	defer model.Close()

	context, err := model.NewContext()
	assert.NoError(err)

	// This always returns en since
	// the model 'models/ggml-small.en.bin'
	// that is loaded is not multilingual
	expectedLanguage := "en"
	actualLanguage := context.Language()
	assert.Equal(expectedLanguage, actualLanguage)
}

func TestProcess(t *testing.T) {
	assert := assert.New(t)

	fh, err := os.Open(SamplePath)
	assert.NoError(err)
	defer fh.Close()

	// Decode the WAV file - load the full buffer
	dec := wav.NewDecoder(fh)
	buf, err := dec.FullPCMBuffer()
	assert.NoError(err)
	assert.Equal(uint16(1), dec.NumChans)

	data := buf.AsFloat32Buffer().Data

	model, err := whisper.New(ModelPath)
	assert.NoError(err)
	assert.NotNil(model)
	defer model.Close()

	context, err := model.NewContext()
	assert.NoError(err)

	err = context.Process(data, nil, nil)
	assert.NoError(err)
}
@@ -38,17 +38,22 @@ type Context interface {
	IsMultilingual() bool // Return true if the model is multilingual.
	Language() string     // Get language

	SetOffset(time.Duration)          // Set offset
	SetDuration(time.Duration)        // Set duration
	SetThreads(uint)                  // Set number of threads to use
	SetSplitOnWord(bool)              // Set split on word flag
	SetTokenThreshold(float32)        // Set timestamp token probability threshold
	SetTokenSumThreshold(float32)     // Set timestamp token sum probability threshold
	SetMaxSegmentLength(uint)         // Set max segment length in characters
	SetTokenTimestamps(bool)          // Set token timestamps flag
	SetMaxTokensPerSegment(uint)      // Set max tokens per segment (0 = no limit)
	SetAudioCtx(uint)                 // Set audio encoder context
	SetMaxContext(n int)              // Set maximum number of text context tokens to store
	SetBeamSize(n int)                // Set Beam Size
	SetEntropyThold(t float32)        // Set Entropy threshold
	SetInitialPrompt(prompt string)   // Set initial prompt
	SetTemperature(t float32)         // Set temperature
	SetTemperatureFallback(t float32) // Set temperature incrementation

	// Process mono audio data and return any errors.
	// If defined, newly generated segments are passed to the
---

**bindings/go/pkg/whisper/model_test.go** (new file):

package whisper_test

import (
	"testing"

	"github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper"
	assert "github.com/stretchr/testify/assert"
)

func TestNew(t *testing.T) {
	assert := assert.New(t)
	t.Run("valid model path", func(t *testing.T) {
		model, err := whisper.New(ModelPath)
		assert.NoError(err)
		assert.NotNil(model)
		defer model.Close()
	})

	t.Run("invalid model path", func(t *testing.T) {
		invalidModelPath := "invalid-model-path.bin"
		model, err := whisper.New(invalidModelPath)
		assert.Error(err)
		assert.Nil(model)
	})
}

func TestClose(t *testing.T) {
	assert := assert.New(t)

	model, err := whisper.New(ModelPath)
	assert.NoError(err)
	assert.NotNil(model)

	err = model.Close()
	assert.NoError(err)
}

func TestNewContext(t *testing.T) {
	assert := assert.New(t)

	model, err := whisper.New(ModelPath)
	assert.NoError(err)
	assert.NotNil(model)
	defer model.Close()

	context, err := model.NewContext()
	assert.NoError(err)
	assert.NotNil(context)
}

func TestIsMultilingual(t *testing.T) {
	assert := assert.New(t)

	model, err := whisper.New(ModelPath)
	assert.NoError(err)
	assert.NotNil(model)
	defer model.Close()

	isMultilingual := model.IsMultilingual()

	// This returns false since
	// the model 'models/ggml-small.en.bin'
	// that is loaded is not multilingual
	assert.False(isMultilingual)
}

func TestLanguages(t *testing.T) {
	assert := assert.New(t)

	model, err := whisper.New(ModelPath)
	assert.NoError(err)
	assert.NotNil(model)
	defer model.Close()

	expectedLanguages := []string{
		"en", "zh", "de", "es", "ru", "ko", "fr", "ja", "pt", "tr", "pl",
		"ca", "nl", "ar", "sv", "it", "id", "hi", "fi", "vi", "he", "uk",
		"el", "ms", "cs", "ro", "da", "hu", "ta", "no", "th", "ur", "hr",
		"bg", "lt", "la", "mi", "ml", "cy", "sk", "te", "fa", "lv", "bn",
		"sr", "az", "sl", "kn", "et", "mk", "br", "eu", "is", "hy", "ne",
		"mn", "bs", "kk", "sq", "sw", "gl", "mr", "pa", "si", "km", "sn",
		"yo", "so", "af", "oc", "ka", "be", "tg", "sd", "gu", "am", "yi",
		"lo", "uz", "fo", "ht", "ps", "tk", "nn", "mt", "sa", "lb", "my",
		"bo", "tl", "mg", "as", "tt", "haw", "ln", "ha", "ba", "jw", "su",
	}

	actualLanguages := model.Languages()

	assert.Equal(expectedLanguages, actualLanguages)
}
bindings/go/pkg/whisper/util_test.go (new file, +6)
@@ -0,0 +1,6 @@
package whisper_test

const (
	ModelPath  = "../../models/ggml-small.en.bin"
	SamplePath = "../../samples/jfk.wav"
)
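These constants point the Go tests at a local model and sample file. For orientation, a minimal hedged sketch of how they would typically be exercised end to end; the `Process` signature, the `NextSegment` loop and the `Segment` fields used below are assumptions about the binding made for illustration (they are not shown in this diff), and WAV decoding is omitted:

```go
package whisper_test

import (
	"fmt"
	"io"
	"testing"

	"github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper"
)

// Sketch only: Process, NextSegment and the Segment fields are assumptions,
// not part of the diff above.
func TestTranscribeSketch(t *testing.T) {
	t.Skip("illustrative sketch, not a real test")

	model, err := whisper.New(ModelPath)
	if err != nil {
		t.Fatal(err)
	}
	defer model.Close()

	ctx, err := model.NewContext()
	if err != nil {
		t.Fatal(err)
	}

	// Assumption: SamplePath has already been decoded to 16 kHz mono float32 PCM.
	var samples []float32

	// Assumption: Process(samples, segmentCallback, progressCallback) error.
	if err := ctx.Process(samples, nil, nil); err != nil {
		t.Fatal(err)
	}

	// Assumption: NextSegment returns segments until io.EOF.
	for {
		seg, err := ctx.NextSegment()
		if err == io.EOF {
			break
		} else if err != nil {
			t.Fatal(err)
		}
		fmt.Printf("[%v --> %v] %s\n", seg.Start, seg.End, seg.Text)
	}
}
```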
@@ -9,8 +9,8 @@ import (
 // CGO

 /*
-#cgo LDFLAGS: -lwhisper -lm -lstdc++
-#cgo darwin LDFLAGS: -framework Accelerate
+#cgo LDFLAGS: -lwhisper -lm -lstdc++ -fopenmp
+#cgo darwin LDFLAGS: -framework Accelerate -framework Metal -framework Foundation -framework CoreGraphics
 #include <whisper.h>
 #include <stdlib.h>
Submodule bindings/ios deleted from 88c28eb833
@@ -20,7 +20,7 @@ public interface WhisperCppJnaLibrary extends Library {
     * @return Whisper context on success, null on failure
     */
    Pointer whisper_init_from_file(String path_model);

    /**
     * Provides default params which can be used with `whisper_init_from_file_with_params()` etc.
     * Because this function allocates memory for the params, the caller must call either:
@@ -304,14 +304,6 @@ public interface WhisperCppJnaLibrary extends Library {
     /** Language id associated with the provided state */
     int whisper_full_lang_id_from_state(Pointer state);

-    /**
-     * Convert RAW PCM audio to log mel spectrogram but applies a Phase Vocoder to speed up the audio x2.
-     * The resulting spectrogram is stored inside the default state of the provided whisper context.
-     * @return 0 on success
-     */
-    int whisper_pcm_to_mel_phase_vocoder(Pointer ctx, final float[] samples, int n_samples, int n_threads);
-
-    int whisper_pcm_to_mel_phase_vocoder_with_state(Pointer ctx, Pointer state, final float[] samples, int n_samples, int n_threads);
-
     /** Get the start time of the specified segment. */
     long whisper_full_get_segment_t0(Pointer ctx, int i_segment);
@@ -129,14 +129,6 @@ public class WhisperFullParams extends Structure {
     /** Maximum tokens per segment (0, default = no limit) */
     public int max_tokens;

-    /** Flag to speed up the audio by 2x using Phase Vocoder. (default = false) */
-    public CBool speed_up;
-
-    /** Flag to speed up the audio by 2x using Phase Vocoder. (default = false) */
-    public void speedUp(boolean enable) {
-        speed_up = enable ? CBool.TRUE : CBool.FALSE;
-    }
-
     /** Overwrite the audio context size (0 = use default). */
     public int audio_ctx;

@@ -148,6 +140,9 @@ public class WhisperFullParams extends Structure {
         tdrz_enable = enable ? CBool.TRUE : CBool.FALSE;
     }

+    /** Regular expression matching tokens to suppress. */
+    public String suppress_regex;
+
     /** Tokens to provide to the whisper decoder as an initial prompt.
      * These are prepended to any existing text context from a previous call. */
     public String initial_prompt;

@@ -318,8 +313,8 @@ public class WhisperFullParams extends Structure {
         return Arrays.asList("strategy", "n_threads", "n_max_text_ctx", "offset_ms", "duration_ms", "translate",
                 "no_context", "single_segment", "no_timestamps",
                 "print_special", "print_progress", "print_realtime", "print_timestamps", "token_timestamps",
-                "thold_pt", "thold_ptsum", "max_len", "split_on_word", "max_tokens", "speed_up", "audio_ctx",
-                "tdrz_enable", "initial_prompt", "prompt_tokens", "prompt_n_tokens", "language", "detect_language",
+                "thold_pt", "thold_ptsum", "max_len", "split_on_word", "max_tokens", "audio_ctx",
+                "tdrz_enable", "suppress_regex", "initial_prompt", "prompt_tokens", "prompt_n_tokens", "language", "detect_language",
                 "suppress_blank", "suppress_non_speech_tokens", "temperature", "max_initial_ts", "length_penalty",
                 "temperature_inc", "entropy_thold", "logprob_thold", "no_speech_thold", "greedy", "beam_search",
                 "new_segment_callback", "new_segment_callback_user_data",
@@ -41,7 +41,7 @@ make publish-npm

 ## Sample run

-```java
+```text
 $ node --experimental-wasm-threads --experimental-wasm-simd ../tests/test-whisper.js

 whisper_model_load: loading model from 'whisper.bin'
@@ -63,7 +63,7 @@ whisper_model_load: ggml ctx size = 140.60 MB
 whisper_model_load: memory size = 22.83 MB
 whisper_model_load: model size = 140.54 MB

 system_info: n_threads = 8 / 10 | AVX = 0 | AVX2 = 0 | AVX512 = 0 | NEON = 0 | F16C = 0 | FP16_VA = 0 | WASM_SIMD = 1 | BLAS = 0 |

 operator(): processing 176000 samples, 11.0 sec, 8 threads, 1 processors, lang = en, task = transcribe ...
@@ -1,6 +1,6 @@
 {
   "name": "whisper.cpp",
-  "version": "1.5.2",
+  "version": "1.7.2",
   "description": "Whisper speech recognition",
   "main": "whisper.js",
   "scripts": {
bindings/ruby/.gitignore (new file, +3, vendored)
@@ -0,0 +1,3 @@
LICENSE
pkg/
lib/whisper.*
bindings/ruby/README.md (new file, +169)
@@ -0,0 +1,169 @@
whispercpp
==========



Ruby bindings for [whisper.cpp][], an automatic speech recognition library.

Installation
------------

Install the gem and add it to the application's Gemfile by executing:

    $ bundle add whispercpp

If bundler is not being used to manage dependencies, install the gem by executing:

    $ gem install whispercpp

Usage
-----

```ruby
require "whisper"

whisper = Whisper::Context.new("path/to/model.bin")

params = Whisper::Params.new
params.language = "en"
params.offset = 10_000
params.duration = 60_000
params.max_text_tokens = 300
params.translate = true
params.print_timestamps = false
params.initial_prompt = "Initial prompt here."

whisper.transcribe("path/to/audio.wav", params) do |whole_text|
  puts whole_text
end
```

### Preparing model ###

Use the bundled script to download model file(s):

```bash
git clone https://github.com/ggerganov/whisper.cpp.git
cd whisper.cpp
sh ./models/download-ggml-model.sh base.en
```

Several model types are available. See the [models][] page for details.

### Preparing audio file ###

Currently, whisper.cpp accepts only 16-bit WAV files.

### API ###

Once `Whisper::Context#transcribe` has been called, you can retrieve the segments with `#each_segment`:

```ruby
def format_time(time_ms)
  sec, decimal_part = time_ms.divmod(1000)
  min, sec = sec.divmod(60)
  hour, min = min.divmod(60)
  "%02d:%02d:%02d.%03d" % [hour, min, sec, decimal_part]
end

whisper.transcribe("path/to/audio.wav", params)

whisper.each_segment.with_index do |segment, index|
  line = "[%{nth}: %{st} --> %{ed}] %{text}" % {
    nth: index + 1,
    st: format_time(segment.start_time),
    ed: format_time(segment.end_time),
    text: segment.text
  }
  line << " (speaker turned)" if segment.speaker_next_turn?
  puts line
end
```

You can also add a hook to the params that is called on each new segment:

```ruby
def format_time(time_ms)
  sec, decimal_part = time_ms.divmod(1000)
  min, sec = sec.divmod(60)
  hour, min = min.divmod(60)
  "%02d:%02d:%02d.%03d" % [hour, min, sec, decimal_part]
end

# Add hook before calling #transcribe
params.on_new_segment do |segment|
  line = "[%{st} --> %{ed}] %{text}" % {
    st: format_time(segment.start_time),
    ed: format_time(segment.end_time),
    text: segment.text
  }
  line << " (speaker turned)" if segment.speaker_next_turn?
  puts line
end

whisper.transcribe("path/to/audio.wav", params)
```

You can inspect model information:

```ruby
whisper = Whisper::Context.new("path/to/model.bin")
model = whisper.model

model.n_vocab       # => 51864
model.n_audio_ctx   # => 1500
model.n_audio_state # => 512
model.n_audio_head  # => 8
model.n_audio_layer # => 6
model.n_text_ctx    # => 448
model.n_text_state  # => 512
model.n_text_head   # => 8
model.n_text_layer  # => 6
model.n_mels        # => 80
model.ftype         # => 1
model.type          # => "base"
```

You can set a log callback:

```ruby
prefix = "[MyApp] "
log_callback = ->(level, buffer, user_data) {
  case level
  when Whisper::LOG_LEVEL_NONE
    puts "#{user_data}none: #{buffer}"
  when Whisper::LOG_LEVEL_INFO
    puts "#{user_data}info: #{buffer}"
  when Whisper::LOG_LEVEL_WARN
    puts "#{user_data}warn: #{buffer}"
  when Whisper::LOG_LEVEL_ERROR
    puts "#{user_data}error: #{buffer}"
  when Whisper::LOG_LEVEL_DEBUG
    puts "#{user_data}debug: #{buffer}"
  when Whisper::LOG_LEVEL_CONT
    puts "#{user_data}same to previous: #{buffer}"
  end
}
Whisper.log_set log_callback, prefix
```

Using this feature, you can also suppress logging entirely:

```ruby
Whisper.log_set ->(level, buffer, user_data) {
  # do nothing
}, nil
Whisper::Context.new(MODEL)
```

License
-------

Same as [whisper.cpp][].

[whisper.cpp]: https://github.com/ggerganov/whisper.cpp
[models]: https://github.com/ggerganov/whisper.cpp/tree/master/models
bindings/ruby/Rakefile (new file, +68)
@@ -0,0 +1,68 @@
require 'rake/clean'
require "bundler/gem_tasks"
require "pathname"
require "yaml"
require "rake/testtask"

extsources = YAML.load_file("extsources.yaml")
SOURCES = FileList[]
extsources.each do |src|
  basename = src.pathmap("%f")
  dest = basename == "LICENSE" ? basename : basename.pathmap("ext/%f")
  file src
  file dest => src do |t|
    cp t.source, t.name
  end
  SOURCES.include dest
end
CLEAN.include SOURCES
CLEAN.include FileList[
  "ext/*.o",
  "ext/*.metal",
  "ext/whisper.{so,bundle,dll}",
  "ext/depend"
]

task build: FileList[
  "ext/Makefile",
  "ext/ruby_whisper.h",
  "ext/ruby_whisper.cpp",
  "whispercpp.gemspec",
]

directory "pkg"
CLOBBER.include "pkg"

TEST_MODEL = "../../models/ggml-base.en.bin"
LIB_NAME = "whisper".ext(RbConfig::CONFIG["DLEXT"])
SO_FILE = File.join("ext", LIB_NAME)
LIB_FILE = File.join("lib", LIB_NAME)

file "ext/Makefile" => ["ext/extconf.rb", "ext/ruby_whisper.h", "ext/ruby_whisper.cpp"] + SOURCES do |t|
  Dir.chdir "ext" do
    ruby "extconf.rb"
  end
end

file SO_FILE => "ext/Makefile" do |t|
  Dir.chdir "ext" do
    sh "make"
  end
end
CLEAN.include LIB_FILE

directory "lib"
file LIB_FILE => [SO_FILE, "lib"] do |t|
  copy t.source, t.name
end

Rake::TestTask.new do |t|
  t.test_files = FileList["tests/test_*.rb"]
end
task test: [TEST_MODEL, LIB_FILE]

file TEST_MODEL do
  Dir.chdir "../.." do
    sh "./models/download-ggml-model.sh base.en"
  end
end
bindings/ruby/ext/.gitignore (vendored, +28)
@@ -3,7 +3,33 @@ ggml.c
 ggml.h
 ggml-alloc.c
 ggml-alloc.h
-whisper.bundle
+ggml-aarch64.c
+ggml-aarch64.h
+ggml-backend.cpp
+ggml-backend-impl.h
+ggml-backend.c
+ggml-backend.h
+ggml-common.h
+ggml-cpu-impl.h
+ggml-metal.m
+ggml-metal.metal
+ggml-metal-embed.metal
+ggml-blas.cpp
+ggml-cuda.h
+ggml-impl.h
+ggml-kompute.h
+ggml-metal.h
+ggml-opencl.h
+ggml-quants.c
+ggml-quants.h
+ggml-sycl.h
+ggml-vulkan.h
+ggml-blas.h
+get-flags.mk
 whisper.cpp
 whisper.h
 dr_wav.h
+depend
+whisper.bundle
+whisper.so
+whisper.dll
@@ -1,21 +1,10 @@
 require 'mkmf'
-system("cp #{File.join(File.dirname(__FILE__),'..','..','..','whisper.cpp')} .")
-system("cp #{File.join(File.dirname(__FILE__),'..','..','..','whisper.h')} .")
-system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml.h')} .")
-system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml.c')} .")
-system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-impl.h')} .")
-system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-alloc.h')} .")
-system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-alloc.c')} .")
-system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-backend-impl.h')} .")
-system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-backend.h')} .")
-system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-backend.c')} .")
-system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-quants.h')} .")
-system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-quants.c')} .")
-system("cp #{File.join(File.dirname(__FILE__),'..','..','..','examples','dr_wav.h')} .")

 # need to use c++ compiler flags
 $CXXFLAGS << ' -std=c++11'

+$LDFLAGS << ' -lstdc++'
+
 # Set to true when building binary gems
 if enable_config('static-stdlib', false)
   $LDFLAGS << ' -static-libgcc -static-libstdc++'
@@ -26,4 +15,180 @@ if enable_config('march-tune-native', false)
   $CXXFLAGS << ' -march=native -mtune=native'
 end
+
+if ENV['WHISPER_METAL']
+  $GGML_METAL ||= true
+  $DEPRECATE_WARNING ||= true
+end
+
+$UNAME_S = `uname -s`.chomp
+$UNAME_P = `uname -p`.chomp
+$UNAME_M = `uname -m`.chomp
+
+if $UNAME_S == 'Darwin'
+  unless ENV['GGML_NO_METAL']
+    $GGML_METAL ||= true
+  end
+  $GGML_NO_OPENMP ||= true
+end
+
+if $GGML_METAL
+  $GGML_METAL_EMBED_LIBRARY = true
+end
+
+$MK_CPPFLAGS = ''
+$MK_CFLAGS = '-std=c11 -fPIC'
+$MK_CXXFLAGS = '-std=c++11 -fPIC'
+$MK_NVCCFLAGS = '-std=c++11'
+$MK_LDFLAGS = ''
+
+$OBJ_GGML = []
+$OBJ_WHISPER = []
+$OBJ_COMMON = []
+$OBJ_SDL = []
+
+$MK_CPPFLAGS << ' -D_XOPEN_SOURCE=600'
+
+if $UNAME_S == 'Linux'
+  $MK_CPPFLAGS << ' -D_GNU_SOURCE'
+end
+
+if $UNAME_S == 'Darwin'
+  $MK_CPPFLAGS << ' -D_DARWIN_C_SOURCE'
+end
+
+if ENV['WHISPER_DEBUG']
+  $MK_CFLAGS << ' -O0 -g'
+  $MK_CXXFLAGS << ' -O0 -g'
+  $MK_LDFLAGS << ' -g'
+  $MK_NVCCFLAGS << ' -O0 -g'
+else
+  $MK_CPPFLAGS << ' -DNDEBUG'
+  $MK_CFLAGS << ' -O3'
+  $MK_CXXFLAGS << ' -O3'
+  $MK_NVCCFLAGS << ' -O3'
+end
+
+$WARN_FLAGS =
+  ' -Wall' <<
+  ' -Wextra' <<
+  ' -Wpedantic' <<
+  ' -Wcast-qual' <<
+  ' -Wno-unused-function'
+
+$MK_CFLAGS <<
+  $WARN_FLAGS <<
+  ' -Wshadow' <<
+  ' -Wstrict-prototypes' <<
+  ' -Wpointer-arith' <<
+  ' -Wmissing-prototypes' <<
+  ' -Werror=implicit-int' <<
+  ' -Werror=implicit-function-declaration'
+
+$MK_CXXFLAGS <<
+  $WARN_FLAGS <<
+  ' -Wmissing-declarations' <<
+  ' -Wmissing-noreturn'
+
+unless `#{cc_command} #{$LDFLAGS} -Wl,-v 2>&1`.chomp.include? 'dyld-1015.7'
+  $MK_CPPFLAGS << ' -DHAVE_BUGGY_APPLE_LINKER'
+end
+
+if %w[Linux Darwin FreeBSD NetBSD OpenBSD Haiku].include? $UNAME_S
+  $MK_CFLAGS << ' -pthread'
+  $MK_CXXFLAGS << ' -pthread'
+end
+
+unless $_WIN32
+  $DSO_EXT = '.so'
+else
+  $DSO_EXT = '.dll'
+end
+
+unless ENV['RISCV']
+  if %w[x86_64 i686 amd64].include? $UNAME_M
+    $HOST_CXXFLAGS ||= ''
+
+    $MK_CFLAGS << ' -march=native -mtune=native'
+    $HOST_CXXFLAGS << ' -march=native -mtune=native'
+  end
+
+  if $UNAME_M.match? /aarch64.*/
+    $MK_CFLAGS << ' -mcpu=native'
+    $MK_CXXFLAGS << ' -mcpu=native'
+  end
+else
+  $MK_CFLAGS << ' -march=rv64gcv -mabi=lp64d'
+  $MK_CXXFLAGS << ' -march=rv64gcv -mabi=lp64d'
+end
+
+unless ENV['GGML_NO_ACCELERATE']
+  if $UNAME_S == 'Darwin'
+    $MK_CPPFLAGS << ' -DGGML_USE_ACCELERATE -DGGML_USE_BLAS'
+    $MK_CPPFLAGS << ' -DACCELERATE_NEW_LAPACK'
+    $MK_CPPFLAGS << ' -DACCELERATE_LAPACK_ILP64'
+    $MK_LDFLAGS << ' -framework Accelerate'
+    $OBJ_GGML << 'ggml-blas.o'
+  end
+end
+
+if ENV['GGML_OPENBLAS']
+  $MK_CPPFLAGS << " -DGGML_USE_BLAS #{`pkg-config --cflags-only-I openblas`.chomp}"
+  $MK_CFLAGS << " #{`pkg-config --cflags-only-other openblas`.chomp}"
+  $MK_LDFLAGS << " #{`pkg-config --libs openblas`}"
+  $OBJ_GGML << 'ggml-blas.o'
+end
+
+if ENV['GGML_OPENBLAS64']
+  $MK_CPPFLAGS << " -DGGML_USE_BLAS #{`pkg-config --cflags-only-I openblas64`.chomp}"
+  $MK_CFLAGS << " #{`pkg-config --cflags-only-other openblas64`.chomp}"
+  $MK_LDFLAGS << " #{`pkg-config --libs openblas64`}"
+  $OBJ_GGML << 'ggml-blas.o'
+end
+
+if $GGML_METAL
+  $MK_CPPFLAGS << ' -DGGML_USE_METAL'
+  $MK_LDFLAGS << ' -framework Foundation -framework Metal -framework MetalKit'
+  $OBJ_GGML << 'ggml-metal.o'
+
+  if ENV['GGML_METAL_NDEBUG']
+    $MK_CPPFLAGS << ' -DGGML_METAL_NDEBUG'
+  end
+
+  if $GGML_METAL_EMBED_LIBRARY
+    $MK_CPPFLAGS << ' -DGGML_METAL_EMBED_LIBRARY'
+    $OBJ_GGML << 'ggml-metal-embed.o'
+  end
+end
+
+$OBJ_GGML <<
+  'ggml.o' <<
+  'ggml-cpu.o' <<
+  'ggml-alloc.o' <<
+  'ggml-backend.o' <<
+  'ggml-quants.o' <<
+  'ggml-aarch64.o'
+
+$OBJ_WHISPER <<
+  'whisper.o'
+
+$objs = $OBJ_GGML + $OBJ_WHISPER + $OBJ_COMMON + $OBJ_SDL
+$objs << "ruby_whisper.o"
+
+$CPPFLAGS = "#{$MK_CPPFLAGS} #{$CPPFLAGS}"
+$CFLAGS = "#{$CPPFLAGS} #{$MK_CFLAGS} #{$GF_CFLAGS} #{$CFLAGS}"
+$BASE_CXXFLAGS = "#{$MK_CXXFLAGS} #{$CXXFLAGS}"
+$CXXFLAGS = "#{$BASE_CXXFLAGS} #{$HOST_CXXFLAGS} #{$GF_CXXFLAGS} #{$CPPFLAGS}"
+$NVCCFLAGS = "#{$MK_NVCCFLAGS} #{$NVCCFLAGS}"
+$LDFLAGS = "#{$MK_LDFLAGS} #{$LDFLAGS}"
+
 create_makefile('whisper')
+
+File.open 'Makefile', 'a' do |file|
+  file.puts 'include get-flags.mk'
+
+  if $GGML_METAL
+    if $GGML_METAL_EMBED_LIBRARY
+      file.puts 'include metal-embed.mk'
+    end
+  end
+end
@@ -1,87 +0,0 @@ (file deleted; its former contents, the ggml-backend internal header:)
#pragma once

// ggml-backend internal header

#include "ggml-backend.h"

#ifdef __cplusplus
extern "C" {
#endif

//
// Backend buffer
//

typedef void * ggml_backend_buffer_context_t;

struct ggml_backend_buffer_i {
    void   (*free_buffer)   (ggml_backend_buffer_t buffer);
    void * (*get_base)      (ggml_backend_buffer_t buffer); // get base pointer
    size_t (*get_alloc_size)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // pre-allocation callback
    void   (*init_tensor)   (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // post-allocation callback
    void   (*free_tensor)   (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // pre-free callback
};

struct ggml_backend_buffer {
    struct ggml_backend_buffer_i iface;

    ggml_backend_t backend;
    ggml_backend_buffer_context_t context;

    size_t size;
};

GGML_API ggml_backend_buffer_t ggml_backend_buffer_init(
        struct ggml_backend * backend,
        struct ggml_backend_buffer_i iface,
        ggml_backend_buffer_context_t context,
        size_t size);

//
// Backend
//

typedef void * ggml_backend_context_t;

struct ggml_backend_i {
    const char * (*get_name)(ggml_backend_t backend);

    void (*free)(ggml_backend_t backend);

    // buffer allocation
    ggml_backend_buffer_t (*alloc_buffer)(ggml_backend_t backend, size_t size);

    // get buffer alignment
    size_t (*get_alignment)(ggml_backend_t backend);

    // tensor data access
    // these functions can be asynchronous, helper functions are provided for synchronous access that automatically call synchronize
    void (*set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
    void (*get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
    void (*synchronize)     (ggml_backend_t backend);

    // (optional) copy tensor between different backends, allow for single-copy tranfers
    void (*cpy_tensor_from)(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
    void (*cpy_tensor_to)  (ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);

    // compute graph with a plan
    ggml_backend_graph_plan_t (*graph_plan_create) (ggml_backend_t backend, struct ggml_cgraph * cgraph);
    void                      (*graph_plan_free)   (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
    void                      (*graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan);

    // compute graph without a plan
    void (*graph_compute)(ggml_backend_t backend, struct ggml_cgraph * cgraph);

    // check if the backend supports an operation
    bool (*supports_op)(ggml_backend_t backend, const struct ggml_tensor * op);
};

struct ggml_backend {
    struct ggml_backend_i iface;

    ggml_backend_context_t context;
};

#ifdef __cplusplus
}
#endif
@ -1,950 +0,0 @@
|
|||||||
#include "ggml-backend-impl.h"
|
|
||||||
#include "ggml-alloc.h"
|
|
||||||
#include "ggml-impl.h"
|
|
||||||
|
|
||||||
#include <assert.h>
|
|
||||||
#include <limits.h>
|
|
||||||
#include <stdarg.h>
|
|
||||||
#include <stdio.h>
|
|
||||||
#include <stdlib.h>
|
|
||||||
#include <string.h>
|
|
||||||
|
|
||||||
#define UNUSED GGML_UNUSED
|
|
||||||
|
|
||||||
#define MAX(a, b) ((a) > (b) ? (a) : (b))
|
|
||||||
|
|
||||||
// backend buffer
|
|
||||||
|
|
||||||
ggml_backend_buffer_t ggml_backend_buffer_init(
|
|
||||||
struct ggml_backend * backend,
|
|
||||||
struct ggml_backend_buffer_i iface,
|
|
||||||
ggml_backend_buffer_context_t context,
|
|
||||||
size_t size) {
|
|
||||||
ggml_backend_buffer_t buffer = malloc(sizeof(struct ggml_backend_buffer));
|
|
||||||
|
|
||||||
GGML_ASSERT(iface.get_base != NULL);
|
|
||||||
|
|
||||||
(*buffer) = (struct ggml_backend_buffer) {
|
|
||||||
/* .interface = */ iface,
|
|
||||||
/* .backend = */ backend,
|
|
||||||
/* .context = */ context,
|
|
||||||
/* .size = */ size,
|
|
||||||
};
|
|
||||||
|
|
||||||
return buffer;
|
|
||||||
}
|
|
||||||
|
|
||||||
void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) {
|
|
||||||
if (buffer == NULL) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (buffer->iface.free_buffer != NULL) {
|
|
||||||
buffer->iface.free_buffer(buffer);
|
|
||||||
}
|
|
||||||
free(buffer);
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t ggml_backend_buffer_get_alignment(ggml_backend_buffer_t buffer) {
|
|
||||||
return ggml_backend_get_alignment(buffer->backend);
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t ggml_backend_buffer_get_size(ggml_backend_buffer_t buffer) {
|
|
||||||
return buffer->size;
|
|
||||||
}
|
|
||||||
|
|
||||||
void * ggml_backend_buffer_get_base(ggml_backend_buffer_t buffer) {
|
|
||||||
void * base = buffer->iface.get_base(buffer);
|
|
||||||
|
|
||||||
GGML_ASSERT(base != NULL && "backend buffer base cannot be NULL");
|
|
||||||
|
|
||||||
return base;
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
|
|
||||||
// get_alloc_size is optional, defaults to ggml_nbytes
|
|
||||||
if (buffer->iface.get_alloc_size) {
|
|
||||||
return buffer->iface.get_alloc_size(buffer, tensor);
|
|
||||||
}
|
|
||||||
return ggml_nbytes(tensor);
|
|
||||||
}
|
|
||||||
|
|
||||||
void ggml_backend_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
|
|
||||||
// init_tensor is optional
|
|
||||||
if (buffer->iface.init_tensor) {
|
|
||||||
buffer->iface.init_tensor(buffer, tensor);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void ggml_backend_buffer_free_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
|
|
||||||
// free_tensor is optional
|
|
||||||
if (buffer->iface.free_tensor) {
|
|
||||||
buffer->iface.free_tensor(buffer, tensor);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// backend
|
|
||||||
|
|
||||||
ggml_backend_t ggml_get_backend(const struct ggml_tensor * tensor) {
|
|
||||||
return tensor->buffer ? tensor->buffer->backend : NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
const char * ggml_backend_name(ggml_backend_t backend) {
|
|
||||||
if (backend == NULL) {
|
|
||||||
return "NULL";
|
|
||||||
}
|
|
||||||
return backend->iface.get_name(backend);
|
|
||||||
}
|
|
||||||
|
|
||||||
void ggml_backend_free(ggml_backend_t backend) {
|
|
||||||
if (backend == NULL) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
backend->iface.free(backend);
|
|
||||||
}
|
|
||||||
|
|
||||||
ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size) {
|
|
||||||
return backend->iface.alloc_buffer(backend, size);
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t ggml_backend_get_alignment(ggml_backend_t backend) {
|
|
||||||
return backend->iface.get_alignment(backend);
|
|
||||||
}
|
|
||||||
|
|
||||||
void ggml_backend_tensor_set_async(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
|
|
||||||
ggml_get_backend(tensor)->iface.set_tensor_async(ggml_get_backend(tensor), tensor, data, offset, size);
|
|
||||||
}
|
|
||||||
|
|
||||||
void ggml_backend_tensor_get_async(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
|
|
||||||
ggml_get_backend(tensor)->iface.get_tensor_async(ggml_get_backend(tensor), tensor, data, offset, size);
|
|
||||||
}
|
|
||||||
|
|
||||||
void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
|
|
||||||
ggml_backend_t backend = ggml_get_backend(tensor);
|
|
||||||
|
|
||||||
GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
|
|
||||||
GGML_ASSERT(backend != NULL && "tensor backend not set");
|
|
||||||
|
|
||||||
backend->iface.set_tensor_async(backend, tensor, data, offset, size);
|
|
||||||
backend->iface.synchronize(backend);
|
|
||||||
}
|
|
||||||
|
|
||||||
void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
|
|
||||||
ggml_backend_t backend = ggml_get_backend(tensor);
|
|
||||||
|
|
||||||
GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
|
|
||||||
GGML_ASSERT(backend != NULL && "tensor backend not set");
|
|
||||||
|
|
||||||
backend->iface.get_tensor_async(backend, tensor, data, offset, size);
|
|
||||||
backend->iface.synchronize(backend);
|
|
||||||
}
|
|
||||||
|
|
||||||
void ggml_backend_synchronize(ggml_backend_t backend) {
|
|
||||||
backend->iface.synchronize(backend);
|
|
||||||
}
|
|
||||||
|
|
||||||
ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
|
|
||||||
return backend->iface.graph_plan_create(backend, cgraph);
|
|
||||||
}
|
|
||||||
|
|
||||||
void ggml_backend_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
|
|
||||||
backend->iface.graph_plan_free(backend, plan);
|
|
||||||
}
|
|
||||||
|
|
||||||
void ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
|
|
||||||
backend->iface.graph_plan_compute(backend, plan);
|
|
||||||
}
|
|
||||||
|
|
||||||
void ggml_backend_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
|
|
||||||
backend->iface.graph_compute(backend, cgraph);
|
|
||||||
}
|
|
||||||
|
|
||||||
bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
|
|
||||||
return backend->iface.supports_op(backend, op);
|
|
||||||
}
|
|
||||||
|
|
||||||
// backend copy
|
|
||||||
|
|
||||||
static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) {
|
|
||||||
if (a->type != b->type) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
for (int i = 0; i < GGML_MAX_DIMS; i++) {
|
|
||||||
if (a->ne[i] != b->ne[i]) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
if (a->nb[i] != b->nb[i]) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst) {
|
|
||||||
//printf("src: %s ne: [%d %d %d %d] nb: [%d %d %d %d]\n", src->name, (int)src->ne[0], (int)src->ne[1], (int)src->ne[2], (int)src->ne[3], (int)src->nb[0], (int)src->nb[1], (int)src->nb[2], (int)src->nb[3]);
|
|
||||||
//printf("dst: %s ne: [%d %d %d %d] nb: [%d %d %d %d]\n", dst->name, (int)dst->ne[0], (int)dst->ne[1], (int)dst->ne[2], (int)dst->ne[3], (int)dst->nb[0], (int)dst->nb[1], (int)dst->nb[2], (int)dst->nb[3]);
|
|
||||||
GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");
|
|
||||||
|
|
||||||
// fprintf(stderr, "cpy tensor %s from %s to %s (%lu bytes)\n", src->name, ggml_backend_name(src->backend), ggml_backend_name(dst->backend), ggml_nbytes(src));
|
|
||||||
|
|
||||||
if (src == dst) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: allow backends to support copy to/from same backend
|
|
||||||
|
|
||||||
if (ggml_get_backend(dst)->iface.cpy_tensor_from != NULL) {
|
|
||||||
ggml_get_backend(dst)->iface.cpy_tensor_from(ggml_get_backend(dst)->context, src, dst);
|
|
||||||
} else if (ggml_get_backend(src)->iface.cpy_tensor_to != NULL) {
|
|
||||||
ggml_get_backend(src)->iface.cpy_tensor_to(ggml_get_backend(src)->context, src, dst);
|
|
||||||
} else {
|
|
||||||
// shouldn't be hit when copying from/to CPU
|
|
||||||
#ifndef NDEBUG
|
|
||||||
fprintf(stderr, "ggml_backend_tensor_copy: neither cpy_tensor_from nor cpy_tensor_to are implemented for backends %s and %s, falling back to get/set\n", ggml_backend_name(src->buffer->backend), ggml_backend_name(dst->buffer->backend));
|
|
||||||
#endif
|
|
||||||
size_t nbytes = ggml_nbytes(src);
|
|
||||||
void * data = malloc(nbytes);
|
|
||||||
ggml_backend_tensor_get(src, data, 0, nbytes);
|
|
||||||
ggml_backend_tensor_set(dst, data, 0, nbytes);
|
|
||||||
free(data);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// backend CPU
|
|
||||||
|
|
||||||
struct ggml_backend_cpu_context {
|
|
||||||
int n_threads;
|
|
||||||
void * work_data;
|
|
||||||
size_t work_size;
|
|
||||||
};
|
|
||||||
|
|
||||||
static const char * ggml_backend_cpu_name(ggml_backend_t backend) {
|
|
||||||
return "CPU";
|
|
||||||
|
|
||||||
UNUSED(backend);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void ggml_backend_cpu_free(ggml_backend_t backend) {
|
|
||||||
struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;
|
|
||||||
free(cpu_ctx->work_data);
|
|
||||||
free(cpu_ctx);
|
|
||||||
free(backend);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) {
|
|
||||||
return (void *)buffer->context;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) {
|
|
||||||
free(buffer->context);
|
|
||||||
UNUSED(buffer);
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct ggml_backend_buffer_i cpu_backend_buffer_i = {
|
|
||||||
/* .free_buffer = */ ggml_backend_cpu_buffer_free_buffer,
|
|
||||||
/* .get_base = */ ggml_backend_cpu_buffer_get_base,
|
|
||||||
/* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
|
|
||||||
/* .init_tensor = */ NULL, // no initialization required
|
|
||||||
/* .free_tensor = */ NULL, // no cleanup required
|
|
||||||
};
|
|
||||||
|
|
||||||
// for buffers from ptr, free is not called
|
|
||||||
static struct ggml_backend_buffer_i cpu_backend_buffer_i_from_ptr = {
|
|
||||||
/* .free_buffer = */ NULL, // ptr is not owned by the buffer, so it does not need to be freed
|
|
||||||
/* .get_base = */ ggml_backend_cpu_buffer_get_base,
|
|
||||||
/* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
|
|
||||||
/* .init_tensor = */ NULL,
|
|
||||||
/* .free_tensor = */ NULL,
|
|
||||||
};
|
|
||||||
|
|
||||||
static const size_t TENSOR_ALIGNMENT = 64; // should be enough for AVX 512
|
|
||||||
|
|
||||||
static ggml_backend_buffer_t ggml_backend_cpu_alloc_buffer(ggml_backend_t backend, size_t size) {
|
|
||||||
size += TENSOR_ALIGNMENT; // malloc may return an address that is not aligned
|
|
||||||
void * data = malloc(size); // TODO: maybe use GGML_ALIGNED_MALLOC?
|
|
||||||
|
|
||||||
GGML_ASSERT(data != NULL && "failed to allocate buffer");
|
|
||||||
|
|
||||||
return ggml_backend_buffer_init(backend, cpu_backend_buffer_i, data, size);
|
|
||||||
}
|
|
||||||
|
|
||||||
static size_t ggml_backend_cpu_get_alignment(ggml_backend_t backend) {
|
|
||||||
return TENSOR_ALIGNMENT;
|
|
||||||
UNUSED(backend);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void ggml_backend_cpu_set_tensor_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
|
|
||||||
GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");
|
|
||||||
GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
|
|
||||||
|
|
||||||
memcpy((char *)tensor->data + offset, data, size);
|
|
||||||
|
|
||||||
UNUSED(backend);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void ggml_backend_cpu_get_tensor_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
|
|
||||||
GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");
|
|
||||||
GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
|
|
||||||
|
|
||||||
memcpy(data, (const char *)tensor->data + offset, size);
|
|
||||||
|
|
||||||
UNUSED(backend);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void ggml_backend_cpu_synchronize(ggml_backend_t backend) {
|
|
||||||
UNUSED(backend);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void ggml_backend_cpu_cpy_tensor_from(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst) {
|
|
||||||
ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src));
|
|
||||||
|
|
||||||
UNUSED(backend);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void ggml_backend_cpu_cpy_tensor_to(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst) {
|
|
||||||
ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src));
|
|
||||||
|
|
||||||
UNUSED(backend);
|
|
||||||
}
|
|
||||||
|
|
||||||
struct ggml_backend_plan_cpu {
|
|
||||||
struct ggml_cplan cplan;
|
|
||||||
struct ggml_cgraph cgraph;
|
|
||||||
};
|
|
||||||
|
|
||||||
static ggml_backend_graph_plan_t ggml_backend_cpu_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
|
|
||||||
struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;
|
|
||||||
|
|
||||||
struct ggml_backend_plan_cpu * cpu_plan = malloc(sizeof(struct ggml_backend_plan_cpu));
|
|
||||||
|
|
||||||
cpu_plan->cplan = ggml_graph_plan(cgraph, cpu_ctx->n_threads);
|
|
||||||
cpu_plan->cgraph = *cgraph;
|
|
||||||
|
|
||||||
if (cpu_plan->cplan.work_size > 0) {
|
|
||||||
cpu_plan->cplan.work_data = malloc(cpu_plan->cplan.work_size);
|
|
||||||
}
|
|
||||||
|
|
||||||
return cpu_plan;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void ggml_backend_cpu_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
|
|
||||||
struct ggml_backend_plan_cpu * cpu_plan = (struct ggml_backend_plan_cpu *)plan;
|
|
||||||
|
|
||||||
free(cpu_plan->cplan.work_data);
|
|
||||||
free(cpu_plan);
|
|
||||||
|
|
||||||
UNUSED(backend);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void ggml_backend_cpu_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
|
|
||||||
struct ggml_backend_plan_cpu * cpu_plan = (struct ggml_backend_plan_cpu *)plan;
|
|
||||||
|
|
||||||
ggml_graph_compute(&cpu_plan->cgraph, &cpu_plan->cplan);
|
|
||||||
|
|
||||||
UNUSED(backend);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void ggml_backend_cpu_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
|
|
||||||
struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;
|
|
||||||
|
|
||||||
struct ggml_cplan cplan = ggml_graph_plan(cgraph, cpu_ctx->n_threads);
|
|
||||||
|
|
||||||
if (cpu_ctx->work_size < cplan.work_size) {
|
|
||||||
// TODO: may be faster to free and use malloc to avoid the copy
|
|
||||||
cpu_ctx->work_data = realloc(cpu_ctx->work_data, cplan.work_size);
|
|
||||||
cpu_ctx->work_size = cplan.work_size;
|
|
||||||
}
|
|
||||||
|
|
||||||
cplan.work_data = cpu_ctx->work_data;
|
|
||||||
|
|
||||||
ggml_graph_compute(cgraph, &cplan);
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool ggml_backend_cpu_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
|
|
||||||
return true;
|
|
||||||
UNUSED(backend);
|
|
||||||
UNUSED(op);
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct ggml_backend_i cpu_backend_i = {
|
|
||||||
/* .get_name = */ ggml_backend_cpu_name,
|
|
||||||
/* .free = */ ggml_backend_cpu_free,
|
|
||||||
/* .alloc_buffer = */ ggml_backend_cpu_alloc_buffer,
|
|
||||||
/* .get_alignment = */ ggml_backend_cpu_get_alignment,
|
|
||||||
/* .set_tensor_async = */ ggml_backend_cpu_set_tensor_async,
|
|
||||||
/* .get_tensor_async = */ ggml_backend_cpu_get_tensor_async,
|
|
||||||
/* .synchronize = */ ggml_backend_cpu_synchronize,
|
|
||||||
/* .cpy_tensor_from = */ ggml_backend_cpu_cpy_tensor_from,
|
|
||||||
/* .cpy_tensor_to = */ ggml_backend_cpu_cpy_tensor_to,
|
|
||||||
/* .graph_plan_create = */ ggml_backend_cpu_graph_plan_create,
|
|
||||||
/* .graph_plan_free = */ ggml_backend_cpu_graph_plan_free,
|
|
||||||
/* .graph_plan_compute = */ ggml_backend_cpu_graph_plan_compute,
|
|
||||||
/* .graph_compute = */ ggml_backend_cpu_graph_compute,
|
|
||||||
/* .supports_op = */ ggml_backend_cpu_supports_op,
|
|
||||||
};
|
|
||||||
|
|
||||||
ggml_backend_t ggml_backend_cpu_init(void) {
|
|
||||||
struct ggml_backend_cpu_context * ctx = malloc(sizeof(struct ggml_backend_cpu_context));
|
|
||||||
|
|
||||||
ctx->n_threads = GGML_DEFAULT_N_THREADS;
|
|
||||||
ctx->work_data = NULL;
|
|
||||||
ctx->work_size = 0;
|
|
||||||
|
|
||||||
ggml_backend_t cpu_backend = malloc(sizeof(struct ggml_backend));
|
|
||||||
|
|
||||||
*cpu_backend = (struct ggml_backend) {
|
|
||||||
/* .interface = */ cpu_backend_i,
|
|
||||||
/* .context = */ ctx
|
|
||||||
};
|
|
||||||
return cpu_backend;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool ggml_backend_is_cpu(ggml_backend_t backend) {
|
|
||||||
return backend->iface.get_name == ggml_backend_cpu_name;
|
|
||||||
}
|
|
||||||
|
|
||||||
void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads) {
|
|
||||||
GGML_ASSERT(ggml_backend_is_cpu(backend_cpu));
|
|
||||||
|
|
||||||
struct ggml_backend_cpu_context * ctx = (struct ggml_backend_cpu_context *)backend_cpu->context;
|
|
||||||
ctx->n_threads = n_threads;
|
|
||||||
}
|
|
||||||
|
|
||||||
ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(ggml_backend_t backend_cpu, void * ptr, size_t size) {
|
|
||||||
return ggml_backend_buffer_init(backend_cpu, cpu_backend_buffer_i_from_ptr, ptr, size);
|
|
||||||
}
|
|
||||||
|
|
||||||
// scheduler
|
|
||||||
|
|
||||||
#define GGML_MAX_BACKENDS 4
|
|
||||||
#define GGML_MAX_SPLITS 256
|
|
||||||
#define GGML_MAX_SPLIT_INPUTS 16
|
|
||||||
|
|
||||||
struct ggml_backend_sched_split {
|
|
||||||
ggml_tallocr_t tallocr;
|
|
||||||
int i_start;
|
|
||||||
int i_end;
|
|
||||||
struct ggml_tensor * inputs[GGML_MAX_SPLIT_INPUTS];
|
|
||||||
int n_inputs;
|
|
||||||
struct ggml_cgraph * graph;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct ggml_backend_sched {
|
|
||||||
int n_backends;
|
|
||||||
ggml_backend_t backends[GGML_MAX_BACKENDS];
|
|
||||||
ggml_tallocr_t tallocs[GGML_MAX_BACKENDS];
|
|
||||||
|
|
||||||
ggml_gallocr_t galloc;
|
|
||||||
|
|
||||||
struct ggml_hash_set hash_set;
|
|
||||||
ggml_tallocr_t * node_talloc; // [hash_set.size]
|
|
||||||
struct ggml_tensor * (* node_copies)[GGML_MAX_BACKENDS]; // [hash_set.size][GGML_MAX_BACKENDS]
|
|
||||||
|
|
||||||
struct ggml_cgraph * graph;
|
|
||||||
struct ggml_backend_sched_split splits[GGML_MAX_SPLITS];
|
|
||||||
int n_splits;
|
|
||||||
|
|
||||||
struct ggml_context * ctx;
|
|
||||||
|
|
||||||
// align context_buffer to GGML_MEM_ALIGN
|
|
||||||
#ifdef _MSC_VER
|
|
||||||
__declspec(align(GGML_MEM_ALIGN))
|
|
||||||
#else
|
|
||||||
__attribute__((aligned(GGML_MEM_ALIGN)))
|
|
||||||
#endif
|
|
||||||
char context_buffer[GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS*sizeof(struct ggml_tensor) + GGML_MAX_SPLITS*sizeof(struct ggml_cgraph)];
|
|
||||||
};
|
|
||||||
|
|
||||||
#define hash_id(node) ggml_hash_find_or_insert(sched->hash_set, node)
|
|
||||||
#define node_allocr(node) sched->node_talloc[hash_id(node)]
|
|
||||||
|
|
||||||
static bool ggml_is_view_op(enum ggml_op op) {
|
|
||||||
return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE;
|
|
||||||
}
|
|
||||||
|
|
||||||
// returns the priority of the backend, lower is better
|
|
||||||
static int sched_backend_prio(ggml_backend_sched_t sched, ggml_backend_t backend) {
|
|
||||||
for (int i = 0; i < sched->n_backends; i++) {
|
|
||||||
if (sched->backends[i] == backend) {
|
|
||||||
return i;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return INT_MAX;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int sched_allocr_prio(ggml_backend_sched_t sched, ggml_tallocr_t allocr) {
|
|
||||||
for (int i = 0; i < sched->n_backends; i++) {
|
|
||||||
if (sched->tallocs[i] == allocr) {
|
|
||||||
return i;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return INT_MAX;
|
|
||||||
}
|
|
||||||
|
|
||||||
// returns the backend that should be used for the node based on the current locations
|
|
||||||
char causes[GGML_DEFAULT_GRAPH_SIZE*4 + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS][128]; // debug, remove
|
|
||||||
static ggml_backend_t sched_backend_from_cur(ggml_backend_sched_t sched, struct ggml_tensor * node) {
|
|
||||||
// if the dst tensor is already allocated in a buffer, we must assume that it is critical to keep it there
|
|
||||||
// ie. kv cache updates
|
|
||||||
// note that this doesn't allow fallback to CPU. need to add output tensors to the splits to copy the data back to the original backend.
|
|
||||||
// dst
|
|
||||||
ggml_backend_t cur_backend = ggml_get_backend(node);
|
|
||||||
if (cur_backend != NULL) {
|
|
||||||
sprintf(causes[hash_id(node)], "1.dst");
|
|
||||||
return cur_backend;
|
|
||||||
}
|
|
||||||
|
|
||||||
// view_src
|
|
||||||
if (node->view_src != NULL && ggml_get_backend(node->view_src) != NULL) {
|
|
||||||
sprintf(causes[hash_id(node)], "1.vsrc");
|
|
||||||
return ggml_get_backend(node->view_src);
|
|
||||||
}
|
|
||||||
|
|
||||||
// src
|
|
||||||
int cur_prio = INT_MAX;
|
|
||||||
size_t cur_size = 0;
|
|
||||||
|
|
||||||
for (int i = 0; i < GGML_MAX_SRC; i++) {
|
|
||||||
const struct ggml_tensor * src = node->src[i];
|
|
||||||
if (src == NULL) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
ggml_backend_t src_backend = ggml_get_backend(src);
|
|
||||||
if (src_backend != NULL) {
|
|
||||||
int src_prio = sched_backend_prio(sched, src_backend);
|
|
||||||
size_t src_size = ggml_nbytes(src);
|
|
||||||
if (src_prio < cur_prio && src_size >= cur_size) {
|
|
||||||
cur_prio = src_prio;
|
|
||||||
cur_size = src_size;
|
|
||||||
cur_backend = src_backend;
|
|
||||||
sprintf(causes[hash_id(node)], "1.src%d", i);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return cur_backend;
|
|
||||||
}
|
|
||||||
|
|
||||||
static char * fmt_size(size_t size) {
|
|
||||||
static char buffer[128];
|
|
||||||
if (size >= 1024*1024) {
|
|
||||||
sprintf(buffer, "%zuM", size/1024/1024);
|
|
||||||
} else {
|
|
||||||
sprintf(buffer, "%zuK", size/1024);
|
|
||||||
}
|
|
||||||
return buffer;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
|
|
||||||
int cur_split = 0;
|
|
||||||
for (int i = 0; i < graph->n_nodes; i++) {
|
|
||||||
if (cur_split < sched->n_splits && i == sched->splits[cur_split].i_start) {
|
|
||||||
ggml_backend_t split_backend = ggml_tallocr_get_buffer(sched->splits[cur_split].tallocr)->backend;
|
|
||||||
fprintf(stderr, "\n## SPLIT #%d: %s # %d inputs: ", cur_split, ggml_backend_name(split_backend), sched->splits[cur_split].n_inputs);
|
|
||||||
for (int j = 0; j < sched->splits[cur_split].n_inputs; j++) {
|
|
||||||
fprintf(stderr, "[%s (%5.5s)] ", sched->splits[cur_split].inputs[j]->name, fmt_size(ggml_nbytes(sched->splits[cur_split].inputs[j])));
|
|
||||||
}
|
|
||||||
fprintf(stderr, "\n");
|
|
||||||
cur_split++;
|
|
||||||
}
|
|
||||||
struct ggml_tensor * node = graph->nodes[i];
|
|
||||||
if (ggml_is_view_op(node->op)) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
ggml_tallocr_t node_allocr = node_allocr(node);
|
|
||||||
ggml_backend_t node_backend = node_allocr ? ggml_tallocr_get_buffer(node_allocr)->backend : NULL;
|
|
||||||
fprintf(stderr, "node #%3d (%10.10s): %20.20s (%4.4s) [%4.4s %8.8s]:", i, ggml_op_name(node->op), node->name, fmt_size(ggml_nbytes(node)), node_allocr ? ggml_backend_name(node_backend) : "NULL", causes[hash_id(node)]);
|
|
||||||
for (int j = 0; j < GGML_MAX_SRC; j++) {
|
|
||||||
struct ggml_tensor * src = node->src[j];
|
|
||||||
if (src == NULL) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
ggml_tallocr_t src_allocr = node_allocr(src);
|
|
||||||
ggml_backend_t src_backend = src_allocr ? ggml_tallocr_get_buffer(src_allocr)->backend : NULL;
|
|
||||||
fprintf(stderr, " %20.20s (%4.4s) [%4.4s %8.8s]", src->name, fmt_size(ggml_nbytes(src)), src_backend ? ggml_backend_name(src_backend) : "NULL", causes[hash_id(src)]);
|
|
||||||
}
|
|
||||||
fprintf(stderr, "\n");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// creates a copy of the tensor with the same memory layout
|
|
||||||
static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, const struct ggml_tensor * tensor) {
|
|
||||||
struct ggml_tensor * dup = ggml_dup_tensor(ctx, tensor);
|
|
||||||
for (int i = 0; i < GGML_MAX_DIMS; i++) {
|
|
||||||
dup->nb[i] = tensor->nb[i];
|
|
||||||
}
|
|
||||||
return dup;
|
|
||||||
}
|
|
||||||
|
|
||||||

// assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend
// TODO: merge passes
static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
    // reset state
    size_t hash_size = sched->hash_set.size;
    memset(sched->hash_set.keys, 0, sizeof(sched->hash_set.keys[0]) * hash_size);
    memset(sched->node_talloc,   0, sizeof(sched->node_talloc[0])   * hash_size);
    memset(sched->node_copies,   0, sizeof(sched->node_copies[0])   * hash_size);
    sched->n_splits = 0;

    struct ggml_init_params params = {
        /*.mem_size =   */ sizeof(sched->context_buffer),
        /*.mem_buffer = */ sched->context_buffer,
        /*.no_alloc =   */ true
    };

    if (sched->ctx != NULL) {
        ggml_free(sched->ctx);
    }

    sched->ctx = ggml_init(params);

    // pass 1: assign backends to ops with allocated inputs
    for (int i = 0; i < graph->n_leafs; i++) {
        struct ggml_tensor * leaf = graph->leafs[i];
        if (node_allocr(leaf) != NULL) {
            // do not overwrite user assignments
            continue;
        }
        ggml_backend_t leaf_backend = ggml_get_backend(leaf);
        if (leaf_backend == NULL && leaf->view_src != NULL) {
            leaf_backend = ggml_get_backend(leaf->view_src);
        }
        if (leaf_backend != NULL) {
            node_allocr(leaf) = ggml_backend_sched_get_tallocr(sched, leaf_backend);
        }
    }

    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        if (node_allocr(node) != NULL) {
            // do not overwrite user assignments
            continue;
        }
        ggml_backend_t node_backend = sched_backend_from_cur(sched, node);
        if (node_backend != NULL) {
            node_allocr(node) = ggml_backend_sched_get_tallocr(sched, node_backend);
        }
    }
    //printf("PASS 1 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);

    // pass 2: assign backends to ops from current assignments
    // TODO:
    //  - reuse sched_backend_from_cur
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        ggml_tallocr_t node_allocr = node_allocr(node);
        if (node_allocr == NULL) {
            int    cur_prio = INT_MAX;
            size_t cur_size = 0;
            for (int j = 0; j < GGML_MAX_SRC; j++) {
                struct ggml_tensor * src = node->src[j];
                if (src == NULL) {
                    break;
                }
                ggml_tallocr_t src_allocr = node_allocr(src);
                if (src_allocr != NULL) {
                    int    src_prio = sched_allocr_prio(sched, src_allocr);
                    size_t src_size = ggml_nbytes(src);
                    if (src_prio < cur_prio && src_size >= cur_size) {
                        cur_prio = src_prio;
                        cur_size = src_size;
                        node_allocr = src_allocr;
                        sprintf(causes[hash_id(node)], "2.src%d", j);
                    }
                }
            }
            if (node_allocr != NULL) {
                node_allocr(node) = node_allocr;
            }
        }
    }
    //printf("PASS 2 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);

    // pass 3: assign backends to remaining src from dst (should only be leafs)
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        ggml_tallocr_t node_allocr = node_allocr(node);
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (src == NULL) {
                break;
            }
            ggml_tallocr_t src_allocr = node_allocr(src);
            if (src_allocr == NULL) {
                node_allocr(src) = node_allocr;
            }
        }
    }
    //printf("PASS 3 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);

    // pass 4: split graph, find tensors that need to be copied
    // TODO:
    //  - when switching from a less preferred backend to a more preferred backend, check if it is possible to move the switch to an earlier point for the same cost
    // find first backend
    int cur_split = 0;
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        if (node->view_src == NULL) {
            sched->splits[0].tallocr = node_allocr(node);
            break;
        }
    }
    sched->splits[0].i_start = 0;
    sched->splits[0].n_inputs = 0;
    memset(sched->splits[0].inputs, 0, sizeof(sched->splits[0].inputs)); //HACK
    ggml_tallocr_t cur_allocr = sched->splits[0].tallocr;
    size_t cur_backend_id = sched_allocr_prio(sched, cur_allocr);
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];

        if (ggml_is_view_op(node->op)) {
            continue;
        }

        ggml_tallocr_t node_allocr = node_allocr(node);

        if (node_allocr != cur_allocr) {
            sched->splits[cur_split].i_end = i;
            cur_split++;
            GGML_ASSERT(cur_split < GGML_MAX_SPLITS);
            sched->splits[cur_split].tallocr = node_allocr;
            sched->splits[cur_split].i_start = i;
            sched->splits[cur_split].n_inputs = 0;
            memset(sched->splits[cur_split].inputs, 0, sizeof(sched->splits[cur_split].inputs)); //HACK
            cur_allocr = node_allocr;
            cur_backend_id = sched_allocr_prio(sched, cur_allocr);
        }

        // find inputs that are not on the same backend
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (src == NULL) {
                break;
            }
            ggml_tallocr_t src_allocr = node_allocr(src);
            if (src_allocr != node_allocr) {
                int n_inputs = sched->splits[cur_split].n_inputs++;
                GGML_ASSERT(n_inputs < GGML_MAX_SPLIT_INPUTS);
                sched->splits[cur_split].inputs[n_inputs] = (struct ggml_tensor *)src;

                // create copies
                size_t id = hash_id(src);
                if (sched->node_copies[id][cur_backend_id] == NULL) {
                    struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src);
                    sched->node_copies[id][cur_backend_id] = tensor_copy;
                    node_allocr(tensor_copy) = cur_allocr;
                    ggml_backend_t backend = ggml_tallocr_get_buffer(cur_allocr)->backend;
                    ggml_format_name(tensor_copy, "%s#%s", ggml_backend_name(backend), src->name);
                }
                node->src[j] = sched->node_copies[id][cur_backend_id];
            }
        }
    }
    sched->splits[cur_split].i_end = graph->n_nodes;
    sched->n_splits = cur_split + 1;

    //fprintf(stderr, "PASS 4 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); fflush(stdout);

#if 1
    // sanity check: all sources should have the same backend as the node
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        ggml_tallocr_t node_allocr = node_allocr(node);
        if (node_allocr == NULL) {
            fprintf(stderr, "!!!!!!! %s has no backend\n", node->name);
        }
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (src == NULL) {
                break;
            }
            ggml_tallocr_t src_allocr = node_allocr(src);
            if (src_allocr != node_allocr /* && src_backend != NULL */) { // ignore nulls for now
                fprintf(stderr, "!!!! %s has backend %s, src %d (%s) has backend %s\n",
                    node->name, node_allocr ? ggml_backend_name(ggml_tallocr_get_buffer(node_allocr)->backend) : "NULL",
                    j, src->name, src_allocr ? ggml_backend_name(ggml_tallocr_get_buffer(src_allocr)->backend) : "NULL");
            }
        }
    }
#endif

    // create copies of the graph for each split
    // FIXME: avoid this copy, pass split inputs to ggml_gallocr_alloc_graph_n in some other way
    struct ggml_cgraph * graph_copy = ggml_new_graph_custom(sched->ctx, graph->n_nodes + sched->n_splits*GGML_MAX_SPLIT_INPUTS, false);
    for (int i = 0; i < sched->n_splits; i++) {
        struct ggml_backend_sched_split * split = &sched->splits[i];
        split->graph = ggml_graph_view(sched->ctx, graph, split->i_start, split->i_end);

        // add inputs to the graph copy so that they are allocated by ggml-alloc at the start of the split
        for (int j = 0; j < split->n_inputs; j++) {
            struct ggml_tensor * input = split->inputs[j];
            struct ggml_tensor * input_cpy = sched->node_copies[hash_id(input)][sched_allocr_prio(sched, split->tallocr)];
            input_cpy->src[0] = input;
            graph_copy->nodes[graph_copy->n_nodes++] = input_cpy;
        }

        for (int j = split->i_start; j < split->i_end; j++) {
            graph_copy->nodes[graph_copy->n_nodes++] = graph->nodes[j];
        }
    }
    sched->graph = graph_copy;
}

static void sched_alloc_splits(ggml_backend_sched_t sched) {
    ggml_gallocr_alloc_graph_n(
        sched->galloc,
        sched->graph,
        sched->hash_set,
        sched->node_talloc);
}

static void sched_compute_splits(ggml_backend_sched_t sched) {
    uint64_t copy_us[GGML_MAX_BACKENDS] = {0};
    uint64_t compute_us[GGML_MAX_BACKENDS] = {0};

    struct ggml_backend_sched_split * splits = sched->splits;

    for (int i = 0; i < sched->n_splits; i++) {
        struct ggml_backend_sched_split * split = &splits[i];
        ggml_backend_t split_backend = ggml_tallocr_get_buffer(split->tallocr)->backend;
        int split_backend_id = sched_backend_prio(sched, split_backend);

        // copy the input tensors to the split backend
        uint64_t copy_start_us = ggml_time_us();
        for (int j = 0; j < split->n_inputs; j++) {
            struct ggml_tensor * input_cpy = sched->node_copies[hash_id(split->inputs[j])][sched_backend_prio(sched, split_backend)];
            if (split->inputs[j]->buffer == NULL) {
                if (split->inputs[j]->view_src == NULL) {
                    fprintf(stderr, "input %s has no buffer and no view_src\n", split->inputs[j]->name);
                    exit(1);
                }
                struct ggml_tensor * view = split->inputs[j];
                view->backend = view->view_src->backend;
                view->buffer  = view->view_src->buffer;
                view->data    = (char *)view->view_src->data + view->view_offs;
                ggml_backend_buffer_init_tensor(ggml_backend_sched_get_buffer(sched, view->buffer->backend), view);
            }
            if (input_cpy->buffer == NULL) {
                fprintf(stderr, "input_cpy %s has no buffer\n", input_cpy->name);
                exit(1);
            }
            GGML_ASSERT(split->inputs[j]->buffer->backend != input_cpy->buffer->backend);
            GGML_ASSERT(input_cpy->buffer->backend == split_backend);
            ggml_backend_tensor_copy(split->inputs[j], input_cpy);
        }
        // ggml_backend_synchronize(split_backend);
        int64_t copy_end_us = ggml_time_us();
        copy_us[split_backend_id] += copy_end_us - copy_start_us;

#if 0
        char split_filename[GGML_MAX_NAME];
        snprintf(split_filename, GGML_MAX_NAME, "split_%i_%s.dot", i, ggml_backend_name(split_backend));
        ggml_graph_dump_dot(split->graph, NULL, split_filename);
#endif

        uint64_t compute_start_us = ggml_time_us();
        ggml_backend_graph_compute(split_backend, split->graph);
        // ggml_backend_synchronize(split_backend);
        uint64_t compute_end_us = ggml_time_us();
        compute_us[split_backend_id] += compute_end_us - compute_start_us;
    }

#if 0
    // per-backend timings
    fprintf(stderr, "sched_compute_splits times (%d splits):\n", sched->n_splits);
    for (int i = 0; i < sched->n_backends; i++) {
        if (copy_us[i] > 0 || compute_us[i] > 0) {
            fprintf(stderr, "\t%5.5s: %lu us copy, %lu us compute\n", ggml_backend_name(sched->backends[i]), copy_us[i], compute_us[i]);
        }
    }
#endif
}

static void sched_reset(ggml_backend_sched_t sched) {
    for (int i = 0; i < sched->n_backends; i++) {
        ggml_tallocr_reset(sched->tallocs[i]);
    }
}

ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends) {
    GGML_ASSERT(n_backends <= GGML_MAX_BACKENDS);

    struct ggml_backend_sched * sched = malloc(sizeof(struct ggml_backend_sched));
    memset(sched, 0, sizeof(struct ggml_backend_sched));

    fprintf(stderr, "ggml_backend_sched size: %lu KB\n", sizeof(struct ggml_backend_sched)/1024);

    sched->n_backends = n_backends;
    for (int i = 0; i < n_backends; i++) {
        sched->backends[i] = backends[i];
    }

    sched->galloc = ggml_gallocr_new();

    // init measure allocs for each backend
    for (int i = 0; i < n_backends; i++) {
        sched->tallocs[i] = ggml_tallocr_new_measure_from_backend(backends[i]);
    }

    return sched;
}

void ggml_backend_sched_free(ggml_backend_sched_t sched) {
    if (sched == NULL) {
        return;
    }
    for (int i = 0; i < sched->n_backends; i++) {
        ggml_tallocr_free(sched->tallocs[i]);
    }
    ggml_gallocr_free(sched->galloc);
    free(sched->hash_set.keys);
    free(sched->node_talloc);
    free(sched->node_copies);
    free(sched);
}

void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) {
    // initialize hash tables
    size_t hash_size = measure_graph->visited_hash_table.size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS;
    sched->hash_set.size = hash_size;
    sched->hash_set.keys = malloc(sizeof(sched->hash_set.keys[0]) * hash_size);
    sched->node_talloc   = malloc(sizeof(sched->node_talloc[0])   * hash_size);
    sched->node_copies   = malloc(sizeof(sched->node_copies[0])   * hash_size);

    sched_split_graph(sched, measure_graph);
    sched_alloc_splits(sched);

    // allocate buffers and reset allocators
    for (int i = 0; i < sched->n_backends; i++) {
        size_t size = ggml_tallocr_max_size(sched->tallocs[i]);
        ggml_tallocr_free(sched->tallocs[i]);
        sched->tallocs[i] = ggml_tallocr_new_from_backend(sched->backends[i], size);
    }

    sched_reset(sched);
}

void ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
    GGML_ASSERT(sched->hash_set.size >= graph->visited_hash_table.size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS);

    sched_split_graph(sched, graph);
    sched_alloc_splits(sched);
    sched_compute_splits(sched);
    sched_reset(sched);
}

ggml_tallocr_t ggml_backend_sched_get_tallocr(ggml_backend_sched_t sched, ggml_backend_t backend) {
    int backend_index = sched_backend_prio(sched, backend);
    return sched->tallocs[backend_index];
}

ggml_backend_buffer_t ggml_backend_sched_get_buffer(ggml_backend_sched_t sched, ggml_backend_t backend) {
    int backend_index = sched_backend_prio(sched, backend);
    return ggml_tallocr_get_buffer(sched->tallocs[backend_index]);
}

void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend) {
    int backend_index = sched_backend_prio(sched, backend);
    GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
    node_allocr(node) = sched->tallocs[backend_index];
}
@@ -1,136 +0,0 @@
#pragma once

#include "ggml.h"
#include "ggml-alloc.h"

#ifdef __cplusplus
extern "C" {
#endif

    //
    // Backend buffer
    //

    struct ggml_backend_buffer;
    typedef struct ggml_backend_buffer * ggml_backend_buffer_t;

    // backend buffer functions
    GGML_API void   ggml_backend_buffer_free          (ggml_backend_buffer_t buffer);
    GGML_API size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer);
    GGML_API void * ggml_backend_buffer_get_base      (ggml_backend_buffer_t buffer);
    GGML_API size_t ggml_backend_buffer_get_size      (ggml_backend_buffer_t buffer);
    GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
    GGML_API void   ggml_backend_buffer_init_tensor   (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
    GGML_API void   ggml_backend_buffer_free_tensor   (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);

    //
    // Backend
    //

    struct ggml_backend;
    typedef struct ggml_backend * ggml_backend_t;
    typedef void * ggml_backend_graph_plan_t;

    GGML_API ggml_backend_t ggml_get_backend(const struct ggml_tensor * tensor);

    GGML_API const char * ggml_backend_name(ggml_backend_t backend);
    GGML_API void         ggml_backend_free(ggml_backend_t backend);

    GGML_API ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size);

    GGML_API size_t ggml_backend_get_alignment(ggml_backend_t backend);

    GGML_API void ggml_backend_tensor_set_async(      struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
    GGML_API void ggml_backend_tensor_get_async(const struct ggml_tensor * tensor,       void * data, size_t offset, size_t size);

    GGML_API void ggml_backend_tensor_set(      struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
    GGML_API void ggml_backend_tensor_get(const struct ggml_tensor * tensor,       void * data, size_t offset, size_t size);

    GGML_API void ggml_backend_synchronize(ggml_backend_t backend);

    GGML_API ggml_backend_graph_plan_t ggml_backend_graph_plan_create (ggml_backend_t backend, struct ggml_cgraph * cgraph);

    GGML_API void ggml_backend_graph_plan_free   (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
    GGML_API void ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
    GGML_API void ggml_backend_graph_compute     (ggml_backend_t backend, struct ggml_cgraph * cgraph);
    GGML_API bool ggml_backend_supports_op       (ggml_backend_t backend, const struct ggml_tensor * op);

    // tensor copy between different backends
    GGML_API void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst);

    //
    // CPU backend
    //

    GGML_API ggml_backend_t ggml_backend_cpu_init(void);

    GGML_API bool ggml_backend_is_cpu(ggml_backend_t backend);
    GGML_API void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads);

    // Create a backend buffer from an existing pointer
    GGML_API ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(ggml_backend_t backend_cpu, void * ptr, size_t size);
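
    // [sketch added for illustration, not part of the original header]
    // Minimal single-backend usage of the CPU API declared above; assumes a
    // ggml_cgraph `gf` whose tensors are already allocated in a buffer that
    // the CPU backend can read and write (e.g. via ggml-alloc). The thread
    // count is a placeholder.
    static void example_compute_on_cpu(struct ggml_cgraph * gf) {
        ggml_backend_t backend_cpu = ggml_backend_cpu_init();
        ggml_backend_cpu_set_n_threads(backend_cpu, 4);
        ggml_backend_graph_compute(backend_cpu, gf);
        ggml_backend_free(backend_cpu);
    }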

    //
    // Backend scheduler
    //

    // The backend scheduler allows for multiple backends to be used together
    // Handles compute buffer allocation, assignment of tensors to backends, and copying of tensors between backends
    // The backends are selected based on:
    // - the backend that supports the operation
    // - the location of the pre-allocated tensors (e.g. the weights)
    /*
      Example usage:

        sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, num_backends);
        // sched is initialized with measure allocators and cannot be used until allocated with a measure graph

        // initialize buffers from a measure graph
        measure_graph = build_graph(sched); // use the allocr to allocate inputs as needed

        // in build_graph:
        build_graph(...) {
            // allocating tensors in a specific backend (optional, recommended: pre-allocate inputs in a different buffer)
            alloc_cpu = ggml_backend_sched_get_allocr(sched, backend_cpu);
            ggml_allocr_alloc(alloc_cpu, tensor);

            // manually assigning nodes to a backend (optional, shouldn't be needed in most cases)
            struct ggml_tensor * node = ggml_mul_mat(ctx, ...);
            ggml_backend_sched_set_node_backend(sched, node, backend_gpu);
        }

        // allocate backend buffers from measure graph
        ggml_backend_sched_init_measure(sched, measure_graph);

        // the scheduler is now ready to compute graphs

        // compute
        graph = build_graph(sched);
        ggml_backend_sched_graph_compute(sched, graph);
    */

    struct ggml_backend_sched;
    typedef struct ggml_backend_sched * ggml_backend_sched_t;

    // Initialize a backend scheduler
    GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends);

    GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched);

    // Initialize backend buffers from a measure graph
    GGML_API void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph);

    GGML_API ggml_tallocr_t        ggml_backend_sched_get_tallocr(ggml_backend_sched_t sched, ggml_backend_t backend);
    GGML_API ggml_backend_buffer_t ggml_backend_sched_get_buffer (ggml_backend_sched_t sched, ggml_backend_t backend);

    GGML_API void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend);

    // Allocate a graph on the backend scheduler
    GGML_API void ggml_backend_sched_graph_compute(
            ggml_backend_sched_t sched,
            struct ggml_cgraph * graph);

#ifdef __cplusplus
}
#endif
@@ -1,249 +0,0 @@
#pragma once

#include "ggml.h"

// GGML internal header

#include <assert.h>
#include <stddef.h>
#include <stdbool.h>
#include <string.h> // memcpy
#include <math.h>   // fabsf

#ifdef __cplusplus
extern "C" {
#endif

// static_assert should be a #define, but if it's not,
// fall back to the _Static_assert C11 keyword.
// if C99 - static_assert is noop
// ref: https://stackoverflow.com/a/53923785/4039976
#ifndef static_assert
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
#define static_assert(cond, msg) _Static_assert(cond, msg)
#else
#define static_assert(cond, msg) struct global_scope_noop_trick
#endif
#endif

// __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
#if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__))
#ifndef __FMA__
#define __FMA__
#endif
#ifndef __F16C__
#define __F16C__
#endif
#ifndef __SSE3__
#define __SSE3__
#endif
#endif

#undef MIN
#undef MAX

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

// 16-bit float
// on Arm, we use __fp16
// on x86, we use uint16_t
#if defined(__ARM_NEON) && !defined(_MSC_VER)

// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
//
//   $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
//
#include <arm_neon.h>

#define GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x))
#define GGML_COMPUTE_FP32_TO_FP16(x) (x)

#define GGML_FP16_TO_FP32(x) ((float) (x))
#define GGML_FP32_TO_FP16(x) (x)

#else

#ifdef __wasm_simd128__
#include <wasm_simd128.h>
#else
#ifdef __POWER9_VECTOR__
#include <altivec.h>
#undef bool
#define bool _Bool
#else
#if defined(_MSC_VER) || defined(__MINGW32__)
#include <intrin.h>
#else
#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
#if !defined(__riscv)
#include <immintrin.h>
#endif
#endif
#endif
#endif
#endif

#ifdef __riscv_v_intrinsic
#include <riscv_vector.h>
#endif

#ifdef __F16C__

#ifdef _MSC_VER
#define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
#define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
#else
#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
#endif

#elif defined(__POWER9_VECTOR__)

#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
/* the inline asm below is about 12% faster than the lookup method */
#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)

static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    register float f;
    register double d;
    __asm__(
        "mtfprd %0,%2\n"
        "xscvhpdp %0,%0\n"
        "frsp %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=f"(f):
        /* in */   "r"(h));
    return f;
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
    register double d;
    register ggml_fp16_t r;
    __asm__( /* xscvdphp can work on double or single precision */
        "xscvdphp %0,%2\n"
        "mffprd %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=r"(r):
        /* in */   "f"(f));
    return r;
}

#else

// FP16 <-> FP32
// ref: https://github.com/Maratyszcza/FP16

static inline float fp32_from_bits(uint32_t w) {
    union {
        uint32_t as_bits;
        float as_value;
    } fp32;
    fp32.as_bits = w;
    return fp32.as_value;
}

static inline uint32_t fp32_to_bits(float f) {
    union {
        float as_value;
        uint32_t as_bits;
    } fp32;
    fp32.as_value = f;
    return fp32.as_bits;
}

static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    const uint32_t w = (uint32_t) h << 16;
    const uint32_t sign = w & UINT32_C(0x80000000);
    const uint32_t two_w = w + w;

    const uint32_t exp_offset = UINT32_C(0xE0) << 23;
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float exp_scale = 0x1.0p-112f;
#else
    const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
#endif
    const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;

    const uint32_t magic_mask = UINT32_C(126) << 23;
    const float magic_bias = 0.5f;
    const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;

    const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
    const uint32_t result = sign |
        (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
    return fp32_from_bits(result);
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float scale_to_inf = 0x1.0p+112f;
    const float scale_to_zero = 0x1.0p-110f;
#else
    const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
    const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
#endif
    float base = (fabsf(f) * scale_to_inf) * scale_to_zero;

    const uint32_t w = fp32_to_bits(f);
    const uint32_t shl1_w = w + w;
    const uint32_t sign = w & UINT32_C(0x80000000);
    uint32_t bias = shl1_w & UINT32_C(0xFF000000);
    if (bias < UINT32_C(0x71000000)) {
        bias = UINT32_C(0x71000000);
    }

    base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
    const uint32_t bits = fp32_to_bits(base);
    const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
    const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
    const uint32_t nonsign = exp_bits + mantissa_bits;
    return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
}

#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)

#endif // __F16C__

#endif // __ARM_NEON

// precomputed f32 table for f16 (256 KB)
// defined in ggml.c, initialized in ggml_init()
extern float ggml_table_f32_f16[1 << 16];

// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32,
// so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON.
// This is also true for POWER9.
#if !defined(GGML_FP16_TO_FP32) || !defined(GGML_FP32_TO_FP16)

inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
    uint16_t s;
    memcpy(&s, &f, sizeof(uint16_t));
    return ggml_table_f32_f16[s];
}

#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x)
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)

#endif
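
// [sketch added for illustration, not part of the original header]
// Quick sanity check of the FP16 <-> FP32 path above. GGML_FP16_TO_FP32 may go
// through the lookup table ggml_table_f32_f16, which (per the comment above) is
// filled by ggml_init(), so a context is created first; the mem_size value is a
// placeholder assumption.
#include <stdio.h>

static void example_fp16_roundtrip(void) {
    struct ggml_init_params ip = { /*.mem_size =*/ 1024, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true };
    struct ggml_context * ctx = ggml_init(ip); // initializes ggml_table_f32_f16

    const float x = 0.3333f;
    ggml_fp16_t h = GGML_FP32_TO_FP16(x);
    float       y = GGML_FP16_TO_FP32(h);
    printf("%f -> fp16 -> %f\n", x, y); // y is x rounded to the nearest half-precision value

    ggml_free(ctx);
}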

#define GGML_HASHTABLE_FULL ((size_t)-1)
#define GGML_HASHTABLE_ALREADY_EXISTS ((size_t)-2)

bool   ggml_hash_contains      (const struct ggml_hash_set hash_set, struct ggml_tensor * key);

// returns GGML_HASHTABLE_FULL if table is full, otherwise the current index of the key or where it should be inserted
size_t ggml_hash_find          (const struct ggml_hash_set hash_set, struct ggml_tensor * key);

// returns GGML_HASHTABLE_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
size_t ggml_hash_insert        (      struct ggml_hash_set hash_set, struct ggml_tensor * key);

// return index, asserts if table is full
size_t ggml_hash_find_or_insert(      struct ggml_hash_set hash_set, struct ggml_tensor * key);

#ifdef __cplusplus
}
#endif
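
// [sketch added for illustration, not part of the original header]
// Example use of the hash-set helpers above: checking whether a tensor belongs
// to a graph via the visited_hash_table that ggml_cgraph carries (the same
// field ggml-backend.c sizes its own tables from).
static bool example_tensor_in_graph(const struct ggml_cgraph * graph, struct ggml_tensor * t) {
    return ggml_hash_contains(graph->visited_hash_table, t);
}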
File diff suppressed because it is too large
@@ -1,224 +0,0 @@
#pragma once

#include "ggml-impl.h"

// GGML internal header

#include <stdint.h>
#include <stddef.h>

#define QK4_0 32
typedef struct {
    ggml_fp16_t d;          // delta
    uint8_t qs[QK4_0 / 2];  // nibbles / quants
} block_q4_0;
static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding");

#define QK4_1 32
typedef struct {
    ggml_fp16_t d;          // delta
    ggml_fp16_t m;          // min
    uint8_t qs[QK4_1 / 2];  // nibbles / quants
} block_q4_1;
static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_fp16_t) + QK4_1 / 2, "wrong q4_1 block size/padding");

#define QK5_0 32
typedef struct {
    ggml_fp16_t d;         // delta
    uint8_t qh[4];         // 5-th bit of quants
    uint8_t qs[QK5_0 / 2]; // nibbles / quants
} block_q5_0;
static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding");

#define QK5_1 32
typedef struct {
    ggml_fp16_t d;         // delta
    ggml_fp16_t m;         // min
    uint8_t qh[4];         // 5-th bit of quants
    uint8_t qs[QK5_1 / 2]; // nibbles / quants
} block_q5_1;
static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding");

#define QK8_0 32
typedef struct {
    ggml_fp16_t d;         // delta
    int8_t  qs[QK8_0];     // quants
} block_q8_0;
static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding");

#define QK8_1 32
typedef struct {
    float d;               // delta
    float s;               // d * sum(qs[i])
    int8_t qs[QK8_1];      // quants
} block_q8_1;
static_assert(sizeof(block_q8_1) == 2*sizeof(float) + QK8_1, "wrong q8_1 block size/padding");

//
// Super-block quantization structures
//

// Super-block size
#ifdef GGML_QKK_64
#define QK_K 64
#define K_SCALE_SIZE 4
#else
#define QK_K 256
#define K_SCALE_SIZE 12
#endif

// 2-bit quantization
// weight is represented as x = a * q + b
// 16 blocks of 16 elements each
// Effectively 2.5625 bits per weight
typedef struct {
    uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
    uint8_t qs[QK_K/4];      // quants
    ggml_fp16_t d;           // super-block scale for quantized scales
    ggml_fp16_t dmin;        // super-block scale for quantized mins
} block_q2_K;
static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_fp16_t) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding");

// 3-bit quantization
// weight is represented as x = a * q
// 16 blocks of 16 elements each
// Effectively 3.4375 bits per weight
#ifdef GGML_QKK_64
typedef struct {
    uint8_t hmask[QK_K/8]; // quants - high bit
    uint8_t qs[QK_K/4];    // quants - low 2 bits
    uint8_t scales[2];
    ggml_fp16_t d;         // super-block scale
} block_q3_K;
static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + 2, "wrong q3_K block size/padding");
#else
typedef struct {
    uint8_t hmask[QK_K/8]; // quants - high bit
    uint8_t qs[QK_K/4];    // quants - low 2 bits
    uint8_t scales[12];    // scales, quantized with 6 bits
    ggml_fp16_t d;         // super-block scale
} block_q3_K;
static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + 12, "wrong q3_K block size/padding");
#endif

// 4-bit quantization
// 8 blocks of 32 elements each
// weight is represented as x = a * q + b
// Effectively 4.5 bits per weight
#ifdef GGML_QKK_64
typedef struct {
    ggml_fp16_t d[2];    // super-block scales/mins
    uint8_t scales[2];   // 4-bit block scales/mins
    uint8_t qs[QK_K/2];  // 4--bit quants
} block_q4_K;
static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + QK_K/2 + 2, "wrong q4_K block size/padding");
#else
typedef struct {
    ggml_fp16_t d;                // super-block scale for quantized scales
    ggml_fp16_t dmin;             // super-block scale for quantized mins
    uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
    uint8_t qs[QK_K/2];           // 4--bit quants
} block_q4_K;
static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2, "wrong q4_K block size/padding");
#endif

// 5-bit quantization
// 8 blocks of 32 elements each
// weight is represented as x = a * q + b
// Effectively 5.5 bits per weight
#ifdef GGML_QKK_64
typedef struct {
    ggml_fp16_t d;            // super-block scale
    int8_t scales[QK_K/16];   // 8-bit block scales
    uint8_t qh[QK_K/8];       // quants, high bit
    uint8_t qs[QK_K/2];       // quants, low 4 bits
} block_q5_K;
static_assert(sizeof(block_q5_K) == sizeof(ggml_fp16_t) + QK_K/2 + QK_K/8 + QK_K/16, "wrong q5_K block size/padding");
#else
typedef struct {
    ggml_fp16_t d;                // super-block scale for quantized scales
    ggml_fp16_t dmin;             // super-block scale for quantized mins
    uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
    uint8_t qh[QK_K/8];           // quants, high bit
    uint8_t qs[QK_K/2];           // quants, low 4 bits
} block_q5_K;
static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2 + QK_K/8, "wrong q5_K block size/padding");
#endif

// 6-bit quantization
// weight is represented as x = a * q
// 16 blocks of 16 elements each
// Effectively 6.5625 bits per weight
typedef struct {
    uint8_t ql[QK_K/2];      // quants, lower 4 bits
    uint8_t qh[QK_K/4];      // quants, upper 2 bits
    int8_t  scales[QK_K/16]; // scales, quantized with 8 bits
    ggml_fp16_t d;           // super-block scale
} block_q6_K;
static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + QK_K / 16 + 3*QK_K/4, "wrong q6_K block size/padding");

// This is only used for intermediate quantization and dot products
typedef struct {
    float   d;              // delta
    int8_t  qs[QK_K];       // quants
    int16_t bsums[QK_K/16]; // sum of quants in groups of 16
} block_q8_K;
static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K/16*sizeof(int16_t), "wrong q8_K block size/padding");
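
// [sketch added for illustration, not part of the original header]
// The "bits per weight" figures quoted in the comments above follow directly
// from the struct sizes (shown for the default QK_K == 256 build):
#include <stdio.h>

static void example_print_bits_per_weight(void) {
    printf("q4_0: %.4f bpw\n", 8.0*sizeof(block_q4_0)/QK4_0); //  18 bytes / 32 weights  = 4.5000
    printf("q3_K: %.4f bpw\n", 8.0*sizeof(block_q3_K)/QK_K);  // 110 bytes / 256 weights = 3.4375
    printf("q4_K: %.4f bpw\n", 8.0*sizeof(block_q4_K)/QK_K);  // 144 bytes / 256 weights = 4.5000
    printf("q5_K: %.4f bpw\n", 8.0*sizeof(block_q5_K)/QK_K);  // 176 bytes / 256 weights = 5.5000
    printf("q6_K: %.4f bpw\n", 8.0*sizeof(block_q6_K)/QK_K);  // 210 bytes / 256 weights = 6.5625
}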

// Quantization
void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k);
void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k);
void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k);
void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k);
void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k);
void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k);

void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int k);
void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int k);
void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int k);
void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int k);
void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k);
void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k);

void quantize_row_q4_0(const float * restrict x, void * restrict y, int k);
void quantize_row_q4_1(const float * restrict x, void * restrict y, int k);
void quantize_row_q5_0(const float * restrict x, void * restrict y, int k);
void quantize_row_q5_1(const float * restrict x, void * restrict y, int k);
void quantize_row_q8_0(const float * restrict x, void * restrict y, int k);
void quantize_row_q8_1(const float * restrict x, void * restrict y, int k);

void quantize_row_q2_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q3_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q4_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q5_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q6_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q8_K(const float * restrict x, void * restrict y, int k);

// Dequantization
void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k);
void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k);
void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k);
void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k);
void dequantize_row_q8_0(const block_q8_0 * restrict x, float * restrict y, int k);
//void dequantize_row_q8_1(const block_q8_1 * restrict x, float * restrict y, int k);

void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int k);
void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k);
void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int k);
void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int k);
void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int k);
void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k);

// Dot product
void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q5_0_q8_0(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q5_1_q8_1(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, const void * restrict vx, const void * restrict vy);

void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
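
// [sketch added for illustration, not part of the original header]
// Round-tripping one q4_0 block (32 floats) through the reference kernels
// declared above; the quantization is lossy, so `out` only approximates `src`.
#include <stdio.h>

static void example_q4_0_roundtrip(const float src[QK4_0]) {
    block_q4_0 blk;
    float out[QK4_0];

    quantize_row_q4_0_reference(src, &blk, QK4_0); // 32 floats -> one 18-byte block
    dequantize_row_q4_0(&blk, out, QK4_0);         // back to 32 floats

    for (int i = 0; i < QK4_0; i++) {
        printf("%2d: %8.4f -> %8.4f\n", i, src[i], out[i]);
    }
}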

bindings/ruby/ext/metal-embed.mk (new file, 14 lines)
@@ -0,0 +1,14 @@
ggml-metal-embed.o: \
	ggml-metal.metal \
	ggml-common.h
	@echo "Embedding Metal library"
	@sed -e '/#include "ggml-common.h"/r ggml-common.h' -e '/#include "ggml-common.h"/d' < ggml-metal.metal > ggml-metal-embed.metal
	$(eval TEMP_ASSEMBLY=$(shell mktemp))
	@echo ".section __DATA, __ggml_metallib" > $(TEMP_ASSEMBLY)
	@echo ".globl _ggml_metallib_start" >> $(TEMP_ASSEMBLY)
	@echo "_ggml_metallib_start:" >> $(TEMP_ASSEMBLY)
	@echo ".incbin \"ggml-metal-embed.metal\"" >> $(TEMP_ASSEMBLY)
	@echo ".globl _ggml_metallib_end" >> $(TEMP_ASSEMBLY)
	@echo "_ggml_metallib_end:" >> $(TEMP_ASSEMBLY)
	@$(AS) $(TEMP_ASSEMBLY) -o $@
	@rm -f ${TEMP_ASSEMBLY}
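
A sketch of how the symbols emitted by this rule are typically consumed on the C side. This is an illustration of the common `.incbin` pattern, not code taken from this diff; on Mach-O the assembly labels `_ggml_metallib_start`/`_ggml_metallib_end` correspond to the C identifiers below.

// hypothetical consumer of the embedded Metal library
extern const char ggml_metallib_start[];
extern const char ggml_metallib_end[];

static size_t example_metallib_size(void) {
    return (size_t)(ggml_metallib_end - ggml_metallib_start); // size in bytes of ggml-metal-embed.metal
}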
File diff suppressed because it is too large
@@ -3,6 +3,13 @@
 #include "whisper.h"
 
+typedef struct {
+  VALUE *context;
+  VALUE user_data;
+  VALUE callback;
+  VALUE callbacks;
+} ruby_whisper_callback_container;
+
 typedef struct {
   struct whisper_context *context;
 } ruby_whisper;
@@ -10,6 +17,9 @@ typedef struct {
 typedef struct {
   struct whisper_full_params params;
   bool diarize;
+  ruby_whisper_callback_container *new_segment_callback_container;
+  ruby_whisper_callback_container *progress_callback_container;
+  ruby_whisper_callback_container *abort_callback_container;
 } ruby_whisper_params;
 
 #endif

bindings/ruby/extsources.yaml (new file, 31 lines)
@@ -0,0 +1,31 @@
---
- ../../src/whisper.cpp
- ../../include/whisper.h
- ../../ggml/src/ggml.c
- ../../ggml/src/ggml-cpu.c
- ../../ggml/src/ggml-impl.h
- ../../ggml/src/ggml-aarch64.h
- ../../ggml/src/ggml-aarch64.c
- ../../ggml/src/ggml-alloc.c
- ../../ggml/src/ggml-backend-impl.h
- ../../ggml/src/ggml-backend.cpp
- ../../ggml/src/ggml-common.h
- ../../ggml/src/ggml-quants.h
- ../../ggml/src/ggml-quants.c
- ../../ggml/src/ggml-cpu-impl.h
- ../../ggml/src/ggml-metal.m
- ../../ggml/src/ggml-metal.metal
- ../../ggml/src/ggml-blas.cpp
- ../../ggml/include/ggml.h
- ../../ggml/include/ggml-alloc.h
- ../../ggml/include/ggml-backend.h
- ../../ggml/include/ggml-cpu.h
- ../../ggml/include/ggml-cuda.h
- ../../ggml/include/ggml-kompute.h
- ../../ggml/include/ggml-metal.h
- ../../ggml/include/ggml-sycl.h
- ../../ggml/include/ggml-vulkan.h
- ../../ggml/include/ggml-blas.h
- ../../scripts/get-flags.mk
- ../../examples/dr_wav.h
- ../../LICENSE

bindings/ruby/tests/helper.rb (new file, 7 lines)
@@ -0,0 +1,7 @@
require "test/unit"
require "whisper"

class TestBase < Test::Unit::TestCase
  MODEL = File.join(__dir__, "..", "..", "..", "models", "ggml-base.en.bin")
  AUDIO = File.join(__dir__, "..", "..", "..", "samples", "jfk.wav")
end

bindings/ruby/tests/test_callback.rb (new file, 163 lines)
@@ -0,0 +1,163 @@
require "test/unit"
require "whisper"

class TestCallback < Test::Unit::TestCase
  TOPDIR = File.expand_path(File.join(File.dirname(__FILE__), '..'))

  def setup
    GC.start
    @params = Whisper::Params.new
    @whisper = Whisper::Context.new(File.join(TOPDIR, '..', '..', 'models', 'ggml-base.en.bin'))
    @audio = File.join(TOPDIR, '..', '..', 'samples', 'jfk.wav')
  end

  def test_new_segment_callback
    @params.new_segment_callback = ->(context, state, n_new, user_data) {
      assert_kind_of Integer, n_new
      assert n_new > 0
      assert_same @whisper, context

      n_segments = context.full_n_segments
      n_new.times do |i|
        i_segment = n_segments - 1 + i
        start_time = context.full_get_segment_t0(i_segment) * 10
        end_time = context.full_get_segment_t1(i_segment) * 10
        text = context.full_get_segment_text(i_segment)

        assert_kind_of Integer, start_time
        assert start_time >= 0
        assert_kind_of Integer, end_time
        assert end_time > 0
        assert_match /ask not what your country can do for you, ask what you can do for your country/, text if i_segment == 0
      end
    }

    @whisper.transcribe(@audio, @params)
  end

  def test_new_segment_callback_closure
    search_word = "what"
    @params.new_segment_callback = ->(context, state, n_new, user_data) {
      n_segments = context.full_n_segments
      n_new.times do |i|
        i_segment = n_segments - 1 + i
        text = context.full_get_segment_text(i_segment)
        if text.include?(search_word)
          t0 = context.full_get_segment_t0(i_segment)
          t1 = context.full_get_segment_t1(i_segment)
          raise "search word '#{search_word}' found at between #{t0} and #{t1}"
        end
      end
    }

    assert_raise RuntimeError do
      @whisper.transcribe(@audio, @params)
    end
  end

  def test_new_segment_callback_user_data
    udata = Object.new
    @params.new_segment_callback_user_data = udata
    @params.new_segment_callback = ->(context, state, n_new, user_data) {
      assert_same udata, user_data
    }

    @whisper.transcribe(@audio, @params)
  end

  def test_new_segment_callback_user_data_gc
    @params.new_segment_callback_user_data = "My user data"
    @params.new_segment_callback = ->(context, state, n_new, user_data) {
      assert_equal "My user data", user_data
    }
    GC.start

    assert_same @whisper, @whisper.transcribe(@audio, @params)
  end

  def test_progress_callback
    first = nil
    last = nil
    @params.progress_callback = ->(context, state, progress, user_data) {
      assert_kind_of Integer, progress
      assert 0 <= progress && progress <= 100
      assert_same @whisper, context
      first = progress if first.nil?
      last = progress
    }
    @whisper.transcribe(@audio, @params)
    assert_equal 0, first
    assert_equal 100, last
  end

  def test_progress_callback_user_data
    udata = Object.new
    @params.progress_callback_user_data = udata
    @params.progress_callback = ->(context, state, n_new, user_data) {
      assert_same udata, user_data
    }

    @whisper.transcribe(@audio, @params)
  end

  def test_on_progress
    first = nil
    last = nil
    @params.on_progress do |progress|
      assert_kind_of Integer, progress
      assert 0 <= progress && progress <= 100
      first = progress if first.nil?
      last = progress
    end
    @whisper.transcribe(@audio, @params)
    assert_equal 0, first
    assert_equal 100, last
  end

  def test_abort_callback
    i = 0
    @params.abort_callback = ->(user_data) {
      assert_nil user_data
      i += 1
      return false
    }
    @whisper.transcribe(@audio, @params)
    assert i > 0
  end

  def test_abort_callback_abort
    i = 0
    @params.abort_callback = ->(user_data) {
      i += 1
      return i == 3
    }
    @whisper.transcribe(@audio, @params)
    assert_equal 3, i
  end

  def test_abort_callback_user_data
    udata = Object.new
    @params.abort_callback_user_data = udata
    yielded = nil
    @params.abort_callback = ->(user_data) {
      yielded = user_data
    }
    @whisper.transcribe(@audio, @params)
    assert_same udata, yielded
  end

  def test_abort_on
    do_abort = false
    aborted_from_callback = false
    @params.on_new_segment do |segment|
      do_abort = true if segment.text.match? /ask/
    end
    i = 0
    @params.abort_on do
      i += 1
      do_abort
    end
    @whisper.transcribe(@audio, @params)
    assert i > 0
  end
end

bindings/ruby/tests/test_model.rb (new file, 44 lines)
@@ -0,0 +1,44 @@
require_relative "helper"

class TestModel < TestBase
  def test_model
    whisper = Whisper::Context.new(MODEL)
    assert_instance_of Whisper::Model, whisper.model
  end

  def test_attributes
    whisper = Whisper::Context.new(MODEL)
    model = whisper.model

    assert_equal 51864, model.n_vocab
    assert_equal 1500, model.n_audio_ctx
    assert_equal 512, model.n_audio_state
    assert_equal 8, model.n_audio_head
    assert_equal 6, model.n_audio_layer
    assert_equal 448, model.n_text_ctx
    assert_equal 512, model.n_text_state
    assert_equal 8, model.n_text_head
    assert_equal 6, model.n_text_layer
    assert_equal 80, model.n_mels
    assert_equal 1, model.ftype
    assert_equal "base", model.type
  end

  def test_gc
    model = Whisper::Context.new(MODEL).model
    GC.start

    assert_equal 51864, model.n_vocab
    assert_equal 1500, model.n_audio_ctx
    assert_equal 512, model.n_audio_state
    assert_equal 8, model.n_audio_head
    assert_equal 6, model.n_audio_layer
    assert_equal 448, model.n_text_ctx
    assert_equal 512, model.n_text_state
    assert_equal 8, model.n_text_head
    assert_equal 6, model.n_text_layer
    assert_equal 80, model.n_mels
    assert_equal 1, model.ftype
    assert_equal "base", model.type
  end
end

bindings/ruby/tests/test_package.rb (new file, 31 lines)
@@ -0,0 +1,31 @@
require_relative "helper"
require 'tempfile'
require 'tmpdir'
require 'shellwords'

class TestPackage < TestBase
  def test_build
    Tempfile.create do |file|
      assert system("gem", "build", "whispercpp.gemspec", "--output", file.to_path.shellescape, exception: true)
      assert file.size > 0
      assert_path_exist file.to_path
    end
  end

  sub_test_case "Building binary on installation" do
    def setup
      system "rake", "build", exception: true
    end

    def test_install
      match_data = `rake -Tbuild`.match(/(whispercpp-(.+)\.gem)/)
      filename = match_data[1]
      version = match_data[2]
      basename = "whisper.#{RbConfig::CONFIG["DLEXT"]}"
      Dir.mktmpdir do |dir|
        system "gem", "install", "--install-dir", dir.shellescape, "pkg/#{filename.shellescape}", exception: true
        assert_path_exist File.join(dir, "gems/whispercpp-#{version}/lib", basename)
      end
    end
  end
end
bindings/ruby/tests/test_params.rb (new file)
@@ -0,0 +1,154 @@
require_relative "helper"

class TestParams < TestBase
  def setup
    @params = Whisper::Params.new
  end

  def test_language
    @params.language = "en"
    assert_equal @params.language, "en"
    @params.language = "auto"
    assert_equal @params.language, "auto"
  end

  def test_offset
    @params.offset = 10_000
    assert_equal @params.offset, 10_000
    @params.offset = 0
    assert_equal @params.offset, 0
  end

  def test_duration
    @params.duration = 60_000
    assert_equal @params.duration, 60_000
    @params.duration = 0
    assert_equal @params.duration, 0
  end

  def test_max_text_tokens
    @params.max_text_tokens = 300
    assert_equal @params.max_text_tokens, 300
    @params.max_text_tokens = 0
    assert_equal @params.max_text_tokens, 0
  end

  def test_translate
    @params.translate = true
    assert @params.translate
    @params.translate = false
    assert !@params.translate
  end

  def test_no_context
    @params.no_context = true
    assert @params.no_context
    @params.no_context = false
    assert !@params.no_context
  end

  def test_single_segment
    @params.single_segment = true
    assert @params.single_segment
    @params.single_segment = false
    assert !@params.single_segment
  end

  def test_print_special
    @params.print_special = true
    assert @params.print_special
    @params.print_special = false
    assert !@params.print_special
  end

  def test_print_progress
    @params.print_progress = true
    assert @params.print_progress
    @params.print_progress = false
    assert !@params.print_progress
  end

  def test_print_realtime
    @params.print_realtime = true
    assert @params.print_realtime
    @params.print_realtime = false
    assert !@params.print_realtime
  end

  def test_print_timestamps
    @params.print_timestamps = true
    assert @params.print_timestamps
    @params.print_timestamps = false
    assert !@params.print_timestamps
  end

  def test_suppress_blank
    @params.suppress_blank = true
    assert @params.suppress_blank
    @params.suppress_blank = false
    assert !@params.suppress_blank
  end

  def test_suppress_non_speech_tokens
    @params.suppress_non_speech_tokens = true
    assert @params.suppress_non_speech_tokens
    @params.suppress_non_speech_tokens = false
    assert !@params.suppress_non_speech_tokens
  end

  def test_token_timestamps
    @params.token_timestamps = true
    assert @params.token_timestamps
    @params.token_timestamps = false
    assert !@params.token_timestamps
  end

  def test_split_on_word
    @params.split_on_word = true
    assert @params.split_on_word
    @params.split_on_word = false
    assert !@params.split_on_word
  end

  def test_initial_prompt
    assert_nil @params.initial_prompt
    @params.initial_prompt = "You are a polite person."
    assert_equal "You are a polite person.", @params.initial_prompt
  end

  def test_temperature
    assert_equal 0.0, @params.temperature
    @params.temperature = 0.5
    assert_equal 0.5, @params.temperature
  end

  def test_max_initial_ts
    assert_equal 1.0, @params.max_initial_ts
    @params.max_initial_ts = 600.0
    assert_equal 600.0, @params.max_initial_ts
  end

  def test_length_penalty
    assert_equal -1.0, @params.length_penalty
    @params.length_penalty = 0.5
    assert_equal 0.5, @params.length_penalty
  end

  def test_temperature_inc
    assert_in_delta 0.2, @params.temperature_inc
    @params.temperature_inc = 0.5
    assert_in_delta 0.5, @params.temperature_inc
  end

  def test_entropy_thold
    assert_in_delta 2.4, @params.entropy_thold
    @params.entropy_thold = 3.0
    assert_in_delta 3.0, @params.entropy_thold
  end

  def test_logprob_thold
    assert_in_delta -1.0, @params.logprob_thold
    @params.logprob_thold = -0.5
    assert_in_delta -0.5, @params.logprob_thold
  end
end
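Each accessor above maps onto a field of the underlying whisper_full_params struct, so a typical caller sets a handful of them and hands the object to transcribe. A minimal hedged sketch, with placeholder model and audio paths:

```ruby
require "whisper"

params = Whisper::Params.new
params.language       = "auto"
params.translate      = false
params.print_progress = false
params.initial_prompt = "Casual conversation between two speakers."

# Placeholder paths; use a downloaded ggml model and a 16 kHz mono WAV file.
whisper = Whisper::Context.new("models/ggml-base.en.bin")
whisper.transcribe("samples/jfk.wav", params) do |whole_text|
  puts whole_text
end
```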
bindings/ruby/tests/test_segment.rb (new file)
@@ -0,0 +1,83 @@
require_relative "helper"

class TestSegment < TestBase
  class << self
    attr_reader :whisper

    def startup
      @whisper = Whisper::Context.new(TestBase::MODEL)
      params = Whisper::Params.new
      params.print_timestamps = false
      @whisper.transcribe(TestBase::AUDIO, params)
    end
  end

  def test_iteration
    whisper.each_segment do |segment|
      assert_instance_of Whisper::Segment, segment
    end
  end

  def test_enumerator
    enum = whisper.each_segment
    assert_instance_of Enumerator, enum
    enum.to_a.each_with_index do |segment, index|
      assert_instance_of Whisper::Segment, segment
      assert_kind_of Integer, index
    end
  end

  def test_start_time
    i = 0
    whisper.each_segment do |segment|
      assert_equal 0, segment.start_time if i == 0
      i += 1
    end
  end

  def test_end_time
    i = 0
    whisper.each_segment do |segment|
      assert_equal whisper.full_get_segment_t1(i) * 10, segment.end_time
      i += 1
    end
  end

  def test_on_new_segment
    params = Whisper::Params.new
    seg = nil
    index = 0
    params.on_new_segment do |segment|
      assert_instance_of Whisper::Segment, segment
      if index == 0
        seg = segment
        assert_equal 0, segment.start_time
        assert_match /ask not what your country can do for you, ask what you can do for your country/, segment.text
      end
      index += 1
    end
    whisper.transcribe(AUDIO, params)
    assert_equal 0, seg.start_time
    assert_match /ask not what your country can do for you, ask what you can do for your country/, seg.text
  end

  def test_on_new_segment_twice
    params = Whisper::Params.new
    seg = nil
    params.on_new_segment do |segment|
      seg = segment
      return
    end
    params.on_new_segment do |segment|
      assert_same seg, segment
      return
    end
    whisper.transcribe(AUDIO, params)
  end

  private

  def whisper
    self.class.whisper
  end
end
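Since each_segment yields segment objects whose start_time and end_time are in milliseconds (the end-time test above multiplies whisper's 10 ms ticks by 10), a subtitle-style dump takes only a few lines. A hedged sketch with placeholder paths:

```ruby
require "whisper"

whisper = Whisper::Context.new("models/ggml-base.en.bin") # placeholder path
params  = Whisper::Params.new
whisper.transcribe("samples/jfk.wav", params)             # placeholder path

whisper.each_segment do |segment|
  # start_time and end_time are reported in milliseconds
  printf("[%6d ms -> %6d ms] %s\n", segment.start_time, segment.end_time, segment.text)
end
```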
bindings/ruby/tests/test_whisper.rb
@@ -1,138 +1,127 @@
-TOPDIR = File.expand_path(File.join(File.dirname(__FILE__), '..'))
-EXTDIR = File.join(TOPDIR, 'ext')
-#$LIBDIR = File.join(TOPDIR, 'lib')
-#$:.unshift(LIBDIR)
-$:.unshift(EXTDIR)
+require_relative "helper"
+require "stringio"
 
-require 'whisper'
-require 'test/unit'
+# Exists to detect memory-related bug
+Whisper.log_set ->(level, buffer, user_data) {}, nil
 
-class TestWhisper < Test::Unit::TestCase
+class TestWhisper < TestBase
   def setup
     @params = Whisper::Params.new
   end
 
-  def test_language
-    @params.language = "en"
-    assert_equal @params.language, "en"
-    @params.language = "auto"
-    assert_equal @params.language, "auto"
-  end
-
-  def test_offset
-    @params.offset = 10_000
-    assert_equal @params.offset, 10_000
-    @params.offset = 0
-    assert_equal @params.offset, 0
-  end
-
-  def test_duration
-    @params.duration = 60_000
-    assert_equal @params.duration, 60_000
-    @params.duration = 0
-    assert_equal @params.duration, 0
-  end
-
-  def test_max_text_tokens
-    @params.max_text_tokens = 300
-    assert_equal @params.max_text_tokens, 300
-    @params.max_text_tokens = 0
-    assert_equal @params.max_text_tokens, 0
-  end
-
-  def test_translate
-    @params.translate = true
-    assert @params.translate
-    @params.translate = false
-    assert !@params.translate
-  end
-
-  def test_no_context
-    @params.no_context = true
-    assert @params.no_context
-    @params.no_context = false
-    assert !@params.no_context
-  end
-
-  def test_single_segment
-    @params.single_segment = true
-    assert @params.single_segment
-    @params.single_segment = false
-    assert !@params.single_segment
-  end
-
-  def test_print_special
-    @params.print_special = true
-    assert @params.print_special
-    @params.print_special = false
-    assert !@params.print_special
-  end
-
-  def test_print_progress
-    @params.print_progress = true
-    assert @params.print_progress
-    @params.print_progress = false
-    assert !@params.print_progress
-  end
-
-  def test_print_realtime
-    @params.print_realtime = true
-    assert @params.print_realtime
-    @params.print_realtime = false
-    assert !@params.print_realtime
-  end
-
-  def test_print_timestamps
-    @params.print_timestamps = true
-    assert @params.print_timestamps
-    @params.print_timestamps = false
-    assert !@params.print_timestamps
-  end
-
-  def test_suppress_blank
-    @params.suppress_blank = true
-    assert @params.suppress_blank
-    @params.suppress_blank = false
-    assert !@params.suppress_blank
-  end
-
-  def test_suppress_non_speech_tokens
-    @params.suppress_non_speech_tokens = true
-    assert @params.suppress_non_speech_tokens
-    @params.suppress_non_speech_tokens = false
-    assert !@params.suppress_non_speech_tokens
-  end
-
-  def test_token_timestamps
-    @params.token_timestamps = true
-    assert @params.token_timestamps
-    @params.token_timestamps = false
-    assert !@params.token_timestamps
-  end
-
-  def test_split_on_word
-    @params.split_on_word = true
-    assert @params.split_on_word
-    @params.split_on_word = false
-    assert !@params.split_on_word
-  end
-
-  def test_speed_up
-    @params.speed_up = true
-    assert @params.speed_up
-    @params.speed_up = false
-    assert !@params.speed_up
-  end
-
   def test_whisper
-    @whisper = Whisper::Context.new(File.join(TOPDIR, '..', '..', 'models', 'ggml-base.en.bin'))
+    @whisper = Whisper::Context.new(MODEL)
     params = Whisper::Params.new
     params.print_timestamps = false
 
-    jfk = File.join(TOPDIR, '..', '..', 'samples', 'jfk.wav')
-    @whisper.transcribe(jfk, params) {|text|
+    @whisper.transcribe(AUDIO, params) {|text|
       assert_match /ask not what your country can do for you, ask what you can do for your country/, text
     }
   end
+
+  sub_test_case "After transcription" do
+    class << self
+      attr_reader :whisper
+
+      def startup
+        @whisper = Whisper::Context.new(TestBase::MODEL)
+        params = Whisper::Params.new
+        params.print_timestamps = false
+        @whisper.transcribe(TestBase::AUDIO, params)
+      end
+    end
+
+    def whisper
+      self.class.whisper
+    end
+
+    def test_full_n_segments
+      assert_equal 1, whisper.full_n_segments
+    end
+
+    def test_full_lang_id
+      assert_equal 0, whisper.full_lang_id
+    end
+
+    def test_full_get_segment_t0
+      assert_equal 0, whisper.full_get_segment_t0(0)
+      assert_raise IndexError do
+        whisper.full_get_segment_t0(whisper.full_n_segments)
+      end
+      assert_raise IndexError do
+        whisper.full_get_segment_t0(-1)
+      end
+    end
+
+    def test_full_get_segment_t1
+      t1 = whisper.full_get_segment_t1(0)
+      assert_kind_of Integer, t1
+      assert t1 > 0
+      assert_raise IndexError do
+        whisper.full_get_segment_t1(whisper.full_n_segments)
+      end
+    end
+
+    def test_full_get_segment_speaker_turn_next
+      assert_false whisper.full_get_segment_speaker_turn_next(0)
+    end
+
+    def test_full_get_segment_text
+      assert_match /ask not what your country can do for you, ask what you can do for your country/, whisper.full_get_segment_text(0)
+    end
+  end
+
+  def test_lang_max_id
+    assert_kind_of Integer, Whisper.lang_max_id
+  end
+
+  def test_lang_id
+    assert_equal 0, Whisper.lang_id("en")
+    assert_raise ArgumentError do
+      Whisper.lang_id("non existing language")
+    end
+  end
+
+  def test_lang_str
+    assert_equal "en", Whisper.lang_str(0)
+    assert_raise IndexError do
+      Whisper.lang_str(Whisper.lang_max_id + 1)
+    end
+  end
+
+  def test_lang_str_full
+    assert_equal "english", Whisper.lang_str_full(0)
+    assert_raise IndexError do
+      Whisper.lang_str_full(Whisper.lang_max_id + 1)
+    end
+  end
+
+  def test_log_set
+    user_data = Object.new
+    logs = []
+    log_callback = ->(level, buffer, udata) {
+      logs << [level, buffer, udata]
+    }
+    Whisper.log_set log_callback, user_data
+    Whisper::Context.new(MODEL)
+
+    assert logs.length > 30
+    logs.each do |log|
+      assert_equal Whisper::LOG_LEVEL_INFO, log[0]
+      assert_same user_data, log[2]
+    end
+  end
+
+  def test_log_suppress
+    stderr = $stderr
+    Whisper.log_set ->(level, buffer, user_data) {
+      # do nothing
+    }, nil
+    dev = StringIO.new("")
+    $stderr = dev
+    Whisper::Context.new(MODEL)
+    assert_empty dev.string
+  ensure
+    $stderr = stderr
+  end
 end
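The Whisper.log_set hook introduced here is also how applications capture or silence the native loader output, not just the tests. A minimal hedged sketch (the model path is a placeholder):

```ruby
require "whisper"

messages = []

# Route whisper.cpp's native logging into Ruby; the third callback argument
# receives whatever object is passed as user_data (nil here).
Whisper.log_set ->(level, buffer, user_data) {
  messages << buffer if level == Whisper::LOG_LEVEL_INFO
}, nil

Whisper::Context.new("models/ggml-base.en.bin") # placeholder path
puts "captured #{messages.length} info-level log lines"
```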
bindings/ruby/whispercpp.gemspec (new file)
@@ -0,0 +1,36 @@
require "yaml"

Gem::Specification.new do |s|
  s.name    = "whispercpp"
  s.authors = ["Georgi Gerganov", "Todd A. Fisher"]
  s.version = '1.3.0'
  s.date    = '2024-05-14'
  s.description = %q{High-performance inference of OpenAI's Whisper automatic speech recognition (ASR) model via Ruby}
  s.email   = 'todd.fisher@gmail.com'
  s.extra_rdoc_files = ['LICENSE', 'README.md']

  s.files = `git ls-files . -z`.split("\x0") +
            YAML.load_file("extsources.yaml").collect {|file|
              basename = File.basename(file)
              if s.extra_rdoc_files.include?(basename)
                basename
              else
                File.join("ext", basename)
              end
            }

  s.summary = %q{Ruby whisper.cpp bindings}
  s.test_files = ["tests/test_whisper.rb"]

  s.extensions << 'ext/extconf.rb'

  #### Documentation and testing.
  s.homepage = 'https://github.com/ggerganov/whisper.cpp'
  s.rdoc_options = ['--main', '../../README.md']

  s.platform = Gem::Platform::RUBY

  s.licenses = ['MIT']
end
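Because the packaged file list is computed when the gemspec is evaluated (git ls-files plus extsources.yaml), it can be inspected without building the gem. A hedged sketch, assuming it runs from bindings/ruby inside a git checkout:

```ruby
require "rubygems"

# Evaluating the gemspec shells out to `git ls-files` and reads extsources.yaml,
# so this only works from bindings/ruby of a whisper.cpp checkout.
spec = Gem::Specification.load("whispercpp.gemspec")

puts "#{spec.name} #{spec.version} (#{spec.licenses.join(', ')})"
puts "packaged files: #{spec.files.length}"
puts spec.files.grep(%r{\Aext/}).first(5)
```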
@@ -1,54 +0,0 @@
-# Add new build types
-
-# ReleaseGG - Release with enabled asserts
-
-SET(CMAKE_CXX_FLAGS_RELEASEGG
-    "-O3"
-    CACHE STRING "Flags used by the c++ compiler during release builds with enabled asserts."
-    FORCE )
-SET(CMAKE_C_FLAGS_RELEASEGG
-    "-O3"
-    CACHE STRING "Flags used by the compiler during release builds with enabled asserts."
-    FORCE )
-SET(CMAKE_EXE_LINKER_FLAGS_RELEASEGG
-    ""
-    CACHE STRING "Flags used for linking binaries during release builds with enabled asserts."
-    FORCE )
-SET(CMAKE_SHARED_LINKER_FLAGS_RELEASEGG
-    ""
-    CACHE STRING "Flags used by the shared libraries linker during release builds with enabled asserts."
-    FORCE )
-MARK_AS_ADVANCED(
-    CMAKE_CXX_FLAGS_RELEASEGG
-    CMAKE_C_FLAGS_RELEASEGG
-    CMAKE_EXE_LINKER_FLAGS_RELEASEGG
-    CMAKE_SHARED_LINKER_FLAGS_RELEASEGG )
-
-# RelWithDebInfoGG - RelWithDebInfo with enabled asserts
-
-SET(CMAKE_CXX_FLAGS_RELWITHDEBINFOGG
-    "-O2 -g"
-    CACHE STRING "Flags used by the c++ compiler during release builds with debug symbols and enabled asserts."
-    FORCE )
-SET(CMAKE_C_FLAGS_RELWITHDEBINFOGG
-    "-O2 -g"
-    CACHE STRING "Flags used by the compiler during release builds with debug symbols and enabled asserts."
-    FORCE )
-SET(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFOGG
-    ""
-    CACHE STRING "Flags used for linking binaries during release builds with debug symbols and enabled asserts."
-    FORCE )
-SET(CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFOGG
-    ""
-    CACHE STRING "Flags used by the shared libraries linker during release builds with debug symbols and enabled asserts."
-    FORCE )
-MARK_AS_ADVANCED(
-    CMAKE_CXX_FLAGS_RELWITHDEBINFOGG
-    CMAKE_C_FLAGS_RELWITHDEBINFOGG
-    CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFOGG
-    CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFOGG )
-
-if (NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE)
-    set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
-    set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo" "ReleaseGG" "RelWithDebInfoGG")
-endif()
@@ -13,5 +13,5 @@ set_target_properties(${TARGET}
     PROPERTIES
     EXPORT_COMPILE_COMMANDS ON
     RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin"
     INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib"
 )
cmake/FindFFmpeg.cmake (new file)
@@ -0,0 +1,163 @@
# From
# https://github.com/snikulov/cmake-modules/blob/master/FindFFmpeg.cmake
#
# vim: ts=2 sw=2
# - Try to find the required ffmpeg components(default: AVFORMAT, AVUTIL, AVCODEC)
#
# Once done this will define
#  FFMPEG_FOUND         - System has the all required components.
#  FFMPEG_INCLUDE_DIRS  - Include directory necessary for using the required components headers.
#  FFMPEG_LIBRARIES     - Link these to use the required ffmpeg components.
#  FFMPEG_DEFINITIONS   - Compiler switches required for using the required ffmpeg components.
#
# For each of the components it will additionally set.
#   - AVCODEC
#   - AVDEVICE
#   - AVFORMAT
#   - AVFILTER
#   - AVUTIL
#   - POSTPROC
#   - SWSCALE
# the following variables will be defined
#  <component>_FOUND        - System has <component>
#  <component>_INCLUDE_DIRS - Include directory necessary for using the <component> headers
#  <component>_LIBRARIES    - Link these to use <component>
#  <component>_DEFINITIONS  - Compiler switches required for using <component>
#  <component>_VERSION      - The components version
#
# Copyright (c) 2006, Matthias Kretz, <kretz@kde.org>
# Copyright (c) 2008, Alexander Neundorf, <neundorf@kde.org>
# Copyright (c) 2011, Michael Jansen, <kde@michael-jansen.biz>
#
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.

include(FindPackageHandleStandardArgs)

# The default components were taken from a survey over other FindFFMPEG.cmake files
if (NOT FFmpeg_FIND_COMPONENTS)
  set(FFmpeg_FIND_COMPONENTS AVFORMAT AVCODEC AVUTIL SWRESAMPLE)
endif()

#
### Macro: set_component_found
#
# Marks the given component as found if both *_LIBRARIES AND *_INCLUDE_DIRS is present.
#
macro(set_component_found _component )
  if (${_component}_LIBRARIES AND ${_component}_INCLUDE_DIRS)
    message(DEBUG "  - ${_component} found.")
    set(${_component}_FOUND TRUE)
  else ()
    message(DEBUG "  - ${_component} not found.")
  endif ()
endmacro()

#
### Macro: find_component
#
# Checks for the given component by invoking pkgconfig and then looking up the libraries and
# include directories.
#
macro(find_component _component _pkgconfig _library _header)

  if (NOT WIN32)
    # use pkg-config to get the directories and then use these values
    # in the FIND_PATH() and FIND_LIBRARY() calls
    find_package(PkgConfig)
    if (PKG_CONFIG_FOUND)
      pkg_check_modules(PC_${_component} ${_pkgconfig})
      message(STATUS "Pkgconfig found: ${PC_${_component}_INCLUDEDIR}")
      message(STATUS "Pkgconfig found: ${PC_${_component}_INCLUDE_DIRS}")
      message(STATUS "${PC_${_component}_CFLAGS}")
    endif ()
  endif (NOT WIN32)

  find_path(${_component}_INCLUDE_DIRS ${_header}
    HINTS
      ${PC_${_component}_INCLUDEDIR}
      ${PC_${_component}_INCLUDE_DIRS}
    PATH_SUFFIXES
      ffmpeg
  )

  # CMake's default is to search first for shared libraries and then for static libraries.
  # Todo later: add option to prefer static libs over dynamic:
  find_library(${_component}_LIBRARIES NAMES ${_library} lib${_library}.a
    HINTS
      ${PC_${_component}_LIBDIR}
      ${PC_${_component}_LIBRARY_DIRS}
  )

  set(${_component}_DEFINITIONS ${PC_${_component}_CFLAGS_OTHER} CACHE STRING "The ${_component} CFLAGS.")
  set(${_component}_VERSION     ${PC_${_component}_VERSION}      CACHE STRING "The ${_component} version number.")

  set_component_found(${_component})

  mark_as_advanced(
    ${_component}_INCLUDE_DIRS
    ${_component}_LIBRARIES
    ${_component}_DEFINITIONS
    ${_component}_VERSION)

endmacro()

# Check for cached results. If there are skip the costly part.
if (NOT FFMPEG_LIBRARIES)

  # Check for all possible component.
  find_component(AVCODEC    libavcodec    avcodec    libavcodec/avcodec.h)
  find_component(AVFORMAT   libavformat   avformat   libavformat/avformat.h)
  find_component(AVDEVICE   libavdevice   avdevice   libavdevice/avdevice.h)
  #find_component(AVRESAMPLE libavresample avresample libavresample/avresample.h) # old name for swresample
  find_component(AVUTIL     libavutil     avutil     libavutil/avutil.h)
  find_component(AVFILTER   libavfilter   avfilter   libavfilter/avfilter.h)
  find_component(SWSCALE    libswscale    swscale    libswscale/swscale.h)
  find_component(POSTPROC   libpostproc   postproc   libpostproc/postprocess.h)
  find_component(SWRESAMPLE libswresample swresample libswresample/swresample.h)

  # Check if the required components were found and add their stuff to the FFMPEG_* vars.
  foreach (_component ${FFmpeg_FIND_COMPONENTS})
    if (${_component}_FOUND)
      # message(STATUS "Required component ${_component} present.")
      set(FFMPEG_LIBRARIES   ${FFMPEG_LIBRARIES}   ${${_component}_LIBRARIES})
      set(FFMPEG_DEFINITIONS ${FFMPEG_DEFINITIONS} ${${_component}_DEFINITIONS})
      list(APPEND FFMPEG_INCLUDE_DIRS ${${_component}_INCLUDE_DIRS})
    else ()
      # message(STATUS "Required component ${_component} missing.")
    endif ()
  endforeach ()

  # Build the include path with duplicates removed.
  if (FFMPEG_INCLUDE_DIRS)
    list(REMOVE_DUPLICATES FFMPEG_INCLUDE_DIRS)
  endif ()

  # cache the vars.
  set(FFMPEG_INCLUDE_DIRS ${FFMPEG_INCLUDE_DIRS} CACHE STRING "The FFmpeg include directories." FORCE)
  set(FFMPEG_LIBRARIES    ${FFMPEG_LIBRARIES}    CACHE STRING "The FFmpeg libraries." FORCE)
  set(FFMPEG_DEFINITIONS  ${FFMPEG_DEFINITIONS}  CACHE STRING "The FFmpeg cflags." FORCE)

  mark_as_advanced(FFMPEG_INCLUDE_DIRS
                   FFMPEG_LIBRARIES
                   FFMPEG_DEFINITIONS)

endif ()

# Now set the noncached _FOUND vars for the components.
# whisper.cpp does not need SWSCALE
foreach (_component AVCODEC AVDEVICE AVFORMAT AVRESAMPLE AVUTIL POSTPROCESS)
  set_component_found(${_component})
endforeach ()

# Compile the list of required vars
set(_FFmpeg_REQUIRED_VARS FFMPEG_LIBRARIES FFMPEG_INCLUDE_DIRS)
foreach (_component ${FFmpeg_FIND_COMPONENTS})
  list(APPEND _FFmpeg_REQUIRED_VARS ${_component}_LIBRARIES ${_component}_INCLUDE_DIRS)
endforeach ()

# Give a nice error message if some of the required vars are missing.
find_package_handle_standard_args(FFmpeg DEFAULT_MSG ${_FFmpeg_REQUIRED_VARS})
cmake/build-info.cmake (new file)
@@ -0,0 +1,58 @@
set(BUILD_NUMBER 0)
set(BUILD_COMMIT "unknown")
set(BUILD_COMPILER "unknown")
set(BUILD_TARGET "unknown")

# Look for git
find_package(Git)
if(NOT Git_FOUND)
    find_program(GIT_EXECUTABLE NAMES git git.exe)
    if(GIT_EXECUTABLE)
        set(Git_FOUND TRUE)
        message(STATUS "Found Git: ${GIT_EXECUTABLE}")
    else()
        message(WARNING "Git not found. Build info will not be accurate.")
    endif()
endif()

# Get the commit count and hash
if(Git_FOUND)
    execute_process(
        COMMAND ${GIT_EXECUTABLE} rev-parse --short HEAD
        WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
        OUTPUT_VARIABLE HEAD
        OUTPUT_STRIP_TRAILING_WHITESPACE
        RESULT_VARIABLE RES
    )
    if (RES EQUAL 0)
        set(BUILD_COMMIT ${HEAD})
    endif()
    execute_process(
        COMMAND ${GIT_EXECUTABLE} rev-list --count HEAD
        WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
        OUTPUT_VARIABLE COUNT
        OUTPUT_STRIP_TRAILING_WHITESPACE
        RESULT_VARIABLE RES
    )
    if (RES EQUAL 0)
        set(BUILD_NUMBER ${COUNT})
    endif()
endif()

if(MSVC)
    set(BUILD_COMPILER "${CMAKE_C_COMPILER_ID} ${CMAKE_C_COMPILER_VERSION}")
    set(BUILD_TARGET ${CMAKE_VS_PLATFORM_NAME})
else()
    execute_process(
        COMMAND sh -c "$@ --version | head -1" _ ${CMAKE_C_COMPILER}
        OUTPUT_VARIABLE OUT
        OUTPUT_STRIP_TRAILING_WHITESPACE
    )
    set(BUILD_COMPILER ${OUT})
    execute_process(
        COMMAND ${CMAKE_C_COMPILER} -dumpmachine
        OUTPUT_VARIABLE OUT
        OUTPUT_STRIP_TRAILING_WHITESPACE
    )
    set(BUILD_TARGET ${OUT})
endif()
cmake/whisper-config.cmake.in (new file)
@@ -0,0 +1,65 @@
set(WHISPER_VERSION      @WHISPER_INSTALL_VERSION@)
set(WHISPER_BUILD_COMMIT @WHISPER_BUILD_COMMIT@)
set(WHISPER_BUILD_NUMBER @WHISPER_BUILD_NUMBER@)
set(WHISPER_SHARED_LIB   @BUILD_SHARED_LIBS@)

set(GGML_BLAS       @GGML_BLAS@)
set(GGML_CUDA       @GGML_CUDA@)
set(GGML_METAL      @GGML_METAL@)
set(GGML_HIPBLAS    @GGML_HIPBLAS@)
set(GGML_ACCELERATE @GGML_ACCELERATE@)

@PACKAGE_INIT@

set_and_check(WHISPER_INCLUDE_DIR "@PACKAGE_WHISPER_INCLUDE_INSTALL_DIR@")
set_and_check(WHISPER_LIB_DIR     "@PACKAGE_WHISPER_LIB_INSTALL_DIR@")
set_and_check(WHISPER_BIN_DIR     "@PACKAGE_WHISPER_BIN_INSTALL_DIR@")

# Ensure transient dependencies satisfied

find_package(Threads REQUIRED)

if (APPLE AND GGML_ACCELERATE)
    find_library(ACCELERATE_FRAMEWORK Accelerate REQUIRED)
endif()

if (GGML_BLAS)
    find_package(BLAS REQUIRED)
endif()

if (GGML_CUDA)
    find_package(CUDAToolkit REQUIRED)
endif()

if (GGML_METAL)
    find_library(FOUNDATION_LIBRARY Foundation REQUIRED)
    find_library(METAL_FRAMEWORK    Metal      REQUIRED)
    find_library(METALKIT_FRAMEWORK MetalKit   REQUIRED)
endif()

if (GGML_HIPBLAS)
    find_package(hip     REQUIRED)
    find_package(hipblas REQUIRED)
    find_package(rocblas REQUIRED)
endif()

find_library(whisper_LIBRARY whisper
    REQUIRED
    HINTS ${WHISPER_LIB_DIR})

set(_whisper_link_deps "Threads::Threads" "@WHISPER_EXTRA_LIBS@")
set(_whisper_transient_defines "@WHISPER_TRANSIENT_DEFINES@")

add_library(whisper UNKNOWN IMPORTED)

set_target_properties(whisper
    PROPERTIES
        INTERFACE_INCLUDE_DIRECTORIES "${WHISPER_INCLUDE_DIR}"
        INTERFACE_LINK_LIBRARIES "${_whisper_link_deps}"
        INTERFACE_COMPILE_DEFINITIONS "${_whisper_transient_defines}"
        IMPORTED_LINK_INTERFACE_LANGUAGES "CXX"
        IMPORTED_LOCATION "${whisper_LIBRARY}"
        INTERFACE_COMPILE_FEATURES cxx_std_11
        POSITION_INDEPENDENT_CODE ON )

check_required_components(whisper)
cmake/whisper.pc.in (new file)
@@ -0,0 +1,10 @@
prefix=@CMAKE_INSTALL_PREFIX@
exec_prefix=${prefix}
libdir=@CMAKE_INSTALL_FULL_LIBDIR@
includedir=${prefix}/include

Name: whisper
Description: Port of OpenAI's Whisper model in C/C++
Version: @PROJECT_VERSION@
Libs: -L${libdir} -lwhisper
Cflags: -I${includedir}
@@ -11,26 +11,62 @@ if (WHISPER_SDL2)
     string(STRIP "${SDL2_LIBRARIES}" SDL2_LIBRARIES)
 
     message(STATUS "SDL2_INCLUDE_DIRS = ${SDL2_INCLUDE_DIRS}")
     message(STATUS "SDL2_LIBRARIES = ${SDL2_LIBRARIES}")
+endif()
 
+if (WHISPER_CLBLAST)
+    find_package(CLBlast REQUIRED)
 endif()
 
 # common
 
 set(TARGET common)
 
+unset(COMMON_EXTRA_LIBS)
+
+if (WHISPER_FFMPEG)
+    # As of cmake 3.27, there is no official cmake support for FindFFmpeg.
+    # Consequnelty we added a FindFFmpeg.cmake script the cmake subfolder:
+    # whisper.cpp does not need the full ffmpeg libs, just AVFORMAT AVCODEC AVUTIL SWRESAMPLE
+    # libswresample performs highly optimized audio resampling, rematrixing and sample format conversion operations
+    # libavcodec provides a generic encoding/decoding framework and contains multiple decoders and encoders for audio, video and subtitle streams, and several bitstream filters.
+    # libavformat provides a generic framework for multiplexing and demultiplexing (muxing and demuxing) audio, video and subtitle streams.
+    find_package(FFmpeg REQUIRED)
+
+    if (NOT ${FFMPEG_FOUND})
+        message(FATAL_ERROR "Cannot find ffmpeg libs/headers")
+    endif()
+
+    message(STATUS "Found ffmpeg libs: ${FFMPEG_LIBRARIES}")
+    message(STATUS "Found ffmpeg headers in: ${FFMPEG_INCLUDE_DIRS}")
+    message(STATUS "ffmpeg definitions: ${FFMPEG_DEFINITIONS}")
+    message(STATUS "Found avformat ${AVFORMAT_VERSION}")
+
+    include_directories(${FFMPEG_INCLUDE_DIRS})
+    add_compile_definitions(WHISPER_FFMPEG)
+
+    list(APPEND COMMON_EXTRA_LIBS ${FFMPEG_LIBRARIES})
+
+    set(COMMON_SOURCES_FFMPEG ffmpeg-transcode.cpp)
+endif()
+
 add_library(${TARGET} STATIC
     common.h
     common.cpp
     common-ggml.h
     common-ggml.cpp
+    grammar-parser.h
     grammar-parser.cpp
+    ${COMMON_SOURCES_FFMPEG}
     )
 
 include(DefaultTargetOptions)
 
-target_link_libraries(${TARGET} PRIVATE whisper)
+target_link_libraries(${TARGET} PRIVATE whisper ${COMMON_EXTRA_LIBS})
 
 set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
+set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
 
 if (WHISPER_SDL2)
     # common-sdl
@@ -44,34 +80,69 @@ if (WHISPER_SDL2)
 
     include(DefaultTargetOptions)
 
     target_include_directories(${TARGET} PUBLIC ${SDL2_INCLUDE_DIRS})
-    target_link_libraries(${TARGET} PRIVATE ${SDL2_LIBRARIES})
+    target_link_libraries (${TARGET} PRIVATE ${SDL2_LIBRARIES})
 
     set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
+    set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
 endif()
 
+# add json lib
+add_library(json_cpp INTERFACE)
+target_include_directories(json_cpp INTERFACE ${CMAKE_CURRENT_SOURCE_DIR})
+
 # examples
 
 include_directories(${CMAKE_CURRENT_SOURCE_DIR})
 
 if (EMSCRIPTEN)
     add_subdirectory(whisper.wasm)
+    set_target_properties(libmain PROPERTIES FOLDER "libs")
     add_subdirectory(stream.wasm)
+    set_target_properties(libstream PROPERTIES FOLDER "libs")
     add_subdirectory(command.wasm)
-    add_subdirectory(talk.wasm)
+    set_target_properties(libcommand PROPERTIES FOLDER "libs")
+    #add_subdirectory(talk.wasm)
+    #set_target_properties(libtalk PROPERTIES FOLDER "libs")
     add_subdirectory(bench.wasm)
+    set_target_properties(libbench PROPERTIES FOLDER "libs")
 elseif(CMAKE_JS_VERSION)
     add_subdirectory(addon.node)
+    set_target_properties(addon.node PROPERTIES FOLDER "examples")
 else()
     add_subdirectory(main)
+    set_target_properties(main PROPERTIES FOLDER "examples")
+    if (WHISPER_SDL2)
     add_subdirectory(stream)
+    set_target_properties(stream PROPERTIES FOLDER "examples")
+    endif (WHISPER_SDL2)
     add_subdirectory(server)
+    set_target_properties(server PROPERTIES FOLDER "examples")
+    if (WHISPER_SDL2)
     add_subdirectory(command)
+    set_target_properties(command PROPERTIES FOLDER "examples")
+    endif (WHISPER_SDL2)
     add_subdirectory(bench)
+    set_target_properties(bench PROPERTIES FOLDER "examples")
     add_subdirectory(quantize)
-    add_subdirectory(talk)
+    set_target_properties(quantize PROPERTIES FOLDER "examples")
+    if (WHISPER_SDL2)
+    # TODO: disabled until update
+    # https://github.com/ggerganov/whisper.cpp/issues/1818
+    #add_subdirectory(talk)
+    #set_target_properties(talk PROPERTIES FOLDER "examples")
     add_subdirectory(talk-llama)
+    set_target_properties(talk-llama PROPERTIES FOLDER "examples")
     add_subdirectory(lsp)
+    set_target_properties(lsp PROPERTIES FOLDER "examples")
+    if (GGML_SYCL)
+    add_subdirectory(sycl)
+    set_target_properties(ls-sycl-device PROPERTIES FOLDER "examples")
+    endif()
+    endif (WHISPER_SDL2)
 endif()
 
-add_subdirectory(wchess)
+if (WHISPER_SDL2)
+    add_subdirectory(wchess)
+    set_target_properties(wchess PROPERTIES FOLDER "examples")
+endif (WHISPER_SDL2)
@@ -1,4 +1,4 @@
-set(TARGET whisper-addon)
+set(TARGET addon.node)
 
 # Base settings
 #==================================================================
@@ -14,14 +14,14 @@ npm install
 Make sure it is in the project root directory and compiled with make-js.
 
 ```shell
-npx cmake-js compile -T whisper-addon -B Release
+npx cmake-js compile -T addon.node -B Release
 ```
 
 For Electron addon and cmake-js options, you can see [cmake-js](https://github.com/cmake-js/cmake-js) and make very few configuration changes.
 
 > Such as appointing special cmake path:
 > ```shell
-> npx cmake-js compile -c 'xxx/cmake' -T whisper-addon -B Release
+> npx cmake-js compile -c 'xxx/cmake' -T addon.node -B Release
 > ```
 
 ## Run
@@ -1,7 +1,7 @@
 const path = require("path");
 const { whisper } = require(path.join(
   __dirname,
-  "../../../build/Release/whisper-addon"
+  "../../../build/Release/addon.node"
 ));
 const { promisify } = require("util");
 
@@ -12,6 +12,12 @@ const whisperParamsMock = {
   model: path.join(__dirname, "../../../models/ggml-base.en.bin"),
   fname_inp: path.join(__dirname, "../../../samples/jfk.wav"),
   use_gpu: true,
+  flash_attn: false,
+  no_prints: true,
+  comma_in_time: false,
+  translate: true,
+  no_timestamps: false,
+  audio_ctx: 0,
 };
 
 describe("Run whisper.node", () => {
@@ -19,12 +19,12 @@ struct whisper_params {
     int32_t max_len = 0;
     int32_t best_of = 5;
     int32_t beam_size = -1;
+    int32_t audio_ctx = 0;
 
     float word_thold = 0.01f;
     float entropy_thold = 2.4f;
     float logprob_thold = -1.0f;
 
-    bool speed_up = false;
     bool translate = false;
     bool diarize = false;
     bool output_txt = false;
@@ -36,7 +36,10 @@ struct whisper_params {
     bool print_colors = false;
     bool print_progress = false;
     bool no_timestamps = false;
+    bool no_prints = false;
     bool use_gpu = true;
+    bool flash_attn = false;
+    bool comma_in_time = true;
 
     std::string language = "en";
     std::string prompt;
@@ -44,6 +47,8 @@ struct whisper_params {
 
     std::vector<std::string> fname_inp = {};
     std::vector<std::string> fname_out = {};
+
+    std::vector<float> pcmf32 = {}; // mono-channel F32 PCM
 };
 
 struct whisper_print_user_data {
@@ -52,27 +57,6 @@ struct whisper_print_user_data {
     const std::vector<std::vector<float>> * pcmf32s;
 };
 
-// 500 -> 00:05.000
-// 6000 -> 01:00.000
-std::string to_timestamp(int64_t t, bool comma = false) {
-    int64_t msec = t * 10;
-    int64_t hr = msec / (1000 * 60 * 60);
-    msec = msec - hr * (1000 * 60 * 60);
-    int64_t min = msec / (1000 * 60);
-    msec = msec - min * (1000 * 60);
-    int64_t sec = msec / 1000;
-    msec = msec - sec * 1000;
-
-    char buf[32];
-    snprintf(buf, sizeof(buf), "%02d:%02d:%02d%s%03d", (int) hr, (int) min, (int) sec, comma ? "," : ".", (int) msec);
-
-    return std::string(buf);
-}
-
-int timestamp_to_sample(int64_t t, int n_samples) {
-    return std::max(0, std::min((int) n_samples - 1, (int) ((t*WHISPER_SAMPLE_RATE)/100)));
-}
-
 void whisper_print_segment_callback(struct whisper_context * ctx, struct whisper_state * state, int n_new, void * user_data) {
     const auto & params = *((whisper_print_user_data *) user_data)->params;
     const auto & pcmf32s = *((whisper_print_user_data *) user_data)->pcmf32s;
@@ -104,8 +88,8 @@ void whisper_print_segment_callback(struct whisper_context * ctx, struct whisper
         if (params.diarize && pcmf32s.size() == 2) {
             const int64_t n_samples = pcmf32s[0].size();
 
-            const int64_t is0 = timestamp_to_sample(t0, n_samples);
-            const int64_t is1 = timestamp_to_sample(t1, n_samples);
+            const int64_t is0 = timestamp_to_sample(t0, n_samples, WHISPER_SAMPLE_RATE);
+            const int64_t is1 = timestamp_to_sample(t1, n_samples, WHISPER_SAMPLE_RATE);
 
             double energy0 = 0.0f;
             double energy1 = 0.0f;
@@ -141,9 +125,15 @@ void whisper_print_segment_callback(struct whisper_context * ctx, struct whisper
     }
 }
 
+void cb_log_disable(enum ggml_log_level, const char *, void *) {}
+
 int run(whisper_params &params, std::vector<std::vector<std::string>> &result) {
-    if (params.fname_inp.empty()) {
-        fprintf(stderr, "error: no input files specified\n");
+    if (params.no_prints) {
+        whisper_log_set(cb_log_disable, NULL);
+    }
+
+    if (params.fname_inp.empty() && params.pcmf32.empty()) {
+        fprintf(stderr, "error: no input files or audio buffer specified\n");
         return 2;
     }
 
@@ -154,8 +144,9 @@ int run(whisper_params &params, std::vector<std::vector<std::string>> &result) {
 
     // whisper init
 
-    struct whisper_context_params cparams;
+    struct whisper_context_params cparams = whisper_context_default_params();
     cparams.use_gpu = params.use_gpu;
+    cparams.flash_attn = params.flash_attn;
     struct whisper_context * ctx = whisper_init_from_file_with_params(params.model.c_str(), cparams);
 
     if (ctx == nullptr) {
@@ -163,6 +154,14 @@ int run(whisper_params &params, std::vector<std::vector<std::string>> &result) {
         return 3;
     }
 
+    // if params.pcmf32 is provided, set params.fname_inp to "buffer"
+    // this is simpler than further modifications in the code
+    if (!params.pcmf32.empty()) {
+        fprintf(stderr, "info: using audio buffer as input\n");
+        params.fname_inp.clear();
+        params.fname_inp.emplace_back("buffer");
+    }
+
     for (int f = 0; f < (int) params.fname_inp.size(); ++f) {
         const auto fname_inp = params.fname_inp[f];
         const auto fname_out = f < (int)params.fname_out.size() && !params.fname_out[f].empty() ? params.fname_out[f] : params.fname_inp[f];
@@ -170,20 +169,25 @@ int run(whisper_params &params, std::vector<std::vector<std::string>> &result) {
         std::vector<float> pcmf32;               // mono-channel F32 PCM
         std::vector<std::vector<float>> pcmf32s; // stereo-channel F32 PCM
 
-        if (!::read_wav(fname_inp, pcmf32, pcmf32s, params.diarize)) {
-            fprintf(stderr, "error: failed to read WAV file '%s'\n", fname_inp.c_str());
-            continue;
+        // read the input audio file if params.pcmf32 is not provided
+        if (params.pcmf32.empty()) {
+            if (!::read_wav(fname_inp, pcmf32, pcmf32s, params.diarize)) {
+                fprintf(stderr, "error: failed to read WAV file '%s'\n", fname_inp.c_str());
+                continue;
+            }
+        } else {
+            pcmf32 = params.pcmf32;
         }
 
         // print system information
-        {
+        if (!params.no_prints) {
             fprintf(stderr, "\n");
             fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                     params.n_threads*params.n_processors, std::thread::hardware_concurrency(), whisper_print_system_info());
         }
 
         // print some info about the processing
-        {
+        if (!params.no_prints) {
             fprintf(stderr, "\n");
             if (!whisper_is_multilingual(ctx)) {
                 if (params.language != "en" || params.translate) {
@@ -192,12 +196,13 @@ int run(whisper_params &params, std::vector<std::vector<std::string>> &result) {
                     fprintf(stderr, "%s: WARNING: model is not multilingual, ignoring language and translation options\n", __func__);
                 }
             }
-            fprintf(stderr, "%s: processing '%s' (%d samples, %.1f sec), %d threads, %d processors, lang = %s, task = %s, timestamps = %d ...\n",
+            fprintf(stderr, "%s: processing '%s' (%d samples, %.1f sec), %d threads, %d processors, lang = %s, task = %s, timestamps = %d, audio_ctx = %d ...\n",
                     __func__, fname_inp.c_str(), int(pcmf32.size()), float(pcmf32.size())/WHISPER_SAMPLE_RATE,
                     params.n_threads, params.n_processors,
                     params.language.c_str(),
                     params.translate ? "translate" : "transcribe",
-                    params.no_timestamps ? 0 : 1);
+                    params.no_timestamps ? 0 : 1,
+                    params.audio_ctx);
 
             fprintf(stderr, "\n");
         }
@@ -224,14 +229,15 @@ int run(whisper_params &params, std::vector<std::vector<std::string>> &result) {
         wparams.entropy_thold    = params.entropy_thold;
         wparams.logprob_thold    = params.logprob_thold;
         wparams.max_len          = params.output_wts && params.max_len == 0 ? 60 : params.max_len;
+        wparams.audio_ctx        = params.audio_ctx;
 
-        wparams.speed_up         = params.speed_up;
-
         wparams.greedy.best_of        = params.best_of;
         wparams.beam_search.beam_size = params.beam_size;
 
         wparams.initial_prompt   = params.prompt.c_str();
 
+        wparams.no_timestamps    = params.no_timestamps;
+
         whisper_print_user_data user_data = { &params, &pcmf32s };
 
         // this callback is called on each new segment
@@ -267,8 +273,8 @@ int run(whisper_params &params, std::vector<std::vector<std::string>> &result) {
                 const int64_t t0 = whisper_full_get_segment_t0(ctx, i);
                 const int64_t t1 = whisper_full_get_segment_t1(ctx, i);
 
-                result[i].emplace_back(to_timestamp(t0, true));
-                result[i].emplace_back(to_timestamp(t1, true));
+                result[i].emplace_back(to_timestamp(t0, params.comma_in_time));
+                result[i].emplace_back(to_timestamp(t1, params.comma_in_time));
                 result[i].emplace_back(text);
             }
 
@@ -319,11 +325,33 @@ Napi::Value whisper(const Napi::CallbackInfo& info) {
     std::string model     = whisper_params.Get("model").As<Napi::String>();
     std::string input     = whisper_params.Get("fname_inp").As<Napi::String>();
     bool use_gpu          = whisper_params.Get("use_gpu").As<Napi::Boolean>();
+    bool flash_attn       = whisper_params.Get("flash_attn").As<Napi::Boolean>();
+    bool no_prints        = whisper_params.Get("no_prints").As<Napi::Boolean>();
+    bool no_timestamps    = whisper_params.Get("no_timestamps").As<Napi::Boolean>();
+    int32_t audio_ctx     = whisper_params.Get("audio_ctx").As<Napi::Number>();
+    bool comma_in_time    = whisper_params.Get("comma_in_time").As<Napi::Boolean>();
+
+    Napi::Value pcmf32Value = whisper_params.Get("pcmf32");
+    std::vector<float> pcmf32_vec;
+    if (pcmf32Value.IsTypedArray()) {
+        Napi::Float32Array pcmf32 = pcmf32Value.As<Napi::Float32Array>();
+        size_t length = pcmf32.ElementLength();
+        pcmf32_vec.reserve(length);
+        for (size_t i = 0; i < length; i++) {
+            pcmf32_vec.push_back(pcmf32[i]);
+        }
+    }
 
     params.language = language;
     params.model    = model;
     params.fname_inp.emplace_back(input);
     params.use_gpu  = use_gpu;
+    params.flash_attn    = flash_attn;
+    params.no_prints     = no_prints;
+    params.no_timestamps = no_timestamps;
+    params.audio_ctx     = audio_ctx;
+    params.pcmf32        = pcmf32_vec;
+    params.comma_in_time = comma_in_time;
 
     Napi::Function callback = info[1].As<Napi::Function>();
     Worker* worker = new Worker(callback, params);
@@ -1,7 +1,7 @@
 const path = require("path");
 const { whisper } = require(path.join(
   __dirname,
-  "../../build/Release/whisper-addon"
+  "../../build/Release/addon.node"
 ));
 const { promisify } = require("util");
 
@@ -10,15 +10,27 @@ const whisperAsync = promisify(whisper);
 const whisperParams = {
   language: "en",
   model: path.join(__dirname, "../../models/ggml-base.en.bin"),
-  fname_inp: "../../samples/jfk.wav",
+  fname_inp: path.join(__dirname, "../../samples/jfk.wav"),
   use_gpu: true,
+  flash_attn: false,
+  no_prints: true,
+  comma_in_time: false,
+  translate: true,
+  no_timestamps: false,
+  audio_ctx: 0,
 };
 
 const arguments = process.argv.slice(2);
 const params = Object.fromEntries(
   arguments.reduce((pre, item) => {
     if (item.startsWith("--")) {
-      return [...pre, item.slice(2).split("=")];
+      const [key, value] = item.slice(2).split("=");
+      if (key === "audio_ctx") {
+        whisperParams[key] = parseInt(value);
+      } else {
+        whisperParams[key] = value;
+      }
+      return pre;
     }
     return pre;
   }, [])
@@ -33,5 +45,6 @@ for (const key in params) {
 console.log("whisperParams =", whisperParams);
 
 whisperAsync(whisperParams).then((result) => {
-  console.log(`Result from whisper: ${result}`);
+  console.log();
+  console.log(result);
 });
@@ -1,5 +1,5 @@
 {
-  "name": "whisper-addon",
+  "name": "addon.node",
   "version": "0.0.0",
   "description": "",
   "main": "index.js",
@@ -8,16 +8,17 @@
 // command-line parameters
 struct whisper_params {
     int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
-    int32_t what = 0; // what to benchmark: 0 - whisper ecoder, 1 - memcpy, 2 - ggml_mul_mat
+    int32_t what = 0; // what to benchmark: 0 - whisper encoder, 1 - memcpy, 2 - ggml_mul_mat
 
     std::string model = "models/ggml-base.en.bin";
 
     bool use_gpu = true;
+    bool flash_attn = false;
 };
 
 void whisper_print_usage(int argc, char ** argv, const whisper_params & params);
 
-bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
+static bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
     for (int i = 1; i < argc; i++) {
         std::string arg = argv[i];
 
@@ -25,10 +26,11 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
             whisper_print_usage(argc, argv, params);
             exit(0);
         }
         else if (arg == "-t" || arg == "--threads") { params.n_threads = std::stoi(argv[++i]); }
         else if (arg == "-m" || arg == "--model") { params.model = argv[++i]; }
         else if (arg == "-w" || arg == "--what") { params.what = atoi(argv[++i]); }
         else if (arg == "-ng" || arg == "--no-gpu") { params.use_gpu = false; }
+        else if (arg == "-fa" || arg == "--flash-attn") { params.flash_attn = true; }
         else {
             fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
             whisper_print_usage(argc, argv, params);
@@ -49,17 +51,20 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & params) {
     fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str());
     fprintf(stderr, " -w N, --what N [%-7d] what to benchmark:\n", params.what);
     fprintf(stderr, " -ng, --no-gpu [%-7s] disable GPU\n", params.use_gpu ? "false" : "true");
+    fprintf(stderr, " -fa, --flash-attn [%-7s] enable flash attention\n", params.flash_attn ? "true" : "false");
     fprintf(stderr, " %-7s 0 - whisper\n", "");
     fprintf(stderr, " %-7s 1 - memcpy\n", "");
     fprintf(stderr, " %-7s 2 - ggml_mul_mat\n", "");
     fprintf(stderr, "\n");
 }
 
-int whisper_bench_full(const whisper_params & params) {
+static int whisper_bench_full(const whisper_params & params) {
     // whisper init
 
-    struct whisper_context_params cparams;
-    cparams.use_gpu = params.use_gpu;
+    struct whisper_context_params cparams = whisper_context_default_params();
+
+    cparams.use_gpu    = params.use_gpu;
+    cparams.flash_attn = params.flash_attn;
 
     struct whisper_context * ctx = whisper_init_from_file_with_params(params.model.c_str(), cparams);
 
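The same initialization pattern recurs in the tools touched by this change set: start from the library defaults, then flip the two context flags before loading the model. A minimal sketch (model path and error handling left to the caller):

#include "whisper.h"

// Sketch of the initialization pattern used above: take the defaults,
// then toggle GPU usage and flash attention before loading the model.
static struct whisper_context * init_ctx(const char * model_path, bool use_gpu, bool flash_attn) {
    struct whisper_context_params cparams = whisper_context_default_params();

    cparams.use_gpu    = use_gpu;     // cleared by -ng / --no-gpu
    cparams.flash_attn = flash_attn;  // set by -fa / --flash-attn

    return whisper_init_from_file_with_params(model_path, cparams);
}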
@@ -37,9 +37,13 @@ https://user-images.githubusercontent.com/1991296/207435352-8fc4ed3f-bde5-4555-9
 The `command` tool depends on SDL2 library to capture audio from the microphone. You can build it like this:
 
 ```bash
-# Install SDL2 on Linux
+# Install SDL2
+# On Debian based linux distributions:
 sudo apt-get install libsdl2-dev
 
+# On Fedora Linux:
+sudo dnf install SDL2 SDL2-devel
+
 # Install SDL2 on Mac OS
 brew install sdl2
 
@@ -22,11 +22,6 @@
 #include <vector>
 #include <map>
 
-bool file_exists(const std::string & fname) {
-    std::ifstream f(fname.c_str());
-    return f.good();
-}
-
 // command-line parameters
 struct whisper_params {
     int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
@@ -43,12 +38,12 @@ struct whisper_params {
 
     grammar_parser::parse_state grammar_parsed;
 
-    bool speed_up      = false;
     bool translate     = false;
     bool print_special = false;
     bool print_energy  = false;
     bool no_timestamps = true;
     bool use_gpu       = true;
+    bool flash_attn    = false;
 
     std::string language = "en";
     std::string model    = "models/ggml-base.en.bin";
@@ -57,11 +52,14 @@ struct whisper_params {
     std::string prompt;
     std::string context;
     std::string grammar;
+
+    // A regular expression that matches tokens to suppress
+    std::string suppress_regex;
 };
 
 void whisper_print_usage(int argc, char ** argv, const whisper_params & params);
 
-bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
+static bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
     for (int i = 1; i < argc; i++) {
         std::string arg = argv[i];
 
@@ -77,11 +75,11 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
         else if (arg == "-ac" || arg == "--audio-ctx") { params.audio_ctx = std::stoi(argv[++i]); }
         else if (arg == "-vth" || arg == "--vad-thold") { params.vad_thold = std::stof(argv[++i]); }
         else if (arg == "-fth" || arg == "--freq-thold") { params.freq_thold = std::stof(argv[++i]); }
-        else if (arg == "-su" || arg == "--speed-up") { params.speed_up = true; }
         else if (arg == "-tr" || arg == "--translate") { params.translate = true; }
         else if (arg == "-ps" || arg == "--print-special") { params.print_special = true; }
         else if (arg == "-pe" || arg == "--print-energy") { params.print_energy = true; }
         else if (arg == "-ng" || arg == "--no-gpu") { params.use_gpu = false; }
+        else if (arg == "-fa" || arg == "--flash-attn") { params.flash_attn = true; }
         else if (arg == "-l" || arg == "--language") { params.language = argv[++i]; }
         else if (arg == "-m" || arg == "--model") { params.model = argv[++i]; }
         else if (arg == "-f" || arg == "--file") { params.fname_out = argv[++i]; }
@@ -90,6 +88,7 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
         else if (arg == "-ctx" || arg == "--context") { params.context = argv[++i]; }
         else if ( arg == "--grammar") { params.grammar = argv[++i]; }
         else if ( arg == "--grammar-penalty") { params.grammar_penalty = std::stof(argv[++i]); }
+        else if ( arg == "--suppress-regex") { params.suppress_regex = argv[++i]; }
         else {
             fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
             whisper_print_usage(argc, argv, params);
@@ -114,11 +113,11 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & params) {
     fprintf(stderr, " -ac N, --audio-ctx N [%-7d] audio context size (0 - all)\n", params.audio_ctx);
     fprintf(stderr, " -vth N, --vad-thold N [%-7.2f] voice activity detection threshold\n", params.vad_thold);
     fprintf(stderr, " -fth N, --freq-thold N [%-7.2f] high-pass frequency cutoff\n", params.freq_thold);
-    fprintf(stderr, " -su, --speed-up [%-7s] speed up audio by x2 (reduced accuracy)\n", params.speed_up ? "true" : "false");
     fprintf(stderr, " -tr, --translate [%-7s] translate from source language to english\n", params.translate ? "true" : "false");
     fprintf(stderr, " -ps, --print-special [%-7s] print special tokens\n", params.print_special ? "true" : "false");
     fprintf(stderr, " -pe, --print-energy [%-7s] print sound energy (for debugging)\n", params.print_energy ? "true" : "false");
     fprintf(stderr, " -ng, --no-gpu [%-7s] disable GPU\n", params.use_gpu ? "false" : "true");
+    fprintf(stderr, " -fa, --flash-attn [%-7s] flash attention\n", params.flash_attn ? "true" : "false");
     fprintf(stderr, " -l LANG, --language LANG [%-7s] spoken language\n", params.language.c_str());
     fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str());
     fprintf(stderr, " -f FNAME, --file FNAME [%-7s] text output file name\n", params.fname_out.c_str());
@@ -127,10 +126,11 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & params) {
     fprintf(stderr, " -ctx, --context [%-7s] sample text to help the transcription\n", params.context.c_str());
     fprintf(stderr, " --grammar GRAMMAR [%-7s] GBNF grammar to guide decoding\n", params.grammar.c_str());
     fprintf(stderr, " --grammar-penalty N [%-7.1f] scales down logits of nongrammar tokens\n", params.grammar_penalty);
+    fprintf(stderr, " --suppress-regex REGEX [%-7s] regular expression matching tokens to suppress\n", params.suppress_regex.c_str());
     fprintf(stderr, "\n");
 }
 
-std::string transcribe(
+static std::string transcribe(
     whisper_context * ctx,
     const whisper_params & params,
     const std::vector<float> & pcmf32,
@@ -162,7 +162,6 @@ std::string transcribe(
     wparams.n_threads = params.n_threads;
 
     wparams.audio_ctx = params.audio_ctx;
-    wparams.speed_up  = params.speed_up;
 
     wparams.temperature     = 0.4f;
     wparams.temperature_inc = 1.0f;
@@ -172,6 +171,8 @@ std::string transcribe(
 
     wparams.initial_prompt = params.context.data();
 
+    wparams.suppress_regex = params.suppress_regex.c_str();
+
     const auto & grammar_parsed = params.grammar_parsed;
     auto grammar_rules = grammar_parsed.c_rules();
 
@@ -215,7 +216,7 @@ std::string transcribe(
     return result;
 }
 
-std::vector<std::string> read_allowed_commands(const std::string & fname) {
+static std::vector<std::string> read_allowed_commands(const std::string & fname) {
     std::vector<std::string> allowed_commands;
 
     std::ifstream ifs(fname);
@@ -237,7 +238,7 @@ std::vector<std::string> read_allowed_commands(const std::string & fname) {
     return allowed_commands;
 }
 
-std::vector<std::string> get_words(const std::string &txt) {
+static std::vector<std::string> get_words(const std::string &txt) {
     std::vector<std::string> words;
 
     std::istringstream iss(txt);
@@ -251,7 +252,7 @@ std::vector<std::string> get_words(const std::string &txt) {
 
 // command-list mode
 // guide the transcription to match the most likely command from a provided list
-int process_command_list(struct whisper_context * ctx, audio_async &audio, const whisper_params &params) {
+static int process_command_list(struct whisper_context * ctx, audio_async &audio, const whisper_params &params) {
     fprintf(stderr, "\n");
     fprintf(stderr, "%s: guided mode\n", __func__);
 
@@ -366,7 +367,6 @@ int process_command_list(struct whisper_context * ctx, audio_async &audio, const whisper_params &params) {
     wparams.n_threads = params.n_threads;
 
     wparams.audio_ctx = params.audio_ctx;
-    wparams.speed_up  = params.speed_up;
 
     wparams.prompt_tokens   = k_tokens.data();
     wparams.prompt_n_tokens = k_tokens.size();
@@ -463,7 +463,7 @@ int process_command_list(struct whisper_context * ctx, audio_async &audio, const whisper_params &params) {
 
 // always-prompt mode
 // transcribe the voice into text after valid prompt
-int always_prompt_transcription(struct whisper_context * ctx, audio_async & audio, const whisper_params & params) {
+static int always_prompt_transcription(struct whisper_context * ctx, audio_async & audio, const whisper_params & params) {
     bool is_running = true;
     bool ask_prompt = true;
 
@@ -543,7 +543,7 @@ int always_prompt_transcription(struct whisper_context * ctx, audio_async & audio, const whisper_params & params) {
 
 // general-purpose mode
 // freely transcribe the voice into text
-int process_general_transcription(struct whisper_context * ctx, audio_async & audio, const whisper_params & params) {
+static int process_general_transcription(struct whisper_context * ctx, audio_async & audio, const whisper_params & params) {
     bool is_running  = true;
     bool have_prompt = false;
     bool ask_prompt  = true;
@@ -693,8 +693,10 @@ int main(int argc, char ** argv) {
 
     // whisper init
 
-    struct whisper_context_params cparams;
-    cparams.use_gpu = params.use_gpu;
+    struct whisper_context_params cparams = whisper_context_default_params();
+
+    cparams.use_gpu    = params.use_gpu;
+    cparams.flash_attn = params.flash_attn;
 
     struct whisper_context * ctx = whisper_init_from_file_with_params(params.model.c_str(), cparams);
 
@@ -736,7 +738,7 @@ int main(int argc, char ** argv) {
 
     if (!params.grammar.empty()) {
         auto & grammar = params.grammar_parsed;
-        if (file_exists(params.grammar.c_str())) {
+        if (is_file_exist(params.grammar.c_str())) {
             // read grammar from file
             std::ifstream ifs(params.grammar.c_str());
             const std::string txt = std::string((std::istreambuf_iterator<char>(ifs)), std::istreambuf_iterator<char>());
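The new --suppress-regex option ends up as a plain C string on the decoding parameters. A minimal sketch of that wiring (the pattern shown is only an example):

#include <string>
#include "whisper.h"

// Sketch: pass a token-suppression regex through whisper_full_params, mirroring
// the wparams.suppress_regex assignment in the diff above. The std::string must
// outlive any whisper_full() call that uses these params, since only the pointer is stored.
static whisper_full_params make_params(const std::string & suppress_regex) {
    whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
    wparams.suppress_regex = suppress_regex.c_str();
    return wparams;
}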
@@ -62,6 +62,19 @@ bool ggml_common_quantize_0(
         case GGML_FTYPE_ALL_F32:
         case GGML_FTYPE_MOSTLY_F16:
         case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16:
+        case GGML_FTYPE_MOSTLY_IQ2_XXS:
+        case GGML_FTYPE_MOSTLY_IQ2_XS:
+        case GGML_FTYPE_MOSTLY_IQ2_S:
+        case GGML_FTYPE_MOSTLY_IQ3_XXS:
+        case GGML_FTYPE_MOSTLY_IQ3_S:
+        case GGML_FTYPE_MOSTLY_IQ1_S:
+        case GGML_FTYPE_MOSTLY_IQ4_NL:
+        case GGML_FTYPE_MOSTLY_IQ4_XS:
+        case GGML_FTYPE_MOSTLY_IQ1_M:
+        case GGML_FTYPE_MOSTLY_BF16:
+        case GGML_FTYPE_MOSTLY_Q4_0_4_4:
+        case GGML_FTYPE_MOSTLY_Q4_0_4_8:
+        case GGML_FTYPE_MOSTLY_Q4_0_8_8:
                 {
                     fprintf(stderr, "%s: invalid model type %d\n", __func__, ftype);
                     return false;
@@ -82,8 +95,6 @@ bool ggml_common_quantize_0(
     std::vector<ggml_fp16_t> data_f16;
     std::vector<float> data_f32;
 
-    std::vector<int64_t> hist_all(1 << 4, 0);
-
     while (true) {
         int32_t n_dims;
         int32_t length;
@@ -168,8 +179,6 @@ bool ggml_common_quantize_0(
             work.resize(nelements); // for quantization
 
             size_t cur_size = 0;
-            std::vector<int64_t> hist_cur(1 << 4, 0);
 
             switch ((ggml_type) ttype) {
                 case GGML_TYPE_Q4_0:
                 case GGML_TYPE_Q4_1:
@@ -182,15 +191,32 @@ bool ggml_common_quantize_0(
                 case GGML_TYPE_Q5_K:
                 case GGML_TYPE_Q6_K:
                     {
-                        cur_size = ggml_quantize_chunk((ggml_type) ttype, data_f32.data(), work.data(), 0, nelements, hist_cur.data());
+                        cur_size = ggml_quantize_chunk((ggml_type) ttype, data_f32.data(), work.data(), 0, nelements/ne[0], ne[0], nullptr);
                     } break;
                 case GGML_TYPE_F32:
                 case GGML_TYPE_F16:
                 case GGML_TYPE_I8:
                 case GGML_TYPE_I16:
                 case GGML_TYPE_I32:
+                case GGML_TYPE_I64:
+                case GGML_TYPE_F64:
                 case GGML_TYPE_Q8_1:
                 case GGML_TYPE_Q8_K:
+                case GGML_TYPE_IQ2_XXS:
+                case GGML_TYPE_IQ2_XS:
+                case GGML_TYPE_IQ2_S:
+                case GGML_TYPE_IQ3_XXS:
+                case GGML_TYPE_IQ3_S:
+                case GGML_TYPE_IQ1_S:
+                case GGML_TYPE_IQ4_NL:
+                case GGML_TYPE_IQ4_XS:
+                case GGML_TYPE_IQ1_M:
+                case GGML_TYPE_BF16:
+                case GGML_TYPE_Q4_0_4_4:
+                case GGML_TYPE_Q4_0_4_8:
+                case GGML_TYPE_Q4_0_8_8:
+                case GGML_TYPE_TQ1_0:
+                case GGML_TYPE_TQ2_0:
                 case GGML_TYPE_COUNT:
                     {
                         fprintf(stderr, "%s: unsupported quantization type %d (%s)\n", __func__, ttype, ggml_type_name((ggml_type) ttype));
@@ -201,15 +227,7 @@ bool ggml_common_quantize_0(
             fout.write(reinterpret_cast<char *>(work.data()), cur_size);
             total_size_new += cur_size;
 
-            printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0);
-            for (int i = 0; i < (int) hist_cur.size(); ++i) {
-                hist_all[i] += hist_cur[i];
-            }
-
-            for (int i = 0; i < (int) hist_cur.size(); ++i) {
-                printf("%5.3f ", hist_cur[i] / (float)nelements);
-            }
-            printf("\n");
+            printf("size = %8.2f MB -> %8.2f MB\n", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0);
         } else {
             printf("size = %8.3f MB\n", data_u8.size()/1024.0/1024.0);
             fout.write(reinterpret_cast<char *>(data_u8.data()), data_u8.size());
@@ -222,18 +240,5 @@ bool ggml_common_quantize_0(
     printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
     printf("%s: quant size = %8.2f MB | ftype = %d (%s)\n", __func__, total_size_new/1024.0/1024.0, ftype, ggml_type_name(qtype));
 
-    {
-        int64_t sum_all = 0;
-        for (int i = 0; i < (int) hist_all.size(); ++i) {
-            sum_all += hist_all[i];
-        }
-
-        printf("%s: hist: ", __func__);
-        for (int i = 0; i < (int) hist_all.size(); ++i) {
-            printf("%5.3f ", hist_all[i] / (float)sum_all);
-        }
-        printf("\n");
-    }
-
     return true;
 }
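The quantization call above switches from a flat element count plus histogram buffer to a row count and a row width. A sketch of the new call shape, reusing the variables from the surrounding function (a 2-D tensor layout is assumed):

// nelements floats laid out as rows of ne[0] values each; the trailing
// pointer is an optional importance matrix, unused here.
const int64_t n_per_row = ne[0];
const int64_t nrows     = nelements / ne[0];

cur_size = ggml_quantize_chunk((ggml_type) ttype,
                               data_f32.data(), work.data(),
                               0,         // start offset
                               nrows,
                               n_per_row,
                               nullptr);  // no imatrix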
@@ -219,7 +219,7 @@ bool sdl_poll_events() {
             case SDL_QUIT:
                 {
                     return false;
-                } break;
+                }
             default:
                 break;
         }
@@ -19,8 +19,18 @@
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif
 
+#ifdef _WIN32
+#include <fcntl.h>
+#include <io.h>
+#endif
+
+#ifdef WHISPER_FFMPEG
+// as implemented in ffmpeg_trancode.cpp only embedded in common lib if whisper built with ffmpeg support
+extern bool ffmpeg_decode_audio(const std::string & ifname, std::vector<uint8_t> & wav_data);
+#endif
+
 // Function to check if the next argument exists
-std::string get_next_arg(int& i, int argc, char** argv, const std::string& flag, gpt_params& params) {
+static std::string get_next_arg(int& i, int argc, char** argv, const std::string& flag, gpt_params& params) {
     if (i + 1 < argc && argv[i + 1][0] != '-') {
         return argv[++i];
     } else {
@@ -137,7 +147,6 @@ std::string gpt_random_prompt(std::mt19937 & rng) {
         case 7: return "He";
         case 8: return "She";
         case 9: return "They";
-        default: return "To";
     }
 
     return "The";
@@ -336,7 +345,7 @@ std::vector<gpt_vocab::id> gpt_tokenize(const gpt_vocab & vocab, const std::string & text) {
     return tokens;
 }
 
-std::vector<gpt_vocab::id> parse_tokens_from_string(const std::string& input, char delimiter) {
+static std::vector<gpt_vocab::id> parse_tokens_from_string(const std::string& input, char delimiter) {
     std::vector<gpt_vocab::id> output;
     std::stringstream ss(input);
     std::string token;
@@ -348,7 +357,7 @@ std::vector<gpt_vocab::id> parse_tokens_from_string(const std::string& input, char delimiter) {
     return output;
 }
 
-std::map<std::string, std::vector<gpt_vocab::id>> extract_tests_from_file(const std::string & fpath_test){
+static std::map<std::string, std::vector<gpt_vocab::id>> extract_tests_from_file(const std::string & fpath_test){
     if (fpath_test.empty()){
         fprintf(stderr, "%s : No test file found.\n", __func__);
         return std::map<std::string, std::vector<gpt_vocab::id>>();
@@ -615,12 +624,31 @@ gpt_vocab::id gpt_sample_top_k_top_p_repeat(
 
 }
 
+bool is_wav_buffer(const std::string buf) {
+    // RIFF ref: https://en.wikipedia.org/wiki/Resource_Interchange_File_Format
+    // WAV ref: https://www.mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html
+    if (buf.size() < 12 || buf.substr(0, 4) != "RIFF" || buf.substr(8, 4) != "WAVE") {
+        return false;
+    }
+
+    uint32_t chunk_size = *reinterpret_cast<const uint32_t*>(buf.data() + 4);
+    if (chunk_size + 8 != buf.size()) {
+        return false;
+    }
+
+    return true;
+}
+
 bool read_wav(const std::string & fname, std::vector<float>& pcmf32, std::vector<std::vector<float>>& pcmf32s, bool stereo) {
     drwav wav;
-    std::vector<uint8_t> wav_data; // used for pipe input from stdin
+    std::vector<uint8_t> wav_data; // used for pipe input from stdin or ffmpeg decoding output
 
     if (fname == "-") {
         {
+            #ifdef _WIN32
+            _setmode(_fileno(stdin), _O_BINARY);
+            #endif
+
             uint8_t buf[1024];
             while (true)
             {
@@ -639,28 +667,49 @@ bool read_wav(const std::string & fname, std::vector<float>& pcmf32, std::vector<std::vector<float>>& pcmf32s, bool stereo) {
 
         fprintf(stderr, "%s: read %zu bytes from stdin\n", __func__, wav_data.size());
     }
+    else if (is_wav_buffer(fname)) {
+        if (drwav_init_memory(&wav, fname.c_str(), fname.size(), nullptr) == false) {
+            fprintf(stderr, "error: failed to open WAV file from fname buffer\n");
+            return false;
+        }
+    }
     else if (drwav_init_file(&wav, fname.c_str(), nullptr) == false) {
+        #if defined(WHISPER_FFMPEG)
+        if (ffmpeg_decode_audio(fname, wav_data) != 0) {
+            fprintf(stderr, "error: failed to ffmpeg decode '%s' \n", fname.c_str());
+            return false;
+        }
+        if (drwav_init_memory(&wav, wav_data.data(), wav_data.size(), nullptr) == false) {
+            fprintf(stderr, "error: failed to read wav data as wav \n");
+            return false;
+        }
+        #else
         fprintf(stderr, "error: failed to open '%s' as WAV file\n", fname.c_str());
         return false;
+        #endif
     }
 
     if (wav.channels != 1 && wav.channels != 2) {
         fprintf(stderr, "%s: WAV file '%s' must be mono or stereo\n", __func__, fname.c_str());
+        drwav_uninit(&wav);
         return false;
     }
 
     if (stereo && wav.channels != 2) {
         fprintf(stderr, "%s: WAV file '%s' must be stereo for diarization\n", __func__, fname.c_str());
+        drwav_uninit(&wav);
        return false;
     }
 
     if (wav.sampleRate != COMMON_SAMPLE_RATE) {
         fprintf(stderr, "%s: WAV file '%s' must be %i kHz\n", __func__, fname.c_str(), COMMON_SAMPLE_RATE/1000);
+        drwav_uninit(&wav);
         return false;
     }
 
     if (wav.bitsPerSample != 16) {
         fprintf(stderr, "%s: WAV file '%s' must be 16-bit\n", __func__, fname.c_str());
+        drwav_uninit(&wav);
         return false;
     }
 
@@ -815,3 +864,48 @@ void sam_print_usage(int /*argc*/, char ** argv, const sam_params & params) {
     fprintf(stderr, " output file (default: %s)\n", params.fname_out.c_str());
     fprintf(stderr, "\n");
 }
+
+//  500 -> 00:05.000
+// 6000 -> 01:00.000
+std::string to_timestamp(int64_t t, bool comma) {
+    int64_t msec = t * 10;
+    int64_t hr = msec / (1000 * 60 * 60);
+    msec = msec - hr * (1000 * 60 * 60);
+    int64_t min = msec / (1000 * 60);
+    msec = msec - min * (1000 * 60);
+    int64_t sec = msec / 1000;
+    msec = msec - sec * 1000;
+
+    char buf[32];
+    snprintf(buf, sizeof(buf), "%02d:%02d:%02d%s%03d", (int) hr, (int) min, (int) sec, comma ? "," : ".", (int) msec);
+
+    return std::string(buf);
+}
+
+int timestamp_to_sample(int64_t t, int n_samples, int whisper_sample_rate) {
+    return std::max(0, std::min((int) n_samples - 1, (int) ((t*whisper_sample_rate)/100)));
+}
+
+bool is_file_exist(const char *fileName)
+{
+    std::ifstream infile(fileName);
+    return infile.good();
+}
+
+bool speak_with_file(const std::string & command, const std::string & text, const std::string & path, int voice_id)
+{
+    std::ofstream speak_file(path.c_str());
+    if (speak_file.fail()) {
+        fprintf(stderr, "%s: failed to open speak_file\n", __func__);
+        return false;
+    } else {
+        speak_file.write(text.c_str(), text.size());
+        speak_file.close();
+        int ret = system((command + " " + std::to_string(voice_id) + " " + path).c_str());
+        if (ret != 0) {
+            fprintf(stderr, "%s: failed to speak\n", __func__);
+            return false;
+        }
+    }
+    return true;
+}
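With the additions above, read_wav() accepts either a file path, "-" for stdin, or a complete WAV image passed in as a string buffer, and is_wav_buffer() is what tells the cases apart. A minimal sketch of a caller:

#include <string>
#include <vector>
#include "common.h"

// Sketch: path_or_buf may be a filename, "-" for stdin, or a whole WAV file held in memory.
static bool load_pcm(const std::string & path_or_buf, std::vector<float> & pcmf32) {
    std::vector<std::vector<float>> pcmf32s; // per-channel samples, only filled for stereo input
    return read_wav(path_or_buf, pcmf32, pcmf32s, /*stereo=*/false);
}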
@@ -9,6 +9,7 @@
 #include <thread>
 #include <ctime>
 #include <fstream>
+#include <sstream>
 
 #define COMMON_SAMPLE_RATE 16000
 
@@ -21,7 +22,7 @@ struct gpt_params {
     int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
     int32_t n_predict = 200; // new tokens to predict
     int32_t n_parallel = 1; // number of parallel streams
-    int32_t n_batch = 8; // batch size for prompt processing
+    int32_t n_batch = 32; // batch size for prompt processing
     int32_t n_ctx = 2048; // context size (this is the KV cache max size)
     int32_t n_gpu_layers = 0; // number of layers to offlload to the GPU
 
@@ -135,7 +136,11 @@ gpt_vocab::id gpt_sample_top_k_top_p_repeat(
 // Audio utils
 //
 
+// Check if a buffer is a WAV audio file
+bool is_wav_buffer(const std::string buf);
+
 // Read WAV audio file and store the PCM data into pcmf32
+// fname can be a buffer of WAV data instead of a filename
 // The sample rate of the audio must be equal to COMMON_SAMPLE_RATE
 // If stereo flag is set and the audio has 2 channels, the pcmf32s will contain 2 channel PCM
 bool read_wav(
@@ -181,7 +186,7 @@ private:
     // It is assumed that PCM data is normalized to a range from -1 to 1
     bool write_audio(const float * data, size_t length) {
         for (size_t i = 0; i < length; ++i) {
-            const int16_t intSample = data[i] * 32767;
+            const int16_t intSample = int16_t(data[i] * 32767);
             file.write(reinterpret_cast<const char *>(&intSample), sizeof(int16_t));
             dataSize += sizeof(int16_t);
         }
@@ -277,3 +282,62 @@ struct sam_params {
 bool sam_params_parse(int argc, char ** argv, sam_params & params);
 
 void sam_print_usage(int argc, char ** argv, const sam_params & params);
+
+//
+// Terminal utils
+//
+
+#define SQR(X) ((X) * (X))
+#define UNCUBE(x) x < 48 ? 0 : x < 115 ? 1 : (x - 35) / 40
+
+/**
+ * Quantizes 24-bit RGB to xterm256 code range [16,256).
+ */
+static int rgb2xterm256(int r, int g, int b) {
+    unsigned char cube[] = {0, 0137, 0207, 0257, 0327, 0377};
+    int av, ir, ig, ib, il, qr, qg, qb, ql;
+    av = r * .299 + g * .587 + b * .114 + .5;
+    ql = (il = av > 238 ? 23 : (av - 3) / 10) * 10 + 8;
+    qr = cube[(ir = UNCUBE(r))];
+    qg = cube[(ig = UNCUBE(g))];
+    qb = cube[(ib = UNCUBE(b))];
+    if (SQR(qr - r) + SQR(qg - g) + SQR(qb - b) <=
+        SQR(ql - r) + SQR(ql - g) + SQR(ql - b))
+        return ir * 36 + ig * 6 + ib + 020;
+    return il + 0350;
+}
+
+static std::string set_xterm256_foreground(int r, int g, int b) {
+    int x = rgb2xterm256(r, g, b);
+    std::ostringstream oss;
+    oss << "\033[38;5;" << x << "m";
+    return oss.str();
+}
+
+// Lowest is red, middle is yellow, highest is green. Color scheme from
+// Paul Tol; it is colorblind friendly https://personal.sron.nl/~pault/
+const std::vector<std::string> k_colors = {
+    set_xterm256_foreground(220, 5, 12),
+    set_xterm256_foreground(232, 96, 28),
+    set_xterm256_foreground(241, 147, 45),
+    set_xterm256_foreground(246, 193, 65),
+    set_xterm256_foreground(247, 240, 86),
+    set_xterm256_foreground(144, 201, 135),
+    set_xterm256_foreground( 78, 178, 101),
+};
+
+//
+// Other utils
+//
+
+// convert timestamp to string, 6000 -> 01:00.000
+std::string to_timestamp(int64_t t, bool comma = false);
+
+// given a timestamp get the sample
+int timestamp_to_sample(int64_t t, int n_samples, int whisper_sample_rate);
+
+// check if file exists using ifstream
+bool is_file_exist(const char *fileName);
+
+// write text to file, and call system("command voice_id file")
+bool speak_with_file(const std::string & command, const std::string & text, const std::string & path, int voice_id);
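For reference, to_timestamp() takes whisper's 10 ms ticks, and the implementation always prints the hours field even though the header comment elides it. A small sketch:

#include <cstdio>
#include "common.h"

int main() {
    // t is in units of 10 ms: 500 ticks = 5 s, 6000 ticks = 1 min
    printf("%s\n", to_timestamp(500).c_str());          // prints 00:00:05.000
    printf("%s\n", to_timestamp(6000, true).c_str());   // prints 00:01:00,000 (comma separator)
    return 0;
}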
examples/dr_wav.h: 4513 changes (file diff suppressed because it is too large)
examples/ffmpeg-transcode.cpp: 348 additions (new file)
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * transcode.c - convert audio file to WAVE
+ *
+ * Copyright (C) 2019 Andrew Clayton <andrew@digital-domain.net>
+ * Copyright (C) 2024 William Tambellini <william.tambellini@gmail.com>
+ */
+
+// Just for conveninent C++ API
+#include <vector>
+#include <string>
+
+// C
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+extern "C" {
+#include <libavutil/opt.h>
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+#include <libswresample/swresample.h>
+}
+
+typedef uint64_t u64;
+typedef int64_t s64;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint16_t u16;
+typedef int16_t s16;
+typedef uint8_t u8;
+typedef int8_t s8;
+
+#define WAVE_SAMPLE_RATE 16000
+#define AVIO_CTX_BUF_SZ 4096
+
+static const char* ffmpegLog = getenv("FFMPEG_LOG");
+// Todo: add __FILE__ __LINE__
+#define LOG(...) \
+    do { if (ffmpegLog) fprintf(stderr, __VA_ARGS__); } while(0) // C99
+
+/*
+ * WAVE file header based on definition from
+ * https://gist.github.com/Jon-Schneider/8b7c53d27a7a13346a643dac9c19d34f
+ *
+ * We must ensure this structure doesn't have any holes or
+ * padding so we can just map it straight to the WAVE data.
+ */
+struct wave_hdr {
+    /* RIFF Header: "RIFF" */
+    char riff_header[4];
+    /* size of audio data + sizeof(struct wave_hdr) - 8 */
+    int wav_size;
+    /* "WAVE" */
+    char wav_header[4];
+
+    /* Format Header */
+    /* "fmt " (includes trailing space) */
+    char fmt_header[4];
+    /* Should be 16 for PCM */
+    int fmt_chunk_size;
+    /* Should be 1 for PCM. 3 for IEEE Float */
+    s16 audio_format;
+    s16 num_channels;
+    int sample_rate;
+    /*
+     * Number of bytes per second
+     * sample_rate * num_channels * bit_depth/8
+     */
+    int byte_rate;
+    /* num_channels * bytes per sample */
+    s16 sample_alignment;
+    /* bits per sample */
+    s16 bit_depth;
+
+    /* Data Header */
+    /* "data" */
+    char data_header[4];
+    /*
+     * size of audio
+     * number of samples * num_channels * bit_depth/8
+     */
+    int data_bytes;
+} __attribute__((__packed__));
+
+struct audio_buffer {
+    u8 *ptr;
+    int size; /* size left in the buffer */
+};
+
+static void set_wave_hdr(wave_hdr& wh, size_t size) {
+    memcpy(&wh.riff_header, "RIFF", 4);
+    wh.wav_size = size + sizeof(struct wave_hdr) - 8;
+    memcpy(&wh.wav_header, "WAVE", 4);
+    memcpy(&wh.fmt_header, "fmt ", 4);
+    wh.fmt_chunk_size = 16;
+    wh.audio_format = 1;
+    wh.num_channels = 1;
+    wh.sample_rate = WAVE_SAMPLE_RATE;
+    wh.sample_alignment = 2;
+    wh.bit_depth = 16;
+    wh.byte_rate = wh.sample_rate * wh.sample_alignment;
+    memcpy(&wh.data_header, "data", 4);
+    wh.data_bytes = size;
+}
+
+static void write_wave_hdr(int fd, size_t size) {
+    struct wave_hdr wh;
+    set_wave_hdr(wh, size);
+    write(fd, &wh, sizeof(struct wave_hdr));
+}
+
+static int map_file(int fd, u8 **ptr, size_t *size)
+{
+    struct stat sb;
+
+    fstat(fd, &sb);
+    *size = sb.st_size;
+
+    *ptr = (u8*)mmap(NULL, *size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
+    if (*ptr == MAP_FAILED) {
+        perror("mmap");
+        return -1;
+    }
+
+    return 0;
+}
+
+static int read_packet(void *opaque, u8 *buf, int buf_size)
+{
+    struct audio_buffer *audio_buf = (audio_buffer*)opaque;
+
+    buf_size = FFMIN(buf_size, audio_buf->size);
+
+    /* copy internal buffer data to buf */
+    memcpy(buf, audio_buf->ptr, buf_size);
+    audio_buf->ptr += buf_size;
+    audio_buf->size -= buf_size;
+
+    return buf_size;
+}
+
+static void convert_frame(struct SwrContext *swr, AVCodecContext *codec,
+                          AVFrame *frame, s16 **data, int *size, bool flush)
+{
+    int nr_samples;
+    s64 delay;
+    u8 *buffer;
+
+    delay = swr_get_delay(swr, codec->sample_rate);
+    nr_samples = av_rescale_rnd(delay + frame->nb_samples,
+                                WAVE_SAMPLE_RATE, codec->sample_rate,
+                                AV_ROUND_UP);
+    av_samples_alloc(&buffer, NULL, 1, nr_samples, AV_SAMPLE_FMT_S16, 0);
+
+    /*
+     * !flush is used to check if we are flushing any remaining
+     * conversion buffers...
+     */
+    nr_samples = swr_convert(swr, &buffer, nr_samples,
+                             !flush ? (const u8 **)frame->data : NULL,
+                             !flush ? frame->nb_samples : 0);
+
+    *data = (s16*)realloc(*data, (*size + nr_samples) * sizeof(s16));
+    memcpy(*data + *size, buffer, nr_samples * sizeof(s16));
+    *size += nr_samples;
+    av_freep(&buffer);
+}
+
+static bool is_audio_stream(const AVStream *stream)
+{
+    if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
+        return true;
+
+    return false;
+}
+
+// Return non zero on error, 0 on success
+// audio_buffer: input memory
+// data: decoded output audio data (wav file)
+// size: size of output data
+static int decode_audio(struct audio_buffer *audio_buf, s16 **data, int *size)
+{
+    LOG("decode_audio: input size: %d\n", audio_buf->size);
+    AVFormatContext *fmt_ctx;
+    AVIOContext *avio_ctx;
+    AVStream *stream;
+    AVCodecContext *codec;
+    AVPacket packet;
+    AVFrame *frame;
+    struct SwrContext *swr;
+    u8 *avio_ctx_buffer;
+    unsigned int i;
+    int stream_index = -1;
+    int err;
+    const size_t errbuffsize = 1024;
+    char errbuff[errbuffsize];
+
+    fmt_ctx = avformat_alloc_context();
+    avio_ctx_buffer = (u8*)av_malloc(AVIO_CTX_BUF_SZ);
+    LOG("Creating an avio context: AVIO_CTX_BUF_SZ=%d\n", AVIO_CTX_BUF_SZ);
+    avio_ctx = avio_alloc_context(avio_ctx_buffer, AVIO_CTX_BUF_SZ, 0, audio_buf, &read_packet, NULL, NULL);
+    fmt_ctx->pb = avio_ctx;
+
+    // open the input stream and read header
+    err = avformat_open_input(&fmt_ctx, NULL, NULL, NULL);
+    if (err) {
+        LOG("Could not read audio buffer: %d: %s\n", err, av_make_error_string(errbuff, errbuffsize, err));
+        return err;
+    }
+
+    err = avformat_find_stream_info(fmt_ctx, NULL);
+    if (err < 0) {
+        LOG("Could not retrieve stream info from audio buffer: %d\n", err);
+        return err;
+    }
+
+    for (i = 0; i < fmt_ctx->nb_streams; i++) {
+        if (is_audio_stream(fmt_ctx->streams[i])) {
+            stream_index = i;
+            break;
+        }
+    }
+
+    if (stream_index == -1) {
+        LOG("Could not retrieve audio stream from buffer\n");
+        return -1;
+    }
+
+    stream = fmt_ctx->streams[stream_index];
+    codec = avcodec_alloc_context3(
+            avcodec_find_decoder(stream->codecpar->codec_id));
+    avcodec_parameters_to_context(codec, stream->codecpar);
+    err = avcodec_open2(codec, avcodec_find_decoder(codec->codec_id),
+                        NULL);
+    if (err) {
+        LOG("Failed to open decoder for stream #%d in audio buffer\n", stream_index);
+        return err;
+    }
+
+    /* prepare resampler */
+    swr = swr_alloc();
+
+    av_opt_set_int(swr, "in_channel_count", codec->channels, 0);
+    av_opt_set_int(swr, "out_channel_count", 1, 0);
+    av_opt_set_int(swr, "in_channel_layout", codec->channel_layout, 0);
+    av_opt_set_int(swr, "out_channel_layout", AV_CH_LAYOUT_MONO, 0);
+    av_opt_set_int(swr, "in_sample_rate", codec->sample_rate, 0);
+    av_opt_set_int(swr, "out_sample_rate", WAVE_SAMPLE_RATE, 0);
+    av_opt_set_sample_fmt(swr, "in_sample_fmt", codec->sample_fmt, 0);
+    av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
+
+    swr_init(swr);
+    if (!swr_is_initialized(swr)) {
+        LOG("Resampler has not been properly initialized\n");
+        return -1;
+    }
+
+    av_init_packet(&packet);
+    frame = av_frame_alloc();
+    if (!frame) {
+        LOG("Error allocating the frame\n");
+        return -1;
+    }
+
+    /* iterate through frames */
+    *data = NULL;
+    *size = 0;
+    while (av_read_frame(fmt_ctx, &packet) >= 0) {
+        avcodec_send_packet(codec, &packet);
+
+        err = avcodec_receive_frame(codec, frame);
+        if (err == AVERROR(EAGAIN))
+            continue;
+
+        convert_frame(swr, codec, frame, data, size, false);
+    }
+    /* Flush any remaining conversion buffers... */
+    convert_frame(swr, codec, frame, data, size, true);
+
+    av_frame_free(&frame);
+    swr_free(&swr);
+    //avio_context_free(); // todo?
+    avcodec_close(codec);
+    avformat_close_input(&fmt_ctx);
+    avformat_free_context(fmt_ctx);
+
+    if (avio_ctx) {
+        av_freep(&avio_ctx->buffer);
+        av_freep(&avio_ctx);
+    }
+
+    return 0;
+}
+
+// in mem decoding/conversion/resampling:
+// ifname: input file path
+// owav_data: in mem wav file. Can be forwarded as it to whisper/drwav
+// return 0 on success
+int ffmpeg_decode_audio(const std::string &ifname, std::vector<uint8_t>& owav_data) {
+    LOG("ffmpeg_decode_audio: %s\n", ifname.c_str());
+    int ifd = open(ifname.c_str(), O_RDONLY);
+    if (ifd == -1) {
+        fprintf(stderr, "Couldn't open input file %s\n", ifname.c_str());
+        return -1;
+    }
+    u8 *ibuf = NULL;
+    size_t ibuf_size;
+    int err = map_file(ifd, &ibuf, &ibuf_size);
+    if (err) {
+        LOG("Couldn't map input file %s\n", ifname.c_str());
+        return err;
+    }
+    LOG("Mapped input file: %s size: %d\n", ibuf, (int) ibuf_size);
+    struct audio_buffer inaudio_buf;
+    inaudio_buf.ptr = ibuf;
+    inaudio_buf.size = ibuf_size;
+
+    s16 *odata=NULL;
+    int osize=0;
+
+    err = decode_audio(&inaudio_buf, &odata, &osize);
+    LOG("decode_audio returned %d \n", err);
+    if (err != 0) {
+        LOG("decode_audio failed\n");
+        return err;
+    }
+    LOG("decode_audio output size: %d\n", osize);
+
+    wave_hdr wh;
+    const size_t outdatasize = osize * sizeof(s16);
+    set_wave_hdr(wh, outdatasize);
+    owav_data.resize(sizeof(wave_hdr) + outdatasize);
+    // header:
+    memcpy(owav_data.data(), &wh, sizeof(wave_hdr));
+    // the data:
+    memcpy(owav_data.data() + sizeof(wave_hdr), odata, osize* sizeof(s16));
+
+    return 0;
+}
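A sketch of how this new translation unit is meant to be used (WHISPER_FFMPEG builds only): decode any ffmpeg-readable input into an in-memory 16 kHz mono WAV image, which the dr_wav-based readers can then open from memory:

#include <cstdio>
#include <string>
#include <vector>

// Declared here for the sketch; defined in examples/ffmpeg-transcode.cpp above.
int ffmpeg_decode_audio(const std::string & ifname, std::vector<uint8_t> & owav_data);

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <audio-file>\n", argv[0]);
        return 1;
    }

    std::vector<uint8_t> wav; // complete RIFF/WAVE image, header included
    if (ffmpeg_decode_audio(argv[1], wav) != 0) {
        fprintf(stderr, "decode failed\n");
        return 1;
    }
    fprintf(stderr, "decoded %zu bytes of WAV data\n", wav.size());
    return 0;
}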
@@ -9,7 +9,7 @@
 namespace grammar_parser {
     // NOTE: assumes valid utf8 (but checks for overrun)
     // copied from whisper.cpp
-    std::pair<uint32_t, const char *> decode_utf8(const char * src) {
+    static std::pair<uint32_t, const char *> decode_utf8(const char * src) {
         static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
         uint8_t first_byte = static_cast<uint8_t>(*src);
         uint8_t highbits = first_byte >> 4;
@@ -24,19 +24,19 @@ namespace grammar_parser {
         return std::make_pair(value, pos);
     }
 
-    uint32_t get_symbol_id(parse_state & state, const char * src, size_t len) {
+    static uint32_t get_symbol_id(parse_state & state, const char * src, size_t len) {
         uint32_t next_id = static_cast<uint32_t>(state.symbol_ids.size());
         auto result = state.symbol_ids.insert(std::make_pair(std::string(src, len), next_id));
         return result.first->second;
     }
 
-    uint32_t generate_symbol_id(parse_state & state, const std::string & base_name) {
+    static uint32_t generate_symbol_id(parse_state & state, const std::string & base_name) {
         uint32_t next_id = static_cast<uint32_t>(state.symbol_ids.size());
         state.symbol_ids[base_name + '_' + std::to_string(next_id)] = next_id;
         return next_id;
     }
 
-    void add_rule(
+    static void add_rule(
         parse_state & state,
         uint32_t rule_id,
         const std::vector<whisper_grammar_element> & rule) {
@@ -46,11 +46,11 @@ namespace grammar_parser {
         state.rules[rule_id] = rule;
     }
 
-    bool is_word_char(char c) {
+    static bool is_word_char(char c) {
         return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '-' || ('0' <= c && c <= '9');
     }
 
-    std::pair<uint32_t, const char *> parse_hex(const char * src, int size) {
+    static std::pair<uint32_t, const char *> parse_hex(const char * src, int size) {
         const char * pos = src;
         const char * end = src + size;
         uint32_t value = 0;
@@ -73,7 +73,7 @@ namespace grammar_parser {
         return std::make_pair(value, pos);
     }
 
-    const char * parse_space(const char * src, bool newline_ok) {
+    static const char * parse_space(const char * src, bool newline_ok) {
        const char * pos = src;
         while (*pos == ' ' || *pos == '\t' || *pos == '#' ||
                 (newline_ok && (*pos == '\r' || *pos == '\n'))) {
@@ -88,7 +88,7 @@ namespace grammar_parser {
         return pos;
     }
 
-    const char * parse_name(const char * src) {
+    static const char * parse_name(const char * src) {
         const char * pos = src;
         while (is_word_char(*pos)) {
             pos++;
@@ -99,7 +99,7 @@ namespace grammar_parser {
         return pos;
     }
 
-    std::pair<uint32_t, const char *> parse_char(const char * src) {
+    static std::pair<uint32_t, const char *> parse_char(const char * src) {
         if (*src == '\\') {
             switch (src[1]) {
                 case 'x': return parse_hex(src + 2, 2);
@@ -122,14 +122,14 @@ namespace grammar_parser {
         throw std::runtime_error("unexpected end of input");
     }
 
-    const char * parse_alternates(
+    static const char * parse_alternates(
         parse_state & state,
         const char * src,
         const std::string & rule_name,
         uint32_t rule_id,
         bool is_nested);
 
-    const char * parse_sequence(
+    static const char * parse_sequence(
         parse_state & state,
         const char * src,
         const std::string & rule_name,
@@ -190,7 +190,7 @@ namespace grammar_parser {
                 pos = parse_space(pos + 1, is_nested);
             } else if (*pos == '*' || *pos == '+' || *pos == '?') { // repetition operator
                 if (last_sym_start == out_elements.size()) {
-                    throw std::runtime_error(std::string("expecting preceeding item to */+/? at ") + pos);
+                    throw std::runtime_error(std::string("expecting preceding item to */+/? at ") + pos);
                 }
 
                 // apply transformation to previous symbol (last_sym_start to end) according to
|
// apply transformation to previous symbol (last_sym_start to end) according to
|
||||||
@ -229,7 +229,7 @@ namespace grammar_parser {
|
|||||||
return pos;
|
return pos;
|
||||||
}
|
}
|
||||||
|
|
||||||
const char * parse_alternates(
|
static const char * parse_alternates(
|
||||||
parse_state & state,
|
parse_state & state,
|
||||||
const char * src,
|
const char * src,
|
||||||
const std::string & rule_name,
|
const std::string & rule_name,
|
||||||
@ -247,7 +247,7 @@ namespace grammar_parser {
|
|||||||
return pos;
|
return pos;
|
||||||
}
|
}
|
||||||
|
|
||||||
const char * parse_rule(parse_state & state, const char * src) {
|
static const char * parse_rule(parse_state & state, const char * src) {
|
||||||
const char * name_end = parse_name(src);
|
const char * name_end = parse_name(src);
|
||||||
const char * pos = parse_space(name_end, false);
|
const char * pos = parse_space(name_end, false);
|
||||||
size_t name_len = name_end - src;
|
size_t name_len = name_end - src;
|
||||||
@ -285,7 +285,7 @@ namespace grammar_parser {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void print_grammar_char(FILE * file, uint32_t c) {
|
static void print_grammar_char(FILE * file, uint32_t c) {
|
||||||
if (0x20 <= c && c <= 0x7f) {
|
if (0x20 <= c && c <= 0x7f) {
|
||||||
fprintf(file, "%c", static_cast<char>(c));
|
fprintf(file, "%c", static_cast<char>(c));
|
||||||
} else {
|
} else {
|
||||||
@ -294,7 +294,7 @@ namespace grammar_parser {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bool is_char_element(whisper_grammar_element elem) {
|
static bool is_char_element(whisper_grammar_element elem) {
|
||||||
switch (elem.type) {
|
switch (elem.type) {
|
||||||
case WHISPER_GRETYPE_CHAR: return true;
|
case WHISPER_GRETYPE_CHAR: return true;
|
||||||
case WHISPER_GRETYPE_CHAR_NOT: return true;
|
case WHISPER_GRETYPE_CHAR_NOT: return true;
|
||||||
@ -304,7 +304,7 @@ namespace grammar_parser {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void print_rule_binary(FILE * file, const std::vector<whisper_grammar_element> & rule) {
|
static void print_rule_binary(FILE * file, const std::vector<whisper_grammar_element> & rule) {
|
||||||
for (auto elem : rule) {
|
for (auto elem : rule) {
|
||||||
switch (elem.type) {
|
switch (elem.type) {
|
||||||
case WHISPER_GRETYPE_END: fprintf(file, "END"); break;
|
case WHISPER_GRETYPE_END: fprintf(file, "END"); break;
|
||||||
@ -334,7 +334,7 @@ namespace grammar_parser {
|
|||||||
fprintf(file, "\n");
|
fprintf(file, "\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
void print_rule(
|
static void print_rule(
|
||||||
FILE * file,
|
FILE * file,
|
||||||
uint32_t rule_id,
|
uint32_t rule_id,
|
||||||
const std::vector<whisper_grammar_element> & rule,
|
const std::vector<whisper_grammar_element> & rule,
|
||||||
@ -413,7 +413,7 @@ namespace grammar_parser {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<const whisper_grammar_element *> parse_state::c_rules() const{
|
std::vector<const whisper_grammar_element *> parse_state::c_rules() const {
|
||||||
std::vector<const whisper_grammar_element *> ret;
|
std::vector<const whisper_grammar_element *> ret;
|
||||||
for (const auto & rule : rules) {
|
for (const auto & rule : rules) {
|
||||||
ret.push_back(rule.data());
|
ret.push_back(rule.data());
|
||||||
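For reference, a minimal standalone sketch (not taken from the repository, an illustration only) of what the 16-entry lookup table used by decode_utf8 above encodes: the high nibble of a UTF-8 lead byte selects the total sequence length.

    // Hypothetical illustration of the decode_utf8 lookup: high nibble -> sequence length.
    #include <cstdio>

    int main() {
        static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
        const unsigned char lead_bytes[] = { 0x41, 0xC3, 0xE2, 0xF0 }; // 'A', then 2-, 3-, 4-byte lead bytes
        for (unsigned char b : lead_bytes) {
            std::printf("lead byte 0x%02X -> %d byte(s)\n", b, lookup[b >> 4]);
        }
        return 0;
    }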
@@ -34,9 +34,6 @@ async function fetchRemote(url, cbProgress, cbPrint) {
 url,
 {
 method: 'GET',
-headers: {
-'Content-Type': 'application/octet-stream',
-},
 }
 );
@@ -48,7 +48,7 @@ if [ -n "$3" ]; then
 fi

 # Whisper models
-models=( "tiny.en" "tiny" "base.en" "base" "small.en" "small" "medium.en" "medium" "large-v1" "large-v2" "large-v3" )
+models=( "tiny.en" "tiny" "base.en" "base" "small.en" "small" "medium.en" "medium" "large-v1" "large-v2" "large-v3" "large-v3-turbo" )

 # list available models
 function list_models {
@@ -5,5 +5,5 @@ if (WHISPER_SDL2)

 include(DefaultTargetOptions)

-target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${CMAKE_THREAD_LIBS_INIT})
+target_link_libraries(${TARGET} PRIVATE common json_cpp common-sdl whisper ${CMAKE_THREAD_LIBS_INIT})
 endif ()
@@ -26,11 +26,11 @@ struct whisper_params {
 float vad_thold = 0.6f;
 float freq_thold = 100.0f;

-bool speed_up = false;
 bool translate = false;
 bool print_special = false;
 bool print_energy = false;
 bool use_gpu = true;
+bool flash_attn = false;

 std::string language = "en";
 std::string model = "models/ggml-base.en.bin";

@@ -53,7 +53,7 @@ struct commandset {

 void whisper_print_usage(int argc, char ** argv, const whisper_params & params);

-bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
+static bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
 for (int i = 1; i < argc; i++) {
 std::string arg = argv[i];

@@ -69,11 +69,11 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
 else if (arg == "-ac" || arg == "--audio-ctx") { params.audio_ctx = std::stoi(argv[++i]); }
 else if (arg == "-vth" || arg == "--vad-thold") { params.vad_thold = std::stof(argv[++i]); }
 else if (arg == "-fth" || arg == "--freq-thold") { params.freq_thold = std::stof(argv[++i]); }
-else if (arg == "-su" || arg == "--speed-up") { params.speed_up = true; }
 else if (arg == "-tr" || arg == "--translate") { params.translate = true; }
 else if (arg == "-ps" || arg == "--print-special") { params.print_special = true; }
 else if (arg == "-pe" || arg == "--print-energy") { params.print_energy = true; }
 else if (arg == "-ng" || arg == "--no-gpu") { params.use_gpu = false; }
+else if (arg == "-fa" || arg == "--flash-attn") { params.flash_attn = true; }
 else if (arg == "-l" || arg == "--language") { params.language = argv[++i]; }
 else if (arg == "-m" || arg == "--model") { params.model = argv[++i]; }
 else {

@@ -100,16 +100,16 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
 fprintf(stderr, " -ac N, --audio-ctx N [%-7d] audio context size (0 - all)\n", params.audio_ctx);
 fprintf(stderr, " -vth N, --vad-thold N [%-7.2f] voice activity detection threshold\n", params.vad_thold);
 fprintf(stderr, " -fth N, --freq-thold N [%-7.2f] high-pass frequency cutoff\n", params.freq_thold);
-fprintf(stderr, " -su, --speed-up [%-7s] speed up audio by x2 (reduced accuracy)\n", params.speed_up ? "true" : "false");
 fprintf(stderr, " -tr, --translate [%-7s] translate from source language to english\n", params.translate ? "true" : "false");
 fprintf(stderr, " -ps, --print-special [%-7s] print special tokens\n", params.print_special ? "true" : "false");
 fprintf(stderr, " -pe, --print-energy [%-7s] print sound energy (for debugging)\n", params.print_energy ? "true" : "false");
 fprintf(stderr, " -ng, --no-gpu [%-7s] disable GPU\n", params.use_gpu ? "false" : "true");
+fprintf(stderr, " -fa, --flash-attn [%-7s] flash attention\n", params.flash_attn ? "true" : "false");
 fprintf(stderr, " -l LANG, --language LANG [%-7s] spoken language\n", params.language.c_str());
 fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str());
 fprintf(stderr, "\n");
 }
-uint64_t wait_for_vad(audio_async & audio, json jparams, const whisper_params & params, uint64_t maxlength_ms, std::vector<float> & pcmf32) {
+static uint64_t wait_for_vad(audio_async & audio, json jparams, const whisper_params & params, uint64_t maxlength_ms, std::vector<float> & pcmf32) {
 using namespace std::chrono;
 uint64_t time_now = time_point_cast<milliseconds>(system_clock::now()).time_since_epoch().count();
 uint64_t start_time = time_now;

@@ -153,7 +153,7 @@ uint64_t wait_for_vad(audio_async & audio, json jparams, const whisper_params &
 return time_now;
 }

-json unguided_transcription(struct whisper_context * ctx, audio_async &audio, json jparams, const whisper_params &params) {
+static json unguided_transcription(struct whisper_context * ctx, audio_async &audio, json jparams, const whisper_params &params) {
 std::vector<whisper_token> prompt_tokens;
 std::vector<float> pcmf32;
 uint64_t unprocessed_audio_timestamp = wait_for_vad(audio, jparams, params, 10000U, pcmf32);

@@ -181,7 +181,6 @@ json unguided_transcription(struct whisper_context * ctx, audio_async &audio, js
 wparams.n_threads = params.n_threads;

 wparams.audio_ctx = params.audio_ctx;
-wparams.speed_up = params.speed_up;
 wparams.suppress_non_speech_tokens = true;
 // run the transformer and a single decoding pass
 if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {

@@ -200,7 +199,7 @@ json unguided_transcription(struct whisper_context * ctx, audio_async &audio, js

 // command-list mode
 // guide the transcription to match the most likely command from a provided list
-json guided_transcription(struct whisper_context * ctx, audio_async &audio, const whisper_params &params, json jparams, std::vector<struct commandset> commandset_list) {
+static json guided_transcription(struct whisper_context * ctx, audio_async &audio, const whisper_params &params, json jparams, std::vector<struct commandset> commandset_list) {
 struct commandset cs = commandset_list[jparams.value("commandset_index", commandset_list.size()-1)];
 std::vector<float> pcmf32;
 uint64_t unprocessed_audio_timestamp = wait_for_vad(audio, jparams, params, 2000U, pcmf32);

@@ -220,7 +219,6 @@ json guided_transcription(struct whisper_context * ctx, audio_async &audio, cons
 wparams.n_threads = params.n_threads;

 wparams.audio_ctx = params.audio_ctx;
-wparams.speed_up = params.speed_up;

 // TODO: Do some time testing. Does an overly long prompt slow down processing?
 // Set up command sets/precompute prompts

@@ -287,7 +285,7 @@ json guided_transcription(struct whisper_context * ctx, audio_async &audio, cons
 }
 }

-json register_commandset(struct whisper_context * ctx, json jparams, std::vector<struct commandset> &commandset_list) {
+static json register_commandset(struct whisper_context * ctx, json jparams, std::vector<struct commandset> &commandset_list) {
 // TODO: check for token collision
 struct commandset cs;

@@ -327,7 +325,8 @@ json register_commandset(struct whisper_context * ctx, json jparams, std::vector
 commandset_list.push_back(cs);
 return json{{"index",index}};
 }
-json seek(struct whisper_context * /*ctx*/, audio_async & /*audio*/, json /*params*/) {
+
+static json seek(struct whisper_context * /*ctx*/, audio_async & /*audio*/, json /*params*/) {
 // whisper_state has the pertinent offsets, but there also seem to be a large
 // number of scratch buffers that would prevent rewinding context in a manner similar to llama
 // I'll give this a another pass once everything else is implemented,

@@ -337,7 +336,8 @@ json seek(struct whisper_context * /*ctx*/, audio_async & /*audio*/, json /*para
 {"message", "Seeking is not yet supported."}
 };
 }
-json parse_job(const json &body, struct whisper_context * ctx, audio_async &audio, const whisper_params &params, std::vector<struct commandset> &commandset_list) {
+
+static json parse_job(const json &body, struct whisper_context * ctx, audio_async &audio, const whisper_params &params, std::vector<struct commandset> &commandset_list) {
 // See: https://www.jsonrpc.org/specification
 json id = body.at("id");
 try {

@@ -377,7 +377,7 @@ json parse_job(const json &body, struct whisper_context * ctx, audio_async &audi
 }
 }

-void process_loop(struct whisper_context * ctx, audio_async &audio, const whisper_params &params) {
+static void process_loop(struct whisper_context * ctx, audio_async &audio, const whisper_params &params) {
 std::deque<json> jobqueue;
 std::vector<struct commandset> commandset_list;
 while (true) {

@@ -435,8 +435,11 @@ int main(int argc, char ** argv) {
 }

 // whisper init
-struct whisper_context_params cparams;
-cparams.use_gpu = params.use_gpu;
+struct whisper_context_params cparams = whisper_context_default_params();
+
+cparams.use_gpu = params.use_gpu;
+cparams.flash_attn = params.flash_attn;

 struct whisper_context * ctx = whisper_init_from_file_with_params(params.model.c_str(), cparams);
 // init audio
@@ -3,4 +3,4 @@ add_executable(${TARGET} main.cpp)

 include(DefaultTargetOptions)

-target_link_libraries(${TARGET} PRIVATE common whisper ${CMAKE_THREAD_LIBS_INIT})
+target_link_libraries(${TARGET} PRIVATE common whisper ${FFMPEG_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
@@ -1,10 +1,12 @@
 #include "common.h"

 #include "whisper.h"
+#include "grammar-parser.h"

 #include <cmath>
 #include <fstream>
 #include <cstdio>
+#include <regex>
 #include <string>
 #include <thread>
 #include <vector>

@@ -14,36 +16,8 @@
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif

-// Terminal color map. 10 colors grouped in ranges [0.0, 0.1, ..., 0.9]
-// Lowest is red, middle is yellow, highest is green.
-const std::vector<std::string> k_colors = {
-"\033[38;5;196m", "\033[38;5;202m", "\033[38;5;208m", "\033[38;5;214m", "\033[38;5;220m",
-"\033[38;5;226m", "\033[38;5;190m", "\033[38;5;154m", "\033[38;5;118m", "\033[38;5;82m",
-};
-
-// 500 -> 00:05.000
-// 6000 -> 01:00.000
-std::string to_timestamp(int64_t t, bool comma = false) {
-int64_t msec = t * 10;
-int64_t hr = msec / (1000 * 60 * 60);
-msec = msec - hr * (1000 * 60 * 60);
-int64_t min = msec / (1000 * 60);
-msec = msec - min * (1000 * 60);
-int64_t sec = msec / 1000;
-msec = msec - sec * 1000;
-
-char buf[32];
-snprintf(buf, sizeof(buf), "%02d:%02d:%02d%s%03d", (int) hr, (int) min, (int) sec, comma ? "," : ".", (int) msec);
-
-return std::string(buf);
-}
-
-int timestamp_to_sample(int64_t t, int n_samples) {
-return std::max(0, std::min((int) n_samples - 1, (int) ((t*WHISPER_SAMPLE_RATE)/100)));
-}
-
 // helper function to replace substrings
-void replace_all(std::string & s, const std::string & search, const std::string & replace) {
+static void replace_all(std::string & s, const std::string & search, const std::string & replace) {
 for (size_t pos = 0; ; pos += replace.length()) {
 pos = s.find(search, pos);
 if (pos == std::string::npos) break;
@@ -54,22 +28,25 @@ void replace_all(std::string & s, const std::string & search, const std::string

 // command-line parameters
 struct whisper_params {
 int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
 int32_t n_processors = 1;
 int32_t offset_t_ms = 0;
 int32_t offset_n = 0;
 int32_t duration_ms = 0;
 int32_t progress_step = 5;
 int32_t max_context = -1;
 int32_t max_len = 0;
 int32_t best_of = whisper_full_default_params(WHISPER_SAMPLING_GREEDY).greedy.best_of;
 int32_t beam_size = whisper_full_default_params(WHISPER_SAMPLING_BEAM_SEARCH).beam_search.beam_size;
+int32_t audio_ctx = 0;

 float word_thold = 0.01f;
 float entropy_thold = 2.40f;
 float logprob_thold = -1.00f;
+float grammar_penalty = 100.0f;
+float temperature = 0.0f;
+float temperature_inc = 0.2f;

-bool speed_up = false;
 bool debug_mode = false;
 bool translate = false;
 bool detect_language = false;

@@ -85,30 +62,49 @@ struct whisper_params {
 bool output_jsn = false;
 bool output_jsn_full = false;
 bool output_lrc = false;
+bool no_prints = false;
 bool print_special = false;
 bool print_colors = false;
 bool print_progress = false;
 bool no_timestamps = false;
 bool log_score = false;
 bool use_gpu = true;
+bool flash_attn = false;

 std::string language = "en";
 std::string prompt;
 std::string font_path = "/System/Library/Fonts/Supplemental/Courier New Bold.ttf";
 std::string model = "models/ggml-base.en.bin";
+std::string grammar;
+std::string grammar_rule;

 // [TDRZ] speaker turn string
 std::string tdrz_speaker_turn = " [SPEAKER_TURN]"; // TODO: set from command line

+// A regular expression that matches tokens to suppress
+std::string suppress_regex;
+
 std::string openvino_encode_device = "CPU";

+std::string dtw = "";
+
 std::vector<std::string> fname_inp = {};
 std::vector<std::string> fname_out = {};
+
+grammar_parser::parse_state grammar_parsed;
 };

-void whisper_print_usage(int argc, char ** argv, const whisper_params & params);
+static void whisper_print_usage(int argc, char ** argv, const whisper_params & params);

-bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
+static char * whisper_param_turn_lowercase(char * in){
+int string_len = strlen(in);
+for (int i = 0; i < string_len; i++){
+*(in+i) = tolower((unsigned char)*(in+i));
+}
+return in;
+}
+
+static bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
 for (int i = 1; i < argc; i++) {
 std::string arg = argv[i];

@@ -135,10 +131,12 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
 else if (arg == "-ml" || arg == "--max-len") { params.max_len = std::stoi(argv[++i]); }
 else if (arg == "-bo" || arg == "--best-of") { params.best_of = std::stoi(argv[++i]); }
 else if (arg == "-bs" || arg == "--beam-size") { params.beam_size = std::stoi(argv[++i]); }
+else if (arg == "-ac" || arg == "--audio-ctx") { params.audio_ctx = std::stoi(argv[++i]); }
 else if (arg == "-wt" || arg == "--word-thold") { params.word_thold = std::stof(argv[++i]); }
 else if (arg == "-et" || arg == "--entropy-thold") { params.entropy_thold = std::stof(argv[++i]); }
 else if (arg == "-lpt" || arg == "--logprob-thold") { params.logprob_thold = std::stof(argv[++i]); }
-// else if (arg == "-su" || arg == "--speed-up") { params.speed_up = true; }
+else if (arg == "-tp" || arg == "--temperature") { params.temperature = std::stof(argv[++i]); }
+else if (arg == "-tpi" || arg == "--temperature-inc") { params.temperature_inc = std::stof(argv[++i]); }
 else if (arg == "-debug"|| arg == "--debug-mode") { params.debug_mode = true; }
 else if (arg == "-tr" || arg == "--translate") { params.translate = true; }
 else if (arg == "-di" || arg == "--diarize") { params.diarize = true; }

@@ -155,18 +153,25 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
 else if (arg == "-oj" || arg == "--output-json") { params.output_jsn = true; }
 else if (arg == "-ojf" || arg == "--output-json-full"){ params.output_jsn_full = params.output_jsn = true; }
 else if (arg == "-of" || arg == "--output-file") { params.fname_out.emplace_back(argv[++i]); }
+else if (arg == "-np" || arg == "--no-prints") { params.no_prints = true; }
 else if (arg == "-ps" || arg == "--print-special") { params.print_special = true; }
 else if (arg == "-pc" || arg == "--print-colors") { params.print_colors = true; }
 else if (arg == "-pp" || arg == "--print-progress") { params.print_progress = true; }
 else if (arg == "-nt" || arg == "--no-timestamps") { params.no_timestamps = true; }
-else if (arg == "-l" || arg == "--language") { params.language = argv[++i]; }
+else if (arg == "-l" || arg == "--language") { params.language = whisper_param_turn_lowercase(argv[++i]); }
 else if (arg == "-dl" || arg == "--detect-language") { params.detect_language = true; }
 else if ( arg == "--prompt") { params.prompt = argv[++i]; }
 else if (arg == "-m" || arg == "--model") { params.model = argv[++i]; }
 else if (arg == "-f" || arg == "--file") { params.fname_inp.emplace_back(argv[++i]); }
 else if (arg == "-oved" || arg == "--ov-e-device") { params.openvino_encode_device = argv[++i]; }
+else if (arg == "-dtw" || arg == "--dtw") { params.dtw = argv[++i]; }
 else if (arg == "-ls" || arg == "--log-score") { params.log_score = true; }
 else if (arg == "-ng" || arg == "--no-gpu") { params.use_gpu = false; }
+else if (arg == "-fa" || arg == "--flash-attn") { params.flash_attn = true; }
+else if ( arg == "--suppress-regex") { params.suppress_regex = argv[++i]; }
+else if ( arg == "--grammar") { params.grammar = argv[++i]; }
+else if ( arg == "--grammar-rule") { params.grammar_rule = argv[++i]; }
+else if ( arg == "--grammar-penalty") { params.grammar_penalty = std::stof(argv[++i]); }
 else {
 fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
 whisper_print_usage(argc, argv, params);

@@ -177,7 +182,7 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
 return true;
 }

-void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & params) {
+static void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & params) {
 fprintf(stderr, "\n");
 fprintf(stderr, "usage: %s [options] file0.wav file1.wav ...\n", argv[0]);
 fprintf(stderr, "\n");

@@ -193,10 +198,12 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
 fprintf(stderr, " -sow, --split-on-word [%-7s] split on word rather than on token\n", params.split_on_word ? "true" : "false");
 fprintf(stderr, " -bo N, --best-of N [%-7d] number of best candidates to keep\n", params.best_of);
 fprintf(stderr, " -bs N, --beam-size N [%-7d] beam size for beam search\n", params.beam_size);
+fprintf(stderr, " -ac N, --audio-ctx N [%-7d] audio context size (0 - all)\n", params.audio_ctx);
 fprintf(stderr, " -wt N, --word-thold N [%-7.2f] word timestamp probability threshold\n", params.word_thold);
 fprintf(stderr, " -et N, --entropy-thold N [%-7.2f] entropy threshold for decoder fail\n", params.entropy_thold);
 fprintf(stderr, " -lpt N, --logprob-thold N [%-7.2f] log probability threshold for decoder fail\n", params.logprob_thold);
-// fprintf(stderr, " -su, --speed-up [%-7s] speed up audio by x2 (reduced accuracy)\n", params.speed_up ? "true" : "false");
+fprintf(stderr, " -tp, --temperature N [%-7.2f] The sampling temperature, between 0 and 1\n", params.temperature);
+fprintf(stderr, " -tpi, --temperature-inc N [%-7.2f] The increment of temperature, between 0 and 1\n",params.temperature_inc);
 fprintf(stderr, " -debug, --debug-mode [%-7s] enable debug mode (eg. dump log_mel)\n", params.debug_mode ? "true" : "false");
 fprintf(stderr, " -tr, --translate [%-7s] translate from source language to english\n", params.translate ? "true" : "false");
 fprintf(stderr, " -di, --diarize [%-7s] stereo audio diarization\n", params.diarize ? "true" : "false");

@@ -212,18 +219,25 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
 fprintf(stderr, " -oj, --output-json [%-7s] output result in a JSON file\n", params.output_jsn ? "true" : "false");
 fprintf(stderr, " -ojf, --output-json-full [%-7s] include more information in the JSON file\n", params.output_jsn_full ? "true" : "false");
 fprintf(stderr, " -of FNAME, --output-file FNAME [%-7s] output file path (without file extension)\n", "");
+fprintf(stderr, " -np, --no-prints [%-7s] do not print anything other than the results\n", params.no_prints ? "true" : "false");
 fprintf(stderr, " -ps, --print-special [%-7s] print special tokens\n", params.print_special ? "true" : "false");
 fprintf(stderr, " -pc, --print-colors [%-7s] print colors\n", params.print_colors ? "true" : "false");
 fprintf(stderr, " -pp, --print-progress [%-7s] print progress\n", params.print_progress ? "true" : "false");
 fprintf(stderr, " -nt, --no-timestamps [%-7s] do not print timestamps\n", params.no_timestamps ? "true" : "false");
 fprintf(stderr, " -l LANG, --language LANG [%-7s] spoken language ('auto' for auto-detect)\n", params.language.c_str());
 fprintf(stderr, " -dl, --detect-language [%-7s] exit after automatically detecting language\n", params.detect_language ? "true" : "false");
-fprintf(stderr, " --prompt PROMPT [%-7s] initial prompt\n", params.prompt.c_str());
+fprintf(stderr, " --prompt PROMPT [%-7s] initial prompt (max n_text_ctx/2 tokens)\n", params.prompt.c_str());
 fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str());
 fprintf(stderr, " -f FNAME, --file FNAME [%-7s] input WAV file path\n", "");
 fprintf(stderr, " -oved D, --ov-e-device DNAME [%-7s] the OpenVINO device used for encode inference\n", params.openvino_encode_device.c_str());
+fprintf(stderr, " -dtw MODEL --dtw MODEL [%-7s] compute token-level timestamps\n", params.dtw.c_str());
 fprintf(stderr, " -ls, --log-score [%-7s] log best decoder scores of tokens\n", params.log_score?"true":"false");
 fprintf(stderr, " -ng, --no-gpu [%-7s] disable GPU\n", params.use_gpu ? "false" : "true");
+fprintf(stderr, " -fa, --flash-attn [%-7s] flash attention\n", params.flash_attn ? "true" : "false");
+fprintf(stderr, " --suppress-regex REGEX [%-7s] regular expression matching tokens to suppress\n", params.suppress_regex.c_str());
+fprintf(stderr, " --grammar GRAMMAR [%-7s] GBNF grammar to guide decoding\n", params.grammar.c_str());
+fprintf(stderr, " --grammar-rule RULE [%-7s] top-level GBNF grammar rule name\n", params.grammar_rule.c_str());
+fprintf(stderr, " --grammar-penalty N [%-7.1f] scales down logits of nongrammar tokens\n", params.grammar_penalty);
 fprintf(stderr, "\n");
 }

@@ -234,12 +248,12 @@ struct whisper_print_user_data {
 int progress_prev;
 };

-std::string estimate_diarization_speaker(std::vector<std::vector<float>> pcmf32s, int64_t t0, int64_t t1, bool id_only = false) {
+static std::string estimate_diarization_speaker(std::vector<std::vector<float>> pcmf32s, int64_t t0, int64_t t1, bool id_only = false) {
 std::string speaker = "";
 const int64_t n_samples = pcmf32s[0].size();

-const int64_t is0 = timestamp_to_sample(t0, n_samples);
-const int64_t is1 = timestamp_to_sample(t1, n_samples);
+const int64_t is0 = timestamp_to_sample(t0, n_samples, WHISPER_SAMPLE_RATE);
+const int64_t is1 = timestamp_to_sample(t1, n_samples, WHISPER_SAMPLE_RATE);

 double energy0 = 0.0f;
 double energy1 = 0.0f;
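The two calls above now pass the sample rate explicitly, since the local timestamp_to_sample helper was removed earlier in this diff (presumably in favor of a shared helper that takes the rate as a parameter). A minimal sketch of that conversion, assuming timestamps in 10 ms units as in the removed helper:

    // Sketch only: clamp-and-scale from a 10 ms timestamp unit to a sample index.
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    static int64_t timestamp_to_sample_sketch(int64_t t, int64_t n_samples, int64_t sample_rate) {
        return std::max<int64_t>(0, std::min<int64_t>(n_samples - 1, (t * sample_rate) / 100));
    }

    int main() {
        // t = 250 (2.5 s) at 16 kHz maps to sample 40000, clamped to the buffer length.
        std::printf("%lld\n", (long long) timestamp_to_sample_sketch(250, 30 * 16000, 16000));
        return 0;
    }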
@@ -266,7 +280,8 @@ std::string estimate_diarization_speaker(std::vector<std::vector<float>> pcmf32s

 return speaker;
 }
-void whisper_print_progress_callback(struct whisper_context * /*ctx*/, struct whisper_state * /*state*/, int progress, void * user_data) {
+
+static void whisper_print_progress_callback(struct whisper_context * /*ctx*/, struct whisper_state * /*state*/, int progress, void * user_data) {
 int progress_step = ((whisper_print_user_data *) user_data)->params->progress_step;
 int * progress_prev = &(((whisper_print_user_data *) user_data)->progress_prev);
 if (progress >= *progress_prev + progress_step) {

@@ -275,7 +290,7 @@ void whisper_print_progress_callback(struct whisper_context * /*ctx*/, struct wh
 }
 }

-void whisper_print_segment_callback(struct whisper_context * ctx, struct whisper_state * /*state*/, int n_new, void * user_data) {
+static void whisper_print_segment_callback(struct whisper_context * ctx, struct whisper_state * /*state*/, int n_new, void * user_data) {
 const auto & params = *((whisper_print_user_data *) user_data)->params;
 const auto & pcmf32s = *((whisper_print_user_data *) user_data)->pcmf32s;

@@ -344,7 +359,7 @@ void whisper_print_segment_callback(struct whisper_context * ctx, struct whisper
 }
 }

-bool output_txt(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
+static bool output_txt(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
 std::ofstream fout(fname);
 if (!fout.is_open()) {
 fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname);

@@ -371,7 +386,7 @@ bool output_txt(struct whisper_context * ctx, const char * fname, const whisper_
 return true;
 }

-bool output_vtt(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
+static bool output_vtt(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
 std::ofstream fout(fname);
 if (!fout.is_open()) {
 fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname);

@@ -403,7 +418,7 @@ bool output_vtt(struct whisper_context * ctx, const char * fname, const whisper_
 return true;
 }

-bool output_srt(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
+static bool output_srt(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
 std::ofstream fout(fname);
 if (!fout.is_open()) {
 fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname);

@@ -432,7 +447,7 @@ bool output_srt(struct whisper_context * ctx, const char * fname, const whisper_
 return true;
 }

-char *escape_double_quotes_and_backslashes(const char *str) {
+static char * escape_double_quotes_and_backslashes(const char * str) {
 if (str == NULL) {
 return NULL;
 }

@@ -445,7 +460,7 @@ char *escape_double_quotes_and_backslashes(const char *str) {
 }
 }

-char *escaped = (char *)calloc(escaped_length, 1); // pre-zeroed
+char * escaped = (char *)calloc(escaped_length, 1); // pre-zeroed
 if (escaped == NULL) {
 return NULL;
 }

@@ -463,7 +478,39 @@ char *escape_double_quotes_and_backslashes(const char *str) {
 return escaped;
 }

-bool output_csv(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
+// double quote should be escaped by another double quote. (rfc4180)
+static char * escape_double_quotes_in_csv(const char * str) {
+if (str == NULL) {
+return NULL;
+}
+
+size_t escaped_length = strlen(str) + 1;
+
+for (size_t i = 0; str[i] != '\0'; i++) {
+if (str[i] == '"') {
+escaped_length++;
+}
+}
+
+char *escaped = (char *)calloc(escaped_length, 1); // pre-zeroed
+if (escaped == NULL) {
+return NULL;
+}
+
+size_t pos = 0;
+for (size_t i = 0; str[i] != '\0'; i++) {
+if (str[i] == '"') {
+escaped[pos++] = '"';
+}
+escaped[pos++] = str[i];
+}
+
+// no need to set zero due to calloc() being used prior
+
+return escaped;
+}
+
+static bool output_csv(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
 std::ofstream fout(fname);
 if (!fout.is_open()) {
 fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname);

@@ -484,7 +531,7 @@ bool output_csv(struct whisper_context * ctx, const char * fname, const whisper_
 const char * text = whisper_full_get_segment_text(ctx, i);
 const int64_t t0 = whisper_full_get_segment_t0(ctx, i);
 const int64_t t1 = whisper_full_get_segment_t1(ctx, i);
-char * text_escaped = escape_double_quotes_and_backslashes(text);
+char * text_escaped = escape_double_quotes_in_csv(text);

 //need to multiply times returned from whisper_full_get_segment_t{0,1}() by 10 to get milliseconds.
 fout << 10 * t0 << "," << 10 * t1 << ",";

@@ -498,7 +545,7 @@ bool output_csv(struct whisper_context * ctx, const char * fname, const whisper_
 return true;
 }

-bool output_score(struct whisper_context * ctx, const char * fname, const whisper_params & /*params*/, std::vector<std::vector<float>> /*pcmf32s*/) {
+static bool output_score(struct whisper_context * ctx, const char * fname, const whisper_params & /*params*/, std::vector<std::vector<float>> /*pcmf32s*/) {
 std::ofstream fout(fname);
 fprintf(stderr, "%s: saving output to '%s'\n", __func__, fname);
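The new escape_double_quotes_in_csv above follows RFC 4180: a double quote inside a quoted field is escaped by doubling it, while backslashes are left alone (unlike the JSON-oriented escaper it replaces in output_csv). A small std::string-based sketch of the same rule, with a worked example input:

    // Sketch only: RFC 4180 style quote doubling (the function in the diff uses calloc/char*).
    #include <cstdio>
    #include <string>

    static std::string escape_csv_field(const std::string & in) {
        std::string out;
        out.reserve(in.size() + 2);
        for (char c : in) {
            if (c == '"') {
                out += '"'; // double every quote
            }
            out += c;
        }
        return out;
    }

    int main() {
        // say "hi"  ->  say ""hi""   (the caller still wraps the field in quotes)
        std::printf("\"%s\"\n", escape_csv_field("say \"hi\"").c_str());
        return 0;
    }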
@@ -517,7 +564,7 @@ bool output_score(struct whisper_context * ctx, const char * fname, const whispe
 return true;
 }

-bool output_json(
+static bool output_json(
 struct whisper_context * ctx,
 const char * fname,
 const whisper_params & params,

@@ -663,7 +710,8 @@ bool output_json(
 times_o(token.t0, token.t1, false);
 }
 value_i("id", token.id, false);
-value_f("p", token.p, true);
+value_f("p", token.p, false);
+value_f("t_dtw", token.t_dtw, true);
 end_obj(j == (n - 1));
 }
 end_arr(!params.diarize && !params.tinydiarize);

@@ -687,7 +735,7 @@ bool output_json(
 // karaoke video generation
 // outputs a bash script that uses ffmpeg to generate a video with the subtitles
 // TODO: font parameter adjustments
-bool output_wts(struct whisper_context * ctx, const char * fname, const char * fname_inp, const whisper_params & params, float t_sec, std::vector<std::vector<float>> pcmf32s) {
+static bool output_wts(struct whisper_context * ctx, const char * fname, const char * fname_inp, const whisper_params & params, float t_sec, std::vector<std::vector<float>> pcmf32s) {
 std::ofstream fout(fname);

 fprintf(stderr, "%s: saving output to '%s'\n", __func__, fname);

@@ -812,7 +860,7 @@ bool output_wts(struct whisper_context * ctx, const char * fname, const char * f
 return true;
 }

-bool output_lrc(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
+static bool output_lrc(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
 std::ofstream fout(fname);
 if (!fout.is_open()) {
 fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname);

@@ -852,14 +900,59 @@ bool output_lrc(struct whisper_context * ctx, const char * fname, const whisper_
 return true;
 }

+static void cb_log_disable(enum ggml_log_level , const char * , void * ) { }
+
 int main(int argc, char ** argv) {
 whisper_params params;

+// If the only argument starts with "@", read arguments line-by-line
+// from the given file.
+std::vector<std::string> vec_args;
+if (argc == 2 && argv != nullptr && argv[1] != nullptr && argv[1][0] == '@') {
+// Save the name of the executable.
+vec_args.push_back(argv[0]);
+
+// Open the response file.
+char const * rspfile = argv[1] + sizeof(char);
+std::ifstream fin(rspfile);
+if (fin.is_open() == false) {
+fprintf(stderr, "error: response file '%s' not found\n", rspfile);
+return 1;
+}
+
+// Read the entire response file.
+std::string line;
+while (std::getline(fin, line)) {
+vec_args.push_back(line);
+}
+
+// Use the contents of the response file as the command-line arguments.
+argc = static_cast<int>(vec_args.size());
+argv = static_cast<char **>(alloca(argc * sizeof (char *)));
+for (int i = 0; i < argc; ++i) {
+argv[i] = const_cast<char *>(vec_args[i].c_str());
+}
+}
+
 if (whisper_params_parse(argc, argv, params) == false) {
 whisper_print_usage(argc, argv, params);
 return 1;
 }

+// remove non-existent files
+for (auto it = params.fname_inp.begin(); it != params.fname_inp.end();) {
+const auto fname_inp = it->c_str();
+
+if (*it != "-" && !is_file_exist(fname_inp)) {
+fprintf(stderr, "error: input file not found '%s'\n", fname_inp);
+it = params.fname_inp.erase(it);
+continue;
+}
+
+it++;
+}
+
 if (params.fname_inp.empty()) {
 fprintf(stderr, "error: no input files specified\n");
 whisper_print_usage(argc, argv, params);
|
|||||||
exit(0);
|
exit(0);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (params.no_prints) {
|
||||||
|
whisper_log_set(cb_log_disable, NULL);
|
||||||
|
}
|
||||||
|
|
||||||
// whisper init
|
// whisper init
|
||||||
|
|
||||||
struct whisper_context_params cparams;
|
struct whisper_context_params cparams = whisper_context_default_params();
|
||||||
cparams.use_gpu = params.use_gpu;
|
|
||||||
|
cparams.use_gpu = params.use_gpu;
|
||||||
|
cparams.flash_attn = params.flash_attn;
|
||||||
|
|
||||||
|
if (!params.dtw.empty()) {
|
||||||
|
cparams.dtw_token_timestamps = true;
|
||||||
|
cparams.dtw_aheads_preset = WHISPER_AHEADS_NONE;
|
||||||
|
|
||||||
|
if (params.dtw == "tiny") cparams.dtw_aheads_preset = WHISPER_AHEADS_TINY;
|
||||||
|
if (params.dtw == "tiny.en") cparams.dtw_aheads_preset = WHISPER_AHEADS_TINY_EN;
|
||||||
|
if (params.dtw == "base") cparams.dtw_aheads_preset = WHISPER_AHEADS_BASE;
|
||||||
|
if (params.dtw == "base.en") cparams.dtw_aheads_preset = WHISPER_AHEADS_BASE_EN;
|
||||||
|
if (params.dtw == "small") cparams.dtw_aheads_preset = WHISPER_AHEADS_SMALL;
|
||||||
|
if (params.dtw == "small.en") cparams.dtw_aheads_preset = WHISPER_AHEADS_SMALL_EN;
|
||||||
|
if (params.dtw == "medium") cparams.dtw_aheads_preset = WHISPER_AHEADS_MEDIUM;
|
||||||
|
if (params.dtw == "medium.en") cparams.dtw_aheads_preset = WHISPER_AHEADS_MEDIUM_EN;
|
||||||
|
if (params.dtw == "large.v1") cparams.dtw_aheads_preset = WHISPER_AHEADS_LARGE_V1;
|
||||||
|
if (params.dtw == "large.v2") cparams.dtw_aheads_preset = WHISPER_AHEADS_LARGE_V2;
|
||||||
|
if (params.dtw == "large.v3") cparams.dtw_aheads_preset = WHISPER_AHEADS_LARGE_V3;
|
||||||
|
if (params.dtw == "large.v3.turbo") cparams.dtw_aheads_preset = WHISPER_AHEADS_LARGE_V3_TURBO;
|
||||||
|
|
||||||
|
if (cparams.dtw_aheads_preset == WHISPER_AHEADS_NONE) {
|
||||||
|
fprintf(stderr, "error: unknown DTW preset '%s'\n", params.dtw.c_str());
|
||||||
|
return 3;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
struct whisper_context * ctx = whisper_init_from_file_with_params(params.model.c_str(), cparams);
|
struct whisper_context * ctx = whisper_init_from_file_with_params(params.model.c_str(), cparams);
|
||||||
|
|
||||||
@ -893,6 +1015,29 @@ int main(int argc, char ** argv) {
|
|||||||
// initialize openvino encoder. this has no effect on whisper.cpp builds that don't have OpenVINO configured
|
// initialize openvino encoder. this has no effect on whisper.cpp builds that don't have OpenVINO configured
|
||||||
whisper_ctx_init_openvino_encoder(ctx, nullptr, params.openvino_encode_device.c_str(), nullptr);
|
whisper_ctx_init_openvino_encoder(ctx, nullptr, params.openvino_encode_device.c_str(), nullptr);
|
||||||
|
|
||||||
|
if (!params.grammar.empty()) {
|
||||||
|
auto & grammar = params.grammar_parsed;
|
||||||
|
if (is_file_exist(params.grammar.c_str())) {
|
||||||
|
// read grammar from file
|
||||||
|
std::ifstream ifs(params.grammar.c_str());
|
||||||
|
const std::string txt = std::string((std::istreambuf_iterator<char>(ifs)), std::istreambuf_iterator<char>());
|
||||||
|
grammar = grammar_parser::parse(txt.c_str());
|
||||||
|
} else {
|
||||||
|
// read grammar from string
|
||||||
|
grammar = grammar_parser::parse(params.grammar.c_str());
|
||||||
|
}
|
||||||
|
|
||||||
|
// will be empty (default) if there are parse errors
|
||||||
|
if (grammar.rules.empty()) {
|
||||||
|
fprintf(stderr, "error: failed to parse grammar \"%s\"\n", params.grammar.c_str());
|
||||||
|
return 4;
|
||||||
|
} else {
|
||||||
|
fprintf(stderr, "%s: grammar:\n", __func__);
|
||||||
|
grammar_parser::print_grammar(stderr, grammar);
|
||||||
|
fprintf(stderr, "\n");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
for (int f = 0; f < (int) params.fname_inp.size(); ++f) {
|
for (int f = 0; f < (int) params.fname_inp.size(); ++f) {
|
||||||
const auto fname_inp = params.fname_inp[f];
|
const auto fname_inp = params.fname_inp[f];
|
||||||
const auto fname_out = f < (int) params.fname_out.size() && !params.fname_out[f].empty() ? params.fname_out[f] : params.fname_inp[f];
|
const auto fname_out = f < (int) params.fname_out.size() && !params.fname_out[f].empty() ? params.fname_out[f] : params.fname_inp[f];
|
||||||
@ -905,26 +1050,25 @@ int main(int argc, char ** argv) {
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
// print system information
|
if (!whisper_is_multilingual(ctx)) {
|
||||||
{
|
if (params.language != "en" || params.translate) {
|
||||||
|
params.language = "en";
|
||||||
|
params.translate = false;
|
||||||
|
fprintf(stderr, "%s: WARNING: model is not multilingual, ignoring language and translation options\n", __func__);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (params.detect_language) {
|
||||||
|
params.language = "auto";
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!params.no_prints) {
|
||||||
|
// print system information
|
||||||
fprintf(stderr, "\n");
|
fprintf(stderr, "\n");
|
||||||
fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
|
fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
|
||||||
params.n_threads*params.n_processors, std::thread::hardware_concurrency(), whisper_print_system_info());
|
params.n_threads*params.n_processors, std::thread::hardware_concurrency(), whisper_print_system_info());
|
||||||
}
|
|
||||||
|
|
||||||
// print some info about the processing
|
// print some info about the processing
|
||||||
{
|
|
||||||
fprintf(stderr, "\n");
|
fprintf(stderr, "\n");
|
||||||
if (!whisper_is_multilingual(ctx)) {
|
|
||||||
if (params.language != "en" || params.translate) {
|
|
||||||
params.language = "en";
|
|
||||||
params.translate = false;
|
|
||||||
fprintf(stderr, "%s: WARNING: model is not multilingual, ignoring language and translation options\n", __func__);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (params.detect_language) {
|
|
||||||
params.language = "auto";
|
|
||||||
}
|
|
||||||
fprintf(stderr, "%s: processing '%s' (%d samples, %.1f sec), %d threads, %d processors, %d beams + best of %d, lang = %s, task = %s, %stimestamps = %d ...\n",
|
fprintf(stderr, "%s: processing '%s' (%d samples, %.1f sec), %d threads, %d processors, %d beams + best of %d, lang = %s, task = %s, %stimestamps = %d ...\n",
|
||||||
__func__, fname_inp.c_str(), int(pcmf32.size()), float(pcmf32.size())/WHISPER_SAMPLE_RATE,
|
__func__, fname_inp.c_str(), int(pcmf32.size()), float(pcmf32.size())/WHISPER_SAMPLE_RATE,
|
||||||
params.n_threads, params.n_processors, params.beam_size, params.best_of,
|
params.n_threads, params.n_processors, params.beam_size, params.best_of,
|
||||||
@ -940,7 +1084,8 @@ int main(int argc, char ** argv) {
|
|||||||
{
|
{
|
||||||
whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
|
whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
|
||||||
|
|
||||||
wparams.strategy = params.beam_size > 1 ? WHISPER_SAMPLING_BEAM_SEARCH : WHISPER_SAMPLING_GREEDY;
|
const bool use_grammar = (!params.grammar_parsed.rules.empty() && !params.grammar_rule.empty());
|
||||||
|
wparams.strategy = (params.beam_size > 1 || use_grammar) ? WHISPER_SAMPLING_BEAM_SEARCH : WHISPER_SAMPLING_GREEDY;
|
||||||
|
|
||||||
wparams.print_realtime = false;
|
wparams.print_realtime = false;
|
||||||
wparams.print_progress = params.print_progress;
|
wparams.print_progress = params.print_progress;
|
||||||
@ -958,23 +1103,43 @@ int main(int argc, char ** argv) {
|
|||||||
wparams.thold_pt = params.word_thold;
|
wparams.thold_pt = params.word_thold;
|
||||||
wparams.max_len = params.output_wts && params.max_len == 0 ? 60 : params.max_len;
|
wparams.max_len = params.output_wts && params.max_len == 0 ? 60 : params.max_len;
|
||||||
wparams.split_on_word = params.split_on_word;
|
wparams.split_on_word = params.split_on_word;
|
||||||
|
wparams.audio_ctx = params.audio_ctx;
|
||||||
|
|
||||||
wparams.speed_up = params.speed_up;
|
|
||||||
wparams.debug_mode = params.debug_mode;
|
wparams.debug_mode = params.debug_mode;
|
||||||
|
|
||||||
wparams.tdrz_enable = params.tinydiarize; // [TDRZ]
|
wparams.tdrz_enable = params.tinydiarize; // [TDRZ]
|
||||||
|
|
||||||
|
wparams.suppress_regex = params.suppress_regex.empty() ? nullptr : params.suppress_regex.c_str();
|
||||||
|
|
||||||
wparams.initial_prompt = params.prompt.c_str();
|
wparams.initial_prompt = params.prompt.c_str();
|
||||||
|
|
||||||
wparams.greedy.best_of = params.best_of;
|
wparams.greedy.best_of = params.best_of;
|
||||||
wparams.beam_search.beam_size = params.beam_size;
|
wparams.beam_search.beam_size = params.beam_size;
|
||||||
|
|
||||||
wparams.temperature_inc = params.no_fallback ? 0.0f : wparams.temperature_inc;
|
wparams.temperature_inc = params.no_fallback ? 0.0f : params.temperature_inc;
|
||||||
|
wparams.temperature = params.temperature;
|
||||||
|
|
||||||
wparams.entropy_thold = params.entropy_thold;
|
wparams.entropy_thold = params.entropy_thold;
|
||||||
wparams.logprob_thold = params.logprob_thold;
|
wparams.logprob_thold = params.logprob_thold;
|
||||||
|
|
||||||
|
wparams.no_timestamps = params.no_timestamps;
|
||||||
|
|
||||||
whisper_print_user_data user_data = { ¶ms, &pcmf32s, 0 };
|
whisper_print_user_data user_data = { ¶ms, &pcmf32s, 0 };
|
||||||
|
|
||||||
|
const auto & grammar_parsed = params.grammar_parsed;
|
||||||
|
auto grammar_rules = grammar_parsed.c_rules();
|
||||||
|
|
||||||
|
if (use_grammar) {
|
||||||
|
if (grammar_parsed.symbol_ids.find(params.grammar_rule) == grammar_parsed.symbol_ids.end()) {
|
||||||
|
fprintf(stderr, "%s: warning: grammar rule '%s' not found - skipping grammar sampling\n", __func__, params.grammar_rule.c_str());
|
||||||
|
} else {
|
||||||
|
wparams.grammar_rules = grammar_rules.data();
|
||||||
|
wparams.n_grammar_rules = grammar_rules.size();
|
||||||
|
wparams.i_start_rule = grammar_parsed.symbol_ids.at(params.grammar_rule);
|
||||||
|
wparams.grammar_penalty = params.grammar_penalty;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// this callback is called on each new segment
|
// this callback is called on each new segment
|
||||||
if (!wparams.print_realtime) {
|
if (!wparams.print_realtime) {
|
||||||
wparams.new_segment_callback = whisper_print_segment_callback;
|
wparams.new_segment_callback = whisper_print_segment_callback;
|
||||||
@ -1071,7 +1236,9 @@ int main(int argc, char ** argv) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
whisper_print_timings(ctx);
|
if (!params.no_prints) {
|
||||||
|
whisper_print_timings(ctx);
|
||||||
|
}
|
||||||
whisper_free(ctx);
|
whisper_free(ctx);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
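Note: a minimal usage sketch of the response-file handling added above, where a single `@<file>` argument makes the tool read one command-line argument per line from that file. The file name and paths below are illustrative assumptions, not taken from the diff.

```bash
# Each line of the response file becomes one argument (sketch; paths are assumptions).
cat > args.txt <<'EOF'
-m
models/ggml-base.en.bin
-f
samples/jfk.wav
EOF

./main @args.txt
```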
examples/python/test_whisper_processor.py (new file, 7 lines)
@@ -0,0 +1,7 @@
+import whisper_processor
+
+try:
+    result = whisper_processor.process_audio("./audio/wake_word_detected16k.wav", "base.en")
+    print(result)
+except Exception as e:
+    print(f"Error: {e}")

examples/python/whisper_processor.py (new file, 54 lines)
@@ -0,0 +1,54 @@
+import subprocess
+import sys
+import os
+
+def process_audio(wav_file, model_name="base.en"):
+    """
+    Processes an audio file using a specified model and returns the processed string.
+
+    :param wav_file: Path to the WAV file
+    :param model_name: Name of the model to use
+    :return: Processed string output from the audio processing
+    :raises: Exception if an error occurs during processing
+    """
+
+    model = f"./models/ggml-{model_name}.bin"
+
+    # Check if the file exists
+    if not os.path.exists(model):
+        raise FileNotFoundError(f"Model file not found: {model} \n\nDownload a model with this command:\n\n> bash ./models/download-ggml-model.sh {model_name}\n\n")
+
+    if not os.path.exists(wav_file):
+        raise FileNotFoundError(f"WAV file not found: {wav_file}")
+
+    full_command = f"./main -m {model} -f {wav_file} -nt"
+
+    # Execute the command
+    process = subprocess.Popen(full_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+    # Get the output and error (if any)
+    output, error = process.communicate()
+
+    if error:
+        raise Exception(f"Error processing audio: {error.decode('utf-8')}")
+
+    # Process and return the output string
+    decoded_str = output.decode('utf-8').strip()
+    processed_str = decoded_str.replace('[BLANK_AUDIO]', '').strip()
+
+    return processed_str
+
+def main():
+    if len(sys.argv) >= 2:
+        wav_file = sys.argv[1]
+        model_name = sys.argv[2] if len(sys.argv) == 3 else "base.en"
+        try:
+            result = process_audio(wav_file, model_name)
+            print(result)
+        except Exception as e:
+            print(f"Error: {e}")
+    else:
+        print("Usage: python whisper_processor.py <wav_file> [<model_name>]")
+
+if __name__ == "__main__":
+    main()
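Note: a usage sketch for the new Python helper above. It shells out to `./main` with `./models/ggml-<name>.bin`, so it assumes the binary is built, the model is downloaded, and the command is run from the repository root; the sample WAV path is an assumption.

```bash
# Download the model the helper expects, then transcribe a WAV file through it.
bash ./models/download-ggml-model.sh base.en
python examples/python/whisper_processor.py samples/jfk.wav base.en
```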
@@ -36,7 +36,7 @@ struct whisper_filters {
 };

 // quantize a model
-bool whisper_model_quantize(const std::string & fname_inp, const std::string & fname_out, ggml_ftype ftype) {
+static bool whisper_model_quantize(const std::string & fname_inp, const std::string & fname_out, ggml_ftype ftype) {
 gpt_vocab vocab;

 printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str());
@@ -1,12 +1,10 @@
 set(TARGET server)
-add_executable(${TARGET} server.cpp httplib.h json.hpp)
+add_executable(${TARGET} server.cpp httplib.h)

 include(DefaultTargetOptions)

-target_link_libraries(${TARGET} PRIVATE common whisper ${CMAKE_THREAD_LIBS_INIT})
+target_link_libraries(${TARGET} PRIVATE common json_cpp whisper ${CMAKE_THREAD_LIBS_INIT})

-# Check if the compiler is MinGW
+if (WIN32)
-if(MINGW)
+target_link_libraries(${TARGET} PRIVATE ws2_32)
-# Link the necessary libraries for SSL and Winsock
-target_link_libraries(${TARGET} PRIVATE -lcrypt32 -lssl -lcrypto -lws2_32)
 endif()
@@ -46,7 +46,7 @@ options:
 --convert, [false ] Convert audio to WAV, requires ffmpeg on the server
 ```

 > [!WARNING]
 > **Do not run the server example with administrative privileges and ensure it's operated in a sandbox environment, especially since it involves risky operations like accepting user file uploads and using ffmpeg for format conversions. Always validate and sanitize inputs to guard against potential security threats.**

 ## request examples
@@ -56,8 +56,9 @@ options:
 curl 127.0.0.1:8080/inference \
 -H "Content-Type: multipart/form-data" \
 -F file="@<file-path>" \
--F temperature="0.2" \
+-F temperature="0.0" \
--F response-format="json"
+-F temperature_inc="0.2" \
+-F response_format="json"
 ```

 **/load**
examples/server/json.hpp (24596 lines): file diff suppressed because it is too large.
@@ -18,17 +18,10 @@
 #endif

 using namespace httplib;
-using json = nlohmann::json;
+using json = nlohmann::ordered_json;

 namespace {

-// Terminal color map. 10 colors grouped in ranges [0.0, 0.1, ..., 0.9]
-// Lowest is red, middle is yellow, highest is green.
-const std::vector<std::string> k_colors = {
-"\033[38;5;196m", "\033[38;5;202m", "\033[38;5;208m", "\033[38;5;214m", "\033[38;5;220m",
-"\033[38;5;226m", "\033[38;5;190m", "\033[38;5;154m", "\033[38;5;118m", "\033[38;5;82m",
-};

 // output formats
 const std::string json_format = "json";
 const std::string text_format = "text";
@@ -40,32 +33,35 @@ struct server_params
 {
 std::string hostname = "127.0.0.1";
 std::string public_path = "examples/server/public";
+std::string request_path = "";
+std::string inference_path = "/inference";
+
 int32_t port = 8080;
 int32_t read_timeout = 600;
 int32_t write_timeout = 600;

 bool ffmpeg_converter = false;
 };

 struct whisper_params {
 int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
 int32_t n_processors = 1;
 int32_t offset_t_ms = 0;
 int32_t offset_n = 0;
 int32_t duration_ms = 0;
 int32_t progress_step = 5;
 int32_t max_context = -1;
 int32_t max_len = 0;
 int32_t best_of = 2;
 int32_t beam_size = -1;
+int32_t audio_ctx = 0;
+
 float word_thold = 0.01f;
 float entropy_thold = 2.40f;
 float logprob_thold = -1.00f;
-float userdef_temp = 0.20f;
+float temperature = 0.00f;
+float temperature_inc = 0.20f;

-bool speed_up = false;
 bool debug_mode = false;
 bool translate = false;
 bool detect_language = false;
@@ -79,6 +75,7 @@ struct whisper_params {
 bool print_progress = false;
 bool no_timestamps = false;
 bool use_gpu = true;
+bool flash_attn = false;

 std::string language = "en";
 std::string prompt = "";
@@ -91,37 +88,11 @@ struct whisper_params {
 std::string tdrz_speaker_turn = " [SPEAKER_TURN]"; // TODO: set from command line

 std::string openvino_encode_device = "CPU";

+std::string dtw = "";
 };

-// 500 -> 00:05.000
+void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & params, const server_params& sparams) {
-// 6000 -> 01:00.000
-std::string to_timestamp(int64_t t, bool comma = false) {
-int64_t msec = t * 10;
-int64_t hr = msec / (1000 * 60 * 60);
-msec = msec - hr * (1000 * 60 * 60);
-int64_t min = msec / (1000 * 60);
-msec = msec - min * (1000 * 60);
-int64_t sec = msec / 1000;
-msec = msec - sec * 1000;
-
-char buf[32];
-snprintf(buf, sizeof(buf), "%02d:%02d:%02d%s%03d", (int) hr, (int) min, (int) sec, comma ? "," : ".", (int) msec);
-
-return std::string(buf);
-}
-
-int timestamp_to_sample(int64_t t, int n_samples) {
-return std::max(0, std::min((int) n_samples - 1, (int) ((t*WHISPER_SAMPLE_RATE)/100)));
-}
-
-bool is_file_exist(const char *fileName)
-{
-std::ifstream infile(fileName);
-return infile.good();
-}
-
-void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & params,
-const server_params& sparams) {
 fprintf(stderr, "\n");
 fprintf(stderr, "usage: %s [options] \n", argv[0]);
 fprintf(stderr, "\n");
@@ -137,10 +108,10 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
 fprintf(stderr, " -sow, --split-on-word [%-7s] split on word rather than on token\n", params.split_on_word ? "true" : "false");
 fprintf(stderr, " -bo N, --best-of N [%-7d] number of best candidates to keep\n", params.best_of);
 fprintf(stderr, " -bs N, --beam-size N [%-7d] beam size for beam search\n", params.beam_size);
+fprintf(stderr, " -ac N, --audio-ctx N [%-7d] audio context size (0 - all)\n", params.audio_ctx);
 fprintf(stderr, " -wt N, --word-thold N [%-7.2f] word timestamp probability threshold\n", params.word_thold);
 fprintf(stderr, " -et N, --entropy-thold N [%-7.2f] entropy threshold for decoder fail\n", params.entropy_thold);
 fprintf(stderr, " -lpt N, --logprob-thold N [%-7.2f] log probability threshold for decoder fail\n", params.logprob_thold);
-// fprintf(stderr, " -su, --speed-up [%-7s] speed up audio by x2 (reduced accuracy)\n", params.speed_up ? "true" : "false");
 fprintf(stderr, " -debug, --debug-mode [%-7s] enable debug mode (eg. dump log_mel)\n", params.debug_mode ? "true" : "false");
 fprintf(stderr, " -tr, --translate [%-7s] translate from source language to english\n", params.translate ? "true" : "false");
 fprintf(stderr, " -di, --diarize [%-7s] stereo audio diarization\n", params.diarize ? "true" : "false");
@@ -157,9 +128,12 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
 fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str());
 fprintf(stderr, " -oved D, --ov-e-device DNAME [%-7s] the OpenVINO device used for encode inference\n", params.openvino_encode_device.c_str());
 // server params
+fprintf(stderr, " -dtw MODEL --dtw MODEL [%-7s] compute token-level timestamps\n", params.dtw.c_str());
 fprintf(stderr, " --host HOST, [%-7s] Hostname/ip-adress for the server\n", sparams.hostname.c_str());
 fprintf(stderr, " --port PORT, [%-7d] Port number for the server\n", sparams.port);
 fprintf(stderr, " --public PATH, [%-7s] Path to the public folder\n", sparams.public_path.c_str());
+fprintf(stderr, " --request-path PATH, [%-7s] Request path for all requests\n", sparams.request_path.c_str());
+fprintf(stderr, " --inference-path PATH, [%-7s] Inference path for all requests\n", sparams.inference_path.c_str());
 fprintf(stderr, " --convert, [%-7s] Convert audio to WAV, requires ffmpeg on the server", sparams.ffmpeg_converter ? "true" : "false");
 fprintf(stderr, "\n");
 }
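Note: a hedged launch sketch combining options from the updated usage text above; the binary name follows the CMake target, and the model path and values are illustrative assumptions.

```bash
# Start the server with some of the newly documented options (values are assumptions).
./server -m models/ggml-base.en.bin \
  --host 127.0.0.1 --port 8080 \
  --request-path /api --inference-path /inference \
  --convert \
  -dtw base.en
```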
@@ -181,10 +155,10 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params, serve
 else if (arg == "-ml" || arg == "--max-len") { params.max_len = std::stoi(argv[++i]); }
 else if (arg == "-bo" || arg == "--best-of") { params.best_of = std::stoi(argv[++i]); }
 else if (arg == "-bs" || arg == "--beam-size") { params.beam_size = std::stoi(argv[++i]); }
+else if (arg == "-ac" || arg == "--audio-ctx") { params.audio_ctx = std::stoi(argv[++i]); }
 else if (arg == "-wt" || arg == "--word-thold") { params.word_thold = std::stof(argv[++i]); }
 else if (arg == "-et" || arg == "--entropy-thold") { params.entropy_thold = std::stof(argv[++i]); }
 else if (arg == "-lpt" || arg == "--logprob-thold") { params.logprob_thold = std::stof(argv[++i]); }
-// else if (arg == "-su" || arg == "--speed-up") { params.speed_up = true; }
 else if (arg == "-debug"|| arg == "--debug-mode") { params.debug_mode = true; }
 else if (arg == "-tr" || arg == "--translate") { params.translate = true; }
 else if (arg == "-di" || arg == "--diarize") { params.diarize = true; }
@@ -202,11 +176,15 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params, serve
 else if ( arg == "--prompt") { params.prompt = argv[++i]; }
 else if (arg == "-m" || arg == "--model") { params.model = argv[++i]; }
 else if (arg == "-oved" || arg == "--ov-e-device") { params.openvino_encode_device = argv[++i]; }
+else if (arg == "-dtw" || arg == "--dtw") { params.dtw = argv[++i]; }
 else if (arg == "-ng" || arg == "--no-gpu") { params.use_gpu = false; }
+else if (arg == "-fa" || arg == "--flash-attn") { params.flash_attn = true; }
 // server params
 else if ( arg == "--port") { sparams.port = std::stoi(argv[++i]); }
 else if ( arg == "--host") { sparams.hostname = argv[++i]; }
 else if ( arg == "--public") { sparams.public_path = argv[++i]; }
+else if ( arg == "--request-path") { sparams.request_path = argv[++i]; }
+else if ( arg == "--inference-path") { sparams.inference_path = argv[++i]; }
 else if ( arg == "--convert") { sparams.ffmpeg_converter = true; }
 else {
 fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
@@ -241,7 +219,7 @@ void check_ffmpeg_availibility() {
 bool convert_to_wav(const std::string & temp_filename, std::string & error_resp) {
 std::ostringstream cmd_stream;
 std::string converted_filename_temp = temp_filename + "_temp.wav";
-cmd_stream << "ffmpeg -i \"" << temp_filename << "\" -ar 16000 -ac 1 -c:a pcm_s16le \"" << converted_filename_temp << "\" 2>&1";
+cmd_stream << "ffmpeg -i \"" << temp_filename << "\" -y -ar 16000 -ac 1 -c:a pcm_s16le \"" << converted_filename_temp << "\" 2>&1";
 std::string cmd = cmd_stream.str();

 int status = std::system(cmd.c_str());
@@ -268,8 +246,8 @@ std::string estimate_diarization_speaker(std::vector<std::vector<float>> pcmf32s
 std::string speaker = "";
 const int64_t n_samples = pcmf32s[0].size();

-const int64_t is0 = timestamp_to_sample(t0, n_samples);
+const int64_t is0 = timestamp_to_sample(t0, n_samples, WHISPER_SAMPLE_RATE);
-const int64_t is1 = timestamp_to_sample(t1, n_samples);
+const int64_t is1 = timestamp_to_sample(t1, n_samples, WHISPER_SAMPLE_RATE);

 double energy0 = 0.0f;
 double energy1 = 0.0f;
@@ -393,36 +371,106 @@ std::string output_str(struct whisper_context * ctx, const whisper_params & para
 return result.str();
 }

+bool parse_str_to_bool(const std::string & s) {
+if (s == "true" || s == "1" || s == "yes" || s == "y") {
+return true;
+}
+return false;
+}
+
 void get_req_parameters(const Request & req, whisper_params & params)
 {
-// user model configu.has_fileion
+if (req.has_file("offset_t"))
-if (req.has_file("offset-t"))
 {
-params.offset_t_ms = std::stoi(req.get_file_value("offset-t").content);
+params.offset_t_ms = std::stoi(req.get_file_value("offset_t").content);
 }
-if (req.has_file("offset-n"))
+if (req.has_file("offset_n"))
 {
-params.offset_n = std::stoi(req.get_file_value("offset-n").content);
+params.offset_n = std::stoi(req.get_file_value("offset_n").content);
 }
 if (req.has_file("duration"))
 {
 params.duration_ms = std::stoi(req.get_file_value("duration").content);
 }
-if (req.has_file("max-context"))
+if (req.has_file("max_context"))
 {
-params.max_context = std::stoi(req.get_file_value("max-context").content);
+params.max_context = std::stoi(req.get_file_value("max_context").content);
+}
+if (req.has_file("max_len"))
+{
+params.max_len = std::stoi(req.get_file_value("max_len").content);
+}
+if (req.has_file("best_of"))
+{
+params.best_of = std::stoi(req.get_file_value("best_of").content);
+}
+if (req.has_file("beam_size"))
+{
+params.beam_size = std::stoi(req.get_file_value("beam_size").content);
+}
+if (req.has_file("audio_ctx"))
+{
+params.audio_ctx = std::stof(req.get_file_value("audio_ctx").content);
+}
+if (req.has_file("word_thold"))
+{
+params.word_thold = std::stof(req.get_file_value("word_thold").content);
+}
+if (req.has_file("entropy_thold"))
+{
+params.entropy_thold = std::stof(req.get_file_value("entropy_thold").content);
+}
+if (req.has_file("logprob_thold"))
+{
+params.logprob_thold = std::stof(req.get_file_value("logprob_thold").content);
+}
+if (req.has_file("debug_mode"))
+{
+params.debug_mode = parse_str_to_bool(req.get_file_value("debug_mode").content);
+}
+if (req.has_file("translate"))
+{
+params.translate = parse_str_to_bool(req.get_file_value("translate").content);
+}
+if (req.has_file("diarize"))
+{
+params.diarize = parse_str_to_bool(req.get_file_value("diarize").content);
+}
+if (req.has_file("tinydiarize"))
+{
+params.tinydiarize = parse_str_to_bool(req.get_file_value("tinydiarize").content);
+}
+if (req.has_file("split_on_word"))
+{
+params.split_on_word = parse_str_to_bool(req.get_file_value("split_on_word").content);
+}
+if (req.has_file("no_timestamps"))
+{
+params.no_timestamps = parse_str_to_bool(req.get_file_value("no_timestamps").content);
+}
+if (req.has_file("language"))
+{
+params.language = req.get_file_value("language").content;
+}
+if (req.has_file("detect_language"))
+{
+params.detect_language = parse_str_to_bool(req.get_file_value("detect_language").content);
 }
 if (req.has_file("prompt"))
 {
 params.prompt = req.get_file_value("prompt").content;
 }
-if (req.has_file("response-format"))
+if (req.has_file("response_format"))
 {
-params.response_format = req.get_file_value("response-format").content;
+params.response_format = req.get_file_value("response_format").content;
 }
 if (req.has_file("temperature"))
 {
-params.userdef_temp = std::stof(req.get_file_value("temperature").content);
+params.temperature = std::stof(req.get_file_value("temperature").content);
+}
+if (req.has_file("temperature_inc"))
+{
+params.temperature_inc = std::stof(req.get_file_value("temperature_inc").content);
 }
 }

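Note: the get_req_parameters() changes above switch the multipart fields to snake_case and accept several new ones. A hedged request sketch; the port and file path are assumptions.

```bash
# Exercise a few of the newly parsed multipart fields (values are assumptions).
curl 127.0.0.1:8080/inference \
  -H "Content-Type: multipart/form-data" \
  -F file="@samples/jfk.wav" \
  -F translate="true" \
  -F beam_size="5" \
  -F temperature_inc="0.2" \
  -F response_format="json"
```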
@@ -455,8 +503,54 @@ int main(int argc, char ** argv) {
 check_ffmpeg_availibility();
 }
 // whisper init
-struct whisper_context_params cparams;
+struct whisper_context_params cparams = whisper_context_default_params();
-cparams.use_gpu = params.use_gpu;
+cparams.use_gpu = params.use_gpu;
+cparams.flash_attn = params.flash_attn;
+
+if (!params.dtw.empty()) {
+cparams.dtw_token_timestamps = true;
+cparams.dtw_aheads_preset = WHISPER_AHEADS_NONE;
+
+if (params.dtw == "tiny") {
+cparams.dtw_aheads_preset = WHISPER_AHEADS_TINY;
+}
+if (params.dtw == "tiny.en") {
+cparams.dtw_aheads_preset = WHISPER_AHEADS_TINY_EN;
+}
+if (params.dtw == "base") {
+cparams.dtw_aheads_preset = WHISPER_AHEADS_BASE;
+}
+if (params.dtw == "base.en") {
+cparams.dtw_aheads_preset = WHISPER_AHEADS_BASE_EN;
+}
+if (params.dtw == "small") {
+cparams.dtw_aheads_preset = WHISPER_AHEADS_SMALL;
+}
+if (params.dtw == "small.en") {
+cparams.dtw_aheads_preset = WHISPER_AHEADS_SMALL_EN;
+}
+if (params.dtw == "medium") {
+cparams.dtw_aheads_preset = WHISPER_AHEADS_MEDIUM;
+}
+if (params.dtw == "medium.en") {
+cparams.dtw_aheads_preset = WHISPER_AHEADS_MEDIUM_EN;
+}
+if (params.dtw == "large.v1") {
+cparams.dtw_aheads_preset = WHISPER_AHEADS_LARGE_V1;
+}
+if (params.dtw == "large.v2") {
+cparams.dtw_aheads_preset = WHISPER_AHEADS_LARGE_V2;
+}
+if (params.dtw == "large.v3") {
+cparams.dtw_aheads_preset = WHISPER_AHEADS_LARGE_V3;
+}
+
+if (cparams.dtw_aheads_preset == WHISPER_AHEADS_NONE) {
+fprintf(stderr, "error: unknown DTW preset '%s'\n", params.dtw.c_str());
+return 3;
+}
+}

 struct whisper_context * ctx = whisper_init_from_file_with_params(params.model.c_str(), cparams);

@@ -471,19 +565,94 @@ int main(int argc, char ** argv) {
 Server svr;
 svr.set_default_headers({{"Server", "whisper.cpp"},
 {"Access-Control-Allow-Origin", "*"},
-{"Access-Control-Allow-Headers", "content-type"}});
+{"Access-Control-Allow-Headers", "content-type, authorization"}});

-std::string const default_content = "<html>hello</html>";
+std::string const default_content = R"(
+<html>
+<head>
+<title>Whisper.cpp Server</title>
+<meta charset="utf-8">
+<meta name="viewport" content="width=device-width">
+<style>
+body {
+font-family: sans-serif;
+}
+form {
+display: flex;
+flex-direction: column;
+align-items: flex-start;
+}
+label {
+margin-bottom: 0.5rem;
+}
+input, select {
+margin-bottom: 1rem;
+}
+button {
+margin-top: 1rem;
+}
+</style>
+</head>
+<body>
+<h1>Whisper.cpp Server</h1>
+
+<h2>/inference</h2>
+<pre>
+curl 127.0.0.1:)" + std::to_string(sparams.port) + R"(/inference \
+-H "Content-Type: multipart/form-data" \
+-F file="@<file-path>" \
+-F temperature="0.0" \
+-F temperature_inc="0.2" \
+-F response_format="json"
+</pre>
+
+<h2>/load</h2>
+<pre>
+curl 127.0.0.1:)" + std::to_string(sparams.port) + R"(/load \
+-H "Content-Type: multipart/form-data" \
+-F model="<path-to-model-file>"
+</pre>
+
+<div>
+<h2>Try it out</h2>
+<form action="/inference" method="POST" enctype="multipart/form-data">
+<label for="file">Choose an audio file:</label>
+<input type="file" id="file" name="file" accept="audio/*" required><br>
+
+<label for="temperature">Temperature:</label>
+<input type="number" id="temperature" name="temperature" value="0.0" step="0.01" placeholder="e.g., 0.0"><br>
+
+<label for="response_format">Response Format:</label>
+<select id="response_format" name="response_format">
+<option value="verbose_json">Verbose JSON</option>
+<option value="json">JSON</option>
+<option value="text">Text</option>
+<option value="srt">SRT</option>
+<option value="vtt">VTT</option>
+</select><br>
+
+<button type="submit">Submit</button>
+</form>
+</div>
+</body>
+</html>
+)";
+
+// store default params so we can reset after each inference request
+whisper_params default_params = params;

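Note: the in-page help added above also documents a /load endpoint for switching models at runtime. A hedged sketch of that request; the port and model path are assumptions.

```bash
# Ask a running server to load a different model (path is an assumption).
curl 127.0.0.1:8080/load \
  -H "Content-Type: multipart/form-data" \
  -F model="models/ggml-small.en.bin"
```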
 // this is only called if no index.html is found in the public --path
-svr.Get("/", [&default_content](const Request &, Response &res){
+svr.Get(sparams.request_path + "/", [&default_content](const Request &, Response &res){
 res.set_content(default_content, "text/html");
 return false;
 });

-svr.Post("/inference", [&](const Request &req, Response &res){
+svr.Options(sparams.request_path + sparams.inference_path, [&](const Request &, Response &){
+});
+
+svr.Post(sparams.request_path + sparams.inference_path, [&](const Request &req, Response &res){
 // acquire whisper model mutex lock
-whisper_mutex.lock();
+std::lock_guard<std::mutex> lock(whisper_mutex);

 // first check user requested fields of the request
 if (!req.has_file("file"))
@@ -491,7 +660,6 @@ int main(int argc, char ** argv) {
 fprintf(stderr, "error: no 'file' field in the request\n");
 const std::string error_resp = "{\"error\":\"no 'file' field in the request\"}";
 res.set_content(error_resp, "application/json");
-whisper_mutex.unlock();
 return;
 }
 auto audio_file = req.get_file_value("file");
@@ -506,35 +674,43 @@ int main(int argc, char ** argv) {
 std::vector<float> pcmf32;               // mono-channel F32 PCM
 std::vector<std::vector<float>> pcmf32s; // stereo-channel F32 PCM

-// write to temporary file
-const std::string temp_filename = "whisper_server_temp_file.wav";
-std::ofstream temp_file{temp_filename, std::ios::binary};
-temp_file << audio_file.content;
-temp_file.close();

-// if file is not wav, convert to wav

 if (sparams.ffmpeg_converter) {
+// if file is not wav, convert to wav
+// write to temporary file
+const std::string temp_filename_base = std::tmpnam(nullptr);
+const std::string temp_filename = temp_filename_base + ".wav";
+std::ofstream temp_file{temp_filename, std::ios::binary};
+temp_file << audio_file.content;
+temp_file.close();
+
 std::string error_resp = "{\"error\":\"Failed to execute ffmpeg command.\"}";
 const bool is_converted = convert_to_wav(temp_filename, error_resp);
 if (!is_converted) {
 res.set_content(error_resp, "application/json");
-whisper_mutex.unlock();
+return;
+}
+
+// read wav content into pcmf32
+if (!::read_wav(temp_filename, pcmf32, pcmf32s, params.diarize))
+{
+fprintf(stderr, "error: failed to read WAV file '%s'\n", temp_filename.c_str());
+const std::string error_resp = "{\"error\":\"failed to read WAV file\"}";
+res.set_content(error_resp, "application/json");
+std::remove(temp_filename.c_str());
+return;
+}
+// remove temp file
+std::remove(temp_filename.c_str());
+} else {
+if (!::read_wav(audio_file.content, pcmf32, pcmf32s, params.diarize))
+{
+fprintf(stderr, "error: failed to read WAV file\n");
+const std::string error_resp = "{\"error\":\"failed to read WAV file\"}";
+res.set_content(error_resp, "application/json");
 return;
 }
 }

-// read wav content into pcmf32
-if (!::read_wav(temp_filename, pcmf32, pcmf32s, params.diarize)) {
-fprintf(stderr, "error: failed to read WAV file '%s'\n", temp_filename.c_str());
-const std::string error_resp = "{\"error\":\"failed to read WAV file\"}";
-res.set_content(error_resp, "application/json");
-std::remove(temp_filename.c_str());
-whisper_mutex.unlock();
-return;
-}
-// remove temp file
-std::remove(temp_filename.c_str());

 printf("Successfully loaded %s\n", filename.c_str());

@@ -591,8 +767,8 @@ int main(int argc, char ** argv) {
 wparams.thold_pt = params.word_thold;
 wparams.max_len = params.max_len == 0 ? 60 : params.max_len;
 wparams.split_on_word = params.split_on_word;
+wparams.audio_ctx = params.audio_ctx;

-wparams.speed_up = params.speed_up;
 wparams.debug_mode = params.debug_mode;

 wparams.tdrz_enable = params.tinydiarize; // [TDRZ]
@@ -602,10 +778,14 @@ int main(int argc, char ** argv) {
 wparams.greedy.best_of = params.best_of;
 wparams.beam_search.beam_size = params.beam_size;

-wparams.temperature_inc = params.userdef_temp;
+wparams.temperature = params.temperature;
+wparams.temperature_inc = params.temperature_inc;
 wparams.entropy_thold = params.entropy_thold;
 wparams.logprob_thold = params.logprob_thold;

+wparams.no_timestamps = params.no_timestamps;
+wparams.token_timestamps = !params.no_timestamps && params.response_format == vjson_format;

 whisper_print_user_data user_data = { &params, &pcmf32s, 0 };

 // this callback is called on each new segment
@@ -648,7 +828,6 @@ int main(int argc, char ** argv) {
 fprintf(stderr, "%s: failed to process audio\n", argv[0]);
 const std::string error_resp = "{\"error\":\"failed to process audio\"}";
 res.set_content(error_resp, "application/json");
-whisper_mutex.unlock();
 return;
 }
 }
@@ -657,7 +836,7 @@ int main(int argc, char ** argv) {
 if (params.response_format == text_format)
 {
 std::string results = output_str(ctx, params, pcmf32s);
-res.set_content(results.c_str(), "text/html");
+res.set_content(results.c_str(), "text/html; charset=utf-8");
 }
 else if (params.response_format == srt_format)
 {
@@ -702,6 +881,60 @@ int main(int argc, char ** argv) {
 ss << speaker << text << "\n\n";
 }
 res.set_content(ss.str(), "text/vtt");
+} else if (params.response_format == vjson_format) {
+/* try to match openai/whisper's Python format */
+std::string results = output_str(ctx, params, pcmf32s);
+json jres = json{
+{"task", params.translate ? "translate" : "transcribe"},
+{"language", whisper_lang_str_full(whisper_full_lang_id(ctx))},
+{"duration", float(pcmf32.size())/WHISPER_SAMPLE_RATE},
+{"text", results},
+{"segments", json::array()}
+};
+const int n_segments = whisper_full_n_segments(ctx);
+for (int i = 0; i < n_segments; ++i)
+{
+json segment = json{
+{"id", i},
+{"text", whisper_full_get_segment_text(ctx, i)},
+};
+
+if (!params.no_timestamps) {
+segment["start"] = whisper_full_get_segment_t0(ctx, i) * 0.01;
+segment["end"] = whisper_full_get_segment_t1(ctx, i) * 0.01;
+}
+
+float total_logprob = 0;
+const int n_tokens = whisper_full_n_tokens(ctx, i);
+for (int j = 0; j < n_tokens; ++j) {
+whisper_token_data token = whisper_full_get_token_data(ctx, i, j);
+if (token.id >= whisper_token_eot(ctx)) {
+continue;
+}
+
+segment["tokens"].push_back(token.id);
+json word = json{{"word", whisper_full_get_token_text(ctx, i, j)}};
+if (!params.no_timestamps) {
+word["start"] = token.t0 * 0.01;
+word["end"] = token.t1 * 0.01;
+word["t_dtw"] = token.t_dtw;
+}
+word["probability"] = token.p;
+total_logprob += token.plog;
+segment["words"].push_back(word);
+}
+
+segment["temperature"] = params.temperature;
+segment["avg_logprob"] = total_logprob / n_tokens;
+
+// TODO compression_ratio and no_speech_prob are not implemented yet
+// segment["compression_ratio"] = 0;
+// segment["no_speech_prob"] = 0;
+
+jres["segments"].push_back(segment);
+}
+res.set_content(jres.dump(-1, ' ', false, json::error_handler_t::replace),
+"application/json");
 }
 // TODO add more output formats
 else
@@ -714,17 +947,16 @@ int main(int argc, char ** argv) {
 "application/json");
 }

-// return whisper model mutex lock
+// reset params to their defaults
-whisper_mutex.unlock();
+params = default_params;
 });
-svr.Post("/load", [&](const Request &req, Response &res){
+svr.Post(sparams.request_path + "/load", [&](const Request &req, Response &res){
-whisper_mutex.lock();
+std::lock_guard<std::mutex> lock(whisper_mutex);
 if (!req.has_file("model"))
 {
 fprintf(stderr, "error: no 'model' field in the request\n");
 const std::string error_resp = "{\"error\":\"no 'model' field in the request\"}";
 res.set_content(error_resp, "application/json");
-whisper_mutex.unlock();
 return;
 }
 std::string model = req.get_file_value("model").content;
@@ -733,7 +965,6 @@ int main(int argc, char ** argv) {
 fprintf(stderr, "error: 'model': %s not found!\n", model.c_str());
 const std::string error_resp = "{\"error\":\"model not found!\"}";
 res.set_content(error_resp, "application/json");
-whisper_mutex.unlock();
 return;
 }

@@ -756,7 +987,6 @@ int main(int argc, char ** argv) {
 res.set_content(success, "application/text");

 // check if the model is in the file system
-whisper_mutex.unlock();
 });

 svr.set_exception_handler([](const Request &, Response &res, std::exception_ptr ep) {
@@ -773,11 +1003,11 @@ int main(int argc, char ** argv) {
 res.status = 500;
 });

-svr.set_error_handler([](const Request &, Response &res) {
+svr.set_error_handler([](const Request &req, Response &res) {
 if (res.status == 400) {
 res.set_content("Invalid request", "text/plain");
 } else if (res.status != 500) {
-res.set_content("File Not Found", "text/plain");
+res.set_content("File Not Found (" + req.path + ")", "text/plain");
 res.status = 404;
 }
 });
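Note: the new verbose_json branch above returns task, language, duration, the full text, and per-segment token/word details. A hedged request sketch; the port, file path, and the jq post-processing step are assumptions.

```bash
# Request the richer verbose_json output and extract a few top-level fields.
curl 127.0.0.1:8080/inference \
  -H "Content-Type: multipart/form-data" \
  -F file="@samples/jfk.wav" \
  -F response_format="verbose_json" | jq '{language, duration, text}'
```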
@@ -103,11 +103,11 @@ void stream_main(size_t index) {

 {
 const int n_segments = whisper_full_n_segments(ctx);
-for (int i = n_segments - 1; i < n_segments; ++i) {
+if (n_segments > 0) {
-const char * text = whisper_full_get_segment_text(ctx, i);
+const char * text = whisper_full_get_segment_text(ctx, n_segments - 1);

-const int64_t t0 = whisper_full_get_segment_t0(ctx, i);
+const int64_t t0 = whisper_full_get_segment_t0(ctx, n_segments - 1);
-const int64_t t1 = whisper_full_get_segment_t1(ctx, i);
+const int64_t t1 = whisper_full_get_segment_t1(ctx, n_segments - 1);

 printf("transcribed: %s\n", text);

@@ -4,7 +4,7 @@ This is a naive example of performing real-time inference on audio from your mic
 The `stream` tool samples the audio every half a second and runs the transcription continously.
 More info is available in [issue #10](https://github.com/ggerganov/whisper.cpp/issues/10).

-```java
+```bash
 ./stream -m ./models/ggml-base.en.bin -t 8 --step 500 --length 5000
 ```

@@ -14,7 +14,7 @@ https://user-images.githubusercontent.com/1991296/194935793-76afede7-cfa8-48d8-a
 
 Setting the `--step` argument to `0` enables the sliding window mode:

-```java
+```bash
 ./stream -m ./models/ggml-small.en.bin -t 6 --step 0 --length 30000 -vth 0.6
 ```

@@ -30,17 +30,21 @@ a transcription block that is suitable for parsing.
 The `stream` tool depends on SDL2 library to capture audio from the microphone. You can build it like this:

 ```bash
-# Install SDL2 on Linux
+# Install SDL2
+# On Debian based linux distributions:
 sudo apt-get install libsdl2-dev

+# On Fedora Linux:
+sudo dnf install SDL2 SDL2-devel
+
 # Install SDL2 on Mac OS
 brew install sdl2

 make stream
 ```

 Ensure you are at the root of the repo when running `make stream`. Not within the `examples/stream` dir
 as the libraries needed like `common-sdl.h` are located within `examples`. Attempting to compile within
 `examples/steam` means your compiler cannot find them and it gives an error it cannot find the file.

 ```bash
@@ -14,20 +14,6 @@
 #include <fstream>

-
-// 500 -> 00:05.000
-// 6000 -> 01:00.000
-std::string to_timestamp(int64_t t) {
-    int64_t sec = t/100;
-    int64_t msec = t - sec*100;
-    int64_t min = sec/60;
-    sec = sec - min*60;
-
-    char buf[32];
-    snprintf(buf, sizeof(buf), "%02d:%02d.%03d", (int) min, (int) sec, (int) msec);
-
-    return std::string(buf);
-}

 // command-line parameters
 struct whisper_params {
     int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
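The helper removed here formatted whisper's 10 ms timestamps as `MM:SS.mmm`; the call sites later in this diff switch to a two-argument `to_timestamp(t, false)`, presumably a shared version from the examples' common code. A self-contained sketch of such a helper; the `comma` flag name and its meaning (',' vs '.' before the milliseconds) are an assumption, not taken from the diff:

```cpp
// Sketch of the removed timestamp helper, extended with the extra boolean the
// call sites below now pass. The `comma` parameter is an assumed name: it
// selects ',' (SRT-style) or '.' as the milliseconds separator.
#include <cstdint>
#include <cstdio>
#include <string>

//  500 -> "00:05.000"
// 6000 -> "01:00.000"
std::string to_timestamp(int64_t t, bool comma = false) {
    const int64_t msec = (t % 100) * 10; // t is in units of 10 ms
    const int64_t sec  = (t / 100) % 60;
    const int64_t min  =  t / (100 * 60);

    char buf[32];
    snprintf(buf, sizeof(buf), "%02d:%02d%s%03d",
             (int) min, (int) sec, comma ? "," : ".", (int) msec);

    return std::string(buf);
}
```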
@@ -41,7 +27,6 @@ struct whisper_params {
     float vad_thold = 0.6f;
     float freq_thold = 100.0f;

-    bool speed_up = false;
     bool translate = false;
     bool no_fallback = false;
     bool print_special = false;
@@ -50,6 +35,7 @@ struct whisper_params {
     bool tinydiarize = false;
     bool save_audio = false; // save audio to wav file
     bool use_gpu = true;
+    bool flash_attn = false;

     std::string language = "en";
     std::string model = "models/ggml-base.en.bin";
@@ -58,7 +44,7 @@ struct whisper_params {

 void whisper_print_usage(int argc, char ** argv, const whisper_params & params);

-bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
+static bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
     for (int i = 1; i < argc; i++) {
         std::string arg = argv[i];

@@ -75,7 +61,6 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
         else if (arg == "-ac" || arg == "--audio-ctx") { params.audio_ctx = std::stoi(argv[++i]); }
         else if (arg == "-vth" || arg == "--vad-thold") { params.vad_thold = std::stof(argv[++i]); }
         else if (arg == "-fth" || arg == "--freq-thold") { params.freq_thold = std::stof(argv[++i]); }
-        else if (arg == "-su" || arg == "--speed-up") { params.speed_up = true; }
         else if (arg == "-tr" || arg == "--translate") { params.translate = true; }
         else if (arg == "-nf" || arg == "--no-fallback") { params.no_fallback = true; }
         else if (arg == "-ps" || arg == "--print-special") { params.print_special = true; }
@@ -86,6 +71,7 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
         else if (arg == "-tdrz" || arg == "--tinydiarize") { params.tinydiarize = true; }
         else if (arg == "-sa" || arg == "--save-audio") { params.save_audio = true; }
         else if (arg == "-ng" || arg == "--no-gpu") { params.use_gpu = false; }
+        else if (arg == "-fa" || arg == "--flash-attn") { params.flash_attn = true; }

         else {
             fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
@@ -112,7 +98,6 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
     fprintf(stderr, " -ac N, --audio-ctx N [%-7d] audio context size (0 - all)\n", params.audio_ctx);
     fprintf(stderr, " -vth N, --vad-thold N [%-7.2f] voice activity detection threshold\n", params.vad_thold);
     fprintf(stderr, " -fth N, --freq-thold N [%-7.2f] high-pass frequency cutoff\n", params.freq_thold);
-    fprintf(stderr, " -su, --speed-up [%-7s] speed up audio by x2 (reduced accuracy)\n", params.speed_up ? "true" : "false");
     fprintf(stderr, " -tr, --translate [%-7s] translate from source language to english\n", params.translate ? "true" : "false");
     fprintf(stderr, " -nf, --no-fallback [%-7s] do not use temperature fallback while decoding\n", params.no_fallback ? "true" : "false");
     fprintf(stderr, " -ps, --print-special [%-7s] print special tokens\n", params.print_special ? "true" : "false");
@@ -123,6 +108,7 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
     fprintf(stderr, " -tdrz, --tinydiarize [%-7s] enable tinydiarize (requires a tdrz model)\n", params.tinydiarize ? "true" : "false");
     fprintf(stderr, " -sa, --save-audio [%-7s] save the recorded audio to a file\n", params.save_audio ? "true" : "false");
     fprintf(stderr, " -ng, --no-gpu [%-7s] disable GPU inference\n", params.use_gpu ? "false" : "true");
+    fprintf(stderr, " -fa, --flash-attn [%-7s] flash attention during inference\n", params.flash_attn ? "true" : "false");
     fprintf(stderr, "\n");
 }

@@ -166,8 +152,10 @@ int main(int argc, char ** argv) {
         exit(0);
     }

-    struct whisper_context_params cparams;
-    cparams.use_gpu = params.use_gpu;
+    struct whisper_context_params cparams = whisper_context_default_params();
+
+    cparams.use_gpu = params.use_gpu;
+    cparams.flash_attn = params.flash_attn;

     struct whisper_context * ctx = whisper_init_from_file_with_params(params.model.c_str(), cparams);

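The hunk above switches the example to `whisper_context_default_params()` before overriding individual fields, and forwards the new `flash_attn` option alongside `use_gpu`. A sketch of that initialization flow in isolation; the model path and flag values are placeholders, not values from the diff:

```cpp
// Sketch: initialize a whisper context the way the hunk above does, with the
// GPU and flash-attention switches coming from parsed options. The model path
// and the literal true/false values are placeholders.
#include "whisper.h"
#include <cstdio>

int main() {
    struct whisper_context_params cparams = whisper_context_default_params();

    cparams.use_gpu    = true;   // would be params.use_gpu    (-ng disables it)
    cparams.flash_attn = false;  // would be params.flash_attn (-fa enables it)

    struct whisper_context * ctx = whisper_init_from_file_with_params("models/ggml-base.en.bin", cparams);
    if (ctx == nullptr) {
        fprintf(stderr, "error: failed to initialize whisper context\n");
        return 1;
    }

    // ... run whisper_full(), read segments ...

    whisper_free(ctx);
    return 0;
}
```

Starting from the default params matters because any field the example does not set explicitly (such as `flash_attn` on older call sites) would otherwise be uninitialized.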
@@ -323,7 +311,6 @@ int main(int argc, char ** argv) {
             wparams.n_threads = params.n_threads;

             wparams.audio_ctx = params.audio_ctx;
-            wparams.speed_up = params.speed_up;

             wparams.tdrz_enable = params.tinydiarize; // [TDRZ]

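For context, these per-call options live in `whisper_full_params` and are passed to `whisper_full()` for each captured audio chunk. A sketch of that wiring, with placeholder values where the example would use its parsed `params`; the helper name `transcribe_chunk` is illustrative:

```cpp
// Sketch: the decode-time parameters touched in the hunk above, applied to a
// whisper_full() call. ctx and pcmf32 stand in for the real context and the
// captured audio buffer; the literal values are placeholders.
#include "whisper.h"
#include <vector>

int transcribe_chunk(struct whisper_context * ctx, const std::vector<float> & pcmf32) {
    whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);

    wparams.n_threads   = 4;     // params.n_threads in the example
    wparams.audio_ctx   = 0;     // params.audio_ctx (0 = use the full audio context)
    wparams.tdrz_enable = false; // params.tinydiarize [TDRZ]

    return whisper_full(ctx, wparams, pcmf32.data(), (int) pcmf32.size());
}
```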
@@ -372,7 +359,7 @@ int main(int argc, char ** argv) {
                 const int64_t t0 = whisper_full_get_segment_t0(ctx, i);
                 const int64_t t1 = whisper_full_get_segment_t1(ctx, i);

-                std::string output = "[" + to_timestamp(t0) + " --> " + to_timestamp(t1) + "] " + text;
+                std::string output = "[" + to_timestamp(t0, false) + " --> " + to_timestamp(t1, false) + "] " + text;

                 if (whisper_full_get_segment_speaker_turn_next(ctx, i)) {
                     output += " [SPEAKER_TURN]";
examples/sycl/CMakeLists.txt (new file, 9 lines)
@@ -0,0 +1,9 @@
+# MIT license
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: MIT
+
+set(TARGET ls-sycl-device)
+add_executable(${TARGET} ls-sycl-device.cpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE common whisper ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
Some files were not shown because too many files have changed in this diff.