diff --git a/tests/librispeech/Makefile b/tests/librispeech/Makefile index 79fdd07a..0afa2465 100644 --- a/tests/librispeech/Makefile +++ b/tests/librispeech/Makefile @@ -8,13 +8,6 @@ eval: clean: $(MAKE) -f eval.mk clean -setup-venv: - python3 -m venv venv - ./venv/bin/pip install -r requirements.txt - -clean-venv: - rm -r venv - get-audio: wget -c $(TAR_URL) tar -xf test-clean.tar.gz diff --git a/tests/librispeech/README.md b/tests/librispeech/README.md index 026605ce..85478a0f 100644 --- a/tests/librispeech/README.md +++ b/tests/librispeech/README.md @@ -29,7 +29,15 @@ performance of whisper.cpp on LibriSpeech corpus. 3. Set up the environment to compute WER score. ``` - $ make setup-venv + $ pip install -r requirements.txt + ``` + + For example, if you use `virtualenv`, you can set it up as follows: + + ``` + $ python3 -m venv venv + $ . venv/bin/activate + $ pip install -r requirements.txt ``` 4. Run the benchmark test. diff --git a/tests/librispeech/eval.mk b/tests/librispeech/eval.mk index 67dd33d4..6d504c8b 100644 --- a/tests/librispeech/eval.mk +++ b/tests/librispeech/eval.mk @@ -1,4 +1,4 @@ -PYTHON = ./venv/bin/python3 +PYTHON = python WHISPER_PREFIX = ../../ WHISPER_MODEL = tiny