diff --git a/backend/python/openvoice/test.py b/backend/python/openvoice/test.py
index 262917b3..82f08785 100644
--- a/backend/python/openvoice/test.py
+++ b/backend/python/openvoice/test.py
@@ -19,7 +19,7 @@ class TestBackendServicer(unittest.TestCase):
         This method sets up the gRPC service by starting the server
         """
         self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
-        time.sleep(10)
+        time.sleep(30)
 
     def tearDown(self) -> None:
         """
diff --git a/backend/python/vllm/install.sh b/backend/python/vllm/install.sh
index 022cf8bf..9078b81b 100755
--- a/backend/python/vllm/install.sh
+++ b/backend/python/vllm/install.sh
@@ -13,7 +13,9 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then
     EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
 fi
 
-if [ "x${BUILD_TYPE}" == "x" ]; then
+# We don't embed this into the images as it is a large dependency and not always needed.
+# Besides, the inference speed is not actually usable in its current state for production use-cases.
+if [ "x${BUILD_TYPE}" == "x" ] && [ "x${FROM_SOURCE}" == "xtrue" ]; then
     ensureVenv
     # https://docs.vllm.ai/en/v0.6.1/getting_started/cpu-installation.html
     if [ ! -d vllm ]; then
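
Note on the test change: raising the fixed `time.sleep(10)` to 30 seconds gives `backend.py` more time to bind the port, at the cost of a slower test run even when the server comes up quickly. A minimal sketch of an alternative readiness check, assuming the standard `grpcio` package; the helper name `wait_for_server` is hypothetical and not part of this patch:

```python
import grpc

def wait_for_server(addr: str = "localhost:50051", timeout: float = 30.0) -> None:
    """Block until the gRPC server at `addr` accepts connections, or raise.

    Hypothetical helper, not part of this patch: polls the channel state
    instead of sleeping for a fixed interval.
    """
    channel = grpc.insecure_channel(addr)
    try:
        # channel_ready_future resolves once the channel reaches READY;
        # .result() raises grpc.FutureTimeoutError if the deadline passes.
        grpc.channel_ready_future(channel).result(timeout=timeout)
    finally:
        channel.close()
```

On the install script change: a CPU build (empty `BUILD_TYPE`) now skips the from-source vllm build unless explicitly opted in, e.g. via `FROM_SOURCE=true ./install.sh` (the exact invocation depends on how the build pipeline calls the script).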