Mirror of https://github.com/mudler/LocalAI.git (synced 2025-02-21 17:46:41 +00:00)

chore(vall-e-x): Drop backend (#4619)

There are many new architectures that are SOTA and replace vall-e-x nowadays.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

Parent: d08d97bebf
Commit: 7d0ac1ea3f
.github/dependabot.yml (vendored, 4 lines changed)

@@ -85,10 +85,6 @@ updates:
     directory: "/backend/python/transformers-musicgen"
     schedule:
       interval: "weekly"
-  - package-ecosystem: "pip"
-    directory: "/backend/python/vall-e-x"
-    schedule:
-      interval: "weekly"
   - package-ecosystem: "pip"
     directory: "/backend/python/vllm"
     schedule:
.github/workflows/test-extra.yml (vendored, 20 lines changed)

@@ -260,26 +260,6 @@ jobs:
   #  run: |
   #    make --jobs=5 --output-sync=target -C backend/python/vllm
   #    make --jobs=5 --output-sync=target -C backend/python/vllm test
-  tests-vallex:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Clone
-        uses: actions/checkout@v4
-        with:
-          submodules: true
-      - name: Dependencies
-        run: |
-          sudo apt-get update
-          sudo apt-get install build-essential ffmpeg
-          # Install UV
-          curl -LsSf https://astral.sh/uv/install.sh | sh
-          sudo apt-get install -y ca-certificates cmake curl patch python3-pip
-          sudo apt-get install -y libopencv-dev
-          pip install --user --no-cache-dir grpcio-tools==1.64.1
-      - name: Test vall-e-x
-        run: |
-          make --jobs=5 --output-sync=target -C backend/python/vall-e-x
-          make --jobs=5 --output-sync=target -C backend/python/vall-e-x test
-
   tests-coqui:
     runs-on: ubuntu-latest
Dockerfile

@@ -15,7 +15,7 @@ ARG TARGETARCH
 ARG TARGETVARIANT

 ENV DEBIAN_FRONTEND=noninteractive
-ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,huggingface-embeddings:/build/backend/python/sentencetransformers/run.sh,transformers:/build/backend/python/transformers/run.sh,sentencetransformers:/build/backend/python/sentencetransformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,autogptq:/build/backend/python/autogptq/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,openvoice:/build/backend/python/openvoice/run.sh,kokoro:/build/backend/python/kokoro/run.sh,vall-e-x:/build/backend/python/vall-e-x/run.sh,vllm:/build/backend/python/vllm/run.sh,mamba:/build/backend/python/mamba/run.sh,exllama2:/build/backend/python/exllama2/run.sh,transformers-musicgen:/build/backend/python/transformers-musicgen/run.sh,parler-tts:/build/backend/python/parler-tts/run.sh"
+ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,huggingface-embeddings:/build/backend/python/sentencetransformers/run.sh,transformers:/build/backend/python/transformers/run.sh,sentencetransformers:/build/backend/python/sentencetransformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,autogptq:/build/backend/python/autogptq/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,openvoice:/build/backend/python/openvoice/run.sh,kokoro:/build/backend/python/kokoro/run.sh,vllm:/build/backend/python/vllm/run.sh,mamba:/build/backend/python/mamba/run.sh,exllama2:/build/backend/python/exllama2/run.sh,transformers-musicgen:/build/backend/python/transformers-musicgen/run.sh,parler-tts:/build/backend/python/parler-tts/run.sh"


 RUN apt-get update && \

@@ -453,10 +453,7 @@ RUN if [[ ( "${EXTRA_BACKENDS}" =~ "coqui" || -z "${EXTRA_BACKENDS}" ) && "$IMAG
     make -C backend/python/transformers-musicgen \
     ; fi

-RUN if [[ ( "${EXTRA_BACKENDS}" =~ "vall-e-x" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
-    make -C backend/python/vall-e-x \
-    ; fi && \
-    if [[ ( "${EXTRA_BACKENDS}" =~ "kokoro" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
+RUN if [[ ( "${EXTRA_BACKENDS}" =~ "kokoro" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
     make -C backend/python/kokoro \
     ; fi && \
     if [[ ( "${EXTRA_BACKENDS}" =~ "openvoice" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
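For context on the ENV change above: EXTERNAL_GRPC_BACKENDS is a single comma-separated list of name:launcher pairs, so a backend is unregistered from the images simply by dropping its pair. An illustrative (not production) two-entry value:

    ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,vllm:/build/backend/python/vllm/run.sh"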
Makefile (13 lines changed)

@@ -583,10 +583,10 @@ protogen-go-clean:
 	$(RM) bin/*

 .PHONY: protogen-python
-protogen-python: autogptq-protogen bark-protogen coqui-protogen diffusers-protogen exllama2-protogen mamba-protogen rerankers-protogen sentencetransformers-protogen transformers-protogen parler-tts-protogen transformers-musicgen-protogen vall-e-x-protogen kokoro-protogen vllm-protogen openvoice-protogen
+protogen-python: autogptq-protogen bark-protogen coqui-protogen diffusers-protogen exllama2-protogen mamba-protogen rerankers-protogen sentencetransformers-protogen transformers-protogen parler-tts-protogen transformers-musicgen-protogen kokoro-protogen vllm-protogen openvoice-protogen

 .PHONY: protogen-python-clean
-protogen-python-clean: autogptq-protogen-clean bark-protogen-clean coqui-protogen-clean diffusers-protogen-clean exllama2-protogen-clean mamba-protogen-clean sentencetransformers-protogen-clean rerankers-protogen-clean transformers-protogen-clean transformers-musicgen-protogen-clean parler-tts-protogen-clean vall-e-x-protogen-clean kokoro-protogen-clean vllm-protogen-clean openvoice-protogen-clean
+protogen-python-clean: autogptq-protogen-clean bark-protogen-clean coqui-protogen-clean diffusers-protogen-clean exllama2-protogen-clean mamba-protogen-clean sentencetransformers-protogen-clean rerankers-protogen-clean transformers-protogen-clean transformers-musicgen-protogen-clean parler-tts-protogen-clean kokoro-protogen-clean vllm-protogen-clean openvoice-protogen-clean

 .PHONY: autogptq-protogen
 autogptq-protogen:

@@ -676,14 +676,6 @@ transformers-musicgen-protogen:
 transformers-musicgen-protogen-clean:
 	$(MAKE) -C backend/python/transformers-musicgen protogen-clean

-.PHONY: vall-e-x-protogen
-vall-e-x-protogen:
-	$(MAKE) -C backend/python/vall-e-x protogen
-
-.PHONY: vall-e-x-protogen-clean
-vall-e-x-protogen-clean:
-	$(MAKE) -C backend/python/vall-e-x protogen-clean
-
 .PHONY: kokoro-protogen
 kokoro-protogen:
 	$(MAKE) -C backend/python/kokoro protogen

@@ -722,7 +714,6 @@ prepare-extra-conda-environments: protogen-python
 	$(MAKE) -C backend/python/transformers
 	$(MAKE) -C backend/python/transformers-musicgen
 	$(MAKE) -C backend/python/parler-tts
-	$(MAKE) -C backend/python/vall-e-x
 	$(MAKE) -C backend/python/kokoro
 	$(MAKE) -C backend/python/openvoice
 	$(MAKE) -C backend/python/exllama2
backend/python/vall-e-x/.gitignore (vendored, deleted)

@@ -1 +0,0 @@
-source
backend/python/vall-e-x/Makefile (deleted)

@@ -1,33 +0,0 @@
-ifneq (,$(findstring sycl,$(BUILD_TYPE)))
-export SKIP_CONDA=1
-endif
-
-.PHONY: ttsvalle
-ttsvalle: protogen
-	bash install.sh
-
-.PHONY: run
-run: protogen
-	@echo "Running ttsvalle..."
-	bash run.sh
-	@echo "ttsvalle run."
-
-.PHONY: test
-test: protogen
-	@echo "Testing valle..."
-	bash test.sh
-	@echo "valle tested."
-
-.PHONY: protogen
-protogen: backend_pb2_grpc.py backend_pb2.py
-
-.PHONY: protogen-clean
-protogen-clean:
-	$(RM) backend_pb2_grpc.py backend_pb2.py
-
-backend_pb2_grpc.py backend_pb2.py:
-	python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=. backend.proto
-
-.PHONY: clean
-clean: protogen-clean
-	rm -rf source venv __pycache__
backend/python/vall-e-x/README.md (deleted)

@@ -1,5 +0,0 @@
-# Creating a separate environment for the ttsvalle project
-
-```
-make ttsvalle
-```
backend/python/vall-e-x/backend.py (deleted)

@@ -1,141 +0,0 @@
-#!/usr/bin/env python3
-
-from concurrent import futures
-import argparse
-import signal
-import sys
-import os
-import time
-import backend_pb2
-import backend_pb2_grpc
-
-import grpc
-
-from utils.generation import SAMPLE_RATE, generate_audio, preload_models
-from scipy.io.wavfile import write as write_wav
-from utils.prompt_making import make_prompt
-
-_ONE_DAY_IN_SECONDS = 60 * 60 * 24
-
-# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
-MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
-
-# Implement the BackendServicer class with the service methods
-class BackendServicer(backend_pb2_grpc.BackendServicer):
-    """
-    gRPC servicer for backend services.
-    """
-    def Health(self, request, context):
-        """
-        Health check service.
-
-        Args:
-            request: A backend_pb2.HealthRequest instance.
-            context: A grpc.ServicerContext instance.
-
-        Returns:
-            A backend_pb2.Reply instance with message "OK".
-        """
-        return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
-
-    def LoadModel(self, request, context):
-        """
-        Load model service.
-
-        Args:
-            request: A backend_pb2.LoadModelRequest instance.
-            context: A grpc.ServicerContext instance.
-
-        Returns:
-            A backend_pb2.Result instance with message "Model loaded successfully" and success=True if successful.
-            A backend_pb2.Result instance with success=False and error message if unsuccessful.
-        """
-        model_name = request.Model
-        try:
-            print("Preparing models, please wait", file=sys.stderr)
-            # download and load all models
-            preload_models()
-            self.clonedVoice = False
-            # Assume directory from request.ModelFile.
-            # Only if request.LoraAdapter it's not an absolute path
-            if request.AudioPath and request.ModelFile != "" and not os.path.isabs(request.AudioPath):
-                # get base path of modelFile
-                modelFileBase = os.path.dirname(request.ModelFile)
-                # modify LoraAdapter to be relative to modelFileBase
-                request.AudioPath = os.path.join(modelFileBase, request.AudioPath)
-            if request.AudioPath != "":
-                print("Generating model", file=sys.stderr)
-                make_prompt(name=model_name, audio_prompt_path=request.AudioPath)
-                self.clonedVoice = True
-                ### Use given transcript
-                ##make_prompt(name=model_name, audio_prompt_path="paimon_prompt.wav",
-                ##            transcript="Just, what was that? Paimon thought we were gonna get eaten.")
-        except Exception as err:
-            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
-        # Implement your logic here for the LoadModel service
-        # Replace this with your desired response
-        return backend_pb2.Result(message="Model loaded successfully", success=True)
-
-    def TTS(self, request, context):
-        """
-        Text-to-speech service.
-
-        Args:
-            request: A backend_pb2.TTSRequest instance.
-            context: A grpc.ServicerContext instance.
-
-        Returns:
-            A backend_pb2.Result instance with success=True if successful.
-            A backend_pb2.Result instance with success=False and error message if unsuccessful.
-        """
-        model = request.model
-        print(request, file=sys.stderr)
-        try:
-            audio_array = None
-            if model != "":
-                if self.clonedVoice:
-                    model = os.path.basename(request.model)
-                audio_array = generate_audio(request.text, prompt=model)
-            else:
-                audio_array = generate_audio(request.text)
-            print("saving to", request.dst, file=sys.stderr)
-            # save audio to disk
-            write_wav(request.dst, SAMPLE_RATE, audio_array)
-            print("saved to", request.dst, file=sys.stderr)
-            print("tts for", file=sys.stderr)
-            print(request, file=sys.stderr)
-        except Exception as err:
-            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
-        return backend_pb2.Result(success=True)
-
-def serve(address):
-    server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS))
-    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
-    server.add_insecure_port(address)
-    server.start()
-    print("Server started. Listening on: " + address, file=sys.stderr)
-
-    # Define the signal handler function
-    def signal_handler(sig, frame):
-        print("Received termination signal. Shutting down...")
-        server.stop(0)
-        sys.exit(0)
-
-    # Set the signal handlers for SIGINT and SIGTERM
-    signal.signal(signal.SIGINT, signal_handler)
-    signal.signal(signal.SIGTERM, signal_handler)
-
-    try:
-        while True:
-            time.sleep(_ONE_DAY_IN_SECONDS)
-    except KeyboardInterrupt:
-        server.stop(0)
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Run the gRPC server.")
-    parser.add_argument(
-        "--addr", default="localhost:50051", help="The address to bind the server to."
-    )
-    args = parser.parse_args()
-
-    serve(args.addr)
backend/python/vall-e-x/install.sh (deleted)

@@ -1,22 +0,0 @@
-#!/bin/bash
-set -e
-
-VALL_E_X_VERSION=3faaf8ccadb154d63b38070caf518ce9309ea0f4
-
-source $(dirname $0)/../common/libbackend.sh
-
-# This is here because the Intel pip index is broken and returns 200 status codes for every package name, it just doesn't return any package links.
-# This makes uv think that the package exists in the Intel pip index, and by default it stops looking at other pip indexes once it finds a match.
-# We need uv to continue falling through to the pypi default index to find optimum[openvino] in the pypi index
-# the --upgrade actually allows us to *downgrade* torch to the version provided in the Intel pip index
-if [ "x${BUILD_PROFILE}" == "xintel" ]; then
-    EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
-fi
-
-installRequirements
-
-git clone https://github.com/Plachtaa/VALL-E-X.git ${MY_DIR}/source
-pushd ${MY_DIR}/source && git checkout -b build ${VALL_E_X_VERSION} && popd
-uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/source/requirements.txt
-
-cp -v ./*py $MY_DIR/source/
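The comments in install.sh describe a real uv behavior: with the default index strategy, uv stops at the first index that claims a package name, even if that index serves no usable files. A hedged standalone sketch of the same fallback, with the package and index URL taken from this backend's Intel requirements (exact flag spelling may vary across uv versions):

    # let uv fall through to PyPI when the Intel index claims a package it cannot serve
    uv pip install \
      --index-strategy unsafe-first-match \
      --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ \
      "optimum[openvino]"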
backend/python/vall-e-x/requirements-cpu.txt (deleted)

@@ -1,3 +0,0 @@
-accelerate
-torch==2.4.1
-torchaudio==2.4.1
backend/python/vall-e-x/requirements-cublas11.txt (deleted)

@@ -1,4 +0,0 @@
---extra-index-url https://download.pytorch.org/whl/cu118
-accelerate
-torch==2.4.1+cu118
-torchaudio==2.4.1+cu118
backend/python/vall-e-x/requirements-cublas12.txt (deleted)

@@ -1,3 +0,0 @@
-accelerate
-torch==2.4.1
-torchaudio==2.4.1
backend/python/vall-e-x/requirements-hipblas.txt (deleted)

@@ -1,4 +0,0 @@
---extra-index-url https://download.pytorch.org/whl/rocm6.0
-accelerate
-torch==2.3.0+rocm6.0
-torchaudio==2.3.0+rocm6.0
backend/python/vall-e-x/requirements-intel.txt (deleted)

@@ -1,7 +0,0 @@
---extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-intel-extension-for-pytorch==2.3.110+xpu
-accelerate
-torch==2.3.1+cxx11.abi
-torchaudio==2.3.1+cxx11.abi
-optimum[openvino]
-oneccl_bind_pt==2.3.100+xpu
backend/python/vall-e-x/requirements.txt (deleted)

@@ -1,4 +0,0 @@
-grpcio==1.69.0
-protobuf
-certifi
-setuptools
backend/python/vall-e-x/run.sh (deleted)

@@ -1,6 +0,0 @@
-#!/bin/bash
-BACKEND_FILE="${MY_DIR}/source/backend.py"
-
-source $(dirname $0)/../common/libbackend.sh
-
-startBackend $@
backend/python/vall-e-x/test.py (deleted)

@@ -1,81 +0,0 @@
-"""
-A test script to test the gRPC service
-"""
-import unittest
-import subprocess
-import time
-import backend_pb2
-import backend_pb2_grpc
-
-import grpc
-
-
-class TestBackendServicer(unittest.TestCase):
-    """
-    TestBackendServicer is the class that tests the gRPC service
-    """
-    def setUp(self):
-        """
-        This method sets up the gRPC service by starting the server
-        """
-        self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
-        time.sleep(10)
-
-    def tearDown(self) -> None:
-        """
-        This method tears down the gRPC service by terminating the server
-        """
-        self.service.terminate()
-        self.service.wait()
-
-    def test_server_startup(self):
-        """
-        This method tests if the server starts up successfully
-        """
-        try:
-            self.setUp()
-            with grpc.insecure_channel("localhost:50051") as channel:
-                stub = backend_pb2_grpc.BackendStub(channel)
-                response = stub.Health(backend_pb2.HealthMessage())
-                self.assertEqual(response.message, b'OK')
-        except Exception as err:
-            print(err)
-            self.fail("Server failed to start")
-        finally:
-            self.tearDown()
-
-    def test_load_model(self):
-        """
-        This method tests if the model is loaded successfully
-        """
-        try:
-            self.setUp()
-            with grpc.insecure_channel("localhost:50051") as channel:
-                stub = backend_pb2_grpc.BackendStub(channel)
-                response = stub.LoadModel(backend_pb2.ModelOptions(Model="dingzhen"))
-                self.assertTrue(response.success)
-                self.assertEqual(response.message, "Model loaded successfully")
-        except Exception as err:
-            print(err)
-            self.fail("LoadModel service failed")
-        finally:
-            self.tearDown()
-
-    def test_tts(self):
-        """
-        This method tests if the embeddings are generated successfully
-        """
-        try:
-            self.setUp()
-            with grpc.insecure_channel("localhost:50051") as channel:
-                stub = backend_pb2_grpc.BackendStub(channel)
-                response = stub.LoadModel(backend_pb2.ModelOptions(Model="dingzhen"))
-                self.assertTrue(response.success)
-                tts_request = backend_pb2.TTSRequest(text="80s TV news production music hit for tonight's biggest story")
-                tts_response = stub.TTS(tts_request)
-                self.assertIsNotNone(tts_response)
-        except Exception as err:
-            print(err)
-            self.fail("TTS service failed")
-        finally:
-            self.tearDown()
backend/python/vall-e-x/test.sh (deleted)

@@ -1,7 +0,0 @@
-#!/bin/bash
-set -e
-TEST_FILE="./source/test.py"
-
-source $(dirname $0)/../common/libbackend.sh
-
-runUnittests
core/backend/options.go

@@ -140,7 +140,7 @@ func grpcModelOpts(c config.BackendConfig) *pb.ModelOptions {
 		NBatch:               int32(b),
 		NoMulMatQ:            c.NoMulMatQ,
 		DraftModel:           c.DraftModel,
-		AudioPath:            c.VallE.AudioPath,
+		AudioPath:            c.AudioPath,
 		Quantization:         c.Quantization,
 		LoadFormat:           c.LoadFormat,
 		GPUMemoryUtilization: c.GPUMemoryUtilization,
core/config/backend_config.go

@@ -21,8 +21,7 @@ type TTSConfig struct {
 	// Voice wav path or id
 	Voice string `yaml:"voice"`

-	// Vall-e-x
-	VallE VallE `yaml:"vall-e"`
+	AudioPath string `yaml:"audio_path"`
 }

 type BackendConfig struct {

@@ -82,10 +81,6 @@ type File struct {
 	URI downloader.URI `yaml:"uri" json:"uri"`
 }

-type VallE struct {
-	AudioPath string `yaml:"audio_path"`
-}
-
 type FeatureFlag map[string]*bool

 func (ff FeatureFlag) Enabled(s string) bool {
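Net effect on model configuration: audio_path moves from the nested vall-e block to a top-level field of the TTS config. A minimal sketch, assuming TTSConfig is serialized under a tts key in the model YAML (both the voice id and the wav file name are hypothetical):

    tts:
      voice: "female-1"
      # before this commit (removed VallE struct):
      # vall-e:
      #   audio_path: "cloned-voice.wav"
      # after this commit (field on TTSConfig):
      audio_path: "cloned-voice.wav"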