mirror of https://github.com/mudler/LocalAI.git
synced 2024-12-18 20:27:57 +00:00
parent a10a952085
commit 8ad669339e

.github/workflows/test-extra.yml (vendored) | 22
@@ -123,6 +123,28 @@ jobs:
         run: |
           make --jobs=5 --output-sync=target -C backend/python/parler-tts
           make --jobs=5 --output-sync=target -C backend/python/parler-tts test
 
+  tests-openvoice:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Clone
+        uses: actions/checkout@v4
+        with:
+          submodules: true
+      - name: Dependencies
+        run: |
+          sudo apt-get update
+          sudo apt-get install build-essential ffmpeg
+          # Install UV
+          curl -LsSf https://astral.sh/uv/install.sh | sh
+          sudo apt-get install -y ca-certificates cmake curl patch python3-pip
+          sudo apt-get install -y libopencv-dev
+          pip install --user grpcio-tools==1.63.0
+
+      - name: Test openvoice
+        run: |
+          make --jobs=5 --output-sync=target -C backend/python/openvoice
+          make --jobs=5 --output-sync=target -C backend/python/openvoice test
+
   tests-transformers-musicgen:
     runs-on: ubuntu-latest

Dockerfile

@@ -12,7 +12,7 @@ ARG TARGETARCH
 ARG TARGETVARIANT
 
 ENV DEBIAN_FRONTEND=noninteractive
-ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,huggingface-embeddings:/build/backend/python/sentencetransformers/run.sh,petals:/build/backend/python/petals/run.sh,transformers:/build/backend/python/transformers/run.sh,sentencetransformers:/build/backend/python/sentencetransformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,autogptq:/build/backend/python/autogptq/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,exllama:/build/backend/python/exllama/run.sh,vall-e-x:/build/backend/python/vall-e-x/run.sh,vllm:/build/backend/python/vllm/run.sh,mamba:/build/backend/python/mamba/run.sh,exllama2:/build/backend/python/exllama2/run.sh,transformers-musicgen:/build/backend/python/transformers-musicgen/run.sh,parler-tts:/build/backend/python/parler-tts/run.sh"
+ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,huggingface-embeddings:/build/backend/python/sentencetransformers/run.sh,petals:/build/backend/python/petals/run.sh,transformers:/build/backend/python/transformers/run.sh,sentencetransformers:/build/backend/python/sentencetransformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,autogptq:/build/backend/python/autogptq/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,exllama:/build/backend/python/exllama/run.sh,openvoice:/build/backend/python/openvoice/run.sh,vall-e-x:/build/backend/python/vall-e-x/run.sh,vllm:/build/backend/python/vllm/run.sh,mamba:/build/backend/python/mamba/run.sh,exllama2:/build/backend/python/exllama2/run.sh,transformers-musicgen:/build/backend/python/transformers-musicgen/run.sh,parler-tts:/build/backend/python/parler-tts/run.sh"
 
 ARG GO_TAGS="stablediffusion tinydream tts"
@@ -306,6 +306,9 @@ RUN if [[ ( "${EXTRA_BACKENDS}" =~ "coqui" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
 RUN if [[ ( "${EXTRA_BACKENDS}" =~ "vall-e-x" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
         make -C backend/python/vall-e-x \
     ; fi && \
+    if [[ ( "${EXTRA_BACKENDS}" =~ "openvoice" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
+        make -C backend/python/openvoice \
+    ; fi && \
     if [[ ( "${EXTRA_BACKENDS}" =~ "petals" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
         make -C backend/python/petals \
     ; fi && \

Makefile | 13

@@ -454,10 +454,10 @@ protogen-go-clean:
 	$(RM) bin/*
 
 .PHONY: protogen-python
-protogen-python: autogptq-protogen bark-protogen coqui-protogen diffusers-protogen exllama-protogen exllama2-protogen mamba-protogen petals-protogen rerankers-protogen sentencetransformers-protogen transformers-protogen parler-tts-protogen transformers-musicgen-protogen vall-e-x-protogen vllm-protogen
+protogen-python: autogptq-protogen bark-protogen coqui-protogen diffusers-protogen exllama-protogen exllama2-protogen mamba-protogen petals-protogen rerankers-protogen sentencetransformers-protogen transformers-protogen parler-tts-protogen transformers-musicgen-protogen vall-e-x-protogen vllm-protogen openvoice-protogen
 
 .PHONY: protogen-python-clean
-protogen-python-clean: autogptq-protogen-clean bark-protogen-clean coqui-protogen-clean diffusers-protogen-clean exllama-protogen-clean exllama2-protogen-clean mamba-protogen-clean petals-protogen-clean sentencetransformers-protogen-clean rerankers-protogen-clean transformers-protogen-clean transformers-musicgen-protogen-clean parler-tts-protogen-clean vall-e-x-protogen-clean vllm-protogen-clean
+protogen-python-clean: autogptq-protogen-clean bark-protogen-clean coqui-protogen-clean diffusers-protogen-clean exllama-protogen-clean exllama2-protogen-clean mamba-protogen-clean petals-protogen-clean sentencetransformers-protogen-clean rerankers-protogen-clean transformers-protogen-clean transformers-musicgen-protogen-clean parler-tts-protogen-clean vall-e-x-protogen-clean vllm-protogen-clean openvoice-protogen-clean
 
 .PHONY: autogptq-protogen
 autogptq-protogen:
@@ -571,6 +571,14 @@ vall-e-x-protogen:
 vall-e-x-protogen-clean:
 	$(MAKE) -C backend/python/vall-e-x protogen-clean
 
+.PHONY: openvoice-protogen
+openvoice-protogen:
+	$(MAKE) -C backend/python/openvoice protogen
+
+.PHONY: openvoice-protogen-clean
+openvoice-protogen-clean:
+	$(MAKE) -C backend/python/openvoice protogen-clean
+
 .PHONY: vllm-protogen
 vllm-protogen:
 	$(MAKE) -C backend/python/vllm protogen
@@ -594,6 +602,7 @@ prepare-extra-conda-environments: protogen-python
 	$(MAKE) -C backend/python/transformers-musicgen
 	$(MAKE) -C backend/python/parler-tts
 	$(MAKE) -C backend/python/vall-e-x
+	$(MAKE) -C backend/python/openvoice
 	$(MAKE) -C backend/python/exllama
 	$(MAKE) -C backend/python/petals
 	$(MAKE) -C backend/python/exllama2

backend/python/openvoice/Makefile (new file) | 25

@@ -0,0 +1,25 @@
+.DEFAULT_GOAL := install
+
+.PHONY: install
+install: protogen
+	bash install.sh
+
+.PHONY: protogen
+protogen: backend_pb2_grpc.py backend_pb2.py
+
+.PHONY: protogen-clean
+protogen-clean:
+	$(RM) backend_pb2_grpc.py backend_pb2.py
+
+backend_pb2_grpc.py backend_pb2.py:
+	python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=. backend.proto
+
+.PHONY: clean
+clean: protogen-clean
+	rm -rf venv __pycache__
+
+.PHONY: test
+test: protogen
+	@echo "Testing openvoice..."
+	bash test.sh
+	@echo "openvoice tested."
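The protogen rule above regenerates the gRPC stubs from backend.proto. For reference, the same step can be driven from Python via grpcio-tools (a sketch, not part of the commit; assumes grpcio-tools is installed, as the CI workflow does with grpcio-tools==1.63.0, and that it runs from backend/python/openvoice):

# Sketch: programmatic equivalent of the Makefile's protogen rule.
# grpc_tools.protoc.main takes an argv-style list and returns protoc's exit code.
from grpc_tools import protoc
import pkg_resources

# "python3 -m grpc_tools.protoc" appends the bundled well-known-types include
# automatically; when calling protoc.main() directly we add it ourselves.
well_known = pkg_resources.resource_filename("grpc_tools", "_proto")

rc = protoc.main([
    "protoc",
    "-I../..",              # backend.proto lives two directories up
    "-I" + well_known,
    "--python_out=.",       # emits backend_pb2.py
    "--grpc_python_out=.",  # emits backend_pb2_grpc.py
    "backend.proto",
])
assert rc == 0, "stub generation failed"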

backend/python/openvoice/backend.py (new, executable file) | 158

@@ -0,0 +1,158 @@
+#!/usr/bin/env python3
+"""
+Extra gRPC server for OpenVoice models.
+"""
+from concurrent import futures
+
+import argparse
+import signal
+import sys
+import os
+import torch
+from openvoice import se_extractor
+from openvoice.api import ToneColorConverter
+from melo.api import TTS
+
+import time
+import backend_pb2
+import backend_pb2_grpc
+
+import grpc
+
+
+_ONE_DAY_IN_SECONDS = 60 * 60 * 24
+
+# If MAX_WORKERS is specified in the environment, use it; otherwise default to 1
+MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
+
+# Implement the BackendServicer class with the service methods
+class BackendServicer(backend_pb2_grpc.BackendServicer):
+    """
+    A gRPC servicer for the backend service.
+
+    This class implements the gRPC methods for the backend service, including Health, LoadModel, and TTS.
+    """
+    def Health(self, request, context):
+        """
+        A gRPC method that returns the health status of the backend service.
+
+        Args:
+            request: A HealthRequest object that contains the request parameters.
+            context: A grpc.ServicerContext object that provides information about the RPC.
+
+        Returns:
+            A Reply object that contains the health status of the backend service.
+        """
+        return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
+
+    def LoadModel(self, request, context):
+        """
+        A gRPC method that loads a model into memory.
+
+        Args:
+            request: A LoadModelRequest object that contains the request parameters.
+            context: A grpc.ServicerContext object that provides information about the RPC.
+
+        Returns:
+            A Result object that contains the result of the LoadModel operation.
+        """
+        model_name = request.Model
+        try:
+            self.clonedVoice = False
+            # Resolve request.AudioPath relative to the directory of request.ModelFile,
+            # but only if request.AudioPath is not already an absolute path
+            if request.AudioPath and request.ModelFile != "" and not os.path.isabs(request.AudioPath):
+                # get the base path of ModelFile
+                modelFileBase = os.path.dirname(request.ModelFile)
+                request.AudioPath = os.path.join(modelFileBase, request.AudioPath)
+            if request.AudioPath != "":
+                self.clonedVoice = True
+
+            self.modelpath = request.ModelFile
+            self.speaker = request.Type
+            self.ClonedVoicePath = request.AudioPath
+
+            ckpt_converter = request.Model+'/converter'
+            device = "cuda:0" if torch.cuda.is_available() else "cpu"
+            self.device = device
+            self.tone_color_converter = None
+            if self.clonedVoice:
+                self.tone_color_converter = ToneColorConverter(f'{ckpt_converter}/config.json', device=device)
+                self.tone_color_converter.load_ckpt(f'{ckpt_converter}/checkpoint.pth')
+        except Exception as err:
+            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
+
+        return backend_pb2.Result(message="Model loaded successfully", success=True)
+
+    def TTS(self, request, context):
+        model_name = request.model
+        if model_name == "":
+            return backend_pb2.Result(success=False, message="request.model is required")
+        try:
+            # Speed is adjustable
+            speed = 1.0
+            voice = "EN"
+            if request.voice:
+                voice = request.voice
+            model = TTS(language=voice, device=self.device)
+            speaker_ids = model.hps.data.spk2id
+            speaker_key = self.speaker
+            modelpath = self.modelpath
+            for s in speaker_ids.keys():
+                print(f"Speaker: {s} - ID: {speaker_ids[s]}")
+            speaker_id = speaker_ids[speaker_key]
+            speaker_key = speaker_key.lower().replace('_', '-')
+            source_se = torch.load(f'{modelpath}/base_speakers/ses/{speaker_key}.pth', map_location=self.device)
+            model.tts_to_file(request.text, speaker_id, request.dst, speed=speed)
+            if self.clonedVoice:
+                reference_speaker = self.ClonedVoicePath
+                target_se, audio_name = se_extractor.get_se(reference_speaker, self.tone_color_converter, vad=False)
+                # Run the tone color converter
+                encode_message = "@MyShell"
+                self.tone_color_converter.convert(
+                    audio_src_path=request.dst,
+                    src_se=source_se,
+                    tgt_se=target_se,
+                    output_path=request.dst,
+                    message=encode_message)
+
+            print("[OpenVoice] TTS generated!", file=sys.stderr)
+            print("[OpenVoice] TTS saved to", request.dst, file=sys.stderr)
+            print(request, file=sys.stderr)
+        except Exception as err:
+            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
+        return backend_pb2.Result(success=True)
+
+def serve(address):
+    server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS))
+    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
+    server.add_insecure_port(address)
+    server.start()
+    print("[OpenVoice] Server started. Listening on: " + address, file=sys.stderr)
+
+    # Define the signal handler function
+    def signal_handler(sig, frame):
+        print("[OpenVoice] Received termination signal. Shutting down...")
+        server.stop(0)
+        sys.exit(0)
+
+    # Set the signal handlers for SIGINT and SIGTERM
+    signal.signal(signal.SIGINT, signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    try:
+        while True:
+            time.sleep(_ONE_DAY_IN_SECONDS)
+    except KeyboardInterrupt:
+        server.stop(0)
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Run the gRPC server.")
+    parser.add_argument(
+        "--addr", default="localhost:50051", help="The address to bind the server to."
+    )
+    args = parser.parse_args()
+    print(f"[OpenVoice] startup: {args}", file=sys.stderr)
+    serve(args.addr)
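The test suite further below only exercises the plain TTS path, so here is a minimal client sketch for the voice-cloning path (not part of the commit; assumes generated stubs, a server on localhost:50051, an OpenVoice checkpoints_v2 directory laid out as LoadModel and TTS expect, and a hypothetical reference.wav recording):

# Hypothetical voice-cloning client. A non-empty AudioPath makes LoadModel
# build the ToneColorConverter; the path is resolved relative to ModelFile's
# directory unless it is absolute.
import grpc

import backend_pb2
import backend_pb2_grpc

with grpc.insecure_channel("localhost:50051") as channel:
    stub = backend_pb2_grpc.BackendStub(channel)
    res = stub.LoadModel(backend_pb2.ModelOptions(
        Model="checkpoints_v2",       # must contain converter/config.json and checkpoint.pth
        ModelFile="checkpoints_v2",   # base dir for base_speakers/ses/*.pth
        Type="EN-US",                 # assumed MeloTTS speaker key; TTS prints spk2id at runtime
        AudioPath="reference.wav"))   # hypothetical reference recording to clone
    assert res.success, res.message
    tts = stub.TTS(backend_pb2.TTSRequest(
        model="checkpoints_v2",       # TTS rejects an empty request.model
        text="Hello from OpenVoice",
        voice="EN",                   # MeloTTS language code
        dst="/tmp/out.wav"))          # written by tts_to_file, then converted in place
    assert tts.success, tts.message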

backend/python/openvoice/install.sh (new, executable file) | 16

@@ -0,0 +1,16 @@
+#!/bin/bash
+set -e
+
+source $(dirname $0)/../common/libbackend.sh
+
+# This is here because the Intel pip index is broken and returns 200 status codes for every package name; it just doesn't return any package links.
+# This makes uv think that the package exists in the Intel pip index, and by default it stops looking at other pip indexes once it finds a match.
+# We need uv to continue falling through to the default PyPI index to find optimum[openvino] there.
+# The --upgrade flag actually allows us to *downgrade* torch to the version provided in the Intel pip index.
+if [ "x${BUILD_PROFILE}" == "xintel" ]; then
+    EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
+fi
+
+installRequirements
+
+python -m unidic download

backend/python/openvoice/requirements-intel.txt (new file) | 23

@@ -0,0 +1,23 @@
+--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
+intel-extension-for-pytorch
+torch
+optimum[openvino]
+grpcio==1.63.0
+protobuf
+librosa==0.9.1
+faster-whisper==0.9.0
+pydub==0.25.1
+wavmark==0.0.3
+numpy==1.22.0
+eng_to_ipa==0.0.2
+inflect==7.0.0
+unidecode==1.3.7
+whisper-timestamped==1.14.2
+openai
+python-dotenv
+pypinyin==0.50.0
+cn2an==0.5.22
+jieba==0.42.1
+gradio==3.48.0
+langid==1.1.6
+git+https://github.com/myshell-ai/MeloTTS.git

backend/python/openvoice/requirements.txt (new file) | 20

@@ -0,0 +1,20 @@
+grpcio==1.63.0
+protobuf
+librosa==0.9.1
+faster-whisper==0.9.0
+pydub==0.25.1
+wavmark==0.0.3
+numpy==1.22.0
+eng_to_ipa==0.0.2
+inflect==7.0.0
+unidecode==1.3.7
+whisper-timestamped==1.14.2
+openai
+python-dotenv
+pypinyin==0.50.0
+cn2an==0.5.22
+jieba==0.42.1
+gradio==3.48.0
+langid==1.1.6
+git+https://github.com/myshell-ai/MeloTTS.git
+git+https://github.com/myshell-ai/OpenVoice.git

backend/python/openvoice/run.sh (new, executable file) | 4

@@ -0,0 +1,4 @@
+#!/bin/bash
+source $(dirname $0)/../common/libbackend.sh
+
+startBackend $@

backend/python/openvoice/test.py (new file) | 82

@@ -0,0 +1,82 @@
+"""
+A test script to test the gRPC service
+"""
+import unittest
+import subprocess
+import time
+import backend_pb2
+import backend_pb2_grpc
+
+import grpc
+
+
+class TestBackendServicer(unittest.TestCase):
+    """
+    TestBackendServicer is the class that tests the gRPC service
+    """
+    def setUp(self):
+        """
+        This method sets up the gRPC service by starting the server
+        """
+        self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
+        time.sleep(10)
+
+    def tearDown(self) -> None:
+        """
+        This method tears down the gRPC service by terminating the server
+        """
+        self.service.terminate()
+        self.service.wait()
+
+    def test_server_startup(self):
+        """
+        This method tests if the server starts up successfully
+        """
+        try:
+            self.setUp()
+            with grpc.insecure_channel("localhost:50051") as channel:
+                stub = backend_pb2_grpc.BackendStub(channel)
+                response = stub.Health(backend_pb2.HealthMessage())
+                self.assertEqual(response.message, b'OK')
+        except Exception as err:
+            print(err)
+            self.fail("Server failed to start")
+        finally:
+            self.tearDown()
+
+    def test_load_model(self):
+        """
+        This method tests if the model is loaded successfully
+        """
+        try:
+            self.setUp()
+            with grpc.insecure_channel("localhost:50051") as channel:
+                stub = backend_pb2_grpc.BackendStub(channel)
+                response = stub.LoadModel(backend_pb2.ModelOptions(Model="checkpoints_v2",
+                                                                   Type="en-us"))
+                self.assertTrue(response.success)
+                self.assertEqual(response.message, "Model loaded successfully")
+        except Exception as err:
+            print(err)
+            self.fail("LoadModel service failed")
+        finally:
+            self.tearDown()
+
+    def test_tts(self):
+        """
+        This method tests if TTS audio is generated successfully
+        """
+        try:
+            self.setUp()
+            with grpc.insecure_channel("localhost:50051") as channel:
+                stub = backend_pb2_grpc.BackendStub(channel)
+                response = stub.LoadModel(backend_pb2.ModelOptions(Model="dingzhen"))
+                self.assertTrue(response.success)
+                tts_request = backend_pb2.TTSRequest(text="80s TV news production music hit for tonight's biggest story", voice="EN")
+                tts_response = stub.TTS(tts_request)
+                self.assertIsNotNone(tts_response)
+        except Exception as err:
+            print(err)
+            self.fail("TTS service failed")
+        finally:
+            self.tearDown()
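One caveat: the TTS handler returns success=False when request.model is empty, and since test_tts only checks assertIsNotNone, it passes even on that failure path. A stricter variant might look like this (a sketch, not part of the commit; assumes the checkpoints_v2 layout from test_load_model and that "EN-US" is a valid key in MeloTTS's spk2id, which the handler prints at runtime):

# Sketch of a stricter test case: set model (required by the handler) and
# dst, then assert on Result.success instead of mere non-None-ness.
def test_tts_strict(self):
    """Sketch: like test_tts, but asserts on the Result's success flag."""
    try:
        self.setUp()
        with grpc.insecure_channel("localhost:50051") as channel:
            stub = backend_pb2_grpc.BackendStub(channel)
            response = stub.LoadModel(backend_pb2.ModelOptions(Model="checkpoints_v2", Type="EN-US"))
            self.assertTrue(response.success)
            tts_response = stub.TTS(backend_pb2.TTSRequest(
                text="80s TV news production music hit for tonight's biggest story",
                model="checkpoints_v2",        # an empty model yields success=False
                voice="EN",
                dst="/tmp/openvoice-test.wav"  # output written by tts_to_file
            ))
            self.assertTrue(tts_response.success, tts_response.message)
    except Exception as err:
        print(err)
        self.fail("TTS service failed")
    finally:
        self.tearDown()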

backend/python/openvoice/test.sh (new, executable file) | 12

@@ -0,0 +1,12 @@
+#!/bin/bash
+set -e
+
+source $(dirname $0)/../common/libbackend.sh
+
+# Download checkpoints if not present
+if [ ! -d "checkpoints_v2" ]; then
+    wget https://myshell-public-repo-hosting.s3.amazonaws.com/openvoice/checkpoints_v2_0417.zip -O checkpoints_v2.zip
+    unzip checkpoints_v2.zip
+fi
+
+runUnittests