From bae8b7c217d7d0151b8ed5e101aef81926411032 Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto
Date: Wed, 22 Jan 2025 19:36:37 +0100
Subject: [PATCH] feat(faster-whisper): add backend

Signed-off-by: Ettore Di Giacinto
---
 Dockerfile                                    |  5 +-
 Makefile                                      | 13 ++-
 backend/python/faster-whisper/Makefile        | 20 ++++
 backend/python/faster-whisper/backend.py      | 92 +++++++++++++++++++
 backend/python/faster-whisper/install.sh      | 14 +++
 backend/python/faster-whisper/protogen.sh     |  6 ++
 .../faster-whisper/requirements-cpu.txt       |  8 ++
 .../faster-whisper/requirements-cublas11.txt  |  9 ++
 .../faster-whisper/requirements-cublas12.txt  |  8 ++
 .../faster-whisper/requirements-hipblas.txt   |  3 +
 .../faster-whisper/requirements-intel.txt     |  6 ++
 .../python/faster-whisper/requirements.txt    |  3 +
 backend/python/faster-whisper/run.sh          |  4 +
 backend/python/faster-whisper/test.sh         |  6 ++
 14 files changed, 194 insertions(+), 3 deletions(-)
 create mode 100644 backend/python/faster-whisper/Makefile
 create mode 100755 backend/python/faster-whisper/backend.py
 create mode 100755 backend/python/faster-whisper/install.sh
 create mode 100644 backend/python/faster-whisper/protogen.sh
 create mode 100644 backend/python/faster-whisper/requirements-cpu.txt
 create mode 100644 backend/python/faster-whisper/requirements-cublas11.txt
 create mode 100644 backend/python/faster-whisper/requirements-cublas12.txt
 create mode 100644 backend/python/faster-whisper/requirements-hipblas.txt
 create mode 100644 backend/python/faster-whisper/requirements-intel.txt
 create mode 100644 backend/python/faster-whisper/requirements.txt
 create mode 100755 backend/python/faster-whisper/run.sh
 create mode 100755 backend/python/faster-whisper/test.sh

diff --git a/Dockerfile b/Dockerfile
index 8594c2a1..b01f071d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -15,7 +15,7 @@ ARG TARGETARCH
 ARG TARGETVARIANT
 
 ENV DEBIAN_FRONTEND=noninteractive
-ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,transformers:/build/backend/python/transformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,autogptq:/build/backend/python/autogptq/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,openvoice:/build/backend/python/openvoice/run.sh,kokoro:/build/backend/python/kokoro/run.sh,vllm:/build/backend/python/vllm/run.sh,mamba:/build/backend/python/mamba/run.sh,exllama2:/build/backend/python/exllama2/run.sh,parler-tts:/build/backend/python/parler-tts/run.sh"
+ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,transformers:/build/backend/python/transformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,autogptq:/build/backend/python/autogptq/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,faster-whisper:/build/backend/python/faster-whisper/run.sh,openvoice:/build/backend/python/openvoice/run.sh,kokoro:/build/backend/python/kokoro/run.sh,vllm:/build/backend/python/vllm/run.sh,mamba:/build/backend/python/mamba/run.sh,exllama2:/build/backend/python/exllama2/run.sh,parler-tts:/build/backend/python/parler-tts/run.sh"
 
 
 RUN apt-get update && \
@@ -414,6 +414,9 @@ RUN if [[ ( "${EXTRA_BACKENDS}" =~ "coqui" || -z "${EXTRA_BACKENDS}" ) && "$IMAG
     if [[ ( "${EXTRA_BACKENDS}" =~ "parler-tts" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
         make -C backend/python/parler-tts \
     ; fi && \
+    if [[ ( "${EXTRA_BACKENDS}" =~ "faster-whisper" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
+        make -C backend/python/faster-whisper \
+    ; fi && \
if [[ ( "${EXTRA_BACKENDS}" =~ "diffusers" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \ make -C backend/python/diffusers \ ; fi diff --git a/Makefile b/Makefile index 312bfcc4..efc5812b 100644 --- a/Makefile +++ b/Makefile @@ -533,10 +533,10 @@ protogen-go-clean: $(RM) bin/* .PHONY: protogen-python -protogen-python: autogptq-protogen bark-protogen coqui-protogen diffusers-protogen exllama2-protogen mamba-protogen rerankers-protogen transformers-protogen parler-tts-protogen kokoro-protogen vllm-protogen openvoice-protogen +protogen-python: autogptq-protogen bark-protogen coqui-protogen diffusers-protogen exllama2-protogen mamba-protogen rerankers-protogen transformers-protogen parler-tts-protogen kokoro-protogen vllm-protogen openvoice-protogen faster-whisper-protogen .PHONY: protogen-python-clean -protogen-python-clean: autogptq-protogen-clean bark-protogen-clean coqui-protogen-clean diffusers-protogen-clean exllama2-protogen-clean mamba-protogen-clean rerankers-protogen-clean transformers-protogen-clean parler-tts-protogen-clean kokoro-protogen-clean vllm-protogen-clean openvoice-protogen-clean +protogen-python-clean: autogptq-protogen-clean bark-protogen-clean coqui-protogen-clean diffusers-protogen-clean exllama2-protogen-clean mamba-protogen-clean rerankers-protogen-clean transformers-protogen-clean parler-tts-protogen-clean kokoro-protogen-clean vllm-protogen-clean openvoice-protogen-clean faster-whisper-protogen-clean .PHONY: autogptq-protogen autogptq-protogen: @@ -570,6 +570,14 @@ diffusers-protogen: diffusers-protogen-clean: $(MAKE) -C backend/python/diffusers protogen-clean +.PHONY: faster-whisper-protogen +faster-whisper-protogen: + $(MAKE) -C backend/python/faster-whisper protogen + +.PHONY: faster-whisper-protogen-clean +faster-whisper-protogen-clean: + $(MAKE) -C backend/python/faster-whisper protogen-clean + .PHONY: exllama2-protogen exllama2-protogen: $(MAKE) -C backend/python/exllama2 protogen @@ -641,6 +649,7 @@ prepare-extra-conda-environments: protogen-python $(MAKE) -C backend/python/bark $(MAKE) -C backend/python/coqui $(MAKE) -C backend/python/diffusers + $(MAKE) -C backend/python/faster-whisper $(MAKE) -C backend/python/vllm $(MAKE) -C backend/python/mamba $(MAKE) -C backend/python/rerankers diff --git a/backend/python/faster-whisper/Makefile b/backend/python/faster-whisper/Makefile new file mode 100644 index 00000000..c0e5169f --- /dev/null +++ b/backend/python/faster-whisper/Makefile @@ -0,0 +1,20 @@ +.DEFAULT_GOAL := install + +.PHONY: install +install: + bash install.sh + $(MAKE) protogen + +.PHONY: protogen +protogen: backend_pb2_grpc.py backend_pb2.py + +.PHONY: protogen-clean +protogen-clean: + $(RM) backend_pb2_grpc.py backend_pb2.py + +backend_pb2_grpc.py backend_pb2.py: + bash protogen.sh + +.PHONY: clean +clean: protogen-clean + rm -rf venv __pycache__ \ No newline at end of file diff --git a/backend/python/faster-whisper/backend.py b/backend/python/faster-whisper/backend.py new file mode 100755 index 00000000..dbb8b3d9 --- /dev/null +++ b/backend/python/faster-whisper/backend.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python3 +""" +This is an extra gRPC server of LocalAI for Bark TTS +""" +from concurrent import futures +import time +import argparse +import signal +import sys +import os +import backend_pb2 +import backend_pb2_grpc + +from faster_whisper import WhisperModel + +import grpc + + +_ONE_DAY_IN_SECONDS = 60 * 60 * 24 + +# If MAX_WORKERS are specified in the environment use it, otherwise default to 1 +MAX_WORKERS = 
+
+# Implement the BackendServicer class with the service methods
+class BackendServicer(backend_pb2_grpc.BackendServicer):
+    """
+    BackendServicer is the class that implements the gRPC service
+    """
+    def Health(self, request, context):
+        return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
+
+    def LoadModel(self, request, context):
+        # Select the device requested by LocalAI
+        device = "cpu"
+        if request.CUDA:
+            device = "cuda"
+
+        try:
+            print("Preparing models, please wait", file=sys.stderr)
+            # float16 is only supported on GPU, so fall back to int8 quantization on CPU
+            self.model = WhisperModel(request.Model, device=device, compute_type="float16" if device == "cuda" else "int8")
+        except Exception as err:
+            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
+        return backend_pb2.Result(message="Model loaded successfully", success=True)
+
+    def AudioTranscription(self, request, context):
+        resultSegments = []
+        text = ""
+        try:
+            segments, info = self.model.transcribe(request.dst, beam_size=5, condition_on_previous_text=False)
+            segment_id = 0
+            for segment in segments:
+                print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))
+                resultSegments.append(backend_pb2.TranscriptSegment(id=segment_id, start=segment.start, end=segment.end, text=segment.text))
+                text += segment.text
+                segment_id += 1
+        except Exception as err:
+            print(f"Unexpected {err=}, {type(err)=}", file=sys.stderr)
+
+        return backend_pb2.TranscriptResult(segments=resultSegments, text=text)
+
+def serve(address):
+    server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS))
+    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
+    server.add_insecure_port(address)
+    server.start()
+    print("Server started. Listening on: " + address, file=sys.stderr)
+
+    # Define the signal handler function
+    def signal_handler(sig, frame):
+        print("Received termination signal. Shutting down...")
+        server.stop(0)
+        sys.exit(0)
+
+    # Set the signal handlers for SIGINT and SIGTERM
+    signal.signal(signal.SIGINT, signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    try:
+        while True:
+            time.sleep(_ONE_DAY_IN_SECONDS)
+    except KeyboardInterrupt:
+        server.stop(0)
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Run the gRPC server.")
+    parser.add_argument(
+        "--addr", default="localhost:50051", help="The address to bind the server to."
+    )
+    args = parser.parse_args()
+
+    serve(args.addr)

diff --git a/backend/python/faster-whisper/install.sh b/backend/python/faster-whisper/install.sh
new file mode 100755
index 00000000..36443ef1
--- /dev/null
+++ b/backend/python/faster-whisper/install.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+set -e
+
+source $(dirname $0)/../common/libbackend.sh
+
+# This is here because the Intel pip index is broken: it returns 200 status codes for every package name but never returns any package links.
+# This makes uv think that the package exists in the Intel pip index, and by default it stops looking at other pip indexes once it finds a match.
+# We need uv to continue falling through to the default PyPI index to find optimum[openvino] there.
+# The --upgrade flag actually allows us to *downgrade* torch to the version provided in the Intel pip index.
+if [ "x${BUILD_PROFILE}" == "xintel" ]; then
+	EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
+fi
+
+installRequirements

diff --git a/backend/python/faster-whisper/protogen.sh b/backend/python/faster-whisper/protogen.sh
new file mode 100644
index 00000000..32f39fbb
--- /dev/null
+++ b/backend/python/faster-whisper/protogen.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+set -e
+
+source $(dirname $0)/../common/libbackend.sh
+
+python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=. backend.proto
\ No newline at end of file

diff --git a/backend/python/faster-whisper/requirements-cpu.txt b/backend/python/faster-whisper/requirements-cpu.txt
new file mode 100644
index 00000000..3e03f3ad
--- /dev/null
+++ b/backend/python/faster-whisper/requirements-cpu.txt
@@ -0,0 +1,8 @@
+faster-whisper
+opencv-python
+accelerate
+compel
+peft
+sentencepiece
+torch==2.4.1
+optimum-quanto
\ No newline at end of file

diff --git a/backend/python/faster-whisper/requirements-cublas11.txt b/backend/python/faster-whisper/requirements-cublas11.txt
new file mode 100644
index 00000000..b7453295
--- /dev/null
+++ b/backend/python/faster-whisper/requirements-cublas11.txt
@@ -0,0 +1,9 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch==2.4.1+cu118
+faster-whisper
+opencv-python
+accelerate
+compel
+peft
+sentencepiece
+optimum-quanto
\ No newline at end of file

diff --git a/backend/python/faster-whisper/requirements-cublas12.txt b/backend/python/faster-whisper/requirements-cublas12.txt
new file mode 100644
index 00000000..8f46fa4a
--- /dev/null
+++ b/backend/python/faster-whisper/requirements-cublas12.txt
@@ -0,0 +1,8 @@
+torch==2.4.1
+faster-whisper
+opencv-python
+accelerate
+compel
+peft
+sentencepiece
+optimum-quanto
\ No newline at end of file

diff --git a/backend/python/faster-whisper/requirements-hipblas.txt b/backend/python/faster-whisper/requirements-hipblas.txt
new file mode 100644
index 00000000..29413f05
--- /dev/null
+++ b/backend/python/faster-whisper/requirements-hipblas.txt
@@ -0,0 +1,3 @@
+--extra-index-url https://download.pytorch.org/whl/rocm6.0
+torch
+faster-whisper
\ No newline at end of file

diff --git a/backend/python/faster-whisper/requirements-intel.txt b/backend/python/faster-whisper/requirements-intel.txt
new file mode 100644
index 00000000..417aa0b4
--- /dev/null
+++ b/backend/python/faster-whisper/requirements-intel.txt
@@ -0,0 +1,6 @@
+--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
+intel-extension-for-pytorch==2.3.110+xpu
+torch==2.3.1+cxx11.abi
+oneccl_bind_pt==2.3.100+xpu
+optimum[openvino]
+faster-whisper
\ No newline at end of file

diff --git a/backend/python/faster-whisper/requirements.txt b/backend/python/faster-whisper/requirements.txt
new file mode 100644
index 00000000..0f43df10
--- /dev/null
+++ b/backend/python/faster-whisper/requirements.txt
@@ -0,0 +1,3 @@
+grpcio==1.69.0
+protobuf
+grpcio-tools
\ No newline at end of file

diff --git a/backend/python/faster-whisper/run.sh b/backend/python/faster-whisper/run.sh
new file mode 100755
index 00000000..375c07e5
--- /dev/null
+++ b/backend/python/faster-whisper/run.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+source $(dirname $0)/../common/libbackend.sh
+
+startBackend $@
\ No newline at end of file

diff --git a/backend/python/faster-whisper/test.sh b/backend/python/faster-whisper/test.sh
new file mode 100755
index 00000000..6940b066
--- /dev/null
+++ b/backend/python/faster-whisper/test.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+set -e
+
+source $(dirname $0)/../common/libbackend.sh
+
+runUnittests
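
For manual testing, the sketch below is a minimal gRPC client that drives the new
backend directly. It is illustrative only: it assumes Python stubs generated from
LocalAI's backend.proto (BackendStub, ModelOptions, TranscriptRequest) carrying the
Model/CUDA/dst fields used in backend.py above, plus a hypothetical local audio file
path; adjust the names if the actual proto differs.

    #!/usr/bin/env python3
    # Minimal manual-test client for the faster-whisper backend (sketch, not part of the patch).
    import grpc
    import backend_pb2        # generated from backend.proto via protogen.sh
    import backend_pb2_grpc   # generated from backend.proto via protogen.sh

    def main():
        # backend.py binds to the address passed via --addr (default localhost:50051)
        with grpc.insecure_channel("localhost:50051") as channel:
            stub = backend_pb2_grpc.BackendStub(channel)

            # Load a CTranslate2 Whisper checkpoint; CUDA=False selects CPU (int8 compute type)
            res = stub.LoadModel(backend_pb2.ModelOptions(Model="tiny", CUDA=False))
            assert res.success, res.message

            # Transcribe the audio file referenced by `dst` (hypothetical path)
            result = stub.AudioTranscription(backend_pb2.TranscriptRequest(dst="/tmp/sample.wav"))
            for seg in result.segments:
                print(f"[{seg.start:.2f}s -> {seg.end:.2f}s] {seg.text}")

    if __name__ == "__main__":
        main()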
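Once an image is built with this backend enabled, the backend can also be exercised
end-to-end through LocalAI's OpenAI-compatible transcription API. The model name,
file paths, and the "tiny" checkpoint below are illustrative assumptions, not part
of this patch:

    # models/faster-whisper.yaml -- hypothetical model config selecting the new backend
    name: whisper
    backend: faster-whisper
    parameters:
      model: tiny

    # Then transcribe an audio file against the running LocalAI instance:
    curl http://localhost:8080/v1/audio/transcriptions \
      -H "Content-Type: multipart/form-data" \
      -F file="@/path/to/sample.wav" \
      -F model="whisper"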