From d4840285321e72b75c0be2f15710651e99b3db82 Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto
Date: Thu, 10 Apr 2025 09:55:51 +0200
Subject: [PATCH] feat(diffusers): add support for Lumina2Text2ImgPipeline
 (#4806)

Signed-off-by: Ettore Di Giacinto
---
 backend/python/diffusers/backend.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/backend/python/diffusers/backend.py b/backend/python/diffusers/backend.py
index 1724ee21..3668b016 100755
--- a/backend/python/diffusers/backend.py
+++ b/backend/python/diffusers/backend.py
@@ -19,7 +19,7 @@ import grpc
 
 from diffusers import SanaPipeline, StableDiffusion3Pipeline, StableDiffusionXLPipeline, StableDiffusionDepth2ImgPipeline, DPMSolverMultistepScheduler, StableDiffusionPipeline, DiffusionPipeline, \
     EulerAncestralDiscreteScheduler, FluxPipeline, FluxTransformer2DModel
-from diffusers import StableDiffusionImg2ImgPipeline, AutoPipelineForText2Image, ControlNetModel, StableVideoDiffusionPipeline
+from diffusers import StableDiffusionImg2ImgPipeline, AutoPipelineForText2Image, ControlNetModel, StableVideoDiffusionPipeline, Lumina2Text2ImgPipeline
 from diffusers.pipelines.stable_diffusion import safety_checker
 from diffusers.utils import load_image, export_to_video
 from compel import Compel, ReturnedEmbeddingsType
@@ -287,6 +287,12 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
 
                 if request.LowVRAM:
                     self.pipe.enable_model_cpu_offload()
+            elif request.PipelineType == "Lumina2Text2ImgPipeline":
+                self.pipe = Lumina2Text2ImgPipeline.from_pretrained(
+                    request.Model,
+                    torch_dtype=torch.bfloat16)
+                if request.LowVRAM:
+                    self.pipe.enable_model_cpu_offload()
             elif request.PipelineType == "SanaPipeline":
                 self.pipe = SanaPipeline.from_pretrained(
                     request.Model,
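
The patch extends the PipelineType dispatch in the backend servicer: a request whose PipelineType is "Lumina2Text2ImgPipeline" now loads the matching diffusers pipeline in bfloat16 and enables model CPU offload when LowVRAM is set, mirroring the neighboring branches. A minimal standalone sketch of the same loading path, outside LocalAI's gRPC servicer, could look like the following; the checkpoint ID and prompt are illustrative assumptions, not taken from the patch:

# Standalone sketch of the loading path the patch wires up. The checkpoint
# "Alpha-VLLM/Lumina-Image-2.0" is an assumed example; in LocalAI this ID
# arrives as request.Model.
import torch
from diffusers import Lumina2Text2ImgPipeline

pipe = Lumina2Text2ImgPipeline.from_pretrained(
    "Alpha-VLLM/Lumina-Image-2.0",  # hypothetical checkpoint, not from the patch
    torch_dtype=torch.bfloat16)     # same dtype the patch hard-codes

# Equivalent of the request.LowVRAM path: submodules are moved to the GPU
# only while they run, trading throughput for a smaller VRAM footprint.
pipe.enable_model_cpu_offload()

image = pipe(prompt="a watercolor fox in a forest").images[0]
image.save("lumina2.png")

Note that the dtype is fixed to torch.bfloat16 rather than taken from the request options. In a LocalAI model config, this branch would presumably be selected by setting pipeline_type: Lumina2Text2ImgPipeline under the diffusers section, as with the other pipelines handled by this backend.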