diff --git a/backend/python/diffusers/backend.py b/backend/python/diffusers/backend.py
index ec2dea60..1496fa94 100755
--- a/backend/python/diffusers/backend.py
+++ b/backend/python/diffusers/backend.py
@@ -17,7 +17,7 @@ import backend_pb2_grpc
 
 import grpc
 
-from diffusers import StableDiffusionXLPipeline, StableDiffusionDepth2ImgPipeline, DPMSolverMultistepScheduler, StableDiffusionPipeline, DiffusionPipeline, EulerAncestralDiscreteScheduler
+from diffusers import StableDiffusion3Pipeline, StableDiffusionXLPipeline, StableDiffusionDepth2ImgPipeline, DPMSolverMultistepScheduler, StableDiffusionPipeline, DiffusionPipeline, EulerAncestralDiscreteScheduler
 from diffusers import StableDiffusionImg2ImgPipeline, AutoPipelineForText2Image, ControlNetModel, StableVideoDiffusionPipeline
 from diffusers.pipelines.stable_diffusion import safety_checker
 from diffusers.utils import load_image,export_to_video
@@ -225,6 +225,17 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
                         torch_dtype=torchType,
                         use_safetensors=True,
                         variant=variant)
+            elif request.PipelineType == "StableDiffusion3Pipeline":
+                if fromSingleFile:
+                    self.pipe = StableDiffusion3Pipeline.from_single_file(modelFile,
+                                                                          torch_dtype=torchType,
+                                                                          use_safetensors=True)
+                else:
+                    self.pipe = StableDiffusion3Pipeline.from_pretrained(
+                        request.Model,
+                        torch_dtype=torchType,
+                        use_safetensors=True,
+                        variant=variant)
 
             if CLIPSKIP and request.CLIPSkip != 0:
                 self.clip_skip = request.CLIPSkip
diff --git a/backend/python/diffusers/requirements.txt b/backend/python/diffusers/requirements.txt
index 1c663adc..188fb5dc 100644
--- a/backend/python/diffusers/requirements.txt
+++ b/backend/python/diffusers/requirements.txt
@@ -5,6 +5,7 @@ grpcio==1.64.0
 opencv-python
 pillow
 protobuf
+sentencepiece
 torch
 transformers
-certifi
\ No newline at end of file
+certifi
diff --git a/gallery/index.yaml b/gallery/index.yaml
index a458b33d..e92f35db 100644
--- a/gallery/index.yaml
+++ b/gallery/index.yaml
@@ -2580,6 +2580,21 @@
     - filename: DreamShaper_8_pruned.safetensors
       uri: huggingface://Lykon/DreamShaper/DreamShaper_8_pruned.safetensors
       sha256: 879db523c30d3b9017143d56705015e15a2cb5628762c11d086fed9538abd7fd
+- name: stable-diffusion-3-medium
+  icon: https://huggingface.co/leo009/stable-diffusion-3-medium/resolve/main/sd3demo.jpg
+  license: other
+  description: |
+    Stable Diffusion 3 Medium is a Multimodal Diffusion Transformer (MMDiT) text-to-image model that features greatly improved performance in image quality, typography, complex prompt understanding, and resource-efficiency.
+  urls:
+    - https://huggingface.co/stabilityai/stable-diffusion-3-medium
+    - https://huggingface.co/leo009/stable-diffusion-3-medium
+  tags:
+    - text-to-image
+    - stablediffusion
+    - python
+    - sd-3
+    - gpu
+  url: "github:mudler/LocalAI/gallery/stablediffusion3.yaml@master"
 - &whisper
   ## Whisper
   url: "github:mudler/LocalAI/gallery/whisper-base.yaml@master"
diff --git a/gallery/stablediffusion3.yaml b/gallery/stablediffusion3.yaml
new file mode 100644
index 00000000..855c8b51
--- /dev/null
+++ b/gallery/stablediffusion3.yaml
@@ -0,0 +1,14 @@
+---
+name: "stable-diffusion-3-medium"
+
+config_file: |
+  backend: diffusers
+  diffusers:
+    cuda: true
+    enable_parameters: negative_prompt,num_inference_steps
+    pipeline_type: StableDiffusion3Pipeline
+  f16: false
+  name: sd3
+  parameters:
+    model: v2ray/stable-diffusion-3-medium-diffusers
+  step: 25
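
For reviewers who want to exercise the new branch without going through LocalAI's gRPC backend, below is a minimal diffusers-only sketch of what the added `StableDiffusion3Pipeline` code path amounts to. It assumes a diffusers release with SD3 support and reuses the model id and step count from the gallery config; the prompt, fp16 dtype, and output filename are illustrative, not part of this PR. The `sentencepiece` addition to requirements.txt is presumably needed because SD3's T5 text-encoder tokenizer depends on it.

```python
# Standalone sketch of the StableDiffusion3Pipeline path added in this PR
# (assumptions: diffusers with SD3 support installed, a CUDA GPU available).
import torch
from diffusers import StableDiffusion3Pipeline

# Same model id as gallery/stablediffusion3.yaml; fp16 is used here to fit
# smaller GPUs, whereas the gallery config ships with f16: false.
pipe = StableDiffusion3Pipeline.from_pretrained(
    "v2ray/stable-diffusion-3-medium-diffusers",
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe.to("cuda")

# negative_prompt and num_inference_steps match the parameters the gallery
# config exposes via enable_parameters; the prompt is just an example.
image = pipe(
    "a photo of an astronaut riding a horse on mars",
    negative_prompt="blurry, low quality",
    num_inference_steps=25,
).images[0]
image.save("sd3_output.png")
```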