Mirror of https://github.com/mudler/LocalAI.git
fix: guidance_scale not work in sd (#1488)

Signed-off-by: hibobmaster <32976627+hibobmaster@users.noreply.github.com>

parent 6597881854
commit 7e2d101a46
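The bug: the diffusers backend passed guidance_scale (the CFG scale from the request) into the pipeline constructors, from_pretrained() and from_single_file(), which take loading options, not sampling parameters; diffusers treats it as an unexpected keyword (typically warning that it will be ignored), so the requested scale never influenced sampling. The fix below stores the value as self.cfg_scale at load time and forwards it on every pipeline call, which is where diffusers actually reads it. A minimal sketch of the wrong and the right placement (the prompt is illustrative; the model id is the backend's default checkpoint):

from diffusers import StableDiffusionPipeline

model_id = "runwayml/stable-diffusion-v1-5"  # same default checkpoint the backend uses

# Buggy placement: guidance_scale is not a loading argument, so it is
# dropped at load time and sampling runs at the library default instead.
pipe = StableDiffusionPipeline.from_pretrained(model_id, guidance_scale=9.0)

# Correct placement: guidance_scale is a __call__() parameter, read per request.
pipe = StableDiffusionPipeline.from_pretrained(model_id)
image = pipe("an astronaut riding a horse", guidance_scale=9.0).images[0]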
@@ -149,9 +149,9 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
         local = False
         modelFile = request.Model

-        cfg_scale = 7
+        self.cfg_scale = 7
         if request.CFGScale != 0:
-            cfg_scale = request.CFGScale
+            self.cfg_scale = request.CFGScale

         clipmodel = "runwayml/stable-diffusion-v1-5"
         if request.CLIPModel != "":
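The hunk above turns the local cfg_scale into instance state so the generation code in the later hunks can see it. A condensed sketch of the pattern, assuming (as the diff suggests) a request message whose CFGScale field is 0 when the client did not set it:

class BackendServicer:
    DEFAULT_CFG_SCALE = 7  # backend default when the request leaves CFGScale unset

    def load_model(self, request):  # hypothetical name; stands in for LoadModel
        # Keep the CFG scale on the servicer rather than in a local variable,
        # so the generation path can forward it on every pipeline call.
        self.cfg_scale = self.DEFAULT_CFG_SCALE
        if request.CFGScale != 0:  # 0 doubles as "not provided"
            self.cfg_scale = request.CFGScale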
@@ -173,17 +173,14 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
         if (request.PipelineType == "StableDiffusionImg2ImgPipeline") or (request.IMG2IMG and request.PipelineType == ""):
             if fromSingleFile:
                 self.pipe = StableDiffusionImg2ImgPipeline.from_single_file(modelFile,
-                                                                            torch_dtype=torchType,
-                                                                            guidance_scale=cfg_scale)
+                                                                            torch_dtype=torchType)
             else:
                 self.pipe = StableDiffusionImg2ImgPipeline.from_pretrained(request.Model,
-                                                                           torch_dtype=torchType,
-                                                                           guidance_scale=cfg_scale)
+                                                                           torch_dtype=torchType)

         elif request.PipelineType == "StableDiffusionDepth2ImgPipeline":
             self.pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(request.Model,
-                                                                         torch_dtype=torchType,
-                                                                         guidance_scale=cfg_scale)
+                                                                         torch_dtype=torchType)
         ## img2vid
         elif request.PipelineType == "StableVideoDiffusionPipeline":
             self.img2vid=True
@@ -197,38 +194,32 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
             self.pipe = AutoPipelineForText2Image.from_pretrained(request.Model,
                                                                   torch_dtype=torchType,
                                                                   use_safetensors=SAFETENSORS,
-                                                                  variant=variant,
-                                                                  guidance_scale=cfg_scale)
+                                                                  variant=variant)
         elif request.PipelineType == "StableDiffusionPipeline":
             if fromSingleFile:
                 self.pipe = StableDiffusionPipeline.from_single_file(modelFile,
-                                                                     torch_dtype=torchType,
-                                                                     guidance_scale=cfg_scale)
+                                                                     torch_dtype=torchType)
             else:
                 self.pipe = StableDiffusionPipeline.from_pretrained(request.Model,
-                                                                    torch_dtype=torchType,
-                                                                    guidance_scale=cfg_scale)
+                                                                    torch_dtype=torchType)
         elif request.PipelineType == "DiffusionPipeline":
             self.pipe = DiffusionPipeline.from_pretrained(request.Model,
-                                                          torch_dtype=torchType,
-                                                          guidance_scale=cfg_scale)
+                                                          torch_dtype=torchType)
         elif request.PipelineType == "VideoDiffusionPipeline":
             self.txt2vid=True
             self.pipe = DiffusionPipeline.from_pretrained(request.Model,
-                                                          torch_dtype=torchType,
-                                                          guidance_scale=cfg_scale)
+                                                          torch_dtype=torchType)
         elif request.PipelineType == "StableDiffusionXLPipeline":
             if fromSingleFile:
                 self.pipe = StableDiffusionXLPipeline.from_single_file(modelFile,
-                                                                       torch_dtype=torchType, use_safetensors=True,
-                                                                       guidance_scale=cfg_scale)
+                                                                       torch_dtype=torchType,
+                                                                       use_safetensors=True)
             else:
                 self.pipe = StableDiffusionXLPipeline.from_pretrained(
                     request.Model,
                     torch_dtype=torchType,
                     use_safetensors=True,
-                    variant=variant,
-                    guidance_scale=cfg_scale)
+                    variant=variant)

         if CLIPSKIP and request.CLIPSkip != 0:
             self.clip_skip = request.CLIPSkip
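After this hunk, every load path, from_single_file() as well as from_pretrained(), passes only genuine load-time options (torch_dtype, use_safetensors, variant); the CFG scale no longer appears at construction. A minimal sketch of the two load paths (the file name and checkpoint id are illustrative):

import torch
from diffusers import StableDiffusionPipeline

dtype = torch.float16  # stands in for the backend's torchType

# Load from a single checkpoint file: load-time options only.
pipe = StableDiffusionPipeline.from_single_file("model.safetensors",
                                                torch_dtype=dtype)

# Load from a model repo or directory: likewise load-time options only.
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5",
                                               torch_dtype=dtype)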
@@ -384,12 +375,12 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
             image = image.resize((1024, 576))

             generator = torch.manual_seed(request.seed)
-            frames = self.pipe(image, decode_chunk_size=CHUNK_SIZE, generator=generator).frames[0]
+            frames = self.pipe(image, guidance_scale=self.cfg_scale, decode_chunk_size=CHUNK_SIZE, generator=generator).frames[0]
             export_to_video(frames, request.dst, fps=FPS)
             return backend_pb2.Result(message="Media generated successfully", success=True)

         if self.txt2vid:
-            video_frames = self.pipe(prompt, num_inference_steps=steps, num_frames=int(FRAMES)).frames
+            video_frames = self.pipe(prompt, guidance_scale=self.cfg_scale, num_inference_steps=steps, num_frames=int(FRAMES)).frames
             export_to_video(video_frames, request.dst)
             return backend_pb2.Result(message="Media generated successfully", success=True)

@@ -399,12 +390,14 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
             kwargs["prompt_embeds"]= conditioning
             # pass the kwargs dictionary to the self.pipe method
             image = self.pipe(
+                guidance_scale=self.cfg_scale,
                 **kwargs
             ).images[0]
         else:
             # pass the kwargs dictionary to the self.pipe method
             image = self.pipe(
                 prompt,
+                guidance_scale=self.cfg_scale,
                 **kwargs
             ).images[0]

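The last hunks mirror the storage change at the call sites: every self.pipe(...) invocation now receives guidance_scale=self.cfg_scale next to the per-request options collected in the kwargs dict. A condensed sketch of that call pattern (the prompt and option values are illustrative):

from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
cfg_scale = 7  # the value LoadModel stores on self

# Per-request options travel in a dict, as in the backend, and the stored
# CFG scale is forwarded explicitly on each call.
kwargs = {"num_inference_steps": 25, "width": 512, "height": 512}
image = pipe("an astronaut riding a horse", guidance_scale=cfg_scale, **kwargs).images[0]
image.save("output.png")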