This commit is contained in:
Saifeddine ALOUI 2024-06-13 22:08:06 +02:00
parent b9b95191bb
commit 1e11d05eb9
2 changed files with 64 additions and 16 deletions

View File

@@ -0,0 +1,46 @@
# Lollms function call definition file
# File Name: download_youtube_transcript.py
# Author: ParisNeo
# Description: This function goes online to YouTube and downloads the transcript from any video
# Importing necessary libraries
from functools import partial
from typing import List
from lollms.utilities import PackageManager
from ascii_colors import trace_exception
# Installing necessary packages
# NOTE(review): check_package_installed is passed the PyPI distribution name
# "youtube-transcript-api" (hyphenated), but the importable module is
# youtube_transcript_api (underscored). If the check tests importability,
# it will always report "not installed" and re-run the install on every
# import — confirm against PackageManager's implementation.
if not PackageManager.check_package_installed("youtube-transcript-api"):
    PackageManager.install_package("youtube-transcript-api")
# Importing the package after installation
from youtube_transcript_api import YouTubeTranscriptApi
def download_youtube_transcript(video_id: str) -> str:
    """
    Download the transcript of a YouTube video given its video ID.

    Parameters:
        video_id (str): The ID of the YouTube video (the part after ?v= in the URL).

    Returns:
        str: The full transcript joined into a single space-separated string,
             or a human-readable error message if the fetch failed.
    """
    try:
        # Fetching the transcript (a list of entries, each with a 'text' field)
        transcript = YouTubeTranscriptApi.get_transcript(video_id)
        # Combining the transcript entries into a single string
        return " ".join(entry["text"] for entry in transcript)
    except Exception as e:
        # Log the full traceback for diagnostics. trace_exception's return
        # value is not guaranteed to be a str, so return the error text
        # explicitly to honor the declared -> str contract.
        trace_exception(e)
        return f"Error downloading transcript: {e}"
def download_youtube_transcript_function():
    """
    Build the Lollms function-call descriptor for download_youtube_transcript.

    Returns:
        dict: Metadata exposing the function's name, the callable itself,
              a human-readable description, and its parameter specification.
    """
    return dict(
        function_name="download_youtube_transcript",
        function=download_youtube_transcript,
        function_description="This function goes online to YouTube and downloads the transcript from any video.",
        function_parameters=[dict(name="video_id", type="str")],
    )

View File

@@ -126,22 +126,24 @@ class LollmsDiffusers(LollmsTTI):
PackageManager.install_or_update("diffusers")
PackageManager.install_or_update("sentencepiece")
PackageManager.install_or_update("accelerate")
from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image#PixArtSigmaPipeline
self.model = AutoPipelineForText2Image.from_pretrained(
app.config.diffusers_model, torch_dtype=torch.float16, cache_dir=self.models_dir,
use_safetensors=True,
)
# self.model = StableDiffusionPipeline.from_pretrained(
# "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, cache_dir=self.models_dir,
# use_safetensors=True,
# ) # app.config.diffusers_model
# Enable memory optimizations.
if app.config.diffusers_offloading_mode=="sequential_cpu_offload":
self.model.enable_sequential_cpu_offload()
elif app.coinfig.diffusers_offloading_mode=="model_cpu_offload":
self.model.enable_model_cpu_offload()
try:
from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image#PixArtSigmaPipeline
self.model = AutoPipelineForText2Image.from_pretrained(
app.config.diffusers_model, torch_dtype=torch.float16, cache_dir=self.models_dir,
use_safetensors=True,
)
# self.model = StableDiffusionPipeline.from_pretrained(
# "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, cache_dir=self.models_dir,
# use_safetensors=True,
# ) # app.config.diffusers_model
# Enable memory optimizations.
if app.config.diffusers_offloading_mode=="sequential_cpu_offload":
self.model.enable_sequential_cpu_offload()
elif app.config.diffusers_offloading_mode=="model_cpu_offload":
self.model.enable_model_cpu_offload()
except Exception as ex:
self.model= None
trace_exception(ex)
@staticmethod
def verify(app:LollmsApplication):
# Clone repository