Added new services

Saifeddine ALOUI 2024-01-10 21:21:53 +01:00
parent c86dbe7e59
commit 25334549ec
11 changed files with 254 additions and 37 deletions

View File

@@ -1,6 +1,6 @@
 from lollms.main_config import LOLLMSConfig
 from lollms.paths import LollmsPaths
-from lollms.personality import PersonalityBuilder
+from lollms.personality import PersonalityBuilder, AIPersonality
 from lollms.binding import LLMBinding, BindingBuilder, ModelBuilder
 from lollms.extension import LOLLMSExtension, ExtensionBuilder
 from lollms.config import InstallOption
@@ -45,7 +45,7 @@ class LollmsApplication(LoLLMsCom):
         self.menu = MainMenu(self, callback)
         self.mounted_personalities = []
-        self.personality = None
+        self.personality:AIPersonality = None
         self.mounted_extensions = []
         self.binding = None
@@ -53,17 +53,18 @@ class LollmsApplication(LoLLMsCom):
         self.long_term_memory = None
         self.tts = None
         if not free_mode:
             if self.config.enable_voice_service and load_voice_service:
                 try:
-                    from lollms.audio_gen_modules.lollms_xtts import LollmsXTTS
+                    from lollms.services.xtts.lollms_xtts import LollmsXTTS
                     self.tts = LollmsXTTS(self, voice_samples_path=lollms_paths.custom_voices_path, xtts_base_url=self.config.xtts_base_url)
                 except:
                     self.warning(f"Couldn't load XTTS")
             if self.config.enable_sd_service and load_sd_service:
                 try:
-                    from lollms.image_gen_modules.lollms_sd import LollmsSD
+                    from lollms.services.sd.lollms_sd import LollmsSD
                     self.tts = LollmsSD(self, auto_sd_base_url=self.config.sd_base_url)
                 except:
                     self.warning(f"Couldn't load SD")

View File

@@ -679,6 +679,12 @@ Date: {{date}}
         self._assets_list = contents
         return config

+    def settings_updated(self):
+        """
+        To be implemented by the bindings when the settings have changed
+        """
+        pass
+
     def remove_file(self, path, callback=None):
         try:
             if path in self.text_files:
@@ -1780,21 +1786,31 @@ class APScript(StateMachine):
     def generate(self, prompt, max_size, temperature = None, top_k = None, top_p=None, repeat_penalty=None, repeat_last_n=None, callback=None, debug=False ):
         return self.personality.generate(prompt, max_size, temperature, top_k, top_p, repeat_penalty, repeat_last_n, callback, debug=debug)

-    def run_workflow(self, prompt:str, previous_discussion_text:str="", callback: Callable[[str, MSG_TYPE, dict, list], bool]=None):
+    def run_workflow(self, prompt:str, previous_discussion_text:str="", callback: Callable[[str, MSG_TYPE, dict, list], bool]=None, context_details=None):
         """
-        Runs the workflow for processing the model input and output.
-        This method should be called to execute the processing workflow.
+        This function generates code based on the given parameters.

         Args:
-            generate_fn (function): A function that generates model output based on the input prompt.
-                The function should take a single argument (prompt) and return the generated text.
-            prompt (str): The input prompt for the model.
-            previous_discussion_text (str, optional): The text of the previous discussion. Default is an empty string.
+            full_prompt (str): The full prompt for code generation.
+            prompt (str): The prompt for code generation.
+            context_details (dict): A dictionary containing the following context details for code generation:
+                - conditionning (str): The conditioning information.
+                - documentation (str): The documentation information.
+                - knowledge (str): The knowledge information.
+                - user_description (str): The user description information.
+                - discussion_messages (str): The discussion messages information.
+                - positive_boost (str): The positive boost information.
+                - negative_boost (str): The negative boost information.
+                - force_language (str): The force language information.
+                - ai_prefix (str): The AI prefix information.
+            n_predict (int): The number of predictions to generate.
+            client_id: The client ID for code generation.
+            callback (function, optional): The callback function for code generation.

         Returns:
             None
         """
         return None
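
Since run_workflow now receives a context_details dictionary, a personality processor can combine it with the build_prompt helper added in the next hunk. The following is a hedged sketch of such an override; APScript, generate and build_prompt appear in this file, but the body, keys used and parameter values are illustrative.

# Hedged sketch of a processor using the new context_details argument.
from lollms.personality import APScript

class Processor(APScript):
    def run_workflow(self, prompt, previous_discussion_text="", callback=None, context_details=None):
        context_details = context_details or {}
        full_prompt = self.build_prompt([
            context_details.get("conditionning", ""),
            context_details.get("discussion_messages", previous_discussion_text),
            prompt,
        ], sacrifice_id=1)                      # trim the discussion if the context overflows
        return self.generate(full_prompt, max_size=512, callback=callback)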
@@ -1849,6 +1865,43 @@ class APScript(StateMachine):
             self.step_end(f"Processing chunk : {i+1}/{len(chunks)}")
         return "\n".join(summeries)

+    def build_prompt(self, prompt_parts:List[str], sacrifice_id:int=-1, context_size:int=None, minimum_spare_context_size:int=None):
+        """
+        Builds the prompt for code generation.
+
+        Args:
+            prompt_parts (List[str]): A list of strings representing the parts of the prompt.
+            sacrifice_id (int, optional): The ID of the part to sacrifice.
+            context_size (int, optional): The size of the context.
+            minimum_spare_context_size (int, optional): The minimum spare context size.
+
+        Returns:
+            str: The built prompt.
+        """
+        if context_size is None:
+            context_size = self.personality.config.ctx_size
+        if minimum_spare_context_size is None:
+            minimum_spare_context_size = self.personality.config.min_n_predict
+
+        if sacrifice_id == -1 or len(prompt_parts[sacrifice_id])<50:
+            return "\n".join([s for s in prompt_parts if s!=""])
+        else:
+            part_tokens=[]
+            nb_tokens=0
+            for i,part in enumerate(prompt_parts):
+                tk = self.personality.model.tokenize(part)
+                part_tokens.append(tk)
+                if i != sacrifice_id:
+                    nb_tokens += len(tk)
+            if len(part_tokens[sacrifice_id])>0:
+                sacrifice_tk = part_tokens[sacrifice_id]
+                sacrifice_tk= sacrifice_tk[-(context_size-nb_tokens-minimum_spare_context_size):]
+                sacrifice_text = self.personality.model.detokenize(sacrifice_tk)
+            else:
+                sacrifice_text = ""
+            prompt_parts[sacrifice_id] = sacrifice_text
+            return "\n".join([s for s in prompt_parts if s!=""])
+
     # ================================================= Sending commands to ui ===========================================
     def step_start(self, step_text, callback: Callable[[str, MSG_TYPE, dict, list], bool]=None):
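
To make the truncation rule above concrete, here is a small self-contained sketch of the same idea, with a whitespace tokenizer standing in for the model tokenizer; the function name and the toy tokenizer are illustrative and not part of this commit.

# Stand-alone illustration of the "sacrifice" logic: every part is kept verbatim
# except the sacrificed one, which is trimmed from the left so the prompt still
# leaves minimum_spare_context_size tokens free for generation.
def build_prompt_sketch(prompt_parts, sacrifice_id, context_size, minimum_spare_context_size):
    tokenize = str.split                       # toy tokenizer (whitespace)
    detokenize = " ".join                      # toy detokenizer
    if sacrifice_id == -1 or len(prompt_parts[sacrifice_id]) < 50:
        return "\n".join(p for p in prompt_parts if p != "")
    part_tokens = [tokenize(p) for p in prompt_parts]
    fixed = sum(len(t) for i, t in enumerate(part_tokens) if i != sacrifice_id)
    budget = context_size - fixed - minimum_spare_context_size
    kept = part_tokens[sacrifice_id][-budget:] if budget > 0 else []
    prompt_parts = list(prompt_parts)
    prompt_parts[sacrifice_id] = detokenize(kept)
    return "\n".join(p for p in prompt_parts if p != "")

# The long discussion history (index 1) is the part that gets trimmed.
print(build_prompt_sketch(["system conditioning " * 20, "very long discussion " * 50, "what is the weather like?"],
                          sacrifice_id=1, context_size=120, minimum_spare_context_size=20))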

View File

@@ -9,6 +9,7 @@ This class provides a singleton instance of the LoLLMS web UI, allowing access t
 from lollms.app import LollmsApplication
 from lollms.main_config import LOLLMSConfig
 from lollms.paths import LollmsPaths
+from lollms.personality import AIPersonality
 from pathlib import Path

 class LOLLMSElfServer(LollmsApplication):

View File

@@ -464,7 +464,7 @@ def get_active_personality_settings():
     else:
         return {}

-@router.post("/get_active_personality_settings")
+@router.post("/set_active_personality_settings")
 def set_active_personality_settings(data):
     print("- Setting personality settings")
@@ -475,6 +475,7 @@ def set_active_personality_settings(data):
         if lollmsElfServer.config.auto_save:
             ASCIIColors.info("Saving configuration")
             lollmsElfServer.config.save_config()
+        lollmsElfServer.personality.settings_updated()
         return {'status':True}
     else:
         return {'status':False}
@@ -492,10 +493,3 @@ def post_to_personality(data):
     else:
         return {}

-@router.get("/get_current_personality_files_list")
-def get_current_personality_files_list():
-    if lollmsElfServer.personality is None:
-        return {"state":False, "error":"No personality selected"}
-    return {"state":True, "files":[{"name":Path(f).name, "size":Path(f).stat().st_size} for f in lollmsElfServer.personality.text_files]+[{"name":Path(f).name, "size":Path(f).stat().st_size} for f in lollmsElfServer.personality.image_files]}

View File

@@ -15,7 +15,7 @@ from fastapi.responses import FileResponse
 from lollms.binding import BindingBuilder, InstallOption
 from ascii_colors import ASCIIColors
 from lollms.personality import MSG_TYPE, AIPersonality
-from lollms.utilities import load_config, trace_exception, gc, terminate_thread
+from lollms.utilities import load_config, trace_exception, gc, terminate_thread, run_async
 from pathlib import Path
 from typing import List
 import socketio
@@ -68,10 +68,10 @@ def add_events(sio:socketio):
             else:
                 result = lollmsElfServer.personality.add_file(file_path, partial(lollmsElfServer.process_chunk, client_id=client_id))
-            lollmsElfServer.run_async(partial(sio.emit,'file_received', {'status': True, 'filename': filename}))
+            run_async(partial(sio.emit,'file_received', {'status': True, 'filename': filename}))
         else:
             # Request the next chunk from the client
-            lollmsElfServer.run_async(partial(sio.emit,'request_next_chunk', {'offset': offset + len(chunk)}))
+            run_async(partial(sio.emit,'request_next_chunk', {'offset': offset + len(chunk)}))

 @sio.on('execute_command')
 def execute_command(sid, data):
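
The change above swaps the server method for the module-level run_async helper imported from lollms.utilities; its implementation is not shown in this diff, so the stand-in below only illustrates the partial-wrapping pattern and is not the library code.

# Toy stand-in: partial defers building the coroutine until run_async runs it.
import asyncio
from functools import partial

def run_async(make_coro):
    # Stand-in only; the real lollms.utilities.run_async is assumed to schedule
    # the coroutine on the server's event loop.
    asyncio.run(make_coro())

async def emit(event, payload):
    print("emit:", event, payload)

run_async(partial(emit, "file_received", {"status": True, "filename": "notes.txt"}))
run_async(partial(emit, "request_next_chunk", {"offset": 4096}))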

View File

@@ -0,0 +1,68 @@
#!/bin/sh
# This script installs Ollama on Linux.
# It detects the current operating system architecture and installs the appropriate version of Ollama.
set -eu
status() { echo ">>> $*" >&2; }
error() { echo "ERROR $*"; exit 1; }
warning() { echo "WARNING: $*"; }
OLLAMA_DIR=~/ollama
if [ ! -d $OLLAMA_DIR ]; then
    mkdir $OLLAMA_DIR
    echo "Folder $OLLAMA_DIR created successfully!"
else
    echo "Folder $OLLAMA_DIR already exists."
fi
available() { command -v $1 >/dev/null; }
require() {
    local MISSING=''
    for TOOL in $*; do
        if ! available $TOOL; then
            MISSING="$MISSING $TOOL"
        fi
    done
    echo $MISSING
}
[ "$(uname -s)" = "Linux" ] || error 'This script is intended to run on Linux only.'
ARCH=$(uname -m)
case "$ARCH" in
x86_64) ARCH="amd64" ;;
aarch64|arm64) ARCH="arm64" ;;
*) error "Unsupported architecture: $ARCH" ;;
esac
KERN=$(uname -r)
case "$KERN" in
*icrosoft*WSL2 | *icrosoft*wsl2) ;;
*icrosoft) error "Microsoft WSL1 is not currently supported. Please upgrade to WSL2 with 'wsl --set-version <distro> 2'" ;;
*) ;;
esac
NEEDS=$(require curl awk grep sed tee xargs)
if [ -n "$NEEDS" ]; then
status "ERROR: The following tools are required but missing:"
for NEED in $NEEDS; do
echo " - $NEED"
done
exit 1
fi
status "Downloading ollama..."
curl --fail --show-error --location --progress-bar -o "$OLLAMA_DIR/ollama" "https://ollama.ai/download/ollama-linux-$ARCH"
status "Installing ollama to $OLLAMA_DIR..."
install_success() {
    status 'The Ollama API is now available at 0.0.0.0:11434.'
    status 'Install complete. Run "ollama" from the command line.'
}
trap install_success EXIT

View File

@@ -0,0 +1,98 @@
# Title Ollama service
# Licence: MIT
# Author : Paris Neo
# This is a service launcher for the ollama server by Jeffrey Morgan (jmorganca)
# check it out : https://github.com/jmorganca/ollama
# Here is a copy of the LICENCE https://github.com/jmorganca/ollama/blob/main/LICENSE
# All rights are reserved
from pathlib import Path
import os
import sys
from lollms.app import LollmsApplication
from lollms.paths import LollmsPaths
from lollms.config import TypedConfig, ConfigTemplate, BaseConfig
import time
import io
import sys
import requests
import os
import base64
import subprocess
import time
import json
import platform
from dataclasses import dataclass
from PIL import Image, PngImagePlugin
from enum import Enum
from typing import List, Dict, Any
from ascii_colors import ASCIIColors, trace_exception
from lollms.paths import LollmsPaths
from lollms.utilities import git_pull
import subprocess
import platform
def verify_ollama(lollms_paths:LollmsPaths):
    # Clone repository
    root_dir = lollms_paths.personal_path
    shared_folder = root_dir/"shared"
    sd_folder = shared_folder / "auto_sd"
    return sd_folder.exists()

def install_ollama():
    if platform.system() == 'Windows':
        if os.path.exists('C:\\Windows\\System32\\wsl.exe'):
            subprocess.run(['wsl', 'bash', str(Path(__file__).parent / 'install.sh')])
        else:
            subprocess.run(['wsl', '--install', 'Ubuntu'])
            subprocess.run(['wsl', 'bash', str(Path(__file__).parent / 'install.sh')])
    else:
        subprocess.run(['bash', str(Path(__file__).parent / 'install.sh')])

def get_sd(lollms_paths:LollmsPaths):
    root_dir = lollms_paths.personal_path
    shared_folder = root_dir/"shared"
    sd_folder = shared_folder / "auto_sd"
    sd_script_path = sd_folder / "lollms_sd.py"
    git_pull(sd_folder)

    if sd_script_path.exists():
        ASCIIColors.success("lollms_sd found.")
        ASCIIColors.success("Loading source file...",end="")
        # use importlib to load the module from the file path
        from lollms.services.sd.lollms_sd import LollmsSD
        ASCIIColors.success("ok")
        return LollmsSD

class Service:
    def __init__(
                    self,
                    app:LollmsApplication,
                    base_url="http://127.0.0.1:11434",
                    wait_max_retries = 5
                ):
        if base_url=="" or base_url=="http://127.0.0.1:7860":
            base_url = None
        # Get the current directory
        lollms_paths = app.lollms_paths
        self.app = app
        root_dir = lollms_paths.personal_path

        ASCIIColors.red(" __ _____ __ __ _____ _____ _____ __ __ _____ _____ _____ ")
        ASCIIColors.red("| | | | | | | | | __| | | | | | | _ | | _ |")
        ASCIIColors.red("| |__| | | |__| |__| | | |__ | | | | |__| |__| | | | | |")
        ASCIIColors.red("|_____|_____|_____|_____|_|_|_|_____|_____|_____|_____|_____|__|__|_|_|_|__|__|")
        ASCIIColors.red(" |_____| ")
        ASCIIColors.red(" Launching ollama service by Jeffrey Morgan (jmorganca)")
        ASCIIColors.red(" Integration in lollms by ParisNeo")

        if not self.wait_for_service(1,False) and base_url is None:
            ASCIIColors.info("Loading ollama service")

        # Wait until the service is available at http://127.0.0.1:7860/
        self.wait_for_service(max_retries=wait_max_retries)
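
The constructor ends by calling wait_for_service, whose body lies outside this hunk. A minimal readiness-poll sketch, assuming the default Ollama endpoint used above; the retry loop and timing are assumptions, not the code from this commit.

# Hedged sketch: poll the Ollama server until it answers or retries run out.
import time
import requests

def wait_for_ollama(base_url="http://127.0.0.1:11434", max_retries=5, delay=1.0):
    for _ in range(max_retries):
        try:
            if requests.get(base_url, timeout=2).ok:   # Ollama replies "Ollama is running"
                return True
        except requests.RequestException:
            pass
        time.sleep(delay)
    return False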

View File

@@ -0,0 +1,2 @@
ollama serve&
ollama run mistral

View File

@@ -61,7 +61,7 @@ def get_sd(lollms_paths:LollmsPaths):
         ASCIIColors.success("lollms_sd found.")
         ASCIIColors.success("Loading source file...",end="")
         # use importlib to load the module from the file path
-        from lollms.image_gen_modules.lollms_sd import LollmsSD
+        from lollms.services.sd.lollms_sd import LollmsSD
         ASCIIColors.success("ok")
         return LollmsSD

View File

@@ -63,7 +63,7 @@ def get_xtts(lollms_paths:LollmsPaths):
         ASCIIColors.success("lollms_xtts found.")
         ASCIIColors.success("Loading source file...",end="")
         # use importlib to load the module from the file path
-        from lollms.audio_gen_modules.lollms_xtts import LollmsXTTS
+        from lollms.services.xtts.lollms_xtts import LollmsXTTS
         ASCIIColors.success("ok")
         return LollmsXTTS

View File

@@ -26,7 +26,7 @@ def get_all_files(path):
 setuptools.setup(
     name="lollms",
-    version="7.0.0",
+    version="7.1.0",
     author="Saifeddine ALOUI",
     author_email="aloui.saifeddine@gmail.com",
     description="A python library for AI personality definition",
@@ -39,7 +39,7 @@ setuptools.setup(
     entry_points={
         'console_scripts': [
             'lollms-elf = lollms.apps.elf:main',
-            'lollms-server = lollms.apps.server:main',
+            'lollms-server = lollms.server.server:main',
             'lollms-console = lollms.apps.console:main',
             'lollms-settings = lollms.apps.settings:main',
             'lollms-discord = lollms.apps.discord_bot:main',