Moving to the new lollms style V19

This commit is contained in:
Saifeddine ALOUI 2025-03-06 14:00:21 +01:00
parent c5033756cb
commit 9e7e2423d4
32 changed files with 1305 additions and 509 deletions


@ -1,3 +1,10 @@
# LoLLMs v19.0 (alpha) Changelog
- Restructured main settings
- Added application settings that allow changing the application name, slogan, and logo for maximum flexibility.
- Completely restructured the services system
- Service parameters are now flexible, allowing a variety of services to expose their own options
- Function calls can now take static parameters in addition to their regular parameters
# LoLLMs v18.1 Changelog
- Working system-wide function calling.
- Full integration with lightrag.

7
app.py

@ -326,8 +326,13 @@ if __name__ == "__main__":
from lollms.server.endpoints.lollms_skills_library import \
router as lollms_skills_library_router
from lollms.server.endpoints.lollms_tti import router as lollms_tti_router
from lollms.server.endpoints.lollms_stt import \
router as lollms_stt_add_router
from lollms.server.endpoints.lollms_tts import \
router as lollms_tts_add_router
from lollms.server.endpoints.lollms_ttm import \
router as lollms_ttm_add_router
from lollms.server.endpoints.lollms_ttv import \
router as lollms_ttv_router
@ -395,7 +400,9 @@ if __name__ == "__main__":
app.include_router(chat_bar_router)
app.include_router(help_router)
app.include_router(lollms_stt_add_router)
app.include_router(lollms_tts_add_router)
app.include_router(lollms_ttm_add_router)
app.include_router(lollms_ttv_router)
app.include_router(lollms_xtts_add_router)


@ -1,5 +1,5 @@
# =================== Lord Of Large Language Multimodal Systems Configuration file ===========================
version: 157
version: 159
# video viewing and news recovering
last_viewed_video: null
@ -31,6 +31,11 @@ host: localhost
port: 9600
app_custom_logo: ""
app_custom_name: "LoLLMS"
app_show_changelogs: true
app_custom_welcome_message: ""
app_custom_slogan: ""
app_show_fun_facts: true
# Generation parameters
discussion_prompt_separator: "!@>"
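For illustration, a deployment could rebrand the web UI by overriding the new application keys shown above; all values below are invented examples, not defaults:

```yaml
# Hypothetical branding overrides (illustrative values only)
app_custom_logo: "acme_logo.png"            # image served by the web UI from user_infos/
app_custom_name: "Acme Assistant"
app_custom_slogan: "Your team's AI copilot"
app_custom_welcome_message: "Welcome to the Acme internal assistant."
app_show_changelogs: false                  # controls whether the changelog popup appears after version bumps
app_show_fun_facts: false                   # controls the fun-fact card on the landing page
```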


@ -1,7 +1,7 @@
<div class="container mx-auto px-4">
<div class="mb-6">
<div class="bg-gradient-welcome card p-8 rounded-lg shadow-lg transform hover:scale-102 transition-transform duration-300">
<h3 class="text-gradient-title text-4xl font-bold mb-6 animate-fade-in">🌐 Welcome to v18 "Matrix"! 🌐</h3>
<h3 class="text-gradient-title text-4xl font-bold mb-6 animate-fade-in">🔗 Welcome to v19 (alpha) "Omni"! 🔗</h3>
<!-- Feature Highlights with Matrix Theme -->
<div class="grid grid-cols-1 md:grid-cols-3 gap-6 mt-8">

@ -1 +1 @@
Subproject commit 0739f1c2794803a1464fec43354910333ec6a829
Subproject commit 324b8d220d2da4626302826d63b73d2358b22f7b


@ -77,10 +77,10 @@ def terminate_thread(thread):
lollms_webui_version = {
"version_main":18,
"version_secondary":1,
"version_type":"",
"version_codename":"Matrix 💊"
"version_main":19,
"version_secondary":0,
"version_type":"alpha",
"version_codename":"Omni 🔗"
}
@ -1227,6 +1227,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
self.update_message_metadata(client_id, data)
return
elif operation_type == MSG_OPERATION_TYPE.MSG_OPERATION_TYPE_NEW_MESSAGE:
ASCIIColors.info("Building new message")
self.nb_received_tokens = 0
self.start_time = datetime.now()
self.update_message_step(
@ -1774,11 +1775,11 @@ Don't forget encapsulate the code inside a markdown code tag. This is mandatory.
/ "services/xtts/voices"
)
if self.xtts.ready:
if self.tts.ready:
language = convert_language_name(
self.personality.language
)
self.xtts.set_speaker_folder(
self.tts.set_speaker_folder(
Path(self.personality.audio_samples[0]).parent
)
fn = (
@ -1788,7 +1789,7 @@ Don't forget encapsulate the code inside a markdown code tag. This is mandatory.
)
fn = f"{fn}_{message_id}.wav"
url = f"audio/{fn}"
self.xtts.tts_file(
self.tts.tts_file(
client.generated_text,
Path(self.personality.audio_samples[0]).name,
f"{fn}",

17
web/dist/assets/index-Cq20xmwz.css vendored Normal file

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@ -1,10 +1,62 @@
# Getting Started with LoLLMs
Welcome to LoLLMs! Here are some basic steps to get you started:
Welcome to LoLLMs (Lord of Large Language Multimodal Systems)! This guide provides detailed steps to install, configure, and start using LoLLMs, a versatile AI platform created by ParisNeo in September 2023. Designed for tasks ranging from text generation to multimodal operations, LoLLMs is primarily installed using release files (`lollms_setup.bat` for Windows and `lollms_setup.sh` for Linux) available on GitHub. Follow these steps to get up and running as of March 6, 2025.
1. **Open the chat interface**: Navigate to the main chat screen.
2. **Start a conversation**: Type your question or command in the chat bar.
3. **Explore available features**: Try out different commands to see what LoLLMs can do.
4. **Use the help command**: Type "help" in the chat to see a list of available commands and features.
## Step 1: Install LoLLMs Using Release Files
LoLLMs is best installed using pre-built release scripts from the [GitHub releases page](https://github.com/ParisNeo/lollms-webui/releases), automating the setup process for the web UI and core components.
Remember, LoLLMs is designed to assist you with various tasks and answer your questions. Don't hesitate to ask if you need more information!
### For Windows Users
- **Download the Script**: Visit [GitHub releases](https://github.com/ParisNeo/lollms-webui/releases) and download `lollms_setup.bat` (e.g., from v9.6, the latest as of March 2025).
- **Run the Script**: Double-click `lollms_setup.bat`. A command-line window will open, guiding you through:
- Setting up a personal folder for data and models.
- Selecting hardware (CPU or GPU with CUDA support).
- Installing dependencies and at least one model from the Models Zoo.
- **Follow Prompts**: Answer the on-screen questions (e.g., "Do you want to install a model now?"). If prompted about antivirus warnings, proceed as these are noted as false positives [LOLLMS WebUI Tutorial](https://parisneo.github.io/lollms-webui/tutorials/noobs_tutorial.html).
### For Linux Users
- **Download the Script**: From the same [GitHub releases page](https://github.com/ParisNeo/lollms-webui/releases), download `lollms_setup.sh`.
- **Run the Script**: Open a terminal, navigate to the download directory (e.g., `cd ~/Downloads`), and execute:
```bash
bash lollms_setup.sh
```
Ensure the script is executable (`chmod +x lollms_setup.sh` if needed).
The script clones the repository, sets up a Conda environment, and prompts for configuration (e.g., hardware and model selection).
- **Follow Prompts:** Respond to setup questions, such as installing CUDA for GPU support or opting for CPU-only mode.
- **Note:** While a graphical installer (`lollms_setup.exe`) also exists for Windows, this guide focuses on the `.bat` and `.sh` scripts as the primary installation methods. These scripts ensure a complete setup, including the web UI, unlike the `pip install lollms` alternative, which is more developer-oriented.
## Step 2: Launch the Web UI
After installation, the script typically launches the LoLLMs Web UI automatically. If not, or after restarting your system:
- **Windows:** Run `lollms.bat` (created during setup) from the installation directory.
- **Linux:** Run `bash lollms.sh` from the same directory.
- **Access the Interface:** Open your browser and go to http://localhost:9600 (the default port in the configuration). You'll see the main chat screen, ready for interaction.
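A minimal sketch of that relaunch on Linux (the install path is an assumption made during setup):

```bash
# Relaunch the LoLLMs web UI after a reboot (Linux)
cd ~/lollms-webui          # wherever the setup script installed it (assumption)
bash lollms.sh
# then browse to http://localhost:9600
```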
## Step 3: Select a Personality
The web UI features a selector for the Personalities Zoo, offering over 500 AI personas for tasks like coding, image generation, or text analysis:
- Navigate to the personality dropdown or menu (exact location may vary by version).
- Choose a persona (e.g., a coding expert or creative writer) based on your task.
This selection tailors the AI's responses to your needs.
## Step 4: Start a Conversation
Open the Chat Interface: The main screen displays a chat bar at the bottom.
- **Type Your Query:** Enter a question or command (e.g., “Write a Python function” or “Generate an image of a cat”).
- **Press Enter:** The selected personality will respond, leveraging the installed model.
## Step 5: Explore Features and Commands
Use the Help Command: Type `help` in the chat bar to see a list of available commands and features specific to your chosen personality.
Try Multimodal Tasks: Experiment with capabilities like:
- **Text-to-Image:** “Create an image of a sunset.”
- **Speech-to-Text:** Upload audio (if supported by the persona).
- **Text-to-Music:** “Compose a melody from this poem.”
- **Advanced Features:** Access the Services Zoo (backend integrations) or Function Calls Zoo (custom actions) via the UI or documentation for complex workflows.
## Troubleshooting
- **Installation Issues:** If the script fails, ensure Python 3.10+ and Git are installed (Linux) or check antivirus settings (Windows).
- **Model Loading:** Verify sufficient disk space and review logs if models don't load (see the [Reddit Community for LoLLMs](https://www.reddit.com/r/lollms/)).
- **Community Support:** Seek help on Reddit or GitHub issues.
## Tables for Clarity
| **Step** | **Action** | **Windows** | **Linux** |
|-----------------------|------------------------------------------------|-----------------------------|-----------------------------|
| Install | Download and run setup script | Double-click `lollms_setup.bat` | `bash lollms_setup.sh` |
| Launch Web UI | Start the interface | Run `lollms.bat` | `bash lollms.sh` |
| Access UI | Open browser to localhost | [http://localhost:9600](http://localhost:9600) | Same |

| **Component** | **Description** |
|-----------------------|--------------------------------------------------------------|
| Personalities Zoo | Over 500 AI personas for specialized tasks |
| Services Zoo | Backend services for API integrations, data processing |
| Function Calls Zoo | Callable functions for custom actions or workflows |

0
web/dist/help/image-generation.md vendored Normal file

@ -1,38 +1,83 @@
# About LoLLMs
# Survey Note: Enhanced Documentation for LoLLMs
## What is LoLLMs?
## Overview and Purpose
The documentation for LoLLMs (Lord of Large Language Multimodal Systems) has been expanded to include detailed sections on its **Services Zoo** and **Function Calls Zoo**, alongside its existing components. This update aims to offer a thorough resource for users, covering its features, history, development process, and practical usage. Created by ParisNeo in September 2023, LoLLMs is a versatile AI platform designed to assist with tasks ranging from text generation to multimodal operations, driven by a passion for AI and community collaboration.
LoLLMs (Lord of Large Language Multimodal Systems) is a powerful and versatile AI tool designed to assist users with a wide range of tasks. It's an ecosystem that includes:
## Detailed History and Creation
LoLLMs began in September 2023, with its initial commit on GitHub [LoLLMs Web UI GitHub Repository](https://github.com/ParisNeo/lollms-webui). As of March 2025, it has evolved over approximately one year and five months, aligning with its stated age of about one and a half years as of October 2024. ParisNeo, a self-described computer geek passionate about AI, developed LoLLMs to realize a childhood dream, aiming to push AI boundaries, provide a versatile tool, promote ethical development, and showcase the impact of passion-driven projects. Built in his free time as a hobby, it reflects a commitment to community benefit and personal satisfaction through technology.
- A vast array of applications in the LoLLMs zoo
- Thousands of AI models in the models zoo
- Over 500 distinct AI personas in the personalities zoo
## Features and Capabilities
LoLLMs is structured around five key components:
- **Applications Zoo**: A collection of diverse applications tailored for specific services or problem-solving, enhancing its utility across domains.
- **Models Zoo**: Thousands of pre-trained AI models, including language and multimodal systems, such as Manticore-13B.ggmlv3.q4_0.bin, supporting a broad range of functionalities [LoLLMs GitHub Repository](https://github.com/ParisNeo/lollms).
- **Personalities Zoo**: Over 500 unique AI personas specialized in tasks like coding, image generation, and text analysis, as noted in community discussions [Reddit Community for LoLLMs](https://www.reddit.com/r/lollms/).
- **Services Zoo**: A suite of backend services that power LoLLMs' operations, such as API integrations, data processing, and model hosting. These services enable seamless interaction between components and external systems, enhancing scalability and functionality.
- **Function Calls Zoo**: A repository of callable functions that allow users and developers to trigger specific actions within LoLLMs, such as generating text, processing images, or executing custom workflows. This component supports automation and customization, making LoLLMs highly extensible.
LoLLMs can perform various tasks, including text-to-text, text-to-image, image-to-text, speech-to-text, text-to-speech, text-to-music, and text-to-video operations.
The platform's multimodal capabilities include:
- **Text-to-Text**: Generating, summarizing, translating, or analyzing text using its large language models.
- **Text-to-Image**: Creating images from text descriptions, likely via personalities integrated with models like Stable Diffusion.
- **Image-to-Text**: Extracting text or generating descriptions from images, inferred from its multimodal scope.
- **Speech-to-Text**: Converting spoken language to text, part of its broad capabilities.
- **Text-to-Speech**: Converting text to spoken language, enhancing accessibility.
- **Text-to-Music**: Generating music from textual input, a unique feature.
- **Text-to-Video**: Creating video content from text, though less detailed in current resources.
## When was LoLLMs created?
These features, highlighted on the official website [LoLLMs Official Website](https://lollms.com/), position LoLLMs as a tool for tasks like language translation, creative content generation, and technical assistance.
LoLLMs was created about one and a half years ago (as of October 2024). It has grown from humble beginnings to become a tool that aims to "rule them all" in terms of AI capabilities.
## Development Process and Community Engagement
LoLLMs adheres to a rapid development cycle, with near-daily releases evidenced by over 1,200 commits on GitHub [LoLLMs Web UI GitHub Repository](https://github.com/ParisNeo/lollms-webui). It continuously improves by integrating community feedback and adapting to AI advancements. All developments are openly shared, fostering transparency and collaboration, with contributions welcomed via GitHub. The inclusion of a CODE_OF_CONDUCT.md file suggests a community-focused ethos, though specific ethical guidelines remain broad. ParisNeo, as the sole developer, drives this passion project, as noted in community discussions [Reddit Community for LoLLMs](https://www.reddit.com/r/lollms/).
## Why was LoLLMs created?
## Getting Started Guide
New users can begin with LoLLMs using these steps:
1. **Installation**: Download from [LoLLMs Official Website](https://lollms.com/) or install via `pip install --upgrade lollms` (see the command sketch after this list) [LoLLMs GitHub Repository](https://github.com/ParisNeo/lollms).
2. **Configuration**: Launch `lollms_settings` to set up the environment, select a binding, and install a model (e.g., from Hugging Face).
3. **Exploring Components**: Access the personalities, services, and function calls zoos to utilize AI personas, backend services, or custom functions for specific tasks.
4. **Using the Web UI**: Start the LoLLMS Web UI for an intuitive interface, with tutorials available [LoLLMs Web UI Documentation](https://parisneo.github.io/lollms-webui/).
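A minimal sketch of the developer-oriented path from steps 1 and 2, assuming an existing Python environment:

```bash
# Alternative to the release scripts: install the core package and configure it
pip install --upgrade lollms
lollms_settings   # interactive setup: choose a binding and install a model
```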
LoLLMs was created by ParisNeo, a computer geek passionate about AI. It represents the culmination of a childhood dream and embodies a powerful vision for the future of AI. The project aims to:
This guide ensures users can effectively navigate the platform's expanded ecosystem.
1. Push the boundaries of what's possible with AI
2. Provide a versatile tool that can assist with a wide range of tasks
3. Promote ethical AI development and community collaboration
4. Demonstrate how passion-driven projects can have a significant impact in the tech world
## Recent Updates and Future Outlook
Recent enhancements include:
- **LLMTester**: A personality for testing and rating AI models, improving evaluation capabilities [LoLLMs Official Website](https://lollms.com/).
- **pyconn_monitor**: A Python library for network connection monitoring, enhancing security.
- **lollms_client_js**: A Node.js front-end library for broader developer access.
- **CodeGuard**: A cybersecurity personality for code analysis, bolstering security features.
ParisNeo develops LoLLMs in his free time, treating it as a hobby and a way to give back to the community. The project is driven by a genuine desire to help people through technology and to find personal satisfaction in being useful.
The addition of the Services Zoo and Function Calls Zoo further expands LoLLMs' flexibility, with ongoing updates anticipated based on community input.
## How is LoLLMs developed?
## Comparative Analysis and Community Feedback
Community feedback on Reddit praises LoLLMs' extensive personalities and versatility but notes occasional installation challenges, highlighting areas for refinement [Reddit Community for LoLLMs](https://www.reddit.com/r/lollms/). The Services Zoo and Function Calls Zoo address customization needs, aligning with user demands for extensibility.
LoLLMs follows a rapid development cycle:
- New versions are released almost every day
- The project is continuously improved and expanded
- All developments are shared openly with the community
- The project encourages transparency and collaboration in AI development
## Tables for Clarity
The following tables summarize LoLLMs' components and capabilities:
This approach allows LoLLMs to quickly adapt to new AI breakthroughs, integrate community feedback, and regularly introduce new capabilities.
| **Component** | **Description** |
|-----------------------|--------------------------------------------------------------|
| Applications Zoo | Diverse applications for specific services and problem-solving |
| Models Zoo | Thousands of pre-trained models, including language and multimodal |
| Personalities Zoo | Over 500 AI personas for specialized tasks like coding, image generation |
| Services Zoo | Backend services (e.g., APIs, data processing) for seamless operations |
| Function Calls Zoo | Callable functions for triggering actions like text generation or workflows |
LoLLMs stands as a testament to what can be achieved when cutting-edge technology is driven by passion, ethical considerations, and a community-focused approach to AI development.
| **Task** | **Description** |
|-----------------------|--------------------------------------------------------------|
| Text-to-Text | Generate, summarize, translate, or analyze text |
| Text-to-Image | Create images from textual descriptions |
| Image-to-Text | Extract text or describe images |
| Speech-to-Text | Convert spoken language to text |
| Text-to-Speech | Convert text to spoken language |
| Text-to-Music | Generate music from textual input |
| Text-to-Video | Create video content from textual descriptions |
These tables offer a quick reference for understanding LoLLMs' structure and functionality.
## Conclusion
This enhanced documentation, now including the Services Zoo and Function Calls Zoo, provides a detailed, user-friendly resource for LoLLMs. It covers its history, expanded features, development, and usage, empowering users to leverage this versatile AI platform. LoLLMs stands as a testament to passion-driven innovation and community collaboration in AI development.
## Key Citations
- [LoLLMs Web UI GitHub Repository with over 1200 commits](https://github.com/ParisNeo/lollms-webui)
- [LoLLMs Official Website for news and downloads](https://lollms.com/)
- [LoLLMs GitHub Repository for installation details](https://github.com/ParisNeo/lollms)
- [LoLLMs Web UI Documentation for tutorials](https://parisneo.github.io/lollms-webui/)
- [Reddit Community for LoLLMs user discussions](https://www.reddit.com/r/lollms/)

0
web/dist/help/music-generation.md vendored Normal file

0
web/dist/help/text-generation.md vendored Normal file

4
web/dist/index.html vendored

@ -6,8 +6,8 @@
<script src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-svg.js"></script>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>LoLLMS WebUI</title>
<script type="module" crossorigin src="/assets/index-odC1ARWo.js"></script>
<link rel="stylesheet" crossorigin href="/assets/index-DAHeP7Vs.css">
<script type="module" crossorigin src="/assets/index-Dxz__23e.js"></script>
<link rel="stylesheet" crossorigin href="/assets/index-Cq20xmwz.css">
</head>
<body>
<div id="app"></div>


@ -20,6 +20,7 @@ import axios from 'axios';
import { marked } from 'marked';
import DOMPurify from 'dompurify'; // For security
import { nextTick } from 'vue'
export default {
name: 'ChangelogPopup',
data() {
@ -64,10 +65,27 @@ export default {
const lastViewedVersion = lastViewedResponse.data;
// Show popup if versions don't match
if (this.currentVersion !== lastViewedVersion) {
console.log("Showing changelog")
this.showChangelogPopup = true;
}
this.$nextTick(() => {
if (this.$store.state.config) {
if (this.currentVersion !== lastViewedVersion && this.$store.state.config.app_show_changelogs) {
console.log("Showing changelog");
this.showChangelogPopup = true;
}
} else {
// If config is not loaded yet, you can set up a watcher or retry after a delay
const unwatch = this.$watch('$store.state.config', (newConfig) => {
if (newConfig) {
if (this.currentVersion !== lastViewedVersion && newConfig.app_show_changelogs) {
console.log("Showing changelog");
this.showChangelogPopup = true;
}
unwatch(); // Stop watching once the config is loaded
}
});
}
});
} catch (error) {
console.error("Error checking changelog:", error);
}


@ -1,17 +1,18 @@
<template>
<button
class="svg-button"
v-bind="$attrs"
v-on="$listeners"
>
<slot name="icon"></slot>
<slot></slot>
</button>
</template>
<script>
export default {
name: 'ChatBarButton',
}
</script>
<button
class="svg-button"
v-bind="$attrs"
@click="$emit('click', $event)"
>
<slot name="icon"></slot>
<slot></slot>
</button>
</template>
<script>
export default {
name: 'ChatBarButton',
// Define the events this component can emit
emits: ['click'],
}
</script>


@ -312,6 +312,7 @@ import ProgressBar from "@/components/ProgressBar.vue";
import UniversalForm from '../components/UniversalForm.vue';
import Toast from '@/components/Toast.vue'
import YesNoDialog from '../components/YesNoDialog.vue';
import InputBox from "@/components/input_box.vue";
import axios from 'axios'
import feather from 'feather-icons'
@ -324,6 +325,7 @@ export default {
Toast,
MessageBox,
ProgressBar,
InputBox,
UniversalForm,
YesNoDialog
@ -353,6 +355,9 @@ export default {
}
},
computed:{
progress_visibility(){
return false;
},
isModelOK(){
return this.$store.state.isModelOk;
},


@ -9,30 +9,39 @@
class="w-24 h-24 rounded-full absolute animate-rolling-ball"
>
</div>
<div class="flex flex-col items-start">
<div v-if="$store.state.config!=null&&$store.state.config.app_custom_name!=null&&$store.state.config.app_custom_name!=''" class="flex flex-col items-start">
<h1 class="text-6xl font-bold text-transparent bg-clip-text bg-gradient-to-r from-indigo-600 to-purple-600 dark:from-indigo-400 dark:to-purple-400">
{{$store.state.theme_vars.lollms_title}}
</h1>
<p class="text-2xl italic mt-2">
Lord of Large Language And Multimodal Systems
</p>
{{$store.state.config.app_custom_name}}
</h1>
</div>
<div v-else class="flex flex-col items-start">
<h1 class="text-6xl font-bold text-transparent bg-clip-text bg-gradient-to-r from-indigo-600 to-purple-600 dark:from-indigo-400 dark:to-purple-400">
{{$store.state.theme_vars.lollms_title}}
</h1>
<p class="text-2xl italic mt-2">
Lord of Large Language And Multimodal Systems
</p>
</div>
</div>
<div class="space-y-8 animate-fade-in-up">
<div v-if="$store.state.config!=null&&$store.state.config.app_custom_name!=null&&$store.state.config.app_custom_name!=''" class="space-y-8 animate-fade-in-up">
<p v-html="$store.state.config.app_custom_welcome_message">
</p>
</div>
<div v-else class="space-y-8 animate-fade-in-up">
<h2 class="text-4xl font-semibold">
{{$store.state.theme_vars.lollms_welcome_short_message}}
</h2>
<p class="text-xl max-w-3xl mx-auto">
{{$store.state.theme_vars.lollms_welcome_message}}
</p>
<!-- New section for latest news -->
<div v-if="latestNews" class="mt-12 p-6 rounded-lg shadow-md animate-fade-in-up overflow-y-scroll scrollbar-thin">
<h3>Latest LoLLMS News</h3>
<p v-html="latestNews"></p>
</div>
</div>
<!-- New section for latest news -->
<div v-if="latestNews" class="mt-12 p-6 rounded-lg shadow-md animate-fade-in-up overflow-y-scroll scrollbar-thin">
<h3>Latest LoLLMS News</h3>
<p v-html="latestNews"></p>
</div>
<div v-if="error" class="mt-6 text-red-500">{{ error }}</div>
</div>


@ -97,6 +97,10 @@ export const store = createStore({
console.log("Can't uipdate personality beceause it was Not found:",newPersonality.full_path)
}
},
setInstalledModels(state, installedModels){
state.installedModels = installedModels;
},
setThemeVars(state, themeVars){
state.theme_vars = themeVars;
},


@ -17,22 +17,23 @@
<div class="flex flex-col items-center text-center max-w-4xl w-full px-4 relative z-10">
<div class="mb-8 w-full">
<div class="text-5xl md:text-6xl font-bold mb-2 hover:scale-105 transition-transform lollms-title-style">
{{$store.state.theme_vars.lollms_title}}
{{$store.state.config!=null&&$store.state.config.app_custom_name!=null&&$store.state.config.app_custom_name!=""?$store.state.config.app_custom_name:$store.state.theme_vars.lollms_title}}
</div>
<p class="text-2xl italic">
One tool to rule them all
{{$store.state.config!=null&&$store.state.config.app_custom_slogan!=null&&$store.state.config.app_custom_slogan!=""?$store.state.config.app_custom_slogan:"One tool to rule them all"}}
</p>
<p class="text-xl mb-6">
<p v-if="shouldShowLollmsParagraphs" class="text-xl mb-6">
by ParisNeo
</p>
<p class="bottom-0 text-2xl italic">
{{ version_info }}
</p>
<!-- Clickable interesting fact card -->
<div class="interesting-facts transition-transform duration-300 cursor-pointer"
<div v-if="shouldShowLollmsFunFacts" class="interesting-facts transition-transform duration-300 cursor-pointer"
@click="updateRandomFact">
<p class="text-lg ">
<span class="font-semibold">🤔 Fun Fact: </span>
@ -74,14 +75,14 @@
<RouterLink :to="{ name: 'discussions' }" class="flex items-center space-x-2"> <!-- Added space-x-2 -->
<div class="logo-container"> <!-- Removed mr-1 -->
<img class="w-12 h-12 rounded-full object-cover logo-image"
:src="$store.state.config == null ? storeLogo : $store.state.config.app_custom_logo != '' ? '/user_infos/' + $store.state.config.app_custom_logo : storeLogo"
alt="Logo" title="LoLLMS WebUI">
:src="$store.state.config == null ? storeLogo : $store.state.config.app_custom_logo !=null && $store.state.config.app_custom_logo != '' ? '/user_infos/' + $store.state.config.app_custom_logo : storeLogo"
alt="Logo" :title="$store.state.config&&$store.state.config.app_custom_name&&$store.state.config.app_custom_name!=''?$store.state.config.app_custom_logo:'LoLLMS WebUI'">
</div>
<div class="flex flex-col justify-center">
<div class="text-center p-2">
<div class="text-md relative inline-block">
<span class="relative inline-block font-bold tracking-wide text-black dark:text-white">
LoLLMS
{{$store.state.config&&$store.state.config.app_custom_name&&$store.state.config.app_custom_name!=''?$store.state.config.app_custom_name:'LoLLMS WebUI'}}
</span>
<div class="absolute -bottom-0.5 left-0 w-full h-0.5
bg-black dark:bg-white
@ -91,7 +92,7 @@
</div>
<p class="text-gray-400 text-sm">One tool to rule them all</p>
<p class="text-gray-400 text-sm">{{$store.state.config&&$store.state.config.app_custom_slogan&&$store.state.config.app_custom_slogan!=''?$store.state.config.app_custom_slogan:'One tool to rule them all'}}</p>
</div>
</RouterLink>
@ -293,7 +294,6 @@
>
<img
:src="item.icon ? item.icon : modelImgPlaceholder"
@error="modelImgPlaceholder"
:alt="item.name"
class="w-full h-full object-cover"
:class="{'border-2 border-secondary': item.name == binding_name}"
@ -372,7 +372,6 @@
>
<img
:src="item.icon ? item.icon : modelImgPlaceholder"
@error="personalityImgPlacehodler"
:alt="item.name"
class="w-full h-full object-cover"
:class="{'border-2 border-secondary': item.name == model_name}"
@ -448,8 +447,7 @@
class="w-12 h-12 rounded-md overflow-hidden transition-transform duration-200 transform group-hover/item:scale-105 focus:outline-none"
>
<img
:src="getPersonailyAvatar(item)"
@error="personalityImgPlacehodler"
:src="item.avatar"
:alt="item.name"
class="w-full h-full object-cover"
:class="{'border-2 border-secondary': $store.state.active_personality_id == $store.state.personalities.indexOf(item.full_path)}"
@ -502,7 +500,7 @@
<div class="personalities-container">
<div @mouseenter="showPersonalitiesMenu" class="personalities-hover-area">
<MountedPersonalities ref="mountedPers" :onShowPersList="onShowPersList" :onReady="onPersonalitiesReady"/>
<MountedPersonalities ref="mountedPers" :onShowPersList="onShowPersListFun" :onReady="onPersonalitiesReadyFun"/>
</div>
</div>
</div>
@ -1250,6 +1248,8 @@ import skillsRegistry from "../assets/registry.svg"
import robot from "../assets/robot.svg"
import { mapState } from 'vuex';
import modelImgPlaceholder from "../assets/default_model.png"
import personalityImgPlacehodler from "../assets/default_model.png"
import MountedPersonalities from '@/components/MountedPersonalities.vue'
import ChatBarButton from '@/components/ChatBarButton.vue'
@ -1494,6 +1494,7 @@ export default {
discord:discord,
FastAPI:FastAPI,
modelImgPlaceholder:modelImgPlaceholder,
personalityImgPlacehodler:personalityImgPlacehodler,
customLanguage: '', // Holds the value of the custom language input
rebooting_audio: new Audio("rebooting.wav"),
connection_lost_audio: new Audio("connection_lost.wav"),
@ -1646,8 +1647,15 @@ export default {
methods: {
async getPersonailyAvatar(personality){
const avatar_path = this.bUrl + personality.avatar
return avatar_path
if(personality.avatar){
const avatar_path = "/" + personality.avatar
console.log("Personality avatar")
console.log(avatar_path)
return avatar_path
}
else{
return personalityImgPlacehodler
}
},
updateRandomFact() {
// Get a new random fact different from the current one
@ -1682,6 +1690,7 @@ export default {
}
}
},
onShowPersListFun(){},
onPersonalitiesReadyFun(){
this.$store.state.personalities_ready = true;
},
@ -1705,6 +1714,8 @@ export default {
this.personalityHoveredIndex = null
},
async onPersonalitySelected(pers) {
console.log("Selected personality")
console.log(pers)
this.hidePersonalitiesMenu()
// eslint-disable-next-line no-unused-vars
if (pers) {
@ -1738,7 +1749,6 @@ export default {
this.$store.state.toast.showToast("Error on select personality:\n" + pers.name, 4, false)
}
} else {
}
this.$emit('personalitySelected')
@ -3870,7 +3880,6 @@ export default {
WelcomeComponent,
ChoiceDialog,
ProgressBar,
InputBox,
SkillsLibraryViewer,
PersonalityEditor,
@ -3948,7 +3957,21 @@ export default {
},
computed: {
shouldShowLollmsParagraphs() {
const slp = !(this.$store.state.config &&
this.$store.state.config.app_custom_name &&
(this.$store.state.config.app_custom_name != ''));
console.log("shouldShowLollmsParagraphs")
console.log(slp)
return slp;
},
shouldShowLollmsFunFacts() {
const slf = !(this.$store.state.config &&
this.$store.state.config.app_show_fun_facts);
console.log("shouldShowLollmsFunFacts")
console.log(slf)
return slf;
},
// Get unique placeholders while preserving order
parsedPlaceholders() {
const uniqueMap = new Map();
@ -4170,7 +4193,6 @@ export default {
import Discussion from '../components/Discussion.vue'
import ChoiceDialog from '@/components/ChoiceDialog.vue'
import ProgressBar from "@/components/ProgressBar.vue";
import InputBox from "@/components/input_box.vue";
import SkillsLibraryViewer from "@/components/SkillsViewer.vue"
import Message from '../components/Message.vue'


@ -1,17 +1,34 @@
<template>
<div class="help-view background-color p-6 w-full">
<div class="big-card w-full">
<h1 class="text-4xl md:text-5xl font-bold text-gray-800 dark:text-gray-100 mb-6">LoLLMs Help</h1>
<div class="help-sections-container">
<div class="help-sections space-y-4">
<div v-for="(section, index) in helpSections" :key="index" class="help-section message">
<h2 @click="toggleSection(index)" class="menu-item cursor-pointer flex justify-between items-center">
<div class="help-view bg-gradient-to-br from-gray-100 to-blue-50 dark:from-gray-900 dark:to-gray-800 min-h-screen flex">
<!-- Left Sidebar -->
<div class="left-bar w-64 bg-white dark:bg-gray-800 shadow-lg p-6 fixed h-full overflow-y-auto transition-all duration-300 md:w-72 lg:w-80">
<h2 class="text-2xl font-semibold text-gray-800 dark:text-gray-100 mb-6">Help Topics</h2>
<ul class="space-y-3">
<li v-for="(section, index) in helpSections" :key="index">
<a @click.prevent="scrollToSection(index)" class="block p-3 rounded-lg text-gray-700 dark:text-gray-300 hover:bg-blue-100 dark:hover:bg-gray-700 hover:text-blue-600 dark:hover:text-blue-400 transition-colors duration-200 cursor-pointer">
{{ section.title }}
</a>
</li>
</ul>
</div>
<!-- Main Content -->
<div class="main-content ml-64 md:ml-72 lg:ml-80 flex-1 p-8">
<div class="big-card bg-white dark:bg-gray-900 rounded-xl shadow-md p-8 max-w-4xl mx-auto">
<h1 class="text-4xl md:text-5xl font-bold text-gray-800 dark:text-gray-100 mb-8 flex items-center">
<span class="mr-2">📚</span> LoLLMs Help
</h1>
<div class="help-sections-container space-y-6">
<div v-for="(section, index) in helpSections" :key="index" :id="'section-' + index" class="help-section bg-gray-50 dark:bg-gray-800 rounded-lg p-6 transition-all duration-300">
<h2 @click="toggleSection(index)" class="menu-item cursor-pointer flex justify-between items-center text-xl font-semibold text-gray-800 dark:text-gray-200 hover:text-blue-600 dark:hover:text-blue-400">
{{ section.title }}
<span class="toggle-icon">{{ section.isOpen ? '▼' : '▶' }}</span>
<span class="toggle-icon text-gray-500 dark:text-gray-400">{{ section.isOpen ? '▼' : '▶' }}</span>
</h2>
<div v-if="section.isOpen" class="help-content mt-4">
<div v-html="section.content" class="prose dark:prose-invert"></div>
</div>
<transition name="fade">
<div v-if="section.isOpen" class="help-content mt-4 text-gray-600 dark:text-gray-300 prose dark:prose-invert">
<div v-html="section.content"></div>
</div>
</transition>
</div>
</div>
</div>
@ -27,12 +44,21 @@ export default {
data() {
return {
helpSections: []
}
};
},
methods: {
toggleSection(index) {
this.helpSections[index].isOpen = !this.helpSections[index].isOpen;
},
scrollToSection(index) {
this.helpSections.forEach((section, i) => {
section.isOpen = i === index; // Open only the clicked section
});
const element = document.getElementById(`section-${index}`);
if (element) {
element.scrollIntoView({ behavior: 'smooth' });
}
},
async loadMarkdownFile(filename) {
try {
const response = await fetch(`/help/${filename}`);
@ -50,7 +76,12 @@ export default {
{ title: 'Uploading Files', file: 'uploading-files.md' },
{ title: 'Sending Images', file: 'sending-images.md' },
{ title: 'Using Code Interpreter', file: 'code-interpreter.md' },
{ title: 'Internet Search', file: 'internet-search.md' }
{ title: 'Internet Search', file: 'internet-search.md' },
{ title: 'Text Generation', file: 'text-generation.md' },
{ title: 'Image Generation', file: 'image-generation.md' },
{ title: 'Music Generation', file: 'music-generation.md' },
{ title: 'Managing Personalities', file: 'managing-personalities.md' },
{ title: 'Troubleshooting', file: 'troubleshooting.md' }
];
for (const section of sectionFiles) {
@ -66,33 +97,68 @@ export default {
mounted() {
this.loadHelpSections();
}
}
};
</script>
<style scoped>
/* General Layout */
.help-view {
@apply min-h-screen;
@apply flex flex-col md:flex-row;
}
/* Sidebar Styling */
.left-bar {
scrollbar-width: thin;
scrollbar-color: #888 #f1f1f1;
}
.left-bar::-webkit-scrollbar {
width: 8px;
}
.left-bar::-webkit-scrollbar-track {
background: #f1f1f1;
border-radius: 10px;
}
.left-bar::-webkit-scrollbar-thumb {
background: #888;
border-radius: 10px;
}
.left-bar::-webkit-scrollbar-thumb:hover {
background: #555;
}
/* Main Content Styling */
.main-content {
@apply transition-all duration-300;
}
.big-card {
@apply bg-white dark:bg-gray-800 rounded-lg shadow-lg p-8 mx-auto;
@apply transform transition-all duration-300 hover:shadow-xl;
}
.help-sections-container {
@apply max-h-[70vh] overflow-y-auto pr-4;
@apply max-h-[70vh] overflow-y-auto;
}
.help-section {
@apply transition-all duration-300 ease-in-out;
@apply hover:bg-gray-100 dark:hover:bg-gray-700;
}
.help-content {
@apply text-gray-600 dark:text-gray-300;
/* Fade Transition */
.fade-enter-active, .fade-leave-active {
transition: opacity 0.3s ease;
}
/* Cute scrollbar styles */
.fade-enter-from, .fade-leave-to {
opacity: 0;
}
/* Scrollbar Styling for Main Content */
.help-sections-container::-webkit-scrollbar {
width: 12px;
width: 10px;
}
.help-sections-container::-webkit-scrollbar-track {
@ -103,16 +169,23 @@ export default {
.help-sections-container::-webkit-scrollbar-thumb {
background: #888;
border-radius: 10px;
border: 3px solid #f1f1f1;
border: 2px solid #f1f1f1;
}
.help-sections-container::-webkit-scrollbar-thumb:hover {
background: #555;
}
/* For Firefox */
.help-sections-container {
scrollbar-width: thin;
scrollbar-color: #888 #f1f1f1;
/* Responsive Design */
@media (max-width: 768px) {
.left-bar {
@apply w-full fixed top-0 left-0 h-auto z-10 transform -translate-x-full md:translate-x-0 md:w-64;
}
.main-content {
@apply ml-0;
}
.left-bar.open {
@apply translate-x-0;
}
}
</style>
</style>

File diff suppressed because it is too large

@ -1 +1 @@
Subproject commit 415588a603744648a38bd1b1d59f50e6fd17e5b8
Subproject commit 3078044003f0e80d8d03c389ccfd6dcf96d6c283

@ -1 +1 @@
Subproject commit bff682ef0eb3e0cf5023d9815f4014c7137f52fb
Subproject commit 75225fb86bbfabf242b160f41f9bc6698dd89c0a