diff --git a/doc/server_endpoints.md b/doc/server_endpoints.md
index 2657a1b..245fd86 100644
--- a/doc/server_endpoints.md
+++ b/doc/server_endpoints.md
@@ -308,3 +308,4 @@ Events generated:
 - `'buzzy'`: when the server is busy and can't process the request, it sends this event and returns. The event has one parameter `message` containing a string.
 - `'text_chunk'`: Generated text chunks are emitted to the client through this event during the text generation process. The event has two parameters `chunk` and `type`.
 - `'text_generated'`: Once the text generation process is complete, the final generated text is emitted to the client through this event. The event has one parameter `text` containing the full generated text.
+- `'generation_canceled'`: Emitted in answer to a `cancel_generation` call to confirm that generation was canceled. The event has one parameter `message` containing a confirmation string.
diff --git a/examples/lollms_playground/README.md b/examples/lollms_playground/README.md
new file mode 100644
index 0000000..a7e2dd5
--- /dev/null
+++ b/examples/lollms_playground/README.md
@@ -0,0 +1,79 @@
+# LoLLMs Endpoint Test Tool
+
+This tool provides a web-based interface to test LoLLMs endpoints and generate text using a LoLLMs server.
+
+## Prerequisites
+
+To use this tool, you need to have [Node.js](https://nodejs.org) installed on your machine.
+
+## Installation
+
+1. Clone this repository or download the source code.
+
+   ```bash
+   git clone https://github.com/ParisNeo/lollms-playground.git
+   ```
+
+2. Navigate to the project directory.
+
+   ```bash
+   cd lollms-playground
+   ```
+
+3. Install the dependencies.
+
+   ```bash
+   npm install
+   ```
+
+## Usage
+
+1. Start the LoLLMs server. You can use `lollms-server` to run the server with the desired configuration. Here are a few examples:
+
+   - To run the server on `localhost` and port `9600`:
+
+     ```bash
+     lollms-server --host localhost --port 9600
+     ```
+
+   - To run the server on a different host and port:
+
+     ```bash
+     lollms-server --host mydomain.com --port 8080
+     ```
+
+   - For more information on the available options, you can use the `--help` flag:
+
+     ```bash
+     lollms-server --help
+     ```
+
+2. Start the web server for the LoLLMs Endpoint Test Tool.
+
+   ```bash
+   npm start
+   ```
+
+3. Open your web browser and visit `http://localhost:8080/lollms_playground.html` (or the appropriate URL) to access the LoLLMs Endpoint Test Tool.
+
+4. Fill in the host and port fields with the appropriate values for your LoLLMs server.
+
+5. Click the "Connect" button to establish a connection with the LoLLMs server.
+
+6. Once connected, you can enter a prompt and click the "Generate Text" button to initiate text generation.
+
+7. The generated text will be displayed in the output section of the page.
+
+## Customization
+
+You can customize the appearance and behavior of the tool by modifying the HTML, CSS, and JavaScript code in the `lollms_playground.html` file.
+
+## Contributing
+
+Contributions are welcome! If you find any issues or want to add new features, feel free to open an issue or submit a pull request.
+
+## License
+
+This project is licensed under the [MIT License](LICENSE).
diff --git a/examples/lollms_playground/lollms_playground.html b/examples/lollms_playground/lollms_playground.html
new file mode 100644
index 0000000..56b7485
--- /dev/null
+++ b/examples/lollms_playground/lollms_playground.html
@@ -0,0 +1,154 @@
[The 154 added lines of HTML markup were mangled in this extract and are not reproduced. Recoverable text: page title "LoLLMs Endpoint Test", heading "LoLLMs Playground", a host/port connection form with a "Connect" button, and a generation section with a text area plus "Generate Text" and "Stop Generation" buttons. The file is identical to tests/endoints_unit_tests/node/test_generation.html (same blob 56b7485) further down.]
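The playground page added above drives the socket.io endpoints documented in `doc/server_endpoints.md`. For readers who want to exercise the same flow without a browser, here is a minimal, illustrative sketch using the `python-socketio` client package; the host, port, prompt, and `n_predicts` values are assumptions borrowed from the examples in this changeset, not requirements of the server.

```python
# Minimal sketch of the generation flow, assuming a LoLLMs server on localhost:9600
# and the python-socketio client package (pip install "python-socketio[client]").
import socketio

sio = socketio.Client()

@sio.on('text_chunk')
def on_text_chunk(data):
    # Streamed chunks arrive here during generation ('chunk' and 'type' parameters).
    print(data['chunk'], end='', flush=True)

@sio.on('text_generated')
def on_text_generated(data):
    # The full text arrives once generation is complete.
    print('\n--- done ---')
    print(data['text'])
    sio.disconnect()

@sio.on('buzzy')
def on_busy(data):
    # Sent when the server is busy and cannot serve the request right now.
    print('Server busy:', data['message'])
    sio.disconnect()

sio.connect('http://localhost:9600')
# personality=-1 requests raw text generation, as the playground page does.
sio.emit('generate_text', {'prompt': 'Once upon a time', 'personality': -1, 'n_predicts': 1024})
sio.wait()
```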
diff --git a/lollms/server.py b/lollms/server.py
index 521c7e0..2f3534c 100644
--- a/lollms/server.py
+++ b/lollms/server.py
@@ -324,6 +324,8 @@ class LoLLMsServer:
         def cancel_generation(data):
             client_id = request.sid
             self.clients[client_id]["requested_stop"]=False
+            print(f"Client {client_id} requested canceling generation")
+            emit("generation_canceled", {"message":"Generation is canceled."})
 
 
         @self.socketio.on('generate_text')
@@ -335,9 +337,9 @@
             client_id = request.sid
             self.clients[client_id]["is_generating"]=True
             self.clients[client_id]["requested_stop"]=False
-            prompt = data['prompt']
-            personality_id = data['personality']
-            n_predicts = data["n_predicts"]
+            prompt          = data['prompt']
+            personality_id  = data['personality']
+            n_predicts      = data["n_predicts"]
             if personality_id==-1:
                 # Raw text generation
                 print(f"Text generation requested by client: {client_id}")
diff --git a/setup.py b/setup.py
index 42bc616..42b2ab9 100644
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,7 @@ def get_all_files(path):
 
 setuptools.setup(
     name="lollms",
-    version="1.2.5",
+    version="1.2.6",
     author="Saifeddine ALOUI",
     author_email="aloui.saifeddine@gmail.com",
     description="A python library for AI personality definition",
diff --git a/tests/endoints_unit_tests/node/README.md b/tests/endoints_unit_tests/node/README.md
new file mode 100644
index 0000000..f9e6aff
--- /dev/null
+++ b/tests/endoints_unit_tests/node/README.md
@@ -0,0 +1,79 @@
[Same content as examples/lollms_playground/README.md above, except that the Customization section points to the `test_generation.html` file and the License section reads "This project is licensed under the [Apache 2.0 License](LICENSE)."]
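The `cancel_generation` handler added to `lollms/server.py` above now acknowledges the caller with a `generation_canceled` event. Below is a hedged sketch of the corresponding client-side round trip, again with the `python-socketio` client; the five-second delay before canceling is an arbitrary illustration, not part of the API.

```python
# Sketch of requesting and confirming cancellation, assuming a LoLLMs server on localhost:9600.
import socketio

sio = socketio.Client()

@sio.on('text_chunk')
def on_text_chunk(data):
    print(data['chunk'], end='', flush=True)

@sio.on('generation_canceled')
def on_generation_canceled(data):
    # Acknowledgement emitted by the cancel_generation handler above.
    print('\nServer says:', data['message'])
    sio.disconnect()

sio.connect('http://localhost:9600')
sio.emit('generate_text', {'prompt': 'Write a very long story.', 'personality': -1, 'n_predicts': 1024})
sio.sleep(5)                       # let a few chunks stream in
sio.emit('cancel_generation', {})  # same payload the playground's Stop button sends
sio.wait()
```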
diff --git a/tests/endoints_unit_tests/test_generation.html b/tests/endoints_unit_tests/node/test_generation.html
similarity index 51%
rename from tests/endoints_unit_tests/test_generation.html
rename to tests/endoints_unit_tests/node/test_generation.html
index e9baf43..56b7485 100644
--- a/tests/endoints_unit_tests/test_generation.html
+++ b/tests/endoints_unit_tests/node/test_generation.html
@@ -4,7 +4,7 @@
[Markup hunk mangled in this extract. Recoverable changes: the page title "Socket.IO Endpoint Test" becomes "LoLLMs Endpoint Test", and the visible heading "Socket.IO Endpoint Test" becomes "LoLLMs Playground".]
@@ -33,16 +33,22 @@
[Markup hunk mangled in this extract. Judging from the script changes below, the separate prompt input and output div are replaced by a single `text` field, and a hidden "Stop Generation" button (`stop-btn`) and a `connecting` indicator are added alongside the "Generate Text" button.]
@@ -52,13 +58,15 @@
         const socket = io();
         const connectButton = document.getElementById('connect-btn');
         const generateButton = document.getElementById('generate-btn');
+        const stopButton = document.getElementById('stop-btn');
         const connectionSection = document.getElementById('connection-section');
         const generationSection = document.getElementById('generation-section');
+        const connectingText = document.getElementById('connecting');
 
-        // Append the received chunks to the output div
+        // Append the received chunks to the text div
         function appendToOutput(chunk) {
-            const outputDiv = document.getElementById('output');
-            outputDiv.innerHTML += chunk;
+            const outputDiv = document.getElementById('text');
+            outputDiv.value += chunk;
             outputDiv.scrollTop = outputDiv.scrollHeight;
         }
 
@@ -68,22 +76,45 @@
             appendToOutput(data.chunk);
         });
 
+        // Event handler for receiving the final generated text
+        socket.on('text_generated', data => {
+            console.log('text generated:', data.text);
+            // Toggle button visibility
+            generateButton.classList.remove('hidden');
+            stopButton.classList.add('hidden');
+        });
+
         // Event handler for successful connection
        socket.on('connect', () => {
-            console.log('Connected to Socket.IO server');
+            console.log('Connected to LoLLMs server');
             connectButton.disabled = true;
+            connectingText.classList.add("hidden")
             connectionSection.classList.add('hidden');
             generationSection.classList.remove('hidden');
         });
 
         // Event handler for error during text generation
-        socket.on('text_generated_error', error => {
-            console.error('Text generation error:', error);
-            const outputDiv = document.getElementById('output');
-            outputDiv.innerHTML += `<p>Error: ${error.message}</p>`;
+        socket.on('buzzy', error => {
+            console.error('Server is busy. Wait for your turn', error);
+            const outputDiv = document.getElementById('text');
+            outputDiv.value += `<p>Error: ${error.message}</p>`;
             outputDiv.scrollTop = outputDiv.scrollHeight;
+            // Toggle button visibility
+            generateButton.classList.remove('hidden');
+            stopButton.classList.add('hidden');
+        });
+
+        // Event handler for canceled generation
+        socket.on('generation_canceled', error => {
+            // Toggle button visibility
+            generateButton.classList.remove('hidden');
+            stopButton.classList.add('hidden');
+        });
+
         // Triggered when the "Connect" button is clicked
         connectButton.addEventListener('click', () => {
             const hostInput = document.getElementById('host');
@@ -94,25 +125,28 @@
             if (host && port) {
                 socket.io.uri = `http://${host}:${port}`;
                 socket.connect();
-                connectButton.disabled = true;
-                connectionSection.classList.add('hidden');
-                generationSection.classList.remove('hidden');
+                connectingText.classList.remove("hidden")
             }
         });
 
         // Triggered when the "Generate Text" button is clicked
         generateButton.addEventListener('click', () => {
-            const promptInput = document.getElementById('prompt');
-            const prompt = promptInput.value.trim();
+            const outputDiv = document.getElementById('text');
+            var prompt = outputDiv.value
+            console.log(prompt)
+            // Trigger the 'generate_text' event with the prompt
+            socket.emit('generate_text', { prompt, personality: -1, n_predicts: 1024 });
 
-            if (prompt) {
-                // Clear output div
-                document.getElementById('output').innerHTML = '';
+            // Toggle button visibility
+            generateButton.classList.add('hidden');
+            stopButton.classList.remove('hidden');
+        });
+
+        // Triggered when the "Stop Generation" button is clicked
+        stopButton.addEventListener('click', () => {
+            // Trigger the 'cancel_generation' event
+            socket.emit('cancel_generation',{});
 
-                // Trigger the 'generate_text' event with the prompt
-                socket.emit('generate_text', { prompt, personality: -1, n_predicts: 1024 });
-                promptInput.value = '';
-            }
         });
diff --git a/tests/endoints_unit_tests/example_text_gen.txt b/tests/endoints_unit_tests/python/example_text_gen.txt
similarity index 100%
rename from tests/endoints_unit_tests/example_text_gen.txt
rename to tests/endoints_unit_tests/python/example_text_gen.txt
diff --git a/tests/endoints_unit_tests/test_generation.py b/tests/endoints_unit_tests/python/test_generation.py
similarity index 100%
rename from tests/endoints_unit_tests/test_generation.py
rename to tests/endoints_unit_tests/python/test_generation.py
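The reworked test page above surfaces the `buzzy` event directly in its text area. A non-browser client can treat the same event as a cue to back off and retry; the sketch below shows one possible way to do that. It assumes a LoLLMs server on `localhost:9600` and the `python-socketio` client package; the retry count, poll interval, and back-off delay are arbitrary illustrations, not part of the LoLLMs API.

```python
# Sketch of backing off politely when the server answers with 'buzzy' (busy).
import time
import socketio

sio = socketio.Client()
state = {'busy': False, 'done': False}

@sio.on('buzzy')
def on_busy(data):
    # The server refused the request for now; remember it so we can retry later.
    state['busy'] = True
    print('Server busy:', data['message'])

@sio.on('text_generated')
def on_text_generated(data):
    state['done'] = True
    print('Result:', data['text'])

sio.connect('http://localhost:9600')
for attempt in range(3):                     # arbitrary number of attempts
    state['busy'] = False
    sio.emit('generate_text', {'prompt': 'Hello', 'personality': -1, 'n_predicts': 64})
    while not (state['busy'] or state['done']):
        sio.sleep(0.5)                       # poll until the server refuses or finishes
    if state['done']:
        break
    time.sleep(10)                           # back off before retrying
sio.disconnect()
```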