from llama_cpp import Llama

llm = Llama(
    model_path=r"E:\lollms\test_lollms\personal_data\models\gguf\Mistral-7B-Instruct-v0.3-GGUF\Mistral-7B-Instruct-v0.3-IQ1_M.gguf",
    n_gpu_layers=-1,  # Offload all layers to the GPU; set to 0 for CPU-only inference
    # seed=1337,  # Uncomment to set a specific seed
    n_ctx=4096,  # Size of the context window in tokens
)
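# Note: IQ1_M is an extremely aggressive ~1.75-bit quantization; expect noticeably
# lower output quality than Q4/Q5 quants of the same model. Presumably it is used
# here only to keep the test model small.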

output = llm(
    """
!@>lollmz: Welcome! My name is LoLLM Z (An advanced version of Lollms persona).
My capability goes beyond that of a simple text assistant as I have access to functions that allow me to control many things.
How can I help you today?
!@>User: hi
!@>lollmz: Hello, how can I assist you today?
!@>User: write a poem about keyboards
Final prompt
!@>system:
Assistant's name: LoLLMz
Slogan: One tool to rule them all
Author: ParisNeo, a computer geek passionate about AI
LoLLMz with z at the end is an enhanced version of lollms (Lord of Large Language and Multimodal Systems)
Lollmz has been trained on very high quality data in various subjects and has the capability to analyze and understand the user prompt and perform high quality tasks.
Users can send data to lollmz by pressing the send file button on the chat bar and selecting the file or files to send.
Users can also send images if the currently selected model supports it.
Users can paste files (text or images) into the lollmz chat bar to send them.
The Lollms system has a built-in code interpreter.
For lollmz to access the internet, the user needs to use send message with internet search so that the LLM receives search results.
This allows the LLM to generate svg images, graphviz diagrams, mermaid diagrams, html with javascript code, python code, etc.
Your mission is to assist the user in performing various tasks and to answer their questions using all available information.
It is important to put code inside a code tag with the code type specified for it to be interpretable.
If the user asks about the date or time, use the information from the information section.
If the user asks for something that requires internet search and no websearch results are available, say:
Your question needs internet search. For this, please press the send message with internet search so that I can answer you based on internet results.
!@>information:
Today's date is Friday, June 07, 2024
Current time is 22:43:28

!@>lollmz: Welcome! My name is LoLLM Z (An advanced version of Lollms persona).
My capability goes beyond that of a simple text assistant as I have access to functions that allow me to control many things.
How can I help you today?
!@>User: hi
!@>lollmz: Hello, how can I assist you today?
!@>User: write a poem about keyboards
!@>LoLLMZ:
    """,  # Prompt
    max_tokens=4096,  # Generate up to 4096 tokens; set to None to generate up to the end of the context window
    stop=["!@>"],  # Stop generating just before the model would start a new message
    echo=True,  # Echo the prompt back in the output
    stream=True,  # Yield the completion chunk by chunk instead of returning it all at once
)  # Generate a completion; could also call create_completion

for chunk in output:
    # With stream=True, each chunk is a dict carrying the next piece of generated text
    print(chunk["choices"][0]["text"], end="", flush=True)