mirror of https://github.com/ParisNeo/lollms-webui.git (synced 2024-12-19 04:17:52 +00:00)

Merge branch 'nomic-ai:main' into main

Commit 31eeef6ff6
@@ -19,6 +19,7 @@ import threading
 import time
 import requests
 import urllib.request
+from tqdm import tqdm

 __author__ = "parisneo"
 __github__ = "https://github.com/nomic-ai/gpt4all-ui"
@@ -38,6 +39,7 @@ class ModelProcess:
         self.started_queue = mp.Queue()
         self.process = None
         self.is_generating = mp.Value('i', 0)
+        self.model_ready = mp.Value('i', 0)
         self.ready = False

     def load_backend(self, backend_path):
@@ -103,6 +105,7 @@ class ModelProcess:
             model_file = Path("models")/self.config["backend"]/self.config["model"]
             print(f"Loading model : {model_file}")
             self.model = self.backend(self.config)
+            self.model_ready.value = 1
             print("Model created successfully")
         except Exception as ex:
             print("Couldn't build model")
@@ -138,12 +141,15 @@ class ModelProcess:
     def _run(self):
         self._rebuild_model()
         self._rebuild_personality()
-        self._generate("I",0,1)
-        print()
-        print("Ready to receive data")
-        print(f"Listening on :http://{self.config['host']}:{self.config['port']}")
-        self.ready = True
+        if self.model_ready.value == 1:
+            self._generate("I",0,1)
+            print()
+            print("Ready to receive data")
+        else:
+            print("No model loaded. Waiting for new configuration instructions")
+
+        self.ready = True
+        print(f"Listening on :http://{self.config['host']}:{self.config['port']}")
         while True:
             try:
                 self._check_set_config_queue()
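Context for the hunks above: `model_ready` is a `multiprocessing.Value('i', 0)`, so the parent server process and the generation child process read and write the same integer with no extra IPC. A minimal, self-contained sketch of that pattern (the `worker` function and polling loop are illustrative, not code from this repo):

```python
# Sketch of a shared readiness flag between a parent and a worker process.
import multiprocessing as mp
import time

def worker(model_ready):
    time.sleep(1)            # stands in for slow model loading
    model_ready.value = 1    # child signals: model is usable

if __name__ == "__main__":
    model_ready = mp.Value('i', 0)   # shared int, starts as "not ready"
    p = mp.Process(target=worker, args=(model_ready,))
    p.start()
    while model_ready.value == 0:    # parent polls the flag
        time.sleep(0.1)
    print("model ready, safe to accept generation requests")
    p.join()
```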
@@ -312,7 +318,6 @@ class GPT4AllAPI():

            if installation_path.exists():
                print("Error: Model already exists")
-                data.installing = False
                socketio.emit('install_progress',{'status': 'failed', 'error': 'model already exists'})

            socketio.emit('install_progress',{'status': 'progress', 'progress': progress})
@@ -342,20 +347,33 @@ class GPT4AllAPI():

        @socketio.on('generate_msg')
        def generate_msg(data):
-            if self.current_discussion is None:
-                if self.db.does_last_discussion_have_messages():
-                    self.current_discussion = self.db.create_discussion()
-                else:
-                    self.current_discussion = self.db.load_last_discussion()
-
-            message = data["prompt"]
-            message_id = self.current_discussion.add_message(
-                "user", message, parent=self.message_id
-            )
-
-            self.current_user_message_id = message_id
-            tpe = threading.Thread(target=self.start_message_generation, args=(message, message_id))
-            tpe.start()
+            if self.process.model_ready.value==1:
+                if self.current_discussion is None:
+                    if self.db.does_last_discussion_have_messages():
+                        self.current_discussion = self.db.create_discussion()
+                    else:
+                        self.current_discussion = self.db.load_last_discussion()
+
+                message = data["prompt"]
+                message_id = self.current_discussion.add_message(
+                    "user", message, parent=self.message_id
+                )
+
+                self.current_user_message_id = message_id
+                tpe = threading.Thread(target=self.start_message_generation, args=(message, message_id))
+                tpe.start()
+            else:
+                self.socketio.emit('infos',
+                        {
+                            "status":'model_not_ready',
+                            "type": "input_message_infos",
+                            "bot": self.personality.name,
+                            "user": self.personality.user_name,
+                            "message":"",
+                            "user_message_id": self.current_user_message_id,
+                            "ai_message_id": self.current_ai_message_id,
+                        }
+                )

        @socketio.on('generate_msg_from')
        def handle_connection(data):
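The hunk above turns `generate_msg` into a guarded handler: prompts are only processed once `model_ready` flips, otherwise the client immediately receives an `infos` event with `status: 'model_not_ready'`. A stripped-down sketch of the guard under flask-socketio (the app setup and empty happy path are assumptions for illustration, not the project's actual wiring):

```python
# Minimal readiness guard, mirroring the generate_msg change above.
from flask import Flask
from flask_socketio import SocketIO
import multiprocessing as mp

app = Flask(__name__)
socketio = SocketIO(app)
model_ready = mp.Value('i', 0)   # set to 1 by the model process once loaded

@socketio.on('generate_msg')
def generate_msg(data):
    if model_ready.value == 1:
        pass  # normal path: store the message and start generation
    else:
        # Tell the client why nothing will be generated.
        socketio.emit('infos', {
            "status": "model_not_ready",
            "type": "input_message_infos",
            "message": "",
        })

if __name__ == "__main__":
    socketio.run(app)
```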
@@ -390,22 +408,41 @@ class GPT4AllAPI():

    def download_file(self, url, installation_path, callback=None):
        """
-        Downloads a file from a URL and displays the download progress using tqdm.
+        Downloads a file from a URL, reports the download progress using a callback function, and displays a progress bar.

        Args:
            url (str): The URL of the file to download.
+            installation_path (str): The path where the file should be saved.
            callback (function, optional): A callback function to be called during the download
                with the progress percentage as an argument. Defaults to None.
        """
-        def report_hook(count, block_size, total_size):
-            if callback is not None:
-                percentage = (count * block_size / total_size) * 100
-                callback(percentage)
-
-        urllib.request.urlretrieve(url, installation_path, reporthook=report_hook)
-
-        if callback is not None:
-            callback(100.0)
+        try:
+            response = requests.get(url, stream=True)
+
+            # Get the file size from the response headers
+            total_size = int(response.headers.get('content-length', 0))
+
+            with open(installation_path, 'wb') as file:
+                downloaded_size = 0
+                with tqdm(total=total_size, unit='B', unit_scale=True, ncols=80) as progress_bar:
+                    for chunk in response.iter_content(chunk_size=8192):
+                        if chunk:
+                            file.write(chunk)
+                            downloaded_size += len(chunk)
+                            if callback is not None:
+                                percentage = (downloaded_size / total_size) * 100
+                                callback(percentage)
+                            progress_bar.update(len(chunk))
+
+            if callback is not None:
+                callback(100.0)
+
+            print("File downloaded successfully")
+        except Exception as e:
+            print("Couldn't download file:", str(e))

    def load_backend(self, backend_path):
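The rewritten `download_file` streams the response with `requests` and drives both a `tqdm` bar and the caller's progress callback, replacing the old `urllib.request.urlretrieve` report hook. A hedged usage sketch (the URL, destination path, and `api` variable are placeholders, not values from the repo):

```python
# Hypothetical caller: relay download progress to connected clients.
def on_progress(percentage):
    socketio.emit('install_progress',
                  {'status': 'progress', 'progress': percentage})

api.download_file(
    "https://example.com/models/some-model.bin",  # placeholder URL
    "models/some-backend/some-model.bin",         # placeholder destination
    callback=on_progress,
)
```

Note that the percentage math in the hunk divides by `total_size`, which assumes the server reports a nonzero `content-length` header.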
@@ -544,6 +581,7 @@ class GPT4AllAPI():
        ) # first the content is empty, but we'll fill it at the end
        self.socketio.emit('infos',
                {
+                    "status":'generation_started',
                    "type": "input_message_infos",
                    "bot": self.personality.name,
                    "user": self.personality.user_name,
@@ -22,14 +22,24 @@ socket.on('disconnect', function() {
  console.log("Disconnected")
});
socket.on('infos', function(msg) {
-  if(globals.user_msg){
-    globals.user_msg.setSender(msg.user);
-    globals.user_msg.setMessage(msg.message);
-    globals.user_msg.setID(msg.id);
-  }
-  globals.bot_msg.setSender(msg.bot);
-  globals.bot_msg.setID(msg.ai_message_id);
-  globals.bot_msg.messageTextElement.innerHTML = `Generating answer. Please stand by...`;
+  console.log(msg)
+  if(msg["status"]=="generation_started"){
+    if(globals.user_msg){
+      globals.user_msg.setSender(msg.user);
+      globals.user_msg.setMessage(msg.message);
+      globals.user_msg.setID(msg.id);
+    }
+    globals.bot_msg.setSender(msg.bot);
+    globals.bot_msg.setID(msg.ai_message_id);
+    globals.bot_msg.messageTextElement.innerHTML = `Generating answer. Please stand by...`;
+  }
+  else{
+    globals.sendbtn.style.display="block";
+    globals.waitAnimation.style.display="none";
+    globals.stopGeneration.style.display = "none";
+    globals.is_generating = false
+    alert("It seems that no model has been loaded. Please download and install a model first, then try again.");
+  }
});

socket.on('waiter', function(msg) {
File diff suppressed because one or more lines are too long

web/dist/index.html (vendored, 4 lines changed)
@@ -6,8 +6,8 @@

    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>GPT4All - WEBUI</title>
-    <script type="module" crossorigin src="/assets/index-afd2c992.js"></script>
-    <link rel="stylesheet" href="/assets/index-98afc6c1.css">
+    <script type="module" crossorigin src="/assets/index-8b53a73b.js"></script>
+    <link rel="stylesheet" href="/assets/index-94947680.css">
  </head>
  <body>
    <div id="app"></div>
@@ -26,9 +26,9 @@
    <template v-if="installing">
        <div class="flex items-center space-x-2">
            <div class="h-2 w-20 bg-gray-300 rounded">
-                <div :style="{ width: progress + '%' }" class="h-full bg-green-500"></div>
+                <div :style="{ width: progress + '%'}" class="h-full bg-red-500 rounded"></div>
            </div>
-            <span>Installing...</span>
+            <span>Installing...{{ Math.floor(progress) }}%</span>
        </div>
    </template>
    <template v-else-if="uninstalling">
@@ -51,10 +51,6 @@
import socket from '@/services/websocket.js'
export default {
    props: {
-        progress: {
-            type: Number,
-            default: 0
-        },
        title: String,
        icon: String,
        path: String,
@@ -67,6 +63,7 @@ export default {
    },
    data() {
        return {
+            progress: 0,
            installing: false,
            uninstalling: false
        };
@@ -5,6 +5,16 @@
        role="alert">
        <div class="flex flex-row">
            <slot>
+                <div v-if="success"
+                    class="inline-flex items-center justify-center flex-shrink-0 w-8 h-8 text-green-500 bg-green-100 rounded-lg dark:bg-green-800 dark:text-green-200">
+                    <i data-feather="check"></i>
+                    <span class="sr-only">Check icon</span>
+                </div>
+                <div v-if="!success" class="inline-flex items-center justify-center flex-shrink-0 w-8 h-8 text-red-500 bg-red-100 rounded-lg dark:bg-red-800 dark:text-red-200">
+                    <i data-feather="x"></i>
+                    <span class="sr-only">Cross icon</span>
+                </div>
+                <div class="ml-3 text-sm font-normal">{{ message }}</div>

            </slot>
        </div>
@@ -19,6 +29,7 @@
            clip-rule="evenodd"></path>
        </svg>
    </button>
+
    </div>
</div>
</template>
@@ -34,12 +45,23 @@ export default {
    data() {
        return {
            show: false,
+            success: true,
+            message: ''
        };
    },
    methods: {
        close() {
            this.$emit('close')
            this.show = false
+        },
+        showToast(message, duration_s=3, success= true){
+            this.success = success;
+            this.message = message;
+            this.show = true;
+            setTimeout(() => {
+                this.$emit('close')
+                this.show = false
+            }, duration_s*1000);
        }
    },
    watch: {
@@ -131,13 +131,7 @@
        </div>

    </div>
-    <Toast :showProp="isCopiedToClipboard" @close="isCopiedToClipboard = false">
-        <div
-            class="inline-flex items-center justify-center flex-shrink-0 w-8 h-8 text-green-500 bg-green-100 rounded-lg dark:bg-green-800 dark:text-green-200">
-            <i data-feather="check"></i>
-            <span class="sr-only">Check icon</span>
-        </div>
-        <div class="ml-3 text-sm font-normal">Message content copied to clipboard!</div>
+    <Toast ref="toast">
    </Toast>

</template>
@@ -166,7 +160,7 @@ export default {
            isSelectAll: false,
            showConfirmation: false,
            chime: new Audio("chime_aud.wav"),
-            isCopiedToClipboard: false,
+            showToast: false,
            isSearch: false,
            isDiscussionBottom: false,
        }
@@ -427,40 +421,48 @@ export default {
            // Update previous message with reponse user data
            //
            // msgObj
-            //
+            // "status": "if the model is not ready this will inform the user that he can't promt the model"
            // "type": "input_message_infos",
            // "bot": self.personality.name,
            // "user": self.personality.user_name,
            // "message":message,#markdown.markdown(message),
            // "user_message_id": self.current_user_message_id,
            // "ai_message_id": self.current_ai_message_id,
-            this.updateLastUserMsg(msgObj)
-            // Create response message
-            let responseMessage = {
-                content: "✍ please stand by ...",//msgObj.message,
-                id: msgObj.ai_message_id,
-                parent: msgObj.user_message_id,
-                rank: 0,
-                sender: msgObj.bot,
-                //type: msgObj.type
-            }
-            this.discussionArr.push(responseMessage)
-            nextTick(() => {
-                const msgList = document.getElementById('messages-list')
-                this.scrollBottom(msgList)
-            })
-            if (this.currentDiscussion.title === '' || this.currentDiscussion.title === null) {
-                if (msgObj.type == "input_message_infos") {
-                    // This is a user input
-                    this.changeTitleUsingUserMSG(this.currentDiscussion.id, msgObj.message)
-                }
-            }
-            console.log("infos", msgObj)
+            console.log(msgObj);
+            if(msgObj["status"]=="generation_started"){
+                this.updateLastUserMsg(msgObj)
+                // Create response message
+                let responseMessage = {
+                    content: "✍ please stand by ...",//msgObj.message,
+                    id: msgObj.ai_message_id,
+                    parent: msgObj.user_message_id,
+                    rank: 0,
+                    sender: msgObj.bot,
+                    //type: msgObj.type
+                }
+                this.discussionArr.push(responseMessage)
+                nextTick(() => {
+                    const msgList = document.getElementById('messages-list')
+                    this.scrollBottom(msgList)
+                })
+                if (this.currentDiscussion.title === '' || this.currentDiscussion.title === null) {
+                    if (msgObj.type == "input_message_infos") {
+                        // This is a user input
+                        this.changeTitleUsingUserMSG(this.currentDiscussion.id, msgObj.message)
+                    }
+                }
+                console.log("infos", msgObj)
+            }
+            else{
+                this.$refs.toast.showToast("It seems that no model has been loaded. Please download and install a model first, then try again.",4, false)
+                this.isGenerating = false
+                this.setDiscussionLoading(this.currentDiscussion.id, this.isGenerating)
+                this.chime.play()
+            }
        },
        sendMsg(msg) {
            // Sends message to backend
@@ -746,15 +748,14 @@ export default {
            this.chime.play()
        },
        copyToClipBoard(content) {
-            this.isCopiedToClipboard = true
+            this.$refs.toast.showToast("Copied to clipboard successfully")
            nextTick(() => {
                feather.replace()
-
            })
        },
        closeToast() {
-            this.isCopiedToClipboard = false
+            this.showToast = false
        },
@@ -360,7 +360,7 @@ export default {
                console.log("received something");
                if (response.status === 'progress') {
                    console.log(`Progress = ${response.progress}`);
-                    this.progress = response.progress;
+                    model_object.progress = response.progress
                } else if (response.status === 'succeeded') {
                    socket.off('install_progress', progressListener);
                    // Update the isInstalled property of the corresponding model
@@ -373,7 +373,7 @@ export default {
                    // Installation failed or encountered an error
                    model_object.installing = false;
                    this.showProgress = false;
-                    console.error('Installation failed:', message.error);
+                    console.error('Installation failed:', response.error);
                }
            };
@@ -385,7 +385,7 @@ export default {
            console.log("uninstalling model...")
            const progressListener = (response) => {
                if (response.status === 'progress') {
-                    this.progress = message.progress;
+                    this.progress = response.progress;
                } else if (response.status === 'succeeded') {
                    console.log(model_object)
                    // Installation completed
@@ -1,88 +0,0 @@
-[
-    {
-        "md5sum": "81a09a0ddf89690372fc296ff7f625af",
-        "filename": "ggml-gpt4all-j-v1.3-groovy.bin",
-        "filesize": "3785248281",
-        "isDefault": "true",
-        "bestGPTJ": "true",
-        "description": "Current best commercially licensable model based on GPT-J and trained by Nomic AI on the latest curated GPT4All dataset."
-    },
-    {
-        "md5sum": "91f886b68fbce697e9a3cd501951e455",
-        "filename": "ggml-gpt4all-l13b-snoozy.bin",
-        "filesize": "8136770688",
-        "bestLlama": "true",
-        "description": "Current best non-commercially licensable model based on Llama 13b and trained by Nomic AI on the latest curated GPT4All dataset."
-    },
-    {
-        "md5sum": "756249d3d6abe23bde3b1ae272628640",
-        "filename": "ggml-mpt-7b-chat.bin",
-        "filesize": "4854401050",
-        "isDefault": "true",
-        "bestMPT": "true",
-        "requires": "2.4.1",
-        "description": "Current best non-commercially licensable chat model based on MPT and trained by Mosaic ML."
-    },
-    {
-        "md5sum": "879344aaa9d62fdccbda0be7a09e7976",
-        "filename": "ggml-gpt4all-j-v1.2-jazzy.bin",
-        "filesize": "3785248281",
-        "description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v2 GPT4All dataset."
-    },
-    {
-        "md5sum": "61d48a82cb188cceb14ebb8082bfec37",
-        "filename": "ggml-gpt4all-j-v1.1-breezy.bin",
-        "filesize": "3785248281",
-        "description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v1 GPT4All dataset."
-    },
-    {
-        "md5sum": "5b5a3f9b858d33b29b52b89692415595",
-        "filename": "ggml-gpt4all-j.bin",
-        "filesize": "3785248281",
-        "description": "A commercially licensable model based on GPT-J and trained by Nomic AI on the v0 GPT4All dataset."
-    },
-    {
-        "md5sum": "29119f8fa11712704c6b22ac5ab792ea",
-        "filename": "ggml-vicuna-7b-1.1-q4_2.bin",
-        "filesize": "4212859520",
-        "description": "A non-commercially licensable model based on Llama 7b and trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego."
-    },
-    {
-        "md5sum": "95999b7b0699e2070af63bf5d34101a8",
-        "filename": "ggml-vicuna-13b-1.1-q4_2.bin",
-        "filesize": "8136770688",
-        "description": "A non-commercially licensable model based on Llama 13b and trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego."
-    },
-    {
-        "md5sum": "99e6d129745a3f1fb1121abed747b05a",
-        "filename": "ggml-wizardLM-7B.q4_2.bin",
-        "filesize": "4212864640",
-        "description": "A non-commercially licensable model based on Llama 7b and trained by Microsoft and Peking University."
-    },
-    {
-        "md5sum": "6cb4ee297537c9133bddab9692879de0",
-        "filename": "ggml-stable-vicuna-13B.q4_2.bin",
-        "filesize": "8136777088",
-        "description": "A non-commercially licensable model based on Llama 13b and RLHF trained by Stable AI."
-    },
-    {
-        "md5sum": "120c32a51d020066288df045ef5d52b9",
-        "filename": "ggml-mpt-7b-base.bin",
-        "filesize": "4854401028",
-        "requires": "2.4.1",
-        "description": "A commercially licensable model base pre-trained by Mosaic ML."
-    },
-    {
-        "md5sum": "d5eafd5b0bd0d615cfd5fd763f642dfe",
-        "filename": "ggml-nous-gpt4-vicuna-13b.bin",
-        "filesize": "8136777088",
-        "description": "A non-commercially licensable model based on Vicuna 13b, fine-tuned on ~180,000 instructions, trained by Nous Research."
-    },
-    {
-        "md5sum": "1cfa4958f489f0a0d1ffdf6b37322809",
-        "filename": "ggml-mpt-7b-instruct.bin",
-        "filesize": "4854401028",
-        "requires": "2.4.1",
-        "description": "A commericially licensable instruct model based on MPT and trained by Mosaic ML."
-    }
-]