diff --git a/lollms_core b/lollms_core
index 1244a867..7d368776 160000
--- a/lollms_core
+++ b/lollms_core
@@ -1 +1 @@
-Subproject commit 1244a8677e2be9c9c3e716b4ec5d573ecccc5d5e
+Subproject commit 7d368776cef58e56b3a9a8371e0244d0440c53ab
diff --git a/lollms_webui.py b/lollms_webui.py
index dd1f3561..4b81c5ca 100644
--- a/lollms_webui.py
+++ b/lollms_webui.py
@@ -1149,7 +1149,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
                     self.set_active_model(self.routing_model)
                     models = [f"{k}" for k,v in self.config.smart_routing_models_description.items()]
-                    output_id, explanation = self.personality.multichoice_question("Select most suitable model to answer the user request given the context. Answer with the selected model index followed by an explanation in a new line.", [f"{k}: {v}" for k,v in self.config.smart_routing_models_description.items()], "user request:" + prompt, return_explanation=True)
+                    output_id, explanation = self.personality.multichoice_question("Select most suitable model to answer the user request given the context. Answer with the selected model index followed by an explanation in a new line.", [f"{k}: {v}" for k,v in self.config.smart_routing_models_description.items()], "!@>user prompt:" + context_details["prompt"], return_explanation=True)
                     if output_id >=0 and output_id
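
In plain terms, the second hunk changes the context passed to `multichoice_question` for smart routing: instead of the raw `prompt` variable with a `user request:` prefix, it now passes `context_details["prompt"]` with the lollms `!@>user prompt:` template prefix. The sketch below is a minimal, self-contained illustration of that selection flow under stated assumptions: `DummyPersonality`, `route_request`, the assumed argument order of `multichoice_question`, and the fallback behavior are all illustrative stand-ins, not the actual lollms implementation.

```python
# Hedged sketch of the smart-routing selection flow after this change.
# DummyPersonality.multichoice_question uses an ASSUMED signature
# (question, options, context, return_explanation); the real lollms API
# may differ in details.
from typing import Dict, List, Tuple


class DummyPersonality:
    """Stand-in that always picks the first option (assumption, for illustration)."""

    def multichoice_question(
        self,
        question: str,
        options: List[str],
        context: str,
        return_explanation: bool = False,
    ) -> Tuple[int, str]:
        return 0, "the smallest model is sufficient for a short factual query"


def route_request(
    personality: DummyPersonality,
    models_description: Dict[str, str],
    context_details: Dict[str, str],
) -> str:
    """Pick a model name from the description table, mirroring the patched call."""
    models = list(models_description.keys())
    output_id, explanation = personality.multichoice_question(
        "Select most suitable model to answer the user request given the context. "
        "Answer with the selected model index followed by an explanation in a new line.",
        [f"{k}: {v}" for k, v in models_description.items()],
        # The patch switches the context from the raw prompt to the
        # templated prompt taken from context_details.
        "!@>user prompt:" + context_details["prompt"],
        return_explanation=True,
    )
    if 0 <= output_id < len(models):
        return models[output_id]
    return models[0]  # fall back to the first model (illustrative assumption)


if __name__ == "__main__":
    descriptions = {"small-model": "fast, cheap", "large-model": "slow, accurate"}
    chosen = route_request(DummyPersonality(), descriptions, {"prompt": "What is 2 + 2?"})
    print(chosen)  # -> small-model
```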