Mirror of https://github.com/ParisNeo/lollms.git, synced 2024-12-19 20:57:58 +00:00

commit d29bde9d0a (parent 4c9592c0b0)

    sync
@@ -3744,7 +3744,7 @@ class APScript(StateMachine):
         """
         return self.personality.fast_gen(prompt=prompt,max_generation_size=max_generation_size,placeholders=placeholders, sacrifice=sacrifice, debug=debug, callback=callback, show_progress=show_progress)
 
-    def mix_it_up(self, prompt: str, models, master_model, max_generation_size: int= None, placeholders: dict = {}, sacrifice: list = ["previous_discussion"], debug: bool = False, callback=None, show_progress=False) -> dict:
+    def mix_it_up(self, prompt: str, models, master_model, nb_rounds=2, max_generation_size: int= None, placeholders: dict = {}, sacrifice: list = ["previous_discussion"], debug: bool = False, callback=None, show_progress=False) -> dict:
         """
         Fast generates text using multiple LLMs with detailed round tracking. Each LLM sees the initial prompt plus the formatted outputs of the previous rounds.
         The master model then completes the job by creating a unique answer inspired by the last round outputs.
@@ -3769,22 +3769,22 @@ class APScript(StateMachine):
             'initial_prompt': prompt,
             'rounds': []
         }
-
-        for idx, model_id in enumerate(models):
-            binding_name, model_name = model_id.split("::")
-            self.select_model(binding_name, model_name)
+        for rounds in range(nb_rounds):
+            for idx, model_id in enumerate(models):
+                binding_name, model_name = model_id.split("::")
+                self.select_model(binding_name, model_name)
 
-            # Concatenate previous outputs with formatting
-            formatted_previous_outputs = "\n".join([f"Model {m}: {o}" for m, o in previous_outputs])
-            round_prompt = context + "\n" + formatted_previous_outputs
-            output = self.fast_gen(prompt=round_prompt, max_generation_size=max_generation_size, placeholders=placeholders, sacrifice=sacrifice, debug=debug, callback=callback, show_progress=show_progress)
+                # Concatenate previous outputs with formatting
+                formatted_previous_outputs = "\n".join([f"Model {m}: {o}" for m, o in previous_outputs])
+                round_prompt = context + "\n" + formatted_previous_outputs
+                output = self.fast_gen(prompt=round_prompt, max_generation_size=max_generation_size, placeholders=placeholders, sacrifice=sacrifice, debug=debug, callback=callback, show_progress=show_progress)
 
-            rounds_info['rounds'].append({
-                'model': model_id,
-                'round_prompt': round_prompt,
-                'output': output
-            })
-            previous_outputs.append((model_id, output)) # Update for the next round
+                rounds_info['rounds'].append({
+                    'model': model_id,
+                    'round_prompt': round_prompt,
+                    'output': output
+                })
+                previous_outputs.append((model_id, output)) # Update for the next round
 
         # Final round with the master model
         self.select_model(*master_model.split("::"))
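The hunk ends right after the master model is selected, so the finalization code is not part of this excerpt. As a hedged sketch only, assuming the same fast_gen helper and an invented synthesis instruction, the docstring's final step ("a unique answer inspired by the last round outputs") could plausibly continue like this; the final_output key and the prompt wording are hypothetical, not taken from the commit:

    # Sketch, NOT part of the commit: hypothetical continuation after
    # self.select_model(*master_model.split("::")).
    formatted_previous_outputs = "\n".join([f"Model {m}: {o}" for m, o in previous_outputs])
    # Hypothetical synthesis instruction appended to the shared context.
    master_prompt = context + "\n" + formatted_previous_outputs + "\nWrite one final answer that merges the best of the candidate answers above."
    final_output = self.fast_gen(prompt=master_prompt, max_generation_size=max_generation_size, placeholders=placeholders, sacrifice=sacrifice, debug=debug, callback=callback, show_progress=show_progress)
    rounds_info['final_output'] = final_output  # hypothetical key; the actual return shape is not shown in this diff
    return rounds_info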
|
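For orientation, a minimal usage sketch of the new signature from inside an APScript subclass. The binding::model ids are placeholders, nb_rounds=2 mirrors the new default, and only the 'initial_prompt' and 'rounds' keys of the returned dict are visible in this diff:

    # Hypothetical call site inside an APScript personality script.
    result = self.mix_it_up(
        prompt="Compare two approaches to text summarization.",
        models=[
            "ollama::mistral",        # placeholder binding::model ids
            "open_ai::gpt-4o-mini",   # placeholder
        ],
        master_model="ollama::mistral",   # placeholder
        nb_rounds=2,   # new parameter: every model answers in each of the 2 rounds
    )
    print(result['initial_prompt'])
    for r in result['rounds']:   # each entry records model, round_prompt, and output
        print(r['model'], "->", r['output'][:80])

Each round appends every model's answer to previous_outputs, so later models (and later rounds) see all earlier answers formatted as "Model <id>: <output>" lines before the master model produces the final synthesis.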