mirror of https://github.com/ParisNeo/lollms.git (synced 2025-02-02 09:18:16 +00:00)

enhanced generation

This commit is contained in:
parent 113e5531e5
commit dcd80a126c
@@ -68,7 +68,7 @@ class AudioRecorder:
     def start_recording(self):
         if self.whisper_model is None:
             self.lollmsCom.info("Loading whisper model")
-            self.whisper_model=whisper.load_model("base")
+            self.whisper_model=whisper.load_model("base.en")
         try:
             self.is_recording = True
             self.audio_stream = pyaudio.PyAudio().open(
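
For reference, a minimal standalone sketch of the lazy-loading pattern this hunk touches, assuming the openai-whisper and pyaudio packages; the class wiring and the lollmsCom logger are simplified stand-ins, not the project's actual API:

import whisper
import pyaudio

class RecorderSketch:
    """Illustrative stand-in for AudioRecorder.start_recording."""
    def __init__(self):
        self.whisper_model = None
        self.is_recording = False
        self.audio_stream = None

    def start_recording(self):
        # Load the English-only "base.en" checkpoint once, on first use.
        if self.whisper_model is None:
            print("Loading whisper model")
            self.whisper_model = whisper.load_model("base.en")
        try:
            self.is_recording = True
            # 16 kHz mono int16 input stream; whisper works on 16 kHz audio.
            self.audio_stream = pyaudio.PyAudio().open(
                format=pyaudio.paInt16,
                channels=1,
                rate=16000,
                input=True,
                frames_per_buffer=1024,
            )
        except Exception as ex:
            self.is_recording = False
            print(f"Could not open audio stream: {ex}")

Per the whisper model card, the ".en" variants generally perform better on English-only input at the same model size, which is the likely motivation for the "base" to "base.en" switch.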
@@ -123,6 +123,7 @@ class AudioRecorder:
            # Convert to float
            audio_data = self.audio_frames.astype(np.float32)

            # Transcribe the audio using the whisper model
            text = self.whisper_model.transcribe(audio_data[non_silent_start:non_silent_end])
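
A self-contained sketch of the transcription step above. The numpy buffer and the non_silent_start/non_silent_end indices are placeholders, and the division by 32768 is an assumption (whisper expects float32 audio in roughly [-1, 1] at 16 kHz), not something shown in the hunk:

import numpy as np
import whisper

model = whisper.load_model("base.en")

# Stand-in for the recorded frames: two seconds of silence at 16 kHz, int16.
audio_frames = np.zeros(32000, dtype=np.int16)
non_silent_start, non_silent_end = 0, len(audio_frames)

# Convert to float and scale into [-1.0, 1.0] before handing it to whisper.
audio_data = audio_frames.astype(np.float32) / 32768.0

# Transcribe only the non-silent slice; the result dict carries the text.
result = model.transcribe(audio_data[non_silent_start:non_silent_end])
print(result["text"])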
@@ -715,7 +715,7 @@ Date: {{date}}
            idx = pth.index("uploads")
            pth = "/".join(pth[idx:])
            self.new_message("",MSG_TYPE.MSG_TYPE_FULL)
-            output = f'<img src="{pth}" width="300">\n\n'
+            output = f'<img src="{pth}" width="800">\n\n'
            self.full(output)

        if self.model.binding_type not in [BindingType.TEXT_IMAGE, BindingType.TEXT_IMAGE_VIDEO]:
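
A hypothetical helper illustrating the image-message step above: the upload path is trimmed so it starts at the "uploads" folder, then wrapped in an <img> tag at the new 800 px width. build_image_markup is an illustrative name; in the hunk this is done inline and the result is sent through self.new_message/self.full:

def build_image_markup(path: str, width: int = 800) -> str:
    # Keep only the part of the path from "uploads" onward so the tag points
    # at a server-relative URL instead of an absolute filesystem path.
    parts = path.replace("\\", "/").split("/")
    idx = parts.index("uploads")
    rel = "/".join(parts[idx:])
    return f'<img src="{rel}" width="{width}">\n\n'

print(build_image_markup("/home/user/lollms/uploads/sample.png"))
# <img src="uploads/sample.png" width="800">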
@@ -576,18 +576,22 @@ class PromptReshaper:
        def fill_template(template, data):
            for key, value in data.items():
                placeholder = "{{" + key + "}}"
                template = template.replace(placeholder, value)
                n_text_tokens = len(tokenize(template))
                if key in place_holders_to_sacrifice:
                    n_remaining = max_nb_tokens - n_text_tokens
                    t_value = tokenize(value)
                    n_value = len(t_value)
                    if n_value<n_remaining:
                        template = template.replace(placeholder, value)
                    else:
                        value = detokenize(t_value[-n_remaining:])
                        template = template.replace(placeholder, value)
                else:
                    template = template.replace(placeholder, value)
            return template

        if max_nb_tokens-all_count>0 or len(place_holders_to_sacrifice)==0:
            return fill_template(self.template, placeholders)
        else:
            to_remove = -int((max_nb_tokens - all_count)/len(place_holders_to_sacrifice))
            for placeholder, text in placeholders.items():
                if placeholder in place_holders_to_sacrifice:
                    text_tokens = tokenize(text)[to_remove:]
                    placeholders[placeholder]=detokenize(text_tokens)
            return fill_template(self.template, placeholders)
        return fill_template(self.template, placeholders)
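
To make the truncation logic in this hunk easier to follow, here is a self-contained sketch under simplified assumptions: a whitespace tokenizer stands in for the model binding's real tokenize/detokenize, and the function below mirrors the idea (placeholders marked as sacrificable are cut down to their last tokens when the filled template would exceed the budget) rather than reproducing PromptReshaper.build exactly:

def tokenize(text):
    # Whitespace tokenizer used as a stand-in for the binding's tokenizer.
    return text.split()

def detokenize(tokens):
    return " ".join(tokens)

def fill_template(template, data, place_holders_to_sacrifice, max_nb_tokens):
    for key, value in data.items():
        placeholder = "{{" + key + "}}"
        n_text_tokens = len(tokenize(template))
        if key in place_holders_to_sacrifice:
            # Budget left for this placeholder once the rest of the template is counted.
            n_remaining = max(max_nb_tokens - n_text_tokens, 0)
            t_value = tokenize(value)
            if len(t_value) > n_remaining:
                # Keep only the tail of the value (the most recent tokens).
                value = detokenize(t_value[-n_remaining:]) if n_remaining else ""
        template = template.replace(placeholder, value)
    return template

prompt = fill_template(
    "{{conditionning}}\n{{discussion}}\nAnswer:",
    {
        "conditionning": "You are a helpful assistant.",
        "discussion": "user: hello there, can you help me? " * 30,
    },
    place_holders_to_sacrifice=["discussion"],
    max_nb_tokens=40,
)
print(len(tokenize(prompt)))  # stays at or under the 40-token budget

In the hunk itself, when the total token count already fits (or there is nothing to sacrifice) the template is filled directly; otherwise each sacrificable placeholder first loses an equal share of the overflow from its beginning via tokenize(text)[to_remove:], keeping the most recent tokens, before fill_template runs.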