Make special #vars work on chat nodes

Ian Arawjo 2023-08-01 14:56:19 -04:00
parent 2ab50142bb
commit 3fbc88abb8
9 changed files with 73 additions and 56 deletions
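For context: in ChainForge prompt templates, a variable written with a leading # (e.g. {#topic}) is a "special" variable, filled from the prompt's carried fill history or metavariables rather than from a directly connected input. A rough illustration of the behavior this commit enables on chat nodes (the variable name and values here are made up):

  Chat node template:            "Now summarize what you said about {#topic}."
  Vars carried in chat history:  { topic: "photosynthesis" }
  Concrete prompt sent to LLM:   "Now summarize what you said about photosynthesis."

Previously, special vars were only resolved while generating prompt permutations; the changes below also resolve them inside the chat-history loops, so values carried along with a chat history can fill them.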


@@ -1,15 +1,15 @@
 {
   "files": {
     "main.css": "/static/css/main.a4e8271c.css",
-    "main.js": "/static/js/main.05c73878.js",
+    "main.js": "/static/js/main.69c090b9.js",
     "static/js/787.4c72bb55.chunk.js": "/static/js/787.4c72bb55.chunk.js",
     "index.html": "/index.html",
     "main.a4e8271c.css.map": "/static/css/main.a4e8271c.css.map",
-    "main.05c73878.js.map": "/static/js/main.05c73878.js.map",
+    "main.69c090b9.js.map": "/static/js/main.69c090b9.js.map",
     "787.4c72bb55.chunk.js.map": "/static/js/787.4c72bb55.chunk.js.map"
   },
   "entrypoints": [
     "static/css/main.a4e8271c.css",
-    "static/js/main.05c73878.js"
+    "static/js/main.69c090b9.js"
   ]
 }


@@ -1 +1 @@
-<!doctype html><html lang="en"><head><meta charset="utf-8"/><script async src="https://www.googletagmanager.com/gtag/js?id=G-RN3FDBLMCR"></script><script>function gtag(){dataLayer.push(arguments)}window.dataLayer=window.dataLayer||[],gtag("js",new Date),gtag("config","G-RN3FDBLMCR")</script><link rel="icon" href="/favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="A visual programming environment for prompt engineering"/><link rel="apple-touch-icon" href="/logo192.png"/><link rel="manifest" href="/manifest.json"/><title>ChainForge</title><script defer="defer" src="/static/js/main.05c73878.js"></script><link href="/static/css/main.a4e8271c.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
+<!doctype html><html lang="en"><head><meta charset="utf-8"/><script async src="https://www.googletagmanager.com/gtag/js?id=G-RN3FDBLMCR"></script><script>function gtag(){dataLayer.push(arguments)}window.dataLayer=window.dataLayer||[],gtag("js",new Date),gtag("config","G-RN3FDBLMCR")</script><link rel="icon" href="/favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="A visual programming environment for prompt engineering"/><link rel="apple-touch-icon" href="/logo192.png"/><link rel="manifest" href="/manifest.json"/><title>ChainForge</title><script defer="defer" src="/static/js/main.69c090b9.js"></script><link href="/static/css/main.a4e8271c.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>


@@ -454,37 +454,40 @@ export async function countQueries(prompt: string,
   const cache_llm_responses = load_from_cache(cache_filename);
 
   // Iterate through all prompt permutations and check how many responses there are in the cache for each prompt
-  all_prompt_permutations.forEach(perm => {
-    const prompt_str = perm.toString();
+  all_prompt_permutations.forEach(prompt => {
+    let prompt_str = prompt.toString();
     add_to_num_responses_req(llm_key, n * chat_hists.length);
 
-    if (prompt_str in cache_llm_responses) {
-      // Get the cache of responses with respect to this prompt, + normalize format so it's always an array (of size >= 0)
-      const cache_bucket = cache_llm_responses[prompt_str];
-      let cached_resps: LLMResponseObject[] = Array.isArray(cache_bucket) ? cache_bucket : (cache_bucket === undefined ? [] : [ cache_bucket ]);
-
-      // For each chat history, find an individual response obj that matches it
-      // (chat_hist may be undefined, in which case the cached response obj must similarly have an undefined chat history in order to match):
-      for (const chat_hist of chat_hists) {
-        let found_resp = false;
-        for (const cached_resp of cached_resps) {
-          if (isEqualChatHistory(cached_resp.chat_history, chat_hist?.messages)) {
-            // Match found. Note it and count response length:
-            found_resp = true;
-            const num_resps = cached_resp.responses.length;
-            if (n > num_resps)
-              add_to_missing_queries(llm_key, prompt_str, n - num_resps);
-            break;
-          }
-        }
-        if (!found_resp)
-          add_to_missing_queries(llm_key, prompt_str, n);
-      }
-    } else {
-      // There was no cached item for this query; add it as missing:
-      add_to_missing_queries(llm_key, prompt_str, n * chat_hists.length);
+    // For each chat history, find an individual response obj that matches it
+    // (chat_hist may be undefined, in which case the cached response obj must similarly have an undefined chat history in order to match):
+    for (const chat_hist of chat_hists) {
+      // If there's chat history, we need to fill any special (#) vars from the carried chat_history vars and metavars:
+      if (chat_hist !== undefined) {
+        prompt.fill_special_vars({...chat_hist?.fill_history, ...chat_hist?.metavars});
+        prompt_str = prompt.toString();
+      }
+
+      // Get the cache of responses with respect to this prompt, + normalize format so it's always an array (of size >= 0)
+      const cache_bucket = cache_llm_responses[prompt_str];
+      let cached_resps: LLMResponseObject[] = Array.isArray(cache_bucket) ? cache_bucket : (cache_bucket === undefined ? [] : [ cache_bucket ]);
+
+      let found_resp = false;
+      for (const cached_resp of cached_resps) {
+        if (isEqualChatHistory(cached_resp.chat_history, chat_hist?.messages)) {
+          // Match found. Note it and count response length:
+          found_resp = true;
+          const num_resps = cached_resp.responses.length;
+          if (n > num_resps)
+            add_to_missing_queries(llm_key, prompt_str, n - num_resps);
+          break;
+        }
+      }
+
+      // If a cached response wasn't found, add n required:
+      if (!found_resp)
+        add_to_missing_queries(llm_key, prompt_str, n);
     }
   });
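The practical effect in countQueries: the cache lookup key (prompt_str) is now computed after special vars are filled from each carried chat history, so it matches the prompt string the query pipeline will actually send. A minimal sketch of the flow, with a hypothetical template and values (assuming PromptTemplate is constructed from a template string, as elsewhere in this codebase):

  let prompt = new PromptTemplate("Continue our chat about {#topic}.");
  prompt.fill_special_vars({ topic: "dragons" });  // values merged from chat_hist.fill_history and chat_hist.metavars
  const prompt_str = prompt.toString();            // "Continue our chat about dragons." — the key looked up in cache_llm_responses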


@@ -148,21 +148,28 @@ export class PromptPipeline {
     let num_queries_sent = -1;
 
     // Generate concrete prompts one by one. Yield response from the cache or make async call to LLM.
-    for (const prompt of this.gen_prompts(vars)) {
-      if (!prompt.is_concrete())
-        throw Error(`Cannot send a prompt '${prompt}' to LLM: Prompt is a template.`)
-
-      const prompt_str = prompt.toString();
+    for (let prompt of this.gen_prompts(vars)) {
+      let prompt_str = prompt.toString();
       const info = prompt.fill_history;
       const metavars = prompt.metavars;
 
-      // Get the cache of responses with respect to this prompt, + normalize format so it's always an array (of size >= 0)
-      const cache_bucket = responses[prompt_str];
-      let cached_resps: LLMResponseObject[] = Array.isArray(cache_bucket) ? cache_bucket : (cache_bucket === undefined ? [] : [ cache_bucket ]);
-
       // Loop over any present chat histories. (If none, will have a single pass with 'undefined' as chat_history value.)
       for (const chat_history of _chat_histories) {
+        // If there's chat history, we need to fill any special (#) vars from the carried chat_history vars and metavars:
+        if (chat_history !== undefined) {
+          prompt.fill_special_vars({...chat_history?.fill_history, ...chat_history?.metavars});
+          prompt_str = prompt.toString();
+        }
+
+        if (!prompt.is_concrete())
+          throw Error(`Cannot send a prompt '${prompt}' to LLM: Prompt is a template.`)
+
+        // Get the cache of responses with respect to this prompt, + normalize format so it's always an array (of size >= 0)
+        const cache_bucket = responses[prompt_str];
+        let cached_resps: LLMResponseObject[] = Array.isArray(cache_bucket) ? cache_bucket : (cache_bucket === undefined ? [] : [ cache_bucket ]);
+
         // Check if there's a cached response with the same prompt + (if present) chat history:
         let cached_resp: LLMResponseObject | undefined = undefined;
         let cached_resp_idx: number = -1;
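One subtlety worth noting in both call sites: the argument is built with {...chat_history?.fill_history, ...chat_history?.metavars}, so by JavaScript spread semantics a metavariable overrides a same-named fill-history variable. Illustratively (hypothetical values):

  // With fill_history = { topic: "cats" } and metavars = { topic: "dogs" },
  // {...fill_history, ...metavars} yields { topic: "dogs" }, so {#topic} fills with "dogs".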


@@ -272,6 +272,30 @@ export class PromptTemplate {
     return filled_pt;
   }
 
+  /**
+   * Fills in any 'special' variables (those prefixed with #), using the passed fill_history dict.
+   * Modifies the prompt template in place.
+   * @param fill_history A fill history dict.
+   */
+  fill_special_vars(fill_history: {[key: string]: any}): void {
+    // Special variables {#...} denote filling a variable from a matching var in fill_history or metavars.
+    // Find any special variables:
+    const unfilled_vars = (new StringTemplate(this.template)).get_vars();
+    let special_vars_to_fill: {[key: string]: string} = {};
+    for (const v of unfilled_vars) {
+      if (v.length > 0 && v[0] === '#') {  // special template variables must begin with #
+        const svar = v.substring(1);
+        if (svar in fill_history)
+          special_vars_to_fill[v] = fill_history[svar];
+        else
+          console.warn(`Could not find a value to fill special var ${v} in prompt template.`);
+      }
+    }
+
+    // Fill any special variables, using the fill_history dict passed to this method:
+    if (Object.keys(special_vars_to_fill).length > 0)
+      this.template = new StringTemplate(this.template).safe_substitute(special_vars_to_fill);
+  }
 }
 
 export class PromptPermutationGenerator {

@@ -380,24 +404,7 @@ export class PromptPermutationGenerator {
     }
 
     for (let p of this._gen_perm(template, Object.keys(paramDict), paramDict)) {
-      // Special variables {#...} denotes filling a variable from a matching var in fill_history or metavars.
-      // Find any special variables:
-      const unfilled_vars = (new StringTemplate(p.template)).get_vars();
-      let special_vars_to_fill: {[key: string]: string} = {};
-      for (const v of unfilled_vars) {
-        if (v.length > 0 && v[0] === '#') { // special template variables must begin with #
-          const svar = v.substring(1);
-          if (svar in p.fill_history)
-            special_vars_to_fill[v] = p.fill_history[svar];
-          else if (svar in p.metavars)
-            special_vars_to_fill[v] = p.metavars[svar];
-          else
-            console.warn(`Could not find a value to fill special var ${v} in prompt template.`);
-        }
-      }
-      // Fill any special variables, using the fill history of the template in question:
-      if (Object.keys(special_vars_to_fill).length > 0)
-        p.template = new StringTemplate(p.template).safe_substitute(special_vars_to_fill);
+      p.fill_special_vars({...p.fill_history, ...p.metavars});
 
       // Yield the final prompt template
       yield p;
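A short usage sketch of the new method (hypothetical template; assuming PromptTemplate is constructed from a template string, as elsewhere in this codebase):

  const t = new PromptTemplate("You are {persona}. Explain {#subject} briefly.");
  t.fill_special_vars({ subject: "entropy" });
  t.toString();  // "You are {persona}. Explain entropy briefly." — ordinary {vars} are left for later filling

Note that fill_special_vars only consults the dict it is given; callers merge fill_history and metavars themselves, as PromptPermutationGenerator now does with {...p.fill_history, ...p.metavars}.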


@@ -6,7 +6,7 @@ def readme():
 setup(
     name='chainforge',
-    version='0.2.5.1',
+    version='0.2.5.2',
     packages=find_packages(),
     author="Ian Arawjo",
     description="A Visual Programming Environment for Prompt Engineering",