Add prompts previews on hover and click of PromptNode (#91)

* Added Prompt Preview panes

* Update package version and react app
This commit is contained in:
ianarawjo 2023-07-07 20:35:50 -04:00 committed by GitHub
parent e62be7eaf1
commit 318f81e1df
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 115 additions and 52 deletions

View File

@@ -1,15 +1,15 @@
{
"files": {
"main.css": "/static/css/main.7434a74f.css",
"main.js": "/static/js/main.e5607ccc.js",
"main.css": "/static/css/main.26e6dbb2.css",
"main.js": "/static/js/main.0ddb49f0.js",
"static/js/787.4c72bb55.chunk.js": "/static/js/787.4c72bb55.chunk.js",
"index.html": "/index.html",
"main.7434a74f.css.map": "/static/css/main.7434a74f.css.map",
"main.e5607ccc.js.map": "/static/js/main.e5607ccc.js.map",
"main.26e6dbb2.css.map": "/static/css/main.26e6dbb2.css.map",
"main.0ddb49f0.js.map": "/static/js/main.0ddb49f0.js.map",
"787.4c72bb55.chunk.js.map": "/static/js/787.4c72bb55.chunk.js.map"
},
"entrypoints": [
"static/css/main.7434a74f.css",
"static/js/main.e5607ccc.js"
"static/css/main.26e6dbb2.css",
"static/js/main.0ddb49f0.js"
]
}

View File

@@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><script async src="https://www.googletagmanager.com/gtag/js?id=G-RN3FDBLMCR"></script><script>function gtag(){dataLayer.push(arguments)}window.dataLayer=window.dataLayer||[],gtag("js",new Date),gtag("config","G-RN3FDBLMCR")</script><link rel="icon" href="/favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="A visual programming environment for prompt engineering"/><link rel="apple-touch-icon" href="/logo192.png"/><link rel="manifest" href="/manifest.json"/><title>ChainForge</title><script defer="defer" src="/static/js/main.e5607ccc.js"></script><link href="/static/css/main.7434a74f.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><script async src="https://www.googletagmanager.com/gtag/js?id=G-RN3FDBLMCR"></script><script>function gtag(){dataLayer.push(arguments)}window.dataLayer=window.dataLayer||[],gtag("js",new Date),gtag("config","G-RN3FDBLMCR")</script><link rel="icon" href="/favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="A visual programming environment for prompt engineering"/><link rel="apple-touch-icon" href="/logo192.png"/><link rel="manifest" href="/manifest.json"/><title>ChainForge</title><script defer="defer" src="/static/js/main.0ddb49f0.js"></script><link href="/static/css/main.26e6dbb2.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -41,7 +41,7 @@ export default function NodeLabel({ title, nodeId, icon, onEdit, onSave, editabl
useEffect(() => {
if(handleRunClick !== undefined) {
const run_btn = (<button style={{zIndex: 8000}} className="AmitSahoo45-button-3 nodrag" onClick={handleRunClick} onPointerEnter={handleRunHover}>&#9654;</button>);
const run_btn = (<button className="AmitSahoo45-button-3 nodrag" onClick={handleRunClick} onPointerEnter={handleRunHover}>&#9654;</button>);
if (runButtonTooltip)
setRunButton(
<Tooltip label={runButtonTooltip} withArrow arrowSize={6} arrowRadius={2} zIndex={1001} withinPortal={true} >

View File

@@ -1,8 +1,9 @@
import React, { useEffect, useState, useRef, useCallback } from 'react';
import { Handle } from 'react-flow-renderer';
import { Menu, Button, Progress, Textarea } from '@mantine/core';
import { Menu, Button, Progress, Textarea, Text, Popover, Center, Modal, Box } from '@mantine/core';
import { useDisclosure } from '@mantine/hooks';
import { v4 as uuid } from 'uuid';
import { IconSearch } from '@tabler/icons-react';
import { IconSearch, IconList } from '@tabler/icons-react';
import useStore from './store';
import NodeLabel from './NodeLabelComponent'
import TemplateHooks, { extractBracketedSubstrings } from './TemplateHooksComponent'
@@ -43,6 +44,46 @@ const getUniqueLLMMetavarKey = (responses) => {
return `LLM_${i}`;
};
// Minimal value object wrapping one generated prompt string.
// Instances are rendered by the preview popover and the info modal.
class PromptInfo {
  prompt; // the prompt text to display (string)

  constructor(prompt) {
    this.prompt = prompt;
  }
}
// Renders a list of PromptInfo objects as preformatted prompt cards,
// keyed by their position in the list.
const displayPromptInfos = (promptInfos) => {
  return promptInfos.map((info, idx) => {
    return (
      <div key={idx}>
        <pre className='prompt-preview'>{info.prompt}</pre>
      </div>
    );
  });
};
// Popover showing a preview of every prompt that will be sent to the LLMs.
// It opens while the mouse hovers over the list-icon button; `onHover` lets
// the parent refresh the previews, and `onClick` opens the full modal view.
const PromptListPopover = ({ promptInfos, onHover, onClick }) => {
  const [isOpened, { close: closePopover, open: openPopover }] = useDisclosure(false);

  // Notify the parent first (so it can regenerate previews), then show the popover.
  const handleMouseEnter = useCallback(() => {
    onHover();
    openPopover();
  }, [onHover, openPopover]);

  return (
    <Popover width={400} position="right-start" withArrow withinPortal shadow="rgb(38, 57, 77) 0px 10px 30px -14px" key="query-info" opened={isOpened} styles={{dropdown: {maxHeight: '500px', overflowY: 'auto', backgroundColor: '#fff'}}}>
      <Popover.Target>
        <button className='custom-button' onMouseEnter={handleMouseEnter} onMouseLeave={closePopover} onClick={onClick} style={{border:'none'}}>
          <IconList size='12pt' color='gray' style={{marginBottom: '-4px'}} />
        </button>
      </Popover.Target>
      <Popover.Dropdown sx={{ pointerEvents: 'none' }}>
        <Center><Text size='xs' fw={500} color='#666'>Preview of generated prompts ({promptInfos.length} total)</Text></Center>
        {displayPromptInfos(promptInfos)}
      </Popover.Dropdown>
    </Popover>
  );
};
const PromptNode = ({ data, id }) => {
// Get state from the Zustand store:
@@ -68,6 +109,10 @@ const PromptNode = ({ data, id }) => {
// For a way to inspect responses without having to attach a dedicated node
const inspectModal = useRef(null);
// For an info pop-up that shows all the prompts that will be sent off
// NOTE: This is the 'full' version of the PromptListPopover that activates on hover.
const [infoModalOpened, { open: openInfoModal, close: closeInfoModal }] = useDisclosure(false);
// Selecting LLM models to prompt
const [llmItems, setLLMItems] = useState(data.llms || initLLMs.map((i) => ({key: uuid(), settings: getDefaultModelSettings(i.base_model), ...i})));
const [llmItemsCurrState, setLLMItemsCurrState] = useState([]);
@@ -225,33 +270,6 @@ const PromptNode = ({ data, id }) => {
};
get_outputs(templateVars, id);
// Get Pythonic version of the prompt, by adding a $ before any template variables in braces:
// const str_to_py_template_format = toPyTemplateFormat; // (str) => str.replace(/(?<!\\){(.*?)(?<!\\)}/g, "${$1}")
// const to_py_template_format = (str_or_obj) => {
// if (typeof str_or_obj === 'object') {
// let new_obj = { text: str_to_py_template_format(str_or_obj.text), fill_history: {}};
// // Convert fill history vars to py template format
// if (str_or_obj.fill_history) {
// Object.keys(str_or_obj.fill_history).forEach(v => {
// new_obj.fill_history[v] = str_to_py_template_format(str_or_obj.fill_history[v]);
// });
// }
// // Carry all other properties of the object over:
// Object.keys(str_or_obj).forEach(key => {
// if (key !== 'text' && key !== 'fill_history')
// new_obj[key] = str_or_obj[key];
// });
// return new_obj;
// } else
// return str_to_py_template_format(str_or_obj);
// };
// const py_prompt_template = to_py_template_format(promptText);
// Do the same for the vars, since vars can themselves be prompt templates:
// Object.keys(pulled_data).forEach(varname => {
// pulled_data[varname] = pulled_data[varname].map(val => to_py_template_format(val));
// });
return [promptText, pulled_data];
};
@@ -271,6 +289,19 @@ const PromptNode = ({ data, id }) => {
}, rejected);
};
// On hover over the 'info' button, to preview the prompts that will be sent out
// promptPreviews holds PromptInfo wrappers for each concrete prompt string.
const [promptPreviews, setPromptPreviews] = useState([]);
const handlePreviewHover = () => {
// Pull input data and prompt
const [root_prompt, pulled_vars] = pullInputData();
// Ask the backend to expand the template with the pulled variables;
// it resolves to an array of concrete prompt strings.
fetch_from_backend('generatePrompts', {
prompt: root_prompt,
vars: pulled_vars,
}).then(prompts => {
// Wrap each string so the popover/modal renderers can display it.
// NOTE(review): no .catch here — a backend failure leaves stale previews; presumably acceptable for a hover preview.
setPromptPreviews(prompts.map(p => (new PromptInfo(p))));
});
};
// On hover over the 'Run' button, request how many responses are required and update the tooltip. Soft fails.
const handleRunHover = () => {
// Check if there's at least one model in the list; if not, nothing to run on.
@@ -286,12 +317,12 @@
}
// Get input data and prompt
const [py_prompt, pulled_vars] = pullInputData();
const [root_prompt, pulled_vars] = pullInputData();
const llms = llmItemsCurrState.map(item => item.model);
const num_llms = llms.length;
// Fetch response counts from backend
fetchResponseCounts(py_prompt, pulled_vars, llmItemsCurrState, (err) => {
fetchResponseCounts(root_prompt, pulled_vars, llmItemsCurrState, (err) => {
console.warn(err.message); // soft fail
}).then(([counts, total_num_responses]) => {
// Check for empty counts (means no requests will be sent!)
@@ -366,7 +397,7 @@ const PromptNode = ({ data, id }) => {
setJSONResponses([]);
setProgressAnimated(true);
const [py_prompt_template, pulled_data] = pullInputData();
const [prompt_template, pulled_data] = pullInputData();
let FINISHED_QUERY = false;
const rejected = (err) => {
@@ -382,7 +413,7 @@
// Fetch info about the number of queries we'll need to make
const fetch_resp_count = () => fetchResponseCounts(
py_prompt_template, pulled_data, llmItemsCurrState, rejected);
prompt_template, pulled_data, llmItemsCurrState, rejected);
// Initialize progress bars to small amounts
setProgress({ success: 2, error: 0 });
@@ -431,7 +462,7 @@
return fetch_from_backend('queryllm', {
id: id,
llm: llmItemsCurrState, // deep clone it first
prompt: py_prompt_template,
prompt: prompt_template,
vars: pulled_data,
n: numGenerations,
api_keys: (apiKeys ? apiKeys : {}),
@@ -580,8 +611,15 @@ const PromptNode = ({ data, id }) => {
handleRunClick={handleRunClick}
handleRunHover={handleRunHover}
runButtonTooltip={runTooltip}
/>
customButtons={[
<PromptListPopover promptInfos={promptPreviews} onHover={handlePreviewHover} onClick={openInfoModal} />
]} />
<LLMResponseInspectorModal ref={inspectModal} jsonResponses={jsonResponses} prompt={promptText} />
<Modal title={'List of prompts that will be sent to LLMs (' + promptPreviews.length + ' total)'} size='xl' opened={infoModalOpened} onClose={closeInfoModal} styles={{header: {backgroundColor: '#FFD700'}, root: {position: 'relative', left: '-80px'}}}>
<Box size={600} m='lg' mt='xl'>
{displayPromptInfos(promptPreviews)}
</Box>
</Modal>
<Textarea ref={setRef}
className="prompt-field-fixed nodrag nowheel"
minRows="4"

View File

@@ -356,6 +356,18 @@ function run_over_responses(eval_func: (resp: ResponseInfo) => any, responses: A
// ===================
// """
/**
 * Expands a prompt template into the full list of concrete prompts it generates.
 *
 * @param root_prompt The prompt template to start from
 * @param vars a dict of the template variables to fill the prompt template with, by name. (See countQueries docstring for more info).
 * @returns An array of strings representing the prompts that will be sent out. Note that this could include unfilled template vars.
 */
export async function generatePrompts(root_prompt: string, vars: Dict): Promise<string[]> {
  const permutation_gen = new PromptPermutationGenerator(root_prompt);
  // Materialize every permutation, converting each to its string form.
  return Array.from(permutation_gen.generate(vars), (p) => p.toString());
}
/**
* Calculates how many queries we need to make, given the passed prompt and vars.
*

View File

@@ -1,7 +1,7 @@
import { queryLLM, executejs, executepy,
fetchExampleFlow, fetchOpenAIEval, importCache,
exportCache, countQueries, grabResponses,
createProgressFile } from "./backend/backend";
createProgressFile, generatePrompts} from "./backend/backend";
const clone = (obj) => JSON.parse(JSON.stringify(obj));
@@ -11,6 +11,8 @@ async function _route_to_js_backend(route, params) {
return grabResponses(params.responses);
case 'countQueriesRequired':
return countQueries(params.prompt, clone(params.vars), clone(params.llms), params.n, params.id);
case 'generatePrompts':
return generatePrompts(params.prompt, clone(params.vars));
case 'createProgressFile':
return createProgressFile(params.id);
case 'queryllm':

View File

@@ -340,6 +340,17 @@
font-weight: bold;
}
/* Monospace card showing a single generated prompt in the preview
   popover and the "list of prompts" modal. Declarations sorted A–Z. */
.prompt-preview {
  background-color: #ddd;
  border-radius: 6px;
  color: #444;
  font-family: monospace;
  font-size: 10pt;
  margin: 10px 0px;
  padding: 8px;
  white-space: pre-wrap;
}
.small-response {
font-size: 8pt;
font-family: monospace;

View File

@@ -6,7 +6,7 @@ def readme():
setup(
name='chainforge',
version='0.2.0.5',
version='0.2.0.6',
packages=find_packages(),
author="Ian Arawjo",
description="A Visual Programming Environment for Prompt Engineering",