Tested and working JS Evaluator execution.

Ian Arawjo 2023-06-27 13:03:33 -04:00
parent 11a896d8c2
commit 5276f683bf
5 changed files with 45 additions and 20 deletions

View File

@@ -106,9 +106,14 @@ const App = () => {
const { x, y } = getViewportCenter();
addNode({ id: 'promptNode-'+Date.now(), type: 'prompt', data: { prompt: '' }, position: {x: x-200, y:y-100} });
};
const addEvalNode = (event) => {
const addEvalNode = (progLang) => {
const { x, y } = getViewportCenter();
addNode({ id: 'evalNode-'+Date.now(), type: 'evaluator', data: { code: "def evaluate(response):\n return len(response.text)" }, position: {x: x-200, y:y-100} });
let code = "";
if (progLang === 'python')
code = "def evaluate(response):\n return len(response.text)";
else if (progLang === 'javascript')
code = "function evaluate(resp) {\n return resp.text.length;\n}";
addNode({ id: 'evalNode-'+Date.now(), type: 'evaluator', data: { language: progLang, code: code }, position: {x: x-200, y:y-100} });
};
const addVisNode = (event) => {
const { x, y } = getViewportCenter();
@@ -465,7 +470,8 @@ const App = () => {
<Menu.Dropdown>
<Menu.Item onClick={addTextFieldsNode} icon={<IconTextPlus size="16px" />}> TextFields </Menu.Item>
<Menu.Item onClick={addPromptNode} icon={'💬'}> Prompt Node </Menu.Item>
<Menu.Item onClick={addEvalNode} icon={<IconTerminal size="16px" />}> Evaluator Node </Menu.Item>
<Menu.Item onClick={() => addEvalNode('javascript')} icon={<IconTerminal size="16px" />}> JavaScript Evaluator Node </Menu.Item>
<Menu.Item onClick={() => addEvalNode('python')} icon={<IconTerminal size="16px" />}> Python Evaluator Node </Menu.Item>
<Menu.Item onClick={addVisNode} icon={'📊'}> Vis Node </Menu.Item>
<Menu.Item onClick={addInspectNode} icon={'🔍'}> Inspect Node </Menu.Item>
<Menu.Item onClick={addCsvNode} icon={<IconCsv size="16px" />}> CSV Node </Menu.Item>

View File

@@ -9,6 +9,7 @@ import LLMResponseInspectorModal from './LLMResponseInspectorModal';
// Ace code editor
import AceEditor from "react-ace";
import "ace-builds/src-noconflict/mode-python";
import "ace-builds/src-noconflict/mode-javascript";
import "ace-builds/src-noconflict/theme-xcode";
import "ace-builds/src-noconflict/ext-language_tools";
import fetch_from_backend from './fetch_from_backend';
@@ -28,8 +29,14 @@ const EvaluatorNode = ({ data, id }) => {
// For a way to inspect responses without having to attach a dedicated node
const inspectModal = useRef(null);
// The programming language for the editor. Also determines what 'execute'
// function will ultimately be called.
const [progLang, setProgLang] = useState(data.language || 'python');
// The text in the code editor.
const [codeText, setCodeText] = useState(data.code);
const [codeTextOnLastRun, setCodeTextOnLastRun] = useState(false);
const [lastRunLogs, setLastRunLogs] = useState("");
const [lastResponses, setLastResponses] = useState([]);
const [lastRunSuccess, setLastRunSuccess] = useState(true);
@@ -85,7 +92,8 @@ const EvaluatorNode = ({ data, id }) => {
}
// Double-check that the code includes an 'evaluate' function:
if (codeText.search(/def\s+evaluate\s*(.*):/) === -1) {
const find_evalfunc_regex = progLang === 'python' ? /def\s+evaluate\s*(.*):/ : /function\s+evaluate\s*(.*)/;
if (codeText.search(find_evalfunc_regex) === -1) {
const err_msg = `Could not find required function 'evaluate'. Make sure you have defined an 'evaluate' function.`;
setStatus('error');
alertModal.current.trigger(err_msg);
@@ -107,11 +115,12 @@ const EvaluatorNode = ({ data, id }) => {
// Run evaluator in backend
const codeTextOnRun = codeText + '';
fetch_from_backend('execute', {
const execute_route = (progLang === 'python') ? 'execute' : 'executejs';
fetch_from_backend(execute_route, {
id: id,
code: codeTextOnRun,
scope: mapScope,
responses: input_node_ids,
scope: mapScope,
reduce_vars: [],
script_paths: script_paths,
}, rejected).then(function(json) {
@@ -163,7 +172,10 @@ const EvaluatorNode = ({ data, id }) => {
return (
<div className="evaluator-node cfnode">
<NodeLabel title={data.title || 'Python Evaluator Node'}
<NodeLabel title={data.title ||
progLang === 'python' ?
'Python Evaluator Node'
: 'JavaScript Evaluator Node' }
nodeId={id}
onEdit={hideStatusIndicator}
icon={<IconTerminal size="16px" />}
@@ -197,7 +209,7 @@ const EvaluatorNode = ({ data, id }) => {
{/* <span className="code-style">response</span>: */}
<div className="ace-editor-container nodrag">
<AceEditor
mode="python"
mode={progLang}
theme="xcode"
onChange={handleCodeChange}
value={data.code}
@@ -206,6 +218,7 @@ const EvaluatorNode = ({ data, id }) => {
width='100%'
height='100px'
style={{minWidth:'310px'}}
setOptions={{useWorker: false}}
tabSize={2}
onLoad={editorInstance => { // Make Ace Editor div resizeable.
editorInstance.container.style.resize = "both";
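For reference, the default JavaScript template seeded by addEvalNode (and accepted by the find_evalfunc_regex check above) unescapes to an ordinary function over the response object. A minimal sketch, assuming the response object exposes a text string just as in the Python template:

function evaluate(resp) {
  // Score each response; here, simply its length in characters.
  return resp.text.length;
}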

View File

@@ -3,7 +3,7 @@
*/
import { LLM } from '../models';
import { expect, test } from '@jest/globals';
import { queryLLM, execute, ResponseInfo } from '../backend';
import { queryLLM, executejs, ResponseInfo } from '../backend';
import { StandardizedLLMResponse } from '../typing';
import StorageCache from '../cache';
@@ -48,7 +48,7 @@ test('run evaluate func over responses', async () => {
// `;
// Execute the code, and map the evaluate function over all responses
const {responses, logs, error} = await execute('evalid', code, ['dummy_response_id'], 'response');
const {responses, logs, error} = await executejs('evalid', code, ['dummy_response_id'], 'response');
// There should be no errors
if (error)
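The renamed call above takes the evaluator node id, the code (a string or a function over a ResponseInfo), one or more cache'd response ids, and a scope, and resolves to an object carrying responses, logs, and error. A rough sketch of the full call shape inside an async Jest test, assuming a dummy response has already been stored in the cache the way the surrounding test sets it up:

import { expect, test } from '@jest/globals';
import { executejs } from '../backend';

test('run js evaluate func over responses', async () => {
  const code = "function evaluate(r) { return r.text.length; }";
  const { responses, logs, error } = await executejs(
    'evalid',               // evaluator node id
    code,                   // code string, or a (rinfo) => any function
    ['dummy_response_id'],  // cache'd response id(s) to run on
    'response');            // scope: 'response' or 'batch'
  expect(error).toBeFalsy();
  expect(responses).toBeDefined();
});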

View File

@@ -642,20 +642,22 @@ export async function queryLLM(id: string,
* @param response_ids the cache'd response to run on, which must be a unique ID or list of unique IDs of cache'd data
* @param scope the scope of responses to run on --a single response, or all across each batch. (If batch, evaluate() func has access to 'responses'.)
*/
export async function execute(id: string,
code: string | ((rinfo: ResponseInfo) => any),
response_ids: string | string[],
scope: 'response' | 'batch'): Promise<Dict> {
export async function executejs(id: string,
code: string | ((rinfo: ResponseInfo) => any),
response_ids: string | string[],
scope: 'response' | 'batch'): Promise<Dict> {
// Check format of response_ids
if (!Array.isArray(response_ids))
response_ids = [ response_ids ];
response_ids = response_ids as Array<string>;
console.log('executing js');
// const iframe = document.createElement('iframe');
// Instantiate the evaluator function by eval'ing the passed code
// DANGER DANGER!!
let iframe: HTMLIFrameElement | undefined;
let iframe: HTMLElement | undefined;
if (typeof code === 'string') {
try {
/*
@@ -667,7 +669,7 @@ export async function execute(id: string,
The Evaluate node in the front-end has a hidden iframe with the following id.
We need to get this iframe element.
*/
let iframe = document.getElementById(`${id}-iframe`);
iframe = document.getElementById(`${id}-iframe`);
if (!iframe)
throw new Error("Could not find iframe sandbox for evaluator node.");
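The lookup above relies on each Evaluator node rendering a hidden iframe whose DOM id is the node id followed by '-iframe'. The actual markup lives in EvaluatorNode.js and is not part of this diff; purely as an illustration of what executejs expects to find in the document:

{/* illustrative markup, not the actual EvaluatorNode.js source */}
<iframe id={`${id}-iframe`} style={{display: 'none'}} />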

View File

@@ -1,5 +1,4 @@
// import { queryLLM, execute } from "./backend/backend.ts";
import { queryLLM, execute } from "./backend/backend";
import { queryLLM, executejs } from "./backend/backend";
const BACKEND_TYPES = {
FLASK: 'flask',
@@ -15,7 +14,7 @@ async function _route_to_js_backend(route, params) {
case 'queryllm':
return queryLLM(...Object.values(params));
case 'executejs':
return execute(...Object.values(params));
return executejs(params.id, params.code, params.responses, params.scope);
default:
throw new Error(`Could not find backend function for route named ${route}`);
}
@@ -30,6 +29,11 @@ async function _route_to_js_backend(route, params) {
*/
export default function fetch_from_backend(route, params, rejected) {
rejected = rejected || ((err) => {throw new Error(err)});
if (route === 'executejs') {
return _route_to_js_backend(route, params);
}
switch (BACKEND_TYPE) {
case BACKEND_TYPES.FLASK: // Fetch from Flask (python) backend
return fetch(`${FLASK_BASE_URL}app/${route}`, {
@@ -49,4 +53,4 @@ export default function fetch_from_backend(route, params, rejected) {
export function set_backend_type(t) {
BACKEND_TYPE = t;
}
}
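With this routing change, an 'executejs' request is always handled by the in-browser backend regardless of BACKEND_TYPE, and only params.id, params.code, params.responses, and params.scope are forwarded. Combined with the EvaluatorNode change above, the call site looks roughly like the sketch below (inside the node's run handler, where id, codeTextOnRun, input_node_ids, mapScope, script_paths, and rejected are in scope):

import fetch_from_backend from './fetch_from_backend';

fetch_from_backend('executejs', {
  id: id,                     // evaluator node id, used to locate its sandbox iframe
  code: codeTextOnRun,        // JavaScript source defining evaluate()
  responses: input_node_ids,  // ids of upstream nodes whose cache'd responses to score
  scope: mapScope,            // 'response' or 'batch'
  reduce_vars: [],            // accepted here but not forwarded to executejs
  script_paths: script_paths, // accepted here but not forwarded to executejs
}, rejected).then(function(json) {
  // json is expected to carry responses, logs, and (on failure) error,
  // matching what the test destructures from executejs.
});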