Change all fetch() calls to fetch_from_backend switcher

Ian Arawjo 2023-06-24 11:12:08 -04:00
parent 54f6de0184
commit 188c10f71f
7 changed files with 258 additions and 203 deletions
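In short, each call site drops the explicit POST boilerplate in favor of a route name plus a params object. A minimal before/after sketch of the pattern (using the grabResponses route that appears throughout this diff):

// Before: explicit POST against the Flask server
fetch(BASE_URL + 'app/grabResponses', {
  method: 'POST',
  headers: {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'},
  body: JSON.stringify({ responses: [id] }),
});

// After: the switcher owns the transport details
fetch_from_backend('grabResponses', { responses: [id] });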

View File

@@ -27,7 +27,8 @@ import './text-fields-node.css';
// State management (from https://reactflow.dev/docs/guides/state-management/)
import { shallow } from 'zustand/shallow';
import useStore, { BASE_URL } from './store';
import useStore from './store';
import fetch_from_backend from './fetch_from_backend';
const selector = (state) => ({
nodes: state.nodes,
@@ -234,12 +235,8 @@ const App = () => {
// Then we grab all the relevant cache files from the backend
const all_node_ids = nodes.map(n => n.id);
fetch(BASE_URL + 'app/exportCache', {
method: 'POST',
headers: {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'},
body: JSON.stringify({
'ids': all_node_ids,
}),
fetch_from_backend('exportCache', {
'ids': all_node_ids,
}).then(function(res) {
return res.json();
}).then(function(json) {
@@ -259,12 +256,8 @@ const App = () => {
// Import data to the cache stored on the local filesystem (in backend)
const importCache = (cache_data) => {
return fetch(BASE_URL + 'app/importCache', {
method: 'POST',
headers: {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'},
body: JSON.stringify({
'files': cache_data,
}),
return fetch_from_backend('importCache', {
'files': cache_data,
}, handleError).then(function(res) {
return res.json();
}, handleError).then(function(json) {
@@ -337,12 +330,8 @@ const App = () => {
// Downloads the selected OpenAI eval file (preconverted to a .cforge flow)
const importFlowFromOpenAIEval = (evalname) => {
fetch(BASE_URL + 'app/fetchOpenAIEval', {
method: 'POST',
headers: {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'},
body: JSON.stringify({
name: evalname,
}),
fetch_from_backend('fetchOpenAIEval', {
name: evalname,
}, handleError).then(function(response) {
return response.json();
}, handleError).then(function(json) {
@@ -375,12 +364,8 @@ const App = () => {
}
// Fetch the example flow data from the backend
fetch(BASE_URL + 'app/fetchExampleFlow', {
method: 'POST',
headers: {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'},
body: JSON.stringify({
'name': name,
}),
fetch_from_backend('fetchExampleFlow', {
'name': name,
}, handleError).then(function(res) {
return res.json();
}, handleError).then(function(json) {

View File

@@ -12,6 +12,7 @@ import AceEditor from "react-ace";
import "ace-builds/src-noconflict/mode-python";
import "ace-builds/src-noconflict/theme-xcode";
import "ace-builds/src-noconflict/ext-language_tools";
import fetch_from_backend from './fetch_from_backend';
const EvaluatorNode = ({ data, id }) => {
@@ -38,21 +39,17 @@ const EvaluatorNode = ({ data, id }) => {
// On initialization
useEffect(() => {
// Attempt to grab cache'd responses
fetch(BASE_URL + 'app/grabResponses', {
method: 'POST',
headers: {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'},
body: JSON.stringify({
responses: [id],
}),
}).then(function(res) {
fetch_from_backend('grabResponses', {
responses: [id],
}).then(function(res) {
return res.json();
}).then(function(json) {
}).then(function(json) {
if (json.responses && json.responses.length > 0) {
// Store responses and set status to green checkmark
setLastResponses(json.responses);
setStatus('ready');
}
});
});
}, []);
const handleCodeChange = (code) => {
@@ -99,18 +96,14 @@ const EvaluatorNode = ({ data, id }) => {
// Run evaluator in backend
const codeTextOnRun = codeText + '';
fetch(BASE_URL + 'app/execute', {
method: 'POST',
headers: {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'},
body: JSON.stringify({
id: id,
code: codeTextOnRun,
scope: mapScope,
responses: input_node_ids,
reduce_vars: [], // reduceMethod === 'avg' ? reduceVars : [],
script_paths: script_paths,
// write an extra part here that takes in reduce func
}),
fetch_from_backend('execute', {
id: id,
code: codeTextOnRun,
scope: mapScope,
responses: input_node_ids,
reduce_vars: [],
script_paths: script_paths,
// write an extra part here that takes in reduce func
}, rejected).then(function(response) {
return response.json();
}, rejected).then(function(json) {

View File

@@ -3,7 +3,7 @@ import { Handle } from 'react-flow-renderer';
import useStore from './store';
import NodeLabel from './NodeLabelComponent'
import LLMResponseInspector, { exportToExcel } from './LLMResponseInspector';
import {BASE_URL} from './store';
import fetch_from_backend from './fetch_from_backend';
const InspectorNode = ({ data, id }) => {
@@ -26,12 +26,8 @@ const InspectorNode = ({ data, id }) => {
is_fetching = true;
// Grab responses associated with those ids:
fetch(BASE_URL + 'app/grabResponses', {
method: 'POST',
headers: {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'},
body: JSON.stringify({
'responses': input_node_ids,
}),
fetch_from_backend('grabResponses', {
'responses': input_node_ids
}).then(function(res) {
return res.json();
}).then(function(json) {

View File

@@ -8,9 +8,9 @@ import NodeLabel from './NodeLabelComponent'
import TemplateHooks, { extractBracketedSubstrings, toPyTemplateFormat } from './TemplateHooksComponent'
import LLMList from './LLMListComponent'
import LLMResponseInspectorModal from './LLMResponseInspectorModal';
import {BASE_URL} from './store';
import io from 'socket.io-client';
import { getDefaultModelSettings, AvailableLLMs } from './ModelSettingSchemas'
import fetch_from_backend from './fetch_from_backend';
// The LLM(s) to include by default on a PromptNode whenever one is created.
// Defaults to ChatGPT (GPT3.5).
@@ -181,12 +181,8 @@ const PromptNode = ({ data, id }) => {
refreshTemplateHooks(promptText);
// Attempt to grab cache'd responses
fetch(BASE_URL + 'app/grabResponses', {
method: 'POST',
headers: {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'},
body: JSON.stringify({
responses: [id],
}),
fetch_from_backend('grabResponses', {
responses: [id],
}).then(function(res) {
return res.json();
}).then(function(json) {
@@ -269,16 +265,13 @@ const PromptNode = ({ data, id }) => {
// Ask the backend how many responses it needs to collect, given the input data:
const fetchResponseCounts = (prompt, vars, llms, rejected) => {
return fetch(BASE_URL + 'app/countQueriesRequired', {
method: 'POST',
headers: {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'},
body: JSON.stringify({
prompt: prompt,
vars: vars,
llms: llms,
id: id,
n: numGenerations,
})}, rejected).then(function(response) {
return fetch_from_backend('countQueriesRequired', {
prompt: prompt,
vars: vars,
llms: llms,
id: id,
n: numGenerations,
}, rejected).then(function(response) {
return response.json();
}, rejected).then(function(json) {
if (!json || !json.counts) {
@@ -394,12 +387,7 @@ const PromptNode = ({ data, id }) => {
// Ask the backend to reset the scratchpad for counting queries:
const create_progress_scratchpad = () => {
return fetch(BASE_URL + 'app/createProgressFile', {
method: 'POST',
headers: {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'},
body: JSON.stringify({
id: id,
})}, rejected);
return fetch_from_backend('createProgressFile', {id: id}, rejected);
};
// Fetch info about the number of queries we'll need to make
@@ -483,18 +471,14 @@ const PromptNode = ({ data, id }) => {
// Run all prompt permutations through the LLM to generate + cache responses:
const query_llms = () => {
return fetch(BASE_URL + 'app/queryllm', {
method: 'POST',
headers: {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'},
body: JSON.stringify({
id: id,
llm: llmItemsCurrState,
prompt: py_prompt_template,
vars: pulled_data,
n: numGenerations,
api_keys: (apiKeys ? apiKeys : {}),
no_cache: false,
}),
return fetch_from_backend('queryllm', {
id: id,
llm: llmItemsCurrState,
prompt: py_prompt_template,
vars: pulled_data,
n: numGenerations,
api_keys: (apiKeys ? apiKeys : {}),
no_cache: false,
}, rejected).then(function(response) {
return response.json();
}, rejected).then(function(json) {

View File

@@ -1,4 +1,3 @@
function len(o: object | string | Array<any>): number {
// Acts akin to Python's builtin 'len' method
if (Array.isArray(o)) {
@@ -17,12 +16,6 @@ function isDict(o: any): boolean {
return typeof o === 'object' && !Array.isArray(o);
}
function escape_dollar_signs(s: string): string {
const pattern = /\$(?![{])/g;
const replaced_string = s.replace(pattern, '$$');
return replaced_string;
}
class StringTemplate {
val: string;
/**
@@ -34,15 +27,74 @@ class StringTemplate {
this.val = str;
}
safe_substitute(sub_dict: { [key: string]: any }): string {
// Safely substitutes the template variables 'key' for the passed values,
// soft-failing for any keys which were not found.
return "Not yet implemented";
/** Safely substitutes the template variables 'key' for the passed values,
* soft-failing for any keys which were not found.
*
* NOTE: We don't use Regex here, for compatibility with browsers
* that don't support negative lookbehinds/aheads (e.g., Safari).
*
* This algorithm is O(N) complexity.
*/
safe_substitute(sub_dict: {[key: string]: string}): string {
let template = this.val;
let prev_c = '';
let group_start_idx = -1;
for (let i = 0; i < template.length; i += 1) {
const c = template.charAt(i);
if (prev_c !== '\\') { // Skip escaped braces
if (group_start_idx === -1 && c === '{') // Identify the start of a capture {group}
group_start_idx = i;
else if (group_start_idx > -1 && c === '}') { // Identify the end of a capture {group}
if (group_start_idx + 1 < i) { // Ignore {} empty braces
// We identified a capture group. First check if its key is in the substitution dict:
const varname = template.substring(group_start_idx+1, i);
if (varname in sub_dict) {
// Replace '{varname}' with the substitution value:
const replacement = sub_dict[varname];
template = template.substring(0, group_start_idx) + replacement + template.substring(i+1);
// Reset the iterator to point to the very next character upon the start of the next loop:
i = group_start_idx + replacement.length - 1;
}
// Because this is safe_substitute, we don't do anything if varname was not in sub_dict.
}
group_start_idx = -1;
}
}
prev_c = c;
}
return template;
}
has_vars(): boolean {
// Returns whether or not the template string has variables ${}
// TO BE IMPLEMENTED!
/**
* Returns true if the template string has:
* - at least one variable {}, if no varnames given
* - has at least one varname in passed varnames
*/
has_vars(varnames?: Array<string>): boolean {
let template = this.val;
let prev_c = '';
let group_start_idx = -1;
for (let i = 0; i < template.length; i += 1) {
const c = template.charAt(i);
if (prev_c !== '\\') { // Skip escaped braces
if (group_start_idx === -1 && c === '{') // Identify the start of a capture {group}
group_start_idx = i;
else if (group_start_idx > -1 && c === '}') { // Identify the end of a capture {group}
if (group_start_idx + 1 < i) { // Ignore {} empty braces
if (varnames !== undefined) {
if (varnames.includes(template.substring(group_start_idx+1, i)))
return true;
// If varnames was specified but none matched this capture group, continue.
}
else {
return true; // We identified a capture group.
}
}
group_start_idx = -1;
}
}
prev_c = c;
}
return false;
}
}
@@ -74,10 +126,8 @@ class PromptTemplate {
Initialize a PromptTemplate with a string in string.Template format.
(See https://docs.python.org/3/library/string.html#template-strings for more details.)
NOTE: ChainForge only supports placeholders with braces {}
We detect any $ without { to the right of them, and insert a '$' before it to escape the $.
NOTE: ChainForge only supports placeholders with braces {} without \ escape before them.
*/
templateStr = escape_dollar_signs(templateStr);
try {
new StringTemplate(templateStr);
} catch (err) {
@@ -97,34 +147,31 @@ class PromptTemplate {
return this.toString();
}
/** Returns True if the template has a variable with the given name. */
has_var(varname: string): boolean {
// Returns True if the template has a variable with the given name.
let sub_dict = {};
sub_dict[varname] = '_';
const subbed_str = new StringTemplate(this.template).safe_substitute({varname: '_'});
return subbed_str !== this.template; // if the strings differ, a replacement occurred
return (new StringTemplate(this.template).has_vars([varname]));
}
/** Returns True if no template variables are left in template string. */
is_concrete(): boolean {
// Returns True if no template variables are left in template string.
return new StringTemplate(this.template).has_vars();
return !(new StringTemplate(this.template).has_vars());
}
/**
Formats the template string with the given parameters, returning a new PromptTemplate.
Can return a partial completion.
NOTE: paramDict values can be in a special form: {text: <str>, fill_history: {varname: <str>}}
in order to bundle in any past fill history that is lost in the current text.
Example usage:
prompt = prompt_template.fill({
"className": className,
"library": "Kivy",
"PL": "Python"
});
*/
fill(paramDict: { [key: string]: any }): PromptTemplate {
/**
Formats the template string with the given parameters, returning a new PromptTemplate.
Can return a partial completion.
NOTE: paramDict values can be in a special form: {text: <str>, fill_history: {varname: <str>}}
in order to bundle in any past fill history that is lost in the current text.
Example usage:
prompt = prompt_template.fill({
"className": className,
"library": "Kivy",
"PL": "Python"
});
*/
// Check for special 'past fill history' format:
let past_fill_history = {};
let past_metavars = {};
@@ -140,7 +187,7 @@ class PromptTemplate {
});
// Recreate the param dict from just the 'text' property of the fill object
let newParamDict = {};
let newParamDict: { [key: string]: any } = {};
Object.entries(paramDict).forEach(([param, obj]) => {
newParamDict[param] = obj['text'];
});
@@ -158,7 +205,7 @@ class PromptTemplate {
// Append any past history passed as vars:
Object.entries(past_fill_history).forEach(([key, val]) => {
if (key in filled_pt.fill_history)
console.log(`"Warning: PromptTemplate already has fill history for key ${key}.`);
console.log(`Warning: PromptTemplate already has fill history for key ${key}.`);
filled_pt.fill_history[key] = val;
});
@@ -170,7 +217,7 @@ class PromptTemplate {
// Add the new fill history using the passed parameters that we just filled in
Object.entries(paramDict).forEach(([key, val]) => {
if (key in filled_pt.fill_history)
console.log(`"Warning: PromptTemplate already has fill history for key ${key}.`);
console.log(`Warning: PromptTemplate already has fill history for key ${key}.`);
filled_pt.fill_history[key] = val;
});
@@ -192,7 +239,7 @@ class PromptPermutationGenerator {
"domain": ["rent", "food", "energy"]})):
console.log(prompt)
*/
template: string | PromptTemplate;
template: PromptTemplate;
constructor(template: PromptTemplate | string) {
if (typeof template === 'string')
@@ -200,8 +247,8 @@ class PromptPermutationGenerator {
this.template = template;
}
_gen_perm(template: PromptTemplate, params_to_fill: Array<string>, paramDict: { [key: string]: any }): Array<PromptTemplate> {
if (len(params_to_fill) === 0) return [];
*_gen_perm(template: PromptTemplate, params_to_fill: Array<string>, paramDict: { [key: string]: any }): Generator<PromptTemplate, boolean, undefined> {
if (len(params_to_fill) === 0) return true;
// Extract the first param that occurs in the current template
let param: string | undefined = undefined;
@@ -215,8 +262,10 @@ class PromptPermutationGenerator {
}
}
if (param === undefined)
return [template];
if (param === undefined) {
yield template;
return true;
}
// Generate new prompts by filling in its value(s) into the PromptTemplate
let val = paramDict[param];
@@ -225,7 +274,7 @@ class PromptPermutationGenerator {
val.forEach(v => {
if (param === undefined) return;
let param_fill_dict = {};
let param_fill_dict: {[key: string]: any} = {};
param_fill_dict[param] = v;
/* If this var has an "associate_id", then it wants to "carry with"
@@ -253,7 +302,7 @@ class PromptPermutationGenerator {
});
}
else if (typeof val === 'string') {
let sub_dict = {};
let sub_dict: {[key: string]: any} = {};
sub_dict[param] = val;
new_prompt_temps = [template.fill(sub_dict)];
}
@@ -261,19 +310,19 @@ class PromptPermutationGenerator {
throw new Error("Value of prompt template parameter is not a list or a string, but of type " + (typeof val).toString());
// Recurse
if (len(params_left) === 0)
return new_prompt_temps;
else {
let res: Array<PromptTemplate> = [];
new_prompt_temps.forEach(p => {
res.push(...this._gen_perm(p, params_left, paramDict));
});
return res;
if (len(params_left) === 0) {
yield* new_prompt_temps;
} else {
for (let i = 0; i < new_prompt_temps.length; i++) {
const p = new_prompt_temps[i];
yield* this._gen_perm(p, params_left, paramDict);
}
}
return true;
}
// Generator class method to yield permutations
*call(paramDict: { [key: string]: any }): Generator<PromptTemplate, boolean, PromptTemplate> {
// Generator class method to yield permutations of a root prompt template
*generate(paramDict: { [key: string]: any }): Generator<PromptTemplate, boolean, undefined> {
let template = (typeof this.template === 'string') ? new PromptTemplate(this.template) : this.template;
if (len(paramDict) === 0) {
@@ -287,63 +336,78 @@ class PromptPermutationGenerator {
}
}
// # Test cases
// if __name__ == '__main__':
// # Dollar sign escape works
// tests = ["What is $2 + $2?", "If I have $4 and I want ${dollars} then how many do I have?", "$4 is equal to ${dollars}?", "${what} is the $400?"]
// escaped_tests = [escape_dollar_signs(t) for t in tests]
// print(escaped_tests)
// assert escaped_tests[0] == "What is $$2 + $$2?"
// assert escaped_tests[1] == "If I have $$4 and I want ${dollars} then how many do I have?"
// assert escaped_tests[2] == "$$4 is equal to ${dollars}?"
// assert escaped_tests[3] == "${what} is the $$400?"
function assert(condition: boolean, message?: string) {
if (!condition) {
throw new Error(message || "Assertion failed");
}
}
// # Single template
// gen = PromptPermutationGenerator('What is the ${timeframe} when ${person} was born?')
// res = [r for r in gen({'timeframe': ['year', 'decade', 'century'], 'person': ['Howard Hughes', 'Toni Morrison', 'Otis Redding']})]
// for r in res:
// print(r)
// assert len(res) == 9
/**
* Run test cases on `PromptPermutationGenerator`.
*/
function _test() {
// Single template
let prompt_gen = new PromptPermutationGenerator('What is the {timeframe} when {person} was born?');
let vars: {[key: string]: any} = {
'timeframe': ['year', 'decade', 'century'],
'person': ['Howard Hughes', 'Toni Morrison', 'Otis Redding']
};
let num_prompts = 0;
for (const prompt of prompt_gen.generate(vars)) {
console.log(prompt.toString());
num_prompts += 1;
}
assert(num_prompts === 9);
// # Nested templates
// gen = PromptPermutationGenerator('${prefix}... ${suffix}')
// res = [r for r in gen({
// 'prefix': ['Who invented ${tool}?', 'When was ${tool} invented?', 'What can you do with ${tool}?'],
// 'suffix': ['Phrase your answer in the form of a ${response_type}', 'Respond with a ${response_type}'],
// 'tool': ['the flashlight', 'CRISPR', 'rubber'],
// 'response_type': ['question', 'poem', 'nightmare']
// })]
// for r in res:
// print(r)
// assert len(res) == (3*3)*(2*3)
// Nested templates
prompt_gen = new PromptPermutationGenerator('{prefix}... {suffix}');
vars = {
'prefix': ['Who invented {tool}?', 'When was {tool} invented?', 'What can you do with {tool}?'],
'suffix': ['Phrase your answer in the form of a {response_type}', 'Respond with a {response_type}'],
'tool': ['the flashlight', 'CRISPR', 'rubber'],
'response_type': ['question', 'poem', 'nightmare']
};
num_prompts = 0;
for (const prompt of prompt_gen.generate(vars)) {
console.log(prompt.toString());
num_prompts += 1;
}
assert(num_prompts === (3*3)*(2*3));
// # 'Carry together' vars with 'metavar' data attached
// # NOTE: This feature may be used when passing rows of a table, so that vars that have associated values,
// # like 'inventor' with 'tool', 'carry together' when being filled into the prompt template.
// # In addition, 'metavars' may be attached which are, commonly, the values of other columns for that row, but
// # columns which weren't used to fill in the prompt template explicitly.
// gen = PromptPermutationGenerator('What ${timeframe} did ${inventor} invent the ${tool}?')
// res = [r for r in gen({
// 'inventor': [
// {'text': "Thomas Edison", "fill_history": {}, "associate_id": "A", "metavars": { "year": 1879 }},
// {'text': "Alexander Fleming", "fill_history": {}, "associate_id": "B", "metavars": { "year": 1928 }},
// {'text': "William Shockley", "fill_history": {}, "associate_id": "C", "metavars": { "year": 1947 }},
// ],
// 'tool': [
// {'text': "lightbulb", "fill_history": {}, "associate_id": "A"},
// {'text': "penicillin", "fill_history": {}, "associate_id": "B"},
// {'text': "transistor", "fill_history": {}, "associate_id": "C"},
// ],
// 'timeframe': [ "year", "decade", "century" ]
// })]
// for r in res:
// r_str = str(r)
// print(r_str, r.metavars)
// assert "year" in r.metavars
// if "Edison" in r_str:
// assert "lightbulb" in r_str
// elif "Fleming" in r_str:
// assert "penicillin" in r_str
// elif "Shockley" in r_str:
// assert "transistor" in r_str
// assert len(res) == 3*3
prompt_gen = new PromptPermutationGenerator('What {timeframe} did {inventor} invent the {tool}?')
vars = {
'inventor': [
{'text': "Thomas Edison", "fill_history": {}, "associate_id": "A", "metavars": { "year": 1879 }},
{'text': "Alexander Fleming", "fill_history": {}, "associate_id": "B", "metavars": { "year": 1928 }},
{'text': "William Shockley", "fill_history": {}, "associate_id": "C", "metavars": { "year": 1947 }},
],
'tool': [
{'text': "lightbulb", "fill_history": {}, "associate_id": "A"},
{'text': "penicillin", "fill_history": {}, "associate_id": "B"},
{'text': "transistor", "fill_history": {}, "associate_id": "C"},
],
'timeframe': [ "year", "decade", "century" ]
};
num_prompts = 0;
for (const prompt of prompt_gen.generate(vars)) {
const prompt_str = prompt.toString();
console.log(prompt_str, prompt.metavars)
assert("year" in prompt.metavars);
if (prompt_str.includes('Edison'))
assert(prompt_str.includes('lightbulb'));
else if (prompt_str.includes('Fleming'))
assert(prompt_str.includes('penicillin'));
else if (prompt_str.includes('Shockley'))
assert(prompt_str.includes('transistor'));
num_prompts += 1;
}
assert(num_prompts === 3*3);
}
// Uncomment and run 'ts-node template.ts' to test:
// _test();
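For illustration, the expected behavior of the new brace-scanning methods, as a sketch only (it assumes StringTemplate is exported for direct use, which this diff does not show):

const t = new StringTemplate('Hi {name}, \\{escaped} and {missing} are left alone');
t.safe_substitute({ name: 'Ada' });
// => 'Hi Ada, \{escaped} and {missing} are left alone'
// Unknown keys soft-fail; backslash-escaped braces are skipped.

new StringTemplate('{a} and {b}').has_vars();      // true: at least one {var} present
new StringTemplate('{a} and {b}').has_vars(['c']); // false: no listed varname appears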

View File

@@ -0,0 +1,36 @@
const BACKEND_TYPES = {
FLASK: 'flask',
JAVASCRIPT: 'js',
};
let BACKEND_TYPE = BACKEND_TYPES.FLASK;
/** Where the ChainForge Flask server is being hosted. */
export const FLASK_BASE_URL = 'http://localhost:8000/';
/**
* Abstracts calls to the ChainForge backend, so that the Python Flask backend can be used,
* or a JavaScript (client-side) 'backend' is used.
* This should be used in place of native 'fetch' operations.
*
* @returns a Promise with the result of the fetch call.
*/
export default function fetch_from_backend(route, params, rejected) {
switch (BACKEND_TYPE) {
case BACKEND_TYPES.FLASK: // Fetch from Flask (python) backend
return fetch(`${FLASK_BASE_URL}app/${route}`, {
method: 'POST',
headers: {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'},
body: JSON.stringify(params)
}, rejected || ((err) => {throw new Error(err)}));
case BACKEND_TYPES.JAVASCRIPT: // Fetch from client-side Javascript 'backend'
// TO BE IMPLEMENTED
break;
default:
console.error('Unsupported backend type:', BACKEND_TYPE);
break;
}
}
export function set_backend_type(t) {
BACKEND_TYPE = t;
}
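For reference, a typical call site after this change looks like the following sketch (mirroring the node components above; nodeId stands in for a node's id, and set_backend_type takes the raw string since BACKEND_TYPES is not exported):

import fetch_from_backend, { set_backend_type } from './fetch_from_backend';

set_backend_type('flask'); // the default; the 'js' backend is not yet implemented

fetch_from_backend('grabResponses', { responses: [nodeId] })
  .then(res => res.json())
  .then(json => console.log(json.responses));

One caveat: native fetch() accepts only (input, init) arguments, so the rejected fallback passed as a third argument inside the switcher is ignored by fetch itself; the call sites above handle rejection through their own .then(fn, rejected) chains.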

View File

@@ -6,9 +6,6 @@ import {
useViewport,
} from 'react-flow-renderer';
// Where the ChainForge Flask server is being hosted.
export const BASE_URL = 'http://localhost:8000/';
// Initial project settings
const initialAPIKeys = {};
const initialLLMColors = {};