diff --git a/chainforge/react-server/src/LLMResponseInspector.js b/chainforge/react-server/src/LLMResponseInspector.js
index 797d44f..78ca888 100644
--- a/chainforge/react-server/src/LLMResponseInspector.js
+++ b/chainforge/react-server/src/LLMResponseInspector.js
@@ -45,6 +45,7 @@ const countResponsesBy = (responses, keyFunc) => {
   });
   return [responses_by_key, unspecified_group];
 };
+const getLLMName = (resp_obj) => (typeof resp_obj?.llm === 'string' ? resp_obj.llm : resp_obj?.llm?.name);
 
 const SUCCESS_EVAL_SCORES = new Set(['true', 'yes']);
 const FAILURE_EVAL_SCORES = new Set(['false', 'no']);
@@ -85,7 +86,7 @@ export const exportToExcel = (jsonResponses, filename) => {
   // NOTE: We need to 'unwind' responses in each batch, since each res_obj can have N>1 responses.
   //       We will store every response text on a single row, but keep track of batches by creating a batch ID number.
   const data = jsonResponses.map((res_obj, res_obj_idx) => {
-    const llm = res_obj.llm;
+    const llm = getLLMName(res_obj);
     const prompt = res_obj.prompt;
     const vars = res_obj.vars;
     const eval_res_items = res_obj.eval_res ? res_obj.eval_res.items : null;
@@ -166,7 +167,7 @@ const LLMResponseInspector = ({ jsonResponses, wideFormat }) => {
       Object.keys(res_obj.vars).forEach(v => {
        found_vars.add(v);
      });
-      found_llms.add(res_obj.llm);
+      found_llms.add(getLLMName(res_obj));
     });
     found_vars = Array.from(found_vars);
     found_llms = Array.from(found_llms);
@@ -258,14 +259,14 @@ const LLMResponseInspector = ({ jsonResponses, wideFormat }) => {
         );
       });
       return (
-
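
For reference, a minimal sketch (not part of the diff itself) of how the new getLLMName helper normalizes the two shapes resp_obj.llm can take; the sample response objects below are hypothetical and only illustrate the helper's behavior:

// Same helper as added in the diff: returns the LLM label whether `llm`
// is a plain string or an object carrying a `name` field.
const getLLMName = (resp_obj) => (typeof resp_obj?.llm === 'string' ? resp_obj.llm : resp_obj?.llm?.name);

// Hypothetical response objects, for illustration only:
console.log(getLLMName({ llm: 'GPT-4' }));                       // -> 'GPT-4'
console.log(getLLMName({ llm: { name: 'GPT-4', temp: 1.0 } }));  // -> 'GPT-4'
console.log(getLLMName({}));                                     // -> undefined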