diff --git a/task-runner/src/collaborationExecutor.ts b/task-runner/src/collaborationExecutor.ts
index 7b0fcf2..bdcf774 100644
--- a/task-runner/src/collaborationExecutor.ts
+++ b/task-runner/src/collaborationExecutor.ts
@@ -28,11 +28,13 @@ export async function processCollaborationRun(run: TeamRun): Promise<void> {
   let totalTokens = 0;
   let totalCost = 0;
 
+  let teamData: { name: string; config: CollaborationConfig; mode: string; output_route_ids: string[] | null } | null = null;
+
   try {
     // 1. Fetch team config
     const teamResponse = await supabase
       .from('teams')
-      .select('config, mode')
+      .select('name, config, mode, output_route_ids')
       .eq('id', run.team_id)
       .single();
 
@@ -40,7 +42,7 @@ export async function processCollaborationRun(run: TeamRun): Promise<void> {
       throw new Error(`Failed to load team: ${teamResponse.error?.message ?? 'not found'}`);
     }
 
-    const teamData = teamResponse.data as { config: CollaborationConfig; mode: string };
+    teamData = teamResponse.data as { name: string; config: CollaborationConfig; mode: string; output_route_ids: string[] | null };
     const config = teamData.config;
 
     if (!config.agent_ids?.length || config.agent_ids.length < 2) {
@@ -193,8 +195,9 @@ export async function processCollaborationRun(run: TeamRun): Promise<void> {
     // Fire webhook (fire-and-forget)
     void dispatchTeamRunWebhooks(
       { id: run.id, team_id: run.team_id, workspace_id: run.workspace_id, status: 'completed', input_task: run.input_task, output: finalOutput },
-      `Collaboration Team ${run.team_id}`,
+      teamData.name,
       'team_run.completed',
+      teamData.output_route_ids,
     );
 
   } catch (error: unknown) {
@@ -214,8 +217,9 @@ export async function processCollaborationRun(run: TeamRun): Promise<void> {
 
     void dispatchTeamRunWebhooks(
       { id: run.id, team_id: run.team_id, workspace_id: run.workspace_id, status: 'failed', input_task: run.input_task, error_message: errMsg },
-      `Collaboration Team ${run.team_id}`,
+      teamData?.name ?? `Collaboration Team ${run.team_id}`,
       'team_run.failed',
+      teamData?.output_route_ids ?? null,
     );
   }
 }
diff --git a/task-runner/src/orchestratorExecutor.ts b/task-runner/src/orchestratorExecutor.ts
index d6f5d4b..28d09f0 100644
--- a/task-runner/src/orchestratorExecutor.ts
+++ b/task-runner/src/orchestratorExecutor.ts
@@ -542,8 +542,44 @@ async function executeToolCall(
     }
 
     case 'final_answer': {
-      const output = args.output as string;
-      console.log(`[Orchestrator] final_answer received, output length: ${output?.length ?? 0} chars`);
+      let output = (args.output as string) ?? '';
+      console.log(`[Orchestrator] final_answer received, brain output length: ${output.length} chars`);
+
+      // ── Augment with worker outputs if the brain was lazy ────────
+      // LLMs often call final_answer with a brief summary like
+      // "Task completed successfully" instead of including the full
+      // synthesized content. If the final answer is much shorter than
+      // the accumulated worker outputs, append them.
+      const completedDelegations = Array.from(delegations.values())
+        .filter((d) => d.status === 'completed' && d.worker_output);
+
+      if (completedDelegations.length > 0) {
+        const workerOutputsTotal = completedDelegations
+          .reduce((sum, d) => sum + (d.worker_output?.length ?? 0), 0);
+
+        // If brain's final answer is less than 20% of workers' combined output,
+        // the brain likely just wrote a summary — augment with full worker outputs
+        if (output.length < workerOutputsTotal * 0.2) {
+          console.log(`[Orchestrator] Brain output (${output.length} chars) << worker outputs (${workerOutputsTotal} chars) — augmenting`);
+
+          const workerSections = completedDelegations.map((d) => {
+            const worker = workers.find((w) => w.id === d.worker_agent_id);
+            const workerName = worker?.name ?? 'Worker';
+            return `## ${workerName}\n\n${d.worker_output}`;
+          });
+
+          output = [
+            output,
+            '',
+            '---',
+            '',
+            ...workerSections,
+          ].join('\n');
+
+          console.log(`[Orchestrator] Augmented final output: ${output.length} chars`);
+        }
+      }
+
       await finalizeRun(run.id, output, 0, 0); // tokens/cost already tracked
       return { result: output, tokensUsed: 0, costUsed: 0, isDone: true };
     }