Compare commits

...

2 Commits

Author SHA1 Message Date
trial-danswer
2adc3b5063 handle AgentTools uniquely 2025-10-28 17:15:19 -07:00
trial-danswer
a47628adc3 agent-agent that kinda works 2025-10-28 14:23:02 -07:00
26 changed files with 1195 additions and 66 deletions

View File

@@ -0,0 +1,37 @@
"""add target_persona_id to tool for agent tools
Revision ID: 22f7ce63361f
Revises: 23f0b9b49965
Create Date: 2025-10-27 17:59:00.068872
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "22f7ce63361f"
down_revision = "23f0b9b49965"
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Add tool.target_persona_id, a nullable FK to persona, for agent tools."""
    table = "tool"
    fk_column = "target_persona_id"
    # The column must exist before the foreign-key constraint can reference it.
    op.add_column(table, sa.Column(fk_column, sa.Integer(), nullable=True))
    op.create_foreign_key(
        "fk_tool_target_persona_id_persona",
        table,
        "persona",
        [fk_column],
        ["id"],
        ondelete="CASCADE",
    )
def downgrade() -> None:
    """Revert the migration: drop the FK constraint, then the column it guards."""
    table = "tool"
    # The constraint has to go before the column it references.
    op.drop_constraint("fk_tool_target_persona_id_persona", table, type_="foreignkey")
    op.drop_column(table, "target_persona_id")

View File

@@ -0,0 +1,28 @@
"""add calling_agent_name to research_agent_iteration_sub_step
Revision ID: 23f0b9b49965
Revises: 09995b8811eb
Create Date: 2025-10-27 17:55:10.802310
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "23f0b9b49965"
down_revision = "09995b8811eb"
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Add the nullable calling_agent_name column used for agent <> agent tracking."""
    new_column = sa.Column("calling_agent_name", sa.String(), nullable=True)
    op.add_column("research_agent_iteration_sub_step", new_column)
def downgrade() -> None:
    """Remove the calling_agent_name column added in upgrade()."""
    table = "research_agent_iteration_sub_step"
    op.drop_column(table, "calling_agent_name")

View File

@@ -108,6 +108,9 @@ class IterationAnswer(BaseModel):
# for multi-query search tools (v2 web search and internal search)
# TODO: Clean this up to be more flexible to tools
queries: list[str] | None = None
# for agent <> agent communication - tracks which agent called this tool
# None means it was called by the primary agent, otherwise contains the subagent name
calling_agent_name: str | None = None
class AggregatedDRContext(BaseModel):

View File

@@ -6,6 +6,7 @@ from langchain_core.messages import AIMessage
from langchain_core.runnables import RunnableConfig
from langgraph.types import StreamWriter
from onyx.agents.agent_search.dr.models import OrchestratorTool
from onyx.agents.agent_search.dr.sub_agents.states import BranchInput
from onyx.agents.agent_search.dr.sub_agents.states import BranchUpdate
from onyx.agents.agent_search.dr.sub_agents.states import IterationAnswer
@@ -17,6 +18,7 @@ from onyx.configs.agent_configs import TF_DR_TIMEOUT_SHORT
from onyx.prompts.dr_prompts import CUSTOM_TOOL_PREP_PROMPT
from onyx.prompts.dr_prompts import CUSTOM_TOOL_USE_PROMPT
from onyx.prompts.dr_prompts import OKTA_TOOL_USE_SPECIAL_PROMPT
from onyx.tools.tool_implementations.agent.agent_tool import AgentTool
from onyx.utils.logger import setup_logger
logger = setup_logger()
@@ -45,6 +47,16 @@ def generic_internal_tool_act(
if generic_internal_tool is None:
raise ValueError("generic_internal_tool is not set")
# Check if this is an AgentTool - handle it differently
if isinstance(generic_internal_tool, AgentTool):
return handle_agent_tool_delegation(
state=state,
config=config,
writer=writer,
agent_tool_info=generic_internal_tool_info,
node_start_time=node_start_time,
)
branch_query = state.branch_question
if not branch_query:
raise ValueError("branch_query is not set")
@@ -147,3 +159,127 @@ def generic_internal_tool_act(
)
],
)
def handle_agent_tool_delegation(
    state: BranchInput,
    config: RunnableConfig,
    writer: StreamWriter,
    agent_tool_info: OrchestratorTool,
    node_start_time: datetime,
) -> BranchUpdate:
    """Handle AgentTool delegation with simplified processing.

    AgentTool runs its own agent with its own tools and LLM orchestration.
    We don't need to:
    - Extract tool arguments via LLM (AgentTool just takes a query string)
    - Summarize the result via LLM (the subagent already produces a final answer)

    The subagent will make its own tool calls, which will be tracked separately
    and marked with calling_agent_name by the Answer infrastructure.

    Args:
        state: Branch input carrying the sub-question and iteration counters.
        config: LangGraph runnable config; ``config["metadata"]["config"]``
            holds the GraphConfig (including the DB session).
        writer: Stream writer (unused here; kept for node-signature parity).
        agent_tool_info: Orchestrator tool entry whose ``tool_object`` is the
            AgentTool to delegate to.
        node_start_time: Timestamp used for the node's log message.

    Returns:
        BranchUpdate containing a single IterationAnswer for the delegation.

    Raises:
        ValueError: If the branch question is missing from the state.
    """
    # Deferred imports (each exactly once) to avoid import cycles at module load.
    from langchain_core.messages import HumanMessage
    from langchain_core.messages import SystemMessage

    from onyx.db.persona import get_persona_by_id
    from onyx.llm.factory import get_llms_for_persona

    iteration_nr = state.iteration_nr
    parallelization_nr = state.parallelization_nr

    branch_query = state.branch_question
    if not branch_query:
        raise ValueError("branch_query is not set")

    graph_config = cast(GraphConfig, config["metadata"]["config"])
    agent_tool = cast(AgentTool, agent_tool_info.tool_object)
    agent_name = agent_tool.display_name

    logger.info(
        f"AgentTool delegation start for {agent_name} {iteration_nr}.{parallelization_nr} at {datetime.now()}"
    )

    # Get the target persona
    target_persona = get_persona_by_id(
        persona_id=agent_tool.target_persona_id,
        user=None,  # Bypass auth for subagent calls
        db_session=graph_config.persistence.db_session,
        include_deleted=False,
        is_for_edit=False,
    )

    # Get the LLM for the subagent persona
    llm, fast_llm = get_llms_for_persona(
        persona=target_persona,
        llm_override=None,
        additional_headers=None,
    )

    # For now, we'll run a simple LLM call for the subagent
    # rather than a full nested deep research graph.
    # This keeps the implementation simpler while still avoiding double-summarization.
    # TODO: Implement full subagent delegation with its own tool orchestration
    final_response = ""
    try:
        # Build a simple prompt for the subagent
        subagent_system_prompt = f"You are {target_persona.name}. "
        if target_persona.task_prompt:
            subagent_system_prompt += target_persona.task_prompt

        subagent_user_prompt = f"Please help with the following: {branch_query}"

        messages = [
            SystemMessage(content=subagent_system_prompt),
            HumanMessage(content=subagent_user_prompt),
        ]
        response = llm.invoke(messages)
        final_response = str(response.content).strip()

        if not final_response:
            final_response = f"Subagent {target_persona.name} completed the task"
    except Exception as e:
        logger.exception(f"Error executing subagent {target_persona.name}")
        final_response = f"Error executing subagent: {str(e)}"

    # Create simplified IterationAnswer for the delegation itself
    answer_string = (
        final_response.strip() if final_response else f"Delegated to {agent_name}"
    )
    reasoning_string = f"Delegated to {agent_name}: {branch_query}"

    logger.info(
        f"AgentTool delegation end for {agent_name} {iteration_nr}.{parallelization_nr} at {datetime.now()}"
    )

    return BranchUpdate(
        branch_iteration_responses=[
            IterationAnswer(
                tool=agent_tool.llm_name,
                tool_id=agent_tool_info.tool_id,
                iteration_nr=iteration_nr,
                parallelization_nr=parallelization_nr,
                question=branch_query,
                answer=answer_string,
                claims=[],
                cited_documents={},
                reasoning=reasoning_string,
                additional_data={"subagent_name": target_persona.name},
                response_type="agent_delegation",
                data=final_response,
            )
        ],
        log_messages=[
            get_langgraph_node_log_string(
                graph_component="agent_tool_delegation",
                node_name=agent_name,
                node_start_time=node_start_time,
            )
        ],
    )

View File

@@ -119,6 +119,7 @@ def save_iteration(
additional_data=iteration_answer.additional_data,
is_web_fetch=iteration_answer.is_web_fetch,
queries=iteration_answer.queries,
calling_agent_name=iteration_answer.calling_agent_name, # Track which agent made this tool call
)
db_session.add(research_agent_iteration_sub_step)

View File

@@ -2524,6 +2524,10 @@ class Tool(Base):
oauth_config_id: Mapped[int | None] = mapped_column(
Integer, ForeignKey("oauth_config.id", ondelete="SET NULL"), nullable=True
)
# For AgentTools: the persona that this tool delegates to (null for non-AgentTools)
target_persona_id: Mapped[int | None] = mapped_column(
Integer, ForeignKey("persona.id", ondelete="CASCADE"), nullable=True
)
enabled: Mapped[bool] = mapped_column(Boolean, nullable=False, default=True)
user: Mapped[User | None] = relationship("User", back_populates="custom_tools")
@@ -3589,6 +3593,10 @@ class ResearchAgentIterationSubStep(Base):
postgresql.JSONB(), nullable=True
)
# for agent <> agent communication - tracks which agent called this tool
# None means it was called by the primary agent, otherwise contains the subagent name
calling_agent_name: Mapped[str | None] = mapped_column(String, nullable=True)
# Relationships
# Note: ChatMessage is accessible via primary_question_id. It is tied to the
# primary_question_id in research_agent_iteration, which has a foreign key constraint

View File

@@ -246,6 +246,33 @@ def create_update_persona(
except Exception:
raise ValueError("Invalid user_file_ids; must be UUID strings")
# Create AgentTools for subagent personas and add them to tool_ids
from onyx.db.tools import get_or_create_agent_tool__no_commit
all_tool_ids = create_persona_request.tool_ids or []
logger.info(f"Processing persona: {create_persona_request.name}")
logger.info(
f"Subagent persona IDs requested: {create_persona_request.subagent_persona_ids}"
)
# Now add AgentTools only for the currently selected subagents
for subagent_id in create_persona_request.subagent_persona_ids:
logger.info(f"Adding subagent {subagent_id} to this persona's tools")
# Get the AgentTool for the subagent (create if needed)
agent_tool = get_or_create_agent_tool__no_commit(
target_persona_id=subagent_id,
db_session=db_session,
)
# Add to this persona's tool_ids
if agent_tool.id not in all_tool_ids:
all_tool_ids.append(agent_tool.id)
logger.info(
f"Added AgentTool {agent_tool.id} (for subagent {subagent_id}) to persona's tools"
)
else:
logger.info(f"AgentTool {agent_tool.id} already in tool_ids")
persona = upsert_persona(
persona_id=persona_id,
user=user,
@@ -253,7 +280,7 @@ def create_update_persona(
description=create_persona_request.description,
name=create_persona_request.name,
document_set_ids=create_persona_request.document_set_ids,
tool_ids=create_persona_request.tool_ids,
tool_ids=all_tool_ids,
is_public=create_persona_request.is_public,
recency_bias=create_persona_request.recency_bias,
llm_model_provider_override=create_persona_request.llm_model_provider_override,

View File

@@ -54,6 +54,67 @@ def get_tool_by_name(tool_name: str, db_session: Session) -> Tool:
return tool
def get_or_create_agent_tool__no_commit(
    target_persona_id: int,
    db_session: Session,
) -> Tool:
    """Get or create an AgentTool for the given target persona. Reuses existing tools.

    Args:
        target_persona_id: ID of the persona the AgentTool should delegate to.
        db_session: Active session. A newly created tool is flushed (so its ID
            is populated) but NOT committed -- the caller owns the transaction.

    Returns:
        The existing or newly created AgentTool row.
    """
    from onyx.utils.logger import setup_logger

    logger = setup_logger()

    # Check if an AgentTool already exists for this persona
    existing_tool = db_session.scalar(
        select(Tool).where(
            Tool.in_code_tool_id == "AgentTool",
            Tool.target_persona_id == target_persona_id,
        )
    )

    if existing_tool:
        logger.info(
            f"Reusing existing AgentTool {existing_tool.id} for target persona {target_persona_id}"
        )
        return existing_tool

    # Create a new AgentTool. Import deferred to avoid a circular dependency
    # between the persona and tool DB modules.
    from onyx.db.persona import get_persona_by_id

    target_persona = get_persona_by_id(
        persona_id=target_persona_id,
        user=None,
        db_session=db_session,
        include_deleted=False,
        is_for_edit=False,
    )

    logger.info(
        f"Creating new AgentTool for target persona {target_persona_id} ({target_persona.name})"
    )

    new_tool = Tool(
        name=f"call_{target_persona.name.lower().replace(' ', '_')}",
        description=f"Delegate tasks to the {target_persona.name} agent. {target_persona.description or ''}",
        # The display name is just the persona's name -- no interpolation needed.
        display_name=target_persona.name,
        in_code_tool_id="AgentTool",
        target_persona_id=target_persona_id,
        openapi_schema=None,
        custom_headers=[],
        user_id=None,
        passthrough_auth=False,
        mcp_server_id=None,
        oauth_config_id=None,
        enabled=True,
    )
    db_session.add(new_tool)
    db_session.flush()  # Don't commit yet, let caller decide when to commit

    logger.info(
        f"Created new AgentTool {new_tool.id} for target persona {target_persona_id}"
    )
    return new_tool
def create_tool__no_commit(
name: str,
description: str | None,
@@ -66,11 +127,13 @@ def create_tool__no_commit(
mcp_server_id: int | None = None,
oauth_config_id: int | None = None,
enabled: bool = True,
target_persona_id: int | None = None,
in_code_tool_id: str | None = None,
) -> Tool:
new_tool = Tool(
name=name,
description=description,
in_code_tool_id=None,
in_code_tool_id=in_code_tool_id,
openapi_schema=openapi_schema,
custom_headers=(
[header.model_dump() for header in custom_headers] if custom_headers else []
@@ -78,6 +141,7 @@ def create_tool__no_commit(
user_id=user_id,
passthrough_auth=passthrough_auth,
mcp_server_id=mcp_server_id,
target_persona_id=target_persona_id,
oauth_config_id=oauth_config_id,
enabled=enabled,
)

View File

@@ -68,6 +68,8 @@ class PersonaUpsertRequest(BaseModel):
groups: list[int] = Field(default_factory=list)
# e.g. ID of SearchTool or ImageGenerationTool or <USER_DEFINED_TOOL>
tool_ids: list[int]
# IDs of personas to make available as subagents (AgentTools will be auto-created)
subagent_persona_ids: list[int] = Field(default_factory=list)
icon_color: str | None = None
icon_shape: int | None = None
remove_image: bool | None = None

View File

@@ -18,6 +18,7 @@ class ToolSnapshot(BaseModel):
user_id: str | None = None
oauth_config_id: int | None = None
oauth_config_name: str | None = None
target_persona_id: int | None = None
@classmethod
def from_model(cls, tool: Tool) -> "ToolSnapshot":
@@ -26,6 +27,7 @@ class ToolSnapshot(BaseModel):
name=tool.name,
description=tool.description,
definition=tool.openapi_schema,
target_persona_id=tool.target_persona_id,
display_name=tool.display_name or tool.name,
in_code_tool_id=tool.in_code_tool_id,
custom_headers=tool.custom_headers,

View File

@@ -64,6 +64,8 @@ class SearchToolDelta(BaseObj):
queries: list[str]
documents: list[SavedSearchDoc]
# For agent <> agent communication - tracks which agent called this tool
calling_agent_name: str | None = None
class FetchToolStart(BaseObj):
@@ -80,6 +82,8 @@ class ImageGenerationToolDelta(BaseObj):
type: Literal["image_generation_tool_delta"] = "image_generation_tool_delta"
images: list[GeneratedImage]
# For agent <> agent communication - tracks which agent called this tool
calling_agent_name: str | None = None
class ImageGenerationToolHeartbeat(BaseObj):
@@ -101,6 +105,8 @@ class CustomToolDelta(BaseObj):
data: dict | list | str | int | float | bool | None = None
# For file-based responses like image/csv
file_ids: list[str] | None = None
# For agent <> agent communication - tracks which agent called this tool
calling_agent_name: str | None = None
"""Reasoning Packets"""

View File

@@ -171,7 +171,7 @@ def create_reasoning_packets(reasoning_text: str, step_nr: int) -> list[Packet]:
def create_image_generation_packets(
images: list[GeneratedImage], step_nr: int
images: list[GeneratedImage], step_nr: int, calling_agent_name: str | None = None
) -> list[Packet]:
packets: list[Packet] = []
@@ -180,7 +180,9 @@ def create_image_generation_packets(
packets.append(
Packet(
ind=step_nr,
obj=ImageGenerationToolDelta(images=images),
obj=ImageGenerationToolDelta(
images=images, calling_agent_name=calling_agent_name
),
),
)
@@ -195,6 +197,7 @@ def create_custom_tool_packets(
step_nr: int,
data: dict | list | str | int | float | bool | None = None,
file_ids: list[str] | None = None,
calling_agent_name: str | None = None,
) -> list[Packet]:
packets: list[Packet] = []
@@ -208,6 +211,7 @@ def create_custom_tool_packets(
response_type=response_type,
data=data,
file_ids=file_ids,
calling_agent_name=calling_agent_name,
),
),
)
@@ -233,6 +237,7 @@ def create_search_packets(
saved_search_docs: list[SavedSearchDoc],
is_internet_search: bool,
step_nr: int,
calling_agent_name: str | None = None,
) -> list[Packet]:
packets: list[Packet] = []
@@ -251,6 +256,7 @@ def create_search_packets(
obj=SearchToolDelta(
queries=search_queries,
documents=saved_search_docs,
calling_agent_name=calling_agent_name,
),
),
)
@@ -332,8 +338,14 @@ def translate_db_message_to_packets_simple(
cited_docs: list[SavedSearchDoc] = []
fetches: list[list[SavedSearchDoc]] = []
is_web_fetch: bool = False
calling_agent_name: str | None = None
for sub_step in sub_steps:
# Track calling_agent_name from first sub_step
if calling_agent_name is None and hasattr(
sub_step, "calling_agent_name"
):
calling_agent_name = sub_step.calling_agent_name
# For v2 tools, use the queries field if available, otherwise fall back to sub_step_instructions
if sub_step.queries:
tasks.extend(sub_step.queries)
@@ -399,7 +411,9 @@ def translate_db_message_to_packets_simple(
if tool_name in [SearchTool.__name__, KnowledgeGraphTool.__name__]:
cited_docs = cast(list[SavedSearchDoc], cited_docs)
packet_list.extend(
create_search_packets(tasks, cited_docs, False, step_nr)
create_search_packets(
tasks, cited_docs, False, step_nr, calling_agent_name
)
)
step_nr += 1
@@ -409,7 +423,9 @@ def translate_db_message_to_packets_simple(
packet_list.extend(create_fetch_packets(fetches, step_nr))
else:
packet_list.extend(
create_search_packets(tasks, cited_docs, True, step_nr)
create_search_packets(
tasks, cited_docs, True, step_nr, calling_agent_name
)
)
step_nr += 1
@@ -418,7 +434,9 @@ def translate_db_message_to_packets_simple(
if sub_step.generated_images:
packet_list.extend(
create_image_generation_packets(
sub_step.generated_images.images, step_nr
sub_step.generated_images.images,
step_nr,
calling_agent_name,
)
)
step_nr += 1
@@ -430,6 +448,7 @@ def translate_db_message_to_packets_simple(
response_type="text",
step_nr=step_nr,
data=sub_step.sub_answer,
calling_agent_name=calling_agent_name,
)
)
step_nr += 1
@@ -441,6 +460,7 @@ def translate_db_message_to_packets_simple(
response_type="text",
step_nr=step_nr,
data=sub_step.sub_answer,
calling_agent_name=calling_agent_name,
)
)
step_nr += 1

View File

@@ -16,17 +16,19 @@ from onyx.server.query_and_chat.streaming_models import Packet
from onyx.tools.built_in_tools_v2 import BUILT_IN_TOOL_MAP_V2
from onyx.tools.force import ForceUseTool
from onyx.tools.tool import Tool
from onyx.tools.tool_implementations.agent.agent_tool import AgentTool
from onyx.tools.tool_implementations.custom.custom_tool import CustomTool
from onyx.tools.tool_implementations.mcp.mcp_tool import MCPTool
from onyx.tools.tool_implementations_v2.agent_tool import call_agent
from onyx.tools.tool_implementations_v2.tool_accounting import tool_accounting
# Type alias for tools that need custom handling
CustomOrMcpTool = Union[CustomTool, MCPTool]
CustomOrMcpOrAgentTool = Union[CustomTool, MCPTool, AgentTool]
def is_custom_or_mcp_tool(tool: Tool) -> bool:
"""Check if a tool is a CustomTool or MCPTool."""
return isinstance(tool, CustomTool) or isinstance(tool, MCPTool)
def is_custom_or_mcp_or_agent_tool(tool: Tool) -> bool:
    """Check if a tool is a CustomTool, MCPTool, or AgentTool."""
    specially_handled_types = (CustomTool, MCPTool, AgentTool)
    return isinstance(tool, specially_handled_types)
@tool_accounting
@@ -109,6 +111,32 @@ def custom_or_mcp_tool_to_function_tool(tool: Tool) -> FunctionTool:
)
def agent_tool_to_function_tool(agent_tool: AgentTool) -> FunctionTool:
    """Convert an AgentTool to a FunctionTool that calls call_agent."""
    # Bind the target persona ID once; the wrapper closes over it.
    bound_persona_id = agent_tool.target_persona_id

    async def invoke_agent(
        context: RunContextWrapper[ChatTurnContext], json_string: str
    ) -> str:
        # The LLM hands us a JSON blob; pull the delegated query out of it.
        parsed_args = json.loads(json_string)
        delegated_query = parsed_args.get("query", "")
        # Delegate to the persona bound to this AgentTool.
        return call_agent(
            run_context=context,
            query=delegated_query,
            agent_persona_id=bound_persona_id,
        )

    parameters_schema = agent_tool.tool_definition()["function"]["parameters"]
    return FunctionTool(
        name=agent_tool.name,
        description=agent_tool.description,
        params_json_schema=parameters_schema,
        on_invoke_tool=invoke_agent,
    )
def tools_to_function_tools(tools: Sequence[Tool]) -> Sequence[FunctionTool]:
onyx_tools: Sequence[Sequence[FunctionTool]] = [
BUILT_IN_TOOL_MAP_V2[type(tool).__name__]
@@ -121,10 +149,15 @@ def tools_to_function_tools(tools: Sequence[Tool]) -> Sequence[FunctionTool]:
custom_and_mcp_tools: list[FunctionTool] = [
custom_or_mcp_tool_to_function_tool(tool)
for tool in tools
if is_custom_or_mcp_tool(tool)
if isinstance(tool, (CustomTool, MCPTool))
]
agent_tools: list[FunctionTool] = [
agent_tool_to_function_tool(tool) # type: ignore
for tool in tools
if isinstance(tool, AgentTool)
]
return flattened_builtin_tools + custom_and_mcp_tools
return flattened_builtin_tools + custom_and_mcp_tools + agent_tools
def force_use_tool_to_function_tool_names(

View File

@@ -1,6 +1,7 @@
from typing import Type
from typing import Union
from onyx.tools.tool_implementations.agent.agent_tool import AgentTool
from onyx.tools.tool_implementations.images.image_generation_tool import (
ImageGenerationTool,
)
@@ -20,7 +21,12 @@ logger = setup_logger()
BUILT_IN_TOOL_TYPES = Union[
SearchTool, ImageGenerationTool, WebSearchTool, KnowledgeGraphTool, OktaProfileTool
SearchTool,
ImageGenerationTool,
WebSearchTool,
KnowledgeGraphTool,
OktaProfileTool,
AgentTool,
]
# same as d09fc20a3c66_seed_builtin_tools.py
@@ -30,6 +36,7 @@ BUILT_IN_TOOL_MAP: dict[str, Type[BUILT_IN_TOOL_TYPES]] = {
WebSearchTool.__name__: WebSearchTool,
KnowledgeGraphTool.__name__: KnowledgeGraphTool,
OktaProfileTool.__name__: OktaProfileTool,
AgentTool.__name__: AgentTool,
}

View File

@@ -1,5 +1,6 @@
from agents import FunctionTool
from onyx.tools.tool_implementations.agent.agent_tool import AgentTool
from onyx.tools.tool_implementations.images.image_generation_tool import (
ImageGenerationTool,
)
@@ -7,6 +8,7 @@ from onyx.tools.tool_implementations.search.search_tool import SearchTool
from onyx.tools.tool_implementations.web_search.web_search_tool import (
WebSearchTool,
)
from onyx.tools.tool_implementations_v2.agent_tool import call_agent
from onyx.tools.tool_implementations_v2.image_generation import image_generation
from onyx.tools.tool_implementations_v2.internal_search import internal_search
from onyx.tools.tool_implementations_v2.web import open_url
@@ -16,4 +18,5 @@ BUILT_IN_TOOL_MAP_V2: dict[str, list[FunctionTool]] = {
SearchTool.__name__: [internal_search],
ImageGenerationTool.__name__: [image_generation],
WebSearchTool.__name__: [web_search, open_url],
AgentTool.__name__: [call_agent],
}

View File

@@ -44,6 +44,7 @@ from onyx.onyxbot.slack.models import SlackContext
from onyx.tools.built_in_tools import get_built_in_tool_by_id
from onyx.tools.models import DynamicSchemaInfo
from onyx.tools.tool import Tool
from onyx.tools.tool_implementations.agent.agent_tool import AgentTool
from onyx.tools.tool_implementations.custom.custom_tool import (
build_custom_tools_from_openapi_schema_and_headers,
)
@@ -344,6 +345,40 @@ def construct_tools(
KnowledgeGraphTool(tool_id=db_tool_model.id)
]
# Handle Agent Tool
elif tool_cls.__name__ == AgentTool.__name__:
# AgentTools delegate to another persona
if db_tool_model.target_persona_id is None:
logger.warning(
f"AgentTool {db_tool_model.id} has no target_persona_id, skipping"
)
continue
# Get the target persona
from onyx.db.persona import get_persona_by_id
try:
target_persona = get_persona_by_id(
persona_id=db_tool_model.target_persona_id,
user=user,
db_session=db_session,
include_deleted=False,
is_for_edit=False,
)
tool_dict[db_tool_model.id] = [
AgentTool(
tool_id=db_tool_model.id,
target_persona=target_persona,
)
]
except Exception as e:
logger.error(
f"Failed to load target persona {db_tool_model.target_persona_id} "
f"for AgentTool {db_tool_model.id}: {e}"
)
continue
# Handle custom tools
elif db_tool_model.openapi_schema:
if not custom_tool_config:

View File

@@ -0,0 +1,122 @@
"""AgentTool implementation for agent-to-agent delegation."""
from collections.abc import Generator
from typing import Any
from onyx.db.models import Persona
from onyx.llm.interfaces import LLM
from onyx.llm.models import PreviousMessage
from onyx.tools.base_tool import BaseTool
from onyx.tools.models import ToolResponse
from onyx.utils.special_types import JSON_ro
class AgentTool(BaseTool):
    """Tool that delegates tasks to another persona (subagent)."""

    def __init__(
        self,
        tool_id: int,
        target_persona: Persona,
    ) -> None:
        """Bind this tool to the persona it delegates to.

        Args:
            tool_id: DB id of the Tool row backing this instance.
            target_persona: The persona (subagent) that handles delegated tasks.
        """
        self._id = tool_id
        self._target_persona = target_persona
        # e.g. persona "Sales Helper" -> LLM-facing name "call_sales_helper"
        self._name = f"call_{target_persona.name.lower().replace(' ', '_')}"
        # The display name is just the persona's name -- no interpolation needed.
        self._display_name = target_persona.name
        self._description = (
            f"Delegate tasks to the {target_persona.name} agent. "
            f"{target_persona.description or ''}"
        )

    @property
    def id(self) -> int:
        return self._id

    @property
    def name(self) -> str:
        return self._name

    @property
    def description(self) -> str:
        return self._description

    @property
    def display_name(self) -> str:
        return self._display_name

    @property
    def target_persona_id(self) -> int:
        # ID of the persona this tool delegates to.
        return self._target_persona.id

    def tool_definition(self) -> dict:
        """Return the tool definition for LLM tool calling."""
        return {
            "type": "function",
            "function": {
                "name": self._name,
                "description": self._description,
                "parameters": {
                    "type": "object",
                    "properties": {
                        "query": {
                            "type": "string",
                            "description": f"The question or task to delegate to {self._target_persona.name}",
                        }
                    },
                    "required": ["query"],
                },
            },
        }

    def build_tool_message_content(
        self, *args: ToolResponse
    ) -> str | list[str | dict[str, Any]]:
        """Build the message content from tool responses."""
        # AgentTool returns JSON responses
        if not args:
            return "No response from subagent"
        # The response is already a JSON string
        return str(args[0].response)

    def get_args_for_non_tool_calling_llm(
        self,
        query: str,
        history: list[PreviousMessage],
        llm: LLM,
        force_run: bool = False,
    ) -> dict[str, Any] | None:
        """For non-tool-calling LLMs, always run the agent tool with the query."""
        # Return the query as the argument for the agent tool
        return {"query": query}

    def run(
        self, override_kwargs: None = None, **llm_kwargs: Any
    ) -> Generator[ToolResponse, None, None]:
        """Execute the agent tool by delegating to the target persona.

        This is a placeholder implementation. The actual execution happens in the
        v2 agent tool infrastructure or through direct persona invocation.
        """
        # Extract the query from llm_kwargs
        query = llm_kwargs.get("query", "")

        # Return a placeholder response indicating delegation
        yield ToolResponse(
            id="agent_tool_delegation",
            response={
                "type": "agent_delegation",
                "target_persona": self._target_persona.name,
                "query": query,
                "message": f"Delegating to {self._target_persona.name} agent",
            },
        )

    def final_result(self, *args: ToolResponse) -> JSON_ro:
        """Return the final result from the agent tool execution."""
        if not args:
            return {}
        # Return the aggregated response from all tool responses
        # For now, return the last response
        return args[-1].response

View File

@@ -0,0 +1,372 @@
import json
from typing import Any
from agents import function_tool
from agents import RunContextWrapper
from onyx.agents.agent_search.dr.models import IterationAnswer
from onyx.agents.agent_search.dr.models import IterationInstructions
from onyx.chat.answer import Answer
from onyx.chat.models import AnswerStyleConfig
from onyx.chat.models import CitationConfig
from onyx.chat.models import DocumentPruningConfig
from onyx.chat.models import PromptConfig
from onyx.chat.prompt_builder.answer_prompt_builder import AnswerPromptBuilder
from onyx.chat.prompt_builder.answer_prompt_builder import default_build_system_message
from onyx.chat.prompt_builder.answer_prompt_builder import default_build_user_message
from onyx.chat.turn.models import ChatTurnContext
from onyx.context.search.enums import OptionalSearchSetting
from onyx.context.search.models import InferenceSection
from onyx.context.search.models import RetrievalDetails
from onyx.db.persona import get_persona_by_id
from onyx.llm.factory import get_llms_for_persona
from onyx.server.query_and_chat.streaming_models import Packet
from onyx.server.query_and_chat.streaming_models import SearchToolStart
from onyx.tools.force import ForceUseTool
from onyx.tools.tool_constructor import construct_tools
from onyx.tools.tool_constructor import CustomToolConfig
from onyx.tools.tool_constructor import ImageGenerationToolConfig
from onyx.tools.tool_constructor import SearchToolConfig
from onyx.tools.tool_constructor import WebSearchToolConfig
from onyx.tools.tool_implementations_v2.tool_accounting import tool_accounting
from onyx.utils.logger import setup_logger
logger = setup_logger()
@tool_accounting
def _agent_tool_core(
    run_context: RunContextWrapper[ChatTurnContext],
    query: str,
    target_persona_id: int,
    agent_tool_id: int,
) -> dict[str, Any]:
    """
    Core agent tool logic that delegates to a subagent.

    This function:
    1. Gets the target persona and its configured tools
    2. Creates a system prompt with available tools
    3. Uses the LLM to decide which tools to call
    4. Executes those tools with calling_agent_name set
    5. Returns the aggregated results

    Args:
        run_context: Chat-turn context carrying run dependencies (DB session,
            emitter) and the aggregated iteration state.
        query: The task or question delegated to the subagent.
        target_persona_id: ID of the persona to run as the subagent.
        agent_tool_id: DB id of the AgentTool row (or -1 when unknown).

    Returns:
        A JSON-serializable dict with the agent name, status, query,
        tool-call count, and final response text.
    """
    # Leftover "bruh ..." info-level debug logs removed; keep a single
    # debug-level trace for delegation requests.
    logger.debug(f"Agent tool delegation requested for persona {target_persona_id}")
    index = run_context.context.current_run_step

    # Get the target persona
    target_persona = get_persona_by_id(
        persona_id=target_persona_id,
        user=None,  # Bypass auth for subagent calls
        db_session=run_context.context.run_dependencies.db_session,
        include_deleted=False,
        is_for_edit=False,
    )

    # Emit start event
    run_context.context.run_dependencies.emitter.emit(
        Packet(
            ind=index,
            obj=SearchToolStart(
                type="internal_search_tool_start", is_internet_search=False
            ),
        )
    )

    # Add iteration instructions
    run_context.context.iteration_instructions.append(
        IterationInstructions(
            iteration_nr=index,
            plan="plan",  # TODO: placeholder -- generate a real plan for this step
            purpose=f"Delegating to subagent: {target_persona.name}",
            reasoning=f"I am now delegating this task to the {target_persona.name} agent: {query}",
        )
    )

    # Get the LLM for the subagent persona
    llm, fast_llm = get_llms_for_persona(
        persona=target_persona,
        llm_override=None,
        additional_headers=None,
    )

    # Build the prompt config for the subagent
    prompt_config = PromptConfig.from_model(target_persona)

    # Construct tool configs similar to stream_chat_message_objects
    answer_style_config = AnswerStyleConfig(
        citation_config=CitationConfig(all_docs_useful=False),
        structured_response_format=None,
    )
    document_pruning_config = DocumentPruningConfig(
        max_chunks=int(
            target_persona.num_chunks if target_persona.num_chunks is not None else 10
        ),
        max_window_percentage=0.5,
    )

    tool_dict = construct_tools(
        persona=target_persona,
        prompt_config=prompt_config,
        db_session=run_context.context.run_dependencies.db_session,
        user=None,  # No user context for subagent
        llm=llm,
        fast_llm=fast_llm,
        run_search_setting=OptionalSearchSetting.AUTO,
        search_tool_config=SearchToolConfig(
            answer_style_config=answer_style_config,
            document_pruning_config=document_pruning_config,
            retrieval_options=RetrievalDetails(),
            rerank_settings=None,
            selected_sections=None,
            chunks_above=0,
            chunks_below=0,
            full_doc=False,
            latest_query_files=[],
            bypass_acl=False,
        ),
        internet_search_tool_config=WebSearchToolConfig(
            answer_style_config=answer_style_config,
            document_pruning_config=document_pruning_config,
        ),
        image_generation_tool_config=ImageGenerationToolConfig(
            additional_headers=None,
        ),
        custom_tool_config=CustomToolConfig(
            chat_session_id=run_context.context.chat_session_id,
            message_id=run_context.context.message_id,
            additional_headers=None,
        ),
        allowed_tool_ids=None,  # Subagent can use all its configured tools
        slack_context=None,  # Subagents don't have Slack context
    )

    # Flatten tool_dict to get list of tools
    subagent_tools = []
    for tool_list in tool_dict.values():
        subagent_tools.extend(tool_list)

    # Track how many iteration answers existed before we start
    num_iteration_answers_before = len(
        run_context.context.aggregated_context.global_iteration_responses
    )

    # Use Answer infrastructure for robust LLM orchestration
    final_response = ""
    num_tool_calls = 0

    try:
        # Build system message for the subagent
        system_message = default_build_system_message(
            prompt_config=prompt_config,
            llm_config=llm.config,
            mem_callback=None,  # No memory callback for subagent
        )

        # Build user message
        user_message = default_build_user_message(
            user_query=query,
            prompt_config=prompt_config,
            files=[],
        )

        # Build prompt builder with empty history (subagent starts fresh)
        prompt_builder = AnswerPromptBuilder(
            user_message=user_message,
            system_message=system_message,
            message_history=[],  # Subagent has no conversation history
            llm_config=llm.config,
            raw_user_query=query,
            raw_user_uploaded_files=[],
        )

        # Create Answer instance to handle the subagent's response
        answer = Answer(
            prompt_builder=prompt_builder,
            answer_style_config=answer_style_config,
            llm=llm,
            fast_llm=fast_llm,
            force_use_tool=ForceUseTool(force_use=False, tool_name=""),
            persona=target_persona,
            rerank_settings=None,
            chat_session_id=run_context.context.chat_session_id,
            current_agent_message_id=run_context.context.message_id,
            tools=subagent_tools,
            db_session=run_context.context.run_dependencies.db_session,
            latest_query_files=[],
            is_connected=None,
            use_agentic_search=False,  # Subagent doesn't use agentic search
        )

        # Process the answer stream
        for stream_part in answer.processed_streamed_output:
            # Collect text content from the answer
            if hasattr(stream_part, "answer_piece") and stream_part.answer_piece:
                final_response += stream_part.answer_piece

        # Count tool calls by checking how many new iteration answers were added
        num_tool_calls = (
            len(run_context.context.aggregated_context.global_iteration_responses)
            - num_iteration_answers_before
        )

        # Now retroactively mark all IterationAnswers created during this subagent's
        # execution with the calling_agent_name
        for i in range(
            num_iteration_answers_before,
            len(run_context.context.aggregated_context.global_iteration_responses),
        ):
            iteration_answer = (
                run_context.context.aggregated_context.global_iteration_responses[i]
            )
            # Mark this as called by the subagent
            iteration_answer.calling_agent_name = target_persona.name

        # Generate final response if none was collected
        if not final_response:
            final_response = (
                f"Subagent {target_persona.name} completed {num_tool_calls} tool calls"
            )

    except Exception as e:
        logger.exception(f"Error executing subagent {target_persona.name}")
        final_response = f"Error executing subagent: {str(e)}"
        num_tool_calls = 0

    # Add the subagent delegation itself as an iteration answer
    run_context.context.aggregated_context.global_iteration_responses.append(
        IterationAnswer(
            tool=f"AgentTool_{target_persona.name}",
            tool_id=agent_tool_id,
            iteration_nr=index,
            parallelization_nr=0,
            question=query,
            reasoning=f"Delegated to {target_persona.name}: {query}",
            answer=final_response,
            cited_documents={},
            calling_agent_name=None,  # Primary agent is calling this
        )
    )

    return {
        "agent": target_persona.name,
        "status": "completed",
        "query": query,
        "tool_calls": num_tool_calls,
        "result": final_response,
    }
@function_tool
def call_agent(
    run_context: RunContextWrapper[ChatTurnContext],
    query: str,
    agent_persona_id: int,
) -> str:
    """
    Tool for delegating tasks to specialized subagents.

    Use this tool when you need to delegate a specific task to another agent
    that has specialized knowledge or capabilities. The subagent will have
    access to its own set of tools and will handle the task independently.

    Each tool call made by the subagent will be tracked with the subagent's
    name in the calling_agent_name field of the IterationAnswer.

    Args:
        query: The question or task to delegate to the subagent
        agent_persona_id: The ID of the persona/agent to call

    Returns:
        JSON string containing the delegation status and results
    """
    # Resolve the DB id of the AgentTool that delegates to this persona.
    # -1 signals "not found"; _agent_tool_core is still invoked with the
    # fallback id so delegation proceeds either way.
    agent_tool_id = -1

    name_marker = f"agent_{agent_persona_id}"
    for tool in run_context.context.run_dependencies.tools:
        if not hasattr(tool, "id"):
            continue
        # Preferred: exact match on the tool's target persona id (the column
        # added for AgentTools) — unambiguous when present.
        if getattr(tool, "target_persona_id", None) == agent_persona_id:
            agent_tool_id = tool.id
            break
        if hasattr(tool, "name"):
            # Fallback: name-based match. Require that the character right
            # after the persona id is not a digit, so "agent_1" does not
            # match a tool named for "agent_12" (a plain substring check did).
            lowered = tool.name.lower()
            idx = lowered.find(name_marker)
            if idx != -1:
                end = idx + len(name_marker)
                if end == len(lowered) or not lowered[end].isdigit():
                    agent_tool_id = tool.id
                    break

    result = _agent_tool_core(
        run_context=run_context,
        query=query,
        target_persona_id=agent_persona_id,
        agent_tool_id=agent_tool_id,
    )
    return json.dumps(result)
# Helper function to create IterationAnswers for subagent tool calls
def create_subagent_iteration_answer(
    run_context: RunContextWrapper[ChatTurnContext],
    calling_agent_name: str,
    tool_name: str,
    tool_id: int,
    iteration_nr: int,
    parallelization_nr: int,
    question: str,
    reasoning: str | None,
    answer: str,
    cited_documents: dict[int, InferenceSection] | None = None,
    claims: list[str] | None = None,
    is_web_fetch: bool = False,
    queries: list[str] | None = None,
    generated_images: list[Any] | None = None,
    additional_data: dict[str, str] | None = None,
) -> IterationAnswer:
    """
    Build and record an IterationAnswer for a tool call made by a subagent.

    Tool implementations call this when they are executed on behalf of a
    subagent (as indicated by context or parameters); calling_agent_name
    records which subagent triggered the call. The new record is appended to
    the global iteration responses before being returned.
    """
    record = IterationAnswer(
        tool=tool_name,
        tool_id=tool_id,
        iteration_nr=iteration_nr,
        parallelization_nr=parallelization_nr,
        question=question,
        reasoning=reasoning,
        answer=answer,
        cited_documents=cited_documents or {},
        claims=claims,
        is_web_fetch=is_web_fetch,
        queries=queries,
        generated_images=generated_images,
        additional_data=additional_data,
        calling_agent_name=calling_agent_name,  # attribute the call to the subagent
    )
    # Make the record visible alongside the primary agent's iteration history.
    run_context.context.aggregated_context.global_iteration_responses.append(record)
    return record
# Long description for the LLM to understand when to use this tool.
# NOTE: this text is sent to the model verbatim as part of the AgentTool
# definition, so any edit here changes runtime prompting behavior.
AGENT_TOOL_LONG_DESCRIPTION = """
### Decision boundary
- Use this tool when you need to delegate a task to a specialized agent
- The subagent will have its own tools and capabilities
- Each subagent is designed for specific types of tasks
- The subagent's tool calls will be tracked separately in the iteration history
### When NOT to use
- For simple questions that you can answer directly
- When no specialized agent is available for the task
- For tasks that require coordination across multiple domains
### Usage hints
- Be specific in your query to the subagent
- The subagent will make its own tool calls which will be visible in the iteration history
- Each tool call made by the subagent will be marked with the subagent's name
- Review the subagent's work before incorporating it into your final answer
"""

View File

@@ -63,6 +63,7 @@ import * as Yup from "yup";
import { SettingsContext } from "@/components/settings/SettingsProvider";
import {
FullPersona,
MinimalPersonaSnapshot,
PersonaLabel,
StarterMessage,
} from "@/app/admin/assistants/interfaces";
@@ -85,6 +86,7 @@ import StarterMessagesList from "@/app/admin/assistants/StarterMessageList";
import { SwitchField } from "@/components/ui/switch";
import { generateIdenticon } from "@/refresh-components/AgentIcon";
import AgentIcon from "@/refresh-components/AgentIcon";
import { BackButton } from "@/components/BackButton";
import { AdvancedOptionsToggle } from "@/components/AdvancedOptionsToggle";
import { MinimalUserSnapshot } from "@/lib/types";
@@ -192,6 +194,9 @@ export function AssistantEditor({
useState<MinimalOnyxDocument | null>(null);
const [showAdvancedOptions, setShowAdvancedOptions] = useState(false);
const [showAllUserFiles, setShowAllUserFiles] = useState(false);
const [availablePersonas, setAvailablePersonas] = useState<
MinimalPersonaSnapshot[]
>([]);
// both `defautIconColor` and `defaultIconShape` are state so that they
// persist across formik reformatting
@@ -203,6 +208,24 @@ export function AssistantEditor({
);
const [isRefreshing, setIsRefreshing] = useState(false);
// Fetch available personas for subagent selection
useEffect(() => {
const fetchPersonas = async () => {
try {
const response = await fetch("/api/persona");
if (response.ok) {
const personas = await response.json();
setAvailablePersonas(personas);
} else {
console.error("Failed to fetch personas, status:", response.status);
}
} catch (error) {
console.error("Failed to fetch personas:", error);
}
};
fetchPersonas();
}, []);
const [removePersonaImage, setRemovePersonaImage] = useState(false);
const [uploadedImagePreview, setUploadedImagePreview] = useState<
string | null
@@ -244,39 +267,46 @@ export function AssistantEditor({
const webSearchTool = findWebSearchTool(tools);
// Separate MCP tools from regular custom tools - memoize to prevent re-renders
const { mcpTools, customTools, mcpToolsByServer } = useMemo(() => {
const allCustom = tools.filter(
(tool) =>
tool.in_code_tool_id !== searchTool?.in_code_tool_id &&
tool.in_code_tool_id !== imageGenerationTool?.in_code_tool_id &&
tool.in_code_tool_id !== webSearchTool?.in_code_tool_id
);
const { mcpTools, customTools, mcpToolsByServer, agentTools } =
useMemo(() => {
const allCustom = tools.filter(
(tool) =>
tool.in_code_tool_id !== searchTool?.in_code_tool_id &&
tool.in_code_tool_id !== imageGenerationTool?.in_code_tool_id &&
tool.in_code_tool_id !== webSearchTool?.in_code_tool_id &&
tool.in_code_tool_id !== "AgentTool"
);
const mcp = allCustom.filter((tool) => tool.mcp_server_id);
const custom = allCustom.filter((tool) => !tool.mcp_server_id);
const agentTools = tools.filter(
(tool) => tool.in_code_tool_id === "AgentTool"
);
const mcp = allCustom.filter((tool) => tool.mcp_server_id);
const custom = allCustom.filter((tool) => !tool.mcp_server_id);
// Group MCP tools by server
const groups: { [serverId: number]: ToolSnapshot[] } = {};
mcp.forEach((tool) => {
if (tool.mcp_server_id) {
if (!groups[tool.mcp_server_id]) {
groups[tool.mcp_server_id] = [];
// Group MCP tools by server
const groups: { [serverId: number]: ToolSnapshot[] } = {};
mcp.forEach((tool) => {
if (tool.mcp_server_id) {
if (!groups[tool.mcp_server_id]) {
groups[tool.mcp_server_id] = [];
}
groups[tool.mcp_server_id]!.push(tool);
}
groups[tool.mcp_server_id]!.push(tool);
}
});
});
return {
mcpTools: mcp,
customTools: custom,
mcpToolsByServer: groups,
};
}, [
tools,
searchTool?.in_code_tool_id,
imageGenerationTool?.in_code_tool_id,
webSearchTool?.in_code_tool_id,
]);
return {
mcpTools: mcp,
customTools: custom,
mcpToolsByServer: groups,
agentTools: agentTools,
};
}, [
tools,
searchTool?.in_code_tool_id,
imageGenerationTool?.in_code_tool_id,
webSearchTool?.in_code_tool_id,
"AgentTool",
]);
// Helper functions for MCP server checkbox state - memoize to prevent re-renders
const getMCPServerCheckboxState = useCallback(
@@ -331,6 +361,7 @@ export function AssistantEditor({
...(searchTool ? [searchTool] : []),
...(imageGenerationTool ? [imageGenerationTool] : []),
...(webSearchTool ? [webSearchTool] : []),
...agentTools,
];
const enabledToolsMap: { [key: number]: boolean } = {};
availableTools.forEach((tool) => {
@@ -395,6 +426,10 @@ export function AssistantEditor({
? "user_files"
: "team_knowledge",
is_default_persona: existingPersona?.is_default_persona ?? false,
subagents:
existingPersona?.tools.filter(
(tool) => tool.in_code_tool_id === "AgentTool"
) ?? [],
};
interface AssistantPrompt {
@@ -708,6 +743,10 @@ export function AssistantEditor({
const groups = values.is_public ? [] : values.selectedGroups;
const teamKnowledge = values.knowledge_source === "team_knowledge";
const subagent_persona_ids = values.subagents.map(
(p: ToolSnapshot) => p.target_persona_id!
);
const submissionData: PersonaUpsertParameters = {
...values,
icon_color: values.icon_color ?? null,
@@ -720,6 +759,7 @@ export function AssistantEditor({
...values.selectedUsers.map((u: MinimalUserSnapshot) => u.id),
],
tool_ids: enabledTools,
subagent_persona_ids: subagent_persona_ids,
remove_image: removePersonaImage,
search_start_date: values.search_start_date
? new Date(values.search_start_date)
@@ -1453,6 +1493,151 @@ export function AssistantEditor({
</div>
<Separator className="max-w-4xl mt-0" />
{/* Subagent Selection Section */}
<div className="-mt-2">
<div className="flex gap-x-2 mb-2 items-center">
<UserIcon className="w-4 h-4 shrink-0" />
<div className="block font-medium text-sm">Subagents</div>
</div>
<div className="text-sm text-text-500 mb-3">
Select other assistants to make available as tools. Your
assistant can delegate tasks to these subagents.
</div>
{availablePersonas.length === 0 ? (
<div className="text-sm text-text-500">
Loading personas...
</div>
) : (
<FastField name="subagents">
{({ field, form }: any) => (
<div className="space-y-2">
{/* Display selected subagents */}
{field.value.length > 0 && (
<div className="flex flex-wrap gap-2 mb-3">
{field.value.map((subagent: ToolSnapshot) => {
// Use target_persona_id as the unique identifier for subagents
const personaId = subagent.target_persona_id;
// Look up the full persona for display purposes
const persona = availablePersonas.find(
(p) => p.id === personaId
);
const displayName =
subagent.display_name ||
subagent.name ||
persona?.display_name ||
persona?.name;
return (
<div
key={personaId}
className="flex items-center gap-x-2 px-3 py-1.5 rounded-lg bg-background-100 border border-border-medium"
>
{persona && (
<AgentIcon agent={persona} size={16} />
)}
<span className="text-sm">
{displayName}
</span>
<button
type="button"
onClick={() => {
const updated = field.value.filter(
(s: ToolSnapshot) =>
s.target_persona_id !== personaId
);
form.setFieldValue(
"subagents",
updated
);
// Also remove from enabled_tools_map if it exists
if (subagent.id) {
const updatedToolsMap = {
...values.enabled_tools_map,
};
delete updatedToolsMap[subagent.id];
form.setFieldValue(
"enabled_tools_map",
updatedToolsMap
);
}
}}
className="hover:text-text-900"
>
×
</button>
</div>
);
})}
</div>
)}
{/* Dropdown to add more subagents */}
<SearchMultiSelectDropdown
options={availablePersonas
.filter(
(p) =>
p.id !== existingPersona?.id &&
!field.value.some(
(s: ToolSnapshot) =>
s.target_persona_id === p.id
) // Don't show already selected
)
.map((persona) => ({
name: persona.display_name || persona.name,
value: persona.id.toString(),
}))}
onSelect={(option) => {
const personaId = parseInt(
option.value as string
);
const persona = availablePersonas.find(
(p) => p.id === personaId
);
if (persona) {
// Find if this subagent already exists as a tool
const existingAgentTool = agentTools.find(
(tool) =>
tool.target_persona_id === persona.id
);
// Create a new subagent tool object
const newSubagent = {
id: existingAgentTool?.id, // Use existing tool id if available
name: persona.name,
display_name: persona.display_name,
in_code_tool_id: "AgentTool",
target_persona_id: persona.id,
};
form.setFieldValue("subagents", [
...field.value,
newSubagent,
]);
// Also enable in enabled_tools_map if it has an id
if (existingAgentTool?.id) {
form.setFieldValue(
`enabled_tools_map.${existingAgentTool.id}`,
true
);
}
}
}}
itemComponent={({ option }) => (
<div className="flex px-4 py-2.5 cursor-pointer hover:bg-accent-background-hovered">
<UserIcon className="w-4 h-4 mr-2 my-auto" />
<span className="text-sm">{option.name}</span>
</div>
)}
/>
</div>
)}
</FastField>
)}
</div>
<Separator className="max-w-4xl mt-0" />
<div className="-mt-2">
<div className="flex gap-x-2 mb-2 items-center">
<div className="block font-medium text-sm">

View File

@@ -11,6 +11,7 @@ export interface StarterMessage extends StarterMessageBase {
export interface MinimalPersonaSnapshot {
id: number;
name: string;
display_name: string;
description: string;
tools: ToolSnapshot[];
starter_messages: StarterMessage[] | null;

View File

@@ -19,6 +19,7 @@ interface PersonaUpsertRequest {
users?: string[];
groups: number[];
tool_ids: number[];
subagent_persona_ids: number[];
icon_color: string | null;
icon_shape: number | null;
remove_image?: boolean;
@@ -46,6 +47,7 @@ export interface PersonaUpsertParameters {
users?: string[];
groups: number[];
tool_ids: number[];
subagent_persona_ids: number[];
icon_color: string | null;
icon_shape: number | null;
remove_image?: boolean;
@@ -111,6 +113,7 @@ function buildPersonaUpsertRequest(
remove_image,
search_start_date,
user_file_ids,
subagent_persona_ids,
} = creationRequest;
return {
@@ -142,6 +145,7 @@ function buildPersonaUpsertRequest(
display_priority: null,
label_ids: creationRequest.label_ids ?? null,
user_file_ids: user_file_ids ?? null,
subagent_persona_ids: creationRequest.subagent_persona_ids ?? null,
};
}

View File

@@ -662,6 +662,9 @@ export function useChatController({
? assistantPreferences?.[liveAssistant?.id]?.disabled_tool_ids
: undefined;
console.log("liveAssistant?.tools:", liveAssistant?.tools);
console.log(disabledToolIds);
const stack = new CurrentMessageFIFO();
updateCurrentMessageFIFO(stack, {
signal: controller.signal,

View File

@@ -25,6 +25,7 @@ function constructCustomToolState(packets: CustomToolPacket[]) {
const responseType = latestDelta?.response_type || null;
const data = latestDelta?.data;
const fileIds = latestDelta?.file_ids || null;
const callingAgentName = latestDelta?.calling_agent_name || null;
const isRunning = Boolean(toolStart && !toolEnd);
const isComplete = Boolean(toolStart && toolEnd);
@@ -34,6 +35,7 @@ function constructCustomToolState(packets: CustomToolPacket[]) {
responseType,
data,
fileIds,
callingAgentName,
isRunning,
isComplete,
};
@@ -45,8 +47,15 @@ export const CustomToolRenderer: MessageRenderer<CustomToolPacket, {}> = ({
renderType,
children,
}) => {
const { toolName, responseType, data, fileIds, isRunning, isComplete } =
constructCustomToolState(packets);
const {
toolName,
responseType,
data,
fileIds,
callingAgentName,
isRunning,
isComplete,
} = constructCustomToolState(packets);
useEffect(() => {
if (isComplete) {
@@ -55,25 +64,29 @@ export const CustomToolRenderer: MessageRenderer<CustomToolPacket, {}> = ({
}, [isComplete, onComplete]);
const status = useMemo(() => {
const agentPrefix = callingAgentName ? `[${callingAgentName}] ` : "";
if (isComplete) {
if (responseType === "image") return `${toolName} returned images`;
if (responseType === "csv") return `${toolName} returned a file`;
return `${toolName} completed`;
if (responseType === "image")
return `${agentPrefix}${toolName} returned images`;
if (responseType === "csv")
return `${agentPrefix}${toolName} returned a file`;
return `${agentPrefix}${toolName} completed`;
}
if (isRunning) return `${toolName} running...`;
if (isRunning) return `${agentPrefix}${toolName} running...`;
return null;
}, [toolName, responseType, isComplete, isRunning]);
}, [toolName, responseType, callingAgentName, isComplete, isRunning]);
const icon = FiTool;
if (renderType === RenderType.HIGHLIGHT) {
const agentPrefix = callingAgentName ? `[${callingAgentName}] ` : "";
return children({
icon,
status: status,
content: (
<div className="text-sm text-muted-foreground">
{isRunning && `${toolName} running...`}
{isComplete && `${toolName} completed`}
{isRunning && `${agentPrefix}${toolName} running...`}
{isComplete && `${agentPrefix}${toolName} completed`}
</div>
),
});

View File

@@ -27,12 +27,15 @@ function constructCurrentImageState(packets: ImageGenerationToolPacket[]) {
const prompt = ""; // Image generation tools don't have a main description
const images = imageDeltas.flatMap((delta) => delta?.images || []);
const latestDelta = imageDeltas[imageDeltas.length - 1] || null;
const callingAgentName = latestDelta?.calling_agent_name || null;
const isGenerating = imageStart && !imageEnd;
const isComplete = imageStart && imageEnd;
return {
prompt,
images,
callingAgentName,
isGenerating,
isComplete,
error: false, // For now, we don't have error state in the packets
@@ -43,7 +46,7 @@ export const ImageToolRenderer: MessageRenderer<
ImageGenerationToolPacket,
{}
> = ({ packets, onComplete, renderType, children }) => {
const { prompt, images, isGenerating, isComplete, error } =
const { prompt, images, callingAgentName, isGenerating, isComplete, error } =
constructCurrentImageState(packets);
useEffect(() => {
@@ -52,15 +55,19 @@ export const ImageToolRenderer: MessageRenderer<
}
}, [isComplete]);
const agentPrefix = callingAgentName ? `[${callingAgentName}] ` : "";
const status = useMemo(() => {
if (isComplete) {
return `Generated ${images.length} image${images.length > 1 ? "s" : ""}`;
return `${agentPrefix}Generated ${images.length} image${
images.length > 1 ? "s" : ""
}`;
}
if (isGenerating) {
return "Generating image...";
return `${agentPrefix}Generating image...`;
}
return null;
}, [isComplete, isGenerating, images.length]);
}, [agentPrefix, isComplete, isGenerating, images.length]);
// Render based on renderType
if (renderType === RenderType.FULL) {
@@ -69,7 +76,7 @@ export const ImageToolRenderer: MessageRenderer<
if (isGenerating) {
return children({
icon: FiImage,
status: "Generating images...",
status: `${agentPrefix}Generating images...`,
content: (
<div className="flex flex-col">
<div>
@@ -84,7 +91,7 @@ export const ImageToolRenderer: MessageRenderer<
if (isComplete) {
return children({
icon: FiImage,
status: `Generated ${images.length} image${
status: `${agentPrefix}Generated ${images.length} image${
images.length !== 1 ? "s" : ""
}`,
content: (
@@ -128,7 +135,7 @@ export const ImageToolRenderer: MessageRenderer<
if (isGenerating) {
return children({
icon: FiImage,
status: "Generating image...",
status: `${agentPrefix}Generating image...`,
content: (
<div className="flex items-center gap-2 text-sm text-muted-foreground">
<div className="flex gap-0.5">
@@ -142,7 +149,7 @@ export const ImageToolRenderer: MessageRenderer<
style={{ animationDelay: "0.2s" }}
></div>
</div>
<span>Generating image...</span>
<span>{agentPrefix}Generating image...</span>
</div>
),
});
@@ -151,10 +158,10 @@ export const ImageToolRenderer: MessageRenderer<
if (error) {
return children({
icon: FiImage,
status: "Image generation failed",
status: `${agentPrefix}Image generation failed`,
content: (
<div className="text-sm text-red-600 dark:text-red-400">
Image generation failed
{agentPrefix}Image generation failed
</div>
),
});
@@ -163,10 +170,12 @@ export const ImageToolRenderer: MessageRenderer<
if (isComplete && images.length > 0) {
return children({
icon: FiImage,
status: `Generated ${images.length} image${images.length > 1 ? "s" : ""}`,
status: `${agentPrefix}Generated ${images.length} image${
images.length > 1 ? "s" : ""
}`,
content: (
<div className="text-sm text-muted-foreground">
Generated {images.length} image
{agentPrefix}Generated {images.length} image
{images.length > 1 ? "s" : ""}
</div>
),
@@ -175,9 +184,11 @@ export const ImageToolRenderer: MessageRenderer<
return children({
icon: FiImage,
status: "Image generation",
status: `${agentPrefix}Image generation`,
content: (
<div className="text-sm text-muted-foreground">Image generation</div>
<div className="text-sm text-muted-foreground">
{agentPrefix}Image generation
</div>
),
});
};

View File

@@ -71,6 +71,7 @@ export interface SearchToolDelta extends BaseObj {
type: "internal_search_tool_delta";
queries: string[] | null;
documents: OnyxDocument[] | null;
calling_agent_name?: string | null;
}
export type ImageShape = "square" | "landscape" | "portrait";
@@ -89,6 +90,7 @@ export interface ImageGenerationToolStart extends BaseObj {
export interface ImageGenerationToolDelta extends BaseObj {
type: "image_generation_tool_delta";
images: GeneratedImage[];
calling_agent_name?: string | null;
}
export interface FetchToolStart extends BaseObj {
@@ -109,6 +111,7 @@ export interface CustomToolDelta extends BaseObj {
response_type: string;
data?: any;
file_ids?: string[] | null;
calling_agent_name?: string | null;
}
// Reasoning Packets

View File

@@ -35,6 +35,9 @@ export interface ToolSnapshot {
// OAuth configuration for this tool
oauth_config_id?: number | null;
// only specified for AgentTools. ID of the persona this tool delegates to.
target_persona_id?: number | null;
oauth_config_name?: string | null;
// If this is an MCP tool, which server it belongs to