Compare commits

..

16 Commits

Author  SHA1  Message  Date
Wenxi Onyx  4feca94f08  colors  2026-04-14 19:26:33 -07:00
Wenxi Onyx  e9538c2a8f  8  2026-04-14 17:57:54 -07:00
Wenxi Onyx  bc39465a5c  7  2026-04-14 15:17:47 -07:00
Wenxi Onyx  48463a353d  6  2026-04-14 14:15:47 -07:00
Wenxi Onyx  0c61b3bb97  rebase on main  2026-04-14 14:15:01 -07:00
Wenxi Onyx  0948f58fa0  5  2026-04-14 14:15:01 -07:00
Wenxi Onyx  ac448cf3c5  4  2026-04-14 14:15:01 -07:00
Wenxi Onyx  f7771847fb  fix proposal creation  2026-04-14 14:15:01 -07:00
Wenxi Onyx  c61adc6560  init 3  2026-04-14 14:15:01 -07:00
Wenxi Onyx  4b0cb5b9c3  init 2  2026-04-14 14:15:01 -07:00
Wenxi Onyx  21293f6621  grant review init  2026-04-14 14:15:01 -07:00
acaprau  0f31c490fa  chore(opensearch): Add debug log for when the migration task releases its lock (#10190)  2026-04-14 14:08:48 -07:00
Wenxi  c9a4a6e42b  fix: text shimmer animation nice and fast (#10184)  2026-04-14 20:59:00 +00:00
Nikolas Garza  558c9df3c7  fix(chat): eliminate long-lived DB session in multi-model worker threads (#10159)  2026-04-14 20:37:05 +00:00
Jamison Lahman  30003036d3  chore(fe): Toast logs to the console by default in dev (#10183)  2026-04-14 20:34:04 +00:00
Nikolas Garza  4b2f18c239  fix(chat): speed up text gen (#10186)  2026-04-14 13:41:29 -07:00
117 changed files with 16312 additions and 290 deletions

View File

@@ -12,7 +12,7 @@ founders@onyx.app for more information. Please visit https://github.com/onyx-dot
ARG ENABLE_CRAFT=false
# DO_NOT_TRACK is used to disable telemetry for Unstructured
ENV ONYX_RUNNING_IN_DOCKER="true" \
ENV DANSWER_RUNNING_IN_DOCKER="true" \
DO_NOT_TRACK="true" \
PLAYWRIGHT_BROWSERS_PATH="/app/.cache/ms-playwright"

View File

@@ -1,7 +1,7 @@
# Base stage with dependencies
FROM python:3.11.7-slim-bookworm AS base
ENV ONYX_RUNNING_IN_DOCKER="true" \
ENV DANSWER_RUNNING_IN_DOCKER="true" \
HF_HOME=/app/.cache/huggingface
COPY --from=ghcr.io/astral-sh/uv:0.9.9 /uv /uvx /bin/

View File

@@ -0,0 +1,499 @@
"""add proposal review tables
Revision ID: 61ea78857c97
Revises: d129f37b3d87
Create Date: 2026-04-09 10:00:00.000000
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
import fastapi_users_db_sqlalchemy
# revision identifiers, used by Alembic.
revision = "61ea78857c97"
down_revision = "d129f37b3d87"
branch_labels: str | None = None
depends_on: str | None = None
def upgrade() -> None:
# -- proposal_review_ruleset --
op.create_table(
"proposal_review_ruleset",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column("tenant_id", sa.Text(), nullable=False),
sa.Column("name", sa.Text(), nullable=False),
sa.Column("description", sa.Text(), nullable=True),
sa.Column(
"is_default",
sa.Boolean(),
server_default=sa.text("false"),
nullable=False,
),
sa.Column(
"is_active",
sa.Boolean(),
server_default=sa.text("true"),
nullable=False,
),
sa.Column(
"created_by",
fastapi_users_db_sqlalchemy.generics.GUID(),
nullable=True,
),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.ForeignKeyConstraint(["created_by"], ["user.id"]),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
"ix_proposal_review_ruleset_tenant_id",
"proposal_review_ruleset",
["tenant_id"],
)
# -- proposal_review_rule --
op.create_table(
"proposal_review_rule",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column(
"ruleset_id",
postgresql.UUID(as_uuid=True),
nullable=False,
),
sa.Column("name", sa.Text(), nullable=False),
sa.Column("description", sa.Text(), nullable=True),
sa.Column("category", sa.Text(), nullable=True),
sa.Column("rule_type", sa.Text(), nullable=False),
sa.Column(
"rule_intent",
sa.Text(),
server_default=sa.text("'CHECK'"),
nullable=False,
),
sa.Column("prompt_template", sa.Text(), nullable=False),
sa.Column(
"source",
sa.Text(),
server_default=sa.text("'MANUAL'"),
nullable=False,
),
sa.Column("authority", sa.Text(), nullable=True),
sa.Column(
"is_hard_stop",
sa.Boolean(),
server_default=sa.text("false"),
nullable=False,
),
sa.Column(
"priority",
sa.Integer(),
server_default=sa.text("0"),
nullable=False,
),
sa.Column(
"is_active",
sa.Boolean(),
server_default=sa.text("true"),
nullable=False,
),
sa.Column(
"refinement_needed",
sa.Boolean(),
server_default=sa.text("false"),
nullable=False,
),
sa.Column("refinement_question", sa.Text(), nullable=True),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.ForeignKeyConstraint(
["ruleset_id"],
["proposal_review_ruleset.id"],
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
"ix_proposal_review_rule_ruleset_id",
"proposal_review_rule",
["ruleset_id"],
)
# -- proposal_review_proposal --
# Includes inline proposal-level decision fields (no separate decision table).
op.create_table(
"proposal_review_proposal",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column("document_id", sa.Text(), nullable=False),
sa.Column("tenant_id", sa.Text(), nullable=False),
sa.Column(
"status",
sa.Text(),
server_default=sa.text("'PENDING'"),
nullable=False,
),
# Inline proposal-level decision fields
sa.Column("decision_notes", sa.Text(), nullable=True),
sa.Column(
"decision_officer_id",
fastapi_users_db_sqlalchemy.generics.GUID(),
nullable=True,
),
sa.Column("decision_at", sa.DateTime(timezone=True), nullable=True),
sa.Column(
"jira_synced",
sa.Boolean(),
server_default=sa.text("false"),
nullable=False,
),
sa.Column("jira_synced_at", sa.DateTime(timezone=True), nullable=True),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.ForeignKeyConstraint(["decision_officer_id"], ["user.id"]),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("document_id", "tenant_id"),
)
op.create_index(
"ix_proposal_review_proposal_tenant_id",
"proposal_review_proposal",
["tenant_id"],
)
op.create_index(
"ix_proposal_review_proposal_document_id",
"proposal_review_proposal",
["document_id"],
)
op.create_index(
"ix_proposal_review_proposal_status",
"proposal_review_proposal",
["status"],
)
# -- proposal_review_run --
op.create_table(
"proposal_review_run",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column(
"proposal_id",
postgresql.UUID(as_uuid=True),
nullable=False,
),
sa.Column(
"ruleset_id",
postgresql.UUID(as_uuid=True),
nullable=False,
),
sa.Column(
"triggered_by",
fastapi_users_db_sqlalchemy.generics.GUID(),
nullable=False,
),
sa.Column(
"status",
sa.Text(),
server_default=sa.text("'PENDING'"),
nullable=False,
),
sa.Column("total_rules", sa.Integer(), nullable=False),
sa.Column(
"completed_rules",
sa.Integer(),
server_default=sa.text("0"),
nullable=False,
),
sa.Column("started_at", sa.DateTime(timezone=True), nullable=True),
sa.Column("completed_at", sa.DateTime(timezone=True), nullable=True),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.ForeignKeyConstraint(
["proposal_id"],
["proposal_review_proposal.id"],
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["ruleset_id"],
["proposal_review_ruleset.id"],
),
sa.ForeignKeyConstraint(["triggered_by"], ["user.id"]),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
"ix_proposal_review_run_proposal_id",
"proposal_review_run",
["proposal_id"],
)
# -- proposal_review_finding --
# Includes inline per-finding decision fields (no separate decision table).
op.create_table(
"proposal_review_finding",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column(
"proposal_id",
postgresql.UUID(as_uuid=True),
nullable=False,
),
sa.Column(
"rule_id",
postgresql.UUID(as_uuid=True),
nullable=False,
),
sa.Column(
"review_run_id",
postgresql.UUID(as_uuid=True),
nullable=False,
),
sa.Column("verdict", sa.Text(), nullable=False),
sa.Column("confidence", sa.Text(), nullable=True),
sa.Column("evidence", sa.Text(), nullable=True),
sa.Column("explanation", sa.Text(), nullable=True),
sa.Column("suggested_action", sa.Text(), nullable=True),
sa.Column("llm_model", sa.Text(), nullable=True),
sa.Column("llm_tokens_used", sa.Integer(), nullable=True),
# Inline per-finding decision fields
sa.Column("decision_action", sa.Text(), nullable=True),
sa.Column("decision_notes", sa.Text(), nullable=True),
sa.Column(
"decision_officer_id",
fastapi_users_db_sqlalchemy.generics.GUID(),
nullable=True,
),
sa.Column("decided_at", sa.DateTime(timezone=True), nullable=True),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.ForeignKeyConstraint(
["proposal_id"],
["proposal_review_proposal.id"],
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["rule_id"],
["proposal_review_rule.id"],
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["review_run_id"],
["proposal_review_run.id"],
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(["decision_officer_id"], ["user.id"]),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
"ix_proposal_review_finding_proposal_id",
"proposal_review_finding",
["proposal_id"],
)
op.create_index(
"ix_proposal_review_finding_review_run_id",
"proposal_review_finding",
["review_run_id"],
)
op.create_index(
"ix_proposal_review_finding_rule_id",
"proposal_review_finding",
["rule_id"],
)
# -- proposal_review_document --
op.create_table(
"proposal_review_document",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column(
"proposal_id",
postgresql.UUID(as_uuid=True),
nullable=False,
),
sa.Column("file_name", sa.Text(), nullable=False),
sa.Column("file_type", sa.Text(), nullable=True),
sa.Column("file_store_id", sa.Text(), nullable=True),
sa.Column("extracted_text", sa.Text(), nullable=True),
sa.Column("document_role", sa.Text(), nullable=False),
sa.Column(
"uploaded_by",
fastapi_users_db_sqlalchemy.generics.GUID(),
nullable=True,
),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.ForeignKeyConstraint(
["proposal_id"],
["proposal_review_proposal.id"],
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(["uploaded_by"], ["user.id"]),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
"ix_proposal_review_document_proposal_id",
"proposal_review_document",
["proposal_id"],
)
# -- proposal_review_import_job --
op.create_table(
"proposal_review_import_job",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column(
"ruleset_id",
postgresql.UUID(as_uuid=True),
nullable=False,
),
sa.Column("tenant_id", sa.Text(), nullable=False),
sa.Column(
"status",
sa.Text(),
server_default=sa.text("'PENDING'"),
nullable=False,
),
sa.Column("source_filename", sa.Text(), nullable=False),
sa.Column("extracted_text", sa.Text(), nullable=False),
sa.Column(
"rules_created",
sa.Integer(),
server_default=sa.text("0"),
nullable=False,
),
sa.Column("error_message", sa.Text(), nullable=True),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column("completed_at", sa.DateTime(timezone=True), nullable=True),
sa.ForeignKeyConstraint(
["ruleset_id"],
["proposal_review_ruleset.id"],
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
"ix_proposal_review_import_job_ruleset_id",
"proposal_review_import_job",
["ruleset_id"],
)
# -- proposal_review_config --
op.create_table(
"proposal_review_config",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column("tenant_id", sa.Text(), nullable=False, unique=True),
sa.Column("jira_connector_id", sa.Integer(), nullable=True),
sa.Column("jira_project_key", sa.Text(), nullable=True),
sa.Column("field_mapping", postgresql.JSONB(), nullable=True),
sa.Column("jira_writeback", postgresql.JSONB(), nullable=True),
sa.Column("review_model", sa.Text(), nullable=True),
sa.Column("import_model", sa.Text(), nullable=True),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.PrimaryKeyConstraint("id"),
)
def downgrade() -> None:
op.drop_table("proposal_review_import_job")
op.drop_table("proposal_review_config")
op.drop_table("proposal_review_document")
op.drop_table("proposal_review_finding")
op.drop_table("proposal_review_run")
op.drop_table("proposal_review_proposal")
op.drop_table("proposal_review_rule")
op.drop_table("proposal_review_ruleset")

View File

@@ -0,0 +1,32 @@
"""add failed_rules to proposal_review_run
Revision ID: ce2aa573d445
Revises: 61ea78857c97
Create Date: 2026-04-14 16:34:57.276707
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "ce2aa573d445"
down_revision = "61ea78857c97"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.add_column(
"proposal_review_run",
sa.Column(
"failed_rules",
sa.Integer(),
nullable=False,
server_default=sa.text("0"),
),
)
def downgrade() -> None:
op.drop_column("proposal_review_run", "failed_rules")

View File

@@ -322,6 +322,7 @@ celery_app.autodiscover_tasks(
"onyx.background.celery.tasks.vespa",
"onyx.background.celery.tasks.llm_model_update",
"onyx.background.celery.tasks.user_file_processing",
"onyx.server.features.proposal_review.engine",
]
)
)
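
autodiscover_tasks() just imports each listed package and registers any module-level task definitions it finds, so the new engine module only needs importable decorated callables. A hedged sketch of that shape (the decorator form is illustrative; the task name matches the constant added later in this diff):

from celery import shared_task

@shared_task(name="check_for_dangling_import_jobs")
def check_for_dangling_import_jobs_task() -> None:
    # Body elided; registration only requires that this function be
    # importable from onyx.server.features.proposal_review.engine.
    ...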

View File

@@ -5,8 +5,8 @@ from logging.handlers import RotatingFileHandler
import psutil
from onyx.utils.logger import is_running_in_container
from onyx.utils.logger import setup_logger
from onyx.utils.platform import is_running_in_container
# Regular application logger
logger = setup_logger()

View File

@@ -79,6 +79,15 @@ beat_task_templates: list[dict] = [
"skip_gated": False,
},
},
{
"name": "check-for-dangling-import-jobs",
"task": OnyxCeleryTask.CHECK_FOR_DANGLING_IMPORT_JOBS,
"schedule": timedelta(minutes=10),
"options": {
"priority": OnyxCeleryPriority.LOW,
"expires": BEAT_EXPIRES_DEFAULT,
},
},
{
"name": "check-for-index-attempt-cleanup",
"task": OnyxCeleryTask.CHECK_FOR_INDEX_ATTEMPT_CLEANUP,

View File

@@ -42,7 +42,7 @@ from onyx.db.models import UserGroup
from onyx.db.search_settings import get_active_search_settings_list
from onyx.redis.redis_pool import get_redis_client
from onyx.redis.redis_pool import redis_lock_dump
from onyx.utils.platform import is_running_in_container
from onyx.utils.logger import is_running_in_container
from onyx.utils.telemetry import optional_telemetry
from onyx.utils.telemetry import RecordType
from shared_configs.configs import MULTI_TENANT

View File

@@ -172,6 +172,10 @@ def migrate_chunks_from_vespa_to_opensearch_task(
search_settings = get_current_search_settings(db_session)
indexing_setting = IndexingSetting.from_db_model(search_settings)
task_logger.debug(
"Verified tenant info, migration record, and search settings."
)
# 2.e. Build sanitized to original doc ID mapping to check for
# conflicts in the event we sanitize a doc ID to an
# already-existing doc ID.
@@ -325,6 +329,7 @@ def migrate_chunks_from_vespa_to_opensearch_task(
finally:
if lock.owned():
lock.release()
task_logger.debug("Released the OpenSearch migration lock.")
else:
task_logger.warning(
"The OpenSearch migration lock was not owned on completion of the migration task."

View File

@@ -4,8 +4,6 @@ from collections.abc import Callable
from typing import Any
from typing import Literal
from sqlalchemy.orm import Session
from onyx.chat.chat_state import ChatStateContainer
from onyx.chat.chat_utils import create_tool_call_failure_messages
from onyx.chat.citation_processor import CitationMapping
@@ -635,7 +633,6 @@ def run_llm_loop(
user_memory_context: UserMemoryContext | None,
llm: LLM,
token_counter: Callable[[str], int],
db_session: Session,
forced_tool_id: int | None = None,
user_identity: LLMUserIdentity | None = None,
chat_session_id: str | None = None,
@@ -1020,20 +1017,16 @@ def run_llm_loop(
persisted_memory_id: int | None = None
if user_memory_context and user_memory_context.user_id:
if tool_response.rich_response.index_to_replace is not None:
memory = update_memory_at_index(
persisted_memory_id = update_memory_at_index(
user_id=user_memory_context.user_id,
index=tool_response.rich_response.index_to_replace,
new_text=tool_response.rich_response.memory_text,
db_session=db_session,
)
persisted_memory_id = memory.id if memory else None
else:
memory = add_memory(
persisted_memory_id = add_memory(
user_id=user_memory_context.user_id,
memory_text=tool_response.rich_response.memory_text,
db_session=db_session,
)
persisted_memory_id = memory.id
operation: Literal["add", "update"] = (
"update"
if tool_response.rich_response.index_to_replace is not None

View File

@@ -67,7 +67,6 @@ from onyx.db.chat import get_chat_session_by_id
from onyx.db.chat import get_or_create_root_message
from onyx.db.chat import reserve_message_id
from onyx.db.chat import reserve_multi_model_message_ids
from onyx.db.engine.sql_engine import get_session_with_current_tenant
from onyx.db.enums import HookPoint
from onyx.db.memory import get_memories
from onyx.db.models import ChatMessage
@@ -1006,93 +1005,86 @@ def _run_models(
model_llm = setup.llms[model_idx]
try:
# Each worker opens its own session — SQLAlchemy sessions are not thread-safe.
# Do NOT write to the outer db_session (or any shared DB state) from here;
# all DB writes in this thread must go through thread_db_session.
with get_session_with_current_tenant() as thread_db_session:
thread_tool_dict = construct_tools(
persona=setup.persona,
db_session=thread_db_session,
emitter=model_emitter,
user=user,
llm=model_llm,
search_tool_config=SearchToolConfig(
user_selected_filters=setup.new_msg_req.internal_search_filters,
project_id_filter=setup.search_params.project_id_filter,
persona_id_filter=setup.search_params.persona_id_filter,
bypass_acl=setup.bypass_acl,
slack_context=setup.slack_context,
enable_slack_search=_should_enable_slack_search(
setup.persona, setup.new_msg_req.internal_search_filters
),
# Each function opens short-lived DB sessions on demand.
# Do NOT pass a long-lived session here — it would hold a
# connection for the entire LLM loop (minutes), and cloud
# infrastructure may drop idle connections.
thread_tool_dict = construct_tools(
persona=setup.persona,
emitter=model_emitter,
user=user,
llm=model_llm,
search_tool_config=SearchToolConfig(
user_selected_filters=setup.new_msg_req.internal_search_filters,
project_id_filter=setup.search_params.project_id_filter,
persona_id_filter=setup.search_params.persona_id_filter,
bypass_acl=setup.bypass_acl,
slack_context=setup.slack_context,
enable_slack_search=_should_enable_slack_search(
setup.persona, setup.new_msg_req.internal_search_filters
),
custom_tool_config=CustomToolConfig(
chat_session_id=setup.chat_session.id,
message_id=setup.user_message.id,
additional_headers=setup.custom_tool_additional_headers,
mcp_headers=setup.mcp_headers,
),
file_reader_tool_config=FileReaderToolConfig(
user_file_ids=setup.available_files.user_file_ids,
chat_file_ids=setup.available_files.chat_file_ids,
),
allowed_tool_ids=setup.new_msg_req.allowed_tool_ids,
search_usage_forcing_setting=setup.search_params.search_usage,
),
custom_tool_config=CustomToolConfig(
chat_session_id=setup.chat_session.id,
message_id=setup.user_message.id,
additional_headers=setup.custom_tool_additional_headers,
mcp_headers=setup.mcp_headers,
),
file_reader_tool_config=FileReaderToolConfig(
user_file_ids=setup.available_files.user_file_ids,
chat_file_ids=setup.available_files.chat_file_ids,
),
allowed_tool_ids=setup.new_msg_req.allowed_tool_ids,
search_usage_forcing_setting=setup.search_params.search_usage,
)
model_tools = [
tool for tool_list in thread_tool_dict.values() for tool in tool_list
]
if setup.forced_tool_id and setup.forced_tool_id not in {
tool.id for tool in model_tools
}:
raise ValueError(
f"Forced tool {setup.forced_tool_id} not found in tools"
)
model_tools = [
tool
for tool_list in thread_tool_dict.values()
for tool in tool_list
]
if setup.forced_tool_id and setup.forced_tool_id not in {
tool.id for tool in model_tools
}:
raise ValueError(
f"Forced tool {setup.forced_tool_id} not found in tools"
)
# Per-thread copy: run_llm_loop mutates simple_chat_history in-place.
if n_models == 1 and setup.new_msg_req.deep_research:
if setup.chat_session.project_id:
raise RuntimeError(
"Deep research is not supported for projects"
)
run_deep_research_llm_loop(
emitter=model_emitter,
state_container=sc,
simple_chat_history=list(setup.simple_chat_history),
tools=model_tools,
custom_agent_prompt=setup.custom_agent_prompt,
llm=model_llm,
token_counter=get_llm_token_counter(model_llm),
db_session=thread_db_session,
skip_clarification=setup.skip_clarification,
user_identity=setup.user_identity,
chat_session_id=str(setup.chat_session.id),
all_injected_file_metadata=setup.all_injected_file_metadata,
)
else:
run_llm_loop(
emitter=model_emitter,
state_container=sc,
simple_chat_history=list(setup.simple_chat_history),
tools=model_tools,
custom_agent_prompt=setup.custom_agent_prompt,
context_files=setup.extracted_context_files,
persona=setup.persona,
user_memory_context=setup.user_memory_context,
llm=model_llm,
token_counter=get_llm_token_counter(model_llm),
db_session=thread_db_session,
forced_tool_id=setup.forced_tool_id,
user_identity=setup.user_identity,
chat_session_id=str(setup.chat_session.id),
chat_files=setup.chat_files_for_tools,
include_citations=setup.new_msg_req.include_citations,
all_injected_file_metadata=setup.all_injected_file_metadata,
inject_memories_in_prompt=user.use_memories,
)
# Per-thread copy: run_llm_loop mutates simple_chat_history in-place.
if n_models == 1 and setup.new_msg_req.deep_research:
if setup.chat_session.project_id:
raise RuntimeError("Deep research is not supported for projects")
run_deep_research_llm_loop(
emitter=model_emitter,
state_container=sc,
simple_chat_history=list(setup.simple_chat_history),
tools=model_tools,
custom_agent_prompt=setup.custom_agent_prompt,
llm=model_llm,
token_counter=get_llm_token_counter(model_llm),
skip_clarification=setup.skip_clarification,
user_identity=setup.user_identity,
chat_session_id=str(setup.chat_session.id),
all_injected_file_metadata=setup.all_injected_file_metadata,
)
else:
run_llm_loop(
emitter=model_emitter,
state_container=sc,
simple_chat_history=list(setup.simple_chat_history),
tools=model_tools,
custom_agent_prompt=setup.custom_agent_prompt,
context_files=setup.extracted_context_files,
persona=setup.persona,
user_memory_context=setup.user_memory_context,
llm=model_llm,
token_counter=get_llm_token_counter(model_llm),
forced_tool_id=setup.forced_tool_id,
user_identity=setup.user_identity,
chat_session_id=str(setup.chat_session.id),
chat_files=setup.chat_files_for_tools,
include_citations=setup.new_msg_req.include_citations,
all_injected_file_metadata=setup.all_injected_file_metadata,
inject_memories_in_prompt=user.use_memories,
)
model_succeeded[model_idx] = True
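
The comment in the new version is the crux of this fix: construct_tools and the LLM loops no longer receive a Session at all, so any database touch inside the loop opens its own short-lived session and returns the connection immediately. A minimal sketch of that pattern, with a hypothetical helper:

from onyx.db.engine.sql_engine import get_session_with_current_tenant

def persist_tool_result(message_id: int, result_text: str) -> None:  # hypothetical
    # The session (and its connection) lives only for this block instead of
    # idling for the whole multi-minute LLM loop.
    with get_session_with_current_tenant() as db_session:
        save_tool_result(db_session, message_id, result_text)  # hypothetical write
        db_session.commit()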

View File

@@ -449,6 +449,7 @@ class OnyxRedisLocks:
"da_lock:check_connector_external_group_sync_beat"
)
OPENSEARCH_MIGRATION_BEAT_LOCK = "da_lock:opensearch_migration_beat"
CHECK_DANGLING_IMPORT_JOBS_BEAT_LOCK = "da_lock:check_dangling_import_jobs_beat"
MONITOR_BACKGROUND_PROCESSES_LOCK = "da_lock:monitor_background_processes"
CHECK_AVAILABLE_TENANTS_LOCK = "da_lock:check_available_tenants"
@@ -612,6 +613,9 @@ class OnyxCeleryTask:
# Hook execution log retention
HOOK_EXECUTION_LOG_CLEANUP_TASK = "hook_execution_log_cleanup_task"
# Proposal review import cleanup
CHECK_FOR_DANGLING_IMPORT_JOBS = "check_for_dangling_import_jobs"
# Sandbox cleanup
CLEANUP_IDLE_SANDBOXES = "cleanup_idle_sandboxes"
CLEANUP_OLD_SNAPSHOTS = "cleanup_old_snapshots"
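
A sketch of how the new beat lock constant would typically guard the scheduled task, mirroring the owned()/release() pattern visible in the OpenSearch migration diff above; the client acquisition, timeout, and cleanup body are assumptions:

from onyx.configs.constants import OnyxRedisLocks
from onyx.redis.redis_pool import get_redis_client

def check_for_dangling_import_jobs(tenant_id: str) -> None:
    redis_client = get_redis_client(tenant_id=tenant_id)
    lock = redis_client.lock(
        OnyxRedisLocks.CHECK_DANGLING_IMPORT_JOBS_BEAT_LOCK,
        timeout=600,  # should exceed the worst-case scan time
    )
    if not lock.acquire(blocking=False):
        return  # another beat-triggered run already holds the lock
    try:
        ...  # mark PENDING import jobs past a cutoff as failed (assumed logic)
    finally:
        if lock.owned():
            lock.release()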

View File

@@ -8,6 +8,7 @@ from collections.abc import Iterator
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from io import BytesIO
from typing import Any
import requests
@@ -40,6 +41,7 @@ from onyx.connectors.jira.utils import best_effort_basic_expert_info
from onyx.connectors.jira.utils import best_effort_get_field_from_issue
from onyx.connectors.jira.utils import build_jira_client
from onyx.connectors.jira.utils import build_jira_url
from onyx.connectors.jira.utils import CustomFieldExtractor
from onyx.connectors.jira.utils import extract_text_from_adf
from onyx.connectors.jira.utils import get_comment_strs
from onyx.connectors.jira.utils import JIRA_CLOUD_API_VERSION
@@ -52,6 +54,7 @@ from onyx.connectors.models import HierarchyNode
from onyx.connectors.models import SlimDocument
from onyx.connectors.models import TextSection
from onyx.db.enums import HierarchyNodeType
from onyx.file_processing.extract_file_text import extract_file_text
from onyx.indexing.indexing_heartbeat import IndexingHeartbeatInterface
from onyx.utils.logger import setup_logger
@@ -64,6 +67,7 @@ _MAX_RESULTS_FETCH_IDS = 5000
_JIRA_FULL_PAGE_SIZE = 50
# https://developer.atlassian.com/cloud/jira/platform/rest/v3/api-group-issues/
_JIRA_BULK_FETCH_LIMIT = 100
_MAX_ATTACHMENT_SIZE_BYTES = 50 * 1024 * 1024 # 50 MB
# Constants for Jira field names
_FIELD_REPORTER = "reporter"
@@ -377,6 +381,7 @@ def process_jira_issue(
comment_email_blacklist: tuple[str, ...] = (),
labels_to_skip: set[str] | None = None,
parent_hierarchy_raw_node_id: str | None = None,
custom_fields_mapping: dict[str, str] | None = None,
) -> Document | None:
if labels_to_skip:
if any(label in issue.fields.labels for label in labels_to_skip):
@@ -462,6 +467,24 @@ def process_jira_issue(
else:
logger.error(f"Project should exist but does not for {issue.key}")
# Merge custom fields into metadata if a mapping was provided
if custom_fields_mapping:
try:
custom_fields = CustomFieldExtractor.get_issue_custom_fields(
issue, custom_fields_mapping
)
# Filter out custom fields that collide with existing metadata keys
for key in list(custom_fields.keys()):
if key in metadata_dict:
logger.warning(
f"Custom field '{key}' on {issue.key} collides with "
f"standard metadata key; skipping custom field value"
)
del custom_fields[key]
metadata_dict.update(custom_fields)
except Exception as e:
logger.warning(f"Failed to extract custom fields for {issue.key}: {e}")
return Document(
id=page_url,
sections=[TextSection(link=page_url, text=ticket_content)],
@@ -504,6 +527,12 @@ class JiraConnector(
# Custom JQL query to filter Jira issues
jql_query: str | None = None,
scoped_token: bool = False,
# When True, extract custom fields from Jira issues and include them
# in document metadata with human-readable field names.
extract_custom_fields: bool = False,
# When True, download attachments from Jira issues and yield them
# as separate Documents linked to the parent ticket.
fetch_attachments: bool = False,
) -> None:
self.batch_size = batch_size
@@ -517,7 +546,11 @@ class JiraConnector(
self.labels_to_skip = set(labels_to_skip)
self.jql_query = jql_query
self.scoped_token = scoped_token
self.extract_custom_fields = extract_custom_fields
self.fetch_attachments = fetch_attachments
self._jira_client: JIRA | None = None
# Mapping of custom field IDs to human-readable names (populated on load_credentials)
self._custom_fields_mapping: dict[str, str] = {}
# Cache project permissions to avoid fetching them repeatedly across runs
self._project_permissions_cache: dict[str, Any] = {}
@@ -678,12 +711,134 @@ class JiraConnector(
# the document belongs directly under the project in the hierarchy
return project_key
def _process_attachments(
self,
issue: Issue,
parent_hierarchy_raw_node_id: str | None,
include_permissions: bool = False,
project_key: str | None = None,
) -> Generator[Document | ConnectorFailure, None, None]:
"""Download and yield Documents for each attachment on a Jira issue.
Each attachment becomes a separate Document whose text is extracted
from the downloaded file content. Failures on individual attachments
are logged and yielded as ConnectorFailure so they never break the
overall indexing run.
"""
attachments = best_effort_get_field_from_issue(issue, "attachment")
if not attachments:
return
issue_url = build_jira_url(self.jira_base, issue.key)
for attachment in attachments:
try:
filename = getattr(attachment, "filename", "unknown")
try:
size = int(getattr(attachment, "size", 0) or 0)
except (ValueError, TypeError):
size = 0
content_url = getattr(attachment, "content", None)
attachment_id = getattr(attachment, "id", filename)
mime_type = getattr(attachment, "mimeType", "application/octet-stream")
created = getattr(attachment, "created", None)
if size > _MAX_ATTACHMENT_SIZE_BYTES:
logger.warning(
f"Skipping attachment '{filename}' on {issue.key}: "
f"size {size} bytes exceeds {_MAX_ATTACHMENT_SIZE_BYTES} byte limit"
)
continue
if not content_url:
logger.warning(
f"Skipping attachment '{filename}' on {issue.key}: "
f"no content URL available"
)
continue
# Download the attachment using the public API on the
# python-jira Attachment resource (avoids private _session access
# and the double-copy from response.content + BytesIO wrapping).
file_content = attachment.get()
# Extract text from the downloaded file
try:
text = extract_file_text(
file=BytesIO(file_content),
file_name=filename,
)
except Exception as e:
logger.warning(
f"Could not extract text from attachment '{filename}' "
f"on {issue.key}: {e}"
)
continue
if not text or not text.strip():
logger.info(
f"Skipping attachment '{filename}' on {issue.key}: "
f"no text content could be extracted"
)
continue
doc_id = f"{issue_url}/attachments/{attachment_id}"
attachment_doc = Document(
id=doc_id,
sections=[TextSection(link=issue_url, text=text)],
source=DocumentSource.JIRA,
semantic_identifier=f"{issue.key}: {filename}",
title=filename,
doc_updated_at=(time_str_to_utc(created) if created else None),
parent_hierarchy_raw_node_id=parent_hierarchy_raw_node_id,
metadata={
"parent_ticket": issue.key,
"attachment_filename": filename,
"attachment_mime_type": mime_type,
"attachment_size": str(size),
},
)
if include_permissions and project_key:
attachment_doc.external_access = self._get_project_permissions(
project_key,
add_prefix=True,
)
yield attachment_doc
except Exception as e:
logger.error(f"Failed to process attachment on {issue.key}: {e}")
yield ConnectorFailure(
failed_document=DocumentFailure(
document_id=f"{issue_url}/attachments/{getattr(attachment, 'id', 'unknown')}",
document_link=issue_url,
),
failure_message=f"Failed to process attachment '{getattr(attachment, 'filename', 'unknown')}': {str(e)}",
exception=e,
)
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
self._jira_client = build_jira_client(
credentials=credentials,
jira_base=self.jira_base,
scoped_token=self.scoped_token,
)
# Fetch the custom field ID-to-name mapping once at credential load time.
# This avoids repeated API calls during issue processing.
if self.extract_custom_fields:
try:
self._custom_fields_mapping = (
CustomFieldExtractor.get_all_custom_fields(self._jira_client)
)
logger.info(
f"Loaded {len(self._custom_fields_mapping)} custom field definitions"
)
except Exception as e:
logger.warning(
f"Failed to fetch custom field definitions; "
f"custom field extraction will be skipped: {e}"
)
self._custom_fields_mapping = {}
return None
def _get_jql_query(
@@ -814,6 +969,11 @@ class JiraConnector(
comment_email_blacklist=self.comment_email_blacklist,
labels_to_skip=self.labels_to_skip,
parent_hierarchy_raw_node_id=parent_hierarchy_raw_node_id,
custom_fields_mapping=(
self._custom_fields_mapping
if self._custom_fields_mapping
else None
),
):
# Add permission information to the document if requested
if include_permissions:
@@ -823,6 +983,15 @@ class JiraConnector(
)
yield document
# Yield attachment documents if enabled
if self.fetch_attachments:
yield from self._process_attachments(
issue=issue,
parent_hierarchy_raw_node_id=parent_hierarchy_raw_node_id,
include_permissions=include_permissions,
project_key=project_key,
)
except Exception as e:
yield ConnectorFailure(
failed_document=DocumentFailure(
@@ -930,20 +1099,41 @@ class JiraConnector(
issue_key = best_effort_get_field_from_issue(issue, _FIELD_KEY)
doc_id = build_jira_url(self.jira_base, issue_key)
parent_hierarchy_raw_node_id = (
self._get_parent_hierarchy_raw_node_id(issue, project_key)
if project_key
else None
)
project_perms = self._get_project_permissions(
project_key, add_prefix=False
)
slim_doc_batch.append(
SlimDocument(
id=doc_id,
# Permission sync path - don't prefix, upsert_document_external_perms handles it
external_access=self._get_project_permissions(
project_key, add_prefix=False
),
parent_hierarchy_raw_node_id=(
self._get_parent_hierarchy_raw_node_id(issue, project_key)
if project_key
else None
),
external_access=project_perms,
parent_hierarchy_raw_node_id=parent_hierarchy_raw_node_id,
)
)
# Also emit SlimDocument entries for each attachment
if self.fetch_attachments:
attachments = best_effort_get_field_from_issue(issue, "attachment")
if attachments:
for attachment in attachments:
attachment_id = getattr(
attachment,
"id",
getattr(attachment, "filename", "unknown"),
)
slim_doc_batch.append(
SlimDocument(
id=f"{doc_id}/attachments/{attachment_id}",
external_access=project_perms,
parent_hierarchy_raw_node_id=parent_hierarchy_raw_node_id,
)
)
current_offset += 1
if len(slim_doc_batch) >= JIRA_SLIM_PAGE_SIZE:
yield slim_doc_batch
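
Putting the two new flags together, enabling them is a constructor-level opt-in; a hedged example (the URL and credential keys are assumptions, and the other constructor arguments keep their defaults):

from onyx.connectors.jira.connector import JiraConnector

connector = JiraConnector(
    jira_base_url="https://example.atlassian.net",  # illustrative
    extract_custom_fields=True,  # merge readable custom fields into metadata
    fetch_attachments=True,      # emit each attachment as its own Document
)
# Builds the client and, because extract_custom_fields is set, caches the
# custom-field id -> name mapping once up front.
connector.load_credentials(
    {"jira_user_email": "me@example.com", "jira_api_token": "..."}  # assumed keys
)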

View File

@@ -11,6 +11,7 @@ from sqlalchemy import event
from sqlalchemy import pool
from sqlalchemy.engine import create_engine
from sqlalchemy.engine import Engine
from sqlalchemy.exc import DBAPIError
from sqlalchemy.orm import Session
from onyx.configs.app_configs import DB_READONLY_PASSWORD
@@ -346,6 +347,25 @@ def get_session_with_shared_schema() -> Generator[Session, None, None]:
CURRENT_TENANT_ID_CONTEXTVAR.reset(token)
def _safe_close_session(session: Session) -> None:
"""Close a session, catching connection-closed errors during cleanup.
Long-running operations (e.g. multi-model LLM loops) can hold a session
open for minutes. If the underlying connection is dropped by cloud
infrastructure (load-balancer timeouts, PgBouncer, idle-in-transaction
timeouts, etc.), the implicit rollback in Session.close() raises
OperationalError or InterfaceError. Since the work is already complete,
we log and move on — SQLAlchemy internally invalidates the connection
for pool recycling.
"""
try:
session.close()
except DBAPIError:
logger.warning(
"DB connection lost during session cleanup — the connection will be invalidated and recycled by the pool."
)
@contextmanager
def get_session_with_tenant(*, tenant_id: str) -> Generator[Session, None, None]:
"""
@@ -358,8 +378,11 @@ def get_session_with_tenant(*, tenant_id: str) -> Generator[Session, None, None]
# no need to use the schema translation map for self-hosted + default schema
if not MULTI_TENANT and tenant_id == POSTGRES_DEFAULT_SCHEMA_STANDARD_VALUE:
with Session(bind=engine, expire_on_commit=False) as session:
session = Session(bind=engine, expire_on_commit=False)
try:
yield session
finally:
_safe_close_session(session)
return
# Create connection with schema translation to handle querying the right schema
@@ -367,8 +390,11 @@ def get_session_with_tenant(*, tenant_id: str) -> Generator[Session, None, None]
with engine.connect().execution_options(
schema_translate_map=schema_translate_map
) as connection:
with Session(bind=connection, expire_on_commit=False) as session:
session = Session(bind=connection, expire_on_commit=False)
try:
yield session
finally:
_safe_close_session(session)
def get_session() -> Generator[Session, None, None]:
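
The behavioral change from dropping the with Session(...) form: Session.close() performs an implicit rollback, and on a dead connection that rollback raises out of the context manager even though the caller's work already succeeded. Routed through _safe_close_session, the same situation only logs. The caller's view, as a sketch (the tenant id and long-running body are placeholders):

from onyx.db.engine.sql_engine import get_session_with_tenant

with get_session_with_tenant(tenant_id="public") as db_session:
    run_multi_model_turn(db_session)  # hypothetical minutes-long work
# close() runs here; if a proxy dropped the idle connection mid-turn, the
# resulting DBAPIError is now caught and logged instead of masking the
# already-completed result.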

View File

@@ -5,6 +5,7 @@ from pydantic import ConfigDict
from sqlalchemy import select
from sqlalchemy.orm import Session
from onyx.db.engine.sql_engine import get_session_with_current_tenant_if_none
from onyx.db.models import Memory
from onyx.db.models import User
@@ -83,47 +84,51 @@ def get_memories(user: User, db_session: Session) -> UserMemoryContext:
def add_memory(
user_id: UUID,
memory_text: str,
db_session: Session,
) -> Memory:
db_session: Session | None = None,
) -> int:
"""Insert a new Memory row for the given user.
If the user already has MAX_MEMORIES_PER_USER memories, the oldest
one (lowest id) is deleted before inserting the new one.
Returns the id of the newly created Memory row.
"""
existing = db_session.scalars(
select(Memory).where(Memory.user_id == user_id).order_by(Memory.id.asc())
).all()
with get_session_with_current_tenant_if_none(db_session) as db_session:
existing = db_session.scalars(
select(Memory).where(Memory.user_id == user_id).order_by(Memory.id.asc())
).all()
if len(existing) >= MAX_MEMORIES_PER_USER:
db_session.delete(existing[0])
if len(existing) >= MAX_MEMORIES_PER_USER:
db_session.delete(existing[0])
memory = Memory(
user_id=user_id,
memory_text=memory_text,
)
db_session.add(memory)
db_session.commit()
return memory
memory = Memory(
user_id=user_id,
memory_text=memory_text,
)
db_session.add(memory)
db_session.commit()
return memory.id
def update_memory_at_index(
user_id: UUID,
index: int,
new_text: str,
db_session: Session,
) -> Memory | None:
db_session: Session | None = None,
) -> int | None:
"""Update the memory at the given 0-based index (ordered by id ASC, matching get_memories()).
Returns the updated Memory row, or None if the index is out of range.
Returns the id of the updated Memory row, or None if the index is out of range.
"""
memory_rows = db_session.scalars(
select(Memory).where(Memory.user_id == user_id).order_by(Memory.id.asc())
).all()
with get_session_with_current_tenant_if_none(db_session) as db_session:
memory_rows = db_session.scalars(
select(Memory).where(Memory.user_id == user_id).order_by(Memory.id.asc())
).all()
if index < 0 or index >= len(memory_rows):
return None
if index < 0 or index >= len(memory_rows):
return None
memory = memory_rows[index]
memory.memory_text = new_text
db_session.commit()
return memory
memory = memory_rows[index]
memory.memory_text = new_text
db_session.commit()
return memory.id
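
With db_session now optional and the return type narrowed to the row id, worker-thread callers can persist memories without threading a session through; a small usage sketch (the UUID is a placeholder):

from uuid import UUID
from onyx.db.memory import add_memory
from onyx.db.memory import update_memory_at_index

user_id = UUID("00000000-0000-0000-0000-000000000000")  # placeholder
memory_id = add_memory(user_id=user_id, memory_text="Prefers concise answers")
updated_id = update_memory_at_index(
    user_id=user_id, index=0, new_text="Prefers detailed answers"
)
# Each call opened and closed its own tenant session internally.

Returning plain ids rather than ORM instances also sidesteps detached-instance access once that internal session closes.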

View File

@@ -7,8 +7,6 @@ import time
from collections.abc import Callable
from typing import cast
from sqlalchemy.orm import Session
from onyx.chat.chat_state import ChatStateContainer
from onyx.chat.citation_processor import CitationMapping
from onyx.chat.citation_processor import DynamicCitationProcessor
@@ -22,6 +20,7 @@ from onyx.chat.models import LlmStepResult
from onyx.chat.models import ToolCallSimple
from onyx.configs.chat_configs import SKIP_DEEP_RESEARCH_CLARIFICATION
from onyx.configs.constants import MessageType
from onyx.db.engine.sql_engine import get_session_with_current_tenant
from onyx.db.tools import get_tool_by_name
from onyx.deep_research.dr_mock_tools import get_clarification_tool_definitions
from onyx.deep_research.dr_mock_tools import get_orchestrator_tools
@@ -184,6 +183,14 @@ def generate_final_report(
return has_reasoned
def _get_research_agent_tool_id() -> int:
with get_session_with_current_tenant() as db_session:
return get_tool_by_name(
tool_name=RESEARCH_AGENT_TOOL_NAME,
db_session=db_session,
).id
@log_function_time(print_only=True)
def run_deep_research_llm_loop(
emitter: Emitter,
@@ -193,7 +200,6 @@ def run_deep_research_llm_loop(
custom_agent_prompt: str | None, # noqa: ARG001
llm: LLM,
token_counter: Callable[[str], int],
db_session: Session,
skip_clarification: bool = False,
user_identity: LLMUserIdentity | None = None,
chat_session_id: str | None = None,
@@ -717,6 +723,7 @@ def run_deep_research_llm_loop(
simple_chat_history.append(assistant_with_tools)
# Now add TOOL_CALL_RESPONSE messages and tool call info for each result
research_agent_tool_id = _get_research_agent_tool_id()
for tab_index, report in enumerate(
research_results.intermediate_reports
):
@@ -737,10 +744,7 @@ def run_deep_research_llm_loop(
tab_index=tab_index,
tool_name=current_tool_call.tool_name,
tool_call_id=current_tool_call.tool_call_id,
tool_id=get_tool_by_name(
tool_name=RESEARCH_AGENT_TOOL_NAME,
db_session=db_session,
).id,
tool_id=research_agent_tool_id,
reasoning_tokens=llm_step_result.reasoning
or most_recent_reasoning,
tool_call_arguments=current_tool_call.tool_args,

View File

@@ -96,6 +96,9 @@ from onyx.server.features.persona.api import admin_router as admin_persona_route
from onyx.server.features.persona.api import agents_router
from onyx.server.features.persona.api import basic_router as persona_router
from onyx.server.features.projects.api import router as projects_router
from onyx.server.features.proposal_review.api.api import (
router as proposal_review_router,
)
from onyx.server.features.tool.api import admin_router as admin_tool_router
from onyx.server.features.tool.api import router as tool_router
from onyx.server.features.user_oauth_token.api import router as user_oauth_token_router
@@ -469,6 +472,7 @@ def get_application(lifespan_override: Lifespan | None = None) -> FastAPI:
include_router_with_global_prefix_prepended(application, projects_router)
include_router_with_global_prefix_prepended(application, public_build_router)
include_router_with_global_prefix_prepended(application, build_router)
include_router_with_global_prefix_prepended(application, proposal_review_router)
include_router_with_global_prefix_prepended(application, document_set_router)
include_router_with_global_prefix_prepended(application, hierarchy_router)
include_router_with_global_prefix_prepended(application, search_settings_router)

View File

@@ -0,0 +1,39 @@
"""Main router for Proposal Review.
Mounts all sub-routers under the /proposal-review prefix.
"""
from fastapi import APIRouter
from fastapi import Depends
from onyx.auth.permissions import require_permission
from onyx.db.enums import Permission
from onyx.server.features.proposal_review.configs import ENABLE_PROPOSAL_REVIEW
router = APIRouter(
prefix="/proposal-review",
dependencies=[Depends(require_permission(Permission.BASIC_ACCESS))],
)
if ENABLE_PROPOSAL_REVIEW:
from onyx.server.features.proposal_review.api.config_api import (
router as config_router,
)
from onyx.server.features.proposal_review.api.decisions_api import (
router as decisions_router,
)
from onyx.server.features.proposal_review.api.proposals_api import (
router as proposals_router,
)
from onyx.server.features.proposal_review.api.review_api import (
router as review_router,
)
from onyx.server.features.proposal_review.api.rulesets_api import (
router as rulesets_router,
)
router.include_router(rulesets_router, tags=["proposal-review"])
router.include_router(proposals_router, tags=["proposal-review"])
router.include_router(review_router, tags=["proposal-review"])
router.include_router(decisions_router, tags=["proposal-review"])
router.include_router(config_router, tags=["proposal-review"])

View File

@@ -0,0 +1,89 @@
"""API endpoints for tenant configuration."""
from fastapi import APIRouter
from fastapi import Depends
from sqlalchemy.orm import Session
from onyx.auth.permissions import require_permission
from onyx.configs.constants import DocumentSource
from onyx.db.connector import fetch_connectors
from onyx.db.engine.sql_engine import get_session
from onyx.db.enums import Permission
from onyx.db.models import User
from onyx.server.features.proposal_review.api.models import ConfigResponse
from onyx.server.features.proposal_review.api.models import ConfigUpdate
from onyx.server.features.proposal_review.api.models import JiraConnectorInfo
from onyx.server.features.proposal_review.db import config as config_db
from shared_configs.contextvars import get_current_tenant_id
router = APIRouter()
@router.get("/config")
def get_config(
_user: User = Depends(require_permission(Permission.BASIC_ACCESS)),
db_session: Session = Depends(get_session),
) -> ConfigResponse:
"""Get the tenant's proposal review configuration."""
tenant_id = get_current_tenant_id()
config = config_db.get_config(tenant_id, db_session)
if not config:
# Return a default empty config rather than 404
config = config_db.upsert_config(tenant_id, db_session)
db_session.commit()
return ConfigResponse.from_model(config)
@router.put("/config")
def update_config(
request: ConfigUpdate,
_user: User = Depends(require_permission(Permission.MANAGE_CONNECTORS)),
db_session: Session = Depends(get_session),
) -> ConfigResponse:
"""Update the tenant's proposal review configuration."""
tenant_id = get_current_tenant_id()
config = config_db.upsert_config(
tenant_id=tenant_id,
jira_connector_id=request.jira_connector_id,
jira_project_key=request.jira_project_key,
field_mapping=request.field_mapping,
jira_writeback=request.jira_writeback,
review_model=request.review_model,
import_model=request.import_model,
db_session=db_session,
)
db_session.commit()
return ConfigResponse.from_model(config)
@router.get("/jira-connectors")
def list_jira_connectors(
_user: User = Depends(require_permission(Permission.BASIC_ACCESS)),
db_session: Session = Depends(get_session),
) -> list[JiraConnectorInfo]:
"""List all Jira connectors available to this tenant."""
connectors = fetch_connectors(db_session, sources=[DocumentSource.JIRA])
results: list[JiraConnectorInfo] = []
for c in connectors:
cfg = c.connector_specific_config or {}
project_key = cfg.get("project_key", "")
base_url = cfg.get("jira_base_url", "")
results.append(
JiraConnectorInfo(
id=c.id,
name=c.name,
project_key=project_key,
project_url=base_url,
)
)
return results
@router.get("/jira-connectors/{connector_id}/metadata-keys")
def get_connector_metadata_keys(
connector_id: int,
_user: User = Depends(require_permission(Permission.BASIC_ACCESS)), # noqa: ARG001
db_session: Session = Depends(get_session),
) -> list[str]:
"""Return the distinct doc_metadata keys across all documents for a connector."""
return config_db.get_connector_metadata_keys(connector_id, db_session)
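
An illustrative payload for the PUT endpoint above; the field names come from ConfigUpdate later in this diff, while the values, base URL, and any global API prefix are assumptions:

import requests

payload = {
    "jira_connector_id": 1,
    "jira_project_key": "PROP",
    "field_mapping": ["Due Date", "Funding Amount"],  # visible metadata keys
    "review_model": "gpt-4o",       # rule evaluation model
    "import_model": "gpt-4o-mini",  # checklist import model
}
requests.put("http://localhost:8080/proposal-review/config", json=payload)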

View File

@@ -0,0 +1,147 @@
"""API endpoints for per-finding decisions, proposal decisions, and Jira sync."""
from uuid import UUID
from fastapi import APIRouter
from fastapi import Depends
from sqlalchemy.orm import Session
from onyx.auth.permissions import require_permission
from onyx.db.engine.sql_engine import get_session
from onyx.db.enums import Permission
from onyx.db.models import User
from onyx.error_handling.error_codes import OnyxErrorCode
from onyx.error_handling.exceptions import OnyxError
from onyx.server.features.proposal_review.api.models import FindingDecisionCreate
from onyx.server.features.proposal_review.api.models import FindingResponse
from onyx.server.features.proposal_review.api.models import JiraSyncResponse
from onyx.server.features.proposal_review.api.models import ProposalDecisionCreate
from onyx.server.features.proposal_review.api.models import ProposalDecisionResponse
from onyx.server.features.proposal_review.db import decisions as decisions_db
from onyx.server.features.proposal_review.db import findings as findings_db
from onyx.server.features.proposal_review.db import proposals as proposals_db
from onyx.utils.logger import setup_logger
from shared_configs.contextvars import get_current_tenant_id
logger = setup_logger()
router = APIRouter()
@router.post(
"/findings/{finding_id}/decision",
)
def record_finding_decision(
finding_id: UUID,
request: FindingDecisionCreate,
user: User = Depends(require_permission(Permission.BASIC_ACCESS)),
db_session: Session = Depends(get_session),
) -> FindingResponse:
"""Record or update a decision on a finding (upsert)."""
tenant_id = get_current_tenant_id()
# Verify finding exists
finding = findings_db.get_finding(finding_id, db_session)
if not finding:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Finding not found")
# Verify the finding's proposal belongs to the current tenant
proposal = proposals_db.get_proposal(finding.proposal_id, tenant_id, db_session)
if not proposal:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Finding not found")
finding = decisions_db.upsert_finding_decision(
finding_id=finding_id,
officer_id=user.id,
action=request.action,
notes=request.notes,
db_session=db_session,
)
db_session.commit()
return FindingResponse.from_model(finding)
@router.post(
"/proposals/{proposal_id}/decision",
status_code=201,
)
def record_proposal_decision(
proposal_id: UUID,
request: ProposalDecisionCreate,
user: User = Depends(require_permission(Permission.BASIC_ACCESS)),
db_session: Session = Depends(get_session),
) -> ProposalDecisionResponse:
"""Record a final decision on a proposal."""
tenant_id = get_current_tenant_id()
proposal = proposals_db.get_proposal(proposal_id, tenant_id, db_session)
if not proposal:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")
# Validate decision value
valid_decisions = {"APPROVED", "CHANGES_REQUESTED", "REJECTED"}
if request.decision not in valid_decisions:
raise OnyxError(
OnyxErrorCode.INVALID_INPUT,
"decision must be APPROVED, CHANGES_REQUESTED, or REJECTED",
)
proposal = decisions_db.update_proposal_decision(
proposal_id=proposal_id,
tenant_id=tenant_id,
officer_id=user.id,
decision=request.decision,
notes=request.notes,
db_session=db_session,
)
db_session.commit()
return ProposalDecisionResponse.from_proposal(proposal)
@router.post(
"/proposals/{proposal_id}/sync-jira",
)
def sync_jira(
proposal_id: UUID,
user: User = Depends(require_permission(Permission.BASIC_ACCESS)), # noqa: ARG001
db_session: Session = Depends(get_session),
) -> JiraSyncResponse:
"""Sync the latest proposal decision to Jira.
Dispatches a Celery task that performs 3 Jira API operations:
1. Update custom fields (decision, completion %)
2. Transition the issue to the appropriate column
3. Post a structured review summary comment
"""
tenant_id = get_current_tenant_id()
proposal = proposals_db.get_proposal(proposal_id, tenant_id, db_session)
if not proposal:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")
if not proposal.decision_at:
raise OnyxError(
OnyxErrorCode.INVALID_INPUT,
"No decision to sync -- record a proposal decision first",
)
if proposal.jira_synced:
return JiraSyncResponse(
success=True,
message="Decision already synced to Jira",
)
# Dispatch Celery task via the client app (has Redis broker configured)
from onyx.background.celery.versioned_apps.client import app as celery_app
celery_app.send_task(
"sync_decision_to_jira",
args=[str(proposal_id), tenant_id],
expires=300,
)
db_session.commit()
return JiraSyncResponse(
success=True,
message="Jira sync task dispatched",
)
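
The intended flow through this router, as a hedged client sketch (host, auth, and the id are placeholders): record the proposal decision first, since sync-jira rejects proposals with no decision_at, then trigger the sync, which only dispatches the Celery task:

import requests

base = "http://localhost:8080/proposal-review"  # deployment-specific
proposal_id = "00000000-0000-0000-0000-000000000000"  # placeholder
requests.post(
    f"{base}/proposals/{proposal_id}/decision",
    json={"decision": "APPROVED", "notes": "All hard-stop findings verified"},
)
# Re-running after a successful sync short-circuits with
# "Decision already synced to Jira".
requests.post(f"{base}/proposals/{proposal_id}/sync-jira")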

View File

@@ -0,0 +1,483 @@
"""Pydantic request/response models for Proposal Review."""
from datetime import datetime
from typing import Any
from typing import Literal
from uuid import UUID
from pydantic import BaseModel
from onyx.server.features.proposal_review.db.models import ProposalReviewConfig
from onyx.server.features.proposal_review.db.models import ProposalReviewDocument
from onyx.server.features.proposal_review.db.models import ProposalReviewFinding
from onyx.server.features.proposal_review.db.models import ProposalReviewImportJob
from onyx.server.features.proposal_review.db.models import ProposalReviewProposal
from onyx.server.features.proposal_review.db.models import ProposalReviewRule
from onyx.server.features.proposal_review.db.models import ProposalReviewRuleset
from onyx.server.features.proposal_review.db.models import ProposalReviewRun
# =============================================================================
# Ruleset Schemas
# =============================================================================
class RulesetCreate(BaseModel):
name: str
description: str | None = None
is_default: bool = False
class RulesetUpdate(BaseModel):
name: str | None = None
description: str | None = None
is_default: bool | None = None
is_active: bool | None = None
class RulesetResponse(BaseModel):
id: UUID
tenant_id: str
name: str
description: str | None
is_default: bool
is_active: bool
created_by: UUID | None
created_at: datetime
updated_at: datetime
rules: list["RuleResponse"] = []
@classmethod
def from_model(
cls,
ruleset: ProposalReviewRuleset,
include_rules: bool = True,
) -> "RulesetResponse":
return cls(
id=ruleset.id,
tenant_id=ruleset.tenant_id,
name=ruleset.name,
description=ruleset.description,
is_default=ruleset.is_default,
is_active=ruleset.is_active,
created_by=ruleset.created_by,
created_at=ruleset.created_at,
updated_at=ruleset.updated_at,
rules=(
[RuleResponse.from_model(r) for r in ruleset.rules]
if include_rules
else []
),
)
# =============================================================================
# Rule Schemas
# =============================================================================
class RuleCreate(BaseModel):
name: str
description: str | None = None
category: str | None = None
rule_type: Literal[
"DOCUMENT_CHECK", "METADATA_CHECK", "CROSS_REFERENCE", "CUSTOM_NL"
]
rule_intent: Literal["CHECK", "HIGHLIGHT"] = "CHECK"
prompt_template: str
source: Literal["IMPORTED", "MANUAL"] = "MANUAL"
authority: Literal["OVERRIDE", "RETURN"] | None = None
is_hard_stop: bool = False
priority: int = 0
class RuleUpdate(BaseModel):
name: str | None = None
description: str | None = None
category: str | None = None
rule_type: str | None = None
rule_intent: str | None = None
prompt_template: str | None = None
authority: str | None = None
is_hard_stop: bool | None = None
priority: int | None = None
is_active: bool | None = None
refinement_needed: bool | None = None
refinement_question: str | None = None
class RuleRefinementRequest(BaseModel):
answer: str
class RuleResponse(BaseModel):
id: UUID
ruleset_id: UUID
name: str
description: str | None
category: str | None
rule_type: str
rule_intent: str
prompt_template: str
source: str
authority: str | None
is_hard_stop: bool
priority: int
is_active: bool
refinement_needed: bool
refinement_question: str | None
created_at: datetime
updated_at: datetime
@classmethod
def from_model(cls, rule: ProposalReviewRule) -> "RuleResponse":
return cls(
id=rule.id,
ruleset_id=rule.ruleset_id,
name=rule.name,
description=rule.description,
category=rule.category,
rule_type=rule.rule_type,
rule_intent=rule.rule_intent,
prompt_template=rule.prompt_template,
source=rule.source,
authority=rule.authority,
is_hard_stop=rule.is_hard_stop,
priority=rule.priority,
is_active=rule.is_active,
refinement_needed=rule.refinement_needed,
refinement_question=rule.refinement_question,
created_at=rule.created_at,
updated_at=rule.updated_at,
)
class BulkRuleUpdateRequest(BaseModel):
"""Batch activate/deactivate/delete rules."""
action: Literal["activate", "deactivate", "delete"]
rule_ids: list[UUID]
class BulkRuleUpdateResponse(BaseModel):
updated_count: int
class RuleTestResponse(BaseModel):
rule_id: str
success: bool
error: str | None = None
result: dict[str, Any] | None = None
# =============================================================================
# Proposal Schemas
# =============================================================================
class ProposalResponse(BaseModel):
"""Proposal response including inline decision fields."""
id: UUID
document_id: str
tenant_id: str
status: str
# Inline decision fields
decision_notes: str | None = None
decision_officer_id: UUID | None = None
decision_at: datetime | None = None
jira_synced: bool = False
jira_synced_at: datetime | None = None
created_at: datetime
updated_at: datetime
# Resolved metadata from Document table via field_mapping
metadata: dict[str, Any] = {}
@classmethod
def from_model(
cls,
proposal: ProposalReviewProposal,
metadata: dict[str, Any] | None = None,
) -> "ProposalResponse":
return cls(
id=proposal.id,
document_id=proposal.document_id,
tenant_id=proposal.tenant_id,
status=proposal.status,
decision_notes=proposal.decision_notes,
decision_officer_id=proposal.decision_officer_id,
decision_at=proposal.decision_at,
jira_synced=proposal.jira_synced,
jira_synced_at=proposal.jira_synced_at,
created_at=proposal.created_at,
updated_at=proposal.updated_at,
metadata=metadata or {},
)
class ProposalListResponse(BaseModel):
proposals: list[ProposalResponse]
total_count: int
config_missing: bool = False # True when no config exists
# =============================================================================
# Review Run Schemas
# =============================================================================
class ReviewRunTriggerRequest(BaseModel):
ruleset_id: UUID
class ReviewRunResponse(BaseModel):
id: UUID
proposal_id: UUID
ruleset_id: UUID
triggered_by: UUID
status: str
total_rules: int
completed_rules: int
failed_rules: int
started_at: datetime | None
completed_at: datetime | None
created_at: datetime
@classmethod
def from_model(cls, run: ProposalReviewRun) -> "ReviewRunResponse":
return cls(
id=run.id,
proposal_id=run.proposal_id,
ruleset_id=run.ruleset_id,
triggered_by=run.triggered_by,
status=run.status,
total_rules=run.total_rules,
completed_rules=run.completed_rules,
failed_rules=run.failed_rules,
started_at=run.started_at,
completed_at=run.completed_at,
created_at=run.created_at,
)
# =============================================================================
# Finding Schemas
# =============================================================================
class FindingResponse(BaseModel):
id: UUID
proposal_id: UUID
rule_id: UUID
review_run_id: UUID
verdict: str
confidence: str | None
evidence: str | None
explanation: str | None
suggested_action: str | None
llm_model: str | None
llm_tokens_used: int | None
created_at: datetime
# Nested rule info for display
rule_name: str | None = None
rule_category: str | None = None
rule_is_hard_stop: bool | None = None
# Inline decision fields
decision_action: str | None = None
decision_notes: str | None = None
decided_at: datetime | None = None
@classmethod
def from_model(cls, finding: ProposalReviewFinding) -> "FindingResponse":
rule_name = None
rule_category = None
rule_is_hard_stop = None
if finding.rule is not None:
rule_name = finding.rule.name
rule_category = finding.rule.category
rule_is_hard_stop = finding.rule.is_hard_stop
return cls(
id=finding.id,
proposal_id=finding.proposal_id,
rule_id=finding.rule_id,
review_run_id=finding.review_run_id,
verdict=finding.verdict,
confidence=finding.confidence,
evidence=finding.evidence,
explanation=finding.explanation,
suggested_action=finding.suggested_action,
llm_model=finding.llm_model,
llm_tokens_used=finding.llm_tokens_used,
created_at=finding.created_at,
rule_name=rule_name,
rule_category=rule_category,
rule_is_hard_stop=rule_is_hard_stop,
decision_action=finding.decision_action,
decision_notes=finding.decision_notes,
decided_at=finding.decided_at,
)
# =============================================================================
# Decision Schemas
# =============================================================================
class FindingDecisionCreate(BaseModel):
action: Literal["VERIFIED", "ISSUE", "NOT_APPLICABLE", "OVERRIDDEN"]
notes: str | None = None
class ProposalDecisionCreate(BaseModel):
decision: Literal["APPROVED", "CHANGES_REQUESTED", "REJECTED"]
notes: str | None = None
class ProposalDecisionResponse(BaseModel):
"""Response after recording a proposal-level decision."""
proposal_id: UUID
status: str
decision_notes: str | None
jira_synced: bool
decision_at: datetime | None
@classmethod
def from_proposal(
cls, proposal: ProposalReviewProposal
) -> "ProposalDecisionResponse":
return cls(
proposal_id=proposal.id,
status=proposal.status,
decision_notes=proposal.decision_notes,
jira_synced=proposal.jira_synced,
decision_at=proposal.decision_at,
)
# =============================================================================
# Config Schemas
# =============================================================================
class ConfigUpdate(BaseModel):
jira_connector_id: int | None = None
jira_project_key: str | None = None
field_mapping: list[str] | None = None # List of visible metadata keys
jira_writeback: dict[str, Any] | None = None
# LLM configuration
review_model: str | None = None # model name for rule evaluation
import_model: str | None = None # model name for checklist import
class ConfigResponse(BaseModel):
id: UUID
tenant_id: str
jira_connector_id: int | None
jira_project_key: str | None
field_mapping: list[str] | None
jira_writeback: dict[str, Any] | None
review_model: str | None
import_model: str | None
created_at: datetime
updated_at: datetime
@classmethod
def from_model(cls, config: ProposalReviewConfig) -> "ConfigResponse":
return cls(
id=config.id,
tenant_id=config.tenant_id,
jira_connector_id=config.jira_connector_id,
jira_project_key=config.jira_project_key,
field_mapping=config.field_mapping,
jira_writeback=config.jira_writeback,
review_model=config.review_model,
import_model=config.import_model,
created_at=config.created_at,
updated_at=config.updated_at,
)
# =============================================================================
# Import Schemas
# =============================================================================
class ImportResponse(BaseModel):
rules_created: int
rules: list[RuleResponse]
class ImportJobResponse(BaseModel):
id: UUID
status: str
source_filename: str
rules_created: int
error_message: str | None
created_at: datetime
completed_at: datetime | None
@classmethod
def from_model(cls, job: ProposalReviewImportJob) -> "ImportJobResponse":
return cls(
id=job.id,
status=job.status,
source_filename=job.source_filename,
rules_created=job.rules_created,
error_message=job.error_message,
created_at=job.created_at,
completed_at=job.completed_at,
)
# =============================================================================
# Document Schemas
# =============================================================================
class ProposalDocumentResponse(BaseModel):
id: UUID
proposal_id: UUID
file_name: str
file_type: str | None
document_role: str
uploaded_by: UUID | None
extracted_text: str | None = None
created_at: datetime
@classmethod
def from_model(cls, doc: ProposalReviewDocument) -> "ProposalDocumentResponse":
return cls(
id=doc.id,
proposal_id=doc.proposal_id,
file_name=doc.file_name,
file_type=doc.file_type,
document_role=doc.document_role,
uploaded_by=doc.uploaded_by,
extracted_text=getattr(doc, "extracted_text", None),
created_at=doc.created_at,
)
# =============================================================================
# Jira Sync Schemas
# =============================================================================
class JiraSyncResponse(BaseModel):
success: bool
message: str
# =============================================================================
# Jira Connector Discovery Schemas
# =============================================================================
class JiraConnectorInfo(BaseModel):
id: int
name: str
project_key: str
project_url: str
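# Usage sketch (illustrative, not part of the module): the update endpoints
# consume these schemas via model_dump(exclude_unset=True), so a partial
# payload only touches the fields the caller explicitly set:
#
#   patch = RuleUpdate(priority=5, is_active=False)
#   patch.model_dump(exclude_unset=True)
#   # -> {"priority": 5, "is_active": False}; name, prompt_template, etc.
#   #    are absent and therefore left untouched by the update.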

View File

@@ -0,0 +1,367 @@
"""API endpoints for proposals and proposal documents."""
import io
from typing import Any
from uuid import UUID
from fastapi import APIRouter
from fastapi import Depends
from fastapi import Form
from fastapi import UploadFile
from sqlalchemy import func
from sqlalchemy import or_
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from onyx.auth.permissions import require_permission
from onyx.configs.constants import DocumentSource
from onyx.db.engine.sql_engine import get_session
from onyx.db.enums import Permission
from onyx.db.models import Connector
from onyx.db.models import Document
from onyx.db.models import DocumentByConnectorCredentialPair
from onyx.db.models import User
from onyx.error_handling.error_codes import OnyxErrorCode
from onyx.error_handling.exceptions import OnyxError
from onyx.file_processing.extract_file_text import extract_file_text
from onyx.server.features.proposal_review.api.models import ProposalDocumentResponse
from onyx.server.features.proposal_review.api.models import ProposalListResponse
from onyx.server.features.proposal_review.api.models import ProposalResponse
from onyx.server.features.proposal_review.configs import (
DOCUMENT_UPLOAD_MAX_FILE_SIZE_BYTES,
)
from onyx.server.features.proposal_review.db import config as config_db
from onyx.server.features.proposal_review.db import proposals as proposals_db
from onyx.server.features.proposal_review.db.models import ProposalReviewDocument
from onyx.server.features.proposal_review.db.models import ProposalReviewProposal
from onyx.utils.logger import setup_logger
from shared_configs.contextvars import get_current_tenant_id
logger = setup_logger()
router = APIRouter()
def _resolve_document_metadata(
document: Document,
visible_fields: list[str] | None,
) -> dict[str, Any]:
"""Resolve metadata from a Document's tags, filtered to visible fields.
Jira custom fields are stored as Tag rows (tag_key / tag_value)
linked to the document via document__tag. visible_fields selects
which tag keys to include. If None/empty, returns all tags.
"""
# Build metadata from the document's tags
raw_metadata: dict[str, Any] = {}
for tag in document.tags:
key = tag.tag_key
value = tag.tag_value
# Tags with is_list=True can have multiple values for the same key
if tag.is_list:
raw_metadata.setdefault(key, [])
raw_metadata[key].append(value)
else:
raw_metadata[key] = value
# Extract jira_key from tags and clean title from semantic_id.
# Jira semantic_id is "KEY-123: Summary Text" — split to isolate each.
jira_key = raw_metadata.get("key", "")
title = document.semantic_id or ""
if title and ": " in title:
title = title.split(": ", 1)[1]
raw_metadata["jira_key"] = jira_key
raw_metadata["title"] = title
raw_metadata["link"] = document.link
if not visible_fields:
return raw_metadata
# Filter to only the selected fields, plus always include core fields
# that the frontend needs for navigation, display, and filtering.
resolved: dict[str, Any] = {
"jira_key": raw_metadata.get("jira_key"),
"title": raw_metadata.get("title"),
"link": raw_metadata.get("link"),
"status": raw_metadata.get("status"),
}
for key in visible_fields:
if key in raw_metadata:
resolved[key] = raw_metadata[key]
return resolved
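# Worked example (hypothetical values): for a document with tags
# {"key": "GRANT-42", "status": "Open"}, semantic_id
# "GRANT-42: Robotics Proposal", and visible_fields=["status"], this returns:
#   {"jira_key": "GRANT-42", "title": "Robotics Proposal",
#    "link": document.link, "status": "Open"}
# The four core fields are always included; other tag keys are dropped
# unless listed in visible_fields.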
@router.get("/proposals")
def list_proposals(
status: str | None = None,
limit: int = 100,
offset: int = 0,
user: User = Depends(require_permission(Permission.BASIC_ACCESS)), # noqa: ARG001
db_session: Session = Depends(get_session),
) -> ProposalListResponse:
"""List proposals.
This queries the Document table filtered by the configured Jira project,
LEFT JOINs proposal_review_proposal for review state, and resolves
metadata field names via the field_mapping config.
Documents without a proposal record get one lazily created (status
PENDING) so the frontend receives a stable UUID for follow-up calls.
"""
tenant_id = get_current_tenant_id()
# Get config for field mapping and Jira project filtering
config = config_db.get_config(tenant_id, db_session)
# When no config exists, return an empty list with a hint for the frontend.
# The frontend can show "Configure a Jira connector in Settings to see proposals."
if config is None:
return ProposalListResponse(
proposals=[],
total_count=0,
config_missing=True,
)
visible_fields = config.field_mapping
# Query documents from the configured Jira connector only,
# LEFT JOIN proposal state for review tracking.
# NOTE: Tenant isolation is handled at the schema level (schema-per-tenant).
# The DB session is already scoped to the current tenant's schema, so
# cross-tenant data leakage is prevented by the connection itself.
query = (
db_session.query(Document, ProposalReviewProposal)
.outerjoin(
ProposalReviewProposal,
Document.id == ProposalReviewProposal.document_id,
)
.options(selectinload(Document.tags))
)
# Filter to only documents from the configured Jira connector
if config.jira_connector_id:
# Join through DocumentByConnectorCredentialPair to filter by connector
query = query.join(
DocumentByConnectorCredentialPair,
Document.id == DocumentByConnectorCredentialPair.id,
).filter(
DocumentByConnectorCredentialPair.connector_id == config.jira_connector_id,
)
else:
# No connector configured — filter to Jira source connectors only
# to avoid showing Slack/GitHub/etc documents
query = (
query.join(
DocumentByConnectorCredentialPair,
Document.id == DocumentByConnectorCredentialPair.id,
)
.join(
Connector,
DocumentByConnectorCredentialPair.connector_id == Connector.id,
)
.filter(
Connector.source == DocumentSource.JIRA,
)
)
# Exclude attachment documents — they are children of issue documents
# and have "/attachments/" in their document ID.
query = query.filter(~Document.id.contains("/attachments/"))
# If status filter is specified, only show documents with matching proposal status.
# PENDING is special: documents without a proposal record are implicitly pending.
if status:
if status == "PENDING":
query = query.filter(
or_(
ProposalReviewProposal.status == status,
ProposalReviewProposal.id.is_(None),
),
)
else:
query = query.filter(ProposalReviewProposal.status == status)
# Count before adding DISTINCT ON — count(distinct(...)) handles
# deduplication on its own and conflicts with DISTINCT ON.
total_count = (
query.with_entities(func.count(func.distinct(Document.id))).scalar() or 0
)
# Deduplicate rows that can arise from multiple connector-credential pairs.
# Applied after counting to avoid the DISTINCT ON + aggregate conflict.
# Postgres requires the ORDER BY to lead with the DISTINCT ON expression;
# it also keeps the chosen row per document deterministic.
query = query.distinct(Document.id).order_by(Document.id)
results = query.offset(offset).limit(limit).all()
proposals: list[ProposalResponse] = []
created_any = False
for document, proposal in results:
if proposal is None:
# Lazily create the proposal record so the frontend gets a
# stable UUID it can use for navigation and subsequent API calls.
proposal = proposals_db.get_or_create_proposal(
document_id=document.id,
tenant_id=tenant_id,
db_session=db_session,
)
created_any = True
metadata = _resolve_document_metadata(document, visible_fields)
proposals.append(ProposalResponse.from_model(proposal, metadata=metadata))
if created_any:
db_session.commit()
return ProposalListResponse(proposals=proposals, total_count=total_count)
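# Client sketch (host and route prefix are assumptions; httpx used purely
# for illustration):
#
#   import httpx
#   resp = httpx.get(
#       "https://onyx.example.com/api/proposal-review/proposals",
#       params={"status": "PENDING", "limit": 25, "offset": 0},
#       headers={"Authorization": "Bearer <token>"},
#   )
#   body = resp.json()
#   # body["config_missing"] is True until a config row exists;
#   # body["total_count"] is the pre-pagination count for paging UIs.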
@router.get("/proposals/{proposal_id}")
def get_proposal(
proposal_id: UUID,
user: User = Depends(require_permission(Permission.BASIC_ACCESS)), # noqa: ARG001
db_session: Session = Depends(get_session),
) -> ProposalResponse:
"""Get a single proposal with its metadata from the Document table."""
tenant_id = get_current_tenant_id()
proposal = proposals_db.get_proposal(proposal_id, tenant_id, db_session)
if not proposal:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")
# Load the linked Document for metadata
document = (
db_session.query(Document)
.options(selectinload(Document.tags))
.filter(Document.id == proposal.document_id)
.one_or_none()
)
config = config_db.get_config(tenant_id, db_session)
visible_fields = config.field_mapping if config else None
metadata: dict[str, Any] = {}
if document:
metadata = _resolve_document_metadata(document, visible_fields)
return ProposalResponse.from_model(proposal, metadata=metadata)
# =============================================================================
# Proposal Documents (manual uploads)
# =============================================================================
@router.post(
"/proposals/{proposal_id}/documents",
status_code=201,
)
def upload_document(
proposal_id: UUID,
file: UploadFile,
document_role: str = Form("OTHER"),
user: User = Depends(require_permission(Permission.BASIC_ACCESS)),
db_session: Session = Depends(get_session),
) -> ProposalDocumentResponse:
"""Upload a document to a proposal."""
tenant_id = get_current_tenant_id()
proposal = proposals_db.get_proposal(proposal_id, tenant_id, db_session)
if not proposal:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")
# Read file content
try:
file_bytes = file.file.read()
except Exception as e:
raise OnyxError(
OnyxErrorCode.INVALID_INPUT,
f"Failed to read uploaded file: {str(e)}",
)
# Validate file size
if len(file_bytes) > DOCUMENT_UPLOAD_MAX_FILE_SIZE_BYTES:
raise OnyxError(
OnyxErrorCode.PAYLOAD_TOO_LARGE,
f"File size {len(file_bytes)} bytes exceeds maximum "
f"allowed size of {DOCUMENT_UPLOAD_MAX_FILE_SIZE_BYTES} bytes",
)
# Determine file type from filename
filename = file.filename or "untitled"
file_type = None
if filename:
parts = filename.rsplit(".", 1)
if len(parts) > 1:
file_type = parts[1].upper()
# Extract text from the uploaded file
extracted_text = None
if file_bytes:
try:
extracted_text = extract_file_text(
file=io.BytesIO(file_bytes),
file_name=filename,
)
except Exception as e:
logger.warning(
f"Failed to extract text from uploaded file '{filename}': {e}"
)
doc = ProposalReviewDocument(
proposal_id=proposal_id,
file_name=filename,
file_type=file_type,
document_role=document_role,
uploaded_by=user.id,
extracted_text=extracted_text,
)
db_session.add(doc)
db_session.commit()
return ProposalDocumentResponse.from_model(doc)
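# Client sketch (multipart upload; BASE and the "BUDGET" role value are
# assumptions -- the endpoint defaults document_role to "OTHER"):
#
#   import httpx
#   with open("budget.xlsx", "rb") as f:
#       resp = httpx.post(
#           f"{BASE}/proposals/{proposal_id}/documents",
#           files={"file": ("budget.xlsx", f)},
#           data={"document_role": "BUDGET"},
#       )
#   # 201 returns the stored document; extracted_text is set when text
#   # extraction succeeded and is None otherwise (failures are only logged).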
@router.get("/proposals/{proposal_id}/documents")
def list_documents(
proposal_id: UUID,
user: User = Depends(require_permission(Permission.BASIC_ACCESS)), # noqa: ARG001
db_session: Session = Depends(get_session),
) -> list[ProposalDocumentResponse]:
"""List documents for a proposal."""
tenant_id = get_current_tenant_id()
proposal = proposals_db.get_proposal(proposal_id, tenant_id, db_session)
if not proposal:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")
docs = (
db_session.query(ProposalReviewDocument)
.filter(ProposalReviewDocument.proposal_id == proposal_id)
.order_by(ProposalReviewDocument.created_at)
.all()
)
return [ProposalDocumentResponse.from_model(d) for d in docs]
@router.delete("/proposals/{proposal_id}/documents/{doc_id}", status_code=204)
def delete_document(
proposal_id: UUID,
doc_id: UUID,
user: User = Depends(require_permission(Permission.BASIC_ACCESS)), # noqa: ARG001
db_session: Session = Depends(get_session),
) -> None:
"""Delete a manually uploaded document."""
# Verify the proposal belongs to the current tenant
tenant_id = get_current_tenant_id()
proposal = proposals_db.get_proposal(proposal_id, tenant_id, db_session)
if not proposal:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")
doc = (
db_session.query(ProposalReviewDocument)
.filter(
ProposalReviewDocument.id == doc_id,
ProposalReviewDocument.proposal_id == proposal_id,
)
.one_or_none()
)
if not doc:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Document not found")
db_session.delete(doc)
db_session.commit()

View File

@@ -0,0 +1,211 @@
"""API endpoints for review triggers, status, and findings."""
from uuid import UUID
from fastapi import APIRouter
from fastapi import Depends
from sqlalchemy.orm import Session
from onyx.auth.permissions import require_permission
from onyx.db.engine.sql_engine import get_session
from onyx.db.enums import Permission
from onyx.db.models import User
from onyx.error_handling.error_codes import OnyxErrorCode
from onyx.error_handling.exceptions import OnyxError
from onyx.server.features.proposal_review.api.models import FindingResponse
from onyx.server.features.proposal_review.api.models import ReviewRunResponse
from onyx.server.features.proposal_review.api.models import ReviewRunTriggerRequest
from onyx.server.features.proposal_review.db import findings as findings_db
from onyx.server.features.proposal_review.db import proposals as proposals_db
from onyx.server.features.proposal_review.db import rulesets as rulesets_db
from onyx.utils.logger import setup_logger
from shared_configs.contextvars import get_current_tenant_id
logger = setup_logger()
router = APIRouter()
@router.post(
"/proposals/{proposal_id}/review",
status_code=201,
)
def trigger_review(
proposal_id: UUID,
request: ReviewRunTriggerRequest,
user: User = Depends(require_permission(Permission.BASIC_ACCESS)),
db_session: Session = Depends(get_session),
) -> ReviewRunResponse:
"""Trigger a new review run for a proposal."""
tenant_id = get_current_tenant_id()
# Verify proposal exists
proposal = proposals_db.get_proposal(proposal_id, tenant_id, db_session)
if not proposal:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")
# Verify ruleset exists and count active rules
ruleset = rulesets_db.get_ruleset(request.ruleset_id, tenant_id, db_session)
if not ruleset:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Ruleset not found")
active_rule_count = rulesets_db.count_active_rules(request.ruleset_id, db_session)
if active_rule_count == 0:
raise OnyxError(
OnyxErrorCode.INVALID_INPUT,
"Ruleset has no active rules",
)
# Update proposal status to IN_REVIEW
proposals_db.update_proposal_status(proposal_id, tenant_id, "IN_REVIEW", db_session)
# Create the review run record
run = findings_db.create_review_run(
proposal_id=proposal_id,
ruleset_id=request.ruleset_id,
triggered_by=user.id,
total_rules=active_rule_count,
db_session=db_session,
)
db_session.commit()
logger.info(
f"Review triggered for proposal {proposal_id} "
f"with ruleset {request.ruleset_id} ({active_rule_count} rules)"
)
# Dispatch Celery task via the client app (has Redis broker configured)
from onyx.background.celery.versioned_apps.client import app as celery_app
celery_app.send_task(
"run_proposal_review",
args=[str(run.id), tenant_id],
expires=3600,
)
return ReviewRunResponse.from_model(run)
@router.get(
"/proposals/{proposal_id}/review-runs",
)
def list_review_runs(
proposal_id: UUID,
user: User = Depends(require_permission(Permission.BASIC_ACCESS)), # noqa: ARG001
db_session: Session = Depends(get_session),
) -> list[ReviewRunResponse]:
"""List all review runs for a proposal, most recent first."""
tenant_id = get_current_tenant_id()
proposal = proposals_db.get_proposal(proposal_id, tenant_id, db_session)
if not proposal:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")
runs = findings_db.list_review_runs_by_proposal(proposal_id, db_session)
return [ReviewRunResponse.from_model(r) for r in runs]
@router.get(
"/proposals/{proposal_id}/review-status",
)
def get_review_status(
proposal_id: UUID,
user: User = Depends(require_permission(Permission.BASIC_ACCESS)), # noqa: ARG001
db_session: Session = Depends(get_session),
) -> ReviewRunResponse:
"""Get the status of the latest review run for a proposal."""
tenant_id = get_current_tenant_id()
proposal = proposals_db.get_proposal(proposal_id, tenant_id, db_session)
if not proposal:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")
run = findings_db.get_latest_review_run(proposal_id, db_session)
if not run:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "No review runs found")
return ReviewRunResponse.from_model(run)
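# Trigger-then-poll sketch (hypothetical client; BASE is an assumed prefix):
#
#   import time
#   import httpx
#   run = httpx.post(
#       f"{BASE}/proposals/{proposal_id}/review",
#       json={"ruleset_id": str(ruleset_id)},
#   ).json()
#   while True:
#       st = httpx.get(f"{BASE}/proposals/{proposal_id}/review-status").json()
#       if st["status"] in ("COMPLETED", "FAILED"):
#           break
#       time.sleep(2)  # st["completed_rules"] / st["total_rules"] = progress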
@router.post(
"/proposals/{proposal_id}/retry-failed",
status_code=200,
)
def retry_failed_rules_endpoint(
proposal_id: UUID,
user: User = Depends(require_permission(Permission.BASIC_ACCESS)), # noqa: ARG001
db_session: Session = Depends(get_session),
) -> ReviewRunResponse:
"""Retry only the rules that failed in the latest review run."""
tenant_id = get_current_tenant_id()
proposal = proposals_db.get_proposal(proposal_id, tenant_id, db_session)
if not proposal:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")
run = findings_db.get_latest_review_run(proposal_id, db_session)
if not run:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "No review runs found")
if run.status not in ("COMPLETED", "FAILED"):
raise OnyxError(
OnyxErrorCode.INVALID_INPUT,
"Cannot retry: review is still running",
)
failed = findings_db.get_failed_findings_for_run(run.id, db_session)
if not failed:
raise OnyxError(
OnyxErrorCode.INVALID_INPUT,
"No failed rules to retry",
)
rule_ids = list({str(f.rule_id) for f in failed})
# Set status to RUNNING before dispatching so a second call is rejected
run.status = "RUNNING"
run.completed_at = None
db_session.commit()
logger.info(
f"Retrying {len(rule_ids)} failed rules for run {run.id} "
f"on proposal {proposal_id}"
)
from onyx.background.celery.versioned_apps.client import app as celery_app
celery_app.send_task(
"run_proposal_review",
args=[str(run.id), tenant_id],
kwargs={"rule_ids": rule_ids},
expires=3600,
)
return ReviewRunResponse.from_model(run)
@router.get(
"/proposals/{proposal_id}/findings",
)
def get_findings(
proposal_id: UUID,
review_run_id: UUID | None = None,
user: User = Depends(require_permission(Permission.BASIC_ACCESS)), # noqa: ARG001
db_session: Session = Depends(get_session),
) -> list[FindingResponse]:
"""Get findings for a proposal.
If review_run_id is not specified, returns findings from the latest run.
"""
tenant_id = get_current_tenant_id()
proposal = proposals_db.get_proposal(proposal_id, tenant_id, db_session)
if not proposal:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")
# If no run specified, get the latest
if review_run_id is None:
run = findings_db.get_latest_review_run(proposal_id, db_session)
if not run:
return []
review_run_id = run.id
results = findings_db.list_findings_by_run(review_run_id, db_session)
return [FindingResponse.from_model(f) for f in results]

View File

@@ -0,0 +1,545 @@
"""API endpoints for rulesets and rules."""
from uuid import UUID
from fastapi import APIRouter
from fastapi import Depends
from fastapi import Form
from fastapi import UploadFile
from sqlalchemy.orm import Session
from onyx.auth.permissions import require_permission
from onyx.db.engine.sql_engine import get_session
from onyx.db.enums import Permission
from onyx.db.models import User
from onyx.error_handling.error_codes import OnyxErrorCode
from onyx.error_handling.exceptions import OnyxError
from onyx.server.features.proposal_review.api.models import BulkRuleUpdateRequest
from onyx.server.features.proposal_review.api.models import BulkRuleUpdateResponse
from onyx.server.features.proposal_review.api.models import ImportJobResponse
from onyx.server.features.proposal_review.api.models import RuleCreate
from onyx.server.features.proposal_review.api.models import RuleResponse
from onyx.server.features.proposal_review.api.models import RulesetCreate
from onyx.server.features.proposal_review.api.models import RulesetResponse
from onyx.server.features.proposal_review.api.models import RulesetUpdate
from onyx.server.features.proposal_review.api.models import RuleTestResponse
from onyx.server.features.proposal_review.api.models import RuleUpdate
from onyx.server.features.proposal_review.configs import IMPORT_MAX_FILE_SIZE_BYTES
from onyx.server.features.proposal_review.db import imports as imports_db
from onyx.server.features.proposal_review.db import rulesets as rulesets_db
from onyx.utils.logger import setup_logger
from shared_configs.contextvars import get_current_tenant_id
logger = setup_logger()
router = APIRouter()
# =============================================================================
# Rulesets
# =============================================================================
@router.get("/rulesets")
def list_rulesets(
user: User = Depends(require_permission(Permission.BASIC_ACCESS)), # noqa: ARG001
db_session: Session = Depends(get_session),
) -> list[RulesetResponse]:
"""List all rulesets for the current tenant."""
tenant_id = get_current_tenant_id()
rulesets = rulesets_db.list_rulesets(tenant_id, db_session)
return [RulesetResponse.from_model(rs) for rs in rulesets]
@router.post("/rulesets", status_code=201)
def create_ruleset(
request: RulesetCreate,
user: User = Depends(require_permission(Permission.MANAGE_CONNECTORS)),
db_session: Session = Depends(get_session),
) -> RulesetResponse:
"""Create a new ruleset."""
tenant_id = get_current_tenant_id()
ruleset = rulesets_db.create_ruleset(
tenant_id=tenant_id,
name=request.name,
description=request.description,
is_default=request.is_default,
created_by=user.id,
db_session=db_session,
)
db_session.commit()
return RulesetResponse.from_model(ruleset, include_rules=False)
@router.get("/rulesets/{ruleset_id}")
def get_ruleset(
ruleset_id: UUID,
user: User = Depends(require_permission(Permission.BASIC_ACCESS)), # noqa: ARG001
db_session: Session = Depends(get_session),
) -> RulesetResponse:
"""Get a ruleset with all its rules."""
tenant_id = get_current_tenant_id()
ruleset = rulesets_db.get_ruleset(ruleset_id, tenant_id, db_session)
if not ruleset:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Ruleset not found")
return RulesetResponse.from_model(ruleset)
@router.put("/rulesets/{ruleset_id}")
def update_ruleset(
ruleset_id: UUID,
request: RulesetUpdate,
user: User = Depends( # noqa: ARG001
require_permission(Permission.MANAGE_CONNECTORS)
),
db_session: Session = Depends(get_session),
) -> RulesetResponse:
"""Update a ruleset."""
tenant_id = get_current_tenant_id()
ruleset = rulesets_db.update_ruleset(
ruleset_id=ruleset_id,
tenant_id=tenant_id,
db_session=db_session,
updates=request.model_dump(exclude_unset=True),
)
if not ruleset:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Ruleset not found")
db_session.commit()
return RulesetResponse.from_model(ruleset)
@router.delete("/rulesets/{ruleset_id}", status_code=204)
def delete_ruleset(
ruleset_id: UUID,
user: User = Depends( # noqa: ARG001
require_permission(Permission.MANAGE_CONNECTORS)
),
db_session: Session = Depends(get_session),
) -> None:
"""Delete a ruleset and all its rules."""
tenant_id = get_current_tenant_id()
deleted = rulesets_db.delete_ruleset(ruleset_id, tenant_id, db_session)
if not deleted:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Ruleset not found")
db_session.commit()
# =============================================================================
# Rules
# =============================================================================
@router.post(
"/rulesets/{ruleset_id}/rules",
status_code=201,
)
def create_rule(
ruleset_id: UUID,
request: RuleCreate,
user: User = Depends( # noqa: ARG001
require_permission(Permission.MANAGE_CONNECTORS)
),
db_session: Session = Depends(get_session),
) -> RuleResponse:
"""Create a new rule within a ruleset."""
# Verify ruleset exists and belongs to tenant
tenant_id = get_current_tenant_id()
ruleset = rulesets_db.get_ruleset(ruleset_id, tenant_id, db_session)
if not ruleset:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Ruleset not found")
rule = rulesets_db.create_rule(
ruleset_id=ruleset_id,
name=request.name,
description=request.description,
category=request.category,
rule_type=request.rule_type,
rule_intent=request.rule_intent,
prompt_template=request.prompt_template,
source=request.source,
authority=request.authority,
is_hard_stop=request.is_hard_stop,
priority=request.priority,
db_session=db_session,
)
db_session.commit()
return RuleResponse.from_model(rule)
@router.put("/rules/{rule_id}")
def update_rule(
rule_id: UUID,
request: RuleUpdate,
user: User = Depends( # noqa: ARG001
require_permission(Permission.MANAGE_CONNECTORS)
),
db_session: Session = Depends(get_session),
) -> RuleResponse:
"""Update a rule."""
# Verify the rule belongs to a ruleset owned by the current tenant
tenant_id = get_current_tenant_id()
rule = rulesets_db.get_rule_with_tenant_check(rule_id, tenant_id, db_session)
if not rule:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Rule not found")
updated_rule = rulesets_db.update_rule(
rule_id=rule_id,
db_session=db_session,
updates=request.model_dump(exclude_unset=True),
)
if not updated_rule:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Rule not found")
db_session.commit()
return RuleResponse.from_model(updated_rule)
@router.delete("/rules/{rule_id}", status_code=204)
def delete_rule(
rule_id: UUID,
user: User = Depends( # noqa: ARG001
require_permission(Permission.MANAGE_CONNECTORS)
),
db_session: Session = Depends(get_session),
) -> None:
"""Delete a rule."""
# Verify the rule belongs to a ruleset owned by the current tenant
tenant_id = get_current_tenant_id()
rule = rulesets_db.get_rule_with_tenant_check(rule_id, tenant_id, db_session)
if not rule:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Rule not found")
deleted = rulesets_db.delete_rule(rule_id, db_session)
if not deleted:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Rule not found")
db_session.commit()
@router.post(
"/rulesets/{ruleset_id}/rules/bulk-update",
)
def bulk_update_rules(
ruleset_id: UUID,
request: BulkRuleUpdateRequest,
user: User = Depends( # noqa: ARG001
require_permission(Permission.MANAGE_CONNECTORS)
),
db_session: Session = Depends(get_session),
) -> BulkRuleUpdateResponse:
"""Batch activate/deactivate/delete rules."""
# Verify the ruleset belongs to the current tenant
tenant_id = get_current_tenant_id()
ruleset = rulesets_db.get_ruleset(ruleset_id, tenant_id, db_session)
if not ruleset:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Ruleset not found")
if request.action not in ("activate", "deactivate", "delete"):
raise OnyxError(
OnyxErrorCode.INVALID_INPUT,
"action must be 'activate', 'deactivate', or 'delete'",
)
# Only operate on rules that belong to this ruleset (tenant-scoped)
count = rulesets_db.bulk_update_rules(
request.rule_ids, request.action, ruleset_id, db_session
)
db_session.commit()
return BulkRuleUpdateResponse(updated_count=count)
@router.post(
"/rulesets/{ruleset_id}/import",
status_code=202,
)
def import_checklist_endpoint(
ruleset_id: UUID,
file: UploadFile,
user: User = Depends( # noqa: ARG001
require_permission(Permission.MANAGE_CONNECTORS)
),
db_session: Session = Depends(get_session),
) -> dict[str, str]:
"""Upload a checklist document and parse it into rules via LLM.
Text extraction happens synchronously (fast). The LLM decomposition
runs in a Celery task so the request returns 202 immediately.
Poll GET /rulesets/{ruleset_id}/import/{import_job_id}/status
to track progress.
"""
tenant_id = get_current_tenant_id()
ruleset = rulesets_db.get_ruleset(ruleset_id, tenant_id, db_session)
if not ruleset:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Ruleset not found")
# Read the uploaded file content (synchronous -- fast)
try:
file_content = file.file.read()
except Exception as e:
raise OnyxError(
OnyxErrorCode.INVALID_INPUT,
f"Failed to read uploaded file: {str(e)}",
)
if not file_content:
raise OnyxError(OnyxErrorCode.INVALID_INPUT, "Uploaded file is empty")
if len(file_content) > IMPORT_MAX_FILE_SIZE_BYTES:
raise OnyxError(
OnyxErrorCode.PAYLOAD_TOO_LARGE,
f"File size {len(file_content)} bytes exceeds maximum "
f"allowed size of {IMPORT_MAX_FILE_SIZE_BYTES} bytes",
)
# Extract text synchronously (fast -- no LLM involved)
extracted_text = ""
filename = file.filename or "untitled"
file_ext = filename.rsplit(".", 1)[-1].lower() if "." in filename else ""
if file_ext in ("txt", "text", "md"):
extracted_text = file_content.decode("utf-8", errors="replace")
else:
try:
import io
from onyx.file_processing.extract_file_text import extract_file_text
extracted_text = extract_file_text(
file=io.BytesIO(file_content),
file_name=filename,
)
except Exception as e:
raise OnyxError(
OnyxErrorCode.INVALID_INPUT,
f"Failed to extract text from file: {str(e)}",
)
if not extracted_text or not extracted_text.strip():
raise OnyxError(
OnyxErrorCode.INVALID_INPUT,
"No text could be extracted from the uploaded file",
)
# Create the import job row
job = imports_db.create_import_job(
ruleset_id=ruleset_id,
tenant_id=tenant_id,
source_filename=filename,
extracted_text=extracted_text,
db_session=db_session,
)
db_session.commit()
# Dispatch Celery task via the client app (has Redis broker configured)
from onyx.background.celery.versioned_apps.client import app as celery_app
celery_app.send_task(
"run_checklist_import",
args=[str(job.id), tenant_id],
expires=600,
)
return {"import_job_id": str(job.id)}
@router.get(
"/rulesets/{ruleset_id}/import/active",
)
def get_active_import_job(
ruleset_id: UUID,
user: User = Depends(require_permission(Permission.BASIC_ACCESS)), # noqa: ARG001
db_session: Session = Depends(get_session),
) -> ImportJobResponse | None:
"""Get the latest active (PENDING/RUNNING) import job for a ruleset."""
tenant_id = get_current_tenant_id()
ruleset = rulesets_db.get_ruleset(ruleset_id, tenant_id, db_session)
if not ruleset:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Ruleset not found")
job = imports_db.get_active_import_job(ruleset_id, db_session)
if not job:
return None
return ImportJobResponse.from_model(job)
@router.get(
"/rulesets/{ruleset_id}/import/{import_job_id}/status",
)
def get_import_job_status(
ruleset_id: UUID,
import_job_id: UUID,
user: User = Depends(require_permission(Permission.BASIC_ACCESS)), # noqa: ARG001
db_session: Session = Depends(get_session),
) -> ImportJobResponse:
"""Get the status of a checklist import job."""
tenant_id = get_current_tenant_id()
# Verify ruleset belongs to tenant
ruleset = rulesets_db.get_ruleset(ruleset_id, tenant_id, db_session)
if not ruleset:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Ruleset not found")
job = imports_db.get_import_job(import_job_id, db_session)
if not job or job.ruleset_id != ruleset_id:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Import job not found")
return ImportJobResponse.from_model(job)
@router.post("/rules/{rule_id}/test")
def test_rule(
rule_id: UUID,
user: User = Depends( # noqa: ARG001
require_permission(Permission.MANAGE_CONNECTORS)
),
db_session: Session = Depends(get_session),
) -> RuleTestResponse:
"""Test a rule against sample text.
Evaluates the rule against an empty/minimal proposal context to verify
the prompt template is well-formed and the LLM can produce a valid response.
"""
# Verify the rule belongs to a ruleset owned by the current tenant
tenant_id = get_current_tenant_id()
rule = rulesets_db.get_rule_with_tenant_check(rule_id, tenant_id, db_session)
if not rule:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Rule not found")
from onyx.server.features.proposal_review.engine.context_assembler import (
ProposalContext,
)
from onyx.server.features.proposal_review.engine.rule_evaluator import (
evaluate_rule,
)
# Build a minimal test context
test_context = ProposalContext(
proposal_text="[Sample proposal text for testing. No real proposal loaded.]",
budget_text="[No budget text available for test.]",
foa_text="[No FOA text available for test.]",
metadata={"test_mode": True},
jira_key="TEST-000",
)
try:
result = evaluate_rule(rule, test_context, db_session)
except Exception as e:
return RuleTestResponse(
rule_id=str(rule_id),
success=False,
error=str(e),
)
return RuleTestResponse(
rule_id=str(rule_id),
success=True,
result=result,
)
@router.post("/rules/{rule_id}/refine")
async def refine_rule_endpoint(
rule_id: UUID,
answer: str = Form(...),
file: UploadFile | None = None,
user: User = Depends( # noqa: ARG001
require_permission(Permission.MANAGE_CONNECTORS)
),
db_session: Session = Depends(get_session),
) -> RuleResponse:
"""Submit an answer to a rule's refinement question.
Re-runs the LLM to produce a refined prompt_template that incorporates
the user's institution-specific information, then clears the refinement
flag on the rule. An optional file attachment (pdf, docx, etc.) can be
included — its extracted text is appended to the answer.
"""
tenant_id = get_current_tenant_id()
rule = rulesets_db.get_rule_with_tenant_check(rule_id, tenant_id, db_session)
if not rule:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Rule not found")
if not rule.refinement_needed:
raise OnyxError(
OnyxErrorCode.INVALID_INPUT,
"This rule does not need refinement",
)
if not rule.refinement_question:
raise OnyxError(
OnyxErrorCode.INVALID_INPUT,
"Rule is marked for refinement but has no refinement question",
)
# Build the combined answer from the text field + optional attachment
combined_answer = answer.strip()
if file and file.filename:
import io
file_content = await file.read()
if file_content:
filename = file.filename
file_ext = filename.rsplit(".", 1)[-1].lower() if "." in filename else ""
if file_ext in ("txt", "text", "md"):
file_text = file_content.decode("utf-8", errors="replace")
else:
try:
from onyx.file_processing.extract_file_text import (
extract_file_text,
)
file_text = extract_file_text(
file=io.BytesIO(file_content),
file_name=filename,
)
except Exception as e:
raise OnyxError(
OnyxErrorCode.INVALID_INPUT,
f"Failed to extract text from file: {str(e)}",
)
if file_text and file_text.strip():
combined_answer += (
f"\n\n--- Attached file: {filename} ---\n{file_text.strip()}"
)
from onyx.llm.factory import get_default_llm
from onyx.server.features.proposal_review.engine.checklist_importer import (
refine_rule,
)
llm = get_default_llm(timeout=120)
try:
refined = refine_rule(
rule_name=rule.name,
rule_description=rule.description,
rule_prompt_template=rule.prompt_template,
refinement_question=rule.refinement_question,
user_answer=combined_answer,
llm=llm,
)
except RuntimeError as e:
raise OnyxError(
OnyxErrorCode.SERVER_ERROR,
f"Refinement failed: {str(e)}",
)
updated = rulesets_db.update_rule(
rule_id=rule_id,
db_session=db_session,
updates={
"name": refined["name"],
"description": refined.get("description"),
"prompt_template": refined["prompt_template"],
"rule_type": refined.get("rule_type", rule.rule_type),
"rule_intent": refined.get("rule_intent", rule.rule_intent),
"refinement_needed": False,
"refinement_question": None,
},
)
if not updated:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Rule not found")
db_session.commit()
return RuleResponse.from_model(updated)

View File

@@ -0,0 +1,18 @@
import os
# Feature flag for enabling proposal review
ENABLE_PROPOSAL_REVIEW = (
os.environ.get("ENABLE_PROPOSAL_REVIEW", "true").lower() == "true"
)
# Maximum file size for checklist imports (in MB)
IMPORT_MAX_FILE_SIZE_MB = int(
os.environ.get("PROPOSAL_REVIEW_IMPORT_MAX_FILE_SIZE_MB", "50")
)
IMPORT_MAX_FILE_SIZE_BYTES = IMPORT_MAX_FILE_SIZE_MB * 1024 * 1024
# Maximum file size for document uploads (in MB)
DOCUMENT_UPLOAD_MAX_FILE_SIZE_MB = int(
os.environ.get("PROPOSAL_REVIEW_DOCUMENT_UPLOAD_MAX_FILE_SIZE_MB", "100")
)
DOCUMENT_UPLOAD_MAX_FILE_SIZE_BYTES = DOCUMENT_UPLOAD_MAX_FILE_SIZE_MB * 1024 * 1024
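# Override sketch: all three settings are read from the environment at import
# time, so they can be tuned per deployment (entrypoint command illustrative):
#
#   ENABLE_PROPOSAL_REVIEW=false \
#   PROPOSAL_REVIEW_IMPORT_MAX_FILE_SIZE_MB=20 \
#   PROPOSAL_REVIEW_DOCUMENT_UPLOAD_MAX_FILE_SIZE_MB=250 \
#       python -m onyx.main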

View File

@@ -0,0 +1,101 @@
"""DB operations for tenant configuration."""
from datetime import datetime
from datetime import timezone
from typing import Any
from sqlalchemy import select
from sqlalchemy.orm import Session
from onyx.db.models import Document__Tag
from onyx.db.models import DocumentByConnectorCredentialPair
from onyx.db.models import Tag
from onyx.server.features.proposal_review.db.models import ProposalReviewConfig
from onyx.utils.logger import setup_logger
logger = setup_logger()
def get_config(
tenant_id: str,
db_session: Session,
) -> ProposalReviewConfig | None:
"""Get the config row for a tenant (there is at most one)."""
return (
db_session.query(ProposalReviewConfig)
.filter(ProposalReviewConfig.tenant_id == tenant_id)
.one_or_none()
)
def upsert_config(
tenant_id: str,
db_session: Session,
jira_connector_id: int | None = None,
jira_project_key: str | None = None,
field_mapping: list[str] | None = None,
jira_writeback: dict[str, Any] | None = None,
review_model: str | None = None,
import_model: str | None = None,
) -> ProposalReviewConfig:
"""Create or update the tenant config."""
config = get_config(tenant_id, db_session)
if config:
if jira_connector_id is not None:
config.jira_connector_id = jira_connector_id
if jira_project_key is not None:
config.jira_project_key = jira_project_key
if field_mapping is not None:
config.field_mapping = field_mapping
if jira_writeback is not None:
config.jira_writeback = jira_writeback
if review_model is not None:
config.review_model = review_model
if import_model is not None:
config.import_model = import_model
config.updated_at = datetime.now(timezone.utc)
db_session.flush()
logger.info(f"Updated proposal review config for tenant {tenant_id}")
return config
config = ProposalReviewConfig(
tenant_id=tenant_id,
jira_connector_id=jira_connector_id,
jira_project_key=jira_project_key,
field_mapping=field_mapping,
jira_writeback=jira_writeback,
review_model=review_model,
import_model=import_model,
)
db_session.add(config)
db_session.flush()
logger.info(f"Created proposal review config for tenant {tenant_id}")
return config
def get_connector_metadata_keys(
connector_id: int,
db_session: Session,
) -> list[str]:
"""Return distinct metadata tag keys for documents from a connector.
Jira custom fields are stored as tags (tag_key / tag_value) linked
to documents via the document__tag join table.
"""
stmt = (
select(Tag.tag_key)
.select_from(Tag)
.join(Document__Tag, Tag.id == Document__Tag.tag_id)
.join(
DocumentByConnectorCredentialPair,
Document__Tag.document_id == DocumentByConnectorCredentialPair.id,
)
.where(
DocumentByConnectorCredentialPair.connector_id == connector_id,
)
.distinct()
.limit(500)
)
rows = db_session.execute(stmt).all()
return sorted(row[0] for row in rows)
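# For intuition, roughly the SQL this emits (table names taken from the ORM
# models; exact text varies by dialect):
#
#   SELECT DISTINCT tag.tag_key
#   FROM tag
#   JOIN document__tag ON tag.id = document__tag.tag_id
#   JOIN document_by_connector_credential_pair dcc
#       ON document__tag.document_id = dcc.id
#   WHERE dcc.connector_id = :connector_id
#   LIMIT 500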

View File

@@ -0,0 +1,115 @@
"""DB operations for finding decisions and proposal decisions.
Finding decisions are stored inline on the ProposalReviewFinding row.
Proposal decisions are stored inline on the ProposalReviewProposal row.
"""
from datetime import datetime
from datetime import timezone
from uuid import UUID
from sqlalchemy.orm import Session
from onyx.server.features.proposal_review.db.models import ProposalReviewFinding
from onyx.server.features.proposal_review.db.models import ProposalReviewProposal
from onyx.utils.logger import setup_logger
logger = setup_logger()
# =============================================================================
# Per-Finding Decisions (inline on finding row)
# =============================================================================
def upsert_finding_decision(
finding_id: UUID,
officer_id: UUID,
action: str,
db_session: Session,
notes: str | None = None,
) -> ProposalReviewFinding:
"""Record or update a decision on a finding.
The decision fields live directly on the finding row.
"""
finding = (
db_session.query(ProposalReviewFinding)
.filter(ProposalReviewFinding.id == finding_id)
.one_or_none()
)
if not finding:
raise ValueError(f"Finding {finding_id} not found")
finding.decision_action = action
finding.decision_notes = notes
finding.decision_officer_id = officer_id
finding.decided_at = datetime.now(timezone.utc)
db_session.flush()
logger.info(f"Recorded decision on finding {finding_id}: {action}")
return finding
# =============================================================================
# Proposal-Level Decisions (inline on proposal row)
# =============================================================================
def update_proposal_decision(
proposal_id: UUID,
tenant_id: str,
officer_id: UUID,
decision: str,
db_session: Session,
notes: str | None = None,
) -> ProposalReviewProposal:
"""Record a final decision on a proposal.
Overwrites previous decision fields on the proposal row.
"""
proposal = (
db_session.query(ProposalReviewProposal)
.filter(
ProposalReviewProposal.id == proposal_id,
ProposalReviewProposal.tenant_id == tenant_id,
)
.one_or_none()
)
if not proposal:
raise ValueError(f"Proposal {proposal_id} not found")
proposal.status = decision
proposal.decision_notes = notes
proposal.decision_officer_id = officer_id
proposal.decision_at = datetime.now(timezone.utc)
proposal.jira_synced = False
proposal.jira_synced_at = None
proposal.updated_at = datetime.now(timezone.utc)
db_session.flush()
logger.info(f"Recorded proposal decision {decision} for proposal {proposal_id}")
return proposal
def mark_proposal_jira_synced(
proposal_id: UUID,
tenant_id: str,
db_session: Session,
) -> ProposalReviewProposal | None:
"""Mark a proposal's decision as synced to Jira."""
proposal = (
db_session.query(ProposalReviewProposal)
.filter(
ProposalReviewProposal.id == proposal_id,
ProposalReviewProposal.tenant_id == tenant_id,
)
.one_or_none()
)
if not proposal:
return None
proposal.jira_synced = True
proposal.jira_synced_at = datetime.now(timezone.utc)
db_session.flush()
logger.info(f"Marked proposal {proposal_id} as jira_synced")
return proposal

View File

@@ -0,0 +1,206 @@
"""DB operations for review runs and findings."""
from uuid import UUID
from sqlalchemy import desc
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from onyx.server.features.proposal_review.db.models import ProposalReviewFinding
from onyx.server.features.proposal_review.db.models import ProposalReviewRun
from onyx.utils.logger import setup_logger
logger = setup_logger()
# =============================================================================
# Review Runs
# =============================================================================
def create_review_run(
proposal_id: UUID,
ruleset_id: UUID,
triggered_by: UUID,
total_rules: int,
db_session: Session,
) -> ProposalReviewRun:
"""Create a new review run record."""
run = ProposalReviewRun(
proposal_id=proposal_id,
ruleset_id=ruleset_id,
triggered_by=triggered_by,
total_rules=total_rules,
)
db_session.add(run)
db_session.flush()
logger.info(
f"Created review run {run.id} for proposal {proposal_id} "
f"with {total_rules} rules"
)
return run
def get_review_run(
run_id: UUID,
db_session: Session,
) -> ProposalReviewRun | None:
"""Get a review run by ID."""
return (
db_session.query(ProposalReviewRun)
.filter(ProposalReviewRun.id == run_id)
.one_or_none()
)
def get_latest_review_run(
proposal_id: UUID,
db_session: Session,
) -> ProposalReviewRun | None:
"""Get the most recent review run for a proposal."""
return (
db_session.query(ProposalReviewRun)
.filter(ProposalReviewRun.proposal_id == proposal_id)
.order_by(desc(ProposalReviewRun.created_at))
.first()
)
def list_review_runs_by_proposal(
proposal_id: UUID,
db_session: Session,
limit: int = 20,
) -> list[ProposalReviewRun]:
"""List review runs for a proposal, most recent first."""
return (
db_session.query(ProposalReviewRun)
.filter(ProposalReviewRun.proposal_id == proposal_id)
.order_by(desc(ProposalReviewRun.created_at))
.limit(limit)
.all()
)
# =============================================================================
# Findings
# =============================================================================
def create_finding(
proposal_id: UUID,
rule_id: UUID,
review_run_id: UUID,
verdict: str,
db_session: Session,
confidence: str | None = None,
evidence: str | None = None,
explanation: str | None = None,
suggested_action: str | None = None,
llm_model: str | None = None,
llm_tokens_used: int | None = None,
) -> ProposalReviewFinding:
"""Create a new finding."""
finding = ProposalReviewFinding(
proposal_id=proposal_id,
rule_id=rule_id,
review_run_id=review_run_id,
verdict=verdict,
confidence=confidence,
evidence=evidence,
explanation=explanation,
suggested_action=suggested_action,
llm_model=llm_model,
llm_tokens_used=llm_tokens_used,
)
db_session.add(finding)
db_session.flush()
logger.info(
f"Created finding {finding.id} verdict={verdict} for proposal {proposal_id}"
)
return finding
def get_finding(
finding_id: UUID,
db_session: Session,
) -> ProposalReviewFinding | None:
"""Get a finding by ID with its rule eagerly loaded."""
return (
db_session.query(ProposalReviewFinding)
.filter(ProposalReviewFinding.id == finding_id)
.options(
selectinload(ProposalReviewFinding.rule),
)
.one_or_none()
)
def list_findings_by_proposal(
proposal_id: UUID,
db_session: Session,
review_run_id: UUID | None = None,
) -> list[ProposalReviewFinding]:
"""List findings for a proposal, optionally filtered to a specific run."""
query = (
db_session.query(ProposalReviewFinding)
.filter(ProposalReviewFinding.proposal_id == proposal_id)
.options(
selectinload(ProposalReviewFinding.rule),
)
.order_by(ProposalReviewFinding.created_at)
)
if review_run_id:
query = query.filter(ProposalReviewFinding.review_run_id == review_run_id)
return query.all()
def list_findings_by_run(
review_run_id: UUID,
db_session: Session,
) -> list[ProposalReviewFinding]:
"""List all findings for a specific review run."""
return (
db_session.query(ProposalReviewFinding)
.filter(ProposalReviewFinding.review_run_id == review_run_id)
.options(
selectinload(ProposalReviewFinding.rule),
)
.order_by(ProposalReviewFinding.created_at)
.all()
)
def get_failed_findings_for_run(
review_run_id: UUID,
db_session: Session,
) -> list[ProposalReviewFinding]:
"""Get findings that failed due to system errors (LLM timeout, etc.).
Error findings are created by _save_error_finding and are identifiable
by having no LLM metadata (the call never completed successfully).
"""
return (
db_session.query(ProposalReviewFinding)
.filter(
ProposalReviewFinding.review_run_id == review_run_id,
ProposalReviewFinding.verdict == "NEEDS_REVIEW",
ProposalReviewFinding.llm_model.is_(None),
ProposalReviewFinding.llm_tokens_used.is_(None),
)
.all()
)
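# Illustrative contrast (verdict names other than NEEDS_REVIEW and the model
# name are assumptions):
#
#   normal finding: verdict="PASS",         llm_model="gpt-4o", llm_tokens_used=812
#   error finding:  verdict="NEEDS_REVIEW", llm_model=None,     llm_tokens_used=None
#
# A genuine NEEDS_REVIEW verdict returned by the LLM still records model
# metadata, so the null checks above select only system failures to retry.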
def delete_findings(
finding_ids: list[UUID],
db_session: Session,
) -> int:
"""Delete findings by ID. Returns the number deleted."""
if not finding_ids:
return 0
count = (
db_session.query(ProposalReviewFinding)
.filter(ProposalReviewFinding.id.in_(finding_ids))
.delete(synchronize_session="fetch")
)
return count

View File

@@ -0,0 +1,97 @@
"""DB operations for checklist import jobs."""
import datetime
from datetime import timezone
from uuid import UUID
from sqlalchemy.orm import Session
from onyx.server.features.proposal_review.db.models import ProposalReviewImportJob
from onyx.utils.logger import setup_logger
logger = setup_logger()
def create_import_job(
ruleset_id: UUID,
tenant_id: str,
source_filename: str,
extracted_text: str,
db_session: Session,
) -> ProposalReviewImportJob:
"""Create a new import job record."""
job = ProposalReviewImportJob(
ruleset_id=ruleset_id,
tenant_id=tenant_id,
source_filename=source_filename,
extracted_text=extracted_text,
)
db_session.add(job)
db_session.flush()
logger.info(
f"Created import job {job.id} for ruleset {ruleset_id} "
f"(file: {source_filename})"
)
return job
def get_import_job(
job_id: UUID,
db_session: Session,
) -> ProposalReviewImportJob | None:
"""Get a single import job by ID."""
return (
db_session.query(ProposalReviewImportJob)
.filter(ProposalReviewImportJob.id == job_id)
.one_or_none()
)
def get_active_import_job(
ruleset_id: UUID,
db_session: Session,
) -> ProposalReviewImportJob | None:
"""Get the latest PENDING or RUNNING import job for a ruleset, if any."""
return (
db_session.query(ProposalReviewImportJob)
.filter(
ProposalReviewImportJob.ruleset_id == ruleset_id,
ProposalReviewImportJob.status.in_(["PENDING", "RUNNING"]),
)
.order_by(ProposalReviewImportJob.created_at.desc())
.first()
)
def get_dangling_import_jobs(
db_session: Session,
stale_threshold_minutes: int = 30,
) -> list[ProposalReviewImportJob]:
"""Return import jobs stuck in PENDING or RUNNING for longer than the threshold."""
cutoff = datetime.datetime.now(timezone.utc) - datetime.timedelta(
minutes=stale_threshold_minutes
)
return (
db_session.query(ProposalReviewImportJob)
.filter(
ProposalReviewImportJob.status.in_(["PENDING", "RUNNING"]),
ProposalReviewImportJob.created_at < cutoff,
)
.all()
)
def mark_import_job_failed(
job: ProposalReviewImportJob,
error_message: str,
db_session: Session,
) -> None:
"""Mark an import job as FAILED with the given error message.
Flushes but does NOT commit — the caller is responsible for committing
so that batch operations can be done in a single transaction.
"""
job.status = "FAILED"
job.error_message = error_message
job.completed_at = datetime.datetime.now(timezone.utc)
db_session.flush()

View File

@@ -0,0 +1,367 @@
"""SQLAlchemy models for Proposal Review."""
import datetime
from uuid import UUID
from sqlalchemy import Boolean
from sqlalchemy import DateTime
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import Text
from sqlalchemy import text
from sqlalchemy import UniqueConstraint
from sqlalchemy.dialects.postgresql import JSONB as PGJSONB
from sqlalchemy.dialects.postgresql import UUID as PGUUID
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import mapped_column
from sqlalchemy.orm import relationship
from onyx.db.models import Base
class ProposalReviewRuleset(Base):
__tablename__ = "proposal_review_ruleset"
id: Mapped[UUID] = mapped_column(
PGUUID(as_uuid=True),
primary_key=True,
server_default=text("gen_random_uuid()"),
)
tenant_id: Mapped[str] = mapped_column(Text, nullable=False, index=True)
name: Mapped[str] = mapped_column(Text, nullable=False)
description: Mapped[str | None] = mapped_column(Text, nullable=True)
is_default: Mapped[bool] = mapped_column(
Boolean, nullable=False, server_default=text("false")
)
is_active: Mapped[bool] = mapped_column(
Boolean, nullable=False, server_default=text("true")
)
created_by: Mapped[UUID | None] = mapped_column(
PGUUID(as_uuid=True), ForeignKey("user.id"), nullable=True
)
created_at: Mapped[datetime.datetime] = mapped_column(
DateTime(timezone=True), nullable=False, server_default=func.now()
)
updated_at: Mapped[datetime.datetime] = mapped_column(
DateTime(timezone=True), nullable=False, server_default=func.now()
)
rules: Mapped[list["ProposalReviewRule"]] = relationship(
"ProposalReviewRule",
back_populates="ruleset",
cascade="all, delete-orphan",
order_by="ProposalReviewRule.priority",
)
class ProposalReviewRule(Base):
__tablename__ = "proposal_review_rule"
id: Mapped[UUID] = mapped_column(
PGUUID(as_uuid=True),
primary_key=True,
server_default=text("gen_random_uuid()"),
)
ruleset_id: Mapped[UUID] = mapped_column(
PGUUID(as_uuid=True),
ForeignKey("proposal_review_ruleset.id", ondelete="CASCADE"),
nullable=False,
index=True,
)
name: Mapped[str] = mapped_column(Text, nullable=False)
description: Mapped[str | None] = mapped_column(Text, nullable=True)
category: Mapped[str | None] = mapped_column(Text, nullable=True)
rule_type: Mapped[str] = mapped_column(Text, nullable=False)
rule_intent: Mapped[str] = mapped_column(
Text, nullable=False, server_default=text("'CHECK'")
)
prompt_template: Mapped[str] = mapped_column(Text, nullable=False)
source: Mapped[str] = mapped_column(
Text, nullable=False, server_default=text("'MANUAL'")
)
authority: Mapped[str | None] = mapped_column(Text, nullable=True)
is_hard_stop: Mapped[bool] = mapped_column(
Boolean, nullable=False, server_default=text("false")
)
priority: Mapped[int] = mapped_column(
Integer, nullable=False, server_default=text("0")
)
is_active: Mapped[bool] = mapped_column(
Boolean, nullable=False, server_default=text("true")
)
refinement_needed: Mapped[bool] = mapped_column(
Boolean, nullable=False, server_default=text("false")
)
refinement_question: Mapped[str | None] = mapped_column(Text, nullable=True)
created_at: Mapped[datetime.datetime] = mapped_column(
DateTime(timezone=True), nullable=False, server_default=func.now()
)
updated_at: Mapped[datetime.datetime] = mapped_column(
DateTime(timezone=True), nullable=False, server_default=func.now()
)
ruleset: Mapped["ProposalReviewRuleset"] = relationship(
"ProposalReviewRuleset", back_populates="rules"
)
class ProposalReviewProposal(Base):
__tablename__ = "proposal_review_proposal"
__table_args__ = (
UniqueConstraint("document_id", "tenant_id"),
Index("ix_proposal_review_proposal_tenant_id", "tenant_id"),
Index("ix_proposal_review_proposal_document_id", "document_id"),
Index("ix_proposal_review_proposal_status", "status"),
)
id: Mapped[UUID] = mapped_column(
PGUUID(as_uuid=True),
primary_key=True,
server_default=text("gen_random_uuid()"),
)
document_id: Mapped[str] = mapped_column(Text, nullable=False)
tenant_id: Mapped[str] = mapped_column(Text, nullable=False)
status: Mapped[str] = mapped_column(
Text, nullable=False, server_default=text("'PENDING'")
)
# Inline proposal-level decision fields
decision_notes: Mapped[str | None] = mapped_column(Text, nullable=True)
decision_officer_id: Mapped[UUID | None] = mapped_column(
PGUUID(as_uuid=True), ForeignKey("user.id"), nullable=True
)
decision_at: Mapped[datetime.datetime | None] = mapped_column(
DateTime(timezone=True), nullable=True
)
jira_synced: Mapped[bool] = mapped_column(
Boolean, nullable=False, server_default=text("false")
)
jira_synced_at: Mapped[datetime.datetime | None] = mapped_column(
DateTime(timezone=True), nullable=True
)
created_at: Mapped[datetime.datetime] = mapped_column(
DateTime(timezone=True), nullable=False, server_default=func.now()
)
updated_at: Mapped[datetime.datetime] = mapped_column(
DateTime(timezone=True), nullable=False, server_default=func.now()
)
review_runs: Mapped[list["ProposalReviewRun"]] = relationship(
"ProposalReviewRun",
back_populates="proposal",
cascade="all, delete-orphan",
)
findings: Mapped[list["ProposalReviewFinding"]] = relationship(
"ProposalReviewFinding",
back_populates="proposal",
cascade="all, delete-orphan",
)
documents: Mapped[list["ProposalReviewDocument"]] = relationship(
"ProposalReviewDocument",
back_populates="proposal",
cascade="all, delete-orphan",
)
class ProposalReviewRun(Base):
__tablename__ = "proposal_review_run"
id: Mapped[UUID] = mapped_column(
PGUUID(as_uuid=True),
primary_key=True,
server_default=text("gen_random_uuid()"),
)
proposal_id: Mapped[UUID] = mapped_column(
PGUUID(as_uuid=True),
ForeignKey("proposal_review_proposal.id", ondelete="CASCADE"),
nullable=False,
index=True,
)
ruleset_id: Mapped[UUID] = mapped_column(
PGUUID(as_uuid=True),
ForeignKey("proposal_review_ruleset.id"),
nullable=False,
)
triggered_by: Mapped[UUID] = mapped_column(
PGUUID(as_uuid=True), ForeignKey("user.id"), nullable=False
)
status: Mapped[str] = mapped_column(
Text, nullable=False, server_default=text("'PENDING'")
)
total_rules: Mapped[int] = mapped_column(Integer, nullable=False)
completed_rules: Mapped[int] = mapped_column(
Integer, nullable=False, server_default=text("0")
)
failed_rules: Mapped[int] = mapped_column(
Integer, nullable=False, server_default=text("0")
)
started_at: Mapped[datetime.datetime | None] = mapped_column(
DateTime(timezone=True), nullable=True
)
completed_at: Mapped[datetime.datetime | None] = mapped_column(
DateTime(timezone=True), nullable=True
)
created_at: Mapped[datetime.datetime] = mapped_column(
DateTime(timezone=True), nullable=False, server_default=func.now()
)
proposal: Mapped["ProposalReviewProposal"] = relationship(
"ProposalReviewProposal", back_populates="review_runs"
)
findings: Mapped[list["ProposalReviewFinding"]] = relationship(
"ProposalReviewFinding",
back_populates="review_run",
cascade="all, delete-orphan",
)
class ProposalReviewFinding(Base):
__tablename__ = "proposal_review_finding"
id: Mapped[UUID] = mapped_column(
PGUUID(as_uuid=True),
primary_key=True,
server_default=text("gen_random_uuid()"),
)
proposal_id: Mapped[UUID] = mapped_column(
PGUUID(as_uuid=True),
ForeignKey("proposal_review_proposal.id", ondelete="CASCADE"),
nullable=False,
index=True,
)
rule_id: Mapped[UUID] = mapped_column(
PGUUID(as_uuid=True),
ForeignKey("proposal_review_rule.id", ondelete="CASCADE"),
nullable=False,
index=True,
)
review_run_id: Mapped[UUID] = mapped_column(
PGUUID(as_uuid=True),
ForeignKey("proposal_review_run.id", ondelete="CASCADE"),
nullable=False,
index=True,
)
verdict: Mapped[str] = mapped_column(Text, nullable=False)
confidence: Mapped[str | None] = mapped_column(Text, nullable=True)
evidence: Mapped[str | None] = mapped_column(Text, nullable=True)
explanation: Mapped[str | None] = mapped_column(Text, nullable=True)
suggested_action: Mapped[str | None] = mapped_column(Text, nullable=True)
llm_model: Mapped[str | None] = mapped_column(Text, nullable=True)
llm_tokens_used: Mapped[int | None] = mapped_column(Integer, nullable=True)
# Inline per-finding decision fields
decision_action: Mapped[str | None] = mapped_column(Text, nullable=True)
decision_notes: Mapped[str | None] = mapped_column(Text, nullable=True)
decision_officer_id: Mapped[UUID | None] = mapped_column(
PGUUID(as_uuid=True), ForeignKey("user.id"), nullable=True
)
decided_at: Mapped[datetime.datetime | None] = mapped_column(
DateTime(timezone=True), nullable=True
)
created_at: Mapped[datetime.datetime] = mapped_column(
DateTime(timezone=True), nullable=False, server_default=func.now()
)
proposal: Mapped["ProposalReviewProposal"] = relationship(
"ProposalReviewProposal", back_populates="findings"
)
review_run: Mapped["ProposalReviewRun"] = relationship(
"ProposalReviewRun", back_populates="findings"
)
rule: Mapped["ProposalReviewRule"] = relationship("ProposalReviewRule")
class ProposalReviewDocument(Base):
"""Manually uploaded documents or auto-fetched FOAs."""
__tablename__ = "proposal_review_document"
id: Mapped[UUID] = mapped_column(
PGUUID(as_uuid=True),
primary_key=True,
server_default=text("gen_random_uuid()"),
)
proposal_id: Mapped[UUID] = mapped_column(
PGUUID(as_uuid=True),
ForeignKey("proposal_review_proposal.id", ondelete="CASCADE"),
nullable=False,
index=True,
)
file_name: Mapped[str] = mapped_column(Text, nullable=False)
file_type: Mapped[str | None] = mapped_column(Text, nullable=True)
file_store_id: Mapped[str | None] = mapped_column(Text, nullable=True)
extracted_text: Mapped[str | None] = mapped_column(Text, nullable=True)
document_role: Mapped[str] = mapped_column(Text, nullable=False)
uploaded_by: Mapped[UUID | None] = mapped_column(
PGUUID(as_uuid=True), ForeignKey("user.id"), nullable=True
)
created_at: Mapped[datetime.datetime] = mapped_column(
DateTime(timezone=True), nullable=False, server_default=func.now()
)
proposal: Mapped["ProposalReviewProposal"] = relationship(
"ProposalReviewProposal", back_populates="documents"
)
class ProposalReviewImportJob(Base):
"""Tracks background checklist import jobs dispatched via Celery."""
__tablename__ = "proposal_review_import_job"
id: Mapped[UUID] = mapped_column(
PGUUID(as_uuid=True),
primary_key=True,
server_default=text("gen_random_uuid()"),
)
ruleset_id: Mapped[UUID] = mapped_column(
PGUUID(as_uuid=True),
ForeignKey("proposal_review_ruleset.id", ondelete="CASCADE"),
nullable=False,
index=True,
)
tenant_id: Mapped[str] = mapped_column(Text, nullable=False)
status: Mapped[str] = mapped_column(
Text, nullable=False, server_default=text("'PENDING'")
)
source_filename: Mapped[str] = mapped_column(Text, nullable=False)
extracted_text: Mapped[str] = mapped_column(Text, nullable=False)
rules_created: Mapped[int] = mapped_column(
Integer, nullable=False, server_default=text("0")
)
error_message: Mapped[str | None] = mapped_column(Text, nullable=True)
created_at: Mapped[datetime.datetime] = mapped_column(
DateTime(timezone=True), nullable=False, server_default=func.now()
)
completed_at: Mapped[datetime.datetime | None] = mapped_column(
DateTime(timezone=True), nullable=True
)
class ProposalReviewConfig(Base):
"""Admin configuration (one row per tenant)."""
__tablename__ = "proposal_review_config"
id: Mapped[UUID] = mapped_column(
PGUUID(as_uuid=True),
primary_key=True,
server_default=text("gen_random_uuid()"),
)
tenant_id: Mapped[str] = mapped_column(Text, nullable=False, unique=True)
jira_connector_id: Mapped[int | None] = mapped_column(Integer, nullable=True)
jira_project_key: Mapped[str | None] = mapped_column(Text, nullable=True)
field_mapping: Mapped[list | None] = mapped_column(PGJSONB(), nullable=True)
jira_writeback: Mapped[dict | None] = mapped_column(PGJSONB(), nullable=True)
review_model: Mapped[str | None] = mapped_column(Text, nullable=True)
import_model: Mapped[str | None] = mapped_column(Text, nullable=True)
created_at: Mapped[datetime.datetime] = mapped_column(
DateTime(timezone=True), nullable=False, server_default=func.now()
)
updated_at: Mapped[datetime.datetime] = mapped_column(
DateTime(timezone=True), nullable=False, server_default=func.now()
)

View File

@@ -0,0 +1,133 @@
"""DB operations for proposal state records."""
from datetime import datetime
from datetime import timezone
from uuid import UUID
from sqlalchemy import desc
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session
from onyx.server.features.proposal_review.db.models import ProposalReviewProposal
from onyx.utils.logger import setup_logger
logger = setup_logger()
def get_proposal(
proposal_id: UUID,
tenant_id: str,
db_session: Session,
) -> ProposalReviewProposal | None:
"""Get a proposal by its ID."""
return (
db_session.query(ProposalReviewProposal)
.filter(
ProposalReviewProposal.id == proposal_id,
ProposalReviewProposal.tenant_id == tenant_id,
)
.one_or_none()
)
def get_proposal_by_document_id(
document_id: str,
tenant_id: str,
db_session: Session,
) -> ProposalReviewProposal | None:
"""Get a proposal by its linked document ID."""
return (
db_session.query(ProposalReviewProposal)
.filter(
ProposalReviewProposal.document_id == document_id,
ProposalReviewProposal.tenant_id == tenant_id,
)
.one_or_none()
)
def get_or_create_proposal(
document_id: str,
tenant_id: str,
db_session: Session,
) -> ProposalReviewProposal:
"""Get or lazily create a proposal state record for a document.
This is the primary entry point — the proposal record is created on first
interaction, not when the Jira ticket is ingested.
"""
proposal = get_proposal_by_document_id(document_id, tenant_id, db_session)
if proposal:
return proposal
proposal = ProposalReviewProposal(
document_id=document_id,
tenant_id=tenant_id,
)
db_session.add(proposal)
try:
db_session.flush()
except IntegrityError:
db_session.rollback()
proposal = get_proposal_by_document_id(document_id, tenant_id, db_session)
if proposal is None:
raise
return proposal
logger.info(f"Lazily created proposal {proposal.id} for document {document_id}")
return proposal
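
# --- Illustrative usage (hedged sketch, not part of the original file) -----
# These helpers flush but do not commit; a hypothetical API handler would
# commit once per request after the lazy get-or-create resolves any
# concurrent-creation race via the IntegrityError fallback above.
def _example_touch_proposal(
    document_id: str, tenant_id: str, db_session: Session
) -> ProposalReviewProposal:
    proposal = get_or_create_proposal(document_id, tenant_id, db_session)
    db_session.commit()
    return proposal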
def list_proposals(
tenant_id: str,
db_session: Session,
status: str | None = None,
limit: int = 100,
offset: int = 0,
) -> list[ProposalReviewProposal]:
"""List proposals for a tenant with optional status filter."""
query = (
db_session.query(ProposalReviewProposal)
.filter(ProposalReviewProposal.tenant_id == tenant_id)
.order_by(desc(ProposalReviewProposal.updated_at))
)
if status:
query = query.filter(ProposalReviewProposal.status == status)
return query.offset(offset).limit(limit).all()
def count_proposals(
tenant_id: str,
db_session: Session,
status: str | None = None,
) -> int:
"""Count proposals for a tenant."""
query = db_session.query(ProposalReviewProposal).filter(
ProposalReviewProposal.tenant_id == tenant_id
)
if status:
query = query.filter(ProposalReviewProposal.status == status)
return query.count()
def update_proposal_status(
proposal_id: UUID,
tenant_id: str,
status: str,
db_session: Session,
) -> ProposalReviewProposal | None:
"""Update a proposal's status."""
proposal = (
db_session.query(ProposalReviewProposal)
.filter(
ProposalReviewProposal.id == proposal_id,
ProposalReviewProposal.tenant_id == tenant_id,
)
.one_or_none()
)
if not proposal:
return None
proposal.status = status
proposal.updated_at = datetime.now(timezone.utc)
db_session.flush()
logger.info(f"Updated proposal {proposal_id} status to {status}")
return proposal

View File

@@ -0,0 +1,337 @@
"""DB operations for rulesets and rules."""
from datetime import datetime
from datetime import timezone
from typing import Any
from uuid import UUID
from sqlalchemy import desc
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from onyx.error_handling.error_codes import OnyxErrorCode
from onyx.error_handling.exceptions import OnyxError
from onyx.server.features.proposal_review.db.models import ProposalReviewRule
from onyx.server.features.proposal_review.db.models import ProposalReviewRuleset
from onyx.utils.logger import setup_logger
logger = setup_logger()
_RULESET_UPDATABLE_FIELDS = frozenset(
{"name", "description", "is_default", "is_active"}
)
_RULE_UPDATABLE_FIELDS = frozenset(
{
"name",
"description",
"category",
"rule_type",
"rule_intent",
"prompt_template",
"authority",
"is_hard_stop",
"priority",
"is_active",
"refinement_needed",
"refinement_question",
}
)
# =============================================================================
# Ruleset CRUD
# =============================================================================
def list_rulesets(
tenant_id: str,
db_session: Session,
active_only: bool = False,
) -> list[ProposalReviewRuleset]:
"""List all rulesets for a tenant."""
query = (
db_session.query(ProposalReviewRuleset)
.filter(ProposalReviewRuleset.tenant_id == tenant_id)
.options(selectinload(ProposalReviewRuleset.rules))
.order_by(desc(ProposalReviewRuleset.created_at))
)
if active_only:
query = query.filter(ProposalReviewRuleset.is_active.is_(True))
return query.all()
def get_ruleset(
ruleset_id: UUID,
tenant_id: str,
db_session: Session,
) -> ProposalReviewRuleset | None:
"""Get a single ruleset by ID with all its rules."""
return (
db_session.query(ProposalReviewRuleset)
.filter(
ProposalReviewRuleset.id == ruleset_id,
ProposalReviewRuleset.tenant_id == tenant_id,
)
.options(selectinload(ProposalReviewRuleset.rules))
.one_or_none()
)
def create_ruleset(
tenant_id: str,
name: str,
db_session: Session,
description: str | None = None,
is_default: bool = False,
created_by: UUID | None = None,
) -> ProposalReviewRuleset:
"""Create a new ruleset."""
# If this ruleset is default, un-default any existing default
if is_default:
_clear_default_ruleset(tenant_id, db_session)
ruleset = ProposalReviewRuleset(
tenant_id=tenant_id,
name=name,
description=description,
is_default=is_default,
created_by=created_by,
)
db_session.add(ruleset)
db_session.flush()
logger.info(f"Created ruleset {ruleset.id} '{name}' for tenant {tenant_id}")
return ruleset
def update_ruleset(
ruleset_id: UUID,
tenant_id: str,
db_session: Session,
updates: dict[str, Any],
) -> ProposalReviewRuleset | None:
"""Update a ruleset. Returns None if not found."""
ruleset = get_ruleset(ruleset_id, tenant_id, db_session)
if not ruleset:
return None
for field, value in updates.items():
if field not in _RULESET_UPDATABLE_FIELDS:
raise OnyxError(
OnyxErrorCode.INVALID_INPUT, f"Cannot update field: {field}"
)
if field == "is_default" and value:
_clear_default_ruleset(tenant_id, db_session)
setattr(ruleset, field, value)
ruleset.updated_at = datetime.now(timezone.utc)
db_session.flush()
return ruleset
def delete_ruleset(
ruleset_id: UUID,
tenant_id: str,
db_session: Session,
) -> bool:
"""Delete a ruleset. Returns False if not found."""
ruleset = get_ruleset(ruleset_id, tenant_id, db_session)
if not ruleset:
return False
db_session.delete(ruleset)
db_session.flush()
logger.info(f"Deleted ruleset {ruleset_id}")
return True
def _clear_default_ruleset(tenant_id: str, db_session: Session) -> None:
"""Un-default any existing default ruleset for a tenant."""
db_session.query(ProposalReviewRuleset).filter(
ProposalReviewRuleset.tenant_id == tenant_id,
ProposalReviewRuleset.is_default.is_(True),
).update({ProposalReviewRuleset.is_default: False})
db_session.flush()
# =============================================================================
# Rule CRUD
# =============================================================================
def list_rules_by_ruleset(
ruleset_id: UUID,
db_session: Session,
active_only: bool = False,
) -> list[ProposalReviewRule]:
"""List all rules in a ruleset."""
query = (
db_session.query(ProposalReviewRule)
.filter(ProposalReviewRule.ruleset_id == ruleset_id)
.order_by(ProposalReviewRule.priority)
)
if active_only:
query = query.filter(ProposalReviewRule.is_active.is_(True))
return query.all()
def get_rule(
rule_id: UUID,
db_session: Session,
) -> ProposalReviewRule | None:
"""Get a single rule by ID."""
return (
db_session.query(ProposalReviewRule)
.filter(ProposalReviewRule.id == rule_id)
.one_or_none()
)
def get_rule_with_tenant_check(
rule_id: UUID,
tenant_id: str,
db_session: Session,
) -> ProposalReviewRule | None:
"""Get a single rule by ID, validating it belongs to the given tenant.
Joins with the ruleset table so the tenant check happens in one query,
eliminating the race between separate get_rule + get_ruleset calls.
"""
return (
db_session.query(ProposalReviewRule)
.join(
ProposalReviewRuleset,
ProposalReviewRule.ruleset_id == ProposalReviewRuleset.id,
)
.filter(
ProposalReviewRule.id == rule_id,
ProposalReviewRuleset.tenant_id == tenant_id,
)
.one_or_none()
)
def create_rule(
ruleset_id: UUID,
name: str,
rule_type: str,
prompt_template: str,
db_session: Session,
description: str | None = None,
category: str | None = None,
rule_intent: str = "CHECK",
source: str = "MANUAL",
authority: str | None = None,
is_hard_stop: bool = False,
priority: int = 0,
refinement_needed: bool = False,
refinement_question: str | None = None,
) -> ProposalReviewRule:
"""Create a new rule within a ruleset."""
rule = ProposalReviewRule(
ruleset_id=ruleset_id,
name=name,
description=description,
category=category,
rule_type=rule_type,
rule_intent=rule_intent,
prompt_template=prompt_template,
source=source,
authority=authority,
is_hard_stop=is_hard_stop,
priority=priority,
refinement_needed=refinement_needed,
refinement_question=refinement_question,
)
db_session.add(rule)
db_session.flush()
logger.info(f"Created rule {rule.id} '{name}' in ruleset {ruleset_id}")
return rule
def update_rule(
rule_id: UUID,
db_session: Session,
updates: dict[str, Any],
) -> ProposalReviewRule | None:
"""Update a rule. Returns None if not found."""
rule = get_rule(rule_id, db_session)
if not rule:
return None
for field, value in updates.items():
if field not in _RULE_UPDATABLE_FIELDS:
raise OnyxError(
OnyxErrorCode.INVALID_INPUT, f"Cannot update field: {field}"
)
setattr(rule, field, value)
rule.updated_at = datetime.now(timezone.utc)
db_session.flush()
return rule
def delete_rule(
rule_id: UUID,
db_session: Session,
) -> bool:
"""Delete a rule. Returns False if not found."""
rule = get_rule(rule_id, db_session)
if not rule:
return False
db_session.delete(rule)
db_session.flush()
logger.info(f"Deleted rule {rule_id}")
return True
def bulk_update_rules(
rule_ids: list[UUID],
action: str,
ruleset_id: UUID,
db_session: Session,
) -> int:
"""Batch activate/deactivate/delete rules.
Args:
rule_ids: list of rule IDs
action: "activate" | "deactivate" | "delete"
ruleset_id: scope operations to rules within this ruleset
Returns:
number of rules affected
"""
base_query = db_session.query(ProposalReviewRule).filter(
ProposalReviewRule.id.in_(rule_ids),
ProposalReviewRule.ruleset_id == ruleset_id,
)
if action == "delete":
count = base_query.delete(synchronize_session="fetch")
elif action in ("activate", "deactivate"):
count = base_query.update(
{
ProposalReviewRule.is_active: action == "activate",
ProposalReviewRule.updated_at: datetime.now(timezone.utc),
},
synchronize_session="fetch",
)
else:
raise OnyxError(OnyxErrorCode.INVALID_INPUT, f"Unknown bulk action: {action}")
db_session.flush()
logger.info(f"Bulk {action} on {count} rules")
return count
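
# --- Illustrative usage (hedged sketch, not part of the original file) -----
# A hypothetical API-layer caller: the helper scopes the statement to one
# ruleset and flushes, so the caller decides when to commit.
def _example_deactivate_rules(
    rule_ids: list[UUID], ruleset_id: UUID, db_session: Session
) -> int:
    affected = bulk_update_rules(rule_ids, "deactivate", ruleset_id, db_session)
    db_session.commit()
    return affected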
def count_active_rules(
ruleset_id: UUID,
db_session: Session,
) -> int:
"""Count active rules in a ruleset."""
return (
db_session.query(ProposalReviewRule)
.filter(
ProposalReviewRule.ruleset_id == ruleset_id,
ProposalReviewRule.is_active.is_(True),
)
.count()
)

View File

@@ -0,0 +1 @@
"""Proposal Review Engine — AI-powered proposal evaluation."""

View File

@@ -0,0 +1,497 @@
"""Parses uploaded checklist documents into atomic review rules via LLM.
Uses a two-pass approach to handle checklists of any size without hitting
output token limits:
Pass 1 — Enumerate: Identify all distinct checklist items from the
document (names, categories, sub-checks). This produces a small,
bounded output regardless of document size.
Pass 2 — Decompose: For each identified item, make a focused LLM call
to generate atomic review rules with full prompt templates.
Each call produces 1-5 rules, well within token limits.
Callers orchestrate persistence — this module is pure LLM + parsing, no
DB access, no callbacks, no threads.
"""
import json
import re
from dataclasses import dataclass
from dataclasses import field
from onyx.configs.model_configs import GEN_AI_MODEL_FALLBACK_MAX_TOKENS
from onyx.llm.interfaces import LLM
from onyx.llm.models import SystemMessage
from onyx.llm.models import UserMessage
from onyx.llm.utils import get_llm_max_output_tokens
from onyx.llm.utils import get_model_map
from onyx.llm.utils import llm_response_to_string
from onyx.tracing.llm_utils import llm_generation_span
from onyx.tracing.llm_utils import record_llm_response
from onyx.utils.logger import setup_logger
logger = setup_logger()
# ---------------------------------------------------------------------------
# Data structures
# ---------------------------------------------------------------------------
@dataclass
class ChecklistItem:
"""A single checklist item identified during pass 1."""
id: str
name: str
category: str
description: str
sub_checks: list[str] = field(default_factory=list)
# ---------------------------------------------------------------------------
# Prompts — Pass 1 (Enumerate)
# ---------------------------------------------------------------------------
_ENUMERATE_SYSTEM = """\
You are an expert at analyzing institutional review checklists for university \
grant offices. Your task is to read a checklist document and identify every \
distinct checklist item or section that requires review."""
_ENUMERATE_USER = """\
Read the checklist document below and list every distinct checklist item.
CHECKLIST DOCUMENT:
---
{checklist_text}
---
For each item, provide:
- **id**: A short identifier derived from the document (e.g., "IR-1", \
"KR-3", "Section-A.2"). Invent one if the document doesn't assign one.
- **name**: The item's title or heading.
- **category**: A display label combining the id and name \
(e.g., "IR-2: Regulatory Compliance").
- **description**: One sentence summarizing what this item covers.
- **sub_checks**: A list of the individual checks or requirements \
mentioned under this item. Be thorough — include every distinct \
requirement even if the document groups them together.
Respond with ONLY a valid JSON array:
[
{{
"id": "IR-1",
"name": "Institutional and PI Eligibility",
"category": "IR-1: Institutional and PI Eligibility Requirements",
"description": "Verify institution and PI meet sponsor eligibility.",
"sub_checks": ["Institutional eligibility", "PI eligibility", ...]
}},
...
]"""
# ---------------------------------------------------------------------------
# Prompts — Pass 2 (Decompose one item into rules)
# ---------------------------------------------------------------------------
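# NOTE: _DECOMPOSE_SYSTEM is passed to SystemMessage verbatim (no str.format),
# so its placeholder list is written in the double-brace form that stored
# prompt templates actually contain. _DECOMPOSE_USER *is* rendered with
# str.format, which is why its braces are escaped one level deeper (four
# braces collapse to two). The refinement prompts further below follow the
# same convention.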
_DECOMPOSE_SYSTEM = """\
You are an expert at creating AI review rules for university grant proposal \
review. Each rule you create will be independently evaluated by an LLM \
against a grant proposal. Rules must be atomic (one criterion each) and \
self-contained (the prompt template includes all context needed).
Variable placeholders available for prompt templates:
{{proposal_text}} — full proposal and supporting documents
{{budget_text}} — budget / financial sections
{{foa_text}} — funding opportunity announcement
{{metadata}} — structured metadata (PI, sponsor, etc.)
{{metadata.FIELD_NAME}} — a specific metadata field
Rule types:
DOCUMENT_CHECK — verify presence / content in documents
METADATA_CHECK — validate a structured metadata field
CROSS_REFERENCE — compare information across documents
CUSTOM_NL — natural language evaluation
Rule intents:
CHECK — pass / fail criterion
HIGHLIGHT — informational flag (no pass / fail)
If a rule requires institution-specific info NOT present in the checklist \
(IDC rates, mandatory cost categories, local policies, etc.), set \
refinement_needed=true and include a refinement_question."""
_DECOMPOSE_USER = """\
Create atomic review rules for the checklist item described below.
ITEM TO DECOMPOSE:
ID: {item_id}
Name: {item_name}
Category: {item_category}
Description: {item_description}
Sub-checks: {sub_checks}
FULL CHECKLIST (for context — only create rules for the item above):
---
{checklist_text}
---
Generate one rule per sub-check. Each rule object must have:
{{
"name": "Short descriptive name (max 100 chars)",
"description": "What this rule checks",
"category": "{item_category}",
"rule_type": "DOCUMENT_CHECK | METADATA_CHECK | CROSS_REFERENCE | CUSTOM_NL",
"rule_intent": "CHECK | HIGHLIGHT",
"prompt_template": "Self-contained prompt with {{{{variable}}}} placeholders.",
"refinement_needed": false,
"refinement_question": null
}}
Respond with ONLY a valid JSON array of rule objects."""
# ---------------------------------------------------------------------------
# Public API
# ---------------------------------------------------------------------------
def enumerate_checklist_items(
checklist_text: str,
llm: LLM,
) -> list[ChecklistItem]:
"""Pass 1: Identify all distinct checklist items from the document.
One LLM call. Output is small and bounded regardless of document size.
Args:
checklist_text: Full text extracted from the uploaded checklist file.
llm: The LLM instance to use.
Returns:
Ordered list of checklist items found in the document.
Raises:
RuntimeError: If the LLM call fails or returns unparseable output.
"""
user_content = _ENUMERATE_USER.format(checklist_text=checklist_text)
messages = [
SystemMessage(content=_ENUMERATE_SYSTEM),
UserMessage(content=user_content),
]
max_output_tokens = _get_max_output_tokens(llm)
try:
with llm_generation_span(llm, "checklist_enumerate", messages) as gen_span:
response = llm.invoke(
messages, timeout_override=300, max_tokens=max_output_tokens
)
record_llm_response(gen_span, response)
raw_text = llm_response_to_string(response)
except Exception as e:
logger.error(f"Pass 1 (enumerate) LLM call failed: {e}")
raise RuntimeError(f"Failed to enumerate checklist items: {str(e)}") from e
parsed = _parse_json_array(raw_text, context="enumerate")
items: list[ChecklistItem] = []
for i, raw in enumerate(parsed):
if not isinstance(raw, dict):
logger.warning(f"Enumerate: skipping non-dict at index {i}")
continue
item_id = str(raw.get("id", f"ITEM-{i + 1}"))
name = raw.get("name")
if not name:
logger.warning(f"Enumerate: skipping item at index {i} (no name)")
continue
items.append(
ChecklistItem(
id=item_id,
name=str(name),
category=str(raw.get("category", f"{item_id}: {name}")),
description=str(raw.get("description", "")),
sub_checks=[str(s) for s in raw.get("sub_checks", [])],
)
)
return items
def decompose_checklist_item(
item: ChecklistItem,
checklist_text: str,
llm: LLM,
) -> list[dict]:
"""Pass 2: Decompose one checklist item into atomic review rules.
One LLM call. Output is bounded (1-10 rules per item).
Args:
item: The checklist item to decompose.
checklist_text: Full checklist text (passed as context).
llm: The LLM instance to use.
Returns:
List of validated rule dicts for this item.
Raises:
RuntimeError: If the LLM call fails or returns unparseable output.
"""
sub_checks_str = "\n".join(f" - {s}" for s in item.sub_checks) or " (none listed)"
user_content = _DECOMPOSE_USER.format(
item_id=item.id,
item_name=item.name,
item_category=item.category,
item_description=item.description,
sub_checks=sub_checks_str,
checklist_text=checklist_text,
)
messages = [
SystemMessage(content=_DECOMPOSE_SYSTEM),
UserMessage(content=user_content),
]
max_output_tokens = _get_max_output_tokens(llm)
try:
with llm_generation_span(
llm, f"checklist_decompose_{item.id}", messages
) as gen_span:
response = llm.invoke(
messages, timeout_override=300, max_tokens=max_output_tokens
)
record_llm_response(gen_span, response)
raw_text = llm_response_to_string(response)
except Exception as e:
raise RuntimeError(f"LLM call failed for item '{item.name}': {str(e)}") from e
parsed = _parse_json_array(raw_text, context=f"decompose[{item.id}]")
rules: list[dict] = []
for i, raw_rule in enumerate(parsed):
if not isinstance(raw_rule, dict):
continue
rule = _validate_rule(raw_rule, i)
if rule:
if not rule["category"]:
rule["category"] = item.category
rules.append(rule)
return rules
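
# --- Illustrative usage (hedged sketch, not part of the original file) -----
# How a caller might orchestrate the two passes end to end. Persistence is
# deliberately omitted: per the module docstring, callers own DB access.
def _example_import_checklist(checklist_text: str, llm: LLM) -> list[dict]:
    all_rules: list[dict] = []
    for item in enumerate_checklist_items(checklist_text, llm):  # pass 1
        # pass 2: one focused call per item keeps output token usage bounded
        all_rules.extend(decompose_checklist_item(item, checklist_text, llm))
    return all_rules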
# ---------------------------------------------------------------------------
# Prompts — Refinement (single rule)
# ---------------------------------------------------------------------------
_REFINE_SYSTEM = """\
You are an expert at creating AI review rules for university grant proposal \
review. You are refining a rule that was previously flagged as needing \
institution-specific information. The user has now provided that information.
Variable placeholders available for prompt templates:
{{proposal_text}} — full proposal and supporting documents
{{budget_text}} — budget / financial sections
{{foa_text}} — funding opportunity announcement
{{metadata}} — structured metadata (PI, sponsor, etc.)
{{metadata.FIELD_NAME}} — a specific metadata field
Rule types:
DOCUMENT_CHECK — verify presence / content in documents
METADATA_CHECK — validate a structured metadata field
CROSS_REFERENCE — compare information across documents
CUSTOM_NL — natural language evaluation
Rule intents:
CHECK — pass / fail criterion
HIGHLIGHT — informational flag (no pass / fail)"""
_REFINE_USER = """\
The following rule was imported from a checklist but flagged as needing \
additional information before it can be used.
CURRENT RULE:
Name: {rule_name}
Description: {rule_description}
Prompt Template: {rule_prompt_template}
QUESTION THAT WAS ASKED:
{refinement_question}
USER'S ANSWER:
{user_answer}
Using the user's answer, produce a refined version of this rule. \
Incorporate the institution-specific information into the prompt_template \
so the rule is fully self-contained and no longer needs refinement.
Respond with ONLY a single JSON object (not an array):
{{
"name": "Short descriptive name (max 100 chars)",
"description": "What this rule checks",
"rule_type": "DOCUMENT_CHECK | METADATA_CHECK | CROSS_REFERENCE | CUSTOM_NL",
"rule_intent": "CHECK | HIGHLIGHT",
"prompt_template": "Refined self-contained prompt with {{{{variable}}}} placeholders.",
"refinement_needed": false,
"refinement_question": null
}}"""
def refine_rule(
rule_name: str,
rule_description: str | None,
rule_prompt_template: str,
refinement_question: str,
user_answer: str,
llm: LLM,
) -> dict:
"""Refine a single rule using the user's answer to the refinement question.
One LLM call. Returns a validated rule dict with refinement_needed=False.
Raises:
RuntimeError: If the LLM call fails or returns unparseable output.
"""
user_content = _REFINE_USER.format(
rule_name=rule_name,
rule_description=rule_description or "(none)",
rule_prompt_template=rule_prompt_template,
refinement_question=refinement_question,
user_answer=user_answer,
)
messages = [
SystemMessage(content=_REFINE_SYSTEM),
UserMessage(content=user_content),
]
try:
with llm_generation_span(llm, "checklist_refine_rule", messages) as gen_span:
response = llm.invoke(messages, timeout_override=120)
record_llm_response(gen_span, response)
raw_text = llm_response_to_string(response)
except Exception as e:
raise RuntimeError(f"LLM call failed during rule refinement: {str(e)}") from e
# Parse the single JSON object (strip code fences)
text = raw_text.strip()
if text.startswith("```"):
text = re.sub(r"^```(?:json)?\s*\n?", "", text)
text = re.sub(r"\n?```\s*$", "", text)
text = text.strip()
try:
parsed = json.loads(text)
except json.JSONDecodeError as e:
raise RuntimeError(f"LLM returned invalid JSON during refinement: {e}") from e
if isinstance(parsed, list):
if not parsed:
raise RuntimeError("LLM returned an empty array during refinement")
parsed = parsed[0]
if not isinstance(parsed, dict):
raise RuntimeError("LLM returned non-object JSON during refinement")
rule = _validate_rule(parsed, 0)
if not rule:
raise RuntimeError("LLM returned an invalid rule during refinement")
# Force refinement_needed to False
rule["refinement_needed"] = False
rule["refinement_question"] = None
return rule
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _get_max_output_tokens(llm: LLM) -> int:
"""Look up the model's max output tokens from litellm's model cost map."""
try:
model_map = get_model_map()
return get_llm_max_output_tokens(
model_map=model_map,
model_name=llm.config.model_name,
model_provider=llm.config.model_provider,
)
except Exception as e:
logger.warning(f"Failed to resolve max output tokens: {e}")
return int(GEN_AI_MODEL_FALLBACK_MAX_TOKENS)
def _parse_json_array(raw_text: str, context: str) -> list:
"""Parse an LLM response as a JSON array, stripping code fences."""
text = raw_text.strip()
if text.startswith("```"):
text = re.sub(r"^```(?:json)?\s*\n?", "", text)
text = re.sub(r"\n?```\s*$", "", text)
text = text.strip()
try:
parsed = json.loads(text)
except json.JSONDecodeError as e:
logger.error(f"[{context}] Failed to parse JSON: {e}")
logger.debug(f"[{context}] Raw LLM response: {text[:500]}...")
raise RuntimeError(
f"LLM returned invalid JSON during {context}. "
"Please try the import again."
) from e
if not isinstance(parsed, list):
raise RuntimeError(
f"LLM returned non-array JSON during {context}. " "Expected a list."
)
return parsed
def _validate_rule(raw_rule: dict, index: int) -> dict | None:
"""Validate and normalize a single parsed rule dict."""
valid_types = {
"DOCUMENT_CHECK",
"METADATA_CHECK",
"CROSS_REFERENCE",
"CUSTOM_NL",
}
valid_intents = {"CHECK", "HIGHLIGHT"}
name = raw_rule.get("name")
if not name:
logger.warning(f"Rule at index {index} missing 'name', skipping")
return None
prompt_template = raw_rule.get("prompt_template")
if not prompt_template:
logger.warning(f"Rule '{name}' missing 'prompt_template', skipping")
return None
rule_type = str(raw_rule.get("rule_type", "CUSTOM_NL")).upper()
if rule_type not in valid_types:
rule_type = "CUSTOM_NL"
rule_intent = str(raw_rule.get("rule_intent", "CHECK")).upper()
if rule_intent not in valid_intents:
rule_intent = "CHECK"
return {
"name": str(name)[:200],
"description": raw_rule.get("description"),
"category": raw_rule.get("category"),
"rule_type": rule_type,
"rule_intent": rule_intent,
"prompt_template": str(prompt_template),
"refinement_needed": bool(raw_rule.get("refinement_needed", False)),
"refinement_question": (
str(raw_rule["refinement_question"])
if raw_rule.get("refinement_question")
else None
),
}

View File

@@ -0,0 +1,340 @@
"""Assembles all available text content for a proposal to pass to rule evaluation.
V1 LIMITATION: Document body text (the main text content extracted by connectors)
is stored in Vespa, not in the PostgreSQL Document table. The DB row only stores
metadata (semantic_id, link, doc_metadata, primary_owners, etc.). For Jira tickets,
the Description and Comments text are indexed into Vespa during connector runs and
are NOT accessible here without a Vespa query.
As a result, the primary source of rich text for rule evaluation in V1 is:
- Manually uploaded documents (proposal_review_document.extracted_text)
- Structured metadata from the Document row's doc_metadata JSONB column
- For Jira tickets: the connector populates doc_metadata with field values,
which often includes Description, Status, Priority, Assignee, etc.
Future improvement: add a Vespa retrieval step to fetch indexed text chunks for
the parent document and its attachments.
"""
import json
from dataclasses import dataclass
from dataclasses import field
from uuid import UUID
from sqlalchemy.orm import Session
from onyx.db.models import Document
from onyx.server.features.proposal_review.db.models import ProposalReviewDocument
from onyx.server.features.proposal_review.db.models import ProposalReviewProposal
from onyx.utils.logger import setup_logger
logger = setup_logger()
# Metadata keys from Jira connector that commonly carry useful text content.
# These are extracted from doc_metadata and presented as labeled sections to
# give the LLM more signal when evaluating rules.
_JIRA_TEXT_METADATA_KEYS = [
"description",
"summary",
"comment",
"comments",
"acceptance_criteria",
"story_points",
"priority",
"status",
"resolution",
"issue_type",
"labels",
"components",
"fix_versions",
"affects_versions",
"environment",
"assignee",
"reporter",
"creator",
]
@dataclass
class ProposalContext:
"""All text and metadata context assembled for rule evaluation."""
proposal_text: str # concatenated text from all documents
budget_text: str # best-effort budget section extraction
foa_text: str # FOA content (auto-fetched or uploaded)
metadata: dict # structured metadata from Document.doc_metadata
jira_key: str # for display/reference
metadata_raw: dict = field(default_factory=dict) # full unresolved metadata
def get_proposal_context(
proposal_id: UUID,
db_session: Session,
) -> ProposalContext:
"""Assemble context for rule evaluation.
Gathers text from three sources:
1. Jira ticket content (from Document.semantic_id + doc_metadata)
2. Jira attachments (child Documents linked by ID prefix convention)
3. Manually uploaded documents (from proposal_review_document.extracted_text)
For MVP, returns full text of everything. Future: smart section selection.
"""
# 1. Get the proposal record to find the linked document_id
proposal = (
db_session.query(ProposalReviewProposal)
.filter(ProposalReviewProposal.id == proposal_id)
.one_or_none()
)
if not proposal:
logger.warning(f"Proposal {proposal_id} not found during context assembly")
return ProposalContext(
proposal_text="",
budget_text="",
foa_text="",
metadata={},
jira_key="",
metadata_raw={},
)
# 2. Fetch the parent Document (Jira ticket)
parent_doc = (
db_session.query(Document)
.filter(Document.id == proposal.document_id)
.one_or_none()
)
jira_key = ""
metadata: dict = {}
all_text_parts: list[str] = []
budget_parts: list[str] = []
foa_parts: list[str] = []
if parent_doc:
jira_key = parent_doc.semantic_id or ""
metadata = parent_doc.doc_metadata or {}
# Build text from DB-available fields. The actual ticket body text lives
# in Vespa and is not accessible here. The doc_metadata JSONB column
# often contains structured Jira fields that the connector extracted.
parent_text = _build_parent_document_text(parent_doc)
if parent_text:
all_text_parts.append(parent_text)
# 3. Look for child Documents (Jira attachments).
# Jira attachment Documents have IDs of the form:
# "{parent_jira_url}/attachments/{attachment_id}"
# We find them via ID prefix match.
#
# V1 LIMITATION: child document text content is in Vespa, not in the
# DB. We can only extract metadata (filename, mime type, etc.) from
# the Document row. The actual attachment text is not available here
# without a Vespa query. See module docstring for details.
child_docs = _find_child_documents(parent_doc, db_session)
if child_docs:
logger.info(
f"Found {len(child_docs)} child documents for {jira_key}. "
f"Note: their text content is in Vespa and only metadata is "
f"available for rule evaluation."
)
for child_doc in child_docs:
child_text = _build_child_document_text(child_doc)
if child_text:
all_text_parts.append(child_text)
_classify_child_text(child_doc, child_text, budget_parts, foa_parts)
else:
logger.warning(
f"Parent Document not found for proposal {proposal_id} "
f"(document_id={proposal.document_id}). "
f"Context will rely on manually uploaded documents only."
)
# 4. Fetch manually uploaded documents from proposal_review_document.
# This is the PRIMARY source of rich text content for V1 since the
# extracted_text column holds the full document content.
manual_docs = (
db_session.query(ProposalReviewDocument)
.filter(ProposalReviewDocument.proposal_id == proposal_id)
.order_by(ProposalReviewDocument.created_at)
.all()
)
for doc in manual_docs:
if doc.extracted_text:
all_text_parts.append(
f"--- Document: {doc.file_name} (role: {doc.document_role}) ---\n"
f"{doc.extracted_text}"
)
# Classify by role
role_upper = (doc.document_role or "").upper()
if role_upper == "BUDGET" or _is_budget_filename(doc.file_name):
budget_parts.append(doc.extracted_text)
elif role_upper == "FOA":
foa_parts.append(doc.extracted_text)
return ProposalContext(
proposal_text="\n\n".join(all_text_parts) if all_text_parts else "",
budget_text="\n\n".join(budget_parts) if budget_parts else "",
foa_text="\n\n".join(foa_parts) if foa_parts else "",
metadata=metadata,
jira_key=jira_key,
metadata_raw=metadata,
)
def _build_parent_document_text(doc: Document) -> str:
"""Build text representation from a parent Document row (Jira ticket).
The Document table does NOT store the ticket body text -- that lives in Vespa.
What we DO have access to:
- semantic_id: typically "{ISSUE_KEY}: {summary}"
- link: URL to the Jira ticket
- doc_metadata: JSONB with structured fields from the connector (may include
description, status, priority, assignee, custom fields, etc.)
- primary_owners / secondary_owners: people associated with the document
We extract all available metadata and present it as labeled sections to
maximize the signal available to the LLM for rule evaluation.
"""
parts: list[str] = []
if doc.semantic_id:
parts.append(f"Document: {doc.semantic_id}")
if doc.link:
parts.append(f"Link: {doc.link}")
# Include owner information which may be useful for compliance checks
if doc.primary_owners:
parts.append(f"Primary Owners: {', '.join(doc.primary_owners)}")
if doc.secondary_owners:
parts.append(f"Secondary Owners: {', '.join(doc.secondary_owners)}")
# doc_metadata contains structured data from the Jira connector.
# Extract well-known text-bearing fields first, then include the rest.
if doc.doc_metadata:
metadata = doc.doc_metadata
# Extract well-known Jira fields as labeled sections
for key in _JIRA_TEXT_METADATA_KEYS:
value = metadata.get(key)
if value is not None and value != "" and value != []:
label = key.replace("_", " ").title()
if isinstance(value, list):
parts.append(f"{label}: {', '.join(str(v) for v in value)}")
elif isinstance(value, dict):
parts.append(
f"{label}:\n{json.dumps(value, indent=2, default=str)}"
)
else:
parts.append(f"{label}: {value}")
# Include any remaining metadata keys not in the well-known set,
# so custom fields and connector-specific data are not lost.
remaining = {
k: v
for k, v in metadata.items()
            # Exact match: the well-known keys above were matched exactly, so
            # lowercasing here would silently drop mixed-case variants instead
            # of surfacing them in the Additional Metadata section.
            if k not in _JIRA_TEXT_METADATA_KEYS
and v is not None
and v != ""
and v != []
}
if remaining:
parts.append(
f"Additional Metadata:\n"
f"{json.dumps(remaining, indent=2, default=str)}"
)
return "\n".join(parts) if parts else ""
def _build_child_document_text(doc: Document) -> str:
"""Build text representation from a child Document row (Jira attachment).
V1 LIMITATION: The actual extracted text of the attachment lives in Vespa,
not in the Document table. We can only present the metadata that the
connector stored in doc_metadata (filename, mime type, size, parent ticket).
This means the LLM knows an attachment EXISTS and its metadata, but cannot
read its contents. Future versions should add a Vespa retrieval step.
"""
parts: list[str] = []
if doc.semantic_id:
parts.append(f"Attachment: {doc.semantic_id}")
if doc.link:
parts.append(f"Link: {doc.link}")
# Child document metadata typically includes:
# parent_ticket, attachment_filename, attachment_mime_type, attachment_size
if doc.doc_metadata:
for key, value in doc.doc_metadata.items():
if value is not None and value != "":
label = key.replace("_", " ").title()
parts.append(f"{label}: {value}")
if not parts:
return ""
# Note the limitation inline for the LLM context
parts.append(
"[Note: Full attachment text is indexed in Vespa and not available "
"in this context. Upload the document manually for full text analysis.]"
)
return "\n".join(parts)
def _find_child_documents(parent_doc: Document, db_session: Session) -> list[Document]:
"""Find child Documents linked to the parent (e.g. Jira attachments).
Jira attachments are indexed as separate Document rows whose ID follows
the convention: "{parent_document_id}/attachments/{attachment_id}".
The parent_document_id for Jira is the full URL to the issue, e.g.
"https://jira.example.com/browse/PROJ-123".
V1 LIMITATION: These child Document rows only contain metadata in the DB.
Their actual extracted text content is stored in Vespa. To read the
attachment text, a Vespa query would be required. This is not implemented
in V1 -- officers should upload key documents manually for full text
analysis.
"""
if not parent_doc.id:
return []
# Child documents have IDs that start with the parent document's ID
# followed by a path segment (e.g., /attachments/12345)
# Escape LIKE wildcards in the document ID
escaped_id = parent_doc.id.replace("%", r"\%").replace("_", r"\_")
child_docs = (
db_session.query(Document)
.filter(
Document.id.like(f"{escaped_id}/%"),
Document.id != parent_doc.id,
)
.all()
)
return child_docs
def _classify_child_text(
doc: Document,
text: str,
budget_parts: list[str],
foa_parts: list[str],
) -> None:
"""Best-effort classification of child document text into budget or FOA."""
semantic_id = (doc.semantic_id or "").lower()
if _is_budget_filename(semantic_id):
budget_parts.append(text)
elif any(
term in semantic_id
for term in ["foa", "funding opportunity", "rfa", "solicitation", "nofo"]
):
foa_parts.append(text)
def _is_budget_filename(filename: str) -> bool:
"""Check if a filename suggests budget content."""
lower = (filename or "").lower()
return any(term in lower for term in ["budget", "cost", "financial", "expenditure"])

View File

@@ -0,0 +1,168 @@
"""Auto-fetches Funding Opportunity Announcements using Onyx web search infrastructure."""
from uuid import UUID
from sqlalchemy.orm import Session
from onyx.server.features.proposal_review.db.models import ProposalReviewDocument
from onyx.utils.logger import setup_logger
logger = setup_logger()
# Map known opportunity ID prefixes to federal agency domains
_AGENCY_DOMAINS: dict[str, str] = {
"RFA": "grants.nih.gov",
"PA": "grants.nih.gov",
"PAR": "grants.nih.gov",
"R01": "grants.nih.gov",
"R21": "grants.nih.gov",
"U01": "grants.nih.gov",
"NOT": "grants.nih.gov",
"NSF": "nsf.gov",
"DE-FOA": "energy.gov",
"HRSA": "hrsa.gov",
"W911": "grants.gov", # DoD
"FA": "grants.gov", # Air Force
"N00": "grants.gov", # Navy
"NOFO": "grants.gov",
}
def fetch_foa(
opportunity_id: str,
proposal_id: UUID,
db_session: Session,
) -> str | None:
"""Fetch FOA content given an opportunity ID.
1. Determine domain from ID prefix (RFA/PA -> nih.gov, NSF -> nsf.gov, etc.)
2. Build search query
3. Call Onyx web search provider
4. Fetch full content from best URL
5. Save as proposal_review_document with role=FOA
6. Return extracted text or None
If the web search provider is not configured, logs a warning and returns None.
"""
if not opportunity_id or not opportunity_id.strip():
logger.debug("No opportunity_id provided, skipping FOA fetch")
return None
opportunity_id = opportunity_id.strip()
# Check if we already have an FOA document for this proposal
existing_foa = (
db_session.query(ProposalReviewDocument)
.filter(
ProposalReviewDocument.proposal_id == proposal_id,
ProposalReviewDocument.document_role == "FOA",
)
.first()
)
if existing_foa and existing_foa.extracted_text:
logger.info(
f"FOA document already exists for proposal {proposal_id}, skipping fetch"
)
return existing_foa.extracted_text
# Determine search domain from opportunity ID prefix
site_domain = _determine_domain(opportunity_id)
# Build search query
search_query = f"{opportunity_id} funding opportunity announcement"
if site_domain:
search_query = f"site:{site_domain} {opportunity_id}"
# Try to get the web search provider
try:
from onyx.tools.tool_implementations.web_search.providers import (
get_default_provider,
)
provider = get_default_provider()
except Exception as e:
logger.warning(f"Failed to load web search provider: {e}")
provider = None
if provider is None:
logger.warning(
"No web search provider configured. Cannot auto-fetch FOA. "
"Configure a web search provider in Admin settings to enable this feature."
)
return None
# Search for the FOA
try:
results = provider.search(search_query)
except Exception as e:
logger.error(f"Web search failed for FOA '{opportunity_id}': {e}")
return None
if not results:
logger.info(f"No search results found for FOA '{opportunity_id}'")
return None
# Pick the best result URL
best_url = str(results[0].link)
logger.info(f"Fetching FOA content from: {best_url}")
# Fetch full content from the URL
try:
from onyx.tools.tool_implementations.open_url.onyx_web_crawler import (
OnyxWebCrawler,
)
crawler = OnyxWebCrawler()
contents = crawler.contents([best_url])
if (
not contents
or not contents[0].scrape_successful
or not contents[0].full_content
):
logger.warning(f"No content extracted from FOA URL: {best_url}")
return None
foa_text = contents[0].full_content
except Exception as e:
logger.error(f"Failed to fetch FOA content from {best_url}: {e}")
return None
# Save as a proposal_review_document with role=FOA
try:
foa_doc = ProposalReviewDocument(
proposal_id=proposal_id,
file_name=f"FOA_{opportunity_id}.html",
file_type="HTML",
document_role="FOA",
extracted_text=foa_text,
# uploaded_by is None for auto-fetched documents
)
db_session.add(foa_doc)
db_session.flush()
logger.info(
f"Saved FOA document for proposal {proposal_id} "
f"(opportunity_id={opportunity_id}, {len(foa_text)} chars)"
)
except Exception as e:
logger.error(f"Failed to save FOA document: {e}")
# Still return the text even if save fails
return foa_text
return foa_text
def _determine_domain(opportunity_id: str) -> str | None:
"""Determine the likely agency domain from the opportunity ID prefix."""
upper_id = opportunity_id.upper()
for prefix, domain in _AGENCY_DOMAINS.items():
if upper_id.startswith(prefix):
return domain
# If it looks like a grants.gov number (numeric), try grants.gov
if opportunity_id.replace("-", "").isdigit():
return "grants.gov"
return None
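
# Illustrative behavior of _determine_domain (hedged examples, derived from
# the mapping above):
#   "RFA-CA-26-001" -> "grants.nih.gov"  (prefix match on "RFA")
#   "NSF-26-510"    -> "nsf.gov"
#   "26-123456"     -> "grants.gov"      (digits-and-dashes fallback)
#   "XYZ-1"         -> None              (no known prefix)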

View File

@@ -0,0 +1,391 @@
"""Writes officer decisions back to Jira."""
from datetime import datetime
from datetime import timezone
from uuid import UUID
import requests
from sqlalchemy.orm import Session
from onyx.db.connector import fetch_connector_by_id
from onyx.db.connector_credential_pair import (
fetch_connector_credential_pair_for_connector,
)
from onyx.db.models import Document
from onyx.server.features.proposal_review.db import config as config_db
from onyx.server.features.proposal_review.db import decisions as decisions_db
from onyx.server.features.proposal_review.db import findings as findings_db
from onyx.server.features.proposal_review.db import proposals as proposals_db
from onyx.server.features.proposal_review.db.models import ProposalReviewFinding
from onyx.server.features.proposal_review.db.models import ProposalReviewProposal
from onyx.utils.logger import setup_logger
from shared_configs.contextvars import get_current_tenant_id
logger = setup_logger()
def sync_to_jira(
proposal_id: UUID,
db_session: Session,
) -> None:
"""Write the officer's final decision back to Jira.
Performs up to 3 Jira API operations:
1. PUT custom fields (decision, completion %)
2. POST transition (move to configured column)
3. POST comment (structured review summary)
Then marks the proposal as jira_synced.
Raises:
ValueError: If required config/data is missing.
RuntimeError: If Jira API calls fail.
"""
tenant_id = get_current_tenant_id()
# Load proposal
proposal = proposals_db.get_proposal(proposal_id, tenant_id, db_session)
if not proposal:
raise ValueError(f"Proposal {proposal_id} not found")
if not proposal.decision_at:
raise ValueError(f"No decision found for proposal {proposal_id}")
if proposal.jira_synced:
logger.info(f"Decision for proposal {proposal_id} already synced to Jira")
return
# Load tenant config for Jira settings
config = config_db.get_config(tenant_id, db_session)
if not config:
raise ValueError("Proposal review config not found for this tenant")
if not config.jira_connector_id:
raise ValueError(
"No Jira connector configured. Set jira_connector_id in proposal review settings."
)
writeback_config = config.jira_writeback or {}
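    # Illustrative shape of jira_writeback, inferred from the keys read below
    # (the field IDs and transition names here are hypothetical):
    #   {
    #       "decision_field_id": "customfield_12345",
    #       "completion_field_id": "customfield_12346",
    #       "transitions": {"APPROVED": "Done", "REJECTED": "Rejected"}
    #   }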
# Get the Jira issue key from the linked Document
parent_doc = (
db_session.query(Document)
.filter(Document.id == proposal.document_id)
.one_or_none()
)
if not parent_doc:
raise ValueError(f"Linked document {proposal.document_id} not found")
# semantic_id is formatted as "KEY-123: Summary text" by the Jira connector.
# Extract just the issue key (everything before the first colon).
raw_id = parent_doc.semantic_id
if not raw_id:
raise ValueError(
f"Document {proposal.document_id} has no semantic_id (Jira issue key)"
)
issue_key = raw_id.split(":")[0].strip()
# Get Jira credentials from the connector
jira_base_url, auth_headers = _get_jira_credentials(
config.jira_connector_id, db_session
)
# Get findings for the summary
latest_run = findings_db.get_latest_review_run(proposal_id, db_session)
all_findings: list[ProposalReviewFinding] = []
if latest_run:
all_findings = findings_db.list_findings_by_run(latest_run.id, db_session)
# Calculate summary counts
verdict_counts = _count_verdicts(all_findings)
# Operation 1: Update custom fields
_update_custom_fields(
jira_base_url=jira_base_url,
auth_headers=auth_headers,
issue_key=issue_key,
decision=proposal.status,
verdict_counts=verdict_counts,
writeback_config=writeback_config,
)
# Operation 2: Transition the issue
_transition_issue(
jira_base_url=jira_base_url,
auth_headers=auth_headers,
issue_key=issue_key,
decision=proposal.status,
writeback_config=writeback_config,
)
# Operation 3: Post review summary comment
_post_comment(
jira_base_url=jira_base_url,
auth_headers=auth_headers,
issue_key=issue_key,
proposal=proposal,
verdict_counts=verdict_counts,
findings=all_findings,
)
# Mark as synced
decisions_db.mark_proposal_jira_synced(proposal_id, tenant_id, db_session)
db_session.flush()
logger.info(
f"Successfully synced decision for proposal {proposal_id} to Jira issue {issue_key}"
)
def _get_jira_credentials(
connector_id: int,
db_session: Session,
) -> tuple[str, dict[str, str]]:
"""Extract Jira base URL and auth headers from the connector's credentials.
Returns:
Tuple of (jira_base_url, auth_headers_dict).
"""
connector = fetch_connector_by_id(connector_id, db_session)
if not connector:
raise ValueError(f"Jira connector {connector_id} not found")
# Get the connector's credential pair
cc_pair = fetch_connector_credential_pair_for_connector(db_session, connector_id)
if not cc_pair:
raise ValueError(f"No credential pair found for connector {connector_id}")
# Extract credentials — guard against missing credential_json
cred_json = cc_pair.credential.credential_json
if cred_json is None:
raise ValueError(f"No credential_json for connector {connector_id}")
credentials = cred_json.get_value(apply_mask=False)
if not credentials:
raise ValueError(f"Empty credentials for connector {connector_id}")
# Extract Jira base URL from connector config
connector_config = connector.connector_specific_config or {}
jira_base_url = connector_config.get("jira_base_url", "")
if not jira_base_url:
raise ValueError("Could not determine Jira base URL from connector config")
# Build auth headers
api_token = credentials.get("jira_api_token", "")
email = credentials.get("jira_user_email")
if email:
# Cloud auth: Basic auth with email:token
import base64
auth_string = base64.b64encode(f"{email}:{api_token}".encode()).decode()
auth_headers = {
"Authorization": f"Basic {auth_string}",
"Content-Type": "application/json",
}
else:
# Server auth: Bearer token
auth_headers = {
"Authorization": f"Bearer {api_token}",
"Content-Type": "application/json",
}
return jira_base_url, auth_headers
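# Illustrative (values hypothetical): email "officer@uni.edu" with token "abc123"
# produces "Authorization: Basic <base64 of 'officer@uni.edu:abc123'>".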
def _count_verdicts(findings: list[ProposalReviewFinding]) -> dict[str, int]:
"""Count findings by verdict."""
counts: dict[str, int] = {
"PASS": 0,
"FAIL": 0,
"FLAG": 0,
"NEEDS_REVIEW": 0,
"NOT_APPLICABLE": 0,
}
for f in findings:
verdict = f.verdict.upper() if f.verdict else "NEEDS_REVIEW"
counts[verdict] = counts.get(verdict, 0) + 1
return counts
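# Illustrative: findings with verdicts [PASS, PASS, FAIL] yield
# {"PASS": 2, "FAIL": 1, "FLAG": 0, "NEEDS_REVIEW": 0, "NOT_APPLICABLE": 0}.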
def _update_custom_fields(
jira_base_url: str,
auth_headers: dict[str, str],
issue_key: str,
decision: str,
verdict_counts: dict[str, int],
writeback_config: dict,
) -> None:
"""PUT custom fields on the Jira issue (decision, completion %)."""
decision_field = writeback_config.get("decision_field_id")
completion_field = writeback_config.get("completion_field_id")
if not decision_field and not completion_field:
logger.debug("No custom field IDs configured for Jira writeback, skipping")
return
fields: dict = {}
if decision_field:
fields[decision_field] = decision
if completion_field:
total = sum(verdict_counts.values())
completed = total - verdict_counts.get("NEEDS_REVIEW", 0)
pct = (completed / total * 100) if total > 0 else 0
fields[completion_field] = round(pct, 1)
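        # Worked example: 10 rules with 2 NEEDS_REVIEW -> completed=8 -> 80.0.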
url = f"{jira_base_url}/rest/api/3/issue/{issue_key}"
payload = {"fields": fields}
try:
resp = requests.put(url, headers=auth_headers, json=payload, timeout=30)
resp.raise_for_status()
logger.info(f"Updated custom fields on {issue_key}")
except requests.RequestException as e:
logger.error(f"Failed to update custom fields on {issue_key}: {e}")
raise RuntimeError(f"Jira field update failed: {e}") from e
def _transition_issue(
jira_base_url: str,
auth_headers: dict[str, str],
issue_key: str,
decision: str,
writeback_config: dict,
) -> None:
"""POST a transition to move the issue to the appropriate column."""
transition_map = writeback_config.get("transitions", {})
transition_name = transition_map.get(decision)
if not transition_name:
logger.debug(f"No transition configured for decision '{decision}', skipping")
return
# First, get available transitions
transitions_url = f"{jira_base_url}/rest/api/3/issue/{issue_key}/transitions"
try:
resp = requests.get(transitions_url, headers=auth_headers, timeout=30)
resp.raise_for_status()
available = resp.json().get("transitions", [])
except requests.RequestException as e:
logger.error(f"Failed to fetch transitions for {issue_key}: {e}")
raise RuntimeError(f"Jira transition fetch failed: {e}") from e
# Find the matching transition by name (case-insensitive)
target_transition = None
for t in available:
if t.get("name", "").lower() == transition_name.lower():
target_transition = t
break
if not target_transition:
available_names = [t.get("name", "") for t in available]
logger.warning(
f"Transition '{transition_name}' not found for {issue_key}. "
f"Available: {available_names}"
)
return
# Perform the transition
payload = {"transition": {"id": target_transition["id"]}}
try:
resp = requests.post(
transitions_url, headers=auth_headers, json=payload, timeout=30
)
resp.raise_for_status()
logger.info(f"Transitioned {issue_key} to '{transition_name}'")
except requests.RequestException as e:
logger.error(f"Failed to transition {issue_key}: {e}")
raise RuntimeError(f"Jira transition failed: {e}") from e
def _post_comment(
jira_base_url: str,
auth_headers: dict[str, str],
issue_key: str,
proposal: ProposalReviewProposal,
verdict_counts: dict[str, int],
findings: list[ProposalReviewFinding],
) -> None:
"""POST a structured review summary as a Jira comment."""
comment_text = _build_comment_text(proposal, verdict_counts, findings)
url = f"{jira_base_url}/rest/api/3/issue/{issue_key}/comment"
# Jira Cloud uses ADF (Atlassian Document Format) for comments
payload = {
"body": {
"version": 1,
"type": "doc",
"content": [
{
"type": "paragraph",
"content": [
{
"type": "text",
"text": comment_text,
}
],
}
],
}
}
try:
resp = requests.post(url, headers=auth_headers, json=payload, timeout=30)
resp.raise_for_status()
logger.info(f"Posted review summary comment on {issue_key}")
except requests.RequestException as e:
logger.error(f"Failed to post comment on {issue_key}: {e}")
raise RuntimeError(f"Jira comment post failed: {e}") from e
def _build_comment_text(
proposal: ProposalReviewProposal,
verdict_counts: dict[str, int],
findings: list[ProposalReviewFinding],
) -> str:
"""Build a structured review summary text for the Jira comment."""
lines: list[str] = []
lines.append("=== Proposal Review Summary ===")
lines.append("")
# Decision
decision_text = proposal.status or "N/A"
decision_notes = proposal.decision_notes
lines.append(f"Final Decision: {decision_text}")
if decision_notes:
lines.append(f"Notes: {decision_notes}")
lines.append("")
# Summary counts
total = sum(verdict_counts.values())
lines.append(f"Review Results ({total} rules evaluated):")
lines.append(f" Pass: {verdict_counts.get('PASS', 0)}")
lines.append(f" Fail: {verdict_counts.get('FAIL', 0)}")
lines.append(f" Flag: {verdict_counts.get('FLAG', 0)}")
lines.append(f" Needs Review: {verdict_counts.get('NEEDS_REVIEW', 0)}")
lines.append(f" Not Applicable: {verdict_counts.get('NOT_APPLICABLE', 0)}")
lines.append("")
# Individual findings (truncated for readability)
if findings:
lines.append("--- Detailed Findings ---")
for f in findings:
rule_name = f.rule.name if f.rule else "Unknown Rule"
verdict = f.verdict or "N/A"
officer_action = ""
if f.decision_action:
officer_action = f" | Officer: {f.decision_action}"
lines.append(f" [{verdict}] {rule_name}{officer_action}")
if f.explanation:
# Truncate long explanations
explanation = f.explanation[:200]
if len(f.explanation) > 200:
explanation += "..."
lines.append(f" Reason: {explanation}")
lines.append("")
lines.append(f"Reviewed at: {datetime.now(timezone.utc).isoformat()}")
lines.append("Generated by Onyx Proposal Review")
return "\n".join(lines)

View File

@@ -0,0 +1,532 @@
"""Proposal review engine — private helpers for task implementations.
The actual Celery @shared_task definitions live in tasks.py (for autodiscovery).
This module contains the orchestration and evaluation logic they delegate to.
"""
from __future__ import annotations
import contextvars
import os
import time
from concurrent.futures import as_completed
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from datetime import timezone
from typing import TYPE_CHECKING
from uuid import UUID
from sqlalchemy import update
from onyx.utils.logger import setup_logger
if TYPE_CHECKING:
from onyx.server.features.proposal_review.engine.context_assembler import (
ProposalContext,
)
logger = setup_logger()
def _execute_review(
review_run_id: str,
rule_ids: list[str] | None = None,
) -> None:
"""Core review logic, separated for testability.
When rule_ids is None, evaluates all active rules in the run's ruleset
(full run). When rule_ids is provided, deletes the old error findings
for those rules and re-evaluates only them (retry flow).
"""
from onyx.db.engine.sql_engine import get_session_with_current_tenant
from onyx.server.features.proposal_review.db import findings as findings_db
from onyx.server.features.proposal_review.db import rulesets as rulesets_db
from onyx.server.features.proposal_review.db.models import ProposalReviewRun
from onyx.server.features.proposal_review.engine.context_assembler import (
get_proposal_context,
)
from onyx.server.features.proposal_review.engine.foa_fetcher import fetch_foa
run_uuid = UUID(review_run_id)
is_retry = rule_ids is not None
if is_retry and not rule_ids:
logger.warning(f"Retry called with empty rule_ids for run {review_run_id}")
# Reset status since the API already set it to RUNNING
with get_session_with_current_tenant() as db_session:
run = findings_db.get_review_run(run_uuid, db_session)
if run and run.status == "RUNNING":
run.status = "COMPLETED"
db_session.commit()
return
# Step 1: Set run status to RUNNING; for retries, clean up old findings
with get_session_with_current_tenant() as db_session:
run = findings_db.get_review_run(run_uuid, db_session)
if not run:
raise ValueError(f"Review run {review_run_id} not found")
proposal_id = run.proposal_id
ruleset_id = run.ruleset_id
if is_retry:
rule_id_set = set(rule_ids)
# Delete old error findings for the rules being retried
failed = findings_db.get_failed_findings_for_run(run_uuid, db_session)
failed_for_rules = [f for f in failed if str(f.rule_id) in rule_id_set]
if failed_for_rules:
findings_db.delete_findings(
[f.id for f in failed_for_rules], db_session
)
# Roll back counters so re-evaluated rules are tracked correctly.
# completed_rules is rolled back by the number of rules being
# re-evaluated (not findings — a rule may lack a finding if
# _save_error_finding itself failed). failed_rules is rolled back
# by the number of error findings actually deleted.
run.completed_rules = max(0, run.completed_rules - len(rule_ids))
run.failed_rules = max(0, run.failed_rules - len(failed_for_rules))
run.completed_at = None
run.status = "RUNNING"
if not is_retry:
run.started_at = datetime.now(timezone.utc)
db_session.commit()
# Step 2: Assemble proposal context
with get_session_with_current_tenant() as db_session:
context = get_proposal_context(proposal_id, db_session)
# Step 3: Try to auto-fetch FOA if opportunity_id is in metadata
opportunity_id = context.metadata.get("opportunity_id") or context.metadata.get(
"funding_opportunity_number"
)
if opportunity_id and not context.foa_text:
logger.info(f"Attempting to auto-fetch FOA for opportunity_id={opportunity_id}")
try:
with get_session_with_current_tenant() as db_session:
foa_text = fetch_foa(opportunity_id, proposal_id, db_session)
db_session.commit()
if foa_text:
context.foa_text = foa_text
logger.info(f"Auto-fetched FOA: {len(foa_text)} chars")
except Exception as e:
logger.warning(f"FOA auto-fetch failed (non-fatal): {e}")
# Step 4: Determine which rules to evaluate
if is_retry:
# Retry: use the specific rule IDs passed in
rules_to_eval = rule_ids
else:
# Full run: get all active rules for the ruleset
with get_session_with_current_tenant() as db_session:
rules = rulesets_db.list_rules_by_ruleset(
ruleset_id, db_session, active_only=True
)
rules_to_eval = [str(rule.id) for rule in rules]
if not rules_to_eval:
logger.warning(f"No active rules found for ruleset {ruleset_id}")
with get_session_with_current_tenant() as db_session:
run = findings_db.get_review_run(run_uuid, db_session)
if run:
run.status = "COMPLETED"
run.completed_at = datetime.now(timezone.utc)
db_session.commit()
return
# Step 5: Update total_rules on the run (full run only)
with get_session_with_current_tenant() as db_session:
run = findings_db.get_review_run(run_uuid, db_session)
if run:
run.total_rules = len(rules_to_eval)
db_session.commit()
# Step 6: Evaluate rules in parallel via ThreadPoolExecutor
parallel_workers = int(os.environ.get("PROPOSAL_REVIEW_PARALLEL_WORKERS", "4"))
workers = min(parallel_workers, len(rules_to_eval))
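    # copy_context().run propagates contextvars (notably the current tenant ID)
    # into the worker threads; threads do not inherit contextvars automatically.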
with ThreadPoolExecutor(max_workers=workers) as executor:
future_to_rule_id = {
executor.submit(
contextvars.copy_context().run,
_evaluate_single_rule,
review_run_id,
rid,
proposal_id,
context,
): rid
for rid in rules_to_eval
}
for future in as_completed(future_to_rule_id):
rid = future_to_rule_id[future]
succeeded = True
try:
succeeded = future.result()
except Exception as e:
succeeded = False
logger.error(
f"Rule {rid} failed: {e}",
exc_info=True,
)
# Increment completed_rules (and failed_rules on error) atomically
# so the frontend progress bar always reaches 100%.
updates: dict = {
"completed_rules": ProposalReviewRun.completed_rules + 1,
}
if not succeeded:
updates["failed_rules"] = ProposalReviewRun.failed_rules + 1
with get_session_with_current_tenant() as db_session:
db_session.execute(
update(ProposalReviewRun)
.where(ProposalReviewRun.id == run_uuid)
.values(**updates)
)
db_session.commit()
# Step 7: Mark run as completed
with get_session_with_current_tenant() as db_session:
run = findings_db.get_review_run(run_uuid, db_session)
if run:
run.status = "COMPLETED"
run.completed_at = datetime.now(timezone.utc)
db_session.commit()
logger.info(
f"Review run {review_run_id} completed: "
f"{len(rules_to_eval)} rules evaluated"
f"{' (retry)' if is_retry else ''}"
)
def _evaluate_and_save(
review_run_id: str,
rule_id: str,
proposal_id: "UUID",
context: "ProposalContext",
) -> None:
"""Evaluate a single rule and save the finding to DB."""
from onyx.db.engine.sql_engine import get_session_with_current_tenant
from onyx.server.features.proposal_review.db import findings as findings_db
from onyx.server.features.proposal_review.db import rulesets as rulesets_db
from onyx.server.features.proposal_review.engine.rule_evaluator import (
evaluate_rule,
)
rule_uuid = UUID(rule_id)
run_uuid = UUID(review_run_id)
# Load the rule from DB
with get_session_with_current_tenant() as db_session:
rule = rulesets_db.get_rule(rule_uuid, db_session)
if not rule:
raise ValueError(f"Rule {rule_id} not found")
# Evaluate the rule
result = evaluate_rule(rule, context, db_session)
# Save finding
findings_db.create_finding(
proposal_id=proposal_id,
rule_id=rule_uuid,
review_run_id=run_uuid,
verdict=result["verdict"],
confidence=result.get("confidence"),
evidence=result.get("evidence"),
explanation=result.get("explanation"),
suggested_action=result.get("suggested_action"),
llm_model=result.get("llm_model"),
llm_tokens_used=result.get("llm_tokens_used"),
db_session=db_session,
)
db_session.commit()
logger.debug(f"Rule {rule_id} evaluated: verdict={result['verdict']}")
def _save_error_finding(
review_run_id: str,
rule_id: str,
proposal_id: "UUID",
error: str,
) -> None:
"""Save an error finding when a rule evaluation fails."""
from onyx.db.engine.sql_engine import get_session_with_current_tenant
from onyx.server.features.proposal_review.db import findings as findings_db
try:
with get_session_with_current_tenant() as db_session:
findings_db.create_finding(
proposal_id=proposal_id,
rule_id=UUID(rule_id),
review_run_id=UUID(review_run_id),
verdict="NEEDS_REVIEW",
confidence="LOW",
evidence=None,
explanation=f"Rule evaluation failed with error: {error}",
suggested_action="Manual review required due to system error.",
db_session=db_session,
)
db_session.commit()
except Exception as e:
logger.error(f"Failed to save error finding for rule {rule_id}: {e}")
def _mark_run_failed(review_run_id: str) -> None:
"""Mark a review run as FAILED."""
from onyx.db.engine.sql_engine import get_session_with_current_tenant
from onyx.server.features.proposal_review.db import findings as findings_db
try:
with get_session_with_current_tenant() as db_session:
run = findings_db.get_review_run(UUID(review_run_id), db_session)
if run:
run.status = "FAILED"
run.completed_at = datetime.now(timezone.utc)
db_session.commit()
except Exception as e:
logger.error(f"Failed to mark run {review_run_id} as FAILED: {e}")
_MAX_RULE_RETRIES = int(os.environ.get("PROPOSAL_REVIEW_RULE_MAX_RETRIES", "2"))
_RETRY_BACKOFF_BASE = 2 # seconds — retry waits 2s, 4s, ...
def _evaluate_single_rule(
review_run_id: str,
rule_id: str,
proposal_id: "UUID",
context: "ProposalContext",
) -> bool:
"""Evaluate one rule, save the finding. Called from ThreadPoolExecutor.
Context is shared in-memory from the parent — no DB re-fetch needed.
Retries up to _MAX_RULE_RETRIES times with exponential backoff on failure
(e.g. LLM timeout). On final failure, an error finding (NEEDS_REVIEW) is
saved so the officer sees which rule failed.
Returns True on success, False if all attempts failed.
"""
last_error: Exception | None = None
for attempt in range(_MAX_RULE_RETRIES + 1):
try:
_evaluate_and_save(review_run_id, rule_id, proposal_id, context)
return True
except Exception as e:
last_error = e
if attempt < _MAX_RULE_RETRIES:
wait = _RETRY_BACKOFF_BASE * (2**attempt)
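                # e.g. attempt 0 waits 2s, attempt 1 waits 4s.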
logger.warning(
f"Rule {rule_id} attempt {attempt + 1} failed: {e}. "
f"Retrying in {wait}s..."
)
time.sleep(wait)
else:
logger.error(
f"Rule {rule_id} failed after {attempt + 1} attempts: {e}",
exc_info=True,
)
_save_error_finding(
review_run_id=review_run_id,
rule_id=rule_id,
proposal_id=proposal_id,
error=str(last_error),
)
return False
def _execute_checklist_import(import_job_id: str) -> None:
"""Core import logic, separated for traceability."""
from onyx.db.engine.sql_engine import get_session_with_current_tenant
from onyx.llm.factory import get_default_llm
from onyx.server.features.proposal_review.db import imports as imports_db
from onyx.server.features.proposal_review.db import rulesets as rulesets_db
from onyx.server.features.proposal_review.engine.checklist_importer import (
decompose_checklist_item,
enumerate_checklist_items,
)
job_uuid = UUID(import_job_id)
parallel_workers = int(
os.environ.get("PROPOSAL_REVIEW_IMPORT_PARALLEL_WORKERS", "4")
)
# Step 1: Mark RUNNING and load job data
with get_session_with_current_tenant() as db_session:
job = imports_db.get_import_job(job_uuid, db_session)
if not job:
raise ValueError(f"Import job {import_job_id} not found")
job.status = "RUNNING"
db_session.commit()
ruleset_id = job.ruleset_id
extracted_text = job.extracted_text
llm = get_default_llm(timeout=300)
# Step 2: Enumerate checklist items
items = enumerate_checklist_items(extracted_text, llm)
if not items:
logger.warning(f"Import {import_job_id}: no checklist items found")
with get_session_with_current_tenant() as db_session:
job = imports_db.get_import_job(job_uuid, db_session)
if job:
job.status = "COMPLETED"
job.completed_at = datetime.now(timezone.utc)
db_session.commit()
return
# Split items with too many sub-checks into smaller pieces so each
# LLM call produces bounded output. The threshold is conservative —
# 3 sub-checks keeps output well within token limits.
max_sub_checks = int(
os.environ.get("PROPOSAL_REVIEW_IMPORT_MAX_SUB_CHECKS_PER_CALL", "3")
)
work_items = _split_large_items(items, max_sub_checks)
logger.info(
f"Import {import_job_id}: enumerated {len(items)} items "
f"({len(work_items)} work units after splitting), "
f"decomposing with {parallel_workers} workers"
)
# Step 3: Decompose each work item in parallel, persist as each completes
rules_created = 0
failed_items: list[str] = []
workers = min(parallel_workers, len(work_items))
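    # As in _execute_review, copy_context().run carries contextvars (e.g. the
    # current tenant ID) into the worker threads.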
with ThreadPoolExecutor(max_workers=workers) as executor:
future_to_item = {
executor.submit(
contextvars.copy_context().run,
decompose_checklist_item,
item,
extracted_text,
llm,
): item
for item in work_items
}
for future in as_completed(future_to_item):
item = future_to_item[future]
try:
rule_dicts = future.result()
except Exception as e:
logger.error(
f" [{item.id}] '{item.name}' failed: {e}",
exc_info=True,
)
failed_items.append(item.name)
continue
if not rule_dicts:
continue
# Persist this item's rules in their own transaction
with get_session_with_current_tenant() as db_session:
for rd in rule_dicts:
rule = rulesets_db.create_rule(
ruleset_id=ruleset_id,
name=rd["name"],
description=rd.get("description"),
category=rd.get("category"),
rule_type=rd.get("rule_type", "CUSTOM_NL"),
rule_intent=rd.get("rule_intent", "CHECK"),
prompt_template=rd["prompt_template"],
source="IMPORTED",
is_hard_stop=False,
priority=0,
refinement_needed=rd.get("refinement_needed", False),
refinement_question=rd.get("refinement_question"),
db_session=db_session,
)
rule.is_active = False
db_session.flush()
rules_created += len(rule_dicts)
# Update progress so the frontend can poll it
job = imports_db.get_import_job(job_uuid, db_session)
if job:
job.rules_created = rules_created
db_session.commit()
logger.info(
f" [{item.id}] '{item.name}': "
f"{len(rule_dicts)} rules persisted "
f"({rules_created} total)"
)
# Step 4: Mark completed
with get_session_with_current_tenant() as db_session:
job = imports_db.get_import_job(job_uuid, db_session)
if job:
job.status = "COMPLETED"
job.completed_at = datetime.now(timezone.utc)
db_session.commit()
status = f"{rules_created} rules created"
if failed_items:
status += f", {len(failed_items)} items failed: {failed_items}"
logger.info(f"Import job {import_job_id} completed: {status}")
def _mark_import_failed(import_job_id: str, error: str) -> None:
"""Mark an import job as FAILED."""
from onyx.db.engine.sql_engine import get_session_with_current_tenant
from onyx.server.features.proposal_review.db import imports as imports_db
try:
with get_session_with_current_tenant() as db_session:
job = imports_db.get_import_job(UUID(import_job_id), db_session)
if job:
job.status = "FAILED"
job.error_message = error
job.completed_at = datetime.now(timezone.utc)
db_session.commit()
except Exception as e:
logger.error(f"Failed to mark import job {import_job_id} as FAILED: {e}")
def _split_large_items(
items: list, # list[ChecklistItem] — untyped to avoid top-level import
max_sub_checks: int,
) -> list:
"""Split checklist items with many sub-checks into smaller work units.
Each returned item has at most *max_sub_checks* sub-checks, keeping the
LLM output bounded regardless of how large the original item was. Items
that are already within the limit pass through unchanged.
"""
from onyx.server.features.proposal_review.engine.checklist_importer import (
ChecklistItem,
)
work_items: list[ChecklistItem] = []
for item in items:
if len(item.sub_checks) <= max_sub_checks:
work_items.append(item)
continue
# Split into batches, each becoming its own work unit
for batch_idx in range(0, len(item.sub_checks), max_sub_checks):
batch = item.sub_checks[batch_idx : batch_idx + max_sub_checks]
part_num = (batch_idx // max_sub_checks) + 1
work_items.append(
ChecklistItem(
id=f"{item.id}-p{part_num}",
name=item.name,
category=item.category,
description=item.description,
sub_checks=batch,
)
)
return work_items
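# Illustrative: an item with 7 sub-checks and max_sub_checks=3 becomes three
# work units -- "<id>-p1" (3 checks), "<id>-p2" (3 checks), "<id>-p3" (1 check).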

View File

@@ -0,0 +1,202 @@
"""Evaluates a single rule against a proposal context via LLM."""
import json
import re
from sqlalchemy.orm import Session
from onyx.llm.factory import get_default_llm
from onyx.llm.models import SystemMessage
from onyx.llm.models import UserMessage
from onyx.llm.utils import llm_response_to_string
from onyx.server.features.proposal_review.db.models import ProposalReviewRule
from onyx.server.features.proposal_review.engine.context_assembler import (
ProposalContext,
)
from onyx.tracing.llm_utils import llm_generation_span
from onyx.tracing.llm_utils import record_llm_response
from onyx.utils.logger import setup_logger
logger = setup_logger()
SYSTEM_PROMPT = """\
You are a meticulous grant proposal compliance reviewer for a university research office.
Your role is to evaluate specific aspects of grant proposals against institutional
and sponsor requirements.
You must evaluate each rule independently, focusing ONLY on the specific criterion
described. Be precise in your assessment. When in doubt, mark for human review.
Always respond with a valid JSON object in the exact format specified."""
RESPONSE_FORMAT_INSTRUCTIONS = """
Respond with ONLY a valid JSON object in the following format:
{
"verdict": "PASS | FAIL | FLAG | NEEDS_REVIEW | NOT_APPLICABLE",
"confidence": "HIGH | MEDIUM | LOW",
"evidence": "Direct quote or reference from the proposal documents that supports your verdict. If no relevant text found, state that clearly.",
"explanation": "Concise reasoning for why this verdict was reached. Reference specific requirements and how the proposal does or does not meet them.",
"suggested_action": "If verdict is FAIL or FLAG, describe what the officer or PI should do. Otherwise, null."
}
Verdict meanings:
- PASS: The proposal clearly meets this requirement.
- FAIL: The proposal clearly does NOT meet this requirement.
- FLAG: There is a potential issue that needs human attention.
- NEEDS_REVIEW: Insufficient information to make a determination.
- NOT_APPLICABLE: This rule does not apply to this proposal.
"""
def evaluate_rule(
rule: ProposalReviewRule,
context: ProposalContext,
_db_session: Session | None = None,
) -> dict:
"""Evaluate one rule against proposal context via LLM.
1. Fills rule.prompt_template variables ({{proposal_text}}, {{metadata}}, etc.)
2. Wraps in system prompt establishing reviewer role
3. Calls llm.invoke() with structured output instructions
4. Parses response into a findings dict
Args:
rule: The rule to evaluate.
context: Assembled proposal context.
        _db_session: Optional DB session (not used for the LLM call; kept for API compatibility).
Returns:
Dict with verdict, confidence, evidence, explanation, suggested_action,
plus llm_model and llm_tokens_used if available.
"""
# 1. Fill template variables
filled_prompt = _fill_template(rule.prompt_template, context)
# 2. Build full prompt
user_content = f"{filled_prompt}\n\n" f"{RESPONSE_FORMAT_INSTRUCTIONS}"
prompt_messages = [
SystemMessage(content=SYSTEM_PROMPT),
UserMessage(content=user_content),
]
# 3. Call LLM — exceptions propagate to the caller so the retry
# mechanism in _evaluate_single_rule can handle transient failures.
llm = get_default_llm()
with llm_generation_span(llm, "proposal_review", prompt_messages) as gen_span:
response = llm.invoke(prompt_messages)
record_llm_response(gen_span, response)
raw_text = llm_response_to_string(response)
# Extract model info
llm_model = llm.config.model_name if llm.config else None
llm_tokens_used = _extract_token_usage(response)
# 4. Parse JSON response
result = _parse_llm_response(raw_text)
result["llm_model"] = llm_model
result["llm_tokens_used"] = llm_tokens_used
return result
def _fill_template(template: str, context: ProposalContext) -> str:
"""Replace {{variable}} placeholders in the prompt template.
Supported variables:
- {{proposal_text}} -> context.proposal_text
- {{budget_text}} -> context.budget_text
- {{foa_text}} -> context.foa_text
- {{metadata}} -> JSON dump of context.metadata
- {{metadata.FIELD}} -> specific metadata field value
- {{jira_key}} -> context.jira_key
"""
result = template
# Direct substitutions
result = result.replace("{{proposal_text}}", context.proposal_text or "")
result = result.replace("{{budget_text}}", context.budget_text or "")
result = result.replace("{{foa_text}}", context.foa_text or "")
result = result.replace("{{jira_key}}", context.jira_key or "")
# Metadata as JSON
metadata_str = json.dumps(context.metadata, indent=2, default=str)
result = result.replace("{{metadata}}", metadata_str)
# Specific metadata fields: {{metadata.FIELD}}
metadata_field_pattern = re.compile(r"\{\{metadata\.([^}]+)\}\}")
for match in metadata_field_pattern.finditer(result):
field_name = match.group(1)
field_value = context.metadata.get(field_name, "")
if isinstance(field_value, (dict, list)):
field_value = json.dumps(field_value, default=str)
result = result.replace(match.group(0), str(field_value))
return result
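# Illustrative: the template "Verify {{metadata.budget_total}} is under the cap"
# with metadata {"budget_total": 500000} fills to
# "Verify 500000 is under the cap" (field name hypothetical).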
def _parse_llm_response(raw_text: str) -> dict:
"""Parse the LLM response text as JSON.
Handles cases where the LLM wraps JSON in markdown code fences.
"""
text = raw_text.strip()
# Strip markdown code fences if present
if text.startswith("```"):
# Remove opening fence (with optional language tag)
text = re.sub(r"^```(?:json)?\s*\n?", "", text)
# Remove closing fence
text = re.sub(r"\n?```\s*$", "", text)
text = text.strip()
try:
parsed = json.loads(text)
except json.JSONDecodeError:
logger.warning(f"Failed to parse LLM response as JSON: {text[:200]}...")
return {
"verdict": "NEEDS_REVIEW",
"confidence": "LOW",
"evidence": None,
"explanation": f"Failed to parse LLM response. Raw output: {text[:500]}",
"suggested_action": "Manual review required due to unparseable AI response.",
}
# Validate and normalize the parsed result
valid_verdicts = {"PASS", "FAIL", "FLAG", "NEEDS_REVIEW", "NOT_APPLICABLE"}
valid_confidences = {"HIGH", "MEDIUM", "LOW"}
verdict = str(parsed.get("verdict", "NEEDS_REVIEW")).upper()
if verdict not in valid_verdicts:
verdict = "NEEDS_REVIEW"
confidence = str(parsed.get("confidence", "LOW")).upper()
if confidence not in valid_confidences:
confidence = "LOW"
return {
"verdict": verdict,
"confidence": confidence,
"evidence": parsed.get("evidence"),
"explanation": parsed.get("explanation"),
"suggested_action": parsed.get("suggested_action"),
}
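# Illustrative: a response of '```json\n{"verdict": "PASS", ...}\n```' is
# stripped to the bare JSON object before parsing.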
def _extract_token_usage(response: object) -> int | None:
"""Best-effort extraction of token usage from the LLM response."""
try:
# litellm ModelResponse has a usage attribute
if hasattr(response, "usage") and response.usage:
usage = response.usage
total = getattr(usage, "total_tokens", None)
if total is not None:
return int(total)
# Sum prompt + completion tokens if total not available
prompt_tokens = getattr(usage, "prompt_tokens", 0) or 0
completion_tokens = getattr(usage, "completion_tokens", 0) or 0
if prompt_tokens or completion_tokens:
return prompt_tokens + completion_tokens
except Exception:
pass
return None

View File

@@ -0,0 +1,195 @@
"""Celery tasks for proposal review — discovered by autodiscover_tasks."""
from __future__ import annotations
from uuid import UUID
from celery import shared_task
from redis.lock import Lock as RedisLock
from onyx.configs.constants import CELERY_GENERIC_BEAT_LOCK_TIMEOUT
from onyx.configs.constants import OnyxCeleryTask
from onyx.configs.constants import OnyxRedisLocks
from onyx.redis.redis_pool import get_redis_client
from onyx.utils.logger import setup_logger
from shared_configs.contextvars import CURRENT_TENANT_ID_CONTEXTVAR
logger = setup_logger()
@shared_task(
name="run_proposal_review",
bind=True,
ignore_result=True,
soft_time_limit=3600,
time_limit=3660,
)
def run_proposal_review(
_self: object,
review_run_id: str,
tenant_id: str,
rule_ids: list[str] | None = None,
) -> None:
"""Evaluate rules for a review run.
When rule_ids is None, evaluates all active rules in the run's ruleset
(full run). When rule_ids is provided, evaluates only those specific
rules (retry flow).
"""
CURRENT_TENANT_ID_CONTEXTVAR.set(tenant_id)
try:
from onyx.tracing.framework.create import trace
with trace(
"proposal_review",
metadata={"review_run_id": review_run_id},
):
from onyx.server.features.proposal_review.engine.review_engine import (
_execute_review,
)
_execute_review(review_run_id, rule_ids=rule_ids)
except Exception as e:
logger.error(f"Review run {review_run_id} failed: {e}", exc_info=True)
from onyx.server.features.proposal_review.engine.review_engine import (
_mark_run_failed,
)
_mark_run_failed(review_run_id)
raise
finally:
CURRENT_TENANT_ID_CONTEXTVAR.set(None)
@shared_task(name="run_checklist_import", bind=True, ignore_result=True)
def run_checklist_import(_self: object, import_job_id: str, tenant_id: str) -> None:
"""Background task: decompose a checklist via LLM and save rules."""
CURRENT_TENANT_ID_CONTEXTVAR.set(tenant_id)
try:
from onyx.tracing.framework.create import trace
with trace(
"checklist_import",
metadata={"import_job_id": import_job_id},
):
from onyx.server.features.proposal_review.engine.review_engine import (
_execute_checklist_import,
)
_execute_checklist_import(import_job_id)
except Exception as e:
logger.error(f"Import job {import_job_id} failed: {e}", exc_info=True)
from onyx.server.features.proposal_review.engine.review_engine import (
_mark_import_failed,
)
_mark_import_failed(import_job_id, str(e))
raise
finally:
CURRENT_TENANT_ID_CONTEXTVAR.set(None)
@shared_task(
name="sync_decision_to_jira",
bind=True,
ignore_result=True,
soft_time_limit=60,
time_limit=90,
)
def sync_decision_to_jira(_self: object, proposal_id: str, tenant_id: str) -> None:
"""Writes officer decision back to Jira.
Dispatched from the sync-jira API endpoint.
"""
CURRENT_TENANT_ID_CONTEXTVAR.set(tenant_id)
try:
from onyx.db.engine.sql_engine import get_session_with_current_tenant
from onyx.server.features.proposal_review.engine.jira_sync import sync_to_jira
with get_session_with_current_tenant() as db_session:
sync_to_jira(UUID(proposal_id), db_session)
db_session.commit()
logger.info(f"Jira sync completed for proposal {proposal_id}")
except Exception as e:
logger.error(f"Jira sync failed for proposal {proposal_id}: {e}", exc_info=True)
raise
finally:
CURRENT_TENANT_ID_CONTEXTVAR.set(None)
@shared_task(
name=OnyxCeleryTask.CHECK_FOR_DANGLING_IMPORT_JOBS,
bind=True,
ignore_result=True,
soft_time_limit=60,
time_limit=90,
)
def check_for_dangling_import_jobs(_self: object, *, tenant_id: str) -> None:
"""Beat task: mark import jobs stuck in PENDING/RUNNING as FAILED.
A job is considered stuck if it has been in a non-terminal state for
longer than the stale threshold (default 60 minutes). This handles
cases where the Celery message was discarded (e.g. worker restart
before the task was registered) or the task crashed without marking
the job as FAILED.
"""
from onyx.db.engine.sql_engine import get_session_with_current_tenant
from onyx.server.features.proposal_review.db import imports as imports_db
CURRENT_TENANT_ID_CONTEXTVAR.set(tenant_id)
locked = False
redis_client = get_redis_client(tenant_id=tenant_id)
lock: RedisLock = redis_client.lock(
OnyxRedisLocks.CHECK_DANGLING_IMPORT_JOBS_BEAT_LOCK,
timeout=CELERY_GENERIC_BEAT_LOCK_TIMEOUT,
)
if not lock.acquire(blocking=False):
logger.info(
f"check_for_dangling_import_jobs - Lock not acquired: tenant={tenant_id}"
)
return None
try:
locked = True
with get_session_with_current_tenant() as db_session:
dangling = imports_db.get_dangling_import_jobs(
db_session, stale_threshold_minutes=60
)
if not dangling:
return
for job in dangling:
logger.warning(
f"Marking dangling import job {job.id} as FAILED "
f"(status={job.status}, created_at={job.created_at})"
)
imports_db.mark_import_job_failed(
job,
"Import timed out — the background task did not complete. "
"Please try importing again.",
db_session,
)
db_session.commit()
logger.info(
f"Cleaned up {len(dangling)} dangling import job(s) "
f"for tenant {tenant_id}"
)
except Exception:
logger.exception("Unexpected error during dangling import job cleanup")
finally:
if locked:
if lock.owned():
lock.release()
else:
logger.error(
f"check_for_dangling_import_jobs - "
f"Lock not owned on completion: tenant={tenant_id}"
)
CURRENT_TENANT_ID_CONTEXTVAR.set(None)

View File

@@ -34,7 +34,6 @@ from onyx.server.settings.models import UserSettings
from onyx.server.settings.store import load_settings
from onyx.server.settings.store import store_settings
from onyx.utils.logger import setup_logger
from onyx.utils.platform import is_running_in_container
from onyx.utils.variable_functionality import (
fetch_versioned_implementation_with_fallback,
)
@@ -112,7 +111,6 @@ def fetch_settings(
if DISABLE_VECTOR_DB
else DEFAULT_FILE_TOKEN_COUNT_THRESHOLD_K_VECTOR_DB
),
is_containerized=is_running_in_container(),
)

View File

@@ -133,7 +133,3 @@ class UserSettings(Settings):
else DEFAULT_FILE_TOKEN_COUNT_THRESHOLD_K_VECTOR_DB
)
)
# True when the backend is running inside a container (Docker/Podman).
# The frontend uses this to default local-service URLs (e.g. Ollama,
# LM Studio) to host.docker.internal instead of localhost.
is_containerized: bool = False

View File

@@ -10,6 +10,7 @@ from onyx.configs.app_configs import DISABLE_VECTOR_DB
from onyx.configs.model_configs import GEN_AI_TEMPERATURE
from onyx.context.search.models import BaseFilters
from onyx.context.search.models import PersonaSearchInfo
from onyx.db.engine.sql_engine import get_session_with_current_tenant_if_none
from onyx.db.enums import MCPAuthenticationPerformer
from onyx.db.enums import MCPAuthenticationType
from onyx.db.mcp import get_all_mcp_tools_for_server
@@ -113,10 +114,10 @@ def _get_image_generation_config(llm: LLM, db_session: Session) -> LLMConfig:
def construct_tools(
persona: Persona,
db_session: Session,
emitter: Emitter,
user: User,
llm: LLM,
db_session: Session | None = None,
search_tool_config: SearchToolConfig | None = None,
custom_tool_config: CustomToolConfig | None = None,
file_reader_tool_config: FileReaderToolConfig | None = None,
@@ -131,6 +132,33 @@ def construct_tools(
``attached_documents``, and ``hierarchy_nodes`` already eager-loaded
(e.g. via ``eager_load_persona=True`` or ``eager_load_for_tools=True``)
to avoid lazy SQL queries after the session may have been flushed."""
with get_session_with_current_tenant_if_none(db_session) as db_session:
return _construct_tools_impl(
persona=persona,
db_session=db_session,
emitter=emitter,
user=user,
llm=llm,
search_tool_config=search_tool_config,
custom_tool_config=custom_tool_config,
file_reader_tool_config=file_reader_tool_config,
allowed_tool_ids=allowed_tool_ids,
search_usage_forcing_setting=search_usage_forcing_setting,
)
def _construct_tools_impl(
persona: Persona,
db_session: Session,
emitter: Emitter,
user: User,
llm: LLM,
search_tool_config: SearchToolConfig | None = None,
custom_tool_config: CustomToolConfig | None = None,
file_reader_tool_config: FileReaderToolConfig | None = None,
allowed_tool_ids: list[int] | None = None,
search_usage_forcing_setting: SearchToolUsage = SearchToolUsage.AUTO,
) -> dict[int, list[Tool]]:
tool_dict: dict[int, list[Tool]] = {}
# Log which tools are attached to the persona for debugging

View File

@@ -5,7 +5,6 @@ from collections.abc import MutableMapping
from logging.handlers import RotatingFileHandler
from typing import Any
from onyx.utils.platform import is_running_in_container
from onyx.utils.tenant import get_tenant_id_short_string
from shared_configs.configs import DEV_LOGGING_ENABLED
from shared_configs.configs import LOG_FILE_NAME
@@ -170,6 +169,13 @@ def get_standard_formatter() -> ColoredFormatter:
)
DANSWER_DOCKER_ENV_STR = "DANSWER_RUNNING_IN_DOCKER"
def is_running_in_container() -> bool:
return os.getenv(DANSWER_DOCKER_ENV_STR) == "true"
def setup_logger(
name: str = __name__,
log_level: int = get_log_level_from_str(),

View File

@@ -1,32 +0,0 @@
import logging
import os
logger = logging.getLogger(__name__)
_ONYX_DOCKER_ENV_STR = "ONYX_RUNNING_IN_DOCKER"
_DANSWER_DOCKER_ENV_STR = "DANSWER_RUNNING_IN_DOCKER"
def _resolve_container_flag() -> bool:
onyx_val = os.getenv(_ONYX_DOCKER_ENV_STR)
if onyx_val is not None:
return onyx_val.lower() == "true"
danswer_val = os.getenv(_DANSWER_DOCKER_ENV_STR)
if danswer_val is not None:
logger.warning(
"%s is deprecated and will be ignored in a future release. "
"Use %s instead.",
_DANSWER_DOCKER_ENV_STR,
_ONYX_DOCKER_ENV_STR,
)
return danswer_val.lower() == "true"
return False
_IS_RUNNING_IN_CONTAINER: bool = _resolve_container_flag()
def is_running_in_container() -> bool:
return _IS_RUNNING_IN_CONTAINER

View File

@@ -0,0 +1,85 @@
"""Shared fixtures for proposal review integration tests.
Uses the same real-PostgreSQL pattern as the parent external_dependency_unit
conftest. Tables must already exist (via the 61ea78857c97 migration).
"""
from collections.abc import Generator
from uuid import uuid4
import pytest
from fastapi_users.password import PasswordHelper
from sqlalchemy import text
from sqlalchemy.orm import Session
from onyx.db.engine.sql_engine import get_session_with_current_tenant
from onyx.db.engine.sql_engine import SqlEngine
from onyx.db.enums import AccountType
from onyx.db.models import User
from onyx.db.models import UserRole
from shared_configs.contextvars import CURRENT_TENANT_ID_CONTEXTVAR
from tests.external_dependency_unit.constants import TEST_TENANT_ID
# Tables to clean up after each test, in dependency order (children first).
_PROPOSAL_REVIEW_TABLES = [
"proposal_review_finding",
"proposal_review_run",
"proposal_review_document",
"proposal_review_proposal",
"proposal_review_rule",
"proposal_review_import_job",
"proposal_review_ruleset",
"proposal_review_config",
]
@pytest.fixture(scope="function")
def tenant_context() -> Generator[None, None, None]:
token = CURRENT_TENANT_ID_CONTEXTVAR.set(TEST_TENANT_ID)
try:
yield
finally:
CURRENT_TENANT_ID_CONTEXTVAR.reset(token)
@pytest.fixture(scope="function")
def db_session(tenant_context: None) -> Generator[Session, None, None]: # noqa: ARG001
"""Yield a DB session scoped to the current tenant.
After the test completes, all proposal_review rows are deleted so tests
don't leave artifacts in the database.
"""
SqlEngine.init_engine(pool_size=10, max_overflow=5)
with get_session_with_current_tenant() as session:
yield session
# Clean up all proposal_review data created during this test
try:
for table in _PROPOSAL_REVIEW_TABLES:
session.execute(text(f"DELETE FROM {table}")) # noqa: S608
session.commit()
except Exception:
session.rollback()
@pytest.fixture(scope="function")
def test_user(db_session: Session) -> User:
"""Create a throwaway user for FK references (triggered_by, officer_id, etc.)."""
unique_email = f"pr_test_{uuid4().hex[:8]}@example.com"
password_helper = PasswordHelper()
hashed_password = password_helper.hash(password_helper.generate())
user = User(
id=uuid4(),
email=unique_email,
hashed_password=hashed_password,
is_active=True,
is_superuser=False,
is_verified=True,
role=UserRole.ADMIN,
account_type=AccountType.STANDARD,
)
db_session.add(user)
db_session.commit()
db_session.refresh(user)
return user

View File

@@ -0,0 +1,328 @@
"""Integration tests for per-finding decisions, proposal decisions, and config."""
from uuid import uuid4
from sqlalchemy.orm import Session
from onyx.db.models import User
from onyx.server.features.proposal_review.db.config import get_config
from onyx.server.features.proposal_review.db.config import upsert_config
from onyx.server.features.proposal_review.db.decisions import (
mark_proposal_jira_synced,
)
from onyx.server.features.proposal_review.db.decisions import (
update_proposal_decision,
)
from onyx.server.features.proposal_review.db.decisions import (
upsert_finding_decision,
)
from onyx.server.features.proposal_review.db.findings import create_finding
from onyx.server.features.proposal_review.db.findings import create_review_run
from onyx.server.features.proposal_review.db.findings import get_finding
from onyx.server.features.proposal_review.db.proposals import get_or_create_proposal
from onyx.server.features.proposal_review.db.rulesets import create_rule
from onyx.server.features.proposal_review.db.rulesets import create_ruleset
from tests.external_dependency_unit.constants import TEST_TENANT_ID
TENANT = TEST_TENANT_ID
def _make_finding(db_session: Session, test_user: User):
"""Helper: create a full chain (ruleset -> rule -> proposal -> run -> finding)."""
rs = create_ruleset(
tenant_id=TENANT,
name=f"RS-{uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
rule = create_rule(
ruleset_id=rs.id,
name="Test Rule",
rule_type="DOCUMENT_CHECK",
prompt_template="{{proposal_text}}",
db_session=db_session,
)
proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
run = create_review_run(
proposal_id=proposal.id,
ruleset_id=rs.id,
triggered_by=test_user.id,
total_rules=1,
db_session=db_session,
)
finding = create_finding(
proposal_id=proposal.id,
rule_id=rule.id,
review_run_id=run.id,
verdict="FAIL",
db_session=db_session,
)
db_session.commit()
return finding, proposal
class TestFindingDecision:
def test_create_finding_decision(
self, db_session: Session, test_user: User
) -> None:
finding, _ = _make_finding(db_session, test_user)
updated = upsert_finding_decision(
finding_id=finding.id,
officer_id=test_user.id,
action="VERIFIED",
db_session=db_session,
notes="Looks good",
)
db_session.commit()
assert updated.id == finding.id
assert updated.decision_action == "VERIFIED"
assert updated.decision_notes == "Looks good"
assert updated.decided_at is not None
def test_upsert_overwrites_previous_decision(
self, db_session: Session, test_user: User
) -> None:
finding, _ = _make_finding(db_session, test_user)
upsert_finding_decision(
finding_id=finding.id,
officer_id=test_user.id,
action="VERIFIED",
db_session=db_session,
)
db_session.commit()
updated = upsert_finding_decision(
finding_id=finding.id,
officer_id=test_user.id,
action="ISSUE",
db_session=db_session,
notes="Actually, this is a problem",
)
db_session.commit()
# Same row was updated
assert updated.id == finding.id
assert updated.decision_action == "ISSUE"
assert updated.decision_notes == "Actually, this is a problem"
def test_finding_decision_accessible_via_finding(
self, db_session: Session, test_user: User
) -> None:
finding, _ = _make_finding(db_session, test_user)
upsert_finding_decision(
finding_id=finding.id,
officer_id=test_user.id,
action="OVERRIDDEN",
db_session=db_session,
)
db_session.commit()
fetched = get_finding(finding.id, db_session)
assert fetched is not None
assert fetched.decision_action == "OVERRIDDEN"
assert fetched.decision_officer_id == test_user.id
def test_finding_has_no_decision_by_default(
self, db_session: Session, test_user: User
) -> None:
finding, _ = _make_finding(db_session, test_user)
assert finding.decision_action is None
assert finding.decided_at is None
class TestProposalDecision:
def test_update_proposal_decision(
self, db_session: Session, test_user: User
) -> None:
proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
db_session.commit()
updated = update_proposal_decision(
proposal_id=proposal.id,
tenant_id=TENANT,
officer_id=test_user.id,
decision="APPROVED",
db_session=db_session,
notes="All checks pass",
)
db_session.commit()
assert updated.status == "APPROVED"
assert updated.decision_notes == "All checks pass"
assert updated.decision_officer_id == test_user.id
assert updated.decision_at is not None
assert updated.jira_synced is False
def test_proposal_decision_overwrites_previous(
self, db_session: Session, test_user: User
) -> None:
proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
db_session.commit()
update_proposal_decision(
proposal_id=proposal.id,
tenant_id=TENANT,
officer_id=test_user.id,
decision="CHANGES_REQUESTED",
db_session=db_session,
)
db_session.commit()
updated = update_proposal_decision(
proposal_id=proposal.id,
tenant_id=TENANT,
officer_id=test_user.id,
decision="APPROVED",
db_session=db_session,
)
db_session.commit()
assert updated.status == "APPROVED"
def test_mark_proposal_jira_synced(
self, db_session: Session, test_user: User
) -> None:
proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
update_proposal_decision(
proposal_id=proposal.id,
tenant_id=TENANT,
officer_id=test_user.id,
decision="APPROVED",
db_session=db_session,
)
db_session.commit()
assert proposal.jira_synced is False
synced = mark_proposal_jira_synced(proposal.id, TENANT, db_session)
db_session.commit()
assert synced is not None
assert synced.jira_synced is True
assert synced.jira_synced_at is not None
def test_mark_jira_synced_returns_none_for_nonexistent(
self, db_session: Session
) -> None:
assert mark_proposal_jira_synced(uuid4(), TENANT, db_session) is None
def test_new_decision_resets_jira_synced(
self, db_session: Session, test_user: User
) -> None:
"""Re-deciding should reset jira_synced so the new decision can be synced."""
proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
update_proposal_decision(
proposal_id=proposal.id,
tenant_id=TENANT,
officer_id=test_user.id,
decision="APPROVED",
db_session=db_session,
)
mark_proposal_jira_synced(proposal.id, TENANT, db_session)
db_session.commit()
assert proposal.jira_synced is True
update_proposal_decision(
proposal_id=proposal.id,
tenant_id=TENANT,
officer_id=test_user.id,
decision="REJECTED",
db_session=db_session,
)
db_session.commit()
db_session.refresh(proposal)
assert proposal.jira_synced is False
assert proposal.jira_synced_at is None
assert proposal.status == "REJECTED"
class TestConfig:
def test_create_config(self, db_session: Session) -> None:
# Use a unique tenant to avoid collision with other tests
tenant = f"test-tenant-{uuid4().hex[:8]}"
config = upsert_config(
tenant_id=tenant,
db_session=db_session,
jira_project_key="PROJ",
field_mapping={"title": "summary", "budget": "customfield_10001"},
)
db_session.commit()
assert config.id is not None
assert config.tenant_id == tenant
assert config.jira_project_key == "PROJ"
assert config.field_mapping == {
"title": "summary",
"budget": "customfield_10001",
}
def test_upsert_config_updates_existing(self, db_session: Session) -> None:
tenant = f"test-tenant-{uuid4().hex[:8]}"
first = upsert_config(
tenant_id=tenant,
db_session=db_session,
jira_project_key="OLD",
)
db_session.commit()
first_id = first.id
second = upsert_config(
tenant_id=tenant,
db_session=db_session,
jira_project_key="NEW",
field_mapping={"x": "y"},
)
db_session.commit()
assert second.id == first_id
assert second.jira_project_key == "NEW"
assert second.field_mapping == {"x": "y"}
def test_get_config_returns_correct_tenant(self, db_session: Session) -> None:
tenant = f"test-tenant-{uuid4().hex[:8]}"
upsert_config(
tenant_id=tenant,
db_session=db_session,
jira_project_key="ABC",
jira_writeback={"status_field": "customfield_20001"},
)
db_session.commit()
fetched = get_config(tenant, db_session)
assert fetched is not None
assert fetched.jira_project_key == "ABC"
assert fetched.jira_writeback == {"status_field": "customfield_20001"}
def test_get_config_returns_none_for_unknown_tenant(
self, db_session: Session
) -> None:
assert get_config(f"nonexistent-{uuid4().hex[:8]}", db_session) is None
def test_upsert_config_preserves_unset_fields(self, db_session: Session) -> None:
tenant = f"test-tenant-{uuid4().hex[:8]}"
upsert_config(
tenant_id=tenant,
db_session=db_session,
jira_project_key="KEEP",
jira_connector_id=42,
)
db_session.commit()
# Update only field_mapping, leave jira_project_key alone
upsert_config(
tenant_id=tenant,
db_session=db_session,
field_mapping={"a": "b"},
)
db_session.commit()
fetched = get_config(tenant, db_session)
assert fetched is not None
assert fetched.jira_project_key == "KEEP"
assert fetched.jira_connector_id == 42
assert fetched.field_mapping == {"a": "b"}

View File

@@ -0,0 +1,159 @@
"""Integration tests for proposal state management DB operations."""
from uuid import uuid4
from sqlalchemy.orm import Session
from onyx.server.features.proposal_review.db.proposals import count_proposals
from onyx.server.features.proposal_review.db.proposals import get_or_create_proposal
from onyx.server.features.proposal_review.db.proposals import get_proposal
from onyx.server.features.proposal_review.db.proposals import (
get_proposal_by_document_id,
)
from onyx.server.features.proposal_review.db.proposals import list_proposals
from onyx.server.features.proposal_review.db.proposals import update_proposal_status
from tests.external_dependency_unit.constants import TEST_TENANT_ID
TENANT = TEST_TENANT_ID
class TestGetOrCreateProposal:
def test_creates_proposal_on_first_call(self, db_session: Session) -> None:
doc_id = f"doc-{uuid4().hex[:8]}"
proposal = get_or_create_proposal(doc_id, TENANT, db_session)
db_session.commit()
assert proposal.id is not None
assert proposal.document_id == doc_id
assert proposal.tenant_id == TENANT
assert proposal.status == "PENDING"
def test_returns_same_proposal_on_second_call(self, db_session: Session) -> None:
doc_id = f"doc-{uuid4().hex[:8]}"
first = get_or_create_proposal(doc_id, TENANT, db_session)
db_session.commit()
second = get_or_create_proposal(doc_id, TENANT, db_session)
assert second.id == first.id
def test_different_document_ids_create_different_proposals(
self, db_session: Session
) -> None:
p1 = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
p2 = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
db_session.commit()
assert p1.id != p2.id
class TestGetProposal:
def test_returns_none_for_nonexistent_id(self, db_session: Session) -> None:
result = get_proposal(uuid4(), TENANT, db_session)
assert result is None
def test_returns_proposal_by_id(self, db_session: Session) -> None:
doc_id = f"doc-{uuid4().hex[:8]}"
created = get_or_create_proposal(doc_id, TENANT, db_session)
db_session.commit()
fetched = get_proposal(created.id, TENANT, db_session)
assert fetched is not None
assert fetched.id == created.id
assert fetched.document_id == doc_id
def test_returns_none_for_wrong_tenant(self, db_session: Session) -> None:
doc_id = f"doc-{uuid4().hex[:8]}"
created = get_or_create_proposal(doc_id, TENANT, db_session)
db_session.commit()
result = get_proposal(created.id, "nonexistent_tenant", db_session)
assert result is None
class TestGetProposalByDocumentId:
def test_returns_none_when_no_proposal_exists(self, db_session: Session) -> None:
result = get_proposal_by_document_id("no-such-doc", TENANT, db_session)
assert result is None
def test_finds_proposal_by_document_id(self, db_session: Session) -> None:
doc_id = f"doc-{uuid4().hex[:8]}"
created = get_or_create_proposal(doc_id, TENANT, db_session)
db_session.commit()
fetched = get_proposal_by_document_id(doc_id, TENANT, db_session)
assert fetched is not None
assert fetched.id == created.id
class TestUpdateProposalStatus:
def test_changes_status_correctly(self, db_session: Session) -> None:
doc_id = f"doc-{uuid4().hex[:8]}"
proposal = get_or_create_proposal(doc_id, TENANT, db_session)
db_session.commit()
assert proposal.status == "PENDING"
updated = update_proposal_status(proposal.id, TENANT, "IN_REVIEW", db_session)
db_session.commit()
assert updated is not None
assert updated.status == "IN_REVIEW"
# Verify persisted
refetched = get_proposal(proposal.id, TENANT, db_session)
assert refetched is not None
assert refetched.status == "IN_REVIEW"
def test_returns_none_for_nonexistent_proposal(self, db_session: Session) -> None:
result = update_proposal_status(uuid4(), TENANT, "IN_REVIEW", db_session)
assert result is None
def test_successive_status_updates(self, db_session: Session) -> None:
doc_id = f"doc-{uuid4().hex[:8]}"
proposal = get_or_create_proposal(doc_id, TENANT, db_session)
db_session.commit()
update_proposal_status(proposal.id, TENANT, "IN_REVIEW", db_session)
db_session.commit()
update_proposal_status(proposal.id, TENANT, "APPROVED", db_session)
db_session.commit()
refetched = get_proposal(proposal.id, TENANT, db_session)
assert refetched is not None
assert refetched.status == "APPROVED"
class TestListAndCountProposals:
def test_list_proposals_with_status_filter(self, db_session: Session) -> None:
p1 = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
p2 = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
db_session.commit()
update_proposal_status(p1.id, TENANT, "IN_REVIEW", db_session)
db_session.commit()
in_review = list_proposals(TENANT, db_session, status="IN_REVIEW")
in_review_ids = {p.id for p in in_review}
assert p1.id in in_review_ids
assert p2.id not in in_review_ids
def test_count_proposals_with_status_filter(self, db_session: Session) -> None:
p1 = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
db_session.commit()
update_proposal_status(p1.id, TENANT, "COMPLETED", db_session)
db_session.commit()
total = count_proposals(TENANT, db_session)
completed = count_proposals(TENANT, db_session, status="COMPLETED")
assert total >= 2
assert completed >= 1
def test_list_proposals_pagination(self, db_session: Session) -> None:
for _ in range(5):
get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
db_session.commit()
page = list_proposals(TENANT, db_session, limit=2, offset=0)
assert len(page) <= 2

View File

@@ -0,0 +1,448 @@
"""Integration tests for review run + findings + progress tracking."""
from uuid import uuid4
from sqlalchemy.orm import Session
from onyx.db.models import User
from onyx.server.features.proposal_review.db.findings import create_finding
from onyx.server.features.proposal_review.db.findings import create_review_run
from onyx.server.features.proposal_review.db.findings import get_finding
from onyx.server.features.proposal_review.db.findings import get_latest_review_run
from onyx.server.features.proposal_review.db.findings import get_review_run
from onyx.server.features.proposal_review.db.findings import (
list_findings_by_proposal,
)
from onyx.server.features.proposal_review.db.findings import list_findings_by_run
from onyx.server.features.proposal_review.db.proposals import get_or_create_proposal
from onyx.server.features.proposal_review.db.rulesets import create_rule
from onyx.server.features.proposal_review.db.rulesets import create_ruleset
from tests.external_dependency_unit.constants import TEST_TENANT_ID
TENANT = TEST_TENANT_ID
class TestReviewRun:
def test_create_review_run_and_verify_status(
self, db_session: Session, test_user: User
) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"Review RS {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
create_rule(
ruleset_id=rs.id,
name="Rule 1",
rule_type="DOCUMENT_CHECK",
prompt_template="t1",
db_session=db_session,
)
create_rule(
ruleset_id=rs.id,
name="Rule 2",
rule_type="DOCUMENT_CHECK",
prompt_template="t2",
db_session=db_session,
)
proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
db_session.commit()
run = create_review_run(
proposal_id=proposal.id,
ruleset_id=rs.id,
triggered_by=test_user.id,
total_rules=2,
db_session=db_session,
)
db_session.commit()
assert run.id is not None
assert run.proposal_id == proposal.id
assert run.ruleset_id == rs.id
assert run.triggered_by == test_user.id
assert run.total_rules == 2
assert run.completed_rules == 0
assert run.status == "PENDING"
def test_get_review_run(self, db_session: Session, test_user: User) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"RS {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
run = create_review_run(
proposal_id=proposal.id,
ruleset_id=rs.id,
triggered_by=test_user.id,
total_rules=1,
db_session=db_session,
)
db_session.commit()
fetched = get_review_run(run.id, db_session)
assert fetched is not None
assert fetched.id == run.id
def test_get_review_run_returns_none_for_nonexistent(
self, db_session: Session
) -> None:
assert get_review_run(uuid4(), db_session) is None
def test_get_latest_review_run(self, db_session: Session, test_user: User) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"RS {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
create_review_run(
proposal_id=proposal.id,
ruleset_id=rs.id,
triggered_by=test_user.id,
total_rules=1,
db_session=db_session,
)
db_session.commit()
run2 = create_review_run(
proposal_id=proposal.id,
ruleset_id=rs.id,
triggered_by=test_user.id,
total_rules=2,
db_session=db_session,
)
db_session.commit()
latest = get_latest_review_run(proposal.id, db_session)
assert latest is not None
assert latest.id == run2.id
def test_increment_completed_rules_tracks_progress(
self, db_session: Session, test_user: User
) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"Progress RS {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
run = create_review_run(
proposal_id=proposal.id,
ruleset_id=rs.id,
triggered_by=test_user.id,
total_rules=3,
db_session=db_session,
)
db_session.commit()
# Simulate progress by incrementing completed_rules directly
run.completed_rules = 1
db_session.flush()
db_session.commit()
fetched = get_review_run(run.id, db_session)
assert fetched is not None
assert fetched.completed_rules == 1
assert fetched.total_rules == 3
run.completed_rules = 3
db_session.flush()
db_session.commit()
fetched = get_review_run(run.id, db_session)
assert fetched is not None
assert fetched.completed_rules == 3
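# ---------------------------------------------------------------------------
# The progress test above bumps completed_rules directly. In a real review
# worker several rule evaluations may finish concurrently, so a plausible
# implementation (illustrative only; assumes a ProposalReviewRun model) would
# use an atomic SQL increment instead of read-modify-write:
#
#     from sqlalchemy import update
#
#     def increment_completed_rules(run_id, db_session):
#         db_session.execute(
#             update(ProposalReviewRun)
#             .where(ProposalReviewRun.id == run_id)
#             .values(completed_rules=ProposalReviewRun.completed_rules + 1)
#         )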
class TestFindings:
def test_create_finding_and_retrieve(
self, db_session: Session, test_user: User
) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"Findings RS {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
rule = create_rule(
ruleset_id=rs.id,
name="Budget Rule",
rule_type="DOCUMENT_CHECK",
prompt_template="Check budget",
db_session=db_session,
)
proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
run = create_review_run(
proposal_id=proposal.id,
ruleset_id=rs.id,
triggered_by=test_user.id,
total_rules=1,
db_session=db_session,
)
db_session.commit()
finding = create_finding(
proposal_id=proposal.id,
rule_id=rule.id,
review_run_id=run.id,
verdict="PASS",
db_session=db_session,
confidence="HIGH",
evidence="Budget is $500k",
explanation="Under the $1M cap",
llm_model="gpt-4",
llm_tokens_used=1500,
)
db_session.commit()
fetched = get_finding(finding.id, db_session)
assert fetched is not None
assert fetched.verdict == "PASS"
assert fetched.confidence == "HIGH"
assert fetched.evidence == "Budget is $500k"
assert fetched.llm_model == "gpt-4"
assert fetched.llm_tokens_used == 1500
assert fetched.rule is not None
assert fetched.rule.name == "Budget Rule"
def test_list_findings_by_proposal(
self, db_session: Session, test_user: User
) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"List Findings RS {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
rule1 = create_rule(
ruleset_id=rs.id,
name="R1",
rule_type="DOCUMENT_CHECK",
prompt_template="t1",
db_session=db_session,
)
rule2 = create_rule(
ruleset_id=rs.id,
name="R2",
rule_type="DOCUMENT_CHECK",
prompt_template="t2",
db_session=db_session,
)
proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
run = create_review_run(
proposal_id=proposal.id,
ruleset_id=rs.id,
triggered_by=test_user.id,
total_rules=2,
db_session=db_session,
)
db_session.commit()
create_finding(
proposal_id=proposal.id,
rule_id=rule1.id,
review_run_id=run.id,
verdict="PASS",
db_session=db_session,
)
create_finding(
proposal_id=proposal.id,
rule_id=rule2.id,
review_run_id=run.id,
verdict="FAIL",
db_session=db_session,
)
db_session.commit()
findings = list_findings_by_proposal(proposal.id, db_session)
assert len(findings) == 2
verdicts = {f.verdict for f in findings}
assert verdicts == {"PASS", "FAIL"}
def test_list_findings_by_run_filters_correctly(
self, db_session: Session, test_user: User
) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"Run Filter RS {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
rule = create_rule(
ruleset_id=rs.id,
name="R",
rule_type="DOCUMENT_CHECK",
prompt_template="t",
db_session=db_session,
)
proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
run1 = create_review_run(
proposal_id=proposal.id,
ruleset_id=rs.id,
triggered_by=test_user.id,
total_rules=1,
db_session=db_session,
)
run2 = create_review_run(
proposal_id=proposal.id,
ruleset_id=rs.id,
triggered_by=test_user.id,
total_rules=1,
db_session=db_session,
)
db_session.commit()
create_finding(
proposal_id=proposal.id,
rule_id=rule.id,
review_run_id=run1.id,
verdict="PASS",
db_session=db_session,
)
create_finding(
proposal_id=proposal.id,
rule_id=rule.id,
review_run_id=run2.id,
verdict="FAIL",
db_session=db_session,
)
db_session.commit()
run1_findings = list_findings_by_run(run1.id, db_session)
assert len(run1_findings) == 1
assert run1_findings[0].verdict == "PASS"
run2_findings = list_findings_by_run(run2.id, db_session)
assert len(run2_findings) == 1
assert run2_findings[0].verdict == "FAIL"
def test_list_findings_by_proposal_with_run_id_filter(
self, db_session: Session, test_user: User
) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"Filter RS {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
rule = create_rule(
ruleset_id=rs.id,
name="R",
rule_type="DOCUMENT_CHECK",
prompt_template="t",
db_session=db_session,
)
proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
run1 = create_review_run(
proposal_id=proposal.id,
ruleset_id=rs.id,
triggered_by=test_user.id,
total_rules=1,
db_session=db_session,
)
run2 = create_review_run(
proposal_id=proposal.id,
ruleset_id=rs.id,
triggered_by=test_user.id,
total_rules=1,
db_session=db_session,
)
db_session.commit()
create_finding(
proposal_id=proposal.id,
rule_id=rule.id,
review_run_id=run1.id,
verdict="PASS",
db_session=db_session,
)
create_finding(
proposal_id=proposal.id,
rule_id=rule.id,
review_run_id=run2.id,
verdict="FAIL",
db_session=db_session,
)
db_session.commit()
# All findings for proposal
all_findings = list_findings_by_proposal(proposal.id, db_session)
assert len(all_findings) == 2
# Filtered by run
filtered = list_findings_by_proposal(
proposal.id, db_session, review_run_id=run1.id
)
assert len(filtered) == 1
assert filtered[0].verdict == "PASS"
def test_get_finding_returns_none_for_nonexistent(
self, db_session: Session
) -> None:
assert get_finding(uuid4(), db_session) is None
def test_full_review_flow_end_to_end(
self, db_session: Session, test_user: User
) -> None:
"""Create ruleset with rules -> proposal -> run -> findings -> verify."""
# Setup
rs = create_ruleset(
tenant_id=TENANT,
name=f"E2E RS {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
rules = []
for i in range(3):
r = create_rule(
ruleset_id=rs.id,
name=f"E2E Rule {i}",
rule_type="DOCUMENT_CHECK",
prompt_template=f"Check {i}: {{{{proposal_text}}}}",
db_session=db_session,
)
rules.append(r)
proposal = get_or_create_proposal(
f"doc-e2e-{uuid4().hex[:8]}", TENANT, db_session
)
run = create_review_run(
proposal_id=proposal.id,
ruleset_id=rs.id,
triggered_by=test_user.id,
total_rules=3,
db_session=db_session,
)
db_session.commit()
# Create findings for each rule
verdicts = ["PASS", "FAIL", "PASS"]
for rule, verdict in zip(rules, verdicts):
create_finding(
proposal_id=proposal.id,
rule_id=rule.id,
review_run_id=run.id,
verdict=verdict,
db_session=db_session,
confidence="HIGH",
)
run.completed_rules += 1
db_session.flush()
db_session.commit()
# Verify
fetched_run = get_review_run(run.id, db_session)
assert fetched_run is not None
assert fetched_run.completed_rules == 3
assert fetched_run.total_rules == 3
findings = list_findings_by_run(run.id, db_session)
assert len(findings) == 3
assert {f.verdict for f in findings} == {"PASS", "FAIL"}

View File

@@ -0,0 +1,422 @@
"""Integration tests for ruleset + rule CRUD DB operations."""
from uuid import uuid4
import pytest
from sqlalchemy.orm import Session
from onyx.db.models import User
from onyx.server.features.proposal_review.db.rulesets import bulk_update_rules
from onyx.server.features.proposal_review.db.rulesets import count_active_rules
from onyx.server.features.proposal_review.db.rulesets import create_rule
from onyx.server.features.proposal_review.db.rulesets import create_ruleset
from onyx.server.features.proposal_review.db.rulesets import delete_rule
from onyx.server.features.proposal_review.db.rulesets import delete_ruleset
from onyx.server.features.proposal_review.db.rulesets import get_rule
from onyx.server.features.proposal_review.db.rulesets import get_ruleset
from onyx.server.features.proposal_review.db.rulesets import list_rules_by_ruleset
from onyx.server.features.proposal_review.db.rulesets import list_rulesets
from onyx.server.features.proposal_review.db.rulesets import update_rule
from onyx.server.features.proposal_review.db.rulesets import update_ruleset
from tests.external_dependency_unit.constants import TEST_TENANT_ID
TENANT = TEST_TENANT_ID
class TestRulesetCRUD:
def test_create_ruleset_appears_in_list(
self, db_session: Session, test_user: User
) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"Compliance v1 {uuid4().hex[:6]}",
db_session=db_session,
description="First ruleset",
created_by=test_user.id,
)
db_session.commit()
rulesets = list_rulesets(TENANT, db_session)
ids = [r.id for r in rulesets]
assert rs.id in ids
def test_create_ruleset_with_rules_returned_together(
self, db_session: Session, test_user: User
) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"RS with rules {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
create_rule(
ruleset_id=rs.id,
name="Rule A",
rule_type="DOCUMENT_CHECK",
prompt_template="Check A: {{proposal_text}}",
db_session=db_session,
)
create_rule(
ruleset_id=rs.id,
name="Rule B",
rule_type="METADATA_CHECK",
prompt_template="Check B: {{proposal_text}}",
db_session=db_session,
)
db_session.commit()
fetched = get_ruleset(rs.id, TENANT, db_session)
assert fetched is not None
assert len(fetched.rules) == 2
rule_names = {r.name for r in fetched.rules}
assert rule_names == {"Rule A", "Rule B"}
def test_list_rulesets_active_only_filter(
self, db_session: Session, test_user: User
) -> None:
rs_active = create_ruleset(
tenant_id=TENANT,
name=f"Active RS {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
rs_inactive = create_ruleset(
tenant_id=TENANT,
name=f"Inactive RS {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
update_ruleset(rs_inactive.id, TENANT, db_session, {"is_active": False})
db_session.commit()
active_rulesets = list_rulesets(TENANT, db_session, active_only=True)
active_ids = {r.id for r in active_rulesets}
assert rs_active.id in active_ids
assert rs_inactive.id not in active_ids
def test_update_ruleset_changes_persist(
self, db_session: Session, test_user: User
) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"Original {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
db_session.commit()
updated = update_ruleset(
rs.id,
TENANT,
db_session,
{"name": "Updated Name", "description": "New desc"},
)
db_session.commit()
assert updated is not None
assert updated.name == "Updated Name"
assert updated.description == "New desc"
refetched = get_ruleset(rs.id, TENANT, db_session)
assert refetched is not None
assert refetched.name == "Updated Name"
def test_update_nonexistent_ruleset_returns_none(self, db_session: Session) -> None:
result = update_ruleset(uuid4(), TENANT, db_session, {"name": "nope"})
assert result is None
def test_delete_ruleset_returns_false_for_nonexistent(
self, db_session: Session
) -> None:
assert delete_ruleset(uuid4(), TENANT, db_session) is False
def test_set_default_ruleset_clears_previous_default(
self, db_session: Session, test_user: User
) -> None:
rs1 = create_ruleset(
tenant_id=TENANT,
name=f"Default 1 {uuid4().hex[:6]}",
db_session=db_session,
is_default=True,
created_by=test_user.id,
)
db_session.commit()
assert rs1.is_default is True
rs2 = create_ruleset(
tenant_id=TENANT,
name=f"Default 2 {uuid4().hex[:6]}",
db_session=db_session,
is_default=True,
created_by=test_user.id,
)
db_session.commit()
# rs1 should no longer be default
db_session.refresh(rs1)
assert rs1.is_default is False
assert rs2.is_default is True
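# ---------------------------------------------------------------------------
# The single-default invariant verified above is presumably enforced inside
# create_ruleset/update_ruleset. A sketch of the idea, assuming a
# ProposalReviewRuleset model (not necessarily the actual implementation):
#
#     from sqlalchemy import update
#
#     if is_default:
#         db_session.execute(
#             update(ProposalReviewRuleset)
#             .where(
#                 ProposalReviewRuleset.tenant_id == tenant_id,
#                 ProposalReviewRuleset.is_default.is_(True),
#             )
#             .values(is_default=False)
#         )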
def test_delete_ruleset_cascade_deletes_rules(
self, db_session: Session, test_user: User
) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"RS to delete {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
r1 = create_rule(
ruleset_id=rs.id,
name="Doomed Rule",
rule_type="DOCUMENT_CHECK",
prompt_template="{{proposal_text}}",
db_session=db_session,
)
rule_id = r1.id
db_session.commit()
assert delete_ruleset(rs.id, TENANT, db_session) is True
db_session.commit()
assert get_ruleset(rs.id, TENANT, db_session) is None
assert get_rule(rule_id, db_session) is None
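# ---------------------------------------------------------------------------
# The cascade verified above could be enforced at the ORM level, e.g.
# rules = relationship("ProposalReviewRule", cascade="all, delete-orphan"),
# or at the database level via ON DELETE CASCADE on the rule's ruleset_id
# foreign key. The test intentionally pins only the observable behavior, so
# either mechanism would satisfy it.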
class TestRuleCRUD:
def test_create_and_get_rule(self, db_session: Session, test_user: User) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"RS for rules {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
rule = create_rule(
ruleset_id=rs.id,
name="Budget Cap",
rule_type="DOCUMENT_CHECK",
prompt_template="Check budget cap: {{proposal_text}}",
db_session=db_session,
description="Verify budget < $1M",
category="FINANCIAL",
is_hard_stop=True,
priority=10,
)
db_session.commit()
fetched = get_rule(rule.id, db_session)
assert fetched is not None
assert fetched.name == "Budget Cap"
assert fetched.rule_type == "DOCUMENT_CHECK"
assert fetched.is_hard_stop is True
assert fetched.priority == 10
assert fetched.category == "FINANCIAL"
def test_update_rule_prompt_template_and_is_active(
self, db_session: Session, test_user: User
) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"RS {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
rule = create_rule(
ruleset_id=rs.id,
name="Rule X",
rule_type="CUSTOM_NL",
prompt_template="old template",
db_session=db_session,
)
db_session.commit()
assert rule.is_active is True
updated = update_rule(
rule.id,
db_session,
{"prompt_template": "new template: {{proposal_text}}", "is_active": False},
)
db_session.commit()
assert updated is not None
assert updated.prompt_template == "new template: {{proposal_text}}"
assert updated.is_active is False
refetched = get_rule(rule.id, db_session)
assert refetched is not None
assert refetched.prompt_template == "new template: {{proposal_text}}"
assert refetched.is_active is False
def test_update_nonexistent_rule_returns_none(self, db_session: Session) -> None:
result = update_rule(uuid4(), db_session, {"name": "nope"})
assert result is None
def test_delete_rule(self, db_session: Session, test_user: User) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"RS {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
rule = create_rule(
ruleset_id=rs.id,
name="Temp Rule",
rule_type="DOCUMENT_CHECK",
prompt_template="{{proposal_text}}",
db_session=db_session,
)
rule_id = rule.id
db_session.commit()
assert delete_rule(rule_id, db_session) is True
db_session.commit()
assert get_rule(rule_id, db_session) is None
def test_delete_nonexistent_rule_returns_false(self, db_session: Session) -> None:
assert delete_rule(uuid4(), db_session) is False
def test_list_rules_by_ruleset_respects_active_only(
self, db_session: Session, test_user: User
) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"RS {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
r_active = create_rule(
ruleset_id=rs.id,
name="Active",
rule_type="DOCUMENT_CHECK",
prompt_template="{{proposal_text}}",
db_session=db_session,
)
r_inactive = create_rule(
ruleset_id=rs.id,
name="Inactive",
rule_type="DOCUMENT_CHECK",
prompt_template="{{proposal_text}}",
db_session=db_session,
)
update_rule(r_inactive.id, db_session, {"is_active": False})
db_session.commit()
all_rules = list_rules_by_ruleset(rs.id, db_session)
assert len(all_rules) == 2
active_rules = list_rules_by_ruleset(rs.id, db_session, active_only=True)
assert len(active_rules) == 1
assert active_rules[0].id == r_active.id
def test_bulk_activate_rules_only_affects_specified_rules(
self, db_session: Session, test_user: User
) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"Bulk test RS {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
# Create 5 rules, all initially active
rules = []
for i in range(5):
r = create_rule(
ruleset_id=rs.id,
name=f"Rule {i}",
rule_type="DOCUMENT_CHECK",
prompt_template=f"Template {i}",
db_session=db_session,
)
rules.append(r)
db_session.commit()
# Deactivate all 5
all_ids = [r.id for r in rules]
bulk_update_rules(all_ids, "deactivate", rs.id, db_session)
db_session.commit()
# Verify all are inactive
assert count_active_rules(rs.id, db_session) == 0
# Bulk activate only the first 3
activate_ids = [rules[0].id, rules[1].id, rules[2].id]
count = bulk_update_rules(activate_ids, "activate", rs.id, db_session)
db_session.commit()
assert count == 3
assert count_active_rules(rs.id, db_session) == 3
# Verify exactly which are active
active_rules = list_rules_by_ruleset(rs.id, db_session, active_only=True)
active_ids_result = {r.id for r in active_rules}
assert active_ids_result == set(activate_ids)
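# ---------------------------------------------------------------------------
# A minimal sketch of what bulk_update_rules might do, inferred from the
# tests in this class (illustrative only; assumes a ProposalReviewRule model):
#
#     from sqlalchemy import delete, update
#
#     def bulk_update_rules(rule_ids, action, ruleset_id, db_session):
#         conditions = (
#             ProposalReviewRule.id.in_(rule_ids),
#             ProposalReviewRule.ruleset_id == ruleset_id,
#         )
#         if action == "activate":
#             stmt = update(ProposalReviewRule).where(*conditions).values(is_active=True)
#         elif action == "deactivate":
#             stmt = update(ProposalReviewRule).where(*conditions).values(is_active=False)
#         elif action == "delete":
#             stmt = delete(ProposalReviewRule).where(*conditions)
#         else:
#             raise ValueError(f"Unknown bulk action: {action}")
#         return db_session.execute(stmt).rowcount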
def test_bulk_delete_rules(self, db_session: Session, test_user: User) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"Bulk delete RS {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
r1 = create_rule(
ruleset_id=rs.id,
name="Keep",
rule_type="DOCUMENT_CHECK",
prompt_template="keep",
db_session=db_session,
)
r2 = create_rule(
ruleset_id=rs.id,
name="Delete 1",
rule_type="DOCUMENT_CHECK",
prompt_template="del1",
db_session=db_session,
)
r3 = create_rule(
ruleset_id=rs.id,
name="Delete 2",
rule_type="DOCUMENT_CHECK",
prompt_template="del2",
db_session=db_session,
)
db_session.commit()
count = bulk_update_rules([r2.id, r3.id], "delete", rs.id, db_session)
db_session.commit()
assert count == 2
remaining = list_rules_by_ruleset(rs.id, db_session)
assert len(remaining) == 1
assert remaining[0].id == r1.id
def test_bulk_update_unknown_action_raises_error(self, db_session: Session) -> None:
with pytest.raises(ValueError, match="Unknown bulk action"):
bulk_update_rules([uuid4()], "explode", uuid4(), db_session)
def test_count_active_rules(self, db_session: Session, test_user: User) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"Count RS {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
create_rule(
ruleset_id=rs.id,
name="Active1",
rule_type="DOCUMENT_CHECK",
prompt_template="t1",
db_session=db_session,
)
r2 = create_rule(
ruleset_id=rs.id,
name="Inactive1",
rule_type="DOCUMENT_CHECK",
prompt_template="t2",
db_session=db_session,
)
update_rule(r2.id, db_session, {"is_active": False})
db_session.commit()
assert count_active_rules(rs.id, db_session) == 1

View File

@@ -38,38 +38,41 @@ class TestAddMemory:
def test_add_memory_creates_row(self, db_session: Session, test_user: User) -> None:
"""Verify that add_memory inserts a new Memory row."""
user_id = test_user.id
- memory = add_memory(
+ memory_id = add_memory(
user_id=user_id,
memory_text="User prefers dark mode",
db_session=db_session,
)
- assert memory.id is not None
- assert memory.user_id == user_id
- assert memory.memory_text == "User prefers dark mode"
+ assert memory_id is not None
# Verify it persists
- fetched = db_session.get(Memory, memory.id)
+ fetched = db_session.get(Memory, memory_id)
assert fetched is not None
assert fetched.user_id == user_id
assert fetched.memory_text == "User prefers dark mode"
def test_add_multiple_memories(self, db_session: Session, test_user: User) -> None:
"""Verify that multiple memories can be added for the same user."""
user_id = test_user.id
- m1 = add_memory(
+ m1_id = add_memory(
user_id=user_id,
memory_text="Favorite color is blue",
db_session=db_session,
)
- m2 = add_memory(
+ m2_id = add_memory(
user_id=user_id,
memory_text="Works in engineering",
db_session=db_session,
)
- assert m1.id != m2.id
- assert m1.memory_text == "Favorite color is blue"
- assert m2.memory_text == "Works in engineering"
+ assert m1_id != m2_id
+ fetched_m1 = db_session.get(Memory, m1_id)
+ fetched_m2 = db_session.get(Memory, m2_id)
+ assert fetched_m1 is not None
+ assert fetched_m2 is not None
+ assert fetched_m1.memory_text == "Favorite color is blue"
+ assert fetched_m2.memory_text == "Works in engineering"
class TestUpdateMemoryAtIndex:
@@ -82,15 +85,17 @@ class TestUpdateMemoryAtIndex:
add_memory(user_id=user_id, memory_text="Memory 1", db_session=db_session)
add_memory(user_id=user_id, memory_text="Memory 2", db_session=db_session)
- updated = update_memory_at_index(
+ updated_id = update_memory_at_index(
user_id=user_id,
index=1,
new_text="Updated Memory 1",
db_session=db_session,
)
- assert updated is not None
- assert updated.memory_text == "Updated Memory 1"
+ assert updated_id is not None
+ fetched = db_session.get(Memory, updated_id)
+ assert fetched is not None
+ assert fetched.memory_text == "Updated Memory 1"
def test_update_memory_at_out_of_range_index(
self, db_session: Session, test_user: User
@@ -167,7 +172,7 @@ class TestMemoryCap:
assert len(rows_before) == MAX_MEMORIES_PER_USER
# Add one more — should evict the oldest
- new_memory = add_memory(
+ new_memory_id = add_memory(
user_id=user_id,
memory_text="New memory after cap",
db_session=db_session,
@@ -181,7 +186,7 @@ class TestMemoryCap:
# Oldest ("Memory 0") should be gone; "Memory 1" is now the oldest
assert rows_after[0].memory_text == "Memory 1"
# Newest should be the one we just added
- assert rows_after[-1].id == new_memory.id
+ assert rows_after[-1].id == new_memory_id
assert rows_after[-1].memory_text == "New memory after cap"
@@ -221,22 +226,26 @@ class TestGetMemoriesWithUserId:
user_id = test_user_no_memories.id
# Add a memory
- memory = add_memory(
+ memory_id = add_memory(
user_id=user_id,
memory_text="Memory with use_memories off",
db_session=db_session,
)
- assert memory.memory_text == "Memory with use_memories off"
+ fetched = db_session.get(Memory, memory_id)
+ assert fetched is not None
+ assert fetched.memory_text == "Memory with use_memories off"
# Update that memory
- updated = update_memory_at_index(
+ updated_id = update_memory_at_index(
user_id=user_id,
index=0,
new_text="Updated memory with use_memories off",
db_session=db_session,
)
- assert updated is not None
- assert updated.memory_text == "Updated memory with use_memories off"
+ assert updated_id is not None
+ fetched_updated = db_session.get(Memory, updated_id)
+ assert fetched_updated is not None
+ assert fetched_updated.memory_text == "Updated memory with use_memories off"
# Verify get_memories returns the updated memory
context = get_memories(test_user_no_memories, db_session)
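# ---------------------------------------------------------------------------
# Context for this file's changes: add_memory and update_memory_at_index now
# return the affected row's id rather than the ORM object, and add_memory
# enforces MAX_MEMORIES_PER_USER by evicting the oldest row. A sketch of the
# eviction step (illustrative; assumes Memory rows sorted oldest-first,
# whether by id or a created_at column):
#
#     rows = db_session.scalars(
#         select(Memory).where(Memory.user_id == user_id).order_by(Memory.id)
#     ).all()
#     if len(rows) >= MAX_MEMORIES_PER_USER:
#         db_session.delete(rows[0])  # drop the oldest before inserting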

View File

@@ -301,7 +301,6 @@ class TestRunModels:
patch("onyx.chat.process_message.run_llm_loop", side_effect=emit_stop),
patch("onyx.chat.process_message.run_deep_research_llm_loop"),
patch("onyx.chat.process_message.construct_tools", return_value={}),
patch("onyx.chat.process_message.get_session_with_current_tenant"),
patch("onyx.chat.process_message.llm_loop_completion_handle"),
patch(
"onyx.chat.process_message.get_llm_token_counter",
@@ -332,7 +331,6 @@ class TestRunModels:
patch("onyx.chat.process_message.run_llm_loop", side_effect=emit_one),
patch("onyx.chat.process_message.run_deep_research_llm_loop"),
patch("onyx.chat.process_message.construct_tools", return_value={}),
patch("onyx.chat.process_message.get_session_with_current_tenant"),
patch("onyx.chat.process_message.llm_loop_completion_handle"),
patch(
"onyx.chat.process_message.get_llm_token_counter",
@@ -363,7 +361,6 @@ class TestRunModels:
patch("onyx.chat.process_message.run_llm_loop", side_effect=emit_one),
patch("onyx.chat.process_message.run_deep_research_llm_loop"),
patch("onyx.chat.process_message.construct_tools", return_value={}),
patch("onyx.chat.process_message.get_session_with_current_tenant"),
patch("onyx.chat.process_message.llm_loop_completion_handle"),
patch(
"onyx.chat.process_message.get_llm_token_counter",
@@ -391,7 +388,6 @@ class TestRunModels:
patch("onyx.chat.process_message.run_llm_loop", side_effect=always_fail),
patch("onyx.chat.process_message.run_deep_research_llm_loop"),
patch("onyx.chat.process_message.construct_tools", return_value={}),
patch("onyx.chat.process_message.get_session_with_current_tenant"),
patch("onyx.chat.process_message.llm_loop_completion_handle"),
patch(
"onyx.chat.process_message.get_llm_token_counter",
@@ -423,7 +419,6 @@ class TestRunModels:
),
patch("onyx.chat.process_message.run_deep_research_llm_loop"),
patch("onyx.chat.process_message.construct_tools", return_value={}),
patch("onyx.chat.process_message.get_session_with_current_tenant"),
patch("onyx.chat.process_message.llm_loop_completion_handle"),
patch(
"onyx.chat.process_message.get_llm_token_counter",
@@ -456,7 +451,6 @@ class TestRunModels:
patch("onyx.chat.process_message.run_llm_loop", side_effect=slow_llm),
patch("onyx.chat.process_message.run_deep_research_llm_loop"),
patch("onyx.chat.process_message.construct_tools", return_value={}),
patch("onyx.chat.process_message.get_session_with_current_tenant"),
patch("onyx.chat.process_message.llm_loop_completion_handle"),
patch(
"onyx.chat.process_message.get_llm_token_counter",
@@ -497,7 +491,6 @@ class TestRunModels:
patch("onyx.chat.process_message.run_llm_loop", side_effect=slow_llm),
patch("onyx.chat.process_message.run_deep_research_llm_loop"),
patch("onyx.chat.process_message.construct_tools", return_value={}),
patch("onyx.chat.process_message.get_session_with_current_tenant"),
patch(
"onyx.chat.process_message.llm_loop_completion_handle"
) as mock_handle,
@@ -519,7 +512,6 @@ class TestRunModels:
patch("onyx.chat.process_message.run_llm_loop"),
patch("onyx.chat.process_message.run_deep_research_llm_loop"),
patch("onyx.chat.process_message.construct_tools", return_value={}),
patch("onyx.chat.process_message.get_session_with_current_tenant"),
patch(
"onyx.chat.process_message.llm_loop_completion_handle"
) as mock_handle,
@@ -542,7 +534,6 @@ class TestRunModels:
patch("onyx.chat.process_message.run_llm_loop", side_effect=always_fail),
patch("onyx.chat.process_message.run_deep_research_llm_loop"),
patch("onyx.chat.process_message.construct_tools", return_value={}),
patch("onyx.chat.process_message.get_session_with_current_tenant"),
patch(
"onyx.chat.process_message.llm_loop_completion_handle"
) as mock_handle,
@@ -596,7 +587,6 @@ class TestRunModels:
),
patch("onyx.chat.process_message.run_deep_research_llm_loop"),
patch("onyx.chat.process_message.construct_tools", return_value={}),
patch("onyx.chat.process_message.get_session_with_current_tenant"),
patch(
"onyx.chat.process_message.llm_loop_completion_handle",
side_effect=lambda *_, **__: completion_called.set(),
@@ -653,7 +643,6 @@ class TestRunModels:
),
patch("onyx.chat.process_message.run_deep_research_llm_loop"),
patch("onyx.chat.process_message.construct_tools", return_value={}),
patch("onyx.chat.process_message.get_session_with_current_tenant"),
patch(
"onyx.chat.process_message.llm_loop_completion_handle",
side_effect=lambda *_, **__: completion_called.set(),
@@ -706,7 +695,6 @@ class TestRunModels:
patch("onyx.chat.process_message.run_llm_loop", side_effect=fail_model_0),
patch("onyx.chat.process_message.run_deep_research_llm_loop"),
patch("onyx.chat.process_message.construct_tools", return_value={}),
patch("onyx.chat.process_message.get_session_with_current_tenant"),
patch(
"onyx.chat.process_message.llm_loop_completion_handle"
) as mock_handle,
@@ -736,7 +724,6 @@ class TestRunModels:
patch("onyx.chat.process_message.run_llm_loop") as mock_llm,
patch("onyx.chat.process_message.run_deep_research_llm_loop"),
patch("onyx.chat.process_message.construct_tools", return_value={}),
patch("onyx.chat.process_message.get_session_with_current_tenant"),
patch("onyx.chat.process_message.llm_loop_completion_handle"),
patch(
"onyx.chat.process_message.get_llm_token_counter",

View File

@@ -0,0 +1,659 @@
"""Tests for Jira connector enhancements: custom field extraction and attachment fetching."""
from types import SimpleNamespace
from typing import Any
from unittest.mock import MagicMock
from unittest.mock import patch
from jira import JIRA
from jira.resources import CustomFieldOption
from jira.resources import User
from onyx.connectors.jira.connector import _MAX_ATTACHMENT_SIZE_BYTES
from onyx.connectors.jira.connector import JiraConnector
from onyx.connectors.jira.connector import process_jira_issue
from onyx.connectors.jira.utils import CustomFieldExtractor
from onyx.connectors.models import ConnectorFailure
from onyx.connectors.models import Document
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
class _FieldsBag:
"""A plain object whose __dict__ is exactly what we put in it.
MagicMock pollutes __dict__ with internal bookkeeping, which breaks
CustomFieldExtractor.get_issue_custom_fields (it iterates __dict__).
This class gives us full control over the attribute namespace.
"""
def __init__(self, **kwargs: Any) -> None:
for k, v in kwargs.items():
object.__setattr__(self, k, v)
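# Concretely: a MagicMock's __dict__ is full of internal bookkeeping, while a
# _FieldsBag's __dict__ contains exactly the fields we set. For example:
#
#     from unittest.mock import MagicMock
#
#     mock_fields = MagicMock(summary="Test")
#     any(k.startswith("_mock_") for k in vars(mock_fields))  # True
#     vars(_FieldsBag(summary="Test"))                        # {"summary": "Test"}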
def _make_mock_issue(
key: str = "TEST-1",
summary: str = "Test Issue",
description: str = "Test description",
labels: list[str] | None = None,
extra_fields: dict[str, Any] | None = None,
attachments: list[Any] | None = None,
) -> MagicMock:
"""Build a mock Issue with standard fields wired up.
Uses _FieldsBag for ``issue.fields`` so that ``fields.__dict__``
contains only the Jira field attributes (no MagicMock internals).
"""
# Build sub-objects using SimpleNamespace so attribute access
# returns real values instead of auto-generated MagicMock objects.
reporter = SimpleNamespace(
displayName="Reporter Name",
emailAddress="reporter@example.com",
)
assignee = SimpleNamespace(
displayName="Assignee Name",
emailAddress="assignee@example.com",
)
priority = SimpleNamespace(name="High")
status = SimpleNamespace(name="Open")
project = SimpleNamespace(key="TEST", name="Test Project")
issuetype = SimpleNamespace(name="Bug")
comment = SimpleNamespace(comments=[])
field_kwargs: dict[str, Any] = {
"description": description,
"summary": summary,
"labels": labels or [],
"updated": "2024-01-01T00:00:00+0000",
"reporter": reporter,
"assignee": assignee,
"priority": priority,
"status": status,
"resolution": None,
"project": project,
"issuetype": issuetype,
"parent": None,
"created": "2024-01-01T00:00:00+0000",
"duedate": None,
"resolutiondate": None,
"comment": comment,
"attachment": attachments if attachments is not None else [],
}
if extra_fields:
field_kwargs.update(extra_fields)
fields = _FieldsBag(**field_kwargs)
# Use _FieldsBag for the issue itself too, then add the attributes
# that process_jira_issue needs. This prevents MagicMock from
# auto-creating attributes for field names like "reporter", which
# would shadow the real values on issue.fields.
issue = _FieldsBag(
fields=fields,
key=key,
raw={"fields": {"description": description}},
)
return issue # type: ignore[return-value]
def _make_attachment(
attachment_id: str = "att-1",
filename: str = "report.pdf",
size: int = 1024,
content_url: str | None = "https://jira.example.com/attachment/att-1",
mime_type: str = "application/pdf",
created: str | None = "2026-01-15T10:00:00.000+0000",
download_content: bytes = b"binary content",
download_raises: Exception | None = None,
) -> MagicMock:
"""Build a mock Jira attachment resource."""
att = MagicMock()
att.id = attachment_id
att.filename = filename
att.size = size
att.content = content_url
att.mimeType = mime_type
att.created = created
if download_raises:
att.get.side_effect = download_raises
else:
att.get.return_value = download_content
return att
# ===================================================================
# Test 1: Custom Field Extraction
# ===================================================================
class TestCustomFieldExtractorGetAllCustomFields:
def test_returns_only_custom_fields(self) -> None:
"""Given a mix of standard and custom fields, only custom fields are returned."""
mock_client = MagicMock(spec=JIRA)
mock_client.fields.return_value = [
{"id": "summary", "name": "Summary", "custom": False},
{"id": "customfield_10001", "name": "Sprint", "custom": True},
{"id": "status", "name": "Status", "custom": False},
{"id": "customfield_10002", "name": "Story Points", "custom": True},
]
result = CustomFieldExtractor.get_all_custom_fields(mock_client)
assert result == {
"customfield_10001": "Sprint",
"customfield_10002": "Story Points",
}
assert "summary" not in result
assert "status" not in result
def test_returns_empty_dict_when_no_custom_fields(self) -> None:
"""When no custom fields exist, an empty dict is returned."""
mock_client = MagicMock(spec=JIRA)
mock_client.fields.return_value = [
{"id": "summary", "name": "Summary", "custom": False},
]
result = CustomFieldExtractor.get_all_custom_fields(mock_client)
assert result == {}
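# The behavior pinned above amounts to a filter over JIRA.fields(). A
# plausible sketch (not necessarily the real method body):
#
#     @staticmethod
#     def get_all_custom_fields(jira_client):
#         return {
#             field["id"]: field["name"]
#             for field in jira_client.fields()
#             if field["custom"]
#         }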
class TestCustomFieldExtractorGetIssueCustomFields:
def test_string_value_extracted(self) -> None:
"""String custom field values pass through as-is."""
issue = _make_mock_issue(extra_fields={"customfield_10001": "v2024.1"})
mapping = {"customfield_10001": "Release Version"}
result = CustomFieldExtractor.get_issue_custom_fields(issue, mapping)
assert result == {"Release Version": "v2024.1"}
def test_custom_field_option_value_extracted_as_string(self) -> None:
"""CustomFieldOption objects are converted via .value."""
option = MagicMock(spec=CustomFieldOption)
option.value = "Critical Path"
issue = _make_mock_issue(extra_fields={"customfield_10002": option})
mapping = {"customfield_10002": "Category"}
result = CustomFieldExtractor.get_issue_custom_fields(issue, mapping)
assert result == {"Category": "Critical Path"}
def test_user_value_extracted_as_display_name(self) -> None:
"""User objects are converted via .displayName."""
user = MagicMock(spec=User)
user.displayName = "Alice Johnson"
issue = _make_mock_issue(extra_fields={"customfield_10003": user})
mapping = {"customfield_10003": "Reviewer"}
result = CustomFieldExtractor.get_issue_custom_fields(issue, mapping)
assert result == {"Reviewer": "Alice Johnson"}
def test_list_value_extracted_as_space_joined_string(self) -> None:
"""Lists of values are space-joined after individual processing."""
opt1 = MagicMock(spec=CustomFieldOption)
opt1.value = "Backend"
opt2 = MagicMock(spec=CustomFieldOption)
opt2.value = "Frontend"
issue = _make_mock_issue(extra_fields={"customfield_10004": [opt1, opt2]})
mapping = {"customfield_10004": "Components"}
result = CustomFieldExtractor.get_issue_custom_fields(issue, mapping)
assert result == {"Components": "Backend Frontend"}
def test_none_value_excluded(self) -> None:
"""None custom field values are excluded from the result."""
issue = _make_mock_issue(extra_fields={"customfield_10005": None})
mapping = {"customfield_10005": "Optional Field"}
result = CustomFieldExtractor.get_issue_custom_fields(issue, mapping)
assert "Optional Field" not in result
def test_value_exceeding_max_length_excluded(self) -> None:
"""Values longer than max_value_length are excluded."""
long_value = "x" * 300 # exceeds the default 250 limit
issue = _make_mock_issue(extra_fields={"customfield_10006": long_value})
mapping = {"customfield_10006": "Long Description"}
result = CustomFieldExtractor.get_issue_custom_fields(issue, mapping)
assert "Long Description" not in result
def test_value_at_exact_max_length_excluded(self) -> None:
"""Values at exactly max_value_length are excluded (< not <=)."""
exact_value = "x" * 250 # exactly 250, not < 250
issue = _make_mock_issue(extra_fields={"customfield_10007": exact_value})
mapping = {"customfield_10007": "Edge Case"}
result = CustomFieldExtractor.get_issue_custom_fields(issue, mapping)
assert "Edge Case" not in result
def test_value_just_under_max_length_included(self) -> None:
"""Values just under max_value_length are included."""
under_value = "x" * 249
issue = _make_mock_issue(extra_fields={"customfield_10008": under_value})
mapping = {"customfield_10008": "Just Under"}
result = CustomFieldExtractor.get_issue_custom_fields(issue, mapping)
assert result == {"Just Under": under_value}
def test_unmapped_custom_fields_ignored(self) -> None:
"""Custom fields not in the mapping dict are not included."""
issue = _make_mock_issue(
extra_fields={
"customfield_10001": "mapped_value",
"customfield_99999": "unmapped_value",
}
)
mapping = {"customfield_10001": "Mapped Field"}
result = CustomFieldExtractor.get_issue_custom_fields(issue, mapping)
assert "Mapped Field" in result
assert len(result) == 1
class TestProcessJiraIssueWithCustomFields:
def test_custom_fields_added_to_metadata(self) -> None:
"""When custom_fields_mapping is provided, custom fields appear in metadata."""
option = MagicMock(spec=CustomFieldOption)
option.value = "High Impact"
issue = _make_mock_issue(
extra_fields={
"customfield_10001": "Sprint 42",
"customfield_10002": option,
}
)
mapping = {
"customfield_10001": "Sprint",
"customfield_10002": "Impact Level",
}
doc = process_jira_issue(
jira_base_url="https://jira.example.com",
issue=issue,
custom_fields_mapping=mapping,
)
assert doc is not None
assert doc.metadata["Sprint"] == "Sprint 42"
assert doc.metadata["Impact Level"] == "High Impact"
# Standard fields should still be present
assert doc.metadata["key"] == "TEST-1"
def test_no_custom_fields_when_mapping_is_none(self) -> None:
"""When custom_fields_mapping is None, no custom fields in metadata."""
issue = _make_mock_issue(
extra_fields={"customfield_10001": "should_not_appear"}
)
doc = process_jira_issue(
jira_base_url="https://jira.example.com",
issue=issue,
custom_fields_mapping=None,
)
assert doc is not None
# The custom field name should not appear since we didn't provide a mapping
assert "customfield_10001" not in doc.metadata
def test_custom_field_extraction_failure_does_not_break_processing(self) -> None:
"""If custom field extraction raises, the document is still returned."""
issue = _make_mock_issue()
mapping = {"customfield_10001": "Broken Field"}
with patch.object(
CustomFieldExtractor,
"get_issue_custom_fields",
side_effect=RuntimeError("extraction failed"),
):
doc = process_jira_issue(
jira_base_url="https://jira.example.com",
issue=issue,
custom_fields_mapping=mapping,
)
assert doc is not None
# The document should still have standard metadata
assert doc.metadata["key"] == "TEST-1"
# The broken custom field should not have leaked into metadata
assert "Broken Field" not in doc.metadata
# ===================================================================
# Test 2: Attachment Fetching
# ===================================================================
class TestProcessAttachments:
"""Tests for JiraConnector._process_attachments."""
def _make_connector(self, fetch_attachments: bool = True) -> JiraConnector:
"""Create a JiraConnector wired with a mock client."""
connector = JiraConnector(
jira_base_url="https://jira.example.com",
project_key="TEST",
fetch_attachments=fetch_attachments,
)
# Don't use spec=JIRA because _process_attachments accesses
# the private _session attribute, which spec blocks.
mock_client = MagicMock()
mock_client._options = {"rest_api_version": "2"}
mock_client.client_info.return_value = "https://jira.example.com"
connector._jira_client = mock_client
return connector
@patch("onyx.connectors.jira.connector.extract_file_text")
def test_happy_path_two_attachments(self, mock_extract: MagicMock) -> None:
"""Two normal attachments yield two Documents with correct structure."""
mock_extract.side_effect = ["Text from report", "Text from spec"]
att1 = _make_attachment(
attachment_id="att-1",
filename="report.pdf",
size=1024,
download_content=b"report bytes",
)
att2 = _make_attachment(
attachment_id="att-2",
filename="spec.docx",
size=2048,
content_url="https://jira.example.com/attachment/att-2",
mime_type="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
download_content=b"spec bytes",
)
issue = _make_mock_issue(key="TEST-42", attachments=[att1, att2])
connector = self._make_connector()
results = list(
connector._process_attachments(issue, parent_hierarchy_raw_node_id="TEST")
)
docs = [r for r in results if isinstance(r, Document)]
assert len(docs) == 2
# First attachment
assert docs[0].id == "https://jira.example.com/browse/TEST-42/attachments/att-1"
assert docs[0].title == "report.pdf"
assert docs[0].metadata["parent_ticket"] == "TEST-42"
assert docs[0].metadata["attachment_filename"] == "report.pdf"
assert docs[0].metadata["attachment_size"] == "1024"
assert docs[0].parent_hierarchy_raw_node_id == "TEST"
assert docs[0].sections[0].text == "Text from report"
# Second attachment
assert docs[1].id == "https://jira.example.com/browse/TEST-42/attachments/att-2"
assert docs[1].title == "spec.docx"
@patch("onyx.connectors.jira.connector.extract_file_text")
def test_large_attachment_skipped(self, mock_extract: MagicMock) -> None:
"""Attachments exceeding 50 MB are skipped silently (only warning logged)."""
large_att = _make_attachment(
size=_MAX_ATTACHMENT_SIZE_BYTES + 1,
filename="huge.zip",
)
issue = _make_mock_issue(attachments=[large_att])
connector = self._make_connector()
results = list(connector._process_attachments(issue, None))
assert len(results) == 0
mock_extract.assert_not_called()
@patch("onyx.connectors.jira.connector.extract_file_text")
def test_no_content_url_skipped(self, mock_extract: MagicMock) -> None:
"""Attachments with no content URL are skipped gracefully."""
att = _make_attachment(content_url=None, filename="orphan.txt")
issue = _make_mock_issue(attachments=[att])
connector = self._make_connector()
results = list(connector._process_attachments(issue, None))
assert len(results) == 0
mock_extract.assert_not_called()
@patch("onyx.connectors.jira.connector.extract_file_text")
def test_download_failure_yields_connector_failure(
self, mock_extract: MagicMock
) -> None:
"""If the download raises, a ConnectorFailure is yielded; other attachments continue."""
att_bad = _make_attachment(
attachment_id="att-bad",
filename="broken.pdf",
content_url="https://jira.example.com/attachment/att-bad",
download_raises=ConnectionError("download failed"),
)
att_good = _make_attachment(
attachment_id="att-good",
filename="good.pdf",
content_url="https://jira.example.com/attachment/att-good",
download_content=b"good content",
)
issue = _make_mock_issue(attachments=[att_bad, att_good])
connector = self._make_connector()
mock_extract.return_value = "extracted good text"
results = list(connector._process_attachments(issue, None))
failures = [r for r in results if isinstance(r, ConnectorFailure)]
docs = [r for r in results if isinstance(r, Document)]
assert len(failures) == 1
assert "broken.pdf" in failures[0].failure_message
assert len(docs) == 1
assert docs[0].title == "good.pdf"
@patch("onyx.connectors.jira.connector.extract_file_text")
def test_text_extraction_failure_skips_attachment(
self, mock_extract: MagicMock
) -> None:
"""If extract_file_text raises, the attachment is skipped (not a ConnectorFailure)."""
att = _make_attachment(
filename="bad_format.xyz", download_content=b"some bytes"
)
issue = _make_mock_issue(attachments=[att])
connector = self._make_connector()
mock_extract.side_effect = ValueError("Unsupported format")
results = list(connector._process_attachments(issue, None))
assert len(results) == 0
@patch("onyx.connectors.jira.connector.extract_file_text")
def test_empty_text_extraction_skips_attachment(
self, mock_extract: MagicMock
) -> None:
"""Attachments yielding empty text are skipped."""
att = _make_attachment(filename="empty.pdf", download_content=b"some bytes")
issue = _make_mock_issue(attachments=[att])
connector = self._make_connector()
mock_extract.return_value = ""
results = list(connector._process_attachments(issue, None))
assert len(results) == 0
@patch("onyx.connectors.jira.connector.extract_file_text")
def test_whitespace_only_text_skips_attachment(
self, mock_extract: MagicMock
) -> None:
"""Attachments yielding only whitespace are skipped."""
att = _make_attachment(filename="whitespace.txt", download_content=b" ")
issue = _make_mock_issue(attachments=[att])
connector = self._make_connector()
mock_extract.return_value = " \n\t "
results = list(connector._process_attachments(issue, None))
assert len(results) == 0
@patch("onyx.connectors.jira.connector.extract_file_text")
def test_no_attachments_on_issue(self, mock_extract: MagicMock) -> None:
"""When an issue has no attachments, nothing is yielded."""
issue = _make_mock_issue(attachments=[])
connector = self._make_connector()
results = list(connector._process_attachments(issue, None))
assert len(results) == 0
mock_extract.assert_not_called()
@patch("onyx.connectors.jira.connector.extract_file_text")
def test_attachment_field_is_none(self, mock_extract: MagicMock) -> None:
"""When the attachment field is None (not set), nothing is yielded."""
issue = _make_mock_issue()
# Override attachment to be explicitly falsy (best_effort_get_field returns None)
issue.fields.attachment = None
issue.fields.__dict__["attachment"] = None
connector = self._make_connector()
results = list(connector._process_attachments(issue, None))
assert len(results) == 0
mock_extract.assert_not_called()
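# ---------------------------------------------------------------------------
# Taken together, these tests imply a per-attachment loop of roughly this
# shape (illustrative sketch, not the actual connector code; the behavior at
# exactly _MAX_ATTACHMENT_SIZE_BYTES is not pinned by any test):
#
#     for attachment in issue.fields.attachment or []:
#         if attachment.size > _MAX_ATTACHMENT_SIZE_BYTES:
#             continue  # too large: skip with a warning
#         if not attachment.content:
#             continue  # no download URL: skip
#         try:
#             raw_bytes = attachment.get()
#         except Exception as e:
#             yield ConnectorFailure(...)  # download errors surface as failures
#             continue
#         try:
#             text = extract_file_text(...)
#         except Exception:
#             continue  # unextractable format: skip quietly
#         if not text or not text.strip():
#             continue  # empty or whitespace-only text: skip
#         yield Document(...)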
class TestFetchAttachmentsFlag:
"""Verify _process_attachments is only called when fetch_attachments=True."""
def test_fetch_attachments_false_skips_processing(self) -> None:
"""With fetch_attachments=False, _process_attachments should not be invoked
during the load_from_checkpoint flow."""
connector = JiraConnector(
jira_base_url="https://jira.example.com",
project_key="TEST",
fetch_attachments=False,
)
assert connector.fetch_attachments is False
with patch.object(connector, "_process_attachments") as mock_process:
# Simulate what _load_from_checkpoint does: only call
# _process_attachments when self.fetch_attachments is True.
if connector.fetch_attachments:
connector._process_attachments(MagicMock(), None)
mock_process.assert_not_called()
# ===================================================================
# Test 3: Backwards Compatibility
# ===================================================================
class TestBackwardsCompatibility:
def test_default_config_has_flags_off(self) -> None:
"""JiraConnector defaults have both new feature flags disabled."""
connector = JiraConnector(
jira_base_url="https://jira.example.com",
project_key="TEST",
)
assert connector.extract_custom_fields is False
assert connector.fetch_attachments is False
def test_default_config_has_empty_custom_fields_mapping(self) -> None:
"""Before load_credentials, the custom fields mapping is empty."""
connector = JiraConnector(
jira_base_url="https://jira.example.com",
project_key="TEST",
)
assert connector._custom_fields_mapping == {}
def test_process_jira_issue_without_mapping_has_no_custom_fields(self) -> None:
"""Calling process_jira_issue without custom_fields_mapping produces
the same metadata as the pre-enhancement code."""
issue = _make_mock_issue(
key="COMPAT-1",
extra_fields={"customfield_10001": "should_be_ignored"},
)
doc = process_jira_issue(
jira_base_url="https://jira.example.com",
issue=issue,
)
assert doc is not None
# Standard fields present
assert doc.metadata["key"] == "COMPAT-1"
assert doc.metadata["priority"] == "High"
assert doc.metadata["status"] == "Open"
# No custom field should leak through
for key in doc.metadata:
assert not key.startswith(
"customfield_"
), f"Custom field {key} leaked into metadata without mapping"
def test_process_jira_issue_default_params_match_old_signature(self) -> None:
"""process_jira_issue with only the required params works identically
to the pre-enhancement signature (jira_base_url + issue)."""
issue = _make_mock_issue()
doc_new = process_jira_issue(
jira_base_url="https://jira.example.com",
issue=issue,
)
doc_explicit_none = process_jira_issue(
jira_base_url="https://jira.example.com",
issue=issue,
custom_fields_mapping=None,
)
assert doc_new is not None
assert doc_explicit_none is not None
assert doc_new.metadata == doc_explicit_none.metadata
assert doc_new.id == doc_explicit_none.id
def test_load_credentials_does_not_fetch_custom_fields_when_flag_off(self) -> None:
"""When extract_custom_fields=False, load_credentials does not call
get_all_custom_fields."""
connector = JiraConnector(
jira_base_url="https://jira.example.com",
project_key="TEST",
extract_custom_fields=False,
)
with patch("onyx.connectors.jira.connector.build_jira_client") as mock_build:
mock_client = MagicMock(spec=JIRA)
mock_build.return_value = mock_client
connector.load_credentials({"jira_api_token": "tok"})
mock_client.fields.assert_not_called()
assert connector._custom_fields_mapping == {}
def test_load_credentials_fetches_custom_fields_when_flag_on(self) -> None:
"""When extract_custom_fields=True, load_credentials populates the mapping."""
connector = JiraConnector(
jira_base_url="https://jira.example.com",
project_key="TEST",
extract_custom_fields=True,
)
with patch("onyx.connectors.jira.connector.build_jira_client") as mock_build:
mock_client = MagicMock(spec=JIRA)
mock_client.fields.return_value = [
{"id": "summary", "name": "Summary", "custom": False},
{"id": "customfield_10001", "name": "Sprint", "custom": True},
]
mock_build.return_value = mock_client
connector.load_credentials({"jira_api_token": "tok"})
assert connector._custom_fields_mapping == {"customfield_10001": "Sprint"}
def test_load_credentials_handles_custom_fields_fetch_failure(self) -> None:
"""If get_all_custom_fields raises, the mapping stays empty and no exception propagates."""
connector = JiraConnector(
jira_base_url="https://jira.example.com",
project_key="TEST",
extract_custom_fields=True,
)
with patch("onyx.connectors.jira.connector.build_jira_client") as mock_build:
mock_client = MagicMock(spec=JIRA)
mock_client.fields.side_effect = RuntimeError("API unavailable")
mock_build.return_value = mock_client
# Should not raise
connector.load_credentials({"jira_api_token": "tok"})
assert connector._custom_fields_mapping == {}
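# ---------------------------------------------------------------------------
# The three load_credentials tests above imply a flag-gated, failure-tolerant
# fetch, roughly (illustrative):
#
#     self._jira_client = build_jira_client(...)
#     if self.extract_custom_fields:
#         try:
#             self._custom_fields_mapping = (
#                 CustomFieldExtractor.get_all_custom_fields(self._jira_client)
#             )
#         except Exception:
#             logger.warning(...)  # mapping stays empty; credentials still load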

View File

@@ -95,9 +95,9 @@ class TestForceAddSearchToolGuard:
without a vector DB."""
import inspect
- from onyx.tools.tool_constructor import construct_tools
+ from onyx.tools.tool_constructor import _construct_tools_impl
- source = inspect.getsource(construct_tools)
+ source = inspect.getsource(_construct_tools_impl)
assert (
"DISABLE_VECTOR_DB" in source
), "construct_tools should reference DISABLE_VECTOR_DB to suppress force-adding SearchTool"

View File

@@ -0,0 +1,73 @@
"""Shared fixtures for proposal review engine unit tests."""
import json
from unittest.mock import MagicMock
from uuid import UUID
from uuid import uuid4
import pytest
# ---------------------------------------------------------------------------
# Lightweight stand-in for ProposalContext (avoids importing the real one,
# which pulls in SQLAlchemy models that are irrelevant to pure-logic tests).
# The real dataclass lives in context_assembler.py; we import it directly
# where needed but provide a builder here for convenience.
# ---------------------------------------------------------------------------
@pytest.fixture
def make_proposal_context():
"""Factory fixture that builds a ProposalContext with sensible defaults."""
from onyx.server.features.proposal_review.engine.context_assembler import (
ProposalContext,
)
def _make(
proposal_text: str = "Default proposal text.",
budget_text: str = "",
foa_text: str = "",
metadata: dict | None = None,
jira_key: str = "PROJ-100",
) -> "ProposalContext":
return ProposalContext(
proposal_text=proposal_text,
budget_text=budget_text,
foa_text=foa_text,
metadata=metadata or {},
jira_key=jira_key,
)
return _make
@pytest.fixture
def make_rule():
"""Factory fixture that builds a minimal mock ProposalReviewRule."""
def _make(
name: str = "Test Rule",
prompt_template: str = "Evaluate: {{proposal_text}}",
rule_id: UUID | None = None,
) -> MagicMock:
rule = MagicMock()
rule.id = rule_id or uuid4()
rule.name = name
rule.prompt_template = prompt_template
return rule
return _make
@pytest.fixture
def well_formed_llm_json() -> str:
"""A valid JSON string matching the expected rule-evaluator response schema."""
return json.dumps(
{
"verdict": "PASS",
"confidence": "HIGH",
"evidence": "Section 4.2 states the budget is $500k.",
"explanation": "The proposal meets the budget cap requirement.",
"suggested_action": None,
}
)
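# ---------------------------------------------------------------------------
# Example of how a test might combine these fixtures (illustrative usage, not
# part of this diff):
#
#     def test_evaluator_accepts_pass_verdict(
#         make_proposal_context, make_rule, well_formed_llm_json
#     ):
#         context = make_proposal_context(proposal_text="Budget is $500k.")
#         rule = make_rule(prompt_template="Check budget: {{proposal_text}}")
#         # ...feed well_formed_llm_json through the evaluator under test...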

View File

@@ -0,0 +1,409 @@
"""Unit tests for the checklist importer engine component.
Tests cover:
- _parse_json_array: JSON array parsing, code-fence stripping, error handling
- _validate_rule: field validation, type normalization, missing fields
- enumerate_checklist_items: LLM response parsing into ChecklistItems
- decompose_checklist_item: LLM response parsing into rule dicts
- Refinement detection (refinement_needed / refinement_question)
"""
import json
from unittest.mock import MagicMock
from unittest.mock import patch
import pytest
from onyx.server.features.proposal_review.engine.checklist_importer import (
_parse_json_array,
)
from onyx.server.features.proposal_review.engine.checklist_importer import (
_validate_rule,
)
from onyx.server.features.proposal_review.engine.checklist_importer import ChecklistItem
from onyx.server.features.proposal_review.engine.checklist_importer import (
decompose_checklist_item,
)
from onyx.server.features.proposal_review.engine.checklist_importer import (
enumerate_checklist_items,
)
# =====================================================================
# _validate_rule -- single rule validation
# =====================================================================
class TestValidateRule:
"""Tests for _validate_rule (field validation and normalization)."""
def test_valid_rule_passes(self):
raw = {
"name": "Check budget cap",
"description": "Ensures budget is under $500k",
"category": "IR-2: Budget",
"rule_type": "DOCUMENT_CHECK",
"rule_intent": "CHECK",
"prompt_template": "Review {{budget_text}} for compliance.",
"refinement_needed": False,
"refinement_question": None,
}
result = _validate_rule(raw, 0)
assert result is not None
assert result["name"] == "Check budget cap"
assert result["rule_type"] == "DOCUMENT_CHECK"
assert result["rule_intent"] == "CHECK"
assert result["refinement_needed"] is False
def test_missing_name_returns_none(self):
raw = {"prompt_template": "something"}
assert _validate_rule(raw, 0) is None
def test_missing_prompt_template_returns_none(self):
raw = {"name": "A rule"}
assert _validate_rule(raw, 0) is None
def test_invalid_rule_type_defaults_to_custom_nl(self):
raw = {
"name": "Test",
"prompt_template": "t",
"rule_type": "INVALID_TYPE",
}
result = _validate_rule(raw, 0)
assert result["rule_type"] == "CUSTOM_NL"
def test_invalid_rule_intent_defaults_to_check(self):
raw = {
"name": "Test",
"prompt_template": "t",
"rule_intent": "NOTIFY",
}
result = _validate_rule(raw, 0)
assert result["rule_intent"] == "CHECK"
def test_missing_rule_type_defaults_to_custom_nl(self):
raw = {"name": "Test", "prompt_template": "t"}
result = _validate_rule(raw, 0)
assert result["rule_type"] == "CUSTOM_NL"
def test_missing_rule_intent_defaults_to_check(self):
raw = {"name": "Test", "prompt_template": "t"}
result = _validate_rule(raw, 0)
assert result["rule_intent"] == "CHECK"
def test_name_truncated_to_200_chars(self):
raw = {"name": "x" * 300, "prompt_template": "t"}
result = _validate_rule(raw, 0)
assert len(result["name"]) == 200
def test_refinement_needed_truthy_values(self):
raw = {
"name": "Test",
"prompt_template": "t",
"refinement_needed": True,
"refinement_question": "What is the IDC rate?",
}
result = _validate_rule(raw, 0)
assert result["refinement_needed"] is True
assert result["refinement_question"] == "What is the IDC rate?"
def test_refinement_needed_defaults_false(self):
raw = {"name": "Test", "prompt_template": "t"}
result = _validate_rule(raw, 0)
assert result["refinement_needed"] is False
assert result["refinement_question"] is None
@pytest.mark.parametrize(
"rule_type",
["DOCUMENT_CHECK", "METADATA_CHECK", "CROSS_REFERENCE", "CUSTOM_NL"],
)
def test_all_valid_rule_types_accepted(self, rule_type):
raw = {"name": "Test", "prompt_template": "t", "rule_type": rule_type}
result = _validate_rule(raw, 0)
assert result["rule_type"] == rule_type
@pytest.mark.parametrize("intent", ["CHECK", "HIGHLIGHT"])
def test_all_valid_intents_accepted(self, intent):
raw = {"name": "Test", "prompt_template": "t", "rule_intent": intent}
result = _validate_rule(raw, 0)
assert result["rule_intent"] == intent
# =====================================================================
# _parse_json_array -- JSON array parsing
# =====================================================================
class TestParseJsonArray:
"""Tests for _parse_json_array (JSON parsing + code-fence stripping)."""
def test_parses_valid_array(self):
raw = json.dumps([{"name": "Rule A"}, {"name": "Rule B"}])
result = _parse_json_array(raw, context="test")
assert len(result) == 2
assert result[0]["name"] == "Rule A"
def test_strips_markdown_code_fences(self):
inner = json.dumps([{"name": "R"}])
raw = f"```json\n{inner}\n```"
result = _parse_json_array(raw, context="test")
assert len(result) == 1
assert result[0]["name"] == "R"
def test_strips_plain_code_fences(self):
inner = json.dumps([{"key": "val"}])
raw = f"```\n{inner}\n```"
result = _parse_json_array(raw, context="test")
assert len(result) == 1
def test_invalid_json_raises_runtime_error(self):
with pytest.raises(RuntimeError, match="invalid JSON"):
_parse_json_array("not valid json [", context="test")
def test_non_array_json_raises_runtime_error(self):
with pytest.raises(RuntimeError, match="non-array JSON"):
_parse_json_array('{"name": "single object"}', context="test")
def test_empty_array(self):
result = _parse_json_array("[]", context="test")
assert result == []
def test_whitespace_stripped(self):
raw = " \n" + json.dumps([{"a": 1}]) + "\n "
result = _parse_json_array(raw, context="test")
assert len(result) == 1
# =====================================================================
# enumerate_checklist_items -- pass 1
# =====================================================================
class TestEnumerateChecklistItems:
"""Tests for enumerate_checklist_items (LLM → ChecklistItems)."""
def _make_mock_llm(self) -> MagicMock:
mock_llm = MagicMock()
mock_llm.config.model_name = "test-model"
mock_llm.config.model_provider = "test-provider"
mock_response = MagicMock()
mock_llm.invoke.return_value = mock_response
return mock_llm
@patch(
"onyx.server.features.proposal_review.engine.checklist_importer.llm_response_to_string"
)
@patch(
"onyx.server.features.proposal_review.engine.checklist_importer.record_llm_response"
)
@patch(
"onyx.server.features.proposal_review.engine.checklist_importer.llm_generation_span"
)
def test_parses_items(self, mock_span, _mock_record, mock_to_string):
mock_span.return_value.__enter__ = MagicMock()
mock_span.return_value.__exit__ = MagicMock(return_value=False)
items_json = json.dumps(
[
{
"id": "IR-1",
"name": "PI Eligibility",
"category": "IR-1: PI Eligibility",
"description": "Check PI eligibility",
"sub_checks": ["PI has PhD", "PI is faculty"],
},
{
"id": "IR-2",
"name": "Budget Review",
"category": "IR-2: Budget Review",
"description": "Check budget compliance",
"sub_checks": ["Under $500k"],
},
]
)
mock_to_string.return_value = items_json
mock_llm = self._make_mock_llm()
result = enumerate_checklist_items("Some checklist text", mock_llm)
assert len(result) == 2
assert result[0].id == "IR-1"
assert result[0].name == "PI Eligibility"
assert result[0].sub_checks == ["PI has PhD", "PI is faculty"]
assert result[1].id == "IR-2"
mock_llm.invoke.assert_called_once()
@patch(
"onyx.server.features.proposal_review.engine.checklist_importer.llm_response_to_string"
)
@patch(
"onyx.server.features.proposal_review.engine.checklist_importer.record_llm_response"
)
@patch(
"onyx.server.features.proposal_review.engine.checklist_importer.llm_generation_span"
)
def test_skips_items_without_name(self, mock_span, _mock_record, mock_to_string):
mock_span.return_value.__enter__ = MagicMock()
mock_span.return_value.__exit__ = MagicMock(return_value=False)
items_json = json.dumps(
[
{"id": "IR-1", "name": "Valid", "description": "ok"},
{"id": "IR-2", "description": "missing name"},
]
)
mock_to_string.return_value = items_json
mock_llm = self._make_mock_llm()
result = enumerate_checklist_items("text", mock_llm)
assert len(result) == 1
assert result[0].name == "Valid"
@patch(
"onyx.server.features.proposal_review.engine.checklist_importer.llm_response_to_string"
)
@patch(
"onyx.server.features.proposal_review.engine.checklist_importer.record_llm_response"
)
@patch(
"onyx.server.features.proposal_review.engine.checklist_importer.llm_generation_span"
)
def test_generates_default_id(self, mock_span, _mock_record, mock_to_string):
mock_span.return_value.__enter__ = MagicMock()
mock_span.return_value.__exit__ = MagicMock(return_value=False)
items_json = json.dumps([{"name": "No ID Item"}])
mock_to_string.return_value = items_json
mock_llm = self._make_mock_llm()
result = enumerate_checklist_items("text", mock_llm)
assert len(result) == 1
assert result[0].id == "ITEM-1"
@patch(
"onyx.server.features.proposal_review.engine.checklist_importer.llm_generation_span"
)
def test_llm_failure_raises_runtime_error(self, mock_span):
mock_span.return_value.__enter__ = MagicMock()
mock_span.return_value.__exit__ = MagicMock(return_value=False)
mock_llm = MagicMock()
mock_llm.config.model_name = "test-model"
mock_llm.config.model_provider = "test-provider"
mock_llm.invoke.side_effect = RuntimeError("API down")
with pytest.raises(RuntimeError, match="Failed to enumerate"):
enumerate_checklist_items("text", mock_llm)
# =====================================================================
# decompose_checklist_item -- pass 2
# =====================================================================
class TestDecomposeChecklistItem:
"""Tests for decompose_checklist_item (ChecklistItem → rule dicts)."""
SAMPLE_ITEM = ChecklistItem(
id="IR-3",
name="Budget Compliance",
category="IR-3: Budget Compliance",
description="Check budget compliance",
sub_checks=["Budget under 500k", "IDC rates correct"],
)
@patch(
"onyx.server.features.proposal_review.engine.checklist_importer.llm_response_to_string"
)
@patch(
"onyx.server.features.proposal_review.engine.checklist_importer.record_llm_response"
)
@patch(
"onyx.server.features.proposal_review.engine.checklist_importer.llm_generation_span"
)
def test_decomposes_into_rules(self, mock_span, _mock_record, mock_to_string):
mock_span.return_value.__enter__ = MagicMock()
mock_span.return_value.__exit__ = MagicMock(return_value=False)
rules_json = json.dumps(
[
{
"name": "Budget under 500k",
"description": "Check budget cap",
"category": "IR-3: Budget Compliance",
"rule_type": "DOCUMENT_CHECK",
"rule_intent": "CHECK",
"prompt_template": "Check {{budget_text}} for cap.",
},
{
"name": "IDC rates correct",
"description": "Verify IDC rates",
"category": "IR-3: Budget Compliance",
"rule_type": "DOCUMENT_CHECK",
"rule_intent": "CHECK",
"prompt_template": "Check {{budget_text}} for IDC.",
"refinement_needed": True,
"refinement_question": "What is the IDC rate?",
},
]
)
mock_to_string.return_value = rules_json
mock_llm = MagicMock()
mock_llm.config.model_name = "test-model"
mock_llm.config.model_provider = "test-provider"
result = decompose_checklist_item(self.SAMPLE_ITEM, "checklist text", mock_llm)
assert len(result) == 2
assert result[0]["name"] == "Budget under 500k"
assert result[1]["refinement_needed"] is True
assert result[1]["refinement_question"] == "What is the IDC rate?"
@patch(
"onyx.server.features.proposal_review.engine.checklist_importer.llm_response_to_string"
)
@patch(
"onyx.server.features.proposal_review.engine.checklist_importer.record_llm_response"
)
@patch(
"onyx.server.features.proposal_review.engine.checklist_importer.llm_generation_span"
)
def test_fills_missing_category_from_item(
self, mock_span, _mock_record, mock_to_string
):
mock_span.return_value.__enter__ = MagicMock()
mock_span.return_value.__exit__ = MagicMock(return_value=False)
rules_json = json.dumps(
[
{
"name": "Some rule",
"prompt_template": "Check {{proposal_text}}",
# no category — should inherit from item
},
]
)
mock_to_string.return_value = rules_json
mock_llm = MagicMock()
mock_llm.config.model_name = "test-model"
mock_llm.config.model_provider = "test-provider"
result = decompose_checklist_item(self.SAMPLE_ITEM, "text", mock_llm)
assert len(result) == 1
assert result[0]["category"] == "IR-3: Budget Compliance"
@patch(
"onyx.server.features.proposal_review.engine.checklist_importer.llm_generation_span"
)
def test_llm_failure_raises_runtime_error(self, mock_span):
mock_span.return_value.__enter__ = MagicMock()
mock_span.return_value.__exit__ = MagicMock(return_value=False)
mock_llm = MagicMock()
mock_llm.config.model_name = "test-model"
mock_llm.config.model_provider = "test-provider"
mock_llm.invoke.side_effect = RuntimeError("API down")
with pytest.raises(RuntimeError, match="LLM call failed"):
decompose_checklist_item(self.SAMPLE_ITEM, "text", mock_llm)
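For reference, a minimal sketch of the parsing behavior these tests pin down: code-fence stripping, whitespace tolerance, and the two RuntimeError paths. It is reconstructed from the assertions above rather than taken from checklist_importer itself; the helper name and any error wording beyond the matched substrings ("invalid JSON", "non-array JSON") are assumptions.

import json

def _parse_json_array_sketch(raw: str, context: str) -> list:
    # Strip surrounding whitespace and optional ```json / ``` fences.
    text = raw.strip()
    if text.startswith("```"):
        text = text.split("\n", 1)[1] if "\n" in text else ""
        if text.rstrip().endswith("```"):
            text = text.rstrip()[:-3]
    try:
        parsed = json.loads(text)
    except json.JSONDecodeError as e:
        raise RuntimeError(f"{context}: LLM returned invalid JSON") from e
    if not isinstance(parsed, list):
        raise RuntimeError(f"{context}: LLM returned non-array JSON")
    return parsed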

View File

@@ -0,0 +1,462 @@
"""Unit tests for the context assembler engine component.
Tests cover:
- get_proposal_context: full assembly with mocked DB queries
- Budget detection by document role and filename
- FOA detection by document role
- Multiple document concatenation
- Missing documents / missing proposal handling
- _is_budget_filename helper
- _build_parent_document_text helper
- _classify_child_text helper
"""
from unittest.mock import MagicMock
from uuid import uuid4
import pytest
from onyx.server.features.proposal_review.engine.context_assembler import (
_build_parent_document_text,
)
from onyx.server.features.proposal_review.engine.context_assembler import (
_classify_child_text,
)
from onyx.server.features.proposal_review.engine.context_assembler import (
_is_budget_filename,
)
from onyx.server.features.proposal_review.engine.context_assembler import (
get_proposal_context,
)
from onyx.server.features.proposal_review.engine.context_assembler import (
ProposalContext,
)
# =====================================================================
# _is_budget_filename
# =====================================================================
class TestIsBudgetFilename:
"""Tests for _is_budget_filename helper."""
@pytest.mark.parametrize(
"filename",
[
"budget.xlsx",
"BUDGET_justification.pdf",
"project_budget_v2.docx",
"cost_estimate.xlsx",
"financial_plan.pdf",
"annual_expenditure.csv",
],
)
def test_budget_filenames_detected(self, filename):
assert _is_budget_filename(filename) is True
@pytest.mark.parametrize(
"filename",
[
"narrative.pdf",
"abstract.docx",
"biosketch.pdf",
"facilities.docx",
"",
],
)
def test_non_budget_filenames_not_detected(self, filename):
assert _is_budget_filename(filename) is False
def test_none_filename_returns_false(self):
assert _is_budget_filename(None) is False # type: ignore[arg-type]
# =====================================================================
# _build_parent_document_text
# =====================================================================
class TestBuildParentDocumentText:
"""Tests for _build_parent_document_text helper."""
def test_includes_semantic_id(self):
doc = MagicMock()
doc.semantic_id = "PROJ-42"
doc.link = None
doc.doc_metadata = None
result = _build_parent_document_text(doc)
assert "PROJ-42" in result
def test_includes_link(self):
doc = MagicMock()
doc.semantic_id = None
doc.link = "https://jira.example.com/PROJ-42"
doc.doc_metadata = None
result = _build_parent_document_text(doc)
assert "https://jira.example.com/PROJ-42" in result
def test_includes_metadata_as_json(self):
doc = MagicMock()
doc.semantic_id = "PROJ-42"
doc.link = None
doc.doc_metadata = {"sponsor": "NIH", "pi": "Dr. Smith"}
result = _build_parent_document_text(doc)
assert "NIH" in result
assert "Dr. Smith" in result
def test_empty_document_returns_minimal_text(self):
doc = MagicMock()
doc.semantic_id = None
doc.link = None
doc.doc_metadata = None
doc.primary_owners = None
doc.secondary_owners = None
result = _build_parent_document_text(doc)
# With no content at all, the result should be empty or only contain
# structural headers. Verify it doesn't contain any meaningful data.
assert "NIH" not in result
assert "Dr. Smith" not in result
# =====================================================================
# _classify_child_text
# =====================================================================
class TestClassifyChildText:
"""Tests for _classify_child_text helper."""
def test_budget_filename_classified(self):
doc = MagicMock()
doc.semantic_id = "PROJ-42/attachments/budget_v2.xlsx"
budget_parts: list[str] = []
foa_parts: list[str] = []
_classify_child_text(doc, "budget content", budget_parts, foa_parts)
assert "budget content" in budget_parts
assert foa_parts == []
def test_foa_filename_classified(self):
doc = MagicMock()
doc.semantic_id = "PROJ-42/attachments/foa_document.pdf"
budget_parts: list[str] = []
foa_parts: list[str] = []
_classify_child_text(doc, "foa content", budget_parts, foa_parts)
assert foa_parts == ["foa content"]
assert budget_parts == []
def test_solicitation_keyword_classified_as_foa(self):
doc = MagicMock()
doc.semantic_id = "solicitation_2024.pdf"
budget_parts: list[str] = []
foa_parts: list[str] = []
_classify_child_text(doc, "solicitation text", budget_parts, foa_parts)
assert "solicitation text" in foa_parts
def test_rfa_keyword_classified_as_foa(self):
doc = MagicMock()
doc.semantic_id = "RFA-AI-24-001.pdf"
budget_parts: list[str] = []
foa_parts: list[str] = []
_classify_child_text(doc, "rfa text", budget_parts, foa_parts)
assert "rfa text" in foa_parts
def test_unrelated_filename_not_classified(self):
doc = MagicMock()
doc.semantic_id = "narrative_v3.docx"
budget_parts: list[str] = []
foa_parts: list[str] = []
_classify_child_text(doc, "narrative text", budget_parts, foa_parts)
assert budget_parts == []
assert foa_parts == []
def test_none_semantic_id_not_classified(self):
doc = MagicMock()
doc.semantic_id = None
budget_parts: list[str] = []
foa_parts: list[str] = []
_classify_child_text(doc, "some text", budget_parts, foa_parts)
assert budget_parts == []
assert foa_parts == []
# =====================================================================
# get_proposal_context -- full assembly with mocked DB
# =====================================================================
def _make_mock_proposal(document_id="DOC-123"):
"""Create a mock ProposalReviewProposal."""
proposal = MagicMock()
proposal.id = uuid4()
proposal.document_id = document_id
return proposal
def _make_mock_document(
doc_id="DOC-123",
semantic_id="PROJ-42",
link=None,
doc_metadata=None,
):
"""Create a mock Document."""
doc = MagicMock()
doc.id = doc_id
doc.semantic_id = semantic_id
doc.link = link
doc.doc_metadata = doc_metadata or {}
return doc
def _make_mock_review_doc(
file_name="doc.pdf",
document_role="SUPPORTING",
extracted_text="Some text.",
):
"""Create a mock ProposalReviewDocument."""
doc = MagicMock()
doc.file_name = file_name
doc.document_role = document_role
doc.extracted_text = extracted_text
return doc
class TestGetProposalContext:
"""Tests for get_proposal_context with mocked DB session."""
def _setup_db(
self,
proposal=None,
parent_doc=None,
child_docs=None,
manual_docs=None,
):
"""Build a mock db_session with controlled query results.
The function under test issues four queries:
1. ProposalReviewProposal by id
2. Document by id (parent doc)
3. Document.id.like(...) (child docs)
4. ProposalReviewDocument by proposal_id (manual docs)
We use side_effect on db_session.query() to differentiate them.
"""
db = MagicMock()
# We need to handle multiple .query() calls with different model classes.
# The function calls:
# db_session.query(ProposalReviewProposal).filter(...).one_or_none()
# db_session.query(Document).filter(...).one_or_none()
# db_session.query(Document).filter(..., ...).all()
# db_session.query(ProposalReviewDocument).filter(...).order_by(...).all()
call_count = {"n": 0}
def query_side_effect(model_cls):
call_count["n"] += 1
mock_query = MagicMock()
model_name = getattr(model_cls, "__name__", str(model_cls))
if model_name == "ProposalReviewProposal":
mock_query.filter.return_value = mock_query
mock_query.one_or_none.return_value = proposal
return mock_query
if model_name == "Document":
# First Document query is for parent (one_or_none),
# second is for children (all).
# We track via a sub-counter.
if not hasattr(query_side_effect, "_doc_calls"):
query_side_effect._doc_calls = 0
query_side_effect._doc_calls += 1
if query_side_effect._doc_calls == 1:
# Parent doc query
mock_query.filter.return_value = mock_query
mock_query.one_or_none.return_value = parent_doc
else:
# Child docs query
mock_query.filter.return_value = mock_query
mock_query.all.return_value = child_docs or []
return mock_query
if model_name == "ProposalReviewDocument":
mock_query.filter.return_value = mock_query
mock_query.order_by.return_value = mock_query
mock_query.all.return_value = manual_docs or []
return mock_query
return mock_query
db.query.side_effect = query_side_effect
return db
def test_basic_assembly_with_parent_doc(self):
proposal = _make_mock_proposal()
parent_doc = _make_mock_document(
semantic_id="PROJ-42",
doc_metadata={"sponsor": "NIH", "pi": "Dr. Smith"},
)
db = self._setup_db(proposal=proposal, parent_doc=parent_doc)
ctx = get_proposal_context(proposal.id, db)
assert isinstance(ctx, ProposalContext)
assert ctx.jira_key == "PROJ-42"
assert ctx.metadata["sponsor"] == "NIH"
assert "PROJ-42" in ctx.proposal_text
def test_proposal_not_found_returns_empty_context(self):
db = self._setup_db(proposal=None)
ctx = get_proposal_context(uuid4(), db)
# When proposal is not found, returns a safe empty context
assert isinstance(ctx, ProposalContext)
assert ctx.proposal_text == ""
assert ctx.metadata == {}
def test_budget_document_by_role(self):
proposal = _make_mock_proposal()
parent_doc = _make_mock_document()
budget_doc = _make_mock_review_doc(
file_name="project_budget.xlsx",
document_role="BUDGET",
extracted_text="Total: $500k direct costs.",
)
db = self._setup_db(
proposal=proposal,
parent_doc=parent_doc,
manual_docs=[budget_doc],
)
ctx = get_proposal_context(proposal.id, db)
assert "$500k" in ctx.budget_text
# Budget text should also appear in proposal_text (all docs)
assert "$500k" in ctx.proposal_text
def test_budget_document_by_filename(self):
proposal = _make_mock_proposal()
parent_doc = _make_mock_document()
budget_doc = _make_mock_review_doc(
file_name="budget_justification.pdf",
document_role="SUPPORTING", # role is not BUDGET
extracted_text="Budget justification: $200k.",
)
db = self._setup_db(
proposal=proposal,
parent_doc=parent_doc,
manual_docs=[budget_doc],
)
ctx = get_proposal_context(proposal.id, db)
assert "$200k" in ctx.budget_text
def test_foa_document_by_role(self):
proposal = _make_mock_proposal()
parent_doc = _make_mock_document()
foa_doc = _make_mock_review_doc(
file_name="rfa-ai-24-001.html",
document_role="FOA",
extracted_text="This is the funding opportunity announcement.",
)
db = self._setup_db(
proposal=proposal,
parent_doc=parent_doc,
manual_docs=[foa_doc],
)
ctx = get_proposal_context(proposal.id, db)
assert "funding opportunity announcement" in ctx.foa_text
def test_multiple_documents_concatenated(self):
proposal = _make_mock_proposal()
parent_doc = _make_mock_document(semantic_id="PROJ-42")
doc_a = _make_mock_review_doc(
file_name="narrative.pdf",
document_role="SUPPORTING",
extracted_text="Section A content.",
)
doc_b = _make_mock_review_doc(
file_name="abstract.pdf",
document_role="SUPPORTING",
extracted_text="Section B content.",
)
db = self._setup_db(
proposal=proposal,
parent_doc=parent_doc,
manual_docs=[doc_a, doc_b],
)
ctx = get_proposal_context(proposal.id, db)
assert "Section A content" in ctx.proposal_text
assert "Section B content" in ctx.proposal_text
def test_no_documents_returns_minimal_text(self):
proposal = _make_mock_proposal()
# Parent doc exists but has no meaningful content fields
parent_doc = _make_mock_document(
semantic_id=None,
link=None,
doc_metadata=None,
)
parent_doc.primary_owners = None
parent_doc.secondary_owners = None
db = self._setup_db(
proposal=proposal,
parent_doc=parent_doc,
manual_docs=[],
)
ctx = get_proposal_context(proposal.id, db)
# No meaningful content — may contain structural headers but no real data
assert "NIH" not in ctx.proposal_text
assert ctx.budget_text == ""
assert ctx.foa_text == ""
def test_no_parent_doc_still_returns_context(self):
proposal = _make_mock_proposal()
manual_doc = _make_mock_review_doc(
file_name="narrative.pdf",
document_role="SUPPORTING",
extracted_text="Manual upload content.",
)
db = self._setup_db(
proposal=proposal,
parent_doc=None,
manual_docs=[manual_doc],
)
ctx = get_proposal_context(proposal.id, db)
assert "Manual upload content" in ctx.proposal_text
assert ctx.jira_key == ""
assert ctx.metadata == {}
def test_manual_doc_with_no_text_is_skipped(self):
proposal = _make_mock_proposal()
parent_doc = _make_mock_document()
empty_doc = _make_mock_review_doc(
file_name="empty.pdf",
document_role="SUPPORTING",
extracted_text=None,
)
db = self._setup_db(
proposal=proposal,
parent_doc=parent_doc,
manual_docs=[empty_doc],
)
ctx = get_proposal_context(proposal.id, db)
# The empty doc should not contribute to proposal_text
assert "empty.pdf" not in ctx.proposal_text

View File

@@ -0,0 +1,227 @@
"""Unit tests for the FOA fetcher engine component.
Tests cover:
- _determine_domain: opportunity ID prefix -> agency domain mapping
- fetch_foa: search flow with mocked web search provider and crawler
- Graceful failure when no web search provider is configured
- Empty / missing opportunity ID handling
"""
from unittest.mock import MagicMock
from unittest.mock import patch
from uuid import uuid4
import pytest
from onyx.server.features.proposal_review.engine.foa_fetcher import _determine_domain
from onyx.server.features.proposal_review.engine.foa_fetcher import fetch_foa
# =====================================================================
# _determine_domain -- prefix -> domain mapping
# =====================================================================
class TestDetermineDomain:
"""Tests for _determine_domain (opportunity ID prefix detection)."""
@pytest.mark.parametrize(
"opp_id, expected_domain",
[
("RFA-AI-24-001", "grants.nih.gov"),
("PA-24-123", "grants.nih.gov"),
("PAR-24-100", "grants.nih.gov"),
("R01-AI-12345", "grants.nih.gov"),
("R21-GM-67890", "grants.nih.gov"),
("U01-CA-11111", "grants.nih.gov"),
("NOT-OD-24-100", "grants.nih.gov"),
("NSF-24-567", "nsf.gov"),
("DE-FOA-0003000", "energy.gov"),
("HRSA-25-001", "hrsa.gov"),
("W911NF-24-R-0001", "grants.gov"),
("FA8750-24-S-0001", "grants.gov"),
("N00014-24-S-0001", "grants.gov"),
("NOFO-2024-001", "grants.gov"),
],
)
def test_known_prefixes(self, opp_id, expected_domain):
assert _determine_domain(opp_id) == expected_domain
def test_unknown_prefix_returns_none(self):
assert _determine_domain("UNKNOWN-123") is None
def test_purely_numeric_id_returns_grants_gov(self):
assert _determine_domain("12345-67890") == "grants.gov"
def test_case_insensitive_matching(self):
assert _determine_domain("rfa-ai-24-001") == "grants.nih.gov"
assert _determine_domain("nsf-24-567") == "nsf.gov"
def test_empty_string_returns_none(self):
# Empty string is not purely numeric after dash removal, so returns None
assert _determine_domain("") is None
# =====================================================================
# fetch_foa -- search flow
# =====================================================================
class TestFetchFoa:
"""Tests for fetch_foa with mocked dependencies."""
def _mock_db_session(self, existing_foa=None):
"""Build a mock db_session that returns existing_foa for the FOA query."""
db_session = MagicMock()
query_mock = MagicMock()
db_session.query.return_value = query_mock
query_mock.filter.return_value = query_mock
query_mock.first.return_value = existing_foa
return db_session
def test_empty_opportunity_id_returns_none(self):
db = MagicMock()
assert fetch_foa("", uuid4(), db) is None
assert fetch_foa(" ", uuid4(), db) is None
def test_none_opportunity_id_returns_none(self):
db = MagicMock()
assert fetch_foa(None, uuid4(), db) is None # type: ignore[arg-type]
def test_existing_foa_is_returned_without_search(self):
existing = MagicMock()
existing.extracted_text = "Previously fetched FOA content."
db = self._mock_db_session(existing_foa=existing)
result = fetch_foa("RFA-AI-24-001", uuid4(), db)
assert result == "Previously fetched FOA content."
def test_search_flow_fetches_and_saves(self):
"""Full happy-path: search returns a URL, crawler fetches content, doc is saved."""
# Setup: no existing FOA
db = self._mock_db_session(existing_foa=None)
# Mock the web search provider
search_result = MagicMock()
search_result.link = "https://grants.nih.gov/foa/RFA-AI-24-001"
provider = MagicMock()
provider.search.return_value = [search_result]
# Mock the crawler
content = MagicMock()
content.scrape_successful = True
content.full_content = "Full FOA text from NIH."
crawler_instance = MagicMock()
crawler_instance.contents.return_value = [content]
# The function imports these lazily at call time, so we patch the names
# in their source modules
import_target_provider = (
"onyx.tools.tool_implementations.web_search.providers.get_default_provider"
)
import_target_crawler = (
"onyx.tools.tool_implementations.open_url.onyx_web_crawler.OnyxWebCrawler"
)
with (
patch(import_target_provider, return_value=provider),
patch(import_target_crawler, return_value=crawler_instance),
):
result = fetch_foa("RFA-AI-24-001", uuid4(), db)
assert result == "Full FOA text from NIH."
db.add.assert_called_once()
db.flush.assert_called_once()
def test_no_provider_configured_returns_none(self):
"""If get_default_provider raises or returns None, fetch_foa returns None."""
db = self._mock_db_session(existing_foa=None)
import_target = (
"onyx.tools.tool_implementations.web_search.providers.get_default_provider"
)
with patch(import_target, return_value=None):
result = fetch_foa("RFA-AI-24-001", uuid4(), db)
assert result is None
def test_provider_import_failure_returns_none(self):
"""If the web search provider module can't be imported, returns None."""
db = self._mock_db_session(existing_foa=None)
import_target = (
"onyx.tools.tool_implementations.web_search.providers.get_default_provider"
)
with patch(import_target, side_effect=ImportError("module not found")):
result = fetch_foa("RFA-AI-24-001", uuid4(), db)
assert result is None
def test_search_returns_no_results(self):
"""If the search returns an empty list, fetch_foa returns None."""
db = self._mock_db_session(existing_foa=None)
provider = MagicMock()
provider.search.return_value = []
import_target = (
"onyx.tools.tool_implementations.web_search.providers.get_default_provider"
)
with patch(import_target, return_value=provider):
result = fetch_foa("NSF-24-567", uuid4(), db)
assert result is None
def test_crawler_failure_returns_none(self):
"""If the crawler raises an exception, fetch_foa returns None."""
db = self._mock_db_session(existing_foa=None)
search_result = MagicMock()
search_result.link = "https://nsf.gov/foa/24-567"
provider = MagicMock()
provider.search.return_value = [search_result]
import_target_provider = (
"onyx.tools.tool_implementations.web_search.providers.get_default_provider"
)
import_target_crawler = (
"onyx.tools.tool_implementations.open_url.onyx_web_crawler.OnyxWebCrawler"
)
with (
patch(import_target_provider, return_value=provider),
patch(import_target_crawler, side_effect=Exception("crawl failed")),
):
result = fetch_foa("NSF-24-567", uuid4(), db)
assert result is None
def test_scrape_unsuccessful_returns_none(self):
"""If the crawler succeeds but scrape_successful is False, returns None."""
db = self._mock_db_session(existing_foa=None)
search_result = MagicMock()
search_result.link = "https://nsf.gov/foa/24-567"
provider = MagicMock()
provider.search.return_value = [search_result]
content = MagicMock()
content.scrape_successful = False
content.full_content = None
crawler_instance = MagicMock()
crawler_instance.contents.return_value = [content]
import_target_provider = (
"onyx.tools.tool_implementations.web_search.providers.get_default_provider"
)
import_target_crawler = (
"onyx.tools.tool_implementations.open_url.onyx_web_crawler.OnyxWebCrawler"
)
with (
patch(import_target_provider, return_value=provider),
patch(import_target_crawler, return_value=crawler_instance),
):
result = fetch_foa("NSF-24-567", uuid4(), db)
assert result is None
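A minimal sketch of the prefix-to-domain mapping these tests encode, assuming a case-insensitive prefix table with a purely-numeric fallback to grants.gov. The table and helper name are reconstructed from the parametrized cases, not taken from the real _determine_domain.

_DOMAIN_BY_PREFIX = {
    ("RFA", "PA", "PAR", "R01", "R21", "U01", "NOT"): "grants.nih.gov",
    ("NSF",): "nsf.gov",
    ("DE",): "energy.gov",
    ("HRSA",): "hrsa.gov",
    ("W911NF", "FA8750", "N00014", "NOFO"): "grants.gov",
}

def _determine_domain_sketch(opportunity_id: str) -> str | None:
    # Case-insensitive prefix match; purely numeric IDs default to grants.gov.
    upper = opportunity_id.upper()
    for prefixes, domain in _DOMAIN_BY_PREFIX.items():
        if upper.startswith(prefixes):
            return domain
    if upper.replace("-", "").isdigit():
        return "grants.gov"
    return None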

View File

@@ -0,0 +1,354 @@
"""Unit tests for the rule evaluator engine component.
Tests cover:
- Template variable substitution (_fill_template)
- LLM response parsing (_parse_llm_response)
- Malformed / missing-field responses
- Markdown code fence stripping
- Verdict and confidence validation/normalization
- Token usage extraction
"""
import json
from unittest.mock import MagicMock
from unittest.mock import patch
import pytest
from onyx.server.features.proposal_review.engine.rule_evaluator import (
_extract_token_usage,
)
from onyx.server.features.proposal_review.engine.rule_evaluator import _fill_template
from onyx.server.features.proposal_review.engine.rule_evaluator import (
_parse_llm_response,
)
from onyx.server.features.proposal_review.engine.rule_evaluator import evaluate_rule
# =====================================================================
# _fill_template -- variable substitution
# =====================================================================
class TestFillTemplate:
"""Tests for _fill_template (prompt variable substitution)."""
def test_replaces_proposal_text(self, make_proposal_context):
ctx = make_proposal_context(proposal_text="My great proposal.")
result = _fill_template("Review: {{proposal_text}}", ctx)
assert result == "Review: My great proposal."
def test_replaces_budget_text(self, make_proposal_context):
ctx = make_proposal_context(budget_text="$100k total")
result = _fill_template("Budget info: {{budget_text}}", ctx)
assert result == "Budget info: $100k total"
def test_replaces_foa_text(self, make_proposal_context):
ctx = make_proposal_context(foa_text="NSF solicitation 24-567")
result = _fill_template("FOA: {{foa_text}}", ctx)
assert result == "FOA: NSF solicitation 24-567"
def test_replaces_jira_key(self, make_proposal_context):
ctx = make_proposal_context(jira_key="PROJ-42")
result = _fill_template("Ticket: {{jira_key}}", ctx)
assert result == "Ticket: PROJ-42"
def test_replaces_metadata_as_json(self, make_proposal_context):
ctx = make_proposal_context(metadata={"sponsor": "NIH", "pi": "Dr. Smith"})
result = _fill_template("Meta: {{metadata}}", ctx)
# Should be valid JSON
parsed = json.loads(result.replace("Meta: ", ""))
assert parsed["sponsor"] == "NIH"
assert parsed["pi"] == "Dr. Smith"
def test_replaces_metadata_dot_field(self, make_proposal_context):
ctx = make_proposal_context(
metadata={"sponsor": "NIH", "deadline": "2025-01-15"}
)
result = _fill_template(
"Sponsor is {{metadata.sponsor}}, due {{metadata.deadline}}", ctx
)
assert result == "Sponsor is NIH, due 2025-01-15"
def test_metadata_dot_field_with_dict_value(self, make_proposal_context):
ctx = make_proposal_context(
metadata={"budget_detail": {"direct": 100, "indirect": 50}}
)
result = _fill_template("Details: {{metadata.budget_detail}}", ctx)
parsed = json.loads(result.replace("Details: ", ""))
assert parsed == {"direct": 100, "indirect": 50}
def test_metadata_dot_field_missing_returns_empty(self, make_proposal_context):
ctx = make_proposal_context(metadata={"sponsor": "NIH"})
result = _fill_template("Agency: {{metadata.agency}}", ctx)
assert result == "Agency: "
def test_replaces_all_placeholders_in_one_template(self, make_proposal_context):
ctx = make_proposal_context(
proposal_text="proposal body",
budget_text="budget body",
foa_text="foa body",
jira_key="PROJ-99",
metadata={"sponsor": "NSF"},
)
template = (
"{{jira_key}}: {{proposal_text}} | "
"Budget: {{budget_text}} | FOA: {{foa_text}} | "
"Sponsor: {{metadata.sponsor}} | All: {{metadata}}"
)
result = _fill_template(template, ctx)
assert "PROJ-99" in result
assert "proposal body" in result
assert "budget body" in result
assert "foa body" in result
assert "NSF" in result
def test_none_values_replaced_with_empty_string(self, make_proposal_context):
ctx = make_proposal_context(
proposal_text=None, # type: ignore[arg-type]
budget_text=None, # type: ignore[arg-type]
foa_text=None, # type: ignore[arg-type]
jira_key=None, # type: ignore[arg-type]
)
result = _fill_template(
"{{proposal_text}}|{{budget_text}}|{{foa_text}}|{{jira_key}}", ctx
)
assert result == "|||"
# =====================================================================
# _parse_llm_response -- JSON parsing and validation
# =====================================================================
class TestParseLLMResponse:
"""Tests for _parse_llm_response (JSON parsing / verdict validation)."""
def test_parses_well_formed_json(self, well_formed_llm_json):
result = _parse_llm_response(well_formed_llm_json)
assert result["verdict"] == "PASS"
assert result["confidence"] == "HIGH"
assert result["evidence"] == "Section 4.2 states the budget is $500k."
assert result["explanation"] == "The proposal meets the budget cap requirement."
assert result["suggested_action"] is None
def test_strips_markdown_json_fence(self):
inner = json.dumps(
{
"verdict": "FAIL",
"confidence": "MEDIUM",
"evidence": "x",
"explanation": "y",
"suggested_action": "Fix it.",
}
)
raw = f"```json\n{inner}\n```"
result = _parse_llm_response(raw)
assert result["verdict"] == "FAIL"
assert result["confidence"] == "MEDIUM"
assert result["suggested_action"] == "Fix it."
def test_strips_bare_code_fence(self):
inner = json.dumps(
{
"verdict": "FLAG",
"confidence": "LOW",
"evidence": "e",
"explanation": "exp",
"suggested_action": None,
}
)
raw = f"```\n{inner}\n```"
result = _parse_llm_response(raw)
assert result["verdict"] == "FLAG"
def test_malformed_json_returns_needs_review(self):
result = _parse_llm_response("this is not json at all")
assert result["verdict"] == "NEEDS_REVIEW"
assert result["confidence"] == "LOW"
assert result["evidence"] is None
assert "Failed to parse" in result["explanation"]
assert result["suggested_action"] is not None
def test_invalid_verdict_normalised_to_needs_review(self):
raw = json.dumps(
{
"verdict": "MAYBE",
"confidence": "HIGH",
"evidence": "e",
"explanation": "x",
"suggested_action": None,
}
)
result = _parse_llm_response(raw)
assert result["verdict"] == "NEEDS_REVIEW"
def test_invalid_confidence_normalised_to_low(self):
raw = json.dumps(
{
"verdict": "PASS",
"confidence": "VERY_HIGH",
"evidence": "e",
"explanation": "x",
"suggested_action": None,
}
)
result = _parse_llm_response(raw)
assert result["confidence"] == "LOW"
def test_missing_verdict_defaults_to_needs_review(self):
raw = json.dumps(
{
"confidence": "HIGH",
"evidence": "e",
"explanation": "x",
"suggested_action": None,
}
)
result = _parse_llm_response(raw)
assert result["verdict"] == "NEEDS_REVIEW"
def test_missing_confidence_defaults_to_low(self):
raw = json.dumps(
{
"verdict": "PASS",
"evidence": "e",
"explanation": "x",
"suggested_action": None,
}
)
result = _parse_llm_response(raw)
assert result["confidence"] == "LOW"
@pytest.mark.parametrize(
"verdict", ["PASS", "FAIL", "FLAG", "NEEDS_REVIEW", "NOT_APPLICABLE"]
)
def test_all_valid_verdicts_accepted(self, verdict):
raw = json.dumps(
{
"verdict": verdict,
"confidence": "HIGH",
"evidence": "e",
"explanation": "x",
"suggested_action": None,
}
)
result = _parse_llm_response(raw)
assert result["verdict"] == verdict
def test_verdict_case_insensitive(self):
raw = json.dumps(
{
"verdict": "pass",
"confidence": "high",
"evidence": "e",
"explanation": "x",
"suggested_action": None,
}
)
result = _parse_llm_response(raw)
assert result["verdict"] == "PASS"
assert result["confidence"] == "HIGH"
def test_whitespace_around_json_is_tolerated(self):
inner = json.dumps(
{
"verdict": "PASS",
"confidence": "HIGH",
"evidence": "e",
"explanation": "x",
"suggested_action": None,
}
)
raw = f" \n {inner} \n "
result = _parse_llm_response(raw)
assert result["verdict"] == "PASS"
# =====================================================================
# _extract_token_usage
# =====================================================================
class TestExtractTokenUsage:
"""Tests for _extract_token_usage (best-effort token extraction)."""
def test_extracts_total_tokens(self):
response = MagicMock()
response.usage.total_tokens = 1234
assert _extract_token_usage(response) == 1234
def test_sums_prompt_and_completion_tokens_when_no_total(self):
response = MagicMock()
response.usage.total_tokens = None
response.usage.prompt_tokens = 100
response.usage.completion_tokens = 50
assert _extract_token_usage(response) == 150
def test_returns_none_when_no_usage(self):
response = MagicMock(spec=[]) # no usage attr
assert _extract_token_usage(response) is None
def test_returns_none_when_usage_is_none(self):
response = MagicMock()
response.usage = None
assert _extract_token_usage(response) is None
# =====================================================================
# evaluate_rule -- integration of template + LLM call + parsing
# =====================================================================
class TestEvaluateRule:
"""Tests for the top-level evaluate_rule function with mocked LLM."""
@patch("onyx.server.features.proposal_review.engine.rule_evaluator.get_default_llm")
@patch(
"onyx.server.features.proposal_review.engine.rule_evaluator.llm_response_to_string"
)
def test_successful_evaluation(
self, mock_to_string, mock_get_llm, make_rule, make_proposal_context
):
llm_response_json = json.dumps(
{
"verdict": "PASS",
"confidence": "HIGH",
"evidence": "Found in section 3.",
"explanation": "Meets requirement.",
"suggested_action": None,
}
)
mock_to_string.return_value = llm_response_json
mock_llm = MagicMock()
mock_llm.config.model_name = "gpt-4o"
mock_llm.invoke.return_value = MagicMock(usage=MagicMock(total_tokens=500))
mock_get_llm.return_value = mock_llm
rule = make_rule(prompt_template="Check: {{proposal_text}}")
ctx = make_proposal_context(proposal_text="Grant text here.")
result = evaluate_rule(rule, ctx)
assert result["verdict"] == "PASS"
assert result["confidence"] == "HIGH"
assert result["llm_model"] == "gpt-4o"
assert result["llm_tokens_used"] == 500
@patch("onyx.server.features.proposal_review.engine.rule_evaluator.get_default_llm")
def test_llm_failure_returns_needs_review(
self, mock_get_llm, make_rule, make_proposal_context
):
mock_get_llm.side_effect = RuntimeError("API key expired")
rule = make_rule()
ctx = make_proposal_context()
result = evaluate_rule(rule, ctx)
assert result["verdict"] == "NEEDS_REVIEW"
assert result["confidence"] == "LOW"
assert "LLM evaluation failed" in result["explanation"]
assert result["llm_model"] is None
assert result["llm_tokens_used"] is None

View File

@@ -145,6 +145,7 @@ export function Table<TData>(props: DataTableProps<TData>) {
pageSize,
initialSorting,
initialColumnVisibility,
onColumnVisibilityChange,
initialRowSelection,
initialViewSelected,
draggable,
@@ -223,6 +224,7 @@ export function Table<TData>(props: DataTableProps<TData>) {
pageSize: effectivePageSize,
initialSorting,
initialColumnVisibility,
onColumnVisibilityChange,
initialRowSelection,
initialViewSelected,
getRowId,

View File

@@ -1,7 +1,7 @@
"use client";
"use no memo";
import { useState, useEffect, useMemo, useRef } from "react";
import { useState, useEffect, useMemo, useRef, useCallback } from "react";
import {
useReactTable,
getCoreRowModel,
@@ -103,6 +103,8 @@ interface UseDataTableOptions<TData extends RowData> {
initialSorting?: SortingState;
/** Initial column visibility state. @default {} */
initialColumnVisibility?: VisibilityState;
/** Called when column visibility changes. */
onColumnVisibilityChange?: (visibility: VisibilityState) => void;
/** Initial row selection state. Keys are row IDs (from `getRowId`), values are `true`. @default {} */
initialRowSelection?: RowSelectionState;
/** When true AND `initialRowSelection` is non-empty, start in view-selected mode (filtered to selected rows). @default false */
@@ -199,6 +201,7 @@ export default function useDataTable<TData extends RowData>(
columnResizeMode = "onChange",
initialSorting = [],
initialColumnVisibility = {},
onColumnVisibilityChange: onColumnVisibilityChangeProp,
initialRowSelection = {},
initialViewSelected = false,
getRowId,
@@ -215,9 +218,19 @@ export default function useDataTable<TData extends RowData>(
const [rowSelection, setRowSelection] =
useState<RowSelectionState>(initialRowSelection);
const [columnSizing, setColumnSizing] = useState<ColumnSizingState>({});
const [columnVisibility, setColumnVisibility] = useState<VisibilityState>(
const [columnVisibility, setColumnVisibilityRaw] = useState<VisibilityState>(
initialColumnVisibility
);
const setColumnVisibility: typeof setColumnVisibilityRaw = useCallback(
(updater) => {
setColumnVisibilityRaw((prev) => {
const next = typeof updater === "function" ? updater(prev) : updater;
onColumnVisibilityChangeProp?.(next);
return next;
});
},
[onColumnVisibilityChangeProp]
);
const [pagination, setPagination] = useState<PaginationState>({
pageIndex: 0,
pageSize: pageSizeOption,

View File

@@ -146,6 +146,8 @@ export interface DataTableProps<TData> {
initialSorting?: SortingState;
/** Initial column visibility state. */
initialColumnVisibility?: VisibilityState;
/** Called when column visibility changes. Receives the full visibility state. */
onColumnVisibilityChange?: (visibility: VisibilityState) => void;
/** Initial row selection state. Keys are row IDs (from `getRowId`), values are `true`. */
initialRowSelection?: Record<string, boolean>;
/** When true AND `initialRowSelection` is non-empty, start in view-selected mode. @default false */

View File

@@ -7,7 +7,7 @@ import { cn } from "@opal/utils";
// Types
// ---------------------------------------------------------------------------
type TagColor = "green" | "purple" | "blue" | "gray" | "amber";
type TagColor = "green" | "purple" | "blue" | "gray" | "amber" | "red";
type TagSize = "sm" | "md";
@@ -34,6 +34,7 @@ const COLOR_CONFIG: Record<TagColor, { bg: string; text: string }> = {
blue: { bg: "bg-theme-blue-01", text: "text-theme-blue-05" },
purple: { bg: "bg-theme-purple-01", text: "text-theme-purple-05" },
amber: { bg: "bg-theme-amber-01", text: "text-theme-amber-05" },
red: { bg: "bg-theme-red-01", text: "text-theme-red-05" },
gray: { bg: "bg-background-tint-02", text: "text-text-03" },
};

View File

@@ -0,0 +1,68 @@
"use client";
import React, { useRef } from "react";
import { SvgUploadCloud } from "@opal/icons";
import { Button } from "@opal/components";
import { IllustrationContent } from "@opal/layouts";
import Modal from "@/refresh-components/Modal";
interface ImportFlowProps {
open: boolean;
onClose: () => void;
onFileSelected: (file: File) => void;
}
function ImportFlow({ open, onClose, onFileSelected }: ImportFlowProps) {
const fileInputRef = useRef<HTMLInputElement>(null);
function handleFileChange(e: React.ChangeEvent<HTMLInputElement>) {
const file = e.target.files?.[0];
if (!file) return;
if (fileInputRef.current) {
fileInputRef.current.value = "";
}
onFileSelected(file);
onClose();
}
if (!open) return null;
return (
<Modal open onOpenChange={(isOpen) => !isOpen && onClose()}>
<Modal.Content width="sm" height="fit">
<Modal.Header
icon={SvgUploadCloud}
title="Import from Checklist"
description="Upload a checklist document to generate rules automatically."
onClose={onClose}
/>
<Modal.Body>
<IllustrationContent
illustration={SvgUploadCloud}
title="Upload a checklist document (.xlsx, .docx, or .pdf)"
description="The document will be analyzed and rules will be added as inactive drafts."
/>
<input
ref={fileInputRef}
type="file"
accept=".xlsx,.docx,.pdf,.txt,.md"
onChange={handleFileChange}
className="hidden"
/>
<div className="flex justify-center w-full">
<Button
icon={SvgUploadCloud}
onClick={() => fileInputRef.current?.click()}
>
Choose File
</Button>
</div>
</Modal.Body>
</Modal.Content>
</Modal>
);
}
export default ImportFlow;

View File

@@ -0,0 +1,191 @@
"use client";
import { useRef, useState } from "react";
import { Button, Text } from "@opal/components";
import { SvgEdit, SvgPaperclip, SvgX } from "@opal/icons";
import Modal from "@/refresh-components/Modal";
import InputTextArea from "@/refresh-components/inputs/InputTextArea";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import { Section } from "@/layouts/general-layouts";
import { toast } from "@/hooks/useToast";
import type { RuleResponse } from "@/app/admin/proposal-review/interfaces";
interface RefinementModalProps {
open: boolean;
onClose: () => void;
rule: RuleResponse | null;
onRefined: () => void;
}
function RefinementModal({
open,
onClose,
rule,
onRefined,
}: RefinementModalProps) {
const [answer, setAnswer] = useState("");
const [file, setFile] = useState<File | null>(null);
const [loading, setLoading] = useState(false);
const fileInputRef = useRef<HTMLInputElement>(null);
function handleClose() {
setAnswer("");
setFile(null);
setLoading(false);
onClose();
}
async function handleSubmit() {
if (!rule || !answer.trim()) return;
setLoading(true);
try {
const formData = new FormData();
formData.append("answer", answer.trim());
if (file) {
formData.append("file", file);
}
const res = await fetch(`/api/proposal-review/rules/${rule.id}/refine`, {
method: "POST",
body: formData,
});
if (!res.ok) {
const err = await res.json().catch(() => ({}));
throw new Error(err.detail || "Refinement failed");
}
toast.success("Rule refined successfully.");
onRefined();
handleClose();
} catch (err) {
toast.error(err instanceof Error ? err.message : "Failed to refine rule");
} finally {
setLoading(false);
}
}
if (!open || !rule) return null;
return (
<Modal open onOpenChange={(isOpen) => !isOpen && handleClose()}>
<Modal.Content
width="sm"
height="lg"
onPointerDownOutside={(e) => e.preventDefault()}
onEscapeKeyDown={(e) => e.preventDefault()}
>
<Modal.Header
icon={SvgEdit}
title="Refine Rule"
onClose={handleClose}
/>
<Modal.Body>
{loading ? (
<Section alignItems="center" gap={1}>
<SimpleLoader />
<Text font="main-ui-body" color="text-03">
Refining rule...
</Text>
</Section>
) : (
<Section alignItems="start" gap={1}>
<Section alignItems="start" gap={0.25}>
<Text font="main-ui-action" color="text-04">
Rule
</Text>
<div className="w-full rounded-08 bg-background-neutral-02 p-3 flex flex-col gap-1">
<Text font="main-ui-action" color="text-05">
{rule.name}
</Text>
{rule.description && (
<Text font="secondary-body" color="text-03">
{rule.description}
</Text>
)}
</div>
</Section>
<Section alignItems="start" gap={0.25}>
<Text font="main-ui-action" color="text-04">
Question from the AI
</Text>
<div className="w-full rounded-08 bg-status-warning-01 p-3">
<Text font="main-ui-body" color="text-05">
{rule.refinement_question ?? undefined}
</Text>
</div>
</Section>
<Section alignItems="start" gap={0.25}>
<Text font="main-ui-action" color="text-04">
Your Answer
</Text>
<Text font="secondary-body" color="text-03">
Provide the institution-specific information requested above.
The AI will use your answer to refine the rule.
</Text>
<InputTextArea
value={answer}
onChange={(e) => setAnswer(e.target.value)}
placeholder="Enter your answer..."
rows={5}
/>
<input
ref={fileInputRef}
type="file"
accept=".pdf,.docx,.doc,.txt,.md,.rtf"
onChange={(e) => {
const selected = e.target.files?.[0] ?? null;
setFile(selected);
e.target.value = "";
}}
className="hidden"
/>
{file ? (
<div className="flex items-center gap-2 w-full rounded-08 bg-background-neutral-02 px-3 py-2">
<SvgPaperclip className="size-4 shrink-0 text-text-03" />
<div className="truncate flex-1">
<Text font="secondary-body" color="text-04">
{file.name}
</Text>
</div>
<Button
icon={SvgX}
prominence="tertiary"
size="2xs"
onClick={() => setFile(null)}
tooltip="Remove file"
/>
</div>
) : (
<Button
icon={SvgPaperclip}
prominence="tertiary"
size="sm"
onClick={() => fileInputRef.current?.click()}
>
Attach file
</Button>
)}
</Section>
</Section>
)}
</Modal.Body>
<Modal.Footer>
<Button
prominence="secondary"
onClick={handleClose}
disabled={loading}
>
Cancel
</Button>
<Button onClick={handleSubmit} disabled={loading || !answer.trim()}>
{loading ? "Refining..." : "Submit Answer"}
</Button>
</Modal.Footer>
</Modal.Content>
</Modal>
);
}
export default RefinementModal;

View File

@@ -0,0 +1,322 @@
"use client";
import { Form, Formik } from "formik";
import { Button, Text } from "@opal/components";
import { SvgEdit, SvgPlus } from "@opal/icons";
import InputTypeIn from "@/refresh-components/inputs/InputTypeIn";
import InputTextArea from "@/refresh-components/inputs/InputTextArea";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import { toast } from "@/hooks/useToast";
import { FormikField } from "@/refresh-components/form/FormikField";
import Modal from "@/refresh-components/Modal";
import { InputVertical } from "@opal/layouts";
import type {
RuleResponse,
RuleCreate,
RuleUpdate,
RuleType,
RuleIntent,
RuleAuthority,
} from "@/app/admin/proposal-review/interfaces";
import {
RULE_TYPE_LABELS,
RULE_INTENT_LABELS,
RULE_AUTHORITY_LABELS,
} from "@/app/admin/proposal-review/interfaces";
interface RuleEditorProps {
open: boolean;
onClose: () => void;
onSave: (rule: RuleCreate | RuleUpdate) => Promise<void>;
existingRule?: RuleResponse | null;
}
interface RuleFormValues {
name: string;
description: string;
category: string;
rule_type: RuleType;
rule_intent: RuleIntent;
authority: RuleAuthority | "none";
is_hard_stop: string;
prompt_template: string;
}
function RuleEditor({ open, onClose, onSave, existingRule }: RuleEditorProps) {
if (!open) return null;
const initialValues: RuleFormValues = existingRule
? {
name: existingRule.name,
description: existingRule.description || "",
category: existingRule.category || "",
rule_type: existingRule.rule_type,
rule_intent: existingRule.rule_intent,
authority: existingRule.authority || "none",
is_hard_stop: existingRule.is_hard_stop ? "yes" : "no",
prompt_template: existingRule.prompt_template,
}
: {
name: "",
description: "",
category: "",
rule_type: "DOCUMENT_CHECK" as RuleType,
rule_intent: "CHECK" as RuleIntent,
authority: "none" as const,
is_hard_stop: "no",
prompt_template: "",
};
return (
<Modal open onOpenChange={(isOpen) => !isOpen && onClose()}>
<Modal.Content
width="md"
height="lg"
onPointerDownOutside={(e) => e.preventDefault()}
onEscapeKeyDown={(e) => e.preventDefault()}
>
<Modal.Header
icon={existingRule ? SvgEdit : SvgPlus}
title={existingRule ? "Edit Rule" : "Add Rule"}
description={
existingRule
? "Update the rule configuration."
: "Define a new rule for this ruleset."
}
onClose={onClose}
/>
<Formik
initialValues={initialValues}
onSubmit={async (values, { setSubmitting }) => {
setSubmitting(true);
try {
const ruleData = {
name: values.name.trim(),
description: values.description.trim() || undefined,
category: values.category.trim() || undefined,
rule_type: values.rule_type,
rule_intent: values.rule_intent,
prompt_template: values.prompt_template,
authority:
values.authority === "none"
? null
: (values.authority as RuleAuthority),
is_hard_stop: values.is_hard_stop === "yes",
};
await onSave(ruleData);
onClose();
} catch (err) {
toast.error(
err instanceof Error ? err.message : "Failed to save rule"
);
} finally {
setSubmitting(false);
}
}}
>
{({ isSubmitting, values }) => (
<Form className="w-full">
<Modal.Body>
<InputVertical title="Name" withLabel="name">
<FormikField<string>
name="name"
render={(field, helper) => (
<InputTypeIn
{...field}
placeholder="Rule name"
onClear={() => helper.setValue("")}
showClearButton={false}
/>
)}
/>
</InputVertical>
<InputVertical title="Description" withLabel="description">
<FormikField<string>
name="description"
render={(field, helper) => (
<InputTextArea
value={field.value}
onChange={(e) => helper.setValue(e.target.value)}
placeholder="Brief description of what this rule checks"
rows={3}
/>
)}
/>
</InputVertical>
<InputVertical title="Category" withLabel="category">
<FormikField<string>
name="category"
render={(field, helper) => (
<InputTypeIn
{...field}
placeholder="e.g., IR-2: Regulatory Compliance"
onClear={() => helper.setValue("")}
showClearButton={false}
/>
)}
/>
</InputVertical>
<div className="flex gap-4 w-full">
<div className="flex-1 min-w-0">
<InputVertical title="Rule Type" withLabel="rule_type">
<FormikField<string>
name="rule_type"
render={(field, helper) => (
<InputSelect
value={field.value}
onValueChange={(v) => helper.setValue(v)}
>
<InputSelect.Trigger placeholder="Select type" />
<InputSelect.Content>
{Object.entries(RULE_TYPE_LABELS).map(
([key, label]) => (
<InputSelect.Item key={key} value={key}>
{label}
</InputSelect.Item>
)
)}
</InputSelect.Content>
</InputSelect>
)}
/>
</InputVertical>
</div>
<div className="flex-1">
<InputVertical title="Intent" withLabel="rule_intent">
<FormikField<string>
name="rule_intent"
render={(field, helper) => (
<InputSelect
value={field.value}
onValueChange={(v) => helper.setValue(v)}
>
<InputSelect.Trigger placeholder="Select intent" />
<InputSelect.Content>
{Object.entries(RULE_INTENT_LABELS).map(
([key, label]) => (
<InputSelect.Item key={key} value={key}>
{label}
</InputSelect.Item>
)
)}
</InputSelect.Content>
</InputSelect>
)}
/>
</InputVertical>
</div>
</div>
<div className="flex gap-4 w-full">
<div className="flex-1 min-w-0">
<InputVertical title="Authority" withLabel="authority">
<FormikField<string>
name="authority"
render={(field, helper) => (
<InputSelect
value={field.value}
onValueChange={(v) => helper.setValue(v)}
>
<InputSelect.Trigger placeholder="Select authority" />
<InputSelect.Content>
<InputSelect.Item value="none">
None
</InputSelect.Item>
{Object.entries(RULE_AUTHORITY_LABELS).map(
([key, label]) => (
<InputSelect.Item key={key} value={key}>
{label}
</InputSelect.Item>
)
)}
</InputSelect.Content>
</InputSelect>
)}
/>
</InputVertical>
</div>
<div className="flex-1">
<InputVertical title="Hard Stop" withLabel="is_hard_stop">
<FormikField<string>
name="is_hard_stop"
render={(field, helper) => (
<InputSelect
value={field.value}
onValueChange={(v) => helper.setValue(v)}
>
<InputSelect.Trigger />
<InputSelect.Content>
<InputSelect.Item value="no">No</InputSelect.Item>
<InputSelect.Item value="yes">
Yes - Fail stops entire review
</InputSelect.Item>
</InputSelect.Content>
</InputSelect>
)}
/>
</InputVertical>
</div>
</div>
<InputVertical
title="Prompt Template"
withLabel="prompt_template"
>
<Text font="secondary-body" color="text-04">
{
"Available variables: {{proposal_text}}, {{budget_text}}, {{foa_text}}, {{jira_key}}, {{metadata}}"
}
</Text>
<FormikField<string>
name="prompt_template"
render={(field, helper) => (
<InputTextArea
value={field.value}
onChange={(e) => helper.setValue(e.target.value)}
placeholder="Enter the LLM prompt template for evaluating this rule..."
rows={8}
/>
)}
/>
</InputVertical>
</Modal.Body>
<Modal.Footer>
<Button
prominence="secondary"
type="button"
onClick={onClose}
disabled={isSubmitting}
>
Cancel
</Button>
<Button
type="submit"
disabled={
isSubmitting ||
!values.name.trim() ||
!values.prompt_template.trim()
}
>
{isSubmitting
? "Saving..."
: existingRule
? "Update Rule"
: "Add Rule"}
</Button>
</Modal.Footer>
</Form>
)}
</Formik>
</Modal.Content>
</Modal>
);
}
export default RuleEditor;

View File

@@ -0,0 +1,216 @@
"use client";
import { useState } from "react";
import { Button, Text, Tag } from "@opal/components";
import { SvgPlayCircle } from "@opal/icons";
import Modal from "@/refresh-components/Modal";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import { Section } from "@/layouts/general-layouts";
import type { TagColor } from "@opal/components";
type Verdict = "PASS" | "FAIL" | "FLAG" | "NEEDS_REVIEW" | "NOT_APPLICABLE";
type Confidence = "HIGH" | "MEDIUM" | "LOW";
interface RuleTestResult {
verdict: Verdict;
confidence: Confidence;
evidence: string | null;
explanation: string;
suggested_action: string | null;
llm_model: string;
llm_tokens_used: number;
}
interface RuleTestResponse {
rule_id: string;
success: boolean;
result?: RuleTestResult;
error?: string;
}
interface RuleTestModalProps {
open: boolean;
onClose: () => void;
rule: { id: string; name: string } | null;
}
const VERDICT_COLOR: Record<Verdict, TagColor> = {
PASS: "green",
FAIL: "purple",
FLAG: "amber",
NEEDS_REVIEW: "blue",
NOT_APPLICABLE: "gray",
};
const VERDICT_LABEL: Record<Verdict, string> = {
PASS: "Pass",
FAIL: "Fail",
FLAG: "Flag",
NEEDS_REVIEW: "Needs Review",
NOT_APPLICABLE: "Not Applicable",
};
const CONFIDENCE_COLOR: Record<Confidence, TagColor> = {
HIGH: "green",
MEDIUM: "amber",
LOW: "gray",
};
function RuleTestModal({ open, onClose, rule }: RuleTestModalProps) {
const [loading, setLoading] = useState(false);
const [response, setResponse] = useState<RuleTestResponse | null>(null);
async function runTest() {
if (!rule) return;
setLoading(true);
setResponse(null);
try {
const res = await fetch(`/api/proposal-review/rules/${rule.id}/test`, {
method: "POST",
});
const data: RuleTestResponse = await res.json();
setResponse(data);
} catch {
setResponse({
rule_id: rule.id,
success: false,
error: "Network error. Please try again.",
});
} finally {
setLoading(false);
}
}
function handleClose() {
setResponse(null);
setLoading(false);
onClose();
}
if (!open || !rule) return null;
return (
<Modal open onOpenChange={(isOpen) => !isOpen && handleClose()}>
<Modal.Content width="sm" height="lg" preventAccidentalClose={false}>
<Modal.Header
icon={SvgPlayCircle}
title={`Test Rule: ${rule.name}`}
description="Tests the rule against a minimal sample context. Results are illustrative only."
onClose={handleClose}
/>
<Modal.Body>
{!loading && !response && (
<Section alignItems="center" gap={1}>
<Text font="main-ui-body" color="text-03">
Click the button below to evaluate this rule against a minimal
sample context.
</Text>
<Button icon={SvgPlayCircle} onClick={runTest}>
Run Test
</Button>
</Section>
)}
{loading && (
<Section alignItems="center" gap={1}>
<SimpleLoader />
<Text font="main-ui-body" color="text-03">
Running test...
</Text>
</Section>
)}
{response && !response.success && (
<Section alignItems="start" gap={0.5}>
<Text font="main-ui-action" color="text-04">
Error
</Text>
<div className="w-full rounded-08 bg-status-error-01 p-3">
<Text font="main-ui-body" color="text-05">
{response.error || "An unknown error occurred."}
</Text>
</div>
</Section>
)}
{response && response.success && response.result && (
<Section alignItems="start" gap={1}>
{/* Verdict and Confidence */}
<div className="flex items-center gap-2">
<Tag
title={VERDICT_LABEL[response.result.verdict]}
color={VERDICT_COLOR[response.result.verdict]}
size="md"
/>
<Tag
title={response.result.confidence}
color={CONFIDENCE_COLOR[response.result.confidence]}
/>
</div>
{/* Evidence */}
{response.result.evidence && (
<Section alignItems="start" gap={0.25}>
<Text font="main-ui-action" color="text-04">
Evidence
</Text>
<div className="w-full rounded-08 bg-background-neutral-02 p-3 border-l-2 border-border-03">
<Text font="secondary-body" color="text-03" as="p">
{response.result.evidence}
</Text>
</div>
</Section>
)}
{/* Explanation */}
<Section alignItems="start" gap={0.25}>
<Text font="main-ui-action" color="text-04">
Explanation
</Text>
<Text font="main-ui-body" color="text-03" as="p">
{response.result.explanation}
</Text>
</Section>
{/* Suggested Action */}
{response.result.suggested_action && (
<Section alignItems="start" gap={0.25}>
<Text font="main-ui-action" color="text-04">
Suggested Action
</Text>
<Text font="main-ui-body" color="text-03" as="p">
{response.result.suggested_action}
</Text>
</Section>
)}
{/* LLM info footer */}
<div className="flex items-center gap-3 pt-2 w-full">
<Text font="secondary-body" color="text-02">
{`Model: ${response.result.llm_model}`}
</Text>
<Text font="secondary-body" color="text-02">
{`Tokens: ${response.result.llm_tokens_used.toLocaleString()}`}
</Text>
</div>
</Section>
)}
</Modal.Body>
<Modal.Footer>
<Button prominence="secondary" onClick={handleClose}>
Close
</Button>
{response && (
<Button icon={SvgPlayCircle} onClick={runTest} disabled={loading}>
Run Again
</Button>
)}
</Modal.Footer>
</Modal.Content>
</Modal>
);
}
export default RuleTestModal;
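For reference, a successful payload from the test endpoint would be shaped like the sketch below, per the RuleTestResponse interface above; every value here is invented for illustration.

// Illustrative response for POST /api/proposal-review/rules/{id}/test.
// The ID, model name, token count, and verdict details are placeholders.
const sampleResponse: RuleTestResponse = {
  rule_id: "00000000-0000-0000-0000-000000000000",
  success: true,
  result: {
    verdict: "FLAG",
    confidence: "MEDIUM",
    evidence: "The budget narrative references a 6-page biosketch.",
    explanation:
      "The sample context suggests the biosketch may exceed the page limit.",
    suggested_action: "Verify the biosketch length against the FOA.",
    llm_model: "gpt-4o",
    llm_tokens_used: 1842,
  },
};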

View File

@@ -0,0 +1,471 @@
"use client";
import { useState, useEffect, useMemo } from "react";
import useSWR from "swr";
import { Text } from "@opal/components";
import { Button } from "@opal/components/buttons/button/components";
import Checkbox from "@/refresh-components/inputs/Checkbox";
import InputComboBox from "@/refresh-components/inputs/InputComboBox";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import InputTypeIn from "@/refresh-components/inputs/InputTypeIn";
import Separator from "@/refresh-components/Separator";
import { toast } from "@/hooks/useToast";
import { errorHandlingFetcher } from "@/lib/fetcher";
import { SvgPlus, SvgTrash } from "@opal/icons";
import { Section } from "@/layouts/general-layouts";
import { Content } from "@opal/layouts";
import { useAdminLLMProviders } from "@/hooks/useLLMProviders";
import type {
ConfigResponse,
ConfigUpdate,
JiraConnectorInfo,
} from "@/app/admin/proposal-review/interfaces";
const CONNECTORS_URL = "/api/proposal-review/jira-connectors";
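// Sentinel option value meaning "use the system default model"; the model
// selects below map it to and from null in component state.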
const SYSTEM_DEFAULT_MODEL = "__system_default__";
interface SettingsFormProps {
config: ConfigResponse;
onSave: (update: ConfigUpdate) => Promise<void>;
onCancel: () => void;
}
function SettingsForm({ config, onSave, onCancel }: SettingsFormProps) {
const [connectorId, setConnectorId] = useState<number | null>(
config.jira_connector_id
);
const [visibleFields, setVisibleFields] = useState<string[]>(
config.field_mapping ?? []
);
const [jiraWriteback, setJiraWriteback] = useState<Record<string, string>>(
(config.jira_writeback as Record<string, string>) || {}
);
const [reviewModel, setReviewModel] = useState<string | null>(
config.review_model
);
const [importModel, setImportModel] = useState<string | null>(
config.import_model
);
const [saving, setSaving] = useState(false);
const [fieldSearch, setFieldSearch] = useState("");
// Writeback add-row state
const [newWritebackKey, setNewWritebackKey] = useState("");
const [newWritebackField, setNewWritebackField] = useState("");
useEffect(() => {
setConnectorId(config.jira_connector_id);
setVisibleFields(config.field_mapping ?? []);
setJiraWriteback((config.jira_writeback as Record<string, string>) || {});
setReviewModel(config.review_model);
setImportModel(config.import_model);
}, [config]);
// Fetch configured LLM providers for model selection
const { llmProviders, isLoading: llmLoading } = useAdminLLMProviders();
const modelOptions = useMemo(() => {
if (!llmProviders) return [];
const options: { value: string; label: string }[] = [];
for (const provider of llmProviders) {
for (const model of provider.model_configurations) {
if (!model.is_visible) continue;
const displayName = model.display_name || model.name;
const label = `${provider.name} / ${displayName}`;
options.push({ value: model.name, label });
}
}
return options;
}, [llmProviders]);
// Fetch available Jira connectors
const { data: connectors, isLoading: connectorsLoading } = useSWR<
JiraConnectorInfo[]
>(CONNECTORS_URL, errorHandlingFetcher);
// Fetch metadata keys from indexed documents for the selected connector
const { data: metadataKeys, isLoading: fieldsLoading } = useSWR<string[]>(
connectorId
? `/api/proposal-review/jira-connectors/${connectorId}/metadata-keys`
: null,
errorHandlingFetcher
);
const selectedConnector = (connectors ?? []).find(
(c) => c.id === connectorId
);
async function handleSave() {
setSaving(true);
try {
await onSave({
jira_connector_id: connectorId,
jira_project_key: selectedConnector?.project_key || null,
field_mapping: visibleFields.length > 0 ? visibleFields : null,
jira_writeback:
Object.keys(jiraWriteback).length > 0 ? jiraWriteback : null,
review_model: reviewModel,
import_model: importModel,
});
toast.success("Settings saved.");
} catch {
toast.error("Failed to save settings.");
} finally {
setSaving(false);
}
}
function toggleField(key: string) {
setVisibleFields((prev) =>
prev.includes(key) ? prev.filter((k) => k !== key) : [...prev, key]
);
}
function handleAddWriteback() {
    if (!newWritebackKey || !newWritebackField) return;
setJiraWriteback({
...jiraWriteback,
[newWritebackKey]: newWritebackField,
});
setNewWritebackKey("");
setNewWritebackField("");
}
const writebackEntries = Object.entries(jiraWriteback);
// Filter metadata keys by search
const allKeys = metadataKeys ?? [];
const filteredKeys = fieldSearch
? allKeys.filter((k) => k.toLowerCase().includes(fieldSearch.toLowerCase()))
: allKeys;
return (
<Section gap={2} alignItems="stretch" height="auto">
{/* Jira Connector Selection */}
<Section gap={1} alignItems="stretch" height="auto">
<Content
sizePreset="section"
variant="section"
title="Jira Connector"
description="Select which Jira connector to use for proposal sourcing."
/>
<Section gap={0.25} alignItems="start" height="auto">
<Text font="main-ui-action" color="text-04">
Connector
</Text>
{connectorsLoading ? (
<Text font="main-ui-body" color="text-03" as="p">
Loading connectors...
</Text>
) : connectors && connectors.length > 0 ? (
<InputSelect
value={connectorId != null ? String(connectorId) : undefined}
onValueChange={(val) => {
const newId = val ? Number(val) : null;
if (newId !== connectorId) {
setConnectorId(newId);
setVisibleFields([]);
}
}}
>
<InputSelect.Trigger placeholder="Select a Jira connector..." />
<InputSelect.Content>
{connectors.map((c) => (
<InputSelect.Item
key={c.id}
value={String(c.id)}
description={
c.project_key ? `Project: ${c.project_key}` : undefined
}
>
{c.name}
</InputSelect.Item>
))}
</InputSelect.Content>
</InputSelect>
) : (
<Text font="main-ui-body" color="text-03" as="p">
No Jira connectors found. Add one in the Connectors settings
first.
</Text>
)}
</Section>
</Section>
<Separator noPadding />
{/* Visible Fields Checklist */}
<Section gap={1} alignItems="stretch" height="auto">
<Content
sizePreset="section"
variant="section"
title="Visible Fields"
description="Choose which metadata fields to display in the proposal queue and review interface. If none are selected, all fields are shown."
/>
{fieldsLoading && connectorId && (
<Text font="secondary-body" color="text-03" as="p">
Loading fields...
</Text>
)}
{!fieldsLoading && connectorId && allKeys.length > 0 && (
<>
<InputTypeIn
placeholder="Filter fields..."
value={fieldSearch}
onChange={(e) => setFieldSearch(e.target.value)}
onClear={() => setFieldSearch("")}
leftSearchIcon
/>
<div className="flex flex-col gap-1 max-h-64 overflow-y-auto">
{filteredKeys.map((key) => (
<label
key={key}
className="flex items-center gap-3 px-2 py-1.5 rounded-8 cursor-pointer hover:bg-background-neutral-02"
>
<Checkbox
checked={visibleFields.includes(key)}
onCheckedChange={() => toggleField(key)}
/>
<Text font="main-ui-body" color="text-04">
{key}
</Text>
</label>
))}
</div>
{visibleFields.length > 0 && (
<Text font="secondary-body" color="text-03" as="p">
{`${visibleFields.length} field${
visibleFields.length !== 1 ? "s" : ""
} selected`}
</Text>
)}
</>
)}
{!fieldsLoading && connectorId && allKeys.length === 0 && (
<Text font="secondary-body" color="text-03" as="p">
No metadata fields found. Make sure the connector has indexed some
documents.
</Text>
)}
{!connectorId && (
<Text font="secondary-body" color="text-03" as="p">
Select a connector above to see available fields.
</Text>
)}
</Section>
<Separator noPadding />
{/* Write-back Configuration */}
<Section gap={1} alignItems="stretch" height="auto">
<Content
sizePreset="section"
variant="section"
title="Write-back Configuration"
description="Map review outcomes to Jira custom fields for automatic status sync."
/>
{writebackEntries.length > 0 && (
<Section gap={0.5} alignItems="stretch" height="auto">
<div className="flex gap-3 px-1">
<span className="flex-1">
<Text font="secondary-action" color="text-03">
Outcome
</Text>
</span>
<span className="flex-1">
<Text font="secondary-action" color="text-03">
Jira Field
</Text>
</span>
<div className="w-8" />
</div>
{writebackEntries.map(([key, value]) => (
<Section
key={key}
flexDirection="row"
gap={0.75}
alignItems="center"
height="auto"
>
<div className="flex-1">
<Text font="main-ui-body" color="text-04">
{key}
</Text>
</div>
<div className="flex-1">
<Text font="main-ui-body" color="text-04">
{value}
</Text>
</div>
<Button
icon={SvgTrash}
prominence="tertiary"
variant="danger"
size="sm"
onClick={() => {
const updated = { ...jiraWriteback };
delete updated[key];
setJiraWriteback(updated);
}}
tooltip="Remove"
/>
</Section>
))}
</Section>
)}
{connectorId && (
<Section
flexDirection="row"
gap={0.75}
alignItems="center"
height="auto"
>
<div className="flex-1">
<InputSelect
value={newWritebackKey || undefined}
onValueChange={setNewWritebackKey}
>
<InputSelect.Trigger placeholder="Select outcome..." />
<InputSelect.Content>
{["decision_field_id", "completion_field_id"].map((key) => (
<InputSelect.Item key={key} value={key}>
{key === "decision_field_id"
? "Decision Field"
: "Completion % Field"}
</InputSelect.Item>
))}
</InputSelect.Content>
</InputSelect>
</div>
<div className="flex-1">
{allKeys.length > 0 ? (
<InputComboBox
placeholder="Search fields..."
value={newWritebackField}
onValueChange={setNewWritebackField}
options={allKeys.map((key) => ({
value: key,
label: key,
}))}
strict
leftSearchIcon
/>
) : (
<Text font="secondary-body" color="text-03" as="p">
Select a connector first
</Text>
)}
</div>
<Button
icon={SvgPlus}
prominence="tertiary"
size="sm"
onClick={handleAddWriteback}
disabled={!newWritebackKey || !newWritebackField}
tooltip="Add entry"
/>
</Section>
)}
</Section>
<Separator noPadding />
{/* LLM Configuration */}
<Section gap={1} alignItems="stretch" height="auto">
<Content
sizePreset="section"
variant="section"
title="LLM Configuration"
description="Select which models to use for rule evaluation and checklist import."
/>
<Section gap={0.25} alignItems="start" height="auto">
<Text font="main-ui-action" color="text-04">
Review Model
</Text>
<Text font="secondary-body" color="text-03" as="p">
Model used for evaluating rules against proposals.
</Text>
{llmLoading ? (
<Text font="main-ui-body" color="text-03" as="p">
Loading models...
</Text>
) : (
<InputSelect
value={reviewModel || SYSTEM_DEFAULT_MODEL}
onValueChange={(val) =>
setReviewModel(val === SYSTEM_DEFAULT_MODEL ? null : val)
}
>
<InputSelect.Trigger placeholder="Select a model..." />
<InputSelect.Content>
<InputSelect.Item value={SYSTEM_DEFAULT_MODEL}>
Default (system)
</InputSelect.Item>
{modelOptions.map((opt) => (
<InputSelect.Item key={opt.value} value={opt.value}>
{opt.label}
</InputSelect.Item>
))}
</InputSelect.Content>
</InputSelect>
)}
</Section>
<Section gap={0.25} alignItems="start" height="auto">
<Text font="main-ui-action" color="text-04">
Import Model
</Text>
<Text font="secondary-body" color="text-03" as="p">
Model used for parsing checklists into rules.
</Text>
{llmLoading ? (
<Text font="main-ui-body" color="text-03" as="p">
Loading models...
</Text>
) : (
<InputSelect
value={importModel || SYSTEM_DEFAULT_MODEL}
onValueChange={(val) =>
setImportModel(val === SYSTEM_DEFAULT_MODEL ? null : val)
}
>
<InputSelect.Trigger placeholder="Select a model..." />
<InputSelect.Content>
<InputSelect.Item value={SYSTEM_DEFAULT_MODEL}>
Default (system)
</InputSelect.Item>
{modelOptions.map((opt) => (
<InputSelect.Item key={opt.value} value={opt.value}>
{opt.label}
</InputSelect.Item>
))}
</InputSelect.Content>
</InputSelect>
)}
</Section>
</Section>
<Separator noPadding />
{/* Actions */}
<Section flexDirection="row" gap={0.75} alignItems="center" height="auto">
<Button onClick={handleSave} disabled={saving}>
{saving ? "Saving..." : "Save"}
</Button>
<Button prominence="secondary" onClick={onCancel} disabled={saving}>
Cancel
</Button>
</Section>
</Section>
);
}
export default SettingsForm;
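Putting the pieces together, the ConfigUpdate body produced by handleSave might look like the following sketch — the connector ID, field IDs, and model name are made up for illustration.

// Hypothetical payload assembled by handleSave above; the customfield_* IDs
// and model name are placeholders, not real Jira values.
const exampleUpdate: ConfigUpdate = {
  jira_connector_id: 3,
  jira_project_key: "GRANT",
  field_mapping: ["summary", "status", "customfield_10045"],
  jira_writeback: {
    decision_field_id: "customfield_10100",
    completion_field_id: "customfield_10101",
  },
  review_model: "gpt-4o",
  import_model: null, // null means "use the system default"
};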

View File

@@ -0,0 +1,79 @@
"use client";
import { useEffect, useState } from "react";
import useSWR from "swr";
import { errorHandlingFetcher } from "@/lib/fetcher";
export interface ImportJobStatus {
id: string;
status: "PENDING" | "RUNNING" | "COMPLETED" | "FAILED";
source_filename: string;
rules_created: number;
error_message: string | null;
created_at: string;
completed_at: string | null;
}
/**
* Polls an import job's status endpoint while the job is active.
*
* If `importJobId` is provided, polls the specific job.
* Otherwise, checks the `/import/active` endpoint once on mount to detect
* an in-progress import (e.g. when the user navigates away and back).
* Once a job ID is discovered, only the specific-job endpoint is polled.
*/
export function useImportStatus(rulesetId: string, importJobId: string | null) {
// Tracks the job ID discovered via the /active endpoint so we can
// stop polling /active once we know which job to watch.
const [discoveredJobId, setDiscoveredJobId] = useState<string | null>(null);
const needsDiscovery = !importJobId && !discoveredJobId;
// One-shot fetch to discover an already-active import on mount
const { data: activeJob } = useSWR<ImportJobStatus | null>(
needsDiscovery
? `/api/proposal-review/rulesets/${rulesetId}/import/active`
: null,
errorHandlingFetcher
);
// Once the /active endpoint returns a job, capture its ID and stop polling /active
useEffect(() => {
if (activeJob?.id) {
setDiscoveredJobId(activeJob.id);
}
}, [activeJob]);
// Resolve which job ID to poll
const resolvedJobId = importJobId ?? discoveredJobId;
const { data, error } = useSWR<ImportJobStatus>(
resolvedJobId
? `/api/proposal-review/rulesets/${rulesetId}/import/${resolvedJobId}/status`
: null,
errorHandlingFetcher,
{
refreshInterval: (latestData) => {
if (!latestData) return 5000;
if (
latestData.status === "COMPLETED" ||
latestData.status === "FAILED"
) {
return 0; // stop polling
}
return 5000;
},
}
);
const job = data ?? null;
return {
importJob: job,
isProcessing:
!!job && (job.status === "PENDING" || job.status === "RUNNING"),
isComplete: job?.status === "COMPLETED",
isFailed: job?.status === "FAILED",
error,
};
}
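A minimal consumer of this hook might look like the sketch below (the component and copy are invented; the ruleset detail page later in this diff uses the same hook with a tracked job ID). Passing null as importJobId exercises the discovery path described in the doc comment.

// Hypothetical usage: with importJobId null, the hook checks /import/active
// once on mount and then polls the discovered job until it finishes.
function ImportBanner({ rulesetId }: { rulesetId: string }) {
  const { importJob, isProcessing, isFailed } = useImportStatus(rulesetId, null);
  if (!importJob) return null;
  if (isProcessing) return <p>Importing "{importJob.source_filename}"...</p>;
  if (isFailed) return <p>Import failed: {importJob.error_message}</p>;
  return <p>{importJob.rules_created} rules created.</p>;
}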

View File

@@ -0,0 +1,151 @@
/** Shared types for Proposal Review admin pages. */
export interface RulesetResponse {
id: string;
tenant_id: string;
name: string;
description: string | null;
is_default: boolean;
is_active: boolean;
created_by: string | null;
created_at: string;
updated_at: string;
rules: RuleResponse[];
}
export interface RuleResponse {
id: string;
ruleset_id: string;
name: string;
description: string | null;
category: string | null;
rule_type: RuleType;
rule_intent: RuleIntent;
prompt_template: string;
source: RuleSource;
authority: RuleAuthority | null;
is_hard_stop: boolean;
priority: number;
is_active: boolean;
refinement_needed: boolean;
refinement_question: string | null;
created_at: string;
updated_at: string;
}
export type RuleType =
| "DOCUMENT_CHECK"
| "METADATA_CHECK"
| "CROSS_REFERENCE"
| "CUSTOM_NL";
export type RuleIntent = "CHECK" | "HIGHLIGHT";
export type RuleSource = "IMPORTED" | "MANUAL";
export type RuleAuthority = "OVERRIDE" | "RETURN";
export interface RulesetCreate {
name: string;
description?: string;
is_default?: boolean;
}
export interface RulesetUpdate {
name?: string;
description?: string;
is_default?: boolean;
is_active?: boolean;
}
export interface RuleCreate {
name: string;
description?: string;
category?: string;
rule_type: RuleType;
rule_intent?: RuleIntent;
prompt_template: string;
source?: RuleSource;
authority?: RuleAuthority | null;
is_hard_stop?: boolean;
priority?: number;
}
export interface RuleUpdate {
name?: string;
description?: string;
category?: string;
rule_type?: RuleType;
rule_intent?: RuleIntent;
prompt_template?: string;
authority?: RuleAuthority | null;
is_hard_stop?: boolean;
priority?: number;
is_active?: boolean;
}
export interface BulkRuleUpdateRequest {
action: "activate" | "deactivate" | "delete";
rule_ids: string[];
}
export interface BulkRuleUpdateResponse {
updated_count: number;
}
export interface ImportResponse {
rules_created: number;
rules: RuleResponse[];
}
export interface ConfigResponse {
id: string;
tenant_id: string;
jira_connector_id: number | null;
jira_project_key: string | null;
field_mapping: string[] | null;
jira_writeback: Record<string, string> | null;
review_model: string | null;
import_model: string | null;
created_at: string;
updated_at: string;
}
export interface ConfigUpdate {
jira_connector_id?: number | null;
jira_project_key?: string | null;
field_mapping?: string[] | null;
jira_writeback?: Record<string, string> | null;
review_model?: string | null;
import_model?: string | null;
}
export interface JiraConnectorInfo {
id: number;
name: string;
project_key: string;
project_url: string;
}
/** Labels for display purposes. */
export const RULE_TYPE_LABELS: Record<RuleType, string> = {
DOCUMENT_CHECK: "Document Check",
METADATA_CHECK: "Metadata Check",
CROSS_REFERENCE: "Cross Reference",
CUSTOM_NL: "Custom NL",
};
export const RULE_INTENT_LABELS: Record<RuleIntent, string> = {
CHECK: "Check",
HIGHLIGHT: "Highlight",
};
export const RULE_SOURCE_LABELS: Record<RuleSource, string> = {
IMPORTED: "Imported",
MANUAL: "Manual",
};
export const RULE_AUTHORITY_LABELS: Record<string, string> = {
OVERRIDE: "Override",
RETURN: "Return",
};
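As a concrete illustration of these contracts, a rule-creation body and a bulk update might look like this (all values invented for the example):

// Hypothetical payloads conforming to the interfaces above.
const newRule: RuleCreate = {
  name: "Biosketch page limit",
  category: "Formatting",
  rule_type: "DOCUMENT_CHECK",
  rule_intent: "CHECK",
  prompt_template:
    "Check that the biosketch in {{proposal_text}} is 5 pages or fewer.",
  is_hard_stop: false,
  priority: 10,
};
const bulkActivate: BulkRuleUpdateRequest = {
  action: "activate",
  rule_ids: ["rule-id-1", "rule-id-2"], // placeholder IDs
};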

View File

@@ -0,0 +1,516 @@
"use client";
import { useMemo, useState } from "react";
import { useRouter } from "next/navigation";
import useSWR, { mutate } from "swr";
import { errorHandlingFetcher } from "@/lib/fetcher";
import * as SettingsLayouts from "@/layouts/settings-layouts";
import { ADMIN_ROUTES } from "@/lib/admin-routes";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import { toast } from "@/hooks/useToast";
import { Button, Text, Tag } from "@opal/components";
import { Content, IllustrationContent } from "@opal/layouts";
import SvgNoResult from "@opal/illustrations/no-result";
import {
SvgClipboard,
SvgEdit,
SvgMoreHorizontal,
SvgSettings,
SvgTrash,
} from "@opal/icons";
import { Form, Formik } from "formik";
import InputTypeIn from "@/refresh-components/inputs/InputTypeIn";
import { FormikField } from "@/refresh-components/form/FormikField";
import Switch from "@/refresh-components/inputs/Switch";
import AdminListHeader from "@/sections/admin/AdminListHeader";
import Modal from "@/refresh-components/Modal";
import Popover, { PopoverMenu } from "@/refresh-components/Popover";
import LineItem from "@/refresh-components/buttons/LineItem";
import ConfirmationModalLayout from "@/refresh-components/layouts/ConfirmationModalLayout";
import { markdown } from "@opal/utils";
import { Table } from "@opal/components";
import { createTableColumns } from "@opal/components/table/columns";
import { InputVertical } from "@opal/layouts";
import type { RulesetResponse } from "@/app/admin/proposal-review/interfaces";
const API_URL = "/api/proposal-review/rulesets";
const route = ADMIN_ROUTES.PROPOSAL_REVIEW;
const tc = createTableColumns<RulesetResponse>();
function formatDate(dateStr: string): string {
return new Date(dateStr).toLocaleDateString("en-US", {
month: "short",
day: "numeric",
year: "numeric",
});
}
function RulesetsPage() {
const router = useRouter();
const {
data: rulesets,
isLoading,
error,
} = useSWR<RulesetResponse[]>(API_URL, errorHandlingFetcher);
const [showCreateForm, setShowCreateForm] = useState(false);
const [editTarget, setEditTarget] = useState<RulesetResponse | null>(null);
const [deleteTarget, setDeleteTarget] = useState<RulesetResponse | null>(
null
);
const [search, setSearch] = useState("");
const filteredRulesets = (rulesets ?? []).filter(
(rs) =>
!search ||
rs.name.toLowerCase().includes(search.toLowerCase()) ||
(rs.description ?? "").toLowerCase().includes(search.toLowerCase())
);
async function handleToggleActive(ruleset: RulesetResponse) {
try {
const res = await fetch(`${API_URL}/${ruleset.id}`, {
method: "PUT",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ is_active: !ruleset.is_active }),
});
if (!res.ok) throw new Error(await res.text());
await mutate(API_URL);
} catch (err) {
toast.error(
err instanceof Error ? err.message : "Failed to update ruleset"
);
}
}
async function handleToggleDefault(ruleset: RulesetResponse) {
try {
const res = await fetch(`${API_URL}/${ruleset.id}`, {
method: "PUT",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ is_default: !ruleset.is_default }),
});
if (!res.ok) throw new Error(await res.text());
await mutate(API_URL);
} catch (err) {
toast.error(
err instanceof Error ? err.message : "Failed to update ruleset"
);
}
}
function handleEditOpen(ruleset: RulesetResponse) {
setEditTarget(ruleset);
}
async function handleDelete(ruleset: RulesetResponse) {
try {
const res = await fetch(`${API_URL}/${ruleset.id}`, {
method: "DELETE",
});
if (!res.ok && res.status !== 204) {
        const err = await res.json().catch(() => ({}));
throw new Error(err.detail || "Failed to delete ruleset");
}
await mutate(API_URL);
setDeleteTarget(null);
toast.success("Ruleset deleted.");
} catch (err) {
toast.error(
err instanceof Error ? err.message : "Failed to delete ruleset"
);
}
}
const columns = useMemo(
() => [
tc.qualifier({
content: "icon",
getContent: () => SvgClipboard,
}),
tc.column("name", {
header: "Name",
weight: 30,
cell: (value) => (
<Content title={value} sizePreset="main-ui" variant="body" />
),
}),
tc.displayColumn({
id: "rules_count",
header: "Rules",
width: { weight: 10, minWidth: 80 },
cell: (row) => (
<Text font="main-ui-body" color="text-03">
{String(row.rules.length)}
</Text>
),
}),
tc.displayColumn({
id: "status",
header: "Active",
width: { weight: 10, minWidth: 80 },
cell: (row) => (
<div onClick={(e) => e.stopPropagation()}>
<Switch
checked={row.is_active}
onCheckedChange={() => handleToggleActive(row)}
/>
</div>
),
}),
tc.displayColumn({
id: "default",
header: "Default",
width: { weight: 10, minWidth: 80 },
cell: (row) => (
<div onClick={(e) => e.stopPropagation()}>
<Switch
checked={row.is_default}
onCheckedChange={() => handleToggleDefault(row)}
/>
</div>
),
}),
tc.column("updated_at", {
header: "Last Modified",
weight: 15,
cell: (value) => (
<Text font="secondary-body" color="text-03">
{formatDate(value)}
</Text>
),
}),
tc.actions({
cell: (row) => (
<div className="flex flex-row gap-1">
<Popover>
<Popover.Trigger asChild>
<Button
icon={SvgMoreHorizontal}
prominence="tertiary"
tooltip="More"
/>
</Popover.Trigger>
<Popover.Content side="bottom" align="end" width="md">
<PopoverMenu>
<LineItem icon={SvgEdit} onClick={() => handleEditOpen(row)}>
Edit Ruleset
</LineItem>
<LineItem
icon={SvgTrash}
danger
onClick={() => setDeleteTarget(row)}
>
Delete Ruleset
</LineItem>
</PopoverMenu>
</Popover.Content>
</Popover>
</div>
),
}),
],
[] // eslint-disable-line react-hooks/exhaustive-deps
);
if (error) {
return (
<SettingsLayouts.Root width="lg">
<SettingsLayouts.Header
title={route.title}
icon={route.icon}
description="Manage review rulesets for automated proposal evaluation."
separator
/>
<SettingsLayouts.Body>
<IllustrationContent
illustration={SvgNoResult}
title="Failed to load rulesets."
description="Please check the console for more details."
/>
</SettingsLayouts.Body>
</SettingsLayouts.Root>
);
}
if (isLoading) {
return (
<SettingsLayouts.Root width="lg">
<SettingsLayouts.Header
title={route.title}
icon={route.icon}
description="Manage review rulesets for automated proposal evaluation."
separator
/>
<SettingsLayouts.Body>
<SimpleLoader />
</SettingsLayouts.Body>
</SettingsLayouts.Root>
);
}
const hasRulesets = (rulesets ?? []).length > 0;
return (
<SettingsLayouts.Root width="lg">
<SettingsLayouts.Header
title={route.title}
icon={route.icon}
description="Manage review rulesets for automated proposal evaluation."
separator
rightChildren={
<Button
icon={SvgSettings}
prominence="secondary"
onClick={() => router.push("/admin/proposal-review/settings")}
>
Configuration
</Button>
}
/>
<SettingsLayouts.Body>
<div className="flex flex-col">
<AdminListHeader
hasItems={hasRulesets}
searchQuery={search}
onSearchQueryChange={setSearch}
placeholder="Search rulesets..."
emptyStateText="Create rulesets to define automated proposal review rules."
onAction={() => setShowCreateForm(true)}
actionLabel="New Ruleset"
/>
{hasRulesets && (
<div className="[&_.tbl-row]:cursor-pointer [&_.tbl-row:hover_td]:bg-background-tint-02">
<Table
data={filteredRulesets}
getRowId={(row) => row.id}
columns={columns}
searchTerm={search}
onRowClick={(row) =>
router.push(`/admin/proposal-review/rulesets/${row.id}`)
}
/>
</div>
)}
</div>
</SettingsLayouts.Body>
{/* Create Ruleset Modal */}
{showCreateForm && (
<Modal open onOpenChange={() => setShowCreateForm(false)}>
<Modal.Content width="sm" height="sm">
<Modal.Header
icon={SvgClipboard}
title="New Ruleset"
description="Create a new set of review rules."
onClose={() => setShowCreateForm(false)}
/>
<Formik
initialValues={{ name: "", description: "" }}
onSubmit={async (values, { setSubmitting }) => {
setSubmitting(true);
try {
const res = await fetch("/api/proposal-review/rulesets", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(values),
});
if (!res.ok) throw new Error(await res.text());
const created = await res.json();
toast.success("Ruleset created. Add rules to get started.");
setShowCreateForm(false);
router.push(`/admin/proposal-review/rulesets/${created.id}`);
} catch (err) {
toast.error(
err instanceof Error
? err.message
: "Failed to create ruleset."
);
} finally {
setSubmitting(false);
}
}}
>
{({ isSubmitting, values }) => (
<Form className="w-full">
<Modal.Body>
<InputVertical withLabel="name" title="Name">
<FormikField<string>
name="name"
render={(field, helper) => (
<InputTypeIn
{...field}
placeholder="e.g., Institutional Review"
onClear={() => helper.setValue("")}
showClearButton={false}
/>
)}
/>
</InputVertical>
<InputVertical title="Description" withLabel="description">
<FormikField<string>
name="description"
render={(field, helper) => (
<InputTypeIn
{...field}
placeholder="Optional description"
onClear={() => helper.setValue("")}
showClearButton={false}
/>
)}
/>
</InputVertical>
</Modal.Body>
<Modal.Footer>
<Button
prominence="secondary"
type="button"
onClick={() => setShowCreateForm(false)}
disabled={isSubmitting}
>
Cancel
</Button>
<Button
type="submit"
disabled={isSubmitting || !values.name.trim()}
>
{isSubmitting ? "Creating..." : "Create"}
</Button>
</Modal.Footer>
</Form>
)}
</Formik>
</Modal.Content>
</Modal>
)}
{/* Edit Ruleset Modal */}
{editTarget && (
<Modal open onOpenChange={() => setEditTarget(null)}>
<Modal.Content width="sm" height="sm">
<Modal.Header
icon={SvgEdit}
title="Edit Ruleset"
description="Update the ruleset name and description."
onClose={() => setEditTarget(null)}
/>
<Formik
initialValues={{
name: editTarget.name,
description: editTarget.description || "",
}}
onSubmit={async (values, { setSubmitting }) => {
setSubmitting(true);
try {
const res = await fetch(
`/api/proposal-review/rulesets/${editTarget.id}`,
{
method: "PUT",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(values),
}
);
if (!res.ok) throw new Error(await res.text());
toast.success("Ruleset updated.");
mutate(API_URL);
setEditTarget(null);
} catch (err) {
toast.error(
err instanceof Error
? err.message
: "Failed to update ruleset."
);
} finally {
setSubmitting(false);
}
}}
>
{({ isSubmitting, values }) => (
<Form className="w-full">
<Modal.Body>
<InputVertical withLabel="name" title="Name">
<FormikField<string>
name="name"
render={(field, helper) => (
<InputTypeIn
{...field}
placeholder="Ruleset name"
onClear={() => helper.setValue("")}
showClearButton={false}
/>
)}
/>
</InputVertical>
<InputVertical withLabel="description" title="Description">
<FormikField<string>
name="description"
render={(field, helper) => (
<InputTypeIn
{...field}
placeholder="Optional description"
onClear={() => helper.setValue("")}
showClearButton={false}
/>
)}
/>
</InputVertical>
</Modal.Body>
<Modal.Footer>
<Button
prominence="secondary"
type="button"
onClick={() => setEditTarget(null)}
disabled={isSubmitting}
>
Cancel
</Button>
<Button
type="submit"
disabled={isSubmitting || !values.name.trim()}
>
{isSubmitting ? "Saving..." : "Save"}
</Button>
</Modal.Footer>
</Form>
)}
</Formik>
</Modal.Content>
</Modal>
)}
{/* Delete Confirmation */}
{deleteTarget && (
<ConfirmationModalLayout
icon={SvgTrash}
title="Delete Ruleset"
onClose={() => setDeleteTarget(null)}
submit={
<Button
variant="danger"
onClick={async () => {
const target = deleteTarget;
setDeleteTarget(null);
await handleDelete(target);
}}
>
Delete
</Button>
}
>
<Text as="p" color="text-03">
{markdown(
`Are you sure you want to delete *${deleteTarget.name}*? All rules within this ruleset will also be deleted. This action cannot be undone.`
)}
</Text>
</ConfirmationModalLayout>
)}
</SettingsLayouts.Root>
);
}
export default function Page() {
return <RulesetsPage />;
}

View File

@@ -0,0 +1,759 @@
"use client";
import { useState, useMemo, useEffect, useRef } from "react";
import { useParams } from "next/navigation";
import useSWR, { mutate } from "swr";
import { errorHandlingFetcher } from "@/lib/fetcher";
import * as SettingsLayouts from "@/layouts/settings-layouts";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import InputSearch from "@/refresh-components/inputs/InputSearch";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import Checkbox from "@/refresh-components/inputs/Checkbox";
import { toast } from "@/hooks/useToast";
import { Button, Text, Tag, Card } from "@opal/components";
import { ContentAction, IllustrationContent } from "@opal/layouts";
import SvgNoResult from "@opal/illustrations/no-result";
import {
SvgAlertCircle,
SvgCheckCircle,
SvgClipboard,
SvgEdit,
SvgMoreHorizontal,
SvgPauseCircle,
SvgPlayCircle,
SvgPlus,
SvgTrash,
SvgUploadCloud,
} from "@opal/icons";
import Popover, { PopoverMenu } from "@/refresh-components/Popover";
import LineItem from "@/refresh-components/buttons/LineItem";
import SimpleTooltip from "@/refresh-components/SimpleTooltip";
import ConfirmationModalLayout from "@/refresh-components/layouts/ConfirmationModalLayout";
import { markdown } from "@opal/utils";
import RuleEditor from "@/app/admin/proposal-review/components/RuleEditor";
import ImportFlow from "@/app/admin/proposal-review/components/ImportFlow";
import { useImportStatus } from "@/app/admin/proposal-review/hooks/useImportStatus";
import RefinementModal from "@/app/admin/proposal-review/components/RefinementModal";
import RuleTestModal from "@/app/admin/proposal-review/components/RuleTestModal";
import type {
RulesetResponse,
RulesetUpdate,
RuleResponse,
RuleCreate,
RuleUpdate,
BulkRuleUpdateRequest,
} from "@/app/admin/proposal-review/interfaces";
function RulesetDetailPage() {
const params = useParams();
const rulesetId = params.id as string;
const apiUrl = `/api/proposal-review/rulesets/${rulesetId}`;
const {
data: ruleset,
isLoading,
error,
} = useSWR<RulesetResponse>(apiUrl, errorHandlingFetcher);
// Modal states
const [showRuleEditor, setShowRuleEditor] = useState(false);
const [editingRule, setEditingRule] = useState<RuleResponse | null>(null);
const [showImportFlow, setShowImportFlow] = useState(false);
const [deleteTarget, setDeleteTarget] = useState<RuleResponse | null>(null);
const [testTarget, setTestTarget] = useState<RuleResponse | null>(null);
const [refineTarget, setRefineTarget] = useState<RuleResponse | null>(null);
const [ruleSearch, setRuleSearch] = useState("");
const [categoryFilter, setCategoryFilter] = useState("all");
const [statusFilter, setStatusFilter] = useState("all");
// Import job tracking — persists across navigation via the hook's SWR polling
const [importJobId, setImportJobId] = useState<string | null>(null);
const [isUploading, setIsUploading] = useState(false);
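  // Remembers the last job whose terminal state we've already handled, so the
  // effect below doesn't re-fire the toast on subsequent renders.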
const handledJobRef = useRef<string | null>(null);
const { importJob, isProcessing, isComplete, isFailed } = useImportStatus(
rulesetId,
importJobId
);
// When import completes, refresh the ruleset and show toast
useEffect(() => {
if (!importJob || handledJobRef.current === importJob.id) return;
// SWR has picked up the job — safe to drop the eager upload indicator
setIsUploading(false);
if (isComplete) {
handledJobRef.current = importJob.id;
mutate(apiUrl);
toast.success(
`Imported ${importJob.rules_created} rule${
importJob.rules_created !== 1 ? "s" : ""
} from "${importJob.source_filename}" as inactive drafts.`
);
setImportJobId(null);
}
if (isFailed) {
handledJobRef.current = importJob.id;
toast.error(
`Import failed: ${importJob.error_message || "Unknown error"}`
);
setImportJobId(null);
}
}, [isComplete, isFailed, importJob, apiUrl]);
async function handleImportFile(file: File) {
setShowImportFlow(false);
setIsUploading(true);
try {
const formData = new FormData();
formData.append("file", file);
const res = await fetch(
`/api/proposal-review/rulesets/${rulesetId}/import`,
{ method: "POST", body: formData }
);
if (!res.ok) {
const err = await res.json();
throw new Error(err.detail || "Failed to import checklist");
}
const data = await res.json();
setImportJobId(data.import_job_id);
} catch (err) {
setIsUploading(false);
toast.error(err instanceof Error ? err.message : "Import failed");
}
}
// Batch selection
const [selectedRuleIds, setSelectedRuleIds] = useState<Set<string>>(
new Set()
);
const [batchSaving, setBatchSaving] = useState(false);
// Update ruleset metadata (name, description)
async function handleUpdateRuleset(updates: Partial<RulesetUpdate>) {
try {
const res = await fetch(apiUrl, {
method: "PUT",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(updates),
});
if (!res.ok) throw new Error("Failed to update ruleset");
await mutate(apiUrl);
} catch (err) {
toast.error(
err instanceof Error ? err.message : "Failed to update ruleset"
);
}
}
// Toggle handlers (for individual rule active/inactive)
async function handleToggleRuleActive(rule: RuleResponse) {
try {
const update: RuleUpdate = { is_active: !rule.is_active };
const res = await fetch(`/api/proposal-review/rules/${rule.id}`, {
method: "PUT",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(update),
});
if (!res.ok) {
const err = await res.json().catch(() => ({}));
throw new Error(err.detail || "Failed to toggle rule active status");
}
await mutate(apiUrl);
toast.success(rule.is_active ? "Rule deactivated." : "Rule activated.");
} catch (err) {
toast.error(
err instanceof Error
? err.message
: "Failed to toggle rule active status"
);
}
}
// Rule CRUD
async function handleSaveRule(ruleData: RuleCreate | RuleUpdate) {
if (editingRule) {
const res = await fetch(`/api/proposal-review/rules/${editingRule.id}`, {
method: "PUT",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(ruleData),
});
if (!res.ok) {
const err = await res.json().catch(() => ({}));
throw new Error(err.detail || "Failed to update rule");
}
toast.success("Rule updated.");
} else {
const res = await fetch(
`/api/proposal-review/rulesets/${rulesetId}/rules`,
{
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(ruleData),
}
);
if (!res.ok) {
const err = await res.json().catch(() => ({}));
throw new Error(err.detail || "Failed to create rule");
}
toast.success("Rule created.");
}
await mutate(apiUrl);
}
async function handleDeleteRule(rule: RuleResponse) {
try {
const res = await fetch(`/api/proposal-review/rules/${rule.id}`, {
method: "DELETE",
});
if (!res.ok && res.status !== 204) {
        const err = await res.json().catch(() => ({}));
throw new Error(err.detail || "Failed to delete rule");
}
setSelectedRuleIds((prev) => {
const next = new Set(prev);
next.delete(rule.id);
return next;
});
await mutate(apiUrl);
toast.success("Rule deleted.");
} catch (err) {
toast.error(err instanceof Error ? err.message : "Failed to delete rule");
}
}
// Batch operations
async function handleBulkAction(action: BulkRuleUpdateRequest["action"]) {
if (selectedRuleIds.size === 0) return;
setBatchSaving(true);
try {
const body: BulkRuleUpdateRequest = {
action,
rule_ids: Array.from(selectedRuleIds),
};
const res = await fetch(
`/api/proposal-review/rulesets/${rulesetId}/rules/bulk-update`,
{
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(body),
}
);
if (!res.ok) {
const err = await res.json();
throw new Error(err.detail || "Bulk operation failed");
}
setSelectedRuleIds(new Set());
await mutate(apiUrl);
toast.success(
`Bulk ${action} completed for ${selectedRuleIds.size} rule${
selectedRuleIds.size === 1 ? "" : "s"
}.`
);
} catch (err) {
toast.error(err instanceof Error ? err.message : "Bulk operation failed");
} finally {
setBatchSaving(false);
}
}
const categories = useMemo(() => {
if (!ruleset) return [];
const cats = new Set(
ruleset.rules.map((r) => r.category).filter(Boolean) as string[]
);
return Array.from(cats).sort();
}, [ruleset]);
const filteredRules = useMemo(() => {
if (!ruleset) return [];
let rules = ruleset.rules;
if (ruleSearch) {
const q = ruleSearch.toLowerCase();
rules = rules.filter(
(rule) =>
rule.name.toLowerCase().includes(q) ||
(rule.category ?? "").toLowerCase().includes(q) ||
(rule.description ?? "").toLowerCase().includes(q)
);
}
if (categoryFilter !== "all") {
rules = rules.filter((r) => r.category === categoryFilter);
}
if (statusFilter === "active") {
rules = rules.filter((r) => r.is_active);
} else if (statusFilter === "inactive") {
rules = rules.filter((r) => !r.is_active);
} else if (statusFilter === "refinement") {
rules = rules.filter((r) => r.refinement_needed);
}
// Sort: refinement-needed first, then by category + name
const natural = { numeric: true, sensitivity: "base" } as const;
return [...rules].sort((a, b) => {
// Refinement-needed rules float to the top
if (a.refinement_needed !== b.refinement_needed) {
return a.refinement_needed ? -1 : 1;
}
const catCmp = (a.category ?? "").localeCompare(
b.category ?? "",
undefined,
natural
);
if (catCmp !== 0) return catCmp;
return a.name.localeCompare(b.name, undefined, natural);
});
}, [ruleset, ruleSearch, categoryFilter, statusFilter]);
const refinementRules = useMemo(
() => filteredRules.filter((r) => r.refinement_needed),
[filteredRules]
);
const otherRules = useMemo(
() => filteredRules.filter((r) => !r.refinement_needed),
[filteredRules]
);
const filteredRuleIds = useMemo(
() => new Set(filteredRules.map((r) => r.id)),
[filteredRules]
);
// "All selected" means every currently visible (filtered) rule is selected
const filteredRuleIdArr = Array.from(filteredRuleIds);
const allSelected =
filteredRuleIds.size > 0 &&
filteredRuleIdArr.every((id) => selectedRuleIds.has(id));
const someSelected =
!allSelected && filteredRuleIdArr.some((id) => selectedRuleIds.has(id));
function toggleSelectAll() {
if (allSelected) {
// Deselect all visible rules (keep any selected but currently hidden rules)
setSelectedRuleIds((prev) => {
const next = new Set(prev);
filteredRuleIdArr.forEach((id) => next.delete(id));
return next;
});
} else {
// Select all visible rules
setSelectedRuleIds((prev) => {
const next = new Set(prev);
filteredRuleIdArr.forEach((id) => next.add(id));
return next;
});
}
}
function toggleSelectRule(ruleId: string) {
setSelectedRuleIds((prev) => {
const next = new Set(prev);
if (next.has(ruleId)) {
next.delete(ruleId);
} else {
next.add(ruleId);
}
return next;
});
}
function RuleCard({ rule }: { rule: RuleResponse }) {
const isSelected = selectedRuleIds.has(rule.id);
return (
<div className="flex items-center gap-3">
<div onClick={(e) => e.stopPropagation()}>
<Checkbox
checked={isSelected}
onCheckedChange={() => toggleSelectRule(rule.id)}
aria-label={`Select ${rule.name}`}
/>
</div>
<div
className="flex-1 min-w-0 cursor-pointer"
onClick={() => {
if (rule.refinement_needed) {
setRefineTarget(rule);
} else {
setEditingRule(rule);
setShowRuleEditor(true);
}
}}
>
<Card padding="md" border="solid" background="light">
<ContentAction
sizePreset="main-ui"
variant="section"
title={rule.name}
description={rule.description || rule.category || undefined}
rightChildren={
<div
className="flex items-center gap-2 shrink-0"
onClick={(e) => e.stopPropagation()}
>
{rule.category && (
<SimpleTooltip
tooltip={rule.category}
side="top"
delayDuration={0}
>
<div className="max-w-[160px] overflow-hidden [&>.opal-auxiliary-tag]:shrink [&>.opal-auxiliary-tag>span]:truncate">
<Tag title={rule.category} color="gray" size="sm" />
</div>
</SimpleTooltip>
)}
<Tag
title={rule.is_active ? "Active" : "Inactive"}
color={rule.is_active ? "green" : "gray"}
size="sm"
/>
{rule.refinement_needed && (
<Tag title="Needs Refinement" color="purple" size="sm" />
)}
{rule.is_hard_stop && (
<Tag title="Hard Stop" color="amber" size="sm" />
)}
<Popover>
<Popover.Trigger asChild>
<Button
icon={SvgMoreHorizontal}
prominence="tertiary"
tooltip="More"
/>
</Popover.Trigger>
<Popover.Content side="bottom" align="end" width="md">
<PopoverMenu>
<LineItem
icon={SvgEdit}
onClick={() => {
setEditingRule(rule);
setShowRuleEditor(true);
}}
>
Edit Rule
</LineItem>
<LineItem
icon={SvgPlayCircle}
onClick={() => setTestTarget(rule)}
>
Test Rule
</LineItem>
{rule.refinement_needed && (
<LineItem
icon={SvgAlertCircle}
onClick={() => setRefineTarget(rule)}
>
Answer Refinement Question
</LineItem>
)}
<LineItem
icon={
rule.is_active ? SvgPauseCircle : SvgCheckCircle
}
onClick={() => handleToggleRuleActive(rule)}
>
{rule.is_active ? "Deactivate" : "Activate"}
</LineItem>
<LineItem
icon={SvgTrash}
danger
onClick={() => setDeleteTarget(rule)}
>
Delete Rule
</LineItem>
</PopoverMenu>
</Popover.Content>
</Popover>
</div>
}
/>
</Card>
</div>
</div>
);
}
if (isLoading) {
return (
<SettingsLayouts.Root width="lg">
<SettingsLayouts.Header
icon={SvgClipboard}
title="Loading..."
backButton
separator
/>
<SettingsLayouts.Body>
<SimpleLoader />
</SettingsLayouts.Body>
</SettingsLayouts.Root>
);
}
if (error || !ruleset) {
return (
<SettingsLayouts.Root width="lg">
<SettingsLayouts.Header
icon={SvgClipboard}
title="Ruleset"
backButton
separator
/>
<SettingsLayouts.Body>
<IllustrationContent
illustration={SvgNoResult}
title="Failed to load ruleset."
description={
error?.info?.message ||
error?.info?.detail ||
"Ruleset not found."
}
/>
</SettingsLayouts.Body>
</SettingsLayouts.Root>
);
}
return (
<SettingsLayouts.Root width="lg">
<SettingsLayouts.Header
icon={SvgClipboard}
title={ruleset.name}
description={
ruleset.description
? `${ruleset.description} · ${ruleset.rules.length} rule${
ruleset.rules.length !== 1 ? "s" : ""
}`
: `${ruleset.rules.length} rule${
ruleset.rules.length !== 1 ? "s" : ""
}`
}
backButton
editable
onTitleChange={async (newName) => {
await handleUpdateRuleset({ name: newName });
}}
separator
/>
<SettingsLayouts.Body>
{/* Import progress bar */}
{(isUploading || (isProcessing && importJob)) && (
<div className="flex items-center gap-3 px-4 py-3 rounded-08 bg-background-neutral-02">
<div className="h-2 flex-1 min-w-[80px] rounded-08 bg-border-02 animate-pulse" />
<Text font="secondary-body" color="text-03" nowrap>
{importJob && importJob.rules_created > 0
? `${importJob.rules_created} rule${
importJob.rules_created !== 1 ? "s" : ""
} created`
: isUploading
? "Uploading..."
: `Analyzing "${importJob!.source_filename}"...`}
</Text>
</div>
)}
{/* Search + action bar */}
<div className="flex items-center gap-3">
{ruleset.rules.length > 0 && (
<Checkbox
checked={allSelected}
indeterminate={someSelected}
onCheckedChange={toggleSelectAll}
aria-label="Select all rules"
/>
)}
<div className="flex-1">
<InputSearch
placeholder="Search rules..."
value={ruleSearch}
onChange={(e) => setRuleSearch(e.target.value)}
/>
</div>
{categories.length > 0 && (
<div className="shrink-0">
<InputSelect
value={categoryFilter}
onValueChange={setCategoryFilter}
>
<InputSelect.Trigger placeholder="Category" />
<InputSelect.Content>
<InputSelect.Item value="all">
All Categories
</InputSelect.Item>
{categories.map((cat) => (
<InputSelect.Item key={cat} value={cat}>
{cat}
</InputSelect.Item>
))}
</InputSelect.Content>
</InputSelect>
</div>
)}
<div className="shrink-0">
<InputSelect value={statusFilter} onValueChange={setStatusFilter}>
<InputSelect.Trigger placeholder="Status" />
<InputSelect.Content>
<InputSelect.Item value="all">All Statuses</InputSelect.Item>
<InputSelect.Item value="active">Active</InputSelect.Item>
<InputSelect.Item value="inactive">Inactive</InputSelect.Item>
<InputSelect.Item value="refinement">
Needs Refinement
</InputSelect.Item>
</InputSelect.Content>
</InputSelect>
</div>
{selectedRuleIds.size > 0 ? (
<>
<Text font="main-ui-action" color="text-03">
{`${selectedRuleIds.size} selected`}
</Text>
<Button
prominence="secondary"
size="sm"
onClick={() => handleBulkAction("activate")}
disabled={batchSaving}
>
Activate
</Button>
<Button
prominence="secondary"
size="sm"
onClick={() => handleBulkAction("deactivate")}
disabled={batchSaving}
>
Deactivate
</Button>
<Button
variant="danger"
prominence="secondary"
size="sm"
onClick={() => handleBulkAction("delete")}
disabled={batchSaving}
>
Delete
</Button>
</>
) : (
<>
<Button
prominence="secondary"
icon={SvgUploadCloud}
onClick={() => setShowImportFlow(true)}
>
Import
</Button>
<Button
icon={SvgPlus}
onClick={() => {
setEditingRule(null);
setShowRuleEditor(true);
}}
>
Add Rule
</Button>
</>
)}
</div>
{/* Rules list */}
{ruleset.rules.length === 0 ? (
<IllustrationContent
illustration={SvgNoResult}
title="No rules yet"
description="Add rules manually or import from a checklist."
/>
) : filteredRules.length === 0 ? (
<IllustrationContent
illustration={SvgNoResult}
title="No matching rules"
description="Try a different search term."
/>
) : (
<div className="flex flex-col gap-2">
{refinementRules.map((rule) => (
<RuleCard key={rule.id} rule={rule} />
))}
{refinementRules.length > 0 && otherRules.length > 0 && (
<hr className="border-border-02" />
)}
{otherRules.map((rule) => (
<RuleCard key={rule.id} rule={rule} />
))}
</div>
)}
</SettingsLayouts.Body>
{/* Rule Editor Modal */}
<RuleEditor
open={showRuleEditor}
onClose={() => {
setShowRuleEditor(false);
setEditingRule(null);
}}
onSave={handleSaveRule}
existingRule={editingRule}
/>
{/* Import Flow Modal */}
<ImportFlow
open={showImportFlow}
onClose={() => setShowImportFlow(false)}
onFileSelected={handleImportFile}
/>
{/* Rule Test Modal */}
<RuleTestModal
open={!!testTarget}
onClose={() => setTestTarget(null)}
rule={testTarget}
/>
{/* Rule Refinement Modal */}
<RefinementModal
open={!!refineTarget}
onClose={() => setRefineTarget(null)}
rule={refineTarget}
onRefined={() => mutate(apiUrl)}
/>
{/* Delete Rule Confirmation */}
{deleteTarget && (
<ConfirmationModalLayout
icon={SvgTrash}
title="Delete Rule"
onClose={() => setDeleteTarget(null)}
submit={
<Button
variant="danger"
onClick={async () => {
const target = deleteTarget;
setDeleteTarget(null);
await handleDeleteRule(target);
}}
>
Delete
</Button>
}
>
<Text as="p" color="text-03">
{markdown(
`Are you sure you want to delete *${deleteTarget.name}*? This action cannot be undone.`
)}
</Text>
</ConfirmationModalLayout>
)}
</SettingsLayouts.Root>
);
}
export default function Page() {
return <RulesetDetailPage />;
}

View File

@@ -0,0 +1,75 @@
"use client";
import { useRouter } from "next/navigation";
import useSWR, { mutate } from "swr";
import { IllustrationContent } from "@opal/layouts";
import SvgNoResult from "@opal/illustrations/no-result";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import * as SettingsLayouts from "@/layouts/settings-layouts";
import { errorHandlingFetcher } from "@/lib/fetcher";
import { SvgSettings } from "@opal/icons";
import SettingsForm from "@/app/admin/proposal-review/components/SettingsForm";
import type {
ConfigResponse,
ConfigUpdate,
} from "@/app/admin/proposal-review/interfaces";
const API_URL = "/api/proposal-review/config";
function ProposalReviewSettingsPage() {
const router = useRouter();
const {
data: config,
isLoading,
error,
} = useSWR<ConfigResponse>(API_URL, errorHandlingFetcher);
async function handleSave(update: ConfigUpdate) {
const res = await fetch(API_URL, {
method: "PUT",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(update),
});
if (!res.ok) {
const err = await res.json();
throw new Error(err.detail || "Failed to save settings");
}
await mutate(API_URL);
}
return (
<SettingsLayouts.Root>
<SettingsLayouts.Header
icon={SvgSettings}
title="Proposal Review Configuration"
description="Configure the proposal review process."
separator
backButton
onBack={() => router.push("/admin/proposal-review")}
/>
<SettingsLayouts.Body>
{isLoading && <SimpleLoader />}
{error && (
<IllustrationContent
illustration={SvgNoResult}
title="Error loading settings"
description={
error?.info?.message || error?.info?.detail || "An error occurred"
}
/>
)}
{config && (
<SettingsForm
config={config}
onSave={handleSave}
onCancel={() => router.push("/admin/proposal-review")}
/>
)}
</SettingsLayouts.Body>
</SettingsLayouts.Root>
);
}
export default function Page() {
return <ProposalReviewSettingsPage />;
}

View File

@@ -346,12 +346,7 @@ export const AgentTimeline = React.memo(function AgentTimeline({
agent={chatState.agent}
headerContent={
<div className="flex w-full h-full items-center pl-[var(--timeline-header-padding-left)] pr-[var(--timeline-header-padding-right)]">
<Text
as="p"
mainUiAction
text03
className="animate-shimmer bg-[length:200%_100%] bg-[linear-gradient(90deg,var(--shimmer-base)_10%,var(--shimmer-highlight)_40%,var(--shimmer-base)_70%)] bg-clip-text text-transparent"
>
<Text as="p" mainUiAction text03 className="shimmer-text">
{headerText}
</Text>
</div>

View File

@@ -40,12 +40,7 @@ export const StreamingHeader = React.memo(function StreamingHeader({
return (
<>
<div className="px-[var(--timeline-header-text-padding-x)] py-[var(--timeline-header-text-padding-y)]">
<Text
as="p"
mainUiAction
text03
className="animate-shimmer bg-[length:200%_100%] bg-[linear-gradient(90deg,var(--shimmer-base)_10%,var(--shimmer-highlight)_40%,var(--shimmer-base)_70%)] bg-clip-text text-transparent"
>
<Text as="p" mainUiAction text03 className="shimmer-text">
{headerText}
</Text>
</div>

View File

@@ -406,49 +406,29 @@ textarea {
/* Preserves whitespace and wraps text */
}
.loading-text {
color: #e5e5e5;
background: linear-gradient(
-90deg,
#a3a3a3 0%,
#000000 5%,
#a3a3a3 10%,
#a3a3a3 100%
);
background-size: 200% 100%;
background-clip: text;
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
animation: shimmerTransition 1.8s ease-out infinite;
}
.dark .loading-text {
color: #1a1a1a;
background: linear-gradient(
-90deg,
#5c5c5c 0%,
#ffffff 5%,
#5c5c5c 10%,
#5c5c5c 100%
);
background-size: 200% 100%;
background-clip: text;
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
}
@keyframes shimmerTransition {
@keyframes shimmer {
0% {
background-position: 100% 0;
}
100% {
background-position: -100% 0;
background-position: 0% 0;
}
}
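/* The 300%-wide gradient is clipped to the glyphs (background-clip: text with
   a transparent fill) and swept across them by animating background-position. */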
.shimmer-text {
animation: shimmer 1s ease-out infinite;
background-size: 300% 100%;
background-image: linear-gradient(
90deg,
var(--shimmer-base) 35%,
var(--shimmer-highlight) 50%,
var(--shimmer-base) 65%
);
background-clip: text;
-webkit-background-clip: text;
color: transparent;
}
.collapsible {
max-height: 300px;
transition:

View File

@@ -0,0 +1,373 @@
"use client";
import { useEffect, useCallback, useState, useRef } from "react";
import { Button, Text } from "@opal/components";
import {
SvgPlayCircle,
SvgRefreshCw,
SvgChevronDown,
SvgChevronUp,
} from "@opal/icons";
import { IllustrationContent } from "@opal/layouts";
import SvgEmpty from "@opal/illustrations/empty";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import {
Collapsible,
CollapsibleTrigger,
CollapsibleContent,
} from "@/refresh-components/Collapsible";
import RulesetSelector from "@/app/proposal-review/components/RulesetSelector";
import ReviewProgress from "@/app/proposal-review/components/ReviewProgress";
import RunHistorySelector from "@/app/proposal-review/components/RunHistorySelector";
import FindingCard from "@/app/proposal-review/components/FindingCard";
import { useFindings } from "@/app/proposal-review/hooks/useFindings";
import { useReviewStatus } from "@/app/proposal-review/hooks/useReviewStatus";
import { useReviewRuns } from "@/app/proposal-review/hooks/useReviewRuns";
import { useProposalReviewContext } from "@/app/proposal-review/contexts/ProposalReviewContext";
import {
triggerReview,
retryFailedRules,
} from "@/app/proposal-review/services/apiServices";
import type { Finding } from "@/app/proposal-review/types";
// ---------------------------------------------------------------------------
// Props
// ---------------------------------------------------------------------------
interface ChecklistPanelProps {
proposalId: string;
}
// ---------------------------------------------------------------------------
// Component
// ---------------------------------------------------------------------------
export default function ChecklistPanel({ proposalId }: ChecklistPanelProps) {
const {
selectedRulesetId,
isReviewRunning,
setIsReviewRunning,
currentReviewRunId,
setCurrentReviewRunId,
viewingRunId,
setViewingRunId,
focusedFindingId,
setFocusedFindingId,
resetReviewState,
} = useProposalReviewContext();
const [triggerError, setTriggerError] = useState<string | null>(null);
// Reset review state when navigating to a different proposal
const prevProposalIdRef = useRef(proposalId);
useEffect(() => {
if (prevProposalIdRef.current !== proposalId) {
prevProposalIdRef.current = proposalId;
resetReviewState();
setTriggerError(null);
}
}, [proposalId, resetReviewState]);
// Always fetch latest review run; poll while running
const { reviewStatus, mutate: mutateReviewStatus } = useReviewStatus(
proposalId,
isReviewRunning
);
// Fetch all runs for the history selector
const { runs: reviewRuns, mutate: mutateReviewRuns } =
useReviewRuns(proposalId);
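// runs[0] is treated as the latest run (assumes the hook returns runs newest-first).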
const isViewingLatest =
viewingRunId === null || viewingRunId === reviewRuns[0]?.id;
// Fetch findings — poll while review is running so results appear in real time.
// When viewing an older run, don't poll.
const {
findingsByCategory,
isLoading: findingsLoading,
mutate: mutateFindings,
findings,
} = useFindings(
proposalId,
isViewingLatest && isReviewRunning,
isViewingLatest ? null : viewingRunId
);
// When review completes, stop polling and load findings.
// Guards: (1) must be actively polling, (2) must have status data,
// (3) must match the run we triggered — prevents stale COMPLETED status
// from a previous run from immediately killing polling on re-run.
useEffect(() => {
if (!isReviewRunning || !reviewStatus || !currentReviewRunId) return;
if (reviewStatus.id !== currentReviewRunId) return;
if (
reviewStatus.status === "COMPLETED" ||
reviewStatus.status === "FAILED"
) {
setIsReviewRunning(false);
mutateFindings();
mutateReviewRuns();
}
}, [
reviewStatus,
isReviewRunning,
currentReviewRunId,
setIsReviewRunning,
mutateFindings,
mutateReviewRuns,
]);
const handleRunReview = useCallback(async () => {
if (!selectedRulesetId) return;
setTriggerError(null);
// Clear run ID first — blocks the completion effect and hides stale
// data via isTriggerInFlight until the trigger API returns.
setCurrentReviewRunId(null);
setIsReviewRunning(true);
setViewingRunId(null);
try {
const result = await triggerReview(proposalId, selectedRulesetId);
setCurrentReviewRunId(result.id);
// Revalidate caches — the backend APIs now return data for the new
// (latest) run, so SWR naturally picks up fresh data.
mutateReviewStatus();
mutateFindings();
mutateReviewRuns();
} catch (err) {
setIsReviewRunning(false);
setTriggerError(
err instanceof Error ? err.message : "Failed to start review"
);
}
}, [
proposalId,
selectedRulesetId,
setIsReviewRunning,
setCurrentReviewRunId,
mutateFindings,
mutateReviewStatus,
mutateReviewRuns,
]);
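// Retry follows the same in-flight gating as a full run: clear the run ID first.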
const handleRetryFailed = useCallback(async () => {
setTriggerError(null);
setCurrentReviewRunId(null);
setIsReviewRunning(true);
setViewingRunId(null);
try {
const result = await retryFailedRules(proposalId);
setCurrentReviewRunId(result.id);
mutateReviewStatus();
} catch (err) {
setIsReviewRunning(false);
setTriggerError(err instanceof Error ? err.message : "Failed to retry");
}
}, [
proposalId,
setIsReviewRunning,
setCurrentReviewRunId,
mutateReviewStatus,
]);
// True between clicking "Run Review" and the trigger API returning.
// During this window, currentReviewRunId is null — we hide stale data
// from the previous run so the UI looks clean.
const isTriggerInFlight = isReviewRunning && !currentReviewRunId;
const showRetryButton =
isViewingLatest &&
!isReviewRunning &&
(reviewStatus?.status === "COMPLETED" ||
reviewStatus?.status === "FAILED") &&
(reviewStatus?.failed_rules ?? 0) > 0;
const handleFocusHandled = useCallback(
() => setFocusedFindingId(null),
[setFocusedFindingId]
);
return (
<div className="flex flex-col h-full overflow-hidden">
{/* Top bar: ruleset selector + run button + progress */}
<div className="flex items-center gap-3 p-4 border-b border-border-01 shrink-0">
<div className="shrink-0 max-w-[200px]">
<RulesetSelector />
</div>
<Button
variant="default"
prominence="primary"
icon={SvgPlayCircle}
disabled={!selectedRulesetId || isReviewRunning}
onClick={handleRunReview}
>
{isReviewRunning ? "Running..." : "Run Review"}
</Button>
{reviewStatus && !isTriggerInFlight && (
<ReviewProgress reviewStatus={reviewStatus} />
)}
{showRetryButton && (
<Button
variant="default"
prominence="secondary"
icon={SvgRefreshCw}
size="sm"
onClick={handleRetryFailed}
>
Retry Failed
</Button>
)}
{isReviewRunning && !reviewStatus && (
<SimpleLoader className="h-4 w-4" />
)}
</div>
{triggerError && (
<div className="px-4 pt-2">
<Text font="secondary-body" color="text-03">
{triggerError}
</Text>
</div>
)}
{/* Run history selector */}
{reviewRuns.length > 1 && (
<RunHistorySelector
runs={reviewRuns}
selectedRunId={viewingRunId}
onSelectRun={setViewingRunId}
/>
)}
{/* Findings list */}
<div className="flex-1 overflow-y-auto">
{(isTriggerInFlight || (!isReviewRunning && findingsLoading)) && (
<div className="flex items-center justify-center py-8">
<SimpleLoader className="h-6 w-6" />
</div>
)}
{!isReviewRunning && !findingsLoading && findings.length === 0 && (
<div className="flex items-center justify-center py-12 px-4">
<IllustrationContent
illustration={SvgEmpty}
title="No review results"
description="Select a ruleset and click Run Review to evaluate this proposal."
/>
</div>
)}
{!isTriggerInFlight && findingsByCategory.length > 0 && (
<div className="flex flex-col gap-3 p-4">
{findingsByCategory.map((group) => (
<CategoryGroup
key={group.category}
category={group.category}
findings={group.findings}
focusedFindingId={focusedFindingId}
onFocusHandled={handleFocusHandled}
onDecisionSaved={() => mutateFindings()}
/>
))}
</div>
)}
</div>
</div>
);
}
// ---------------------------------------------------------------------------
// CategoryGroup: collapsible group of findings
// ---------------------------------------------------------------------------
interface CategoryGroupProps {
category: string;
findings: Finding[];
focusedFindingId: string | null;
onFocusHandled: () => void;
onDecisionSaved: () => void;
}
function CategoryGroup({
category,
findings,
focusedFindingId,
onFocusHandled,
onDecisionSaved,
}: CategoryGroupProps) {
const failCount = findings.filter(
(f) => f.verdict === "FAIL" || f.verdict === "FLAG"
).length;
const decidedCount = findings.filter(
(f) => f.decision_action !== null
).length;
// Default open if there are failures/flags
const [isOpen, setIsOpen] = useState(failCount > 0);
// Auto-open this group when a finding inside it is focused
const containsFocused =
focusedFindingId !== null &&
findings.some((f) => f.id === focusedFindingId);
useEffect(() => {
if (containsFocused) {
setIsOpen(true);
}
}, [containsFocused]);
return (
<Collapsible open={isOpen} onOpenChange={setIsOpen}>
<CollapsibleTrigger asChild>
<div
role="button"
tabIndex={0}
className="flex items-center justify-between w-full py-2 px-3 rounded-08 hover:bg-background-neutral-02 cursor-pointer"
onKeyDown={(e) => {
if (e.key === "Enter" || e.key === " ") {
e.preventDefault();
setIsOpen((prev) => !prev);
}
}}
>
<div className="flex items-center gap-2">
{isOpen ? (
<SvgChevronUp className="h-4 w-4 text-text-03" />
) : (
<SvgChevronDown className="h-4 w-4 text-text-03" />
)}
<Text font="main-ui-action" color="text-04">
{category}
</Text>
</div>
<div className="flex items-center gap-3">
{failCount > 0 && (
<Text font="secondary-body" color="text-03">
{`${failCount} issue${failCount !== 1 ? "s" : ""}`}
</Text>
)}
<Text font="secondary-body" color="text-03">
{`${decidedCount}/${findings.length} reviewed`}
</Text>
</div>
</div>
</CollapsibleTrigger>
<CollapsibleContent>
<div className="flex flex-col gap-2 pt-2 pl-6">
{findings.map((finding) => (
<FindingCard
key={finding.id}
finding={finding}
isFocused={finding.id === focusedFindingId}
onFocusHandled={onFocusHandled}
onDecisionSaved={onDecisionSaved}
/>
))}
</div>
</CollapsibleContent>
</Collapsible>
);
}

View File

@@ -0,0 +1,234 @@
"use client";
import { useState, useCallback } from "react";
import { Button, Text, Card } from "@opal/components";
import {
SvgCheckCircle,
SvgAlertTriangle,
SvgXCircle,
SvgRefreshCw,
} from "@opal/icons";
import { cn } from "@/lib/utils";
import { Section } from "@/layouts/general-layouts";
import InputTextArea from "@/refresh-components/inputs/InputTextArea";
import "@/app/proposal-review/components/decision-toggle.css";
import { toast } from "@/hooks/useToast";
import {
submitProposalDecision,
syncToJira,
} from "@/app/proposal-review/services/apiServices";
import type {
ProposalDecisionOutcome,
ProposalStatus,
Finding,
} from "@/app/proposal-review/types";
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/** Map proposal status back to a decision outcome, or null if no decision yet. */
function statusToDecision(
status: ProposalStatus
): ProposalDecisionOutcome | null {
if (status === "APPROVED") return "APPROVED";
if (status === "CHANGES_REQUESTED") return "CHANGES_REQUESTED";
if (status === "REJECTED") return "REJECTED";
return null;
}
// ---------------------------------------------------------------------------
// Props
// ---------------------------------------------------------------------------
interface DecisionPanelProps {
proposalId: string;
findings: Finding[];
proposalStatus: ProposalStatus;
existingDecisionNotes?: string;
onDecisionSubmitted: () => void;
}
// ---------------------------------------------------------------------------
// Component
// ---------------------------------------------------------------------------
export default function DecisionPanel({
proposalId,
findings,
proposalStatus,
existingDecisionNotes,
onDecisionSubmitted,
}: DecisionPanelProps) {
const existingDecision = statusToDecision(proposalStatus);
const [selectedDecision, setSelectedDecision] =
useState<ProposalDecisionOutcome | null>(existingDecision);
const [notes, setNotes] = useState(existingDecisionNotes ?? "");
const [isSubmitting, setIsSubmitting] = useState(false);
const [isSyncing, setIsSyncing] = useState(false);
const [submitError, setSubmitError] = useState<string | null>(null);
const [decisionSaved, setDecisionSaved] = useState(existingDecision !== null);
// Check for unresolved hard stops
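// A hard stop counts as unresolved while its FAIL/FLAG verdict has no
// reviewer decision yet, or the reviewer confirmed it as an ISSUE.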
const unresolvedHardStops = findings.filter(
(f) =>
f.rule_is_hard_stop &&
(f.verdict === "FAIL" || f.verdict === "FLAG") &&
(!f.decision_action || f.decision_action === "ISSUE")
);
const hasUnresolvedHardStops = unresolvedHardStops.length > 0;
const handleSubmit = useCallback(async () => {
if (!selectedDecision) return;
setIsSubmitting(true);
setSubmitError(null);
try {
await submitProposalDecision(
proposalId,
selectedDecision,
notes || undefined
);
setDecisionSaved(true);
onDecisionSubmitted();
} catch (err) {
setSubmitError(
err instanceof Error ? err.message : "Failed to submit decision"
);
} finally {
setIsSubmitting(false);
}
}, [proposalId, selectedDecision, notes, onDecisionSubmitted]);
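// Fire-and-forget: sync failures surface via toast rather than inline error state.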
const handleSync = useCallback(async () => {
setIsSyncing(true);
try {
await syncToJira(proposalId);
} catch {
toast.error("Failed to sync to Jira");
} finally {
setIsSyncing(false);
}
}, [proposalId]);
return (
<Card padding="md" border="solid" background="light">
<Section
gap={0.75}
height="auto"
justifyContent="start"
alignItems="start"
>
<Text font="main-ui-action" color="text-04">
Final Decision
</Text>
{/* Decision buttons */}
<Section
gap={0.5}
height="auto"
justifyContent="start"
alignItems="start"
>
<div
className={cn(
selectedDecision === "APPROVED" && "decision-toggle-green"
)}
>
<Button
variant="default"
prominence="secondary"
icon={SvgCheckCircle}
disabled={hasUnresolvedHardStops || isSubmitting}
onClick={() => setSelectedDecision("APPROVED")}
>
Approve
</Button>
</div>
{hasUnresolvedHardStops && (
<Text font="secondary-body" color="text-03">
{`Cannot approve: ${
unresolvedHardStops.length
} unresolved hard stop${
unresolvedHardStops.length !== 1 ? "s" : ""
}`}
</Text>
)}
<div
className={cn(
selectedDecision === "CHANGES_REQUESTED" &&
"decision-toggle-yellow"
)}
>
<Button
variant="default"
prominence="secondary"
icon={SvgAlertTriangle}
disabled={isSubmitting}
onClick={() => setSelectedDecision("CHANGES_REQUESTED")}
>
Request Changes
</Button>
</div>
<Button
variant={selectedDecision === "REJECTED" ? "danger" : "default"}
prominence={
selectedDecision === "REJECTED" ? "primary" : "secondary"
}
icon={SvgXCircle}
disabled={isSubmitting}
onClick={() => setSelectedDecision("REJECTED")}
>
Reject
</Button>
</Section>
{/* Notes */}
<InputTextArea
placeholder="Decision notes (optional)"
value={notes}
onChange={(e) => setNotes(e.target.value)}
rows={3}
/>
{/* Submit + Sync */}
<Section
gap={0.5}
height="auto"
justifyContent="start"
alignItems="start"
>
<Button
variant="default"
prominence="primary"
disabled={!selectedDecision || isSubmitting}
onClick={handleSubmit}
>
{isSubmitting ? "Submitting..." : "Submit Decision"}
</Button>
<Button
variant="default"
prominence="secondary"
icon={SvgRefreshCw}
disabled={!decisionSaved || isSyncing}
onClick={handleSync}
>
{isSyncing ? "Syncing..." : "Sync to Jira"}
</Button>
</Section>
{submitError && (
<Text font="secondary-body" color="text-03">
{submitError}
</Text>
)}
</Section>
</Card>
);
}

View File

@@ -0,0 +1,99 @@
"use client";
import { useState, useRef } from "react";
import { Button, Text } from "@opal/components";
import { SvgUploadCloud } from "@opal/icons";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import { uploadDocument } from "@/app/proposal-review/services/apiServices";
import type { DocumentRole } from "@/app/proposal-review/types";
interface DocumentUploadProps {
proposalId: string;
onUploadComplete: () => void;
}
const DOCUMENT_ROLES: { value: DocumentRole; label: string }[] = [
{ value: "PROPOSAL", label: "Proposal" },
{ value: "BUDGET", label: "Budget" },
{ value: "FOA", label: "FOA" },
{ value: "INTERNAL", label: "Internal" },
{ value: "SOW", label: "Scope of Work" },
{ value: "OTHER", label: "Other" },
];
export default function DocumentUpload({
proposalId,
onUploadComplete,
}: DocumentUploadProps) {
const fileInputRef = useRef<HTMLInputElement>(null);
const [selectedRole, setSelectedRole] = useState<DocumentRole>("OTHER");
const [isUploading, setIsUploading] = useState(false);
const [uploadError, setUploadError] = useState<string | null>(null);
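// Upload starts as soon as a file is picked, tagged with the currently selected role.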
async function handleFileSelect(e: React.ChangeEvent<HTMLInputElement>) {
const file = e.target.files?.[0];
if (!file) return;
setIsUploading(true);
setUploadError(null);
try {
await uploadDocument(proposalId, file, selectedRole);
onUploadComplete();
} catch (err) {
setUploadError(err instanceof Error ? err.message : "Upload failed");
} finally {
setIsUploading(false);
// Reset the file input so the same file can be re-selected
if (fileInputRef.current) {
fileInputRef.current.value = "";
}
}
}
return (
<div className="flex flex-col gap-2">
<div className="flex items-center gap-2">
<div className="flex-1">
<InputSelect
value={selectedRole}
onValueChange={(v) => setSelectedRole(v as DocumentRole)}
>
<InputSelect.Trigger placeholder="Document role" />
<InputSelect.Content>
{DOCUMENT_ROLES.map((role) => (
<InputSelect.Item key={role.value} value={role.value}>
{role.label}
</InputSelect.Item>
))}
</InputSelect.Content>
</InputSelect>
</div>
<Button
variant="default"
prominence="secondary"
icon={SvgUploadCloud}
disabled={isUploading}
onClick={() => fileInputRef.current?.click()}
>
{isUploading ? "Uploading..." : "Upload"}
</Button>
</div>
<input
ref={fileInputRef}
type="file"
className="hidden"
accept=".pdf,.docx,.xlsx,.html,.txt"
onChange={handleFileSelect}
/>
{uploadError && (
<Text font="secondary-body" color="text-03">
{uploadError}
</Text>
)}
</div>
);
}

View File

@@ -0,0 +1,314 @@
"use client";
import { useState, useCallback, useEffect, useRef } from "react";
import { Button, Tag, Text, Card } from "@opal/components";
import {
SvgCheckCircle,
SvgAlertTriangle,
SvgAlertCircle,
SvgShield,
} from "@opal/icons";
import ReactMarkdown from "react-markdown";
import remarkGfm from "remark-gfm";
import { cn } from "@/lib/utils";
import "@/app/proposal-review/components/decision-toggle.css";
import { Section } from "@/layouts/general-layouts";
import InputTextArea from "@/refresh-components/inputs/InputTextArea";
import { toast } from "@/hooks/useToast";
import { submitFindingDecision } from "@/app/proposal-review/services/apiServices";
import {
VERDICT_CONFIG,
type Finding,
type DecisionAction,
} from "@/app/proposal-review/types";
/** Tailwind prose classes with design-system color tokens so dark mode works
* without the `dark:` modifier — the CSS variables auto-switch via colors.css. */
const PROSE_CLASSES = cn(
"prose prose-sm max-w-full",
"[--tw-prose-body:var(--text-03)]",
"[--tw-prose-bold:var(--text-04)]",
"[--tw-prose-headings:var(--text-04)]",
"[--tw-prose-links:var(--action-link-05)]",
"[--tw-prose-counters:var(--text-03)]",
"[--tw-prose-bullets:var(--text-03)]"
);
// ---------------------------------------------------------------------------
// Props
// ---------------------------------------------------------------------------
interface FindingCardProps {
finding: Finding;
isFocused?: boolean;
onFocusHandled?: () => void;
onDecisionSaved: () => void;
}
// ---------------------------------------------------------------------------
// Component
// ---------------------------------------------------------------------------
export default function FindingCard({
finding,
isFocused,
onFocusHandled,
onDecisionSaved,
}: FindingCardProps) {
const cardRef = useRef<HTMLDivElement>(null);
const {
rule_name,
rule_is_hard_stop,
verdict,
explanation,
evidence,
suggested_action,
decision_action,
decision_notes,
} = finding;
const isActionable = verdict === "FAIL" || verdict === "FLAG";
const isNeedsReview = verdict === "NEEDS_REVIEW";
const isPass = verdict === "PASS" || verdict === "NOT_APPLICABLE";
// Default expansion: FAIL/FLAG/NEEDS_REVIEW expanded, PASS collapsed
const [isExpanded, setIsExpanded] = useState(!isPass);
const [notes, setNotes] = useState(decision_notes ?? "");
const [currentAction, setCurrentAction] = useState<DecisionAction | null>(
decision_action ?? null
);
const [isSaving, setIsSaving] = useState(false);
// Scroll into view and expand when focused from sidebar.
// Delay accounts for the Radix collapsible open animation (~200ms).
// onFocusHandled is called INSIDE the timeout so that clearing focusedFindingId
// doesn't trigger effect cleanup (clearTimeout) before the scroll fires.
useEffect(() => {
if (isFocused && cardRef.current) {
setIsExpanded(true);
const timer = setTimeout(() => {
cardRef.current?.scrollIntoView({
behavior: "smooth",
block: "center",
});
onFocusHandled?.();
}, 250);
return () => clearTimeout(timer);
}
}, [isFocused, onFocusHandled]);
const verdictConfig = VERDICT_CONFIG[verdict];
const handleDecision = useCallback(
async (action: DecisionAction) => {
setIsSaving(true);
try {
await submitFindingDecision(finding.id, action, notes || undefined);
setCurrentAction(action);
onDecisionSaved();
} catch (err) {
toast.error(
err instanceof Error ? err.message : "Failed to save finding decision"
);
} finally {
setIsSaving(false);
}
},
[finding.id, notes, onDecisionSaved]
);
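// Persist notes on blur, but only once a decision exists and the text actually changed.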
const handleNotesBlur = useCallback(async () => {
if (currentAction && notes !== (decision_notes ?? "")) {
setIsSaving(true);
try {
await submitFindingDecision(
finding.id,
currentAction,
notes || undefined
);
onDecisionSaved();
} catch (err) {
toast.error(
err instanceof Error ? err.message : "Failed to save notes"
);
} finally {
setIsSaving(false);
}
}
}, [currentAction, notes, decision_notes, finding.id, onDecisionSaved]);
return (
<div ref={cardRef}>
<Card
padding="md"
border="solid"
background={rule_is_hard_stop && isActionable ? "heavy" : "light"}
>
<Section
gap={0.75}
height="auto"
justifyContent="start"
alignItems="start"
className={cn(
rule_is_hard_stop &&
isActionable &&
"border-l-2 border-status-error-03 pl-3"
)}
>
{/* Header row: verdict tag + rule name */}
<div
role="button"
tabIndex={0}
aria-expanded={isExpanded}
aria-label={`${rule_name ?? "Unnamed Rule"} - ${
verdictConfig.label
}`}
className="flex items-center gap-2 text-left w-full cursor-pointer"
onClick={() => setIsExpanded((prev) => !prev)}
onKeyDown={(e) => {
if (e.key === "Enter" || e.key === " ") {
e.preventDefault();
setIsExpanded((prev) => !prev);
}
}}
>
<Tag title={verdictConfig.label} color={verdictConfig.color} />
<Text font="main-ui-action" color="text-04" as="span">
{rule_name ?? "Unnamed Rule"}
</Text>
{rule_is_hard_stop && isActionable && (
<div className="flex items-center gap-1 pl-2">
<SvgShield className="h-4 w-4 text-status-error-03" />
<Text font="secondary-body" color="text-03">
Hard Stop
</Text>
</div>
)}
</div>
{/* Expanded content */}
{isExpanded && (
<Section
gap={0.75}
height="auto"
justifyContent="start"
alignItems="start"
className="pl-2"
>
{/* Explanation */}
{explanation && (
<div className={PROSE_CLASSES}>
<ReactMarkdown remarkPlugins={[remarkGfm]}>
{explanation}
</ReactMarkdown>
</div>
)}
{/* Evidence */}
{evidence && (
<Card padding="sm" rounding="sm" background="heavy">
<Text font="secondary-body" color="text-03" as="p">
Evidence:
</Text>
<div className={PROSE_CLASSES}>
<ReactMarkdown remarkPlugins={[remarkGfm]}>
{evidence}
</ReactMarkdown>
</div>
</Card>
)}
{/* Suggested action */}
{suggested_action && (
<div className="flex items-start gap-2">
<SvgAlertCircle className="h-4 w-4 text-status-warning-03 shrink-0 mt-0.5" />
<div className={PROSE_CLASSES}>
<ReactMarkdown remarkPlugins={[remarkGfm]}>
{suggested_action}
</ReactMarkdown>
</div>
</div>
)}
{/* Action buttons + notes */}
{(isActionable || isNeedsReview) && (
<div className="pt-4 border-t border-border-01 w-full">
<Section
gap={0.5}
height="auto"
justifyContent="start"
alignItems="start"
>
<Section
flexDirection="row"
gap={0.5}
height="auto"
justifyContent="start"
alignItems="center"
>
<div
className={cn(
currentAction === "VERIFIED" &&
"decision-toggle-green"
)}
>
<Button
variant="default"
prominence="secondary"
size="sm"
icon={SvgCheckCircle}
disabled={isSaving}
onClick={() => handleDecision("VERIFIED")}
>
Verify
</Button>
</div>
<Button
variant={
currentAction === "ISSUE" ? "danger" : "default"
}
prominence={
currentAction === "ISSUE" ? "primary" : "secondary"
}
size="sm"
icon={SvgAlertTriangle}
disabled={isSaving}
onClick={() => handleDecision("ISSUE")}
>
Issue
</Button>
<div
className={cn(
currentAction === "NOT_APPLICABLE" &&
"decision-toggle-gray"
)}
>
<Button
variant="default"
prominence="secondary"
size="sm"
disabled={isSaving}
onClick={() => handleDecision("NOT_APPLICABLE")}
>
N/A
</Button>
</div>
</Section>
<InputTextArea
placeholder="Notes (optional)"
value={notes}
onChange={(e) => setNotes(e.target.value)}
onBlur={handleNotesBlur}
rows={2}
/>
</Section>
</div>
)}
</Section>
)}
</Section>
</Card>
</div>
);
}

View File

@@ -0,0 +1,219 @@
"use client";
import { useState } from "react";
import useSWR from "swr";
import { Text, Card, Tag } from "@opal/components";
import { Button } from "@opal/components/buttons/button/components";
import { SvgExternalLink, SvgFileText, SvgX } from "@opal/icons";
import { errorHandlingFetcher } from "@/lib/fetcher";
import { IllustrationContent } from "@opal/layouts";
import SvgEmpty from "@opal/illustrations/empty";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import DocumentUpload from "@/app/proposal-review/components/DocumentUpload";
import type {
Proposal,
ProposalDocument,
ProposalStatus,
} from "@/app/proposal-review/types";
import type { TagColor } from "@opal/components";
// ---------------------------------------------------------------------------
// Status → Tag
// ---------------------------------------------------------------------------
const STATUS_TAG: Record<ProposalStatus, { color: TagColor; label: string }> = {
PENDING: { color: "gray", label: "Pending" },
IN_REVIEW: { color: "blue", label: "In Review" },
APPROVED: { color: "green", label: "Approved" },
CHANGES_REQUESTED: { color: "amber", label: "Changes Requested" },
REJECTED: { color: "amber", label: "Rejected" },
};
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
interface MetadataRowProps {
label: string;
value: string | string[] | undefined;
}
function MetadataRow({ label, value }: MetadataRowProps) {
if (!value) return null;
const display = Array.isArray(value) ? value.join(", ") : value;
return (
<div className="flex justify-between gap-2 py-1">
<Text font="secondary-body" color="text-03" nowrap>
{label}
</Text>
<div className="text-end">
<Text font="main-ui-body" color="text-04" as="span">
{display}
</Text>
</div>
</div>
);
}
// ---------------------------------------------------------------------------
// Props
// ---------------------------------------------------------------------------
interface ProposalInfoPanelProps {
proposal: Proposal;
}
// ---------------------------------------------------------------------------
// Component
// ---------------------------------------------------------------------------
export default function ProposalInfoPanel({
proposal,
}: ProposalInfoPanelProps) {
const { metadata, status, id: proposalId } = proposal;
const statusConfig = STATUS_TAG[status];
const [selectedDoc, setSelectedDoc] = useState<ProposalDocument | null>(null);
// Fetch documents
const {
data: documents,
isLoading: docsLoading,
mutate: mutateDocs,
} = useSWR<ProposalDocument[]>(
`/api/proposal-review/proposals/${proposalId}/documents`,
errorHandlingFetcher
);
return (
<div className="flex flex-col gap-4 h-full overflow-y-auto p-4">
{/* Proposal metadata card */}
<Card padding="md" border="solid" background="light">
<div className="flex flex-col gap-3">
<div className="flex items-center justify-between">
<Text font="main-ui-action" color="text-04">
Proposal Details
</Text>
<Tag title={statusConfig.label} color={statusConfig.color} />
</div>
<div className="flex flex-col">
{/* Jira key — link out if URL available */}
{metadata.jira_key && (
<div className="flex justify-between gap-2 py-1">
<Text font="secondary-body" color="text-03" nowrap>
Jira Key
</Text>
{metadata.link ? (
<Button
href={String(metadata.link)}
prominence="tertiary"
size="sm"
rightIcon={SvgExternalLink}
>
{String(metadata.jira_key)}
</Button>
) : (
<Text font="main-ui-body" color="text-04" as="span">
{metadata.jira_key}
</Text>
)}
</div>
)}
{Object.entries(metadata)
.filter(
([key]) =>
key !== "title" && key !== "link" && key !== "jira_key"
)
.map(([key, value]) => (
<MetadataRow key={key} label={key} value={value} />
))}
</div>
</div>
</Card>
{/* Documents section */}
<Card padding="md" border="solid" background="light">
<div className="flex flex-col gap-3">
<Text font="main-ui-action" color="text-04">
Documents
</Text>
{docsLoading && (
<div className="flex items-center justify-center py-4">
<SimpleLoader />
</div>
)}
{!docsLoading && (!documents || documents.length === 0) && (
<IllustrationContent
illustration={SvgEmpty}
title="No documents"
description="Upload a document to get started."
/>
)}
{documents && documents.length > 0 && (
<div className="flex flex-col gap-1">
{documents.map((doc) => (
<div
key={doc.id}
className="flex items-center gap-2 py-2 px-2 rounded-08 hover:bg-background-neutral-02 cursor-pointer"
onClick={() =>
setSelectedDoc(selectedDoc?.id === doc.id ? null : doc)
}
>
<SvgFileText className="h-4 w-4 text-text-03 shrink-0" />
<div className="flex-1 min-w-0 truncate">
<Text font="main-ui-body" color="text-04">
{doc.file_name}
</Text>
</div>
<div className="shrink-0">
<Tag title={doc.document_role} color="gray" size="sm" />
</div>
</div>
))}
</div>
)}
{/* Document text viewer */}
{selectedDoc && (
<Card padding="md" border="dashed" background="light">
<div className="flex flex-col gap-2">
<div className="flex items-center justify-between">
<Text font="secondary-action" color="text-03">
{selectedDoc.file_name}
</Text>
<Button
variant="default"
prominence="tertiary"
size="xs"
icon={SvgX}
onClick={() => setSelectedDoc(null)}
/>
</div>
<div className="max-h-[300px] overflow-y-auto rounded-08 bg-background-neutral-01 p-3">
{selectedDoc.extracted_text ? (
<Text font="secondary-mono" color="text-03" as="p">
{selectedDoc.extracted_text}
</Text>
) : (
<Text font="secondary-body" color="text-03" as="p">
No extracted text available for this document.
</Text>
)}
</div>
</div>
</Card>
)}
<DocumentUpload
proposalId={proposalId}
onUploadComplete={() => mutateDocs()}
/>
</div>
</Card>
</div>
);
}

View File

@@ -0,0 +1,382 @@
"use client";
import { useState, useMemo, useCallback } from "react";
import { useRouter } from "next/navigation";
import { Text, Tag, Table } from "@opal/components";
import { createTableColumns } from "@opal/components/table/columns";
import { IllustrationContent } from "@opal/layouts";
import SvgNoResult from "@opal/illustrations/no-result";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import InputSearch from "@/refresh-components/inputs/InputSearch";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import { Button } from "@opal/components/buttons/button/components";
import { useProposals } from "@/app/proposal-review/hooks/useProposals";
import type { Proposal, ProposalStatus } from "@/app/proposal-review/types";
import type { TagColor } from "@opal/components";
// ---------------------------------------------------------------------------
// Status configuration
// ---------------------------------------------------------------------------
const STATUS_TAG: Record<ProposalStatus, { color: TagColor; label: string }> = {
PENDING: { color: "gray", label: "Pending" },
IN_REVIEW: { color: "blue", label: "In Review" },
APPROVED: { color: "green", label: "Approved" },
CHANGES_REQUESTED: { color: "amber", label: "Changes Requested" },
REJECTED: { color: "amber", label: "Rejected" },
};
const STATUS_OPTIONS: { value: string; label: string }[] = [
{ value: "ALL", label: "All statuses" },
{ value: "PENDING", label: "Pending" },
{ value: "IN_REVIEW", label: "In Review" },
{ value: "APPROVED", label: "Approved" },
{ value: "CHANGES_REQUESTED", label: "Changes Requested" },
{ value: "REJECTED", label: "Rejected" },
];
// Keys that are used for fixed columns or are internal — not shown as dynamic columns
const RESERVED_KEYS = new Set([
"jira_key",
"title",
"link",
"key",
"status",
"project",
"project_name",
"issuetype",
"priority",
"created",
"updated",
"reporter",
"reporter_email",
"Rank",
"resolution",
"resolution_date",
"[CHART] Time in Status",
]);
// Jira statuses that mean "finished" — excluded by the default "Open" filter
const DONE_STATUSES = new Set(["Done", "Closed", "Resolved"]);
// Keys to show by default when no prior column visibility state exists
const DEFAULT_VISIBLE_KEYS = new Set([
"PI Name",
"Sponsor",
"Sponsor Deadline",
"Review Team",
]);
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
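/** Heuristic: treat values that start with YYYY-MM-DD as dates for locale formatting. */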
function isDateLike(value: string): boolean {
return /^\d{4}-\d{2}-\d{2}/.test(value);
}
function formatCellValue(value: string | string[] | undefined): string {
// `== null` covers both undefined and runtime nulls from the API; a strict
// `=== null` check would be flagged by TS since null isn't in the type.
if (value == null) return "--";
if (Array.isArray(value)) return value.join(", ");
return String(value);
}
// ---------------------------------------------------------------------------
// Component
// ---------------------------------------------------------------------------
const tc = createTableColumns<Proposal>();
export default function ProposalQueue() {
const router = useRouter();
const { proposals, isLoading, error, configMissing } = useProposals();
const [reviewFilter, setReviewFilter] = useState("ALL");
const [jiraStatusFilter, setJiraStatusFilter] = useState("OPEN");
const [searchQuery, setSearchQuery] = useState("");
// Discover unique Jira ticket statuses from the data
const jiraStatuses = useMemo(() => {
const statuses = new Set<string>();
for (const p of proposals) {
const s = p.metadata.status;
if (typeof s === "string" && s) statuses.add(s);
}
return Array.from(statuses).sort();
}, [proposals]);
// Discover all unique metadata keys across proposals (excluding reserved ones)
const dynamicKeys = useMemo(() => {
const keys = new Set<string>();
for (const p of proposals) {
for (const k of Object.keys(p.metadata)) {
if (!RESERVED_KEYS.has(k)) {
keys.add(k);
}
}
}
return Array.from(keys).sort();
}, [proposals]);
// Build columns: fixed (Jira Key, Title) + dynamic + fixed (Status) + actions
const columns = useMemo(() => {
const cols = [
tc.displayColumn({
id: "jira_key",
header: "Jira Key",
width: { weight: 10, minWidth: 100 },
cell: (row) => (
<Text font="main-ui-body" color="text-04" nowrap>
{row.metadata.jira_key ?? "--"}
</Text>
),
}),
tc.displayColumn({
id: "title",
header: "Title",
width: { weight: 25, minWidth: 150 },
cell: (row) => (
<Text font="main-ui-body" color="text-04">
{row.metadata.title ?? "Untitled"}
</Text>
),
}),
// Dynamic metadata columns
...dynamicKeys.map((key) =>
tc.displayColumn({
id: `meta_${key}`,
header: key,
width: { weight: 12, minWidth: 100 },
cell: (row) => {
const value = row.metadata[key];
// Render dates with locale formatting
if (typeof value === "string" && isDateLike(value)) {
return (
<Text font="main-ui-body" color="text-03" nowrap>
{new Date(value).toLocaleDateString()}
</Text>
);
}
return (
<Text font="main-ui-body" color="text-03" nowrap>
{formatCellValue(value)}
</Text>
);
},
})
),
tc.displayColumn({
id: "review_status",
header: "Review",
width: { weight: 10, minWidth: 120 },
cell: (row) => {
const statusConfig = STATUS_TAG[row.status];
return (
<Tag
title={statusConfig.label}
color={statusConfig.color}
size="sm"
/>
);
},
}),
tc.actions({ showColumnVisibility: true }),
];
return cols;
}, [dynamicKeys]);
// Load saved visibility from localStorage, falling back to defaults
const STORAGE_KEY = "proposal-review-queue-columns";
const initialColumnVisibility = useMemo(() => {
try {
const saved = localStorage.getItem(STORAGE_KEY);
if (saved) return JSON.parse(saved) as Record<string, boolean>;
} catch {
// ignore parse errors
}
// Default: show DEFAULT_VISIBLE_KEYS, hide the rest
const vis: Record<string, boolean> = {};
for (const key of dynamicKeys) {
vis[`meta_${key}`] = DEFAULT_VISIBLE_KEYS.has(key);
}
return vis;
}, [dynamicKeys]);
const handleColumnVisibilityChange = useCallback(
(visibility: Record<string, boolean>) => {
try {
localStorage.setItem(STORAGE_KEY, JSON.stringify(visibility));
} catch {
// localStorage full or unavailable — silently ignore
}
},
[]
);
// Filter proposals
const filteredProposals = useMemo(() => {
let result = proposals;
// Jira ticket status filter
if (jiraStatusFilter === "OPEN") {
result = result.filter((p) => {
const s =
typeof p.metadata.status === "string" ? p.metadata.status : "";
return !DONE_STATUSES.has(s);
});
} else if (jiraStatusFilter !== "ALL") {
result = result.filter((p) => p.metadata.status === jiraStatusFilter);
}
// Review status filter
if (reviewFilter !== "ALL") {
result = result.filter((p) => p.status === reviewFilter);
}
// Search filter
if (searchQuery.trim()) {
const q = searchQuery.toLowerCase();
result = result.filter((p) => {
const m = p.metadata;
return Object.values(m).some((v) => {
if (!v) return false;
const str = Array.isArray(v) ? v.join(" ") : String(v);
return str.toLowerCase().includes(q);
});
});
}
return result;
}, [proposals, jiraStatusFilter, reviewFilter, searchQuery]);
function handleRowClick(proposal: Proposal) {
router.push(`/proposal-review/proposals/${proposal.id}`);
}
if (isLoading) {
return (
<div className="flex items-center justify-center py-16">
<SimpleLoader className="h-8 w-8" />
</div>
);
}
if (error) {
return (
<div className="flex items-center justify-center py-16 px-4">
<IllustrationContent
illustration={SvgNoResult}
title="Failed to load proposals"
description="Please try refreshing the page."
/>
</div>
);
}
return (
<div className="flex flex-col gap-4">
{/* Filters row */}
<div className="flex items-center gap-4 flex-nowrap">
<div className="w-[280px] shrink-0">
<InputSearch
placeholder="Search proposals..."
value={searchQuery}
onChange={(e) => setSearchQuery(e.target.value)}
/>
</div>
<div className="flex items-center gap-2 shrink-0">
<Text font="secondary-action" color="text-03">
Ticket Status:
</Text>
<InputSelect
value={jiraStatusFilter}
onValueChange={setJiraStatusFilter}
>
<InputSelect.Trigger placeholder="Ticket Status" />
<InputSelect.Content>
<InputSelect.Group>
<InputSelect.Item value="ALL">All</InputSelect.Item>
<InputSelect.Item value="OPEN">Open</InputSelect.Item>
</InputSelect.Group>
<InputSelect.Separator />
<InputSelect.Group>
<InputSelect.Label>Jira Statuses</InputSelect.Label>
{jiraStatuses.map((s) => (
<InputSelect.Item key={s} value={s}>
{s}
</InputSelect.Item>
))}
</InputSelect.Group>
</InputSelect.Content>
</InputSelect>
</div>
<div className="flex items-center gap-2 shrink-0">
<Text font="secondary-action" color="text-03">
Review Status:
</Text>
<InputSelect value={reviewFilter} onValueChange={setReviewFilter}>
<InputSelect.Trigger placeholder="Review Status" />
<InputSelect.Content>
{STATUS_OPTIONS.map((opt) => (
<InputSelect.Item key={opt.value} value={opt.value}>
{opt.label}
</InputSelect.Item>
))}
</InputSelect.Content>
</InputSelect>
</div>
</div>
{/* Empty state — config missing */}
{filteredProposals.length === 0 && configMissing && (
<div className="flex flex-col items-center justify-center gap-4 py-12">
<IllustrationContent
illustration={SvgNoResult}
title="No proposals yet"
description="Configure a Jira connector in Settings to start seeing proposals."
/>
<Button
href="/admin/proposal-review/settings"
variant="default"
prominence="primary"
>
Go to Settings
</Button>
</div>
)}
{/* Empty state — filtered or no data */}
{filteredProposals.length === 0 && !configMissing && (
<div className="flex items-center justify-center py-12">
<IllustrationContent
illustration={SvgNoResult}
title="No proposals found"
description={
searchQuery ||
reviewFilter !== "ALL" ||
jiraStatusFilter !== "OPEN"
? "Try adjusting your search or filters."
: "Proposals from Jira will appear here once synced."
}
/>
</div>
)}
{/* Table — wrapper adds pointer cursor since onRowClick doesn't set it */}
{filteredProposals.length > 0 && (
<div className="[&_.tbl-row]:cursor-pointer [&_.tbl-row:hover_td]:bg-background-tint-02">
<Table
key={dynamicKeys.join(",")}
data={filteredProposals}
getRowId={(row) => row.id}
columns={columns}
initialColumnVisibility={initialColumnVisibility}
onColumnVisibilityChange={handleColumnVisibilityChange}
onRowClick={(row) => handleRowClick(row)}
/>
</div>
)}
</div>
);
}

View File

@@ -0,0 +1,110 @@
"use client";
import { useCallback } from "react";
import { Text, Button } from "@opal/components";
import { SvgArrowLeft } from "@opal/icons";
import { IllustrationContent } from "@opal/layouts";
import SvgNotFound from "@opal/illustrations/not-found";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import { useProposal } from "@/app/proposal-review/hooks/useProposal";
import { useProposalReviewContext } from "@/app/proposal-review/contexts/ProposalReviewContext";
import ProposalInfoPanel from "@/app/proposal-review/components/ProposalInfoPanel";
import ChecklistPanel from "@/app/proposal-review/components/ChecklistPanel";
import ReviewSidebar from "@/app/proposal-review/components/ReviewSidebar";
// ---------------------------------------------------------------------------
// Props
// ---------------------------------------------------------------------------
interface ProposalReviewProps {
proposalId: string;
}
// ---------------------------------------------------------------------------
// Component
// ---------------------------------------------------------------------------
export default function ProposalReview({ proposalId }: ProposalReviewProps) {
const { proposal, isLoading, error, mutate } = useProposal(proposalId);
useProposalReviewContext(); // ensure we're inside the provider
const handleDecisionSubmitted = useCallback(() => {
mutate();
}, [mutate]);
// --- Loading ---
if (isLoading) {
return (
<div className="flex items-center justify-center h-full w-full">
<SimpleLoader className="h-8 w-8" />
</div>
);
}
// --- Error / not found ---
if (error || !proposal) {
return (
<div className="flex flex-col items-center justify-center h-full w-full gap-4 p-8">
<IllustrationContent
illustration={SvgNotFound}
title="Proposal not found"
description="This proposal may have been removed or you may not have access."
/>
<Button
variant="default"
prominence="secondary"
href="/proposal-review"
>
Back to queue
</Button>
</div>
);
}
return (
<div className="flex flex-col h-full w-full">
{/* Top header */}
<div className="flex items-center gap-3 px-4 py-3 border-b border-border-01 shrink-0">
<Button
variant="default"
prominence="tertiary"
icon={SvgArrowLeft}
size="sm"
href="/proposal-review"
/>
<Text font="main-ui-action" color="text-04">
{proposal.metadata.title ?? "Untitled Proposal"}
</Text>
{proposal.metadata.jira_key && (
<Text font="secondary-body" color="text-03">
{proposal.metadata.jira_key}
</Text>
)}
</div>
{/* Three-panel layout */}
<div className="flex flex-1 min-h-0">
{/* Left panel: Proposal info */}
<div className="w-[400px] shrink-0 border-r border-border-01 overflow-y-auto">
<ProposalInfoPanel proposal={proposal} />
</div>
{/* Center panel: Checklist */}
<div className="flex-1 min-w-0 overflow-hidden">
<ChecklistPanel proposalId={proposalId} />
</div>
{/* Right panel: Review sidebar */}
<div className="w-[320px] shrink-0 border-l border-border-01 overflow-y-auto">
<ReviewSidebar
proposalId={proposalId}
proposalStatus={proposal.status}
existingDecisionNotes={proposal.decision_notes ?? undefined}
onDecisionSubmitted={handleDecisionSubmitted}
/>
</div>
</div>
</div>
);
}

View File

@@ -0,0 +1,67 @@
"use client";
import { memo } from "react";
import { usePathname } from "next/navigation";
import { SvgArrowLeft, SvgCheckSquare, SvgSettings } from "@opal/icons";
import { SidebarTab } from "@opal/components";
import * as SidebarLayouts from "@/layouts/sidebar-layouts";
import { useSidebarState, useSidebarFolded } from "@/layouts/sidebar-layouts";
import AccountPopover from "@/sections/sidebar/AccountPopover";
// ============================================================================
// Sidebar Content
// ============================================================================
const MemoizedSidebarContent = memo(function ProposalReviewSidebarContent() {
const pathname = usePathname();
const folded = useSidebarFolded();
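// Keep the tab highlighted on both the queue and individual proposal pages.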
const isProposalsActive =
pathname === "/proposal-review" ||
pathname.startsWith("/proposal-review/proposals");
return (
<>
<SidebarLayouts.Body scrollKey="proposal-review-sidebar">
<div className="flex flex-col gap-0.5">
<SidebarTab
icon={SvgCheckSquare}
folded={folded}
href="/proposal-review"
selected={isProposalsActive}
>
Proposals
</SidebarTab>
</div>
</SidebarLayouts.Body>
<SidebarLayouts.Footer>
<SidebarTab
icon={SvgSettings}
folded={folded}
href="/admin/proposal-review"
selected={pathname.startsWith("/admin/proposal-review")}
>
Settings
</SidebarTab>
<SidebarTab icon={SvgArrowLeft} folded={folded} href="/app">
Back to Onyx
</SidebarTab>
<AccountPopover folded={folded} />
</SidebarLayouts.Footer>
</>
);
});
// ============================================================================
// Sidebar (Main Export)
// ============================================================================
export default function ProposalReviewSidebar() {
const { folded, setFolded } = useSidebarState();
return (
<SidebarLayouts.Root folded={folded} onFoldChange={setFolded} foldable>
<MemoizedSidebarContent />
</SidebarLayouts.Root>
);
}

View File

@@ -0,0 +1,85 @@
"use client";
import { Text } from "@opal/components";
import { SvgCheckCircle, SvgAlertCircle } from "@opal/icons";
import { cn } from "@/lib/utils";
import type { ReviewRun } from "@/app/proposal-review/types";
interface ReviewProgressProps {
reviewStatus: ReviewRun;
}
export default function ReviewProgress({ reviewStatus }: ReviewProgressProps) {
const { total_rules, completed_rules, failed_rules, status } = reviewStatus;
const pct =
total_rules > 0 ? Math.round((completed_rules / total_rules) * 100) : 0;
const isRunning = status === "RUNNING" || status === "PENDING";
const isCompleted = status === "COMPLETED";
const isFailed = status === "FAILED";
const hasErrors = failed_rules > 0;
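// Bar color encodes outcome: red when the run failed, amber when completed
// with rule errors, green when clean, primary while still running.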
return (
<div className="flex items-center gap-2 flex-1 min-w-0">
<div
role="progressbar"
aria-valuenow={pct}
aria-valuemin={0}
aria-valuemax={100}
aria-label={`Review progress: ${completed_rules} of ${total_rules} rules`}
className={cn(
"h-2 flex-1 min-w-[80px] rounded-08 overflow-hidden",
isCompleted && !hasErrors
? "bg-theme-green-01"
: "bg-background-neutral-03"
)}
>
<div
className={cn(
"h-full rounded-08 transition-all duration-300",
isFailed
? "bg-status-error-03"
: isCompleted && hasErrors
? "bg-status-warning-03"
: isCompleted
? "bg-theme-green-01"
: "bg-theme-primary-03"
)}
style={{ width: `${pct}%` }}
/>
</div>
{isRunning && (
<Text font="secondary-body" color="text-03" nowrap>
{failed_rules > 0
? `${completed_rules}/${total_rules} (${failed_rules} failed)`
: `${completed_rules}/${total_rules}`}
</Text>
)}
{isCompleted && !hasErrors && (
<div className="flex items-center gap-1">
<SvgCheckCircle className="h-3.5 w-3.5 text-status-success-03" />
<Text font="secondary-body" color="text-03" nowrap>
{`${total_rules}/${total_rules}`}
</Text>
</div>
)}
{isCompleted && hasErrors && (
<div className="flex items-center gap-1">
<SvgAlertCircle className="h-3.5 w-3.5 text-status-warning-03" />
<Text font="secondary-body" color="text-03" nowrap>
{`${
total_rules - failed_rules
}/${total_rules} (${failed_rules} failed)`}
</Text>
</div>
)}
{isFailed && (
<div className="flex items-center gap-1">
<SvgAlertCircle className="h-3.5 w-3.5 text-status-error-03" />
<Text font="secondary-body" color="text-03" nowrap>
Failed
</Text>
</div>
)}
</div>
);
}

View File

@@ -0,0 +1,336 @@
"use client";
import { useMemo } from "react";
import { Text, Tag, Card } from "@opal/components";
import {
SvgAlertCircle,
SvgCheckCircle,
SvgAlertTriangle,
SvgShield,
} from "@opal/icons";
import { cn } from "@/lib/utils";
import { Section } from "@/layouts/general-layouts";
import { useFindings } from "@/app/proposal-review/hooks/useFindings";
import { useProposalReviewContext } from "@/app/proposal-review/contexts/ProposalReviewContext";
import DecisionPanel from "@/app/proposal-review/components/DecisionPanel";
import {
VERDICT_CONFIG,
type FindingsByCategory,
type ProposalStatus,
} from "@/app/proposal-review/types";
// ---------------------------------------------------------------------------
// Props
// ---------------------------------------------------------------------------
interface ReviewSidebarProps {
proposalId: string;
proposalStatus: ProposalStatus;
existingDecisionNotes?: string;
onDecisionSubmitted: () => void;
}
// ---------------------------------------------------------------------------
// Component
// ---------------------------------------------------------------------------
export default function ReviewSidebar({
proposalId,
proposalStatus,
existingDecisionNotes,
onDecisionSubmitted,
}: ReviewSidebarProps) {
const { viewingRunId, setFocusedFindingId } = useProposalReviewContext();
const { findings, findingsByCategory } = useFindings(
proposalId,
false,
viewingRunId
);
const stats = useMemo(() => {
const failCount = findings.filter((f) => f.verdict === "FAIL").length;
const flagCount = findings.filter((f) => f.verdict === "FLAG").length;
const passCount = findings.filter((f) => f.verdict === "PASS").length;
const naCount = findings.filter(
(f) => f.verdict === "NOT_APPLICABLE"
).length;
const needsReviewCount = findings.filter(
(f) => f.verdict === "NEEDS_REVIEW"
).length;
const hardStops = findings.filter(
(f) =>
f.rule_is_hard_stop && (f.verdict === "FAIL" || f.verdict === "FLAG")
);
// Derive unresolved from findingsByCategory so they appear in the
// same category-sorted order as the main checklist panel.
const unresolvedFindings = findingsByCategory.flatMap((group) =>
group.findings.filter(
(f) =>
(f.verdict === "FAIL" || f.verdict === "FLAG") && !f.decision_action
)
);
return {
failCount,
flagCount,
passCount,
naCount,
needsReviewCount,
hardStops,
unresolvedFindings,
total: findings.length,
};
}, [findings, findingsByCategory]);
if (findings.length === 0) {
return (
<div className="flex items-center justify-center h-full p-4">
<Text font="secondary-body" color="text-03">
Run a review to see results here.
</Text>
</div>
);
}
return (
<div className="flex flex-col gap-4 h-full overflow-y-auto p-4">
{/* Summary counts */}
<Card padding="md" border="solid" background="light">
<Section
gap={0.5}
height="auto"
justifyContent="start"
alignItems="start"
>
<Text font="main-ui-action" color="text-04">
Summary
</Text>
<div className="grid grid-cols-3 gap-2">
<SummaryCount
icon={SvgAlertCircle}
count={stats.failCount}
label="Failures"
iconClass="text-status-error-03"
/>
<SummaryCount
icon={SvgAlertTriangle}
count={stats.flagCount}
label="Flags"
iconClass="text-status-warning-03"
/>
<SummaryCount
icon={SvgCheckCircle}
count={stats.passCount}
label="Passes"
iconClass="text-status-success-03"
/>
</div>
</Section>
</Card>
{/* Progress by category */}
<Card padding="md" border="solid" background="light">
<Section
gap={0.5}
height="auto"
justifyContent="start"
alignItems="start"
>
<Text font="main-ui-action" color="text-04">
Progress
</Text>
{findingsByCategory.map((group) => (
<CategoryProgress key={group.category} group={group} />
))}
</Section>
</Card>
{/* Hard stops */}
{stats.hardStops.length > 0 && (
<Card padding="md" border="solid" background="heavy">
<Section
gap={0.5}
height="auto"
justifyContent="start"
alignItems="start"
>
<Section
flexDirection="row"
gap={0.5}
height="auto"
justifyContent="start"
alignItems="center"
>
<SvgShield className="h-4 w-4 text-status-error-03" />
<Text font="main-ui-action" color="text-04">
{`Hard Stops (${stats.hardStops.length})`}
</Text>
</Section>
{stats.hardStops.map((finding) => (
<div
key={finding.id}
role="button"
tabIndex={0}
className="flex items-center gap-2 py-1 px-2 w-full overflow-hidden rounded-08 hover:bg-background-neutral-02 cursor-pointer"
onClick={() => setFocusedFindingId(finding.id)}
onKeyDown={(e) => {
if (e.key === "Enter" || e.key === " ") {
e.preventDefault();
setFocusedFindingId(finding.id);
}
}}
>
<div className="min-w-0 truncate">
<Text font="secondary-body" color="text-03">
{finding.rule_name ?? "Unnamed Rule"}
</Text>
</div>
<div className="shrink-0">
{finding.decision_action ? (
<Tag
title={finding.decision_action}
color={
finding.decision_action === "VERIFIED"
? "green"
: "amber"
}
size="sm"
/>
) : (
<Tag title="Unresolved" color="amber" size="sm" />
)}
</div>
</div>
))}
</Section>
</Card>
)}
{/* Open flags / unresolved items */}
{stats.unresolvedFindings.length > 0 && (
<Card padding="md" border="solid" background="light">
<Section
gap={0.5}
height="auto"
justifyContent="start"
alignItems="start"
>
<Text font="main-ui-action" color="text-04">
{`Unresolved (${stats.unresolvedFindings.length})`}
</Text>
{stats.unresolvedFindings.map((finding) => (
<div
key={finding.id}
role="button"
tabIndex={0}
className="flex items-center gap-2 py-1 px-2 w-full overflow-hidden rounded-08 hover:bg-background-neutral-02 cursor-pointer"
onClick={() => setFocusedFindingId(finding.id)}
onKeyDown={(e) => {
if (e.key === "Enter" || e.key === " ") {
e.preventDefault();
setFocusedFindingId(finding.id);
}
}}
>
<div className="shrink-0">
<Tag
title={VERDICT_CONFIG[finding.verdict].label}
color={VERDICT_CONFIG[finding.verdict].color}
size="sm"
/>
</div>
<div className="min-w-0 truncate">
<Text font="secondary-body" color="text-03">
{finding.rule_name ?? "Unnamed Rule"}
</Text>
</div>
</div>
))}
</Section>
</Card>
)}
{/* Decision panel at the bottom */}
<DecisionPanel
proposalId={proposalId}
findings={findings}
proposalStatus={proposalStatus}
existingDecisionNotes={existingDecisionNotes}
onDecisionSubmitted={onDecisionSubmitted}
/>
</div>
);
}
// ---------------------------------------------------------------------------
// Summary count pill
// ---------------------------------------------------------------------------
interface SummaryCountProps {
icon: React.FunctionComponent<{ className?: string }>;
count: number;
label: string;
iconClass: string;
}
function SummaryCount({
icon: Icon,
count,
label,
iconClass,
}: SummaryCountProps) {
return (
<Section
gap={0.25}
height="auto"
padding={0.5}
alignItems="center"
justifyContent="center"
>
<Icon className={cn("h-5 w-5", iconClass)} />
<Text font="main-ui-action" color="text-04">
{String(count)}
</Text>
<Text font="secondary-body" color="text-03">
{label}
</Text>
</Section>
);
}
// ---------------------------------------------------------------------------
// Category progress row
// ---------------------------------------------------------------------------
interface CategoryProgressProps {
group: FindingsByCategory;
}
function CategoryProgress({ group }: CategoryProgressProps) {
const decidedCount = group.findings.filter(
(f) => f.decision_action !== null
).length;
const total = group.findings.length;
const allDone = decidedCount === total;
return (
<div className="flex items-center justify-between gap-2 py-1 w-full overflow-hidden">
<div className="min-w-0 truncate">
<Text font="secondary-body" color="text-03">
{group.category}
</Text>
</div>
<div className="flex items-center gap-1 shrink-0">
<Text font="secondary-body" color={allDone ? "text-01" : "text-03"}>
{`${decidedCount}/${total}`}
</Text>
{allDone && (
<SvgCheckCircle className="h-3.5 w-3.5 text-status-success-03" />
)}
</div>
</div>
);
}

View File

@@ -0,0 +1,52 @@
"use client";
import { useEffect } from "react";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import { useRulesets } from "@/app/proposal-review/hooks/useRulesets";
import { useProposalReviewContext } from "@/app/proposal-review/contexts/ProposalReviewContext";
import { Text } from "@opal/components";
export default function RulesetSelector() {
const { rulesets, defaultRuleset, isLoading } = useRulesets();
const { selectedRulesetId, setSelectedRulesetId } =
useProposalReviewContext();
// Auto-select the default ruleset on first load
useEffect(() => {
if (!selectedRulesetId && defaultRuleset) {
setSelectedRulesetId(defaultRuleset.id);
}
}, [defaultRuleset, selectedRulesetId, setSelectedRulesetId]);
if (isLoading) {
return (
<Text font="secondary-body" color="text-03">
Loading rulesets...
</Text>
);
}
if (rulesets.length === 0) {
return (
<Text font="secondary-body" color="text-03">
No rulesets available
</Text>
);
}
return (
<InputSelect
value={selectedRulesetId ?? undefined}
onValueChange={setSelectedRulesetId}
>
<InputSelect.Trigger placeholder="Select ruleset" />
<InputSelect.Content>
{rulesets.map((ruleset) => (
<InputSelect.Item key={ruleset.id} value={ruleset.id}>
{ruleset.name}
</InputSelect.Item>
))}
</InputSelect.Content>
</InputSelect>
);
}

View File

@@ -0,0 +1,146 @@
"use client";
import { useState } from "react";
import { Button, Text } from "@opal/components";
import { SvgHistory, SvgChevronDown } from "@opal/icons";
import { cn } from "@/lib/utils";
import Popover from "@/refresh-components/Popover";
import type { ReviewRun } from "@/app/proposal-review/types";
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
function formatRunDate(dateStr: string): string {
const date = new Date(dateStr);
return date.toLocaleDateString(undefined, {
month: "short",
day: "numeric",
hour: "numeric",
minute: "2-digit",
});
}
function statusDotColor(run: ReviewRun): string {
if (run.status === "RUNNING" || run.status === "PENDING") {
return "bg-theme-primary-03";
}
if (run.status === "FAILED") {
return "bg-status-error-03";
}
if (run.failed_rules > 0) {
return "bg-status-warning-03";
}
return "bg-status-success-03";
}
function statusLabel(run: ReviewRun): string {
if (run.status === "RUNNING") return "Running";
if (run.status === "PENDING") return "Pending";
if (run.status === "FAILED") return "Failed";
if (run.failed_rules > 0) {
return `${run.total_rules - run.failed_rules}/${run.total_rules}`;
}
return `${run.total_rules} rules`;
}
// ---------------------------------------------------------------------------
// Props
// ---------------------------------------------------------------------------
interface RunHistorySelectorProps {
runs: ReviewRun[];
selectedRunId: string | null;
onSelectRun: (runId: string | null) => void;
}
// ---------------------------------------------------------------------------
// Component
// ---------------------------------------------------------------------------
export default function RunHistorySelector({
runs,
selectedRunId,
onSelectRun,
}: RunHistorySelectorProps) {
const [open, setOpen] = useState(false);
const latestRun = runs[0] as ReviewRun | undefined;
if (!latestRun) return null;
const isViewingLatest =
selectedRunId === null || selectedRunId === latestRun.id;
const selectedRun = isViewingLatest
? latestRun
: runs.find((r) => r.id === selectedRunId) ?? latestRun;
return (
<div className="flex items-center px-4 py-1.5 border-b border-border-01 shrink-0">
<Popover open={open} onOpenChange={setOpen}>
<Popover.Trigger asChild>
<button className="flex items-center gap-1.5 rounded-08 px-1.5 py-0.5 hover:bg-background-neutral-02 transition-colors">
<SvgHistory className="h-3.5 w-3.5 text-text-02" />
<Text font="secondary-body" color="text-03">
{isViewingLatest
? `Latest run \u00b7 ${formatRunDate(selectedRun.created_at)}`
: formatRunDate(selectedRun.created_at)}
</Text>
<SvgChevronDown className="h-3 w-3 text-text-02" />
</button>
</Popover.Trigger>
<Popover.Content width="xl" align="start" sideOffset={4}>
<Popover.Menu>
{runs.map((run, index) => {
const isSelected =
run.id === selectedRun.id &&
(isViewingLatest ? index === 0 : true);
return (
<Popover.Close asChild key={run.id}>
<button
className={cn(
"flex items-center gap-2 w-full px-2 py-1.5 rounded-08 text-left",
"hover:bg-background-neutral-02 transition-colors",
isSelected && "bg-background-neutral-02"
)}
onClick={() => onSelectRun(index === 0 ? null : run.id)}
>
<div
className={cn(
"h-2 w-2 rounded-full shrink-0",
statusDotColor(run)
)}
/>
<div className="flex-1 min-w-0">
<Text font="secondary-action" color="text-04">
{index === 0
? `Latest \u00b7 ${formatRunDate(run.created_at)}`
: formatRunDate(run.created_at)}
</Text>
</div>
<Text font="secondary-body" color="text-03">
{statusLabel(run)}
</Text>
</button>
</Popover.Close>
);
})}
</Popover.Menu>
</Popover.Content>
</Popover>
{!isViewingLatest && (
<div className="ml-2">
<Button
variant="default"
prominence="tertiary"
size="2xs"
onClick={() => onSelectRun(null)}
>
Back to latest
</Button>
</div>
)}
</div>
);
}
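The component is fully controlled: `selectedRunId === null` means "viewing the latest run", and selecting the top entry reports `null` rather than its ID. Wiring it to the context and runs hook from this diff might look like the following sketch (the surrounding component shell is assumed):

// Sketch: connect RunHistorySelector to context state and fetched runs.
const { runs } = useReviewRuns(proposalId); // most recent first
const { viewingRunId, setViewingRunId } = useProposalReviewContext();

return (
  <RunHistorySelector
    runs={runs}
    selectedRunId={viewingRunId} // null = latest
    onSelectRun={setViewingRunId}
  />
);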

View File

@@ -0,0 +1,74 @@
/* Decision toggle button color overrides.
The Opal Interactive system doesn't support custom color variants, so we
override via scoped CSS classes. All colors use design-system CSS variables
that auto-switch for dark mode — no `dark:` modifier needed.
Specificity is intentionally 0,6,0 for hover states to beat the base
Interactive rules (0,5,0). */
/* ---------------------------------------------------------------------------
Green — Approve / Verify
--------------------------------------------------------------------------- */
.decision-toggle-green
.interactive[data-interactive-variant][data-interactive-prominence] {
background-color: var(--status-success-00);
--interactive-foreground: var(--status-success-05);
--interactive-foreground-icon: var(--status-success-05);
}
.decision-toggle-green
.interactive[data-interactive-variant][data-interactive-prominence]:hover:not(
[data-disabled]
) {
background-color: var(--status-success-01);
}
.decision-toggle-green
.interactive[data-interactive-variant][data-interactive-prominence]:active:not(
[data-disabled]
) {
background-color: var(--status-success-02);
}
/* ---------------------------------------------------------------------------
Yellow — Request Changes / Flag
--------------------------------------------------------------------------- */
.decision-toggle-yellow
.interactive[data-interactive-variant][data-interactive-prominence] {
background-color: var(--status-warning-00);
--interactive-foreground: var(--status-warning-05);
--interactive-foreground-icon: var(--status-warning-05);
}
.decision-toggle-yellow
.interactive[data-interactive-variant][data-interactive-prominence]:hover:not(
[data-disabled]
) {
background-color: var(--status-warning-01);
}
.decision-toggle-yellow
.interactive[data-interactive-variant][data-interactive-prominence]:active:not(
[data-disabled]
) {
background-color: var(--status-warning-02);
}
/* ---------------------------------------------------------------------------
Gray — N/A
--------------------------------------------------------------------------- */
.decision-toggle-gray
.interactive[data-interactive-variant][data-interactive-prominence] {
background-color: var(--background-neutral-03);
--interactive-foreground: var(--text-04);
--interactive-foreground-icon: var(--text-04);
}
.decision-toggle-gray
.interactive[data-interactive-variant][data-interactive-prominence]:hover:not(
[data-disabled]
) {
background-color: var(--background-neutral-04);
}
.decision-toggle-gray
.interactive[data-interactive-variant][data-interactive-prominence]:active:not(
[data-disabled]
) {
background-color: var(--background-neutral-04);
}
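These classes are scoping wrappers: any Opal Interactive element rendered inside one picks up the override. A usage sketch, assuming the Opal Button renders an `.interactive` element carrying both data attributes the selectors target:

{/* The wrapper scopes the green override to this one button. */}
<div className="decision-toggle-green">
  <Button variant="default" prominence="tertiary" size="2xs">
    Approve
  </Button>
</div>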

View File

@@ -0,0 +1,120 @@
"use client";
import {
createContext,
useContext,
useState,
useCallback,
useMemo,
type ReactNode,
} from "react";
// ---------------------------------------------------------------------------
// Types
// ---------------------------------------------------------------------------
interface ProposalReviewContextValue {
/** Currently selected ruleset ID for the review run. */
selectedRulesetId: string | null;
setSelectedRulesetId: (id: string) => void;
/** Whether an AI review is currently running. */
isReviewRunning: boolean;
setIsReviewRunning: (running: boolean) => void;
/** ID of the current review run (set after triggering). */
currentReviewRunId: string | null;
setCurrentReviewRunId: (id: string | null) => void;
/** ID of a prior run the user is viewing (null = latest). */
viewingRunId: string | null;
setViewingRunId: (id: string | null) => void;
/** Finding ID to scroll to and highlight in ChecklistPanel. */
focusedFindingId: string | null;
setFocusedFindingId: (id: string | null) => void;
/** Reset review state (for starting a new review). */
resetReviewState: () => void;
}
// ---------------------------------------------------------------------------
// Context
// ---------------------------------------------------------------------------
const ProposalReviewContext = createContext<ProposalReviewContextValue | null>(
null
);
// ---------------------------------------------------------------------------
// Provider
// ---------------------------------------------------------------------------
interface ProposalReviewProviderProps {
children: ReactNode;
}
export function ProposalReviewProvider({
children,
}: ProposalReviewProviderProps) {
const [selectedRulesetId, setSelectedRulesetId] = useState<string | null>(
null
);
const [isReviewRunning, setIsReviewRunning] = useState(false);
const [currentReviewRunId, setCurrentReviewRunId] = useState<string | null>(
null
);
const [viewingRunId, setViewingRunId] = useState<string | null>(null);
const [focusedFindingId, setFocusedFindingId] = useState<string | null>(null);
const resetReviewState = useCallback(() => {
setIsReviewRunning(false);
setCurrentReviewRunId(null);
setViewingRunId(null);
setFocusedFindingId(null);
}, []);
const value = useMemo<ProposalReviewContextValue>(
() => ({
selectedRulesetId,
setSelectedRulesetId,
isReviewRunning,
setIsReviewRunning,
currentReviewRunId,
setCurrentReviewRunId,
viewingRunId,
setViewingRunId,
focusedFindingId,
setFocusedFindingId,
resetReviewState,
}),
[
selectedRulesetId,
isReviewRunning,
currentReviewRunId,
viewingRunId,
focusedFindingId,
resetReviewState,
]
);
return (
<ProposalReviewContext.Provider value={value}>
{children}
</ProposalReviewContext.Provider>
);
}
// ---------------------------------------------------------------------------
// Hook
// ---------------------------------------------------------------------------
export function useProposalReviewContext() {
const context = useContext(ProposalReviewContext);
if (!context) {
throw new Error(
"useProposalReviewContext must be used within a ProposalReviewProvider"
);
}
return context;
}
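A typical consumer pairs `resetReviewState` with kicking off a new run; note the reset deliberately leaves `selectedRulesetId` untouched, so the user's ruleset choice survives across runs. A sketch of such a handler (the POST route and request body are assumptions; only GET-style endpoints appear elsewhere in this diff):

// Hypothetical "start review" handler built on the context API.
const { resetReviewState, setIsReviewRunning, setCurrentReviewRunId } =
  useProposalReviewContext();

async function startReview(proposalId: string, rulesetId: string) {
  resetReviewState(); // clears run/finding focus, keeps the ruleset
  setIsReviewRunning(true);
  const res = await fetch(
    // Assumed trigger endpoint, modeled on the GET routes in this diff:
    `/api/proposal-review/proposals/${proposalId}/review-runs`,
    {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ ruleset_id: rulesetId }),
    }
  );
  const run = await res.json();
  setCurrentReviewRunId(run.id);
}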

View File

@@ -0,0 +1,65 @@
"use client";
import { useMemo } from "react";
import useSWR from "swr";
import { errorHandlingFetcher } from "@/lib/fetcher";
import type { Finding, FindingsByCategory } from "@/app/proposal-review/types";
const NATURAL_SORT_OPTIONS = { numeric: true, sensitivity: "base" } as const;
export function useFindings(
proposalId: string | null,
polling = false,
reviewRunId: string | null = null
) {
const url = proposalId
? `/api/proposal-review/proposals/${proposalId}/findings${
reviewRunId ? `?review_run_id=${reviewRunId}` : ""
}`
: null;
const { data, error, isLoading, mutate } = useSWR<Finding[]>(
url,
errorHandlingFetcher,
{
refreshInterval: polling ? 3000 : 0,
revalidateOnFocus: false,
}
);
const findings = useMemo(() => data ?? [], [data]);
const findingsByCategory = useMemo(() => {
const result: FindingsByCategory[] = [];
const categoryMap = new Map<string, Finding[]>();
for (const finding of findings) {
const cat = finding.rule_category ?? "Uncategorized";
const existing = categoryMap.get(cat);
if (existing) {
existing.push(finding);
} else {
categoryMap.set(cat, [finding]);
}
}
categoryMap.forEach((catFindings, category) => {
result.push({ category, findings: catFindings });
});
// Natural sort so "IR 2" comes before "IR 10"
result.sort((a, b) =>
a.category.localeCompare(b.category, undefined, NATURAL_SORT_OPTIONS)
);
return result;
}, [findings]);
return {
findings,
findingsByCategory,
error,
isLoading,
mutate,
};
}
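The `numeric: true` collation option is what makes category names with embedded numbers sort the way humans expect. A quick self-contained check:

// With numeric collation, "IR 2" precedes "IR 10"; a plain code-point
// compare would put "IR 10" first because "1" sorts before "2".
["IR 10", "IR 2"].sort((a, b) =>
  a.localeCompare(b, undefined, { numeric: true, sensitivity: "base" })
);
// => ["IR 2", "IR 10"]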

View File

@@ -0,0 +1,19 @@
"use client";
import useSWR from "swr";
import { errorHandlingFetcher } from "@/lib/fetcher";
import type { Proposal } from "@/app/proposal-review/types";
export function useProposal(proposalId: string | null) {
const { data, error, isLoading, mutate } = useSWR<Proposal>(
proposalId ? `/api/proposal-review/proposals/${proposalId}` : null,
errorHandlingFetcher
);
return {
proposal: data ?? null,
error,
isLoading,
mutate,
};
}

View File

@@ -0,0 +1,29 @@
"use client";
import useSWR from "swr";
import { errorHandlingFetcher } from "@/lib/fetcher";
import type { Proposal } from "@/app/proposal-review/types";
const PROPOSALS_URL = "/api/proposal-review/proposals";
interface ProposalListResponse {
proposals: Proposal[];
total_count: number;
config_missing: boolean;
}
export function useProposals() {
const { data, error, isLoading, mutate } = useSWR<ProposalListResponse>(
PROPOSALS_URL,
errorHandlingFetcher
);
return {
proposals: data?.proposals ?? [],
totalCount: data?.total_count ?? 0,
configMissing: data?.config_missing ?? false,
error,
isLoading,
mutate,
};
}
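The `config_missing` flag lets the page distinguish "no proposals yet" from "the proposal source isn't configured". A consuming sketch (the empty-state copy and `ProposalList` component are hypothetical):

const { proposals, configMissing, isLoading } = useProposals();

if (isLoading) return null;
if (configMissing) {
  // Assumed UX: tell an admin the integration needs configuration.
  return (
    <Text font="secondary-body" color="text-03">
      Proposal review is not configured.
    </Text>
  );
}
return <ProposalList proposals={proposals} />; // hypothetical list component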

View File

@@ -0,0 +1,26 @@
"use client";
import useSWR from "swr";
import { errorHandlingFetcher } from "@/lib/fetcher";
import type { ReviewRun } from "@/app/proposal-review/types";
/**
* Fetches the list of review runs for a proposal, most recent first.
 * Callers can revalidate via the returned `mutate` (e.g. after triggering a new run).
*/
export function useReviewRuns(proposalId: string | null) {
const { data, error, isLoading, mutate } = useSWR<ReviewRun[]>(
proposalId
? `/api/proposal-review/proposals/${proposalId}/review-runs`
: null,
errorHandlingFetcher,
{ revalidateOnFocus: false }
);
return {
runs: data ?? [],
error,
isLoading,
mutate,
};
}
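Because focus revalidation is disabled, callers refresh the run list explicitly once a new run has been triggered, via the returned `mutate`:

// Sketch: after the POST that starts a run resolves (see the context
// example above), re-fetch so the new run appears at runs[0].
const { runs, mutate } = useReviewRuns(proposalId);
const handleRunTriggered = () => void mutate();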

View File

@@ -0,0 +1,36 @@
"use client";
import useSWR from "swr";
import { errorHandlingFetcher } from "@/lib/fetcher";
import type { ReviewRun } from "@/app/proposal-review/types";
/**
* Polls the review status endpoint every 2.5 seconds while a review is running.
* Stops polling once the status is COMPLETED or FAILED.
*
* The backend returns a full ReviewRunResponse (mapped to ReviewRun on the
* frontend). Only a subset of fields (status, total_rules, completed_rules)
* is typically consumed by callers.
*/
export function useReviewStatus(
proposalId: string | null,
isReviewRunning: boolean
) {
const { data, error, isLoading, mutate } = useSWR<ReviewRun>(
proposalId
? `/api/proposal-review/proposals/${proposalId}/review-status`
: null,
errorHandlingFetcher,
{
refreshInterval: isReviewRunning ? 2500 : 0,
revalidateOnFocus: false,
}
);
return {
reviewStatus: data ?? null,
error,
isLoading,
mutate,
};
}
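Callers own the `isReviewRunning` flag, so something has to flip it off when the run finishes. A sketch, using the terminal statuses named in the docstring:

// Stop polling once the run reaches a terminal status.
const { isReviewRunning, setIsReviewRunning } = useProposalReviewContext();
const { reviewStatus } = useReviewStatus(proposalId, isReviewRunning);

useEffect(() => {
  if (
    reviewStatus &&
    (reviewStatus.status === "COMPLETED" || reviewStatus.status === "FAILED")
  ) {
    setIsReviewRunning(false);
  }
}, [reviewStatus, setIsReviewRunning]);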

Some files were not shown because too many files have changed in this diff.