Compare commits

..

4 Commits
argus ... edge

Author SHA1 Message Date
Jamison Lahman
8a517c4f10 fix(mcp): route OAuth callback to web server instead of MCP server (#10071) 2026-04-10 15:11:46 -07:00
Jamison Lahman
6959d851ea fix(mcp): prevent masked OAuth credentials from being stored on re-auth (#10066) 2026-04-10 21:30:21 +00:00
dependabot[bot]
6a2550fc2d chore(deps): bump lodash from 4.17.23 to 4.18.1 in /web (#9901)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Jamison Lahman <jamison@lahman.dev>
2026-04-10 19:43:34 +00:00
Nikolas Garza
b1cc0c2bf9 fix(scim): add advisory lock to prevent seat limit race condition (#10048) 2026-04-10 18:50:24 +00:00
86 changed files with 228 additions and 14237 deletions

View File

@@ -1,541 +0,0 @@
"""add proposal review tables
Revision ID: 61ea78857c97
Revises: c7bf5721733e
Create Date: 2026-04-09 10:00:00.000000
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
import fastapi_users_db_sqlalchemy
# revision identifiers, used by Alembic.
revision = "61ea78857c97"  # unique id of this migration
down_revision = "c7bf5721733e"  # parent revision this migration builds on
branch_labels: str | None = None  # not part of a named Alembic branch
depends_on: str | None = None  # no cross-branch dependency
def upgrade() -> None:
    """Create the Proposal Review (Argus) schema.

    Tables are created parents-first so that every foreign-key target
    exists before the table that references it:
    ruleset -> rule, proposal -> run -> finding -> per-finding decision,
    plus per-proposal decision/document/audit-log tables and one config
    row per tenant.

    NOTE(review): id columns default to gen_random_uuid(), which is
    built in on PostgreSQL 13+; earlier versions need the pgcrypto
    extension — confirm the minimum supported server version.
    """
    # -- proposal_review_ruleset --
    # A tenant-scoped, named collection of review rules.
    op.create_table(
        "proposal_review_ruleset",
        sa.Column(
            "id",
            postgresql.UUID(as_uuid=True),
            server_default=sa.text("gen_random_uuid()"),
            nullable=False,
        ),
        sa.Column("tenant_id", sa.Text(), nullable=False),
        sa.Column("name", sa.Text(), nullable=False),
        sa.Column("description", sa.Text(), nullable=True),
        sa.Column(
            "is_default",
            sa.Boolean(),
            server_default=sa.text("false"),
            nullable=False,
        ),
        sa.Column(
            "is_active",
            sa.Boolean(),
            server_default=sa.text("true"),
            nullable=False,
        ),
        # GUID type matches the fastapi-users "user" table's primary key.
        sa.Column(
            "created_by",
            fastapi_users_db_sqlalchemy.generics.GUID(),
            nullable=True,
        ),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.ForeignKeyConstraint(["created_by"], ["user.id"]),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(
        "ix_proposal_review_ruleset_tenant_id",
        "proposal_review_ruleset",
        ["tenant_id"],
    )
    # -- proposal_review_rule --
    # One rule inside a ruleset; deleted together with its ruleset (CASCADE).
    op.create_table(
        "proposal_review_rule",
        sa.Column(
            "id",
            postgresql.UUID(as_uuid=True),
            server_default=sa.text("gen_random_uuid()"),
            nullable=False,
        ),
        sa.Column(
            "ruleset_id",
            postgresql.UUID(as_uuid=True),
            nullable=False,
        ),
        sa.Column("name", sa.Text(), nullable=False),
        sa.Column("description", sa.Text(), nullable=True),
        sa.Column("category", sa.Text(), nullable=True),
        sa.Column("rule_type", sa.Text(), nullable=False),
        sa.Column(
            "rule_intent",
            sa.Text(),
            server_default=sa.text("'CHECK'"),
            nullable=False,
        ),
        sa.Column("prompt_template", sa.Text(), nullable=False),
        sa.Column(
            "source",
            sa.Text(),
            server_default=sa.text("'MANUAL'"),
            nullable=False,
        ),
        sa.Column("authority", sa.Text(), nullable=True),
        sa.Column(
            "is_hard_stop",
            sa.Boolean(),
            server_default=sa.text("false"),
            nullable=False,
        ),
        sa.Column(
            "priority",
            sa.Integer(),
            server_default=sa.text("0"),
            nullable=False,
        ),
        sa.Column(
            "is_active",
            sa.Boolean(),
            server_default=sa.text("true"),
            nullable=False,
        ),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.ForeignKeyConstraint(
            ["ruleset_id"],
            ["proposal_review_ruleset.id"],
            ondelete="CASCADE",
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(
        "ix_proposal_review_rule_ruleset_id",
        "proposal_review_rule",
        ["ruleset_id"],
    )
    # -- proposal_review_proposal --
    # The reviewed proposal; at most one row per (document_id, tenant_id).
    op.create_table(
        "proposal_review_proposal",
        sa.Column(
            "id",
            postgresql.UUID(as_uuid=True),
            server_default=sa.text("gen_random_uuid()"),
            nullable=False,
        ),
        sa.Column("document_id", sa.Text(), nullable=False),
        sa.Column("tenant_id", sa.Text(), nullable=False),
        sa.Column(
            "status",
            sa.Text(),
            server_default=sa.text("'PENDING'"),
            nullable=False,
        ),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("document_id", "tenant_id"),
    )
    op.create_index(
        "ix_proposal_review_proposal_tenant_id",
        "proposal_review_proposal",
        ["tenant_id"],
    )
    op.create_index(
        "ix_proposal_review_proposal_document_id",
        "proposal_review_proposal",
        ["document_id"],
    )
    op.create_index(
        "ix_proposal_review_proposal_status",
        "proposal_review_proposal",
        ["status"],
    )
    # -- proposal_review_run --
    # One execution of a ruleset against a proposal. The ruleset FK has
    # no ondelete, so a ruleset with recorded runs cannot be deleted.
    op.create_table(
        "proposal_review_run",
        sa.Column(
            "id",
            postgresql.UUID(as_uuid=True),
            server_default=sa.text("gen_random_uuid()"),
            nullable=False,
        ),
        sa.Column(
            "proposal_id",
            postgresql.UUID(as_uuid=True),
            nullable=False,
        ),
        sa.Column(
            "ruleset_id",
            postgresql.UUID(as_uuid=True),
            nullable=False,
        ),
        sa.Column(
            "triggered_by",
            fastapi_users_db_sqlalchemy.generics.GUID(),
            nullable=False,
        ),
        sa.Column(
            "status",
            sa.Text(),
            server_default=sa.text("'PENDING'"),
            nullable=False,
        ),
        sa.Column("total_rules", sa.Integer(), nullable=False),
        sa.Column(
            "completed_rules",
            sa.Integer(),
            server_default=sa.text("0"),
            nullable=False,
        ),
        sa.Column("started_at", sa.DateTime(timezone=True), nullable=True),
        sa.Column("completed_at", sa.DateTime(timezone=True), nullable=True),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.ForeignKeyConstraint(
            ["proposal_id"],
            ["proposal_review_proposal.id"],
            ondelete="CASCADE",
        ),
        sa.ForeignKeyConstraint(
            ["ruleset_id"],
            ["proposal_review_ruleset.id"],
        ),
        sa.ForeignKeyConstraint(["triggered_by"], ["user.id"]),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(
        "ix_proposal_review_run_proposal_id",
        "proposal_review_run",
        ["proposal_id"],
    )
    # -- proposal_review_finding --
    # One rule verdict produced by a run; cascades from proposal, rule, and run.
    op.create_table(
        "proposal_review_finding",
        sa.Column(
            "id",
            postgresql.UUID(as_uuid=True),
            server_default=sa.text("gen_random_uuid()"),
            nullable=False,
        ),
        sa.Column(
            "proposal_id",
            postgresql.UUID(as_uuid=True),
            nullable=False,
        ),
        sa.Column(
            "rule_id",
            postgresql.UUID(as_uuid=True),
            nullable=False,
        ),
        sa.Column(
            "review_run_id",
            postgresql.UUID(as_uuid=True),
            nullable=False,
        ),
        sa.Column("verdict", sa.Text(), nullable=False),
        sa.Column("confidence", sa.Text(), nullable=True),
        sa.Column("evidence", sa.Text(), nullable=True),
        sa.Column("explanation", sa.Text(), nullable=True),
        sa.Column("suggested_action", sa.Text(), nullable=True),
        sa.Column("llm_model", sa.Text(), nullable=True),
        sa.Column("llm_tokens_used", sa.Integer(), nullable=True),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.ForeignKeyConstraint(
            ["proposal_id"],
            ["proposal_review_proposal.id"],
            ondelete="CASCADE",
        ),
        sa.ForeignKeyConstraint(
            ["rule_id"],
            ["proposal_review_rule.id"],
            ondelete="CASCADE",
        ),
        sa.ForeignKeyConstraint(
            ["review_run_id"],
            ["proposal_review_run.id"],
            ondelete="CASCADE",
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(
        "ix_proposal_review_finding_proposal_id",
        "proposal_review_finding",
        ["proposal_id"],
    )
    op.create_index(
        "ix_proposal_review_finding_review_run_id",
        "proposal_review_finding",
        ["review_run_id"],
    )
    # -- proposal_review_decision (per-finding) --
    # Officer's decision on a single finding; UNIQUE(finding_id) enforces
    # at most one decision per finding.
    op.create_table(
        "proposal_review_decision",
        sa.Column(
            "id",
            postgresql.UUID(as_uuid=True),
            server_default=sa.text("gen_random_uuid()"),
            nullable=False,
        ),
        sa.Column(
            "finding_id",
            postgresql.UUID(as_uuid=True),
            nullable=False,
        ),
        sa.Column(
            "officer_id",
            fastapi_users_db_sqlalchemy.generics.GUID(),
            nullable=False,
        ),
        sa.Column("action", sa.Text(), nullable=False),
        sa.Column("notes", sa.Text(), nullable=True),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.ForeignKeyConstraint(
            ["finding_id"],
            ["proposal_review_finding.id"],
            ondelete="CASCADE",
        ),
        sa.ForeignKeyConstraint(["officer_id"], ["user.id"]),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("finding_id"),
    )
    # -- proposal_review_proposal_decision --
    # Officer's overall decision on the proposal, with Jira sync state.
    op.create_table(
        "proposal_review_proposal_decision",
        sa.Column(
            "id",
            postgresql.UUID(as_uuid=True),
            server_default=sa.text("gen_random_uuid()"),
            nullable=False,
        ),
        sa.Column(
            "proposal_id",
            postgresql.UUID(as_uuid=True),
            nullable=False,
        ),
        sa.Column(
            "officer_id",
            fastapi_users_db_sqlalchemy.generics.GUID(),
            nullable=False,
        ),
        sa.Column("decision", sa.Text(), nullable=False),
        sa.Column("notes", sa.Text(), nullable=True),
        sa.Column(
            "jira_synced",
            sa.Boolean(),
            server_default=sa.text("false"),
            nullable=False,
        ),
        sa.Column("jira_synced_at", sa.DateTime(timezone=True), nullable=True),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.ForeignKeyConstraint(
            ["proposal_id"],
            ["proposal_review_proposal.id"],
            ondelete="CASCADE",
        ),
        sa.ForeignKeyConstraint(["officer_id"], ["user.id"]),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(
        "ix_proposal_review_proposal_decision_proposal_id",
        "proposal_review_proposal_decision",
        ["proposal_id"],
    )
    # -- proposal_review_document --
    # An uploaded file attached to a proposal, plus its extracted text.
    op.create_table(
        "proposal_review_document",
        sa.Column(
            "id",
            postgresql.UUID(as_uuid=True),
            server_default=sa.text("gen_random_uuid()"),
            nullable=False,
        ),
        sa.Column(
            "proposal_id",
            postgresql.UUID(as_uuid=True),
            nullable=False,
        ),
        sa.Column("file_name", sa.Text(), nullable=False),
        sa.Column("file_type", sa.Text(), nullable=True),
        sa.Column("file_store_id", sa.Text(), nullable=True),
        sa.Column("extracted_text", sa.Text(), nullable=True),
        sa.Column("document_role", sa.Text(), nullable=False),
        sa.Column(
            "uploaded_by",
            fastapi_users_db_sqlalchemy.generics.GUID(),
            nullable=True,
        ),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.ForeignKeyConstraint(
            ["proposal_id"],
            ["proposal_review_proposal.id"],
            ondelete="CASCADE",
        ),
        sa.ForeignKeyConstraint(["uploaded_by"], ["user.id"]),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(
        "ix_proposal_review_document_proposal_id",
        "proposal_review_document",
        ["proposal_id"],
    )
    # -- proposal_review_audit_log --
    # Append-only action log per proposal; free-form JSONB details.
    op.create_table(
        "proposal_review_audit_log",
        sa.Column(
            "id",
            postgresql.UUID(as_uuid=True),
            server_default=sa.text("gen_random_uuid()"),
            nullable=False,
        ),
        sa.Column(
            "proposal_id",
            postgresql.UUID(as_uuid=True),
            nullable=False,
        ),
        sa.Column(
            "user_id",
            fastapi_users_db_sqlalchemy.generics.GUID(),
            nullable=True,
        ),
        sa.Column("action", sa.Text(), nullable=False),
        sa.Column("details", postgresql.JSONB(), nullable=True),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.ForeignKeyConstraint(
            ["proposal_id"],
            ["proposal_review_proposal.id"],
            ondelete="CASCADE",
        ),
        sa.ForeignKeyConstraint(["user_id"], ["user.id"]),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(
        "ix_proposal_review_audit_log_proposal_id",
        "proposal_review_audit_log",
        ["proposal_id"],
    )
    # -- proposal_review_config --
    # Per-tenant settings; unique=True on tenant_id enforces one row per tenant.
    op.create_table(
        "proposal_review_config",
        sa.Column(
            "id",
            postgresql.UUID(as_uuid=True),
            server_default=sa.text("gen_random_uuid()"),
            nullable=False,
        ),
        sa.Column("tenant_id", sa.Text(), nullable=False, unique=True),
        sa.Column("jira_connector_id", sa.Integer(), nullable=True),
        sa.Column("jira_project_key", sa.Text(), nullable=True),
        sa.Column("field_mapping", postgresql.JSONB(), nullable=True),
        sa.Column("jira_writeback", postgresql.JSONB(), nullable=True),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.PrimaryKeyConstraint("id"),
    )
def downgrade() -> None:
    """Drop every Proposal Review table.

    Tables are dropped in the reverse of their creation order so that a
    child table referencing a parent via foreign key is always removed
    before the parent; indexes go away with their tables.
    """
    for table_name in (
        "proposal_review_config",
        "proposal_review_audit_log",
        "proposal_review_document",
        "proposal_review_proposal_decision",
        "proposal_review_decision",
        "proposal_review_finding",
        "proposal_review_run",
        "proposal_review_proposal",
        "proposal_review_rule",
        "proposal_review_ruleset",
    ):
        op.drop_table(table_name)

View File

@@ -11,6 +11,8 @@ require a valid SCIM bearer token.
from __future__ import annotations
import hashlib
import struct
from uuid import UUID
from fastapi import APIRouter
@@ -22,6 +24,7 @@ from fastapi import Response
from fastapi.responses import JSONResponse
from fastapi_users.password import PasswordHelper
from sqlalchemy import func
from sqlalchemy import text
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session
@@ -65,12 +68,25 @@ from onyx.db.permissions import recompute_user_permissions__no_commit
from onyx.db.users import assign_user_to_default_groups__no_commit
from onyx.utils.logger import setup_logger
from onyx.utils.variable_functionality import fetch_ee_implementation_or_noop
from shared_configs.contextvars import get_current_tenant_id
logger = setup_logger()
# Group names reserved for system default groups (seeded by migration).
_RESERVED_GROUP_NAMES = frozenset({"Admin", "Basic"})
# Namespace prefix for the seat-allocation advisory lock. Hashed together
# with the tenant ID so the lock is scoped per-tenant (unrelated tenants
# never block each other) and cannot collide with unrelated advisory locks.
_SEAT_LOCK_NAMESPACE = "onyx_scim_seat_lock"
def _seat_lock_id_for_tenant(tenant_id: str) -> int:
"""Derive a stable 64-bit signed int lock id for this tenant's seat lock."""
digest = hashlib.sha256(f"{_SEAT_LOCK_NAMESPACE}:{tenant_id}".encode()).digest()
# pg_advisory_xact_lock takes a signed 8-byte int; unpack as such.
return struct.unpack("q", digest[:8])[0]
class ScimJSONResponse(JSONResponse):
"""JSONResponse with Content-Type: application/scim+json (RFC 7644 §3.1)."""
@@ -209,12 +225,37 @@ def _apply_exclusions(
def _check_seat_availability(dal: ScimDAL) -> str | None:
"""Return an error message if seat limit is reached, else None."""
"""Return an error message if seat limit is reached, else None.
Acquires a transaction-scoped advisory lock so that concurrent
SCIM requests are serialized. IdPs like Okta send provisioning
requests in parallel batches — without serialization the check is
vulnerable to a TOCTOU race where N concurrent requests each see
"seats available", all insert, and the tenant ends up over its
seat limit.
The lock is held until the caller's next COMMIT or ROLLBACK, which
means the seat count cannot change between the check here and the
subsequent INSERT/UPDATE. Each call site in this module follows
the pattern: _check_seat_availability → write → dal.commit()
(which releases the lock for the next waiting request).
"""
check_fn = fetch_ee_implementation_or_noop(
"onyx.db.license", "check_seat_availability", None
)
if check_fn is None:
return None
# Transaction-scoped advisory lock — released on dal.commit() / dal.rollback().
# The lock id is derived from the tenant so unrelated tenants never block
# each other, and from a namespace string so it cannot collide with
# unrelated advisory locks elsewhere in the codebase.
lock_id = _seat_lock_id_for_tenant(get_current_tenant_id())
dal.session.execute(
text("SELECT pg_advisory_xact_lock(:lock_id)"),
{"lock_id": lock_id},
)
result = check_fn(dal.session, seats_needed=1)
if not result.available:
return result.error_message or "Seat limit reached"

View File

@@ -322,7 +322,6 @@ celery_app.autodiscover_tasks(
"onyx.background.celery.tasks.vespa",
"onyx.background.celery.tasks.llm_model_update",
"onyx.background.celery.tasks.user_file_processing",
"onyx.server.features.proposal_review.engine",
]
)
)

View File

@@ -8,7 +8,6 @@ from collections.abc import Iterator
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from io import BytesIO
from typing import Any
import requests
@@ -41,7 +40,6 @@ from onyx.connectors.jira.utils import best_effort_basic_expert_info
from onyx.connectors.jira.utils import best_effort_get_field_from_issue
from onyx.connectors.jira.utils import build_jira_client
from onyx.connectors.jira.utils import build_jira_url
from onyx.connectors.jira.utils import CustomFieldExtractor
from onyx.connectors.jira.utils import extract_text_from_adf
from onyx.connectors.jira.utils import get_comment_strs
from onyx.connectors.jira.utils import JIRA_CLOUD_API_VERSION
@@ -54,7 +52,6 @@ from onyx.connectors.models import HierarchyNode
from onyx.connectors.models import SlimDocument
from onyx.connectors.models import TextSection
from onyx.db.enums import HierarchyNodeType
from onyx.file_processing.extract_file_text import extract_file_text
from onyx.indexing.indexing_heartbeat import IndexingHeartbeatInterface
from onyx.utils.logger import setup_logger
@@ -67,7 +64,6 @@ _MAX_RESULTS_FETCH_IDS = 5000
_JIRA_FULL_PAGE_SIZE = 50
# https://developer.atlassian.com/cloud/jira/platform/rest/v3/api-group-issues/
_JIRA_BULK_FETCH_LIMIT = 100
_MAX_ATTACHMENT_SIZE_BYTES = 50 * 1024 * 1024 # 50 MB
# Constants for Jira field names
_FIELD_REPORTER = "reporter"
@@ -381,7 +377,6 @@ def process_jira_issue(
comment_email_blacklist: tuple[str, ...] = (),
labels_to_skip: set[str] | None = None,
parent_hierarchy_raw_node_id: str | None = None,
custom_fields_mapping: dict[str, str] | None = None,
) -> Document | None:
if labels_to_skip:
if any(label in issue.fields.labels for label in labels_to_skip):
@@ -467,24 +462,6 @@ def process_jira_issue(
else:
logger.error(f"Project should exist but does not for {issue.key}")
# Merge custom fields into metadata if a mapping was provided
if custom_fields_mapping:
try:
custom_fields = CustomFieldExtractor.get_issue_custom_fields(
issue, custom_fields_mapping
)
# Filter out custom fields that collide with existing metadata keys
for key in list(custom_fields.keys()):
if key in metadata_dict:
logger.warning(
f"Custom field '{key}' on {issue.key} collides with "
f"standard metadata key; skipping custom field value"
)
del custom_fields[key]
metadata_dict.update(custom_fields)
except Exception as e:
logger.warning(f"Failed to extract custom fields for {issue.key}: {e}")
return Document(
id=page_url,
sections=[TextSection(link=page_url, text=ticket_content)],
@@ -527,12 +504,6 @@ class JiraConnector(
# Custom JQL query to filter Jira issues
jql_query: str | None = None,
scoped_token: bool = False,
# When True, extract custom fields from Jira issues and include them
# in document metadata with human-readable field names.
extract_custom_fields: bool = False,
# When True, download attachments from Jira issues and yield them
# as separate Documents linked to the parent ticket.
fetch_attachments: bool = False,
) -> None:
self.batch_size = batch_size
@@ -546,11 +517,7 @@ class JiraConnector(
self.labels_to_skip = set(labels_to_skip)
self.jql_query = jql_query
self.scoped_token = scoped_token
self.extract_custom_fields = extract_custom_fields
self.fetch_attachments = fetch_attachments
self._jira_client: JIRA | None = None
# Mapping of custom field IDs to human-readable names (populated on load_credentials)
self._custom_fields_mapping: dict[str, str] = {}
# Cache project permissions to avoid fetching them repeatedly across runs
self._project_permissions_cache: dict[str, Any] = {}
@@ -711,134 +678,12 @@ class JiraConnector(
# the document belongs directly under the project in the hierarchy
return project_key
def _process_attachments(
    self,
    issue: Issue,
    parent_hierarchy_raw_node_id: str | None,
    include_permissions: bool = False,
    project_key: str | None = None,
) -> Generator[Document | ConnectorFailure, None, None]:
    """Download and yield Documents for each attachment on a Jira issue.

    Each attachment becomes a separate Document whose text is extracted
    from the downloaded file content. Failures on individual attachments
    are logged and yielded as ConnectorFailure so they never break the
    overall indexing run.

    Fix: skip/warning messages and the document's semantic identifier
    previously rendered the literal placeholder "(unknown)" instead of
    the attachment's filename; they now interpolate ``filename``, which
    was already extracted and used for ``title``.
    """
    attachments = best_effort_get_field_from_issue(issue, "attachment")
    if not attachments:
        return

    issue_url = build_jira_url(self.jira_base, issue.key)

    for attachment in attachments:
        try:
            filename = getattr(attachment, "filename", "unknown")
            # Size may be missing or non-numeric on some Jira instances;
            # treat anything unparseable as 0 (i.e. never skip for size).
            try:
                size = int(getattr(attachment, "size", 0) or 0)
            except (ValueError, TypeError):
                size = 0
            content_url = getattr(attachment, "content", None)
            attachment_id = getattr(attachment, "id", filename)
            mime_type = getattr(attachment, "mimeType", "application/octet-stream")
            created = getattr(attachment, "created", None)

            if size > _MAX_ATTACHMENT_SIZE_BYTES:
                logger.warning(
                    f"Skipping attachment '{filename}' on {issue.key}: "
                    f"size {size} bytes exceeds {_MAX_ATTACHMENT_SIZE_BYTES} byte limit"
                )
                continue

            if not content_url:
                logger.warning(
                    f"Skipping attachment '{filename}' on {issue.key}: "
                    f"no content URL available"
                )
                continue

            # Download the attachment using the public API on the
            # python-jira Attachment resource (avoids private _session access
            # and the double-copy from response.content + BytesIO wrapping).
            file_content = attachment.get()

            # Extract text from the downloaded file.
            try:
                text = extract_file_text(
                    file=BytesIO(file_content),
                    file_name=filename,
                )
            except Exception as e:
                logger.warning(
                    f"Could not extract text from attachment '{filename}' "
                    f"on {issue.key}: {e}"
                )
                continue

            if not text or not text.strip():
                logger.info(
                    f"Skipping attachment '{filename}' on {issue.key}: "
                    f"no text content could be extracted"
                )
                continue

            doc_id = f"{issue_url}/attachments/{attachment_id}"
            attachment_doc = Document(
                id=doc_id,
                sections=[TextSection(link=issue_url, text=text)],
                source=DocumentSource.JIRA,
                semantic_identifier=f"{issue.key}: {filename}",
                title=filename,
                doc_updated_at=(time_str_to_utc(created) if created else None),
                parent_hierarchy_raw_node_id=parent_hierarchy_raw_node_id,
                metadata={
                    "parent_ticket": issue.key,
                    "attachment_filename": filename,
                    "attachment_mime_type": mime_type,
                    "attachment_size": str(size),
                },
            )
            if include_permissions and project_key:
                attachment_doc.external_access = self._get_project_permissions(
                    project_key,
                    add_prefix=True,
                )
            yield attachment_doc
        except Exception as e:
            logger.error(f"Failed to process attachment on {issue.key}: {e}")
            yield ConnectorFailure(
                failed_document=DocumentFailure(
                    document_id=f"{issue_url}/attachments/{getattr(attachment, 'id', 'unknown')}",
                    document_link=issue_url,
                ),
                failure_message=f"Failed to process attachment '{getattr(attachment, 'filename', 'unknown')}': {str(e)}",
                exception=e,
            )
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
    """Build the Jira client and, if enabled, prefetch custom field names.

    Always returns None — no derived credential data needs to be stored.
    """
    self._jira_client = build_jira_client(
        credentials=credentials,
        jira_base=self.jira_base,
        scoped_token=self.scoped_token,
    )
    if not self.extract_custom_fields:
        return None
    # Fetch the custom field ID-to-name mapping once at credential load
    # time to avoid repeated API calls during issue processing. This is
    # best-effort: a failure only disables custom field extraction.
    try:
        self._custom_fields_mapping = (
            CustomFieldExtractor.get_all_custom_fields(self._jira_client)
        )
        logger.info(
            f"Loaded {len(self._custom_fields_mapping)} custom field definitions"
        )
    except Exception as e:
        logger.warning(
            f"Failed to fetch custom field definitions; "
            f"custom field extraction will be skipped: {e}"
        )
        self._custom_fields_mapping = {}
    return None
def _get_jql_query(
@@ -969,11 +814,6 @@ class JiraConnector(
comment_email_blacklist=self.comment_email_blacklist,
labels_to_skip=self.labels_to_skip,
parent_hierarchy_raw_node_id=parent_hierarchy_raw_node_id,
custom_fields_mapping=(
self._custom_fields_mapping
if self._custom_fields_mapping
else None
),
):
# Add permission information to the document if requested
if include_permissions:
@@ -983,15 +823,6 @@ class JiraConnector(
)
yield document
# Yield attachment documents if enabled
if self.fetch_attachments:
yield from self._process_attachments(
issue=issue,
parent_hierarchy_raw_node_id=parent_hierarchy_raw_node_id,
include_permissions=include_permissions,
project_key=project_key,
)
except Exception as e:
yield ConnectorFailure(
failed_document=DocumentFailure(
@@ -1099,41 +930,20 @@ class JiraConnector(
issue_key = best_effort_get_field_from_issue(issue, _FIELD_KEY)
doc_id = build_jira_url(self.jira_base, issue_key)
parent_hierarchy_raw_node_id = (
self._get_parent_hierarchy_raw_node_id(issue, project_key)
if project_key
else None
)
project_perms = self._get_project_permissions(
project_key, add_prefix=False
)
slim_doc_batch.append(
SlimDocument(
id=doc_id,
# Permission sync path - don't prefix, upsert_document_external_perms handles it
external_access=project_perms,
parent_hierarchy_raw_node_id=parent_hierarchy_raw_node_id,
external_access=self._get_project_permissions(
project_key, add_prefix=False
),
parent_hierarchy_raw_node_id=(
self._get_parent_hierarchy_raw_node_id(issue, project_key)
if project_key
else None
),
)
)
# Also emit SlimDocument entries for each attachment
if self.fetch_attachments:
attachments = best_effort_get_field_from_issue(issue, "attachment")
if attachments:
for attachment in attachments:
attachment_id = getattr(
attachment,
"id",
getattr(attachment, "filename", "unknown"),
)
slim_doc_batch.append(
SlimDocument(
id=f"{doc_id}/attachments/{attachment_id}",
external_access=project_perms,
parent_hierarchy_raw_node_id=parent_hierarchy_raw_node_id,
)
)
current_offset += 1
if len(slim_doc_batch) >= JIRA_SLIM_PAGE_SIZE:
yield slim_doc_batch

View File

@@ -96,9 +96,6 @@ from onyx.server.features.persona.api import admin_router as admin_persona_route
from onyx.server.features.persona.api import agents_router
from onyx.server.features.persona.api import basic_router as persona_router
from onyx.server.features.projects.api import router as projects_router
from onyx.server.features.proposal_review.api.api import (
router as proposal_review_router,
)
from onyx.server.features.tool.api import admin_router as admin_tool_router
from onyx.server.features.tool.api import router as tool_router
from onyx.server.features.user_oauth_token.api import router as user_oauth_token_router
@@ -472,7 +469,6 @@ def get_application(lifespan_override: Lifespan | None = None) -> FastAPI:
include_router_with_global_prefix_prepended(application, projects_router)
include_router_with_global_prefix_prepended(application, public_build_router)
include_router_with_global_prefix_prepended(application, build_router)
include_router_with_global_prefix_prepended(application, proposal_review_router)
include_router_with_global_prefix_prepended(application, document_set_router)
include_router_with_global_prefix_prepended(application, hierarchy_router)
include_router_with_global_prefix_prepended(application, search_settings_router)

View File

@@ -96,6 +96,32 @@ def _truncate_description(description: str | None, max_length: int = 500) -> str
return description[: max_length - 3] + "..."
# TODO: Replace mask-comparison approach with an explicit Unset sentinel from the
# frontend indicating whether each credential field was actually modified. The current
# approach is brittle (e.g. short credentials produce a fixed-length mask that could
# collide) and mutates request values, which is surprising. The frontend should signal
# "unchanged" vs "new value" directly rather than relying on masked-string equality.
def _restore_masked_oauth_credentials(
    request_client_id: str | None,
    request_client_secret: str | None,
    existing_client: OAuthClientInformationFull,
) -> tuple[str | None, str | None]:
    """If the frontend sent back masked credentials, restore the real stored values."""

    def _unmask(sent: str | None, stored: str | None) -> str | None:
        # Swap in the stored value only when the request value is exactly
        # the mask of that stored value — i.e. the user left it untouched.
        if sent and stored and sent == mask_string(stored):
            return stored
        return sent

    return (
        _unmask(request_client_id, existing_client.client_id),
        _unmask(request_client_secret, existing_client.client_secret),
    )
router = APIRouter(prefix="/mcp")
admin_router = APIRouter(prefix="/admin/mcp")
STATE_TTL_SECONDS = 60 * 5 # 5 minutes
@@ -392,6 +418,26 @@ async def _connect_oauth(
detail=f"Server was configured with authentication type {auth_type_str}",
)
# If the frontend sent back masked credentials (unchanged by the user),
# restore the real stored values so we don't overwrite them with masks.
if mcp_server.admin_connection_config:
existing_data = extract_connection_data(
mcp_server.admin_connection_config, apply_mask=False
)
existing_client_raw = existing_data.get(MCPOAuthKeys.CLIENT_INFO.value)
if existing_client_raw:
existing_client = OAuthClientInformationFull.model_validate(
existing_client_raw
)
(
request.oauth_client_id,
request.oauth_client_secret,
) = _restore_masked_oauth_credentials(
request.oauth_client_id,
request.oauth_client_secret,
existing_client,
)
# Create admin config with client info if provided
config_data = MCPConnectionData(headers={})
if request.oauth_client_id and request.oauth_client_secret:
@@ -1356,6 +1402,19 @@ def _upsert_mcp_server(
if client_info_raw:
client_info = OAuthClientInformationFull.model_validate(client_info_raw)
# If the frontend sent back masked credentials (unchanged by the user),
# restore the real stored values so the comparison below sees no change
# and the credentials aren't overwritten with masked strings.
if client_info and request.auth_type == MCPAuthenticationType.OAUTH:
(
request.oauth_client_id,
request.oauth_client_secret,
) = _restore_masked_oauth_credentials(
request.oauth_client_id,
request.oauth_client_secret,
client_info,
)
changing_connection_config = (
not mcp_server.admin_connection_config
or (

View File

@@ -1,39 +0,0 @@
"""Main router for Proposal Review (Argus).
Mounts all sub-routers under /proposal-review prefix.
"""
from fastapi import APIRouter
from fastapi import Depends
from onyx.auth.permissions import require_permission
from onyx.db.enums import Permission
from onyx.server.features.proposal_review.configs import ENABLE_PROPOSAL_REVIEW
router = APIRouter(
prefix="/proposal-review",
dependencies=[Depends(require_permission(Permission.BASIC_ACCESS))],
)
if ENABLE_PROPOSAL_REVIEW:
from onyx.server.features.proposal_review.api.config_api import (
router as config_router,
)
from onyx.server.features.proposal_review.api.decisions_api import (
router as decisions_router,
)
from onyx.server.features.proposal_review.api.proposals_api import (
router as proposals_router,
)
from onyx.server.features.proposal_review.api.review_api import (
router as review_router,
)
from onyx.server.features.proposal_review.api.rulesets_api import (
router as rulesets_router,
)
router.include_router(rulesets_router, tags=["proposal-review"])
router.include_router(proposals_router, tags=["proposal-review"])
router.include_router(review_router, tags=["proposal-review"])
router.include_router(decisions_router, tags=["proposal-review"])
router.include_router(config_router, tags=["proposal-review"])

View File

@@ -1,87 +0,0 @@
"""API endpoints for tenant configuration."""
from fastapi import APIRouter
from fastapi import Depends
from sqlalchemy.orm import Session
from onyx.auth.permissions import require_permission
from onyx.configs.constants import DocumentSource
from onyx.db.connector import fetch_connectors
from onyx.db.engine.sql_engine import get_session
from onyx.db.enums import Permission
from onyx.db.models import User
from onyx.server.features.proposal_review.api.models import ConfigResponse
from onyx.server.features.proposal_review.api.models import ConfigUpdate
from onyx.server.features.proposal_review.api.models import JiraConnectorInfo
from onyx.server.features.proposal_review.db import config as config_db
from shared_configs.contextvars import get_current_tenant_id
router = APIRouter()
@router.get("/config")
def get_config(
    _user: User = Depends(require_permission(Permission.BASIC_ACCESS)),
    db_session: Session = Depends(get_session),
) -> ConfigResponse:
    """Get the tenant's proposal review configuration.

    Lazily creates (and commits) an empty config row on first access so the
    frontend never has to handle a 404 for a missing config.
    """
    tenant_id = get_current_tenant_id()
    config = config_db.get_config(tenant_id, db_session)
    if not config:
        # Return a default empty config rather than 404
        # NOTE(review): upsert_config is called positionally here but with
        # keyword arguments in update_config — confirm its signature is
        # (tenant_id, db_session, ...) so db_session doesn't bind to a
        # config field.
        config = config_db.upsert_config(tenant_id, db_session)
        db_session.commit()
    return ConfigResponse.from_model(config)
@router.put("/config")
def update_config(
    request: ConfigUpdate,
    _user: User = Depends(require_permission(Permission.MANAGE_CONNECTORS)),
    db_session: Session = Depends(get_session),
) -> ConfigResponse:
    """Update the tenant's proposal review configuration.

    Requires MANAGE_CONNECTORS (stricter than the read endpoint). Upserts the
    per-tenant config row and commits before returning the stored state.
    """
    tenant_id = get_current_tenant_id()
    config = config_db.upsert_config(
        tenant_id=tenant_id,
        jira_connector_id=request.jira_connector_id,
        jira_project_key=request.jira_project_key,
        field_mapping=request.field_mapping,
        jira_writeback=request.jira_writeback,
        db_session=db_session,
    )
    db_session.commit()
    return ConfigResponse.from_model(config)
@router.get("/jira-connectors")
def list_jira_connectors(
    _user: User = Depends(require_permission(Permission.BASIC_ACCESS)),
    db_session: Session = Depends(get_session),
) -> list[JiraConnectorInfo]:
    """List all Jira connectors available to this tenant."""

    def _to_info(connector) -> JiraConnectorInfo:
        # connector_specific_config may be None; treat it as an empty mapping
        # so the .get() lookups below fall back to "".
        settings = connector.connector_specific_config or {}
        return JiraConnectorInfo(
            id=connector.id,
            name=connector.name,
            project_key=settings.get("project_key", ""),
            project_url=settings.get("jira_base_url", ""),
        )

    jira_connectors = fetch_connectors(db_session, sources=[DocumentSource.JIRA])
    return [_to_info(connector) for connector in jira_connectors]
@router.get("/jira-connectors/{connector_id}/metadata-keys")
def get_connector_metadata_keys(
    connector_id: int,
    _user: User = Depends(require_permission(Permission.BASIC_ACCESS)),  # noqa: ARG001
    db_session: Session = Depends(get_session),
) -> list[str]:
    """Return the distinct doc_metadata keys across all documents for a connector.

    Thin passthrough to the DB layer; used by the frontend to offer field
    choices when configuring the field_mapping.
    """
    return config_db.get_connector_metadata_keys(connector_id, db_session)

View File

@@ -1,187 +0,0 @@
"""API endpoints for per-finding decisions, proposal decisions, and Jira sync."""
from uuid import UUID
from fastapi import APIRouter
from fastapi import Depends
from sqlalchemy.orm import Session
from onyx.auth.permissions import require_permission
from onyx.db.engine.sql_engine import get_session
from onyx.db.enums import Permission
from onyx.db.models import User
from onyx.error_handling.error_codes import OnyxErrorCode
from onyx.error_handling.exceptions import OnyxError
from onyx.server.features.proposal_review.api.models import FindingDecisionCreate
from onyx.server.features.proposal_review.api.models import FindingDecisionResponse
from onyx.server.features.proposal_review.api.models import JiraSyncResponse
from onyx.server.features.proposal_review.api.models import ProposalDecisionCreate
from onyx.server.features.proposal_review.api.models import ProposalDecisionResponse
from onyx.server.features.proposal_review.db import decisions as decisions_db
from onyx.server.features.proposal_review.db import findings as findings_db
from onyx.server.features.proposal_review.db import proposals as proposals_db
from onyx.utils.logger import setup_logger
from shared_configs.contextvars import get_current_tenant_id
logger = setup_logger()
router = APIRouter()
@router.post(
    "/findings/{finding_id}/decision",
)
def record_finding_decision(
    finding_id: UUID,
    request: FindingDecisionCreate,
    user: User = Depends(require_permission(Permission.BASIC_ACCESS)),
    db_session: Session = Depends(get_session),
) -> FindingDecisionResponse:
    """Record or update a decision on a finding (upsert).

    Writes the decision, appends an audit-log entry, and commits both in the
    same transaction.
    """
    tenant_id = get_current_tenant_id()
    # Verify finding exists
    finding = findings_db.get_finding(finding_id, db_session)
    if not finding:
        raise OnyxError(OnyxErrorCode.NOT_FOUND, "Finding not found")
    # Verify the finding's proposal belongs to the current tenant
    # (deliberately reuses the "Finding not found" message so callers cannot
    # probe for findings belonging to other tenants' proposals)
    proposal = proposals_db.get_proposal(finding.proposal_id, tenant_id, db_session)
    if not proposal:
        raise OnyxError(OnyxErrorCode.NOT_FOUND, "Finding not found")
    decision = decisions_db.upsert_finding_decision(
        finding_id=finding_id,
        officer_id=user.id,
        action=request.action,
        notes=request.notes,
        db_session=db_session,
    )
    # Audit log
    decisions_db.create_audit_log(
        proposal_id=finding.proposal_id,
        action="finding_decided",
        user_id=user.id,
        details={
            "finding_id": str(finding_id),
            "action": request.action,
        },
        db_session=db_session,
    )
    db_session.commit()
    return FindingDecisionResponse.from_model(decision)
@router.post(
    "/proposals/{proposal_id}/decision",
    status_code=201,
)
def record_proposal_decision(
    proposal_id: UUID,
    request: ProposalDecisionCreate,
    user: User = Depends(require_permission(Permission.BASIC_ACCESS)),
    db_session: Session = Depends(get_session),
) -> ProposalDecisionResponse:
    """Record a final decision on a proposal.

    Moves the proposal to the matching status, stores the decision record,
    writes an audit-log entry, and commits everything together.
    """
    tenant_id = get_current_tenant_id()
    proposal = proposals_db.get_proposal(proposal_id, tenant_id, db_session)
    if proposal is None:
        raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")
    # Each valid decision maps 1:1 onto the proposal status of the same name.
    if request.decision not in ("APPROVED", "CHANGES_REQUESTED", "REJECTED"):
        raise OnyxError(
            OnyxErrorCode.INVALID_INPUT,
            "decision must be APPROVED, CHANGES_REQUESTED, or REJECTED",
        )
    proposals_db.update_proposal_status(
        proposal_id, tenant_id, request.decision, db_session
    )
    recorded = decisions_db.create_proposal_decision(
        proposal_id=proposal_id,
        officer_id=user.id,
        decision=request.decision,
        notes=request.notes,
        db_session=db_session,
    )
    decisions_db.create_audit_log(
        proposal_id=proposal_id,
        action="decision_submitted",
        user_id=user.id,
        details={
            "decision_id": str(recorded.id),
            "decision": request.decision,
        },
        db_session=db_session,
    )
    db_session.commit()
    return ProposalDecisionResponse.from_model(recorded)
@router.post(
    "/proposals/{proposal_id}/sync-jira",
)
def sync_jira(
    proposal_id: UUID,
    user: User = Depends(require_permission(Permission.BASIC_ACCESS)),
    db_session: Session = Depends(get_session),
) -> JiraSyncResponse:
    """Sync the latest proposal decision to Jira.
    Dispatches a Celery task that performs 3 Jira API operations:
    1. Update custom fields (decision, completion %)
    2. Transition the issue to the appropriate column
    3. Post a structured review summary comment
    """
    tenant_id = get_current_tenant_id()
    proposal = proposals_db.get_proposal(proposal_id, tenant_id, db_session)
    if not proposal:
        raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")
    latest_decision = decisions_db.get_latest_proposal_decision(proposal_id, db_session)
    if not latest_decision:
        raise OnyxError(
            OnyxErrorCode.INVALID_INPUT,
            "No decision to sync -- record a proposal decision first",
        )
    # Idempotency guard: a decision already marked synced is not re-dispatched.
    if latest_decision.jira_synced:
        return JiraSyncResponse(
            success=True,
            message="Decision already synced to Jira",
        )
    # Dispatch Celery task for Jira sync
    # (imported lazily — presumably to avoid an import cycle with the engine
    # module; confirm before hoisting to module level)
    from onyx.server.features.proposal_review.engine.review_engine import (
        sync_decision_to_jira,
    )
    # expires=300: the task is dropped if no worker picks it up within 5 min.
    # NOTE(review): the task is dispatched before the audit-log commit below;
    # if the commit fails the sync still runs but leaves no audit entry.
    sync_decision_to_jira.apply_async(args=[str(proposal_id), tenant_id], expires=300)
    # Audit log
    decisions_db.create_audit_log(
        proposal_id=proposal_id,
        action="jira_sync_dispatched",
        user_id=user.id,
        details={"decision_id": str(latest_decision.id)},
        db_session=db_session,
    )
    db_session.commit()
    return JiraSyncResponse(
        success=True,
        message="Jira sync task dispatched",
    )

View File

@@ -1,463 +0,0 @@
"""Pydantic request/response models for Proposal Review (Argus)."""
from datetime import datetime
from typing import Any
from typing import Literal
from uuid import UUID
from pydantic import BaseModel
# =============================================================================
# Ruleset Schemas
# =============================================================================
class RulesetCreate(BaseModel):
    """Request body for creating a ruleset."""
    name: str
    description: str | None = None
    is_default: bool = False
class RulesetUpdate(BaseModel):
    """Partial update for a ruleset; all fields are optional."""
    name: str | None = None
    description: str | None = None
    is_default: bool | None = None
    is_active: bool | None = None
class RulesetResponse(BaseModel):
    """API representation of a ruleset, optionally including its rules."""
    id: UUID
    tenant_id: str
    name: str
    description: str | None
    is_default: bool
    is_active: bool
    created_by: UUID | None
    created_at: datetime
    updated_at: datetime
    # Forward reference: RuleResponse is declared later in this module.
    rules: list["RuleResponse"] = []
    @classmethod
    def from_model(
        cls,
        ruleset: Any,
        include_rules: bool = True,
    ) -> "RulesetResponse":
        """Build a response from an ORM ruleset; include_rules=False skips
        serializing the nested rules (returns an empty list instead)."""
        return cls(
            id=ruleset.id,
            tenant_id=ruleset.tenant_id,
            name=ruleset.name,
            description=ruleset.description,
            is_default=ruleset.is_default,
            is_active=ruleset.is_active,
            created_by=ruleset.created_by,
            created_at=ruleset.created_at,
            updated_at=ruleset.updated_at,
            rules=(
                [RuleResponse.from_model(r) for r in ruleset.rules]
                if include_rules
                else []
            ),
        )
# =============================================================================
# Rule Schemas
# =============================================================================
class RuleCreate(BaseModel):
    """Request body for creating a rule inside a ruleset."""
    name: str
    description: str | None = None
    category: str | None = None
    rule_type: Literal[
        "DOCUMENT_CHECK", "METADATA_CHECK", "CROSS_REFERENCE", "CUSTOM_NL"
    ]
    rule_intent: Literal["CHECK", "HIGHLIGHT"] = "CHECK"
    prompt_template: str
    source: Literal["IMPORTED", "MANUAL"] = "MANUAL"
    authority: Literal["OVERRIDE", "RETURN"] | None = None
    is_hard_stop: bool = False
    priority: int = 0
class RuleUpdate(BaseModel):
    """Partial update for a rule; all fields are optional.

    NOTE(review): the enum-like fields are plain `str` here (unlike the
    Literal-constrained RuleCreate), so invalid values are not rejected at
    the schema layer — confirm the DB layer validates them.
    """
    name: str | None = None
    description: str | None = None
    category: str | None = None
    rule_type: str | None = None
    rule_intent: str | None = None
    prompt_template: str | None = None
    authority: str | None = None
    is_hard_stop: bool | None = None
    priority: int | None = None
    is_active: bool | None = None
class RuleResponse(BaseModel):
    """API representation of a single rule."""
    id: UUID
    ruleset_id: UUID
    name: str
    description: str | None
    category: str | None
    rule_type: str
    rule_intent: str
    prompt_template: str
    source: str
    authority: str | None
    is_hard_stop: bool
    priority: int
    is_active: bool
    created_at: datetime
    updated_at: datetime
    @classmethod
    def from_model(cls, rule: Any) -> "RuleResponse":
        """Build a response from an ORM rule row."""
        return cls(
            id=rule.id,
            ruleset_id=rule.ruleset_id,
            name=rule.name,
            description=rule.description,
            category=rule.category,
            rule_type=rule.rule_type,
            rule_intent=rule.rule_intent,
            prompt_template=rule.prompt_template,
            source=rule.source,
            authority=rule.authority,
            is_hard_stop=rule.is_hard_stop,
            priority=rule.priority,
            is_active=rule.is_active,
            created_at=rule.created_at,
            updated_at=rule.updated_at,
        )
class BulkRuleUpdateRequest(BaseModel):
    """Batch activate/deactivate/delete rules."""
    action: Literal["activate", "deactivate", "delete"]
    rule_ids: list[UUID]
class BulkRuleUpdateResponse(BaseModel):
    """Number of rules affected by a bulk update."""
    updated_count: int
# =============================================================================
# Proposal Schemas
# =============================================================================
class ProposalResponse(BaseModel):
    """Thin response -- status + document_id. Metadata comes from Document."""
    id: UUID
    document_id: str
    tenant_id: str
    status: str
    created_at: datetime
    updated_at: datetime
    # Resolved metadata from Document table via field_mapping
    metadata: dict[str, Any] = {}
    @classmethod
    def from_model(
        cls,
        proposal: Any,
        metadata: dict[str, Any] | None = None,
    ) -> "ProposalResponse":
        """Build a response from an ORM proposal plus pre-resolved metadata."""
        return cls(
            id=proposal.id,
            document_id=proposal.document_id,
            tenant_id=proposal.tenant_id,
            status=proposal.status,
            created_at=proposal.created_at,
            updated_at=proposal.updated_at,
            metadata=metadata or {},
        )
class ProposalListResponse(BaseModel):
    """Paginated proposal listing; total_count ignores limit/offset."""
    proposals: list[ProposalResponse]
    total_count: int
    config_missing: bool = False  # True when no Argus config exists
# =============================================================================
# Review Run Schemas
# =============================================================================
class ReviewRunTriggerRequest(BaseModel):
    """Request body for triggering a review run: the ruleset to evaluate."""
    ruleset_id: UUID
class ReviewRunResponse(BaseModel):
    """API representation of a review run, including progress counters."""
    id: UUID
    proposal_id: UUID
    ruleset_id: UUID
    triggered_by: UUID
    status: str
    total_rules: int
    completed_rules: int
    started_at: datetime | None
    completed_at: datetime | None
    created_at: datetime
    @classmethod
    def from_model(cls, run: Any) -> "ReviewRunResponse":
        """Build a response from an ORM review-run row."""
        return cls(
            id=run.id,
            proposal_id=run.proposal_id,
            ruleset_id=run.ruleset_id,
            triggered_by=run.triggered_by,
            status=run.status,
            total_rules=run.total_rules,
            completed_rules=run.completed_rules,
            started_at=run.started_at,
            completed_at=run.completed_at,
            created_at=run.created_at,
        )
# =============================================================================
# Finding Schemas
# =============================================================================
class FindingDecisionResponse(BaseModel):
    """API representation of an officer's decision on a single finding."""
    id: UUID
    finding_id: UUID
    officer_id: UUID
    action: str
    notes: str | None
    created_at: datetime
    updated_at: datetime
    @classmethod
    def from_model(cls, decision: Any) -> "FindingDecisionResponse":
        """Build a response from an ORM finding-decision row."""
        return cls(
            id=decision.id,
            finding_id=decision.finding_id,
            officer_id=decision.officer_id,
            action=decision.action,
            notes=decision.notes,
            created_at=decision.created_at,
            updated_at=decision.updated_at,
        )
class FindingResponse(BaseModel):
    """API representation of a review finding, with denormalized rule info
    and the officer's decision (if one exists) nested for display."""
    id: UUID
    proposal_id: UUID
    rule_id: UUID
    review_run_id: UUID
    verdict: str
    confidence: str | None
    evidence: str | None
    explanation: str | None
    suggested_action: str | None
    llm_model: str | None
    llm_tokens_used: int | None
    created_at: datetime
    # Nested rule info for display
    rule_name: str | None = None
    rule_category: str | None = None
    rule_is_hard_stop: bool | None = None
    # Nested decision if exists
    decision: FindingDecisionResponse | None = None
    @classmethod
    def from_model(cls, finding: Any) -> "FindingResponse":
        """Build a response from an ORM finding, flattening the related rule
        and decision (both may be absent)."""
        decision = None
        if finding.decision is not None:
            decision = FindingDecisionResponse.from_model(finding.decision)
        rule_name = None
        rule_category = None
        rule_is_hard_stop = None
        if finding.rule is not None:
            rule_name = finding.rule.name
            rule_category = finding.rule.category
            rule_is_hard_stop = finding.rule.is_hard_stop
        return cls(
            id=finding.id,
            proposal_id=finding.proposal_id,
            rule_id=finding.rule_id,
            review_run_id=finding.review_run_id,
            verdict=finding.verdict,
            confidence=finding.confidence,
            evidence=finding.evidence,
            explanation=finding.explanation,
            suggested_action=finding.suggested_action,
            llm_model=finding.llm_model,
            llm_tokens_used=finding.llm_tokens_used,
            created_at=finding.created_at,
            rule_name=rule_name,
            rule_category=rule_category,
            rule_is_hard_stop=rule_is_hard_stop,
            decision=decision,
        )
# =============================================================================
# Decision Schemas
# =============================================================================
class FindingDecisionCreate(BaseModel):
    """Request body for recording a decision on a single finding."""
    action: Literal["VERIFIED", "ISSUE", "NOT_APPLICABLE", "OVERRIDDEN"]
    notes: str | None = None
class ProposalDecisionCreate(BaseModel):
    """Request body for recording a final decision on a proposal."""
    decision: Literal["APPROVED", "CHANGES_REQUESTED", "REJECTED"]
    notes: str | None = None
class ProposalDecisionResponse(BaseModel):
    """API representation of a proposal decision, with Jira sync state."""
    id: UUID
    proposal_id: UUID
    officer_id: UUID
    decision: str
    notes: str | None
    jira_synced: bool
    jira_synced_at: datetime | None
    created_at: datetime
    @classmethod
    def from_model(cls, decision: Any) -> "ProposalDecisionResponse":
        """Build a response from an ORM proposal-decision row."""
        return cls(
            id=decision.id,
            proposal_id=decision.proposal_id,
            officer_id=decision.officer_id,
            decision=decision.decision,
            notes=decision.notes,
            jira_synced=decision.jira_synced,
            jira_synced_at=decision.jira_synced_at,
            created_at=decision.created_at,
        )
# =============================================================================
# Config Schemas
# =============================================================================
class ConfigUpdate(BaseModel):
    """Request body for updating the per-tenant Argus configuration."""
    jira_connector_id: int | None = None
    jira_project_key: str | None = None
    field_mapping: list[str] | None = None  # List of visible metadata keys
    jira_writeback: dict[str, Any] | None = None
class ConfigResponse(BaseModel):
    """API representation of the per-tenant Argus configuration."""
    id: UUID
    tenant_id: str
    jira_connector_id: int | None
    jira_project_key: str | None
    field_mapping: list[str] | None
    jira_writeback: dict[str, Any] | None
    created_at: datetime
    updated_at: datetime
    @classmethod
    def from_model(cls, config: Any) -> "ConfigResponse":
        """Build a response from an ORM config row."""
        return cls(
            id=config.id,
            tenant_id=config.tenant_id,
            jira_connector_id=config.jira_connector_id,
            jira_project_key=config.jira_project_key,
            field_mapping=config.field_mapping,
            jira_writeback=config.jira_writeback,
            created_at=config.created_at,
            updated_at=config.updated_at,
        )
# =============================================================================
# Import Schemas
# =============================================================================
class ImportResponse(BaseModel):
    """Result of a rule import: how many rules were created, and which."""
    rules_created: int
    rules: list[RuleResponse]
# =============================================================================
# Document Schemas
# =============================================================================
class ProposalDocumentResponse(BaseModel):
    """API representation of a manually uploaded proposal document."""
    id: UUID
    proposal_id: UUID
    file_name: str
    file_type: str | None
    document_role: str
    uploaded_by: UUID | None
    extracted_text: str | None = None
    created_at: datetime
    @classmethod
    def from_model(cls, doc: Any) -> "ProposalDocumentResponse":
        """Build a response from an ORM document row; extracted_text is read
        defensively via getattr since older rows/objects may lack it."""
        return cls(
            id=doc.id,
            proposal_id=doc.proposal_id,
            file_name=doc.file_name,
            file_type=doc.file_type,
            document_role=doc.document_role,
            uploaded_by=doc.uploaded_by,
            extracted_text=getattr(doc, "extracted_text", None),
            created_at=doc.created_at,
        )
# =============================================================================
# Audit Log Schemas
# =============================================================================
class AuditLogEntry(BaseModel):
    """API representation of a single proposal audit-log entry."""
    id: UUID
    proposal_id: UUID
    user_id: UUID | None
    action: str
    details: dict[str, Any] | None
    created_at: datetime
    @classmethod
    def from_model(cls, entry: Any) -> "AuditLogEntry":
        """Build a response from an ORM audit-log row."""
        return cls(
            id=entry.id,
            proposal_id=entry.proposal_id,
            user_id=entry.user_id,
            action=entry.action,
            details=entry.details,
            created_at=entry.created_at,
        )
# =============================================================================
# Jira Sync Schemas
# =============================================================================
class JiraSyncResponse(BaseModel):
    """Outcome of a Jira sync request (dispatch result, not sync result)."""
    success: bool
    message: str
# =============================================================================
# Jira Connector Discovery Schemas
# =============================================================================
class JiraConnectorInfo(BaseModel):
    """Summary of a Jira connector for the configuration UI."""
    id: int
    name: str
    project_key: str
    project_url: str

View File

@@ -1,370 +0,0 @@
"""API endpoints for proposals and proposal documents."""
import io
from datetime import datetime
from datetime import timezone
from typing import Any
from uuid import UUID
from uuid import uuid4
from fastapi import APIRouter
from fastapi import Depends
from fastapi import Form
from fastapi import UploadFile
from sqlalchemy import func
from sqlalchemy import or_
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from onyx.auth.permissions import require_permission
from onyx.configs.constants import DocumentSource
from onyx.db.engine.sql_engine import get_session
from onyx.db.enums import Permission
from onyx.db.models import Connector
from onyx.db.models import Document
from onyx.db.models import DocumentByConnectorCredentialPair
from onyx.db.models import User
from onyx.error_handling.error_codes import OnyxErrorCode
from onyx.error_handling.exceptions import OnyxError
from onyx.file_processing.extract_file_text import extract_file_text
from onyx.server.features.proposal_review.api.models import ProposalDocumentResponse
from onyx.server.features.proposal_review.api.models import ProposalListResponse
from onyx.server.features.proposal_review.api.models import ProposalResponse
from onyx.server.features.proposal_review.configs import (
DOCUMENT_UPLOAD_MAX_FILE_SIZE_BYTES,
)
from onyx.server.features.proposal_review.db import config as config_db
from onyx.server.features.proposal_review.db import proposals as proposals_db
from onyx.server.features.proposal_review.db.models import ProposalReviewDocument
from onyx.server.features.proposal_review.db.models import ProposalReviewProposal
from onyx.utils.logger import setup_logger
from shared_configs.contextvars import get_current_tenant_id
logger = setup_logger()
router = APIRouter()
def _resolve_document_metadata(
document: Document,
visible_fields: list[str] | None,
) -> dict[str, Any]:
"""Resolve metadata from a Document's tags, filtered to visible fields.
Jira custom fields are stored as Tag rows (tag_key / tag_value)
linked to the document via document__tag. visible_fields selects
which tag keys to include. If None/empty, returns all tags.
"""
# Build metadata from the document's tags
raw_metadata: dict[str, Any] = {}
for tag in document.tags:
key = tag.tag_key
value = tag.tag_value
# Tags with is_list=True can have multiple values for the same key
if tag.is_list:
raw_metadata.setdefault(key, [])
raw_metadata[key].append(value)
else:
raw_metadata[key] = value
# Extract jira_key from tags and clean title from semantic_id.
# Jira semantic_id is "KEY-123: Summary Text" — split to isolate each.
jira_key = raw_metadata.get("key", "")
title = document.semantic_id or ""
if title and ": " in title:
title = title.split(": ", 1)[1]
raw_metadata["jira_key"] = jira_key
raw_metadata["title"] = title
raw_metadata["link"] = document.link
if not visible_fields:
return raw_metadata
# Filter to only the selected fields, plus always include core fields
resolved: dict[str, Any] = {
"jira_key": raw_metadata.get("jira_key"),
"title": raw_metadata.get("title"),
"link": raw_metadata.get("link"),
}
for key in visible_fields:
if key in raw_metadata:
resolved[key] = raw_metadata[key]
return resolved
@router.get("/proposals")
def list_proposals(
    status: str | None = None,
    limit: int = 100,
    offset: int = 0,
    user: User = Depends(require_permission(Permission.BASIC_ACCESS)),  # noqa: ARG001
    db_session: Session = Depends(get_session),
) -> ProposalListResponse:
    """List proposals.
    This queries the Document table filtered by the configured Jira project,
    LEFT JOINs proposal_review_proposal for review state, and resolves
    metadata field names via the field_mapping config.
    Documents without a proposal record are returned with status PENDING
    without persisting any new rows (read-only endpoint).
    """
    tenant_id = get_current_tenant_id()
    # Get config for field mapping and Jira project filtering
    config = config_db.get_config(tenant_id, db_session)
    # When no Argus config exists, return an empty list with a hint for the frontend.
    # The frontend can show "Configure a Jira connector in Settings to see proposals."
    if config is None:
        return ProposalListResponse(
            proposals=[],
            total_count=0,
            config_missing=True,
        )
    visible_fields = config.field_mapping
    # Query documents from the configured Jira connector only,
    # LEFT JOIN proposal state for review tracking.
    # NOTE: Tenant isolation is handled at the schema level (schema-per-tenant).
    # The DB session is already scoped to the current tenant's schema, so
    # cross-tenant data leakage is prevented by the connection itself.
    query = (
        db_session.query(Document, ProposalReviewProposal)
        .outerjoin(
            ProposalReviewProposal,
            Document.id == ProposalReviewProposal.document_id,
        )
        .options(selectinload(Document.tags))
    )
    # Filter to only documents from the configured Jira connector
    if config and config.jira_connector_id:
        # Join through DocumentByConnectorCredentialPair to filter by connector
        query = query.join(
            DocumentByConnectorCredentialPair,
            Document.id == DocumentByConnectorCredentialPair.id,
        ).filter(
            DocumentByConnectorCredentialPair.connector_id == config.jira_connector_id,
        )
    else:
        # No connector configured — filter to Jira source connectors only
        # to avoid showing Slack/GitHub/etc documents
        query = (
            query.join(
                DocumentByConnectorCredentialPair,
                Document.id == DocumentByConnectorCredentialPair.id,
            )
            .join(
                Connector,
                DocumentByConnectorCredentialPair.connector_id == Connector.id,
            )
            .filter(
                Connector.source == DocumentSource.JIRA,
            )
        )
    # Exclude attachment documents — they are children of issue documents
    # and have "/attachments/" in their document ID.
    query = query.filter(~Document.id.contains("/attachments/"))
    # If status filter is specified, only show documents with matching proposal status.
    # PENDING is special: documents without a proposal record are implicitly pending.
    if status:
        if status == "PENDING":
            query = query.filter(
                or_(
                    ProposalReviewProposal.status == status,
                    ProposalReviewProposal.id.is_(None),
                ),
            )
        else:
            query = query.filter(ProposalReviewProposal.status == status)
    # Count before adding DISTINCT ON — count(distinct(...)) handles
    # deduplication on its own and conflicts with DISTINCT ON.
    total_count = (
        query.with_entities(func.count(func.distinct(Document.id))).scalar() or 0
    )
    # Deduplicate rows that can arise from multiple connector-credential pairs.
    # Applied after counting to avoid the DISTINCT ON + aggregate conflict.
    # ORDER BY Document.id is required for DISTINCT ON to be deterministic.
    query = query.distinct(Document.id).order_by(Document.id)
    # Pagination is applied after dedup so limit/offset count unique documents.
    results = query.offset(offset).limit(limit).all()
    proposals: list[ProposalResponse] = []
    for document, proposal in results:
        if proposal is None:
            # Don't create DB records during GET — treat as pending
            metadata = _resolve_document_metadata(document, visible_fields)
            proposals.append(
                ProposalResponse(
                    id=uuid4(),  # temporary, not persisted
                    document_id=document.id,
                    tenant_id=tenant_id,
                    status="PENDING",
                    created_at=document.doc_updated_at or datetime.now(timezone.utc),
                    updated_at=document.doc_updated_at or datetime.now(timezone.utc),
                    metadata=metadata,
                )
            )
            continue
        metadata = _resolve_document_metadata(document, visible_fields)
        proposals.append(ProposalResponse.from_model(proposal, metadata=metadata))
    return ProposalListResponse(proposals=proposals, total_count=total_count)
@router.get("/proposals/{proposal_id}")
def get_proposal(
    proposal_id: UUID,
    user: User = Depends(require_permission(Permission.BASIC_ACCESS)),  # noqa: ARG001
    db_session: Session = Depends(get_session),
) -> ProposalResponse:
    """Get a single proposal with its metadata from the Document table."""
    tenant_id = get_current_tenant_id()
    found = proposals_db.get_proposal(proposal_id, tenant_id, db_session)
    if not found:
        raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")
    # Field visibility comes from the tenant config (which may not exist yet).
    tenant_config = config_db.get_config(tenant_id, db_session)
    selected_fields = tenant_config.field_mapping if tenant_config else None
    # Load the linked Document (tags eagerly loaded) to resolve metadata.
    linked_doc = (
        db_session.query(Document)
        .options(selectinload(Document.tags))
        .filter(Document.id == found.document_id)
        .one_or_none()
    )
    resolved: dict[str, Any] = (
        _resolve_document_metadata(linked_doc, selected_fields) if linked_doc else {}
    )
    return ProposalResponse.from_model(found, metadata=resolved)
# =============================================================================
# Proposal Documents (manual uploads)
# =============================================================================
@router.post(
    "/proposals/{proposal_id}/documents",
    status_code=201,
)
def upload_document(
    proposal_id: UUID,
    file: UploadFile,
    document_role: str = Form("OTHER"),
    user: User = Depends(require_permission(Permission.BASIC_ACCESS)),
    db_session: Session = Depends(get_session),
) -> ProposalDocumentResponse:
    """Upload a document to a proposal.

    Verifies the proposal belongs to the current tenant, enforces the max
    upload size, best-effort extracts text for later review, and persists a
    ProposalReviewDocument row.
    """
    tenant_id = get_current_tenant_id()
    proposal = proposals_db.get_proposal(proposal_id, tenant_id, db_session)
    if not proposal:
        raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")
    # Read file content
    try:
        file_bytes = file.file.read()
    except Exception as e:
        # Chain the original exception so the root cause is preserved.
        raise OnyxError(
            OnyxErrorCode.INVALID_INPUT,
            f"Failed to read uploaded file: {str(e)}",
        ) from e
    # Validate file size
    if len(file_bytes) > DOCUMENT_UPLOAD_MAX_FILE_SIZE_BYTES:
        raise OnyxError(
            OnyxErrorCode.PAYLOAD_TOO_LARGE,
            f"File size {len(file_bytes)} bytes exceeds maximum "
            f"allowed size of {DOCUMENT_UPLOAD_MAX_FILE_SIZE_BYTES} bytes",
        )
    # Determine file type from the filename extension (e.g. "PDF", "DOCX")
    filename = file.filename or "untitled"
    file_type = None
    if filename:
        parts = filename.rsplit(".", 1)
        if len(parts) > 1:
            file_type = parts[1].upper()
    # Extract text from the uploaded file. Extraction failure is non-fatal:
    # the document is still stored, just without extracted_text.
    extracted_text = None
    if file_bytes:
        try:
            extracted_text = extract_file_text(
                file=io.BytesIO(file_bytes),
                file_name=filename,
            )
        except Exception as e:
            # Fix: log the actual filename instead of a hard-coded
            # "(unknown)" placeholder.
            logger.warning(
                f"Failed to extract text from uploaded file '{filename}': {e}"
            )
    doc = ProposalReviewDocument(
        proposal_id=proposal_id,
        file_name=filename,
        file_type=file_type,
        document_role=document_role,
        uploaded_by=user.id,
        extracted_text=extracted_text,
    )
    db_session.add(doc)
    db_session.commit()
    return ProposalDocumentResponse.from_model(doc)
@router.get("/proposals/{proposal_id}/documents")
def list_documents(
    proposal_id: UUID,
    user: User = Depends(require_permission(Permission.BASIC_ACCESS)),  # noqa: ARG001
    db_session: Session = Depends(get_session),
) -> list[ProposalDocumentResponse]:
    """List documents for a proposal."""
    tenant_id = get_current_tenant_id()
    # Guard: the proposal must exist for the current tenant.
    if not proposals_db.get_proposal(proposal_id, tenant_id, db_session):
        raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")
    # Oldest first — matches upload order.
    document_rows = (
        db_session.query(ProposalReviewDocument)
        .filter(ProposalReviewDocument.proposal_id == proposal_id)
        .order_by(ProposalReviewDocument.created_at)
        .all()
    )
    return [ProposalDocumentResponse.from_model(row) for row in document_rows]
@router.delete("/proposals/{proposal_id}/documents/{doc_id}", status_code=204)
def delete_document(
    proposal_id: UUID,
    doc_id: UUID,
    user: User = Depends(require_permission(Permission.BASIC_ACCESS)),  # noqa: ARG001
    db_session: Session = Depends(get_session),
) -> None:
    """Delete a manually uploaded document."""
    # Guard: the proposal must belong to the current tenant.
    tenant_id = get_current_tenant_id()
    if not proposals_db.get_proposal(proposal_id, tenant_id, db_session):
        raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")
    # Scope the lookup to this proposal so a doc_id from another proposal 404s.
    target = (
        db_session.query(ProposalReviewDocument)
        .filter(
            ProposalReviewDocument.id == doc_id,
            ProposalReviewDocument.proposal_id == proposal_id,
        )
        .one_or_none()
    )
    if target is None:
        raise OnyxError(OnyxErrorCode.NOT_FOUND, "Document not found")
    db_session.delete(target)
    db_session.commit()

View File

@@ -1,171 +0,0 @@
"""API endpoints for review triggers, status, and findings."""
from uuid import UUID
from fastapi import APIRouter
from fastapi import Depends
from sqlalchemy.orm import Session
from onyx.auth.permissions import require_permission
from onyx.db.engine.sql_engine import get_session
from onyx.db.enums import Permission
from onyx.db.models import User
from onyx.error_handling.error_codes import OnyxErrorCode
from onyx.error_handling.exceptions import OnyxError
from onyx.server.features.proposal_review.api.models import AuditLogEntry
from onyx.server.features.proposal_review.api.models import FindingResponse
from onyx.server.features.proposal_review.api.models import ReviewRunResponse
from onyx.server.features.proposal_review.api.models import ReviewRunTriggerRequest
from onyx.server.features.proposal_review.db import decisions as decisions_db
from onyx.server.features.proposal_review.db import findings as findings_db
from onyx.server.features.proposal_review.db import proposals as proposals_db
from onyx.server.features.proposal_review.db import rulesets as rulesets_db
from onyx.utils.logger import setup_logger
from shared_configs.contextvars import get_current_tenant_id
logger = setup_logger()
router = APIRouter()
@router.post(
    "/proposals/{proposal_id}/review",
    status_code=201,
)
def trigger_review(
    proposal_id: UUID,
    request: ReviewRunTriggerRequest,
    user: User = Depends(require_permission(Permission.BASIC_ACCESS)),
    db_session: Session = Depends(get_session),
) -> ReviewRunResponse:
    """Trigger a new review run for a proposal.

    Creates a ReviewRun record, moves the proposal to IN_REVIEW, writes an
    audit-log entry, commits, and then dispatches the Celery task that
    executes the review asynchronously. (An earlier note claimed no Celery
    dispatch happened yet; the dispatch below supersedes that.)
    """
    tenant_id = get_current_tenant_id()
    # Verify proposal exists
    proposal = proposals_db.get_proposal(proposal_id, tenant_id, db_session)
    if not proposal:
        raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")
    # Verify ruleset exists and count active rules
    ruleset = rulesets_db.get_ruleset(request.ruleset_id, tenant_id, db_session)
    if not ruleset:
        raise OnyxError(OnyxErrorCode.NOT_FOUND, "Ruleset not found")
    active_rule_count = rulesets_db.count_active_rules(request.ruleset_id, db_session)
    if active_rule_count == 0:
        raise OnyxError(
            OnyxErrorCode.INVALID_INPUT,
            "Ruleset has no active rules",
        )
    # Update proposal status to IN_REVIEW
    proposals_db.update_proposal_status(proposal_id, tenant_id, "IN_REVIEW", db_session)
    # Create the review run record
    run = findings_db.create_review_run(
        proposal_id=proposal_id,
        ruleset_id=request.ruleset_id,
        triggered_by=user.id,
        total_rules=active_rule_count,
        db_session=db_session,
    )
    # Create audit log entry
    decisions_db.create_audit_log(
        proposal_id=proposal_id,
        action="review_triggered",
        user_id=user.id,
        details={
            "review_run_id": str(run.id),
            "ruleset_id": str(request.ruleset_id),
            "total_rules": active_rule_count,
        },
        db_session=db_session,
    )
    # Commit BEFORE dispatching so the worker can see the run row.
    db_session.commit()
    logger.info(
        f"Review triggered for proposal {proposal_id} "
        f"with ruleset {request.ruleset_id} ({active_rule_count} rules)"
    )
    # Dispatch Celery task to run the review asynchronously
    # (imported lazily — presumably to avoid an import cycle with the engine
    # module; confirm before hoisting to module level)
    from onyx.server.features.proposal_review.engine.review_engine import (
        run_proposal_review,
    )
    # expires=3600: the task is dropped if no worker picks it up within 1 hour.
    run_proposal_review.apply_async(args=[str(run.id), tenant_id], expires=3600)
    return ReviewRunResponse.from_model(run)
@router.get(
    "/proposals/{proposal_id}/review-status",
)
def get_review_status(
    proposal_id: UUID,
    user: User = Depends(require_permission(Permission.BASIC_ACCESS)),  # noqa: ARG001
    db_session: Session = Depends(get_session),
) -> ReviewRunResponse:
    """Return the most recent review run for a proposal."""
    tenant_id = get_current_tenant_id()
    if proposals_db.get_proposal(proposal_id, tenant_id, db_session) is None:
        raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")

    latest_run = findings_db.get_latest_review_run(proposal_id, db_session)
    if latest_run is None:
        raise OnyxError(OnyxErrorCode.NOT_FOUND, "No review runs found")
    return ReviewRunResponse.from_model(latest_run)
@router.get(
    "/proposals/{proposal_id}/findings",
)
def get_findings(
    proposal_id: UUID,
    review_run_id: UUID | None = None,
    user: User = Depends(require_permission(Permission.BASIC_ACCESS)),  # noqa: ARG001
    db_session: Session = Depends(get_session),
) -> list[FindingResponse]:
    """Return findings for a proposal.

    Defaults to the latest review run when review_run_id is omitted.
    """
    tenant_id = get_current_tenant_id()
    if proposals_db.get_proposal(proposal_id, tenant_id, db_session) is None:
        raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")

    run_id = review_run_id
    if run_id is None:
        latest = findings_db.get_latest_review_run(proposal_id, db_session)
        if latest is None:
            # No runs yet -> nothing to report.
            return []
        run_id = latest.id

    findings = findings_db.list_findings_by_run(run_id, db_session)
    return [FindingResponse.from_model(finding) for finding in findings]
@router.get(
    "/proposals/{proposal_id}/audit-log",
)
def get_audit_log(
    proposal_id: UUID,
    user: User = Depends(require_permission(Permission.BASIC_ACCESS)),  # noqa: ARG001
    db_session: Session = Depends(get_session),
) -> list[AuditLogEntry]:
    """Return the audit trail for a proposal."""
    tenant_id = get_current_tenant_id()
    if proposals_db.get_proposal(proposal_id, tenant_id, db_session) is None:
        raise OnyxError(OnyxErrorCode.NOT_FOUND, "Proposal not found")
    return [
        AuditLogEntry.from_model(entry)
        for entry in decisions_db.list_audit_log(proposal_id, db_session)
    ]

View File

@@ -1,421 +0,0 @@
"""API endpoints for rulesets and rules."""
from uuid import UUID
from fastapi import APIRouter
from fastapi import Depends
from fastapi import UploadFile
from sqlalchemy.orm import Session
from onyx.auth.permissions import require_permission
from onyx.db.engine.sql_engine import get_session
from onyx.db.enums import Permission
from onyx.db.models import User
from onyx.error_handling.error_codes import OnyxErrorCode
from onyx.error_handling.exceptions import OnyxError
from onyx.server.features.proposal_review.api.models import BulkRuleUpdateRequest
from onyx.server.features.proposal_review.api.models import BulkRuleUpdateResponse
from onyx.server.features.proposal_review.api.models import ImportResponse
from onyx.server.features.proposal_review.api.models import RuleCreate
from onyx.server.features.proposal_review.api.models import RuleResponse
from onyx.server.features.proposal_review.api.models import RulesetCreate
from onyx.server.features.proposal_review.api.models import RulesetResponse
from onyx.server.features.proposal_review.api.models import RulesetUpdate
from onyx.server.features.proposal_review.api.models import RuleUpdate
from onyx.server.features.proposal_review.configs import IMPORT_MAX_FILE_SIZE_BYTES
from onyx.server.features.proposal_review.db import rulesets as rulesets_db
from shared_configs.contextvars import get_current_tenant_id
router = APIRouter()
# =============================================================================
# Rulesets
# =============================================================================
@router.get("/rulesets")
def list_rulesets(
user: User = Depends(require_permission(Permission.BASIC_ACCESS)), # noqa: ARG001
db_session: Session = Depends(get_session),
) -> list[RulesetResponse]:
"""List all rulesets for the current tenant."""
tenant_id = get_current_tenant_id()
rulesets = rulesets_db.list_rulesets(tenant_id, db_session)
return [RulesetResponse.from_model(rs) for rs in rulesets]
@router.post("/rulesets", status_code=201)
def create_ruleset(
request: RulesetCreate,
user: User = Depends(require_permission(Permission.MANAGE_CONNECTORS)),
db_session: Session = Depends(get_session),
) -> RulesetResponse:
"""Create a new ruleset."""
tenant_id = get_current_tenant_id()
ruleset = rulesets_db.create_ruleset(
tenant_id=tenant_id,
name=request.name,
description=request.description,
is_default=request.is_default,
created_by=user.id,
db_session=db_session,
)
db_session.commit()
return RulesetResponse.from_model(ruleset, include_rules=False)
@router.get("/rulesets/{ruleset_id}")
def get_ruleset(
ruleset_id: UUID,
user: User = Depends(require_permission(Permission.BASIC_ACCESS)), # noqa: ARG001
db_session: Session = Depends(get_session),
) -> RulesetResponse:
"""Get a ruleset with all its rules."""
tenant_id = get_current_tenant_id()
ruleset = rulesets_db.get_ruleset(ruleset_id, tenant_id, db_session)
if not ruleset:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Ruleset not found")
return RulesetResponse.from_model(ruleset)
@router.put("/rulesets/{ruleset_id}")
def update_ruleset(
ruleset_id: UUID,
request: RulesetUpdate,
user: User = Depends( # noqa: ARG001
require_permission(Permission.MANAGE_CONNECTORS)
),
db_session: Session = Depends(get_session),
) -> RulesetResponse:
"""Update a ruleset."""
tenant_id = get_current_tenant_id()
ruleset = rulesets_db.update_ruleset(
ruleset_id=ruleset_id,
tenant_id=tenant_id,
name=request.name,
description=request.description,
is_default=request.is_default,
is_active=request.is_active,
db_session=db_session,
)
if not ruleset:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Ruleset not found")
db_session.commit()
return RulesetResponse.from_model(ruleset)
@router.delete("/rulesets/{ruleset_id}", status_code=204)
def delete_ruleset(
ruleset_id: UUID,
user: User = Depends( # noqa: ARG001
require_permission(Permission.MANAGE_CONNECTORS)
),
db_session: Session = Depends(get_session),
) -> None:
"""Delete a ruleset and all its rules."""
tenant_id = get_current_tenant_id()
deleted = rulesets_db.delete_ruleset(ruleset_id, tenant_id, db_session)
if not deleted:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Ruleset not found")
db_session.commit()
# =============================================================================
# Rules
# =============================================================================
@router.post(
    "/rulesets/{ruleset_id}/rules",
    status_code=201,
)
def create_rule(
    ruleset_id: UUID,
    request: RuleCreate,
    user: User = Depends(  # noqa: ARG001
        require_permission(Permission.MANAGE_CONNECTORS)
    ),
    db_session: Session = Depends(get_session),
) -> RuleResponse:
    """Add a new rule to an existing ruleset."""
    # The target ruleset must belong to the caller's tenant.
    tenant_id = get_current_tenant_id()
    if rulesets_db.get_ruleset(ruleset_id, tenant_id, db_session) is None:
        raise OnyxError(OnyxErrorCode.NOT_FOUND, "Ruleset not found")

    new_rule = rulesets_db.create_rule(
        ruleset_id=ruleset_id,
        name=request.name,
        description=request.description,
        category=request.category,
        rule_type=request.rule_type,
        rule_intent=request.rule_intent,
        prompt_template=request.prompt_template,
        source=request.source,
        authority=request.authority,
        is_hard_stop=request.is_hard_stop,
        priority=request.priority,
        db_session=db_session,
    )
    db_session.commit()
    return RuleResponse.from_model(new_rule)
@router.put("/rules/{rule_id}")
def update_rule(
rule_id: UUID,
request: RuleUpdate,
user: User = Depends( # noqa: ARG001
require_permission(Permission.MANAGE_CONNECTORS)
),
db_session: Session = Depends(get_session),
) -> RuleResponse:
"""Update a rule."""
# Verify the rule belongs to a ruleset owned by the current tenant
tenant_id = get_current_tenant_id()
rule = rulesets_db.get_rule(rule_id, db_session)
if not rule:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Rule not found")
ruleset = rulesets_db.get_ruleset(rule.ruleset_id, tenant_id, db_session)
if not ruleset:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Rule not found")
updated_rule = rulesets_db.update_rule(
rule_id=rule_id,
name=request.name,
description=request.description,
category=request.category,
rule_type=request.rule_type,
rule_intent=request.rule_intent,
prompt_template=request.prompt_template,
authority=request.authority,
is_hard_stop=request.is_hard_stop,
priority=request.priority,
is_active=request.is_active,
db_session=db_session,
)
if not updated_rule:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Rule not found")
db_session.commit()
return RuleResponse.from_model(updated_rule)
@router.delete("/rules/{rule_id}", status_code=204)
def delete_rule(
rule_id: UUID,
user: User = Depends( # noqa: ARG001
require_permission(Permission.MANAGE_CONNECTORS)
),
db_session: Session = Depends(get_session),
) -> None:
"""Delete a rule."""
# Verify the rule belongs to a ruleset owned by the current tenant
tenant_id = get_current_tenant_id()
rule = rulesets_db.get_rule(rule_id, db_session)
if not rule:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Rule not found")
ruleset = rulesets_db.get_ruleset(rule.ruleset_id, tenant_id, db_session)
if not ruleset:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Rule not found")
deleted = rulesets_db.delete_rule(rule_id, db_session)
if not deleted:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Rule not found")
db_session.commit()
@router.post(
    "/rulesets/{ruleset_id}/rules/bulk-update",
)
def bulk_update_rules(
    ruleset_id: UUID,
    request: BulkRuleUpdateRequest,
    user: User = Depends(  # noqa: ARG001
        require_permission(Permission.MANAGE_CONNECTORS)
    ),
    db_session: Session = Depends(get_session),
) -> BulkRuleUpdateResponse:
    """Batch activate/deactivate/delete rules within one ruleset."""
    tenant_id = get_current_tenant_id()
    if rulesets_db.get_ruleset(ruleset_id, tenant_id, db_session) is None:
        raise OnyxError(OnyxErrorCode.NOT_FOUND, "Ruleset not found")

    valid_actions = ("activate", "deactivate", "delete")
    if request.action not in valid_actions:
        raise OnyxError(
            OnyxErrorCode.INVALID_INPUT,
            "action must be 'activate', 'deactivate', or 'delete'",
        )

    # Scoping to this ruleset keeps the operation tenant-safe.
    updated_count = rulesets_db.bulk_update_rules(
        request.rule_ids, request.action, ruleset_id, db_session
    )
    db_session.commit()
    return BulkRuleUpdateResponse(updated_count=updated_count)
@router.post(
    "/rulesets/{ruleset_id}/import",
)
def import_checklist(
    ruleset_id: UUID,
    file: UploadFile,
    user: User = Depends(  # noqa: ARG001
        require_permission(Permission.MANAGE_CONNECTORS)
    ),
    db_session: Session = Depends(get_session),
) -> ImportResponse:
    """Upload a checklist document and parse it into atomic review rules via LLM.

    Accepts a checklist file (.pdf, .docx, .xlsx, .txt), extracts its text,
    and uses LLM to decompose it into atomic, self-contained rules.
    Rules are saved to the ruleset as inactive drafts (is_active=false).

    Raises:
        OnyxError: NOT_FOUND if the ruleset is missing; INVALID_INPUT for an
            unreadable/empty file or failed text extraction;
            PAYLOAD_TOO_LARGE when the upload exceeds the size cap;
            INTERNAL_ERROR when the LLM importer fails.
    """
    tenant_id = get_current_tenant_id()
    ruleset = rulesets_db.get_ruleset(ruleset_id, tenant_id, db_session)
    if not ruleset:
        raise OnyxError(OnyxErrorCode.NOT_FOUND, "Ruleset not found")
    # Read the uploaded file content
    try:
        file_content = file.file.read()
    except Exception as e:
        # Chain the cause so the underlying read failure is preserved.
        raise OnyxError(
            OnyxErrorCode.INVALID_INPUT,
            f"Failed to read uploaded file: {str(e)}",
        ) from e
    if not file_content:
        raise OnyxError(OnyxErrorCode.INVALID_INPUT, "Uploaded file is empty")
    # Validate file size
    if len(file_content) > IMPORT_MAX_FILE_SIZE_BYTES:
        raise OnyxError(
            OnyxErrorCode.PAYLOAD_TOO_LARGE,
            f"File size {len(file_content)} bytes exceeds maximum "
            f"allowed size of {IMPORT_MAX_FILE_SIZE_BYTES} bytes",
        )
    # Extract text from the file
    # For text files, decode directly; for other formats, use extract_file_text
    extracted_text = ""
    filename = file.filename or "untitled"
    file_ext = filename.rsplit(".", 1)[-1].lower() if "." in filename else ""
    if file_ext in ("txt", "text", "md"):
        extracted_text = file_content.decode("utf-8", errors="replace")
    else:
        try:
            import io

            from onyx.file_processing.extract_file_text import extract_file_text

            extracted_text = extract_file_text(
                file=io.BytesIO(file_content),
                file_name=filename,
            )
        except Exception as e:
            raise OnyxError(
                OnyxErrorCode.INVALID_INPUT,
                f"Failed to extract text from file: {str(e)}",
            ) from e
    if not extracted_text or not extracted_text.strip():
        raise OnyxError(
            OnyxErrorCode.INVALID_INPUT,
            "No text could be extracted from the uploaded file",
        )
    # Call the LLM-based checklist importer (local import to avoid pulling
    # the LLM stack in at module import time)
    from onyx.server.features.proposal_review.engine.checklist_importer import (
        import_checklist,
    )

    try:
        rule_dicts = import_checklist(extracted_text)
    except RuntimeError as e:
        raise OnyxError(OnyxErrorCode.INTERNAL_ERROR, str(e)) from e
    # Save parsed rules to the ruleset as inactive drafts
    created_rules = []
    for rd in rule_dicts:
        rule = rulesets_db.create_rule(
            ruleset_id=ruleset_id,
            name=rd["name"],
            description=rd.get("description"),
            category=rd.get("category"),
            rule_type=rd.get("rule_type", "CUSTOM_NL"),
            rule_intent=rd.get("rule_intent", "CHECK"),
            prompt_template=rd["prompt_template"],
            source="IMPORTED",
            is_hard_stop=False,
            priority=0,
            db_session=db_session,
        )
        # Rules start as inactive drafts — admin reviews and activates
        rule.is_active = False
        created_rules.append(rule)
    # One flush for all draft-flag updates; the previous per-rule flush was
    # redundant (create_rule already flushes each insert).
    db_session.flush()
    db_session.commit()
    return ImportResponse(
        rules_created=len(created_rules),
        rules=[RuleResponse.from_model(r) for r in created_rules],
    )
@router.post("/rules/{rule_id}/test")
def test_rule(
rule_id: UUID,
user: User = Depends( # noqa: ARG001
require_permission(Permission.MANAGE_CONNECTORS)
),
db_session: Session = Depends(get_session),
) -> dict:
"""Test a rule against sample text.
Evaluates the rule against an empty/minimal proposal context to verify
the prompt template is well-formed and the LLM can produce a valid response.
"""
# Verify the rule belongs to a ruleset owned by the current tenant
tenant_id = get_current_tenant_id()
rule = rulesets_db.get_rule(rule_id, db_session)
if not rule:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Rule not found")
ruleset = rulesets_db.get_ruleset(rule.ruleset_id, tenant_id, db_session)
if not ruleset:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "Rule not found")
from onyx.server.features.proposal_review.engine.context_assembler import (
ProposalContext,
)
from onyx.server.features.proposal_review.engine.rule_evaluator import (
evaluate_rule,
)
# Build a minimal test context
test_context = ProposalContext(
proposal_text="[Sample proposal text for testing. No real proposal loaded.]",
budget_text="[No budget text available for test.]",
foa_text="[No FOA text available for test.]",
metadata={"test_mode": True},
jira_key="TEST-000",
)
try:
result = evaluate_rule(rule, test_context, db_session)
except Exception as e:
return {
"rule_id": str(rule_id),
"success": False,
"error": str(e),
}
return {
"rule_id": str(rule_id),
"success": True,
"result": result,
}

View File

@@ -1,18 +0,0 @@
import os

# Feature flag for enabling proposal review (env override, default on).
ENABLE_PROPOSAL_REVIEW = (
    os.environ.get("ENABLE_PROPOSAL_REVIEW", "true").lower() == "true"
)

# Conversion factor shared by the size limits below.
_BYTES_PER_MB = 1024 * 1024

# Maximum file size for checklist imports (in MB)
IMPORT_MAX_FILE_SIZE_MB = int(
    os.environ.get("PROPOSAL_REVIEW_IMPORT_MAX_FILE_SIZE_MB", "50")
)
IMPORT_MAX_FILE_SIZE_BYTES = IMPORT_MAX_FILE_SIZE_MB * _BYTES_PER_MB

# Maximum file size for document uploads (in MB)
DOCUMENT_UPLOAD_MAX_FILE_SIZE_MB = int(
    os.environ.get("PROPOSAL_REVIEW_DOCUMENT_UPLOAD_MAX_FILE_SIZE_MB", "100")
)
DOCUMENT_UPLOAD_MAX_FILE_SIZE_BYTES = DOCUMENT_UPLOAD_MAX_FILE_SIZE_MB * _BYTES_PER_MB

View File

@@ -1,93 +0,0 @@
"""DB operations for tenant configuration."""
from datetime import datetime
from datetime import timezone
from typing import Any
from sqlalchemy import select
from sqlalchemy.orm import Session
from onyx.db.models import Document__Tag
from onyx.db.models import DocumentByConnectorCredentialPair
from onyx.db.models import Tag
from onyx.server.features.proposal_review.db.models import ProposalReviewConfig
from onyx.utils.logger import setup_logger
logger = setup_logger()
def get_config(
    tenant_id: str,
    db_session: Session,
) -> ProposalReviewConfig | None:
    """Fetch the single per-tenant config row, or None if absent."""
    query = db_session.query(ProposalReviewConfig).filter(
        ProposalReviewConfig.tenant_id == tenant_id
    )
    return query.one_or_none()
def upsert_config(
    tenant_id: str,
    db_session: Session,
    jira_connector_id: int | None = None,
    jira_project_key: str | None = None,
    field_mapping: list[str] | None = None,
    jira_writeback: dict[str, Any] | None = None,
) -> ProposalReviewConfig:
    """Create or update the tenant config.

    On update, only non-None arguments overwrite existing values — a field
    cannot be reset to NULL through this helper.
    """
    existing = get_config(tenant_id, db_session)
    if existing is None:
        created = ProposalReviewConfig(
            tenant_id=tenant_id,
            jira_connector_id=jira_connector_id,
            jira_project_key=jira_project_key,
            field_mapping=field_mapping,
            jira_writeback=jira_writeback,
        )
        db_session.add(created)
        db_session.flush()
        logger.info(f"Created proposal review config for tenant {tenant_id}")
        return created

    updates = {
        "jira_connector_id": jira_connector_id,
        "jira_project_key": jira_project_key,
        "field_mapping": field_mapping,
        "jira_writeback": jira_writeback,
    }
    for attr, value in updates.items():
        if value is not None:
            setattr(existing, attr, value)
    existing.updated_at = datetime.now(timezone.utc)
    db_session.flush()
    logger.info(f"Updated proposal review config for tenant {tenant_id}")
    return existing
def get_connector_metadata_keys(
    connector_id: int,
    db_session: Session,
) -> list[str]:
    """Return distinct metadata tag keys for documents from a connector.

    Jira custom fields are stored as tags (tag_key / tag_value) linked
    to documents via the document__tag join table.
    """
    key_query = (
        select(Tag.tag_key)
        .select_from(Tag)
        .join(Document__Tag, Tag.id == Document__Tag.tag_id)
        .join(
            DocumentByConnectorCredentialPair,
            Document__Tag.document_id == DocumentByConnectorCredentialPair.id,
        )
        .where(
            DocumentByConnectorCredentialPair.connector_id == connector_id,
        )
        .distinct()
        .limit(500)  # safety cap; tag vocabularies should be far smaller
    )
    return sorted(key for (key,) in db_session.execute(key_query))

View File

@@ -1,168 +0,0 @@
"""DB operations for finding decisions and proposal decisions."""
from datetime import datetime
from datetime import timezone
from uuid import UUID
from sqlalchemy import desc
from sqlalchemy.orm import Session
from onyx.server.features.proposal_review.db.models import ProposalReviewAuditLog
from onyx.server.features.proposal_review.db.models import ProposalReviewDecision
from onyx.server.features.proposal_review.db.models import (
ProposalReviewProposalDecision,
)
from onyx.utils.logger import setup_logger
logger = setup_logger()
# =============================================================================
# Per-Finding Decisions (upsert — one decision per finding)
# =============================================================================
def upsert_finding_decision(
    finding_id: UUID,
    officer_id: UUID,
    action: str,
    db_session: Session,
    notes: str | None = None,
) -> ProposalReviewDecision:
    """Create or update a decision on a finding.

    There is a UNIQUE constraint on finding_id, so this is an upsert.
    """
    current = (
        db_session.query(ProposalReviewDecision)
        .filter(ProposalReviewDecision.finding_id == finding_id)
        .one_or_none()
    )
    if current is None:
        created = ProposalReviewDecision(
            finding_id=finding_id,
            officer_id=officer_id,
            action=action,
            notes=notes,
        )
        db_session.add(created)
        db_session.flush()
        logger.info(f"Created decision {created.id} on finding {finding_id}")
        return created

    # Overwrite the existing decision in place.
    current.officer_id = officer_id
    current.action = action
    current.notes = notes
    current.updated_at = datetime.now(timezone.utc)
    db_session.flush()
    logger.info(f"Updated decision on finding {finding_id} to {action}")
    return current
def get_finding_decision(
    finding_id: UUID,
    db_session: Session,
) -> ProposalReviewDecision | None:
    """Return the decision attached to a finding, or None."""
    query = db_session.query(ProposalReviewDecision).filter(
        ProposalReviewDecision.finding_id == finding_id
    )
    return query.one_or_none()
# =============================================================================
# Proposal-Level Decisions
# =============================================================================
def create_proposal_decision(
    proposal_id: UUID,
    officer_id: UUID,
    decision: str,
    db_session: Session,
    notes: str | None = None,
) -> ProposalReviewProposalDecision:
    """Record a final officer decision on a proposal."""
    record = ProposalReviewProposalDecision(
        proposal_id=proposal_id,
        officer_id=officer_id,
        decision=decision,
        notes=notes,
    )
    db_session.add(record)
    db_session.flush()
    logger.info(
        f"Created proposal decision {record.id} ({decision}) for proposal {proposal_id}"
    )
    return record
def get_latest_proposal_decision(
    proposal_id: UUID,
    db_session: Session,
) -> ProposalReviewProposalDecision | None:
    """Return the newest decision for a proposal, if any."""
    query = (
        db_session.query(ProposalReviewProposalDecision)
        .filter(ProposalReviewProposalDecision.proposal_id == proposal_id)
        .order_by(desc(ProposalReviewProposalDecision.created_at))
    )
    return query.first()
def mark_decision_jira_synced(
    decision_id: UUID,
    db_session: Session,
) -> ProposalReviewProposalDecision | None:
    """Flag a proposal decision as pushed to Jira; None if not found."""
    record = (
        db_session.query(ProposalReviewProposalDecision)
        .filter(ProposalReviewProposalDecision.id == decision_id)
        .one_or_none()
    )
    if record is None:
        return None
    record.jira_synced = True
    record.jira_synced_at = datetime.now(timezone.utc)
    db_session.flush()
    logger.info(f"Marked proposal decision {decision_id} as jira_synced")
    return record
# =============================================================================
# Audit Log
# =============================================================================
def create_audit_log(
    proposal_id: UUID,
    action: str,
    db_session: Session,
    user_id: UUID | None = None,
    details: dict | None = None,
) -> ProposalReviewAuditLog:
    """Append an entry to a proposal's audit trail."""
    audit_entry = ProposalReviewAuditLog(
        proposal_id=proposal_id,
        user_id=user_id,
        action=action,
        details=details,
    )
    db_session.add(audit_entry)
    db_session.flush()
    return audit_entry
def list_audit_log(
    proposal_id: UUID,
    db_session: Session,
) -> list[ProposalReviewAuditLog]:
    """List audit log entries for a proposal, newest first."""
    query = (
        db_session.query(ProposalReviewAuditLog)
        .filter(ProposalReviewAuditLog.proposal_id == proposal_id)
        .order_by(desc(ProposalReviewAuditLog.created_at))
    )
    return query.all()

View File

@@ -1,158 +0,0 @@
"""DB operations for review runs and findings."""
from uuid import UUID
from sqlalchemy import desc
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from onyx.server.features.proposal_review.db.models import ProposalReviewFinding
from onyx.server.features.proposal_review.db.models import ProposalReviewRun
from onyx.utils.logger import setup_logger
logger = setup_logger()
# =============================================================================
# Review Runs
# =============================================================================
def create_review_run(
    proposal_id: UUID,
    ruleset_id: UUID,
    triggered_by: UUID,
    total_rules: int,
    db_session: Session,
) -> ProposalReviewRun:
    """Persist a new review run record and return it."""
    review_run = ProposalReviewRun(
        proposal_id=proposal_id,
        ruleset_id=ruleset_id,
        triggered_by=triggered_by,
        total_rules=total_rules,
    )
    db_session.add(review_run)
    db_session.flush()
    logger.info(
        f"Created review run {review_run.id} for proposal {proposal_id} "
        f"with {total_rules} rules"
    )
    return review_run
def get_review_run(
    run_id: UUID,
    db_session: Session,
) -> ProposalReviewRun | None:
    """Look up a review run by primary key."""
    query = db_session.query(ProposalReviewRun).filter(
        ProposalReviewRun.id == run_id
    )
    return query.one_or_none()
def get_latest_review_run(
    proposal_id: UUID,
    db_session: Session,
) -> ProposalReviewRun | None:
    """Return the newest review run for a proposal, if any."""
    query = (
        db_session.query(ProposalReviewRun)
        .filter(ProposalReviewRun.proposal_id == proposal_id)
        .order_by(desc(ProposalReviewRun.created_at))
    )
    return query.first()
# =============================================================================
# Findings
# =============================================================================
def create_finding(
    proposal_id: UUID,
    rule_id: UUID,
    review_run_id: UUID,
    verdict: str,
    db_session: Session,
    confidence: str | None = None,
    evidence: str | None = None,
    explanation: str | None = None,
    suggested_action: str | None = None,
    llm_model: str | None = None,
    llm_tokens_used: int | None = None,
) -> ProposalReviewFinding:
    """Persist one rule-evaluation finding for a review run."""
    new_finding = ProposalReviewFinding(
        proposal_id=proposal_id,
        rule_id=rule_id,
        review_run_id=review_run_id,
        verdict=verdict,
        confidence=confidence,
        evidence=evidence,
        explanation=explanation,
        suggested_action=suggested_action,
        llm_model=llm_model,
        llm_tokens_used=llm_tokens_used,
    )
    db_session.add(new_finding)
    db_session.flush()
    logger.info(
        f"Created finding {new_finding.id} verdict={verdict} for proposal {proposal_id}"
    )
    return new_finding
def get_finding(
    finding_id: UUID,
    db_session: Session,
) -> ProposalReviewFinding | None:
    """Get a finding by ID with its decision and rule eagerly loaded."""
    # Eager-load to avoid N+1 queries when the caller touches relations.
    query = (
        db_session.query(ProposalReviewFinding)
        .filter(ProposalReviewFinding.id == finding_id)
        .options(
            selectinload(ProposalReviewFinding.decision),
            selectinload(ProposalReviewFinding.rule),
        )
    )
    return query.one_or_none()
def list_findings_by_proposal(
    proposal_id: UUID,
    db_session: Session,
    review_run_id: UUID | None = None,
) -> list[ProposalReviewFinding]:
    """List findings for a proposal, optionally filtered to a specific run."""
    filters = [ProposalReviewFinding.proposal_id == proposal_id]
    if review_run_id:
        filters.append(ProposalReviewFinding.review_run_id == review_run_id)
    return (
        db_session.query(ProposalReviewFinding)
        .filter(*filters)
        .options(
            selectinload(ProposalReviewFinding.decision),
            selectinload(ProposalReviewFinding.rule),
        )
        .order_by(ProposalReviewFinding.created_at)
        .all()
    )
def list_findings_by_run(
    review_run_id: UUID,
    db_session: Session,
) -> list[ProposalReviewFinding]:
    """List all findings for a specific review run."""
    query = (
        db_session.query(ProposalReviewFinding)
        .filter(ProposalReviewFinding.review_run_id == review_run_id)
        .options(
            selectinload(ProposalReviewFinding.decision),
            selectinload(ProposalReviewFinding.rule),
        )
        .order_by(ProposalReviewFinding.created_at)
    )
    return query.all()

View File

@@ -1,412 +0,0 @@
"""SQLAlchemy models for Proposal Review (Argus)."""
import datetime
from uuid import UUID
from sqlalchemy import Boolean
from sqlalchemy import DateTime
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import Text
from sqlalchemy import text
from sqlalchemy import UniqueConstraint
from sqlalchemy.dialects.postgresql import JSONB as PGJSONB
from sqlalchemy.dialects.postgresql import UUID as PGUUID
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import mapped_column
from sqlalchemy.orm import relationship
from onyx.db.models import Base
class ProposalReviewRuleset(Base):
    """A tenant-scoped, named collection of review rules.

    Rules are ordered by priority and are cascade-deleted with the ruleset.
    """

    __tablename__ = "proposal_review_ruleset"

    # Primary key generated server-side by Postgres.
    id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True),
        primary_key=True,
        server_default=text("gen_random_uuid()"),
    )
    # Owning tenant; indexed for per-tenant listing queries.
    tenant_id: Mapped[str] = mapped_column(Text, nullable=False, index=True)
    name: Mapped[str] = mapped_column(Text, nullable=False)
    description: Mapped[str | None] = mapped_column(Text, nullable=True)
    # Whether this ruleset is the tenant's default.
    is_default: Mapped[bool] = mapped_column(
        Boolean, nullable=False, server_default=text("false")
    )
    is_active: Mapped[bool] = mapped_column(
        Boolean, nullable=False, server_default=text("true")
    )
    # Creating user; nullable, no ON DELETE cascade from user.
    created_by: Mapped[UUID | None] = mapped_column(
        PGUUID(as_uuid=True), ForeignKey("user.id"), nullable=True
    )
    created_at: Mapped[datetime.datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    # NOTE(review): no onupdate here — updated_at appears to be maintained
    # by application code; confirm against the DB helper modules.
    updated_at: Mapped[datetime.datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    # Child rules, ordered by priority; deleted along with the ruleset.
    rules: Mapped[list["ProposalReviewRule"]] = relationship(
        "ProposalReviewRule",
        back_populates="ruleset",
        cascade="all, delete-orphan",
        order_by="ProposalReviewRule.priority",
    )
class ProposalReviewRule(Base):
    """A single review rule belonging to a ruleset.

    Deleting the parent ruleset cascades to its rules (ON DELETE CASCADE).
    """

    __tablename__ = "proposal_review_rule"

    # Primary key generated server-side by Postgres.
    id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True),
        primary_key=True,
        server_default=text("gen_random_uuid()"),
    )
    # Parent ruleset; indexed for per-ruleset rule listing.
    ruleset_id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True),
        ForeignKey("proposal_review_ruleset.id", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )
    name: Mapped[str] = mapped_column(Text, nullable=False)
    description: Mapped[str | None] = mapped_column(Text, nullable=True)
    category: Mapped[str | None] = mapped_column(Text, nullable=True)
    rule_type: Mapped[str] = mapped_column(Text, nullable=False)
    # Defaults to 'CHECK' at the DB level.
    rule_intent: Mapped[str] = mapped_column(
        Text, nullable=False, server_default=text("'CHECK'")
    )
    # LLM prompt used when evaluating the rule.
    prompt_template: Mapped[str] = mapped_column(Text, nullable=False)
    # Origin of the rule; defaults to 'MANUAL' (imports use 'IMPORTED').
    source: Mapped[str] = mapped_column(
        Text, nullable=False, server_default=text("'MANUAL'")
    )
    authority: Mapped[str | None] = mapped_column(Text, nullable=True)
    # Hard-stop rules presumably block approval on failure — confirm with
    # the review engine.
    is_hard_stop: Mapped[bool] = mapped_column(
        Boolean, nullable=False, server_default=text("false")
    )
    # Ordering key used by the ruleset's `rules` relationship.
    priority: Mapped[int] = mapped_column(
        Integer, nullable=False, server_default=text("0")
    )
    is_active: Mapped[bool] = mapped_column(
        Boolean, nullable=False, server_default=text("true")
    )
    created_at: Mapped[datetime.datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    updated_at: Mapped[datetime.datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    ruleset: Mapped["ProposalReviewRuleset"] = relationship(
        "ProposalReviewRuleset", back_populates="rules"
    )
class ProposalReviewProposal(Base):
    """Review state for one source document, unique per (document, tenant)."""

    __tablename__ = "proposal_review_proposal"
    __table_args__ = (
        # One proposal row per document within a tenant; get_or_create_proposal
        # relies on this constraint to resolve concurrent creation races.
        UniqueConstraint("document_id", "tenant_id"),
        Index("ix_proposal_review_proposal_tenant_id", "tenant_id"),
        Index("ix_proposal_review_proposal_document_id", "document_id"),
        Index("ix_proposal_review_proposal_status", "status"),
    )

    id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True),
        primary_key=True,
        server_default=text("gen_random_uuid()"),
    )
    # ID of the linked Document row (e.g. a Jira ticket).
    document_id: Mapped[str] = mapped_column(Text, nullable=False)
    tenant_id: Mapped[str] = mapped_column(Text, nullable=False)
    status: Mapped[str] = mapped_column(
        Text, nullable=False, server_default=text("'PENDING'")
    )
    created_at: Mapped[datetime.datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    # NOTE(review): no onupdate — update_proposal_status sets this manually.
    updated_at: Mapped[datetime.datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )

    # Child collections; all are deleted along with the proposal.
    review_runs: Mapped[list["ProposalReviewRun"]] = relationship(
        "ProposalReviewRun",
        back_populates="proposal",
        cascade="all, delete-orphan",
    )
    findings: Mapped[list["ProposalReviewFinding"]] = relationship(
        "ProposalReviewFinding",
        back_populates="proposal",
        cascade="all, delete-orphan",
    )
    proposal_decisions: Mapped[list["ProposalReviewProposalDecision"]] = relationship(
        "ProposalReviewProposalDecision",
        back_populates="proposal",
        cascade="all, delete-orphan",
    )
    documents: Mapped[list["ProposalReviewDocument"]] = relationship(
        "ProposalReviewDocument",
        back_populates="proposal",
        cascade="all, delete-orphan",
    )
    audit_logs: Mapped[list["ProposalReviewAuditLog"]] = relationship(
        "ProposalReviewAuditLog",
        back_populates="proposal",
        cascade="all, delete-orphan",
    )
class ProposalReviewRun(Base):
    """One execution of a ruleset against a proposal."""

    __tablename__ = "proposal_review_run"

    id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True),
        primary_key=True,
        server_default=text("gen_random_uuid()"),
    )
    proposal_id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True),
        ForeignKey("proposal_review_proposal.id", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )
    # Ruleset evaluated by this run. No ondelete cascade: rulesets with runs
    # referencing them cannot be deleted out from under the run history.
    ruleset_id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True),
        ForeignKey("proposal_review_ruleset.id"),
        nullable=False,
    )
    triggered_by: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True), ForeignKey("user.id"), nullable=False
    )
    status: Mapped[str] = mapped_column(
        Text, nullable=False, server_default=text("'PENDING'")
    )
    # Progress tracking: completed_rules counts up toward total_rules.
    total_rules: Mapped[int] = mapped_column(Integer, nullable=False)
    completed_rules: Mapped[int] = mapped_column(
        Integer, nullable=False, server_default=text("0")
    )
    started_at: Mapped[datetime.datetime | None] = mapped_column(
        DateTime(timezone=True), nullable=True
    )
    completed_at: Mapped[datetime.datetime | None] = mapped_column(
        DateTime(timezone=True), nullable=True
    )
    created_at: Mapped[datetime.datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )

    proposal: Mapped["ProposalReviewProposal"] = relationship(
        "ProposalReviewProposal", back_populates="review_runs"
    )
    findings: Mapped[list["ProposalReviewFinding"]] = relationship(
        "ProposalReviewFinding",
        back_populates="review_run",
        cascade="all, delete-orphan",
    )
class ProposalReviewFinding(Base):
    """Evaluation result for a single rule within a review run."""

    __tablename__ = "proposal_review_finding"

    id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True),
        primary_key=True,
        server_default=text("gen_random_uuid()"),
    )
    proposal_id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True),
        ForeignKey("proposal_review_proposal.id", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )
    rule_id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True),
        ForeignKey("proposal_review_rule.id", ondelete="CASCADE"),
        nullable=False,
    )
    review_run_id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True),
        ForeignKey("proposal_review_run.id", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )
    verdict: Mapped[str] = mapped_column(Text, nullable=False)
    confidence: Mapped[str | None] = mapped_column(Text, nullable=True)
    evidence: Mapped[str | None] = mapped_column(Text, nullable=True)
    explanation: Mapped[str | None] = mapped_column(Text, nullable=True)
    suggested_action: Mapped[str | None] = mapped_column(Text, nullable=True)
    # Bookkeeping for the LLM call that produced this finding.
    llm_model: Mapped[str | None] = mapped_column(Text, nullable=True)
    llm_tokens_used: Mapped[int | None] = mapped_column(Integer, nullable=True)
    created_at: Mapped[datetime.datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )

    proposal: Mapped["ProposalReviewProposal"] = relationship(
        "ProposalReviewProposal", back_populates="findings"
    )
    review_run: Mapped["ProposalReviewRun"] = relationship(
        "ProposalReviewRun", back_populates="findings"
    )
    rule: Mapped["ProposalReviewRule"] = relationship("ProposalReviewRule")
    # At most one officer decision per finding (uselist=False one-to-one).
    decision: Mapped["ProposalReviewDecision | None"] = relationship(
        "ProposalReviewDecision",
        back_populates="finding",
        uselist=False,
        cascade="all, delete-orphan",
    )
class ProposalReviewDecision(Base):
    """Officer's decision on a single finding."""

    __tablename__ = "proposal_review_decision"
    # NOTE(review): this UniqueConstraint duplicates unique=True on the
    # finding_id column below — one of the two is redundant.
    __table_args__ = (UniqueConstraint("finding_id"),)

    id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True),
        primary_key=True,
        server_default=text("gen_random_uuid()"),
    )
    # One decision per finding; removed when the finding is deleted.
    finding_id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True),
        ForeignKey("proposal_review_finding.id", ondelete="CASCADE"),
        nullable=False,
        unique=True,
    )
    officer_id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True), ForeignKey("user.id"), nullable=False
    )
    action: Mapped[str] = mapped_column(Text, nullable=False)
    notes: Mapped[str | None] = mapped_column(Text, nullable=True)
    created_at: Mapped[datetime.datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    # NOTE(review): no onupdate — callers must bump this manually.
    updated_at: Mapped[datetime.datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )

    finding: Mapped["ProposalReviewFinding"] = relationship(
        "ProposalReviewFinding", back_populates="decision"
    )
class ProposalReviewProposalDecision(Base):
    """Officer's final decision on the entire proposal."""

    __tablename__ = "proposal_review_proposal_decision"

    id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True),
        primary_key=True,
        server_default=text("gen_random_uuid()"),
    )
    proposal_id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True),
        ForeignKey("proposal_review_proposal.id", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )
    officer_id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True), ForeignKey("user.id"), nullable=False
    )
    decision: Mapped[str] = mapped_column(Text, nullable=False)
    notes: Mapped[str | None] = mapped_column(Text, nullable=True)
    # Whether/when this decision was written back to Jira.
    jira_synced: Mapped[bool] = mapped_column(
        Boolean, nullable=False, server_default=text("false")
    )
    jira_synced_at: Mapped[datetime.datetime | None] = mapped_column(
        DateTime(timezone=True), nullable=True
    )
    created_at: Mapped[datetime.datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )

    proposal: Mapped["ProposalReviewProposal"] = relationship(
        "ProposalReviewProposal", back_populates="proposal_decisions"
    )
class ProposalReviewDocument(Base):
    """Manually uploaded documents or auto-fetched FOAs."""

    __tablename__ = "proposal_review_document"

    id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True),
        primary_key=True,
        server_default=text("gen_random_uuid()"),
    )
    proposal_id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True),
        ForeignKey("proposal_review_proposal.id", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )
    file_name: Mapped[str] = mapped_column(Text, nullable=False)
    file_type: Mapped[str | None] = mapped_column(Text, nullable=True)
    # Reference into the file store; raw bytes are not kept in this table.
    file_store_id: Mapped[str | None] = mapped_column(Text, nullable=True)
    # Full extracted text — primary text source for rule evaluation in V1
    # (see the proposal context assembler's module docstring).
    extracted_text: Mapped[str | None] = mapped_column(Text, nullable=True)
    document_role: Mapped[str] = mapped_column(Text, nullable=False)
    # Null for auto-fetched documents; set for manual uploads — TODO confirm.
    uploaded_by: Mapped[UUID | None] = mapped_column(
        PGUUID(as_uuid=True), ForeignKey("user.id"), nullable=True
    )
    created_at: Mapped[datetime.datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )

    proposal: Mapped["ProposalReviewProposal"] = relationship(
        "ProposalReviewProposal", back_populates="documents"
    )
class ProposalReviewAuditLog(Base):
    """Audit trail for all proposal review actions."""

    __tablename__ = "proposal_review_audit_log"

    id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True),
        primary_key=True,
        server_default=text("gen_random_uuid()"),
    )
    proposal_id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True),
        ForeignKey("proposal_review_proposal.id", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )
    # Nullable so system-initiated actions can be recorded without a user.
    user_id: Mapped[UUID | None] = mapped_column(
        PGUUID(as_uuid=True), ForeignKey("user.id"), nullable=True
    )
    action: Mapped[str] = mapped_column(Text, nullable=False)
    # Free-form structured payload describing the action.
    details: Mapped[dict | None] = mapped_column(PGJSONB(), nullable=True)
    created_at: Mapped[datetime.datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )

    proposal: Mapped["ProposalReviewProposal"] = relationship(
        "ProposalReviewProposal", back_populates="audit_logs"
    )
class ProposalReviewConfig(Base):
    """Admin configuration (one row per tenant)."""

    __tablename__ = "proposal_review_config"

    id: Mapped[UUID] = mapped_column(
        PGUUID(as_uuid=True),
        primary_key=True,
        server_default=text("gen_random_uuid()"),
    )
    # unique=True enforces the one-row-per-tenant invariant.
    tenant_id: Mapped[str] = mapped_column(Text, nullable=False, unique=True)
    jira_connector_id: Mapped[int | None] = mapped_column(Integer, nullable=True)
    jira_project_key: Mapped[str | None] = mapped_column(Text, nullable=True)
    # JSONB blobs; schema is defined by the admin API — TODO confirm shape.
    field_mapping: Mapped[list | None] = mapped_column(PGJSONB(), nullable=True)
    jira_writeback: Mapped[dict | None] = mapped_column(PGJSONB(), nullable=True)
    created_at: Mapped[datetime.datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )
    # NOTE(review): no onupdate — callers must bump this manually.
    updated_at: Mapped[datetime.datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )

View File

@@ -1,133 +0,0 @@
"""DB operations for proposal state records."""
from datetime import datetime
from datetime import timezone
from uuid import UUID
from sqlalchemy import desc
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session
from onyx.server.features.proposal_review.db.models import ProposalReviewProposal
from onyx.utils.logger import setup_logger
logger = setup_logger()
def get_proposal(
    proposal_id: UUID,
    tenant_id: str,
    db_session: Session,
) -> ProposalReviewProposal | None:
    """Fetch a single proposal by primary key, scoped to the tenant.

    Returns None when no matching row exists.
    """
    query = db_session.query(ProposalReviewProposal).filter(
        ProposalReviewProposal.id == proposal_id,
        ProposalReviewProposal.tenant_id == tenant_id,
    )
    return query.one_or_none()
def get_proposal_by_document_id(
    document_id: str,
    tenant_id: str,
    db_session: Session,
) -> ProposalReviewProposal | None:
    """Fetch the proposal state row linked to a document, or None."""
    query = db_session.query(ProposalReviewProposal).filter(
        ProposalReviewProposal.document_id == document_id,
        ProposalReviewProposal.tenant_id == tenant_id,
    )
    return query.one_or_none()
def get_or_create_proposal(
    document_id: str,
    tenant_id: str,
    db_session: Session,
) -> ProposalReviewProposal:
    """Return the proposal state row for a document, creating it on first use.

    This is the primary entry point — the proposal record is created on first
    interaction, not when the Jira ticket is ingested. A concurrent insert is
    resolved by catching the unique violation and re-reading the winner's row.
    """
    existing = get_proposal_by_document_id(document_id, tenant_id, db_session)
    if existing is not None:
        return existing

    new_proposal = ProposalReviewProposal(
        document_id=document_id,
        tenant_id=tenant_id,
    )
    db_session.add(new_proposal)
    try:
        db_session.flush()
    except IntegrityError:
        # Another transaction won the race; discard ours and use theirs.
        db_session.rollback()
        winner = get_proposal_by_document_id(document_id, tenant_id, db_session)
        if winner is None:
            raise
        return winner

    logger.info(
        f"Lazily created proposal {new_proposal.id} for document {document_id}"
    )
    return new_proposal
def list_proposals(
    tenant_id: str,
    db_session: Session,
    status: str | None = None,
    limit: int = 100,
    offset: int = 0,
) -> list[ProposalReviewProposal]:
    """List a tenant's proposals, most recently updated first.

    An optional status filter and limit/offset pagination are applied.
    """
    criteria = [ProposalReviewProposal.tenant_id == tenant_id]
    if status:
        criteria.append(ProposalReviewProposal.status == status)
    return (
        db_session.query(ProposalReviewProposal)
        .filter(*criteria)
        .order_by(desc(ProposalReviewProposal.updated_at))
        .offset(offset)
        .limit(limit)
        .all()
    )
def count_proposals(
    tenant_id: str,
    db_session: Session,
    status: str | None = None,
) -> int:
    """Count a tenant's proposals, optionally restricted to one status."""
    criteria = [ProposalReviewProposal.tenant_id == tenant_id]
    if status:
        criteria.append(ProposalReviewProposal.status == status)
    return db_session.query(ProposalReviewProposal).filter(*criteria).count()
def update_proposal_status(
    proposal_id: UUID,
    tenant_id: str,
    status: str,
    db_session: Session,
) -> ProposalReviewProposal | None:
    """Set a proposal's status and bump updated_at.

    Returns the updated row, or None when the proposal does not exist
    for this tenant.
    """
    proposal = get_proposal(proposal_id, tenant_id, db_session)
    if proposal is None:
        return None

    proposal.status = status
    proposal.updated_at = datetime.now(timezone.utc)
    db_session.flush()
    logger.info(f"Updated proposal {proposal_id} status to {status}")
    return proposal

View File

@@ -1,338 +0,0 @@
"""DB operations for rulesets and rules."""
from datetime import datetime
from datetime import timezone
from uuid import UUID
from sqlalchemy import desc
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from onyx.server.features.proposal_review.db.models import ProposalReviewRule
from onyx.server.features.proposal_review.db.models import ProposalReviewRuleset
from onyx.utils.logger import setup_logger
logger = setup_logger()
# =============================================================================
# Ruleset CRUD
# =============================================================================
def list_rulesets(
    tenant_id: str,
    db_session: Session,
    active_only: bool = False,
) -> list[ProposalReviewRuleset]:
    """Return every ruleset owned by the tenant, newest first.

    Rules are eagerly loaded via selectinload; inactive rulesets are
    excluded when active_only is set.
    """
    base = db_session.query(ProposalReviewRuleset).filter(
        ProposalReviewRuleset.tenant_id == tenant_id
    )
    if active_only:
        base = base.filter(ProposalReviewRuleset.is_active.is_(True))
    return (
        base.options(selectinload(ProposalReviewRuleset.rules))
        .order_by(desc(ProposalReviewRuleset.created_at))
        .all()
    )
def get_ruleset(
    ruleset_id: UUID,
    tenant_id: str,
    db_session: Session,
) -> ProposalReviewRuleset | None:
    """Fetch one ruleset (rules eagerly loaded), scoped to the tenant."""
    query = (
        db_session.query(ProposalReviewRuleset)
        .filter(
            ProposalReviewRuleset.id == ruleset_id,
            ProposalReviewRuleset.tenant_id == tenant_id,
        )
        .options(selectinload(ProposalReviewRuleset.rules))
    )
    return query.one_or_none()
def create_ruleset(
    tenant_id: str,
    name: str,
    db_session: Session,
    description: str | None = None,
    is_default: bool = False,
    created_by: UUID | None = None,
) -> ProposalReviewRuleset:
    """Create a ruleset; demotes the previous default when is_default is set."""
    if is_default:
        # At most one default ruleset may exist per tenant.
        _clear_default_ruleset(tenant_id, db_session)

    new_ruleset = ProposalReviewRuleset(
        tenant_id=tenant_id,
        name=name,
        description=description,
        is_default=is_default,
        created_by=created_by,
    )
    db_session.add(new_ruleset)
    db_session.flush()
    logger.info(f"Created ruleset {new_ruleset.id} '{name}' for tenant {tenant_id}")
    return new_ruleset
def update_ruleset(
    ruleset_id: UUID,
    tenant_id: str,
    db_session: Session,
    name: str | None = None,
    description: str | None = None,
    is_default: bool | None = None,
    is_active: bool | None = None,
) -> ProposalReviewRuleset | None:
    """Apply partial updates to a ruleset. Returns None if not found.

    None-valued arguments are left untouched, so fields cannot be
    cleared to NULL through this helper.
    """
    ruleset = get_ruleset(ruleset_id, tenant_id, db_session)
    if ruleset is None:
        return None

    for attr, value in (("name", name), ("description", description)):
        if value is not None:
            setattr(ruleset, attr, value)

    if is_default is not None:
        if is_default:
            # Keep at most one default ruleset per tenant.
            _clear_default_ruleset(tenant_id, db_session)
        ruleset.is_default = is_default

    if is_active is not None:
        ruleset.is_active = is_active

    ruleset.updated_at = datetime.now(timezone.utc)
    db_session.flush()
    return ruleset
def delete_ruleset(
    ruleset_id: UUID,
    tenant_id: str,
    db_session: Session,
) -> bool:
    """Delete a ruleset (its rules go with it via ORM cascade).

    Returns False when no such ruleset exists for the tenant.
    """
    target = get_ruleset(ruleset_id, tenant_id, db_session)
    if target is None:
        return False

    db_session.delete(target)
    db_session.flush()
    logger.info(f"Deleted ruleset {ruleset_id}")
    return True
def _clear_default_ruleset(tenant_id: str, db_session: Session) -> None:
    """Demote the tenant's current default ruleset, if any."""
    current_defaults = db_session.query(ProposalReviewRuleset).filter(
        ProposalReviewRuleset.tenant_id == tenant_id,
        ProposalReviewRuleset.is_default.is_(True),
    )
    current_defaults.update({ProposalReviewRuleset.is_default: False})
    db_session.flush()
# =============================================================================
# Rule CRUD
# =============================================================================
def list_rules_by_ruleset(
    ruleset_id: UUID,
    db_session: Session,
    active_only: bool = False,
) -> list[ProposalReviewRule]:
    """Return a ruleset's rules ordered by priority (optionally active only)."""
    rules_query = db_session.query(ProposalReviewRule).filter(
        ProposalReviewRule.ruleset_id == ruleset_id
    )
    if active_only:
        rules_query = rules_query.filter(ProposalReviewRule.is_active.is_(True))
    return rules_query.order_by(ProposalReviewRule.priority).all()
def get_rule(
    rule_id: UUID,
    db_session: Session,
) -> ProposalReviewRule | None:
    """Fetch one rule by ID; returns None when it does not exist."""
    query = db_session.query(ProposalReviewRule).filter(
        ProposalReviewRule.id == rule_id
    )
    return query.one_or_none()
def create_rule(
    ruleset_id: UUID,
    name: str,
    rule_type: str,
    prompt_template: str,
    db_session: Session,
    description: str | None = None,
    category: str | None = None,
    rule_intent: str = "CHECK",
    source: str = "MANUAL",
    authority: str | None = None,
    is_hard_stop: bool = False,
    priority: int = 0,
) -> ProposalReviewRule:
    """Insert a new rule into the given ruleset and return the flushed row."""
    new_rule = ProposalReviewRule(
        ruleset_id=ruleset_id,
        name=name,
        description=description,
        category=category,
        rule_type=rule_type,
        rule_intent=rule_intent,
        prompt_template=prompt_template,
        source=source,
        authority=authority,
        is_hard_stop=is_hard_stop,
        priority=priority,
    )
    db_session.add(new_rule)
    # Flush so new_rule.id (a DB-generated UUID) is populated for logging.
    db_session.flush()
    logger.info(f"Created rule {new_rule.id} '{name}' in ruleset {ruleset_id}")
    return new_rule
def update_rule(
    rule_id: UUID,
    db_session: Session,
    name: str | None = None,
    description: str | None = None,
    category: str | None = None,
    rule_type: str | None = None,
    rule_intent: str | None = None,
    prompt_template: str | None = None,
    authority: str | None = None,
    is_hard_stop: bool | None = None,
    priority: int | None = None,
    is_active: bool | None = None,
) -> ProposalReviewRule | None:
    """Apply partial updates to a rule. Returns None if not found.

    None-valued arguments are skipped, so fields cannot be cleared to
    NULL through this helper.
    """
    rule = get_rule(rule_id, db_session)
    if rule is None:
        return None

    field_updates = {
        "name": name,
        "description": description,
        "category": category,
        "rule_type": rule_type,
        "rule_intent": rule_intent,
        "prompt_template": prompt_template,
        "authority": authority,
        "is_hard_stop": is_hard_stop,
        "priority": priority,
        "is_active": is_active,
    }
    for attr, value in field_updates.items():
        if value is not None:
            setattr(rule, attr, value)

    rule.updated_at = datetime.now(timezone.utc)
    db_session.flush()
    return rule
def delete_rule(
    rule_id: UUID,
    db_session: Session,
) -> bool:
    """Delete a rule by ID. Returns False when it does not exist."""
    target = get_rule(rule_id, db_session)
    if target is None:
        return False

    db_session.delete(target)
    db_session.flush()
    logger.info(f"Deleted rule {rule_id}")
    return True
def bulk_update_rules(
    rule_ids: list[UUID],
    action: str,
    ruleset_id: UUID,
    db_session: Session,
) -> int:
    """Batch activate/deactivate/delete rules.

    Args:
        rule_ids: list of rule IDs
        action: "activate" | "deactivate" | "delete"
        ruleset_id: scope operations to rules within this ruleset

    Returns:
        number of rules affected

    Raises:
        ValueError: for an unrecognized action.
    """
    # All three actions operate on the same tenant-scoped selection.
    scoped = db_session.query(ProposalReviewRule).filter(
        ProposalReviewRule.id.in_(rule_ids),
        ProposalReviewRule.ruleset_id == ruleset_id,
    )

    if action == "delete":
        count = scoped.delete(synchronize_session="fetch")
    elif action in ("activate", "deactivate"):
        count = scoped.update(
            {
                ProposalReviewRule.is_active: action == "activate",
                ProposalReviewRule.updated_at: datetime.now(timezone.utc),
            },
            synchronize_session="fetch",
        )
    else:
        raise ValueError(f"Unknown bulk action: {action}")

    db_session.flush()
    logger.info(f"Bulk {action} on {count} rules")
    return count
def count_active_rules(
    ruleset_id: UUID,
    db_session: Session,
) -> int:
    """Count rules in the ruleset whose is_active flag is set."""
    active_criteria = (
        ProposalReviewRule.ruleset_id == ruleset_id,
        ProposalReviewRule.is_active.is_(True),
    )
    return db_session.query(ProposalReviewRule).filter(*active_criteria).count()

View File

@@ -1 +0,0 @@
"""Argus Review Engine — AI-powered proposal evaluation."""

View File

@@ -1,204 +0,0 @@
"""Parses uploaded checklist documents into atomic review rules via LLM."""
import json
import re
from onyx.llm.factory import get_default_llm
from onyx.llm.models import SystemMessage
from onyx.llm.models import UserMessage
from onyx.llm.utils import llm_response_to_string
from onyx.utils.logger import setup_logger
logger = setup_logger()
# System prompt for the checklist-import LLM call: instructs the model to
# decompose a checklist document into atomic, self-contained rules. The
# numbered principles inside the prompt define the output contract that
# _parse_import_response/_validate_rule enforce downstream.
_IMPORT_SYSTEM_PROMPT = """\
You are an expert at analyzing institutional review checklists for university grant \
offices. Your task is to decompose a checklist document into atomic, self-contained \
review rules that can each be independently evaluated by an AI against a grant proposal.
Key principles:
1. ATOMIC DECOMPOSITION: Each checklist item may contain multiple distinct requirements. \
You MUST split compound items into separate atomic rules. Each rule should test exactly \
ONE pass/fail criterion.
2. CATEGORY PRESERVATION: All rules decomposed from the same source checklist item \
should share the same category value. Use the checklist item's identifier and title \
(e.g., "IR-2: Regulatory Compliance") as the category.
3. SELF-CONTAINED PROMPTS: Each rule's prompt_template must be fully self-contained. \
It should include all context needed to evaluate the criterion. Use {{variable}} \
placeholders for dynamic content:
- {{proposal_text}} - full text of the proposal and supporting documents
- {{budget_text}} - budget/financial sections
- {{foa_text}} - funding opportunity announcement
- {{metadata}} - structured metadata (PI, sponsor, deadlines, etc.)
- {{metadata.FIELD_NAME}} - specific metadata field
4. REFINEMENT DETECTION: If a rule requires institution-specific information that is \
NOT present in the source checklist (such as IDC rates, cost categories, institutional \
policies, specific thresholds, or local procedures), mark it with:
- refinement_needed: true
- refinement_question: a clear question asking for the missing information
- Use a placeholder like {{INSTITUTION_IDC_RATES}} in the prompt_template
5. RULE TYPES:
- DOCUMENT_CHECK: Verifies presence/content of specific documents or sections
- METADATA_CHECK: Validates structured metadata fields
- CROSS_REFERENCE: Compares information across multiple documents (e.g., budget vs narrative)
- CUSTOM_NL: Natural language evaluation of content quality or compliance
6. RULE INTENT:
- CHECK: Pass/fail criterion that must be satisfied
- HIGHLIGHT: Informational flag for officer attention (no pass/fail)"""

# User prompt template; import_checklist fills it with
# .format(checklist_text=...). Doubled braces ({{ / {{{{) survive .format()
# so the model sees literal { } and {{variable}} placeholders respectively.
_IMPORT_USER_PROMPT = """\
Analyze the following checklist document and decompose it into atomic review rules.
CHECKLIST CONTENT:
---
{checklist_text}
---
Respond with ONLY a valid JSON array of rule objects. Each rule must have:
{{
"name": "Short descriptive name for the rule (max 100 chars)",
"description": "Detailed description of what this rule checks",
"category": "Source checklist item identifier and title (e.g., 'IR-2: Regulatory Compliance')",
"rule_type": "DOCUMENT_CHECK | METADATA_CHECK | CROSS_REFERENCE | CUSTOM_NL",
"rule_intent": "CHECK | HIGHLIGHT",
"prompt_template": "Self-contained prompt with {{{{variable}}}} placeholders. Must clearly state the criterion and ask for evaluation.",
"refinement_needed": false,
"refinement_question": null
}}
For rules that need institution-specific info:
{{
"name": "...",
"description": "...",
"category": "...",
"rule_type": "...",
"rule_intent": "CHECK",
"prompt_template": "... {{{{INSTITUTION_IDC_RATES}}}} ...",
"refinement_needed": true,
"refinement_question": "Please provide your institution's IDC rate schedule."
}}
Important:
- Decompose compound checklist items into multiple atomic rules
- Each rule tests exactly ONE criterion
- Prompt templates must be specific and actionable
- Include all relevant context placeholders in templates
- Flag any rule requiring institution-specific knowledge"""
def import_checklist(
    extracted_text: str,
) -> list[dict]:
    """Decompose a checklist document into atomic review rules via the LLM.

    Args:
        extracted_text: Full text content extracted from the uploaded
            checklist file.

    Returns:
        Rule dicts, each with: name, description, category, rule_type,
        rule_intent, prompt_template, refinement_needed, refinement_question.

    Raises:
        RuntimeError: when the LLM call fails or returns unusable JSON.
    """
    if not extracted_text or not extracted_text.strip():
        logger.warning("Empty checklist text provided for import")
        return []

    messages = [
        SystemMessage(content=_IMPORT_SYSTEM_PROMPT),
        UserMessage(
            content=_IMPORT_USER_PROMPT.format(checklist_text=extracted_text)
        ),
    ]

    # Synchronous LLM call — runs inside the API request, not Celery.
    try:
        raw_text = llm_response_to_string(get_default_llm().invoke(messages))
    except Exception as e:
        logger.error(f"LLM call failed during checklist import: {e}")
        raise RuntimeError(f"Failed to parse checklist via LLM: {str(e)}") from e

    parsed_rules = _parse_import_response(raw_text)
    logger.info(f"Checklist import produced {len(parsed_rules)} atomic rules")
    return parsed_rules
def _parse_import_response(raw_text: str) -> list[dict]:
    """Parse the LLM response into a list of validated rule dicts.

    Raises RuntimeError when the response is not a JSON array.
    """
    text = raw_text.strip()
    if text.startswith("```"):
        # The model sometimes wraps its output in a markdown code fence.
        text = re.sub(r"^```(?:json)?\s*\n?", "", text)
        text = re.sub(r"\n?```\s*$", "", text)
        text = text.strip()

    try:
        parsed = json.loads(text)
    except json.JSONDecodeError as e:
        logger.error(f"Failed to parse import response as JSON: {e}")
        logger.debug(f"Raw LLM response: {text[:500]}...")
        raise RuntimeError(
            "LLM returned invalid JSON. Please try the import again."
        ) from e

    if not isinstance(parsed, list):
        raise RuntimeError(
            "LLM returned a non-array JSON. Expected a list of rule objects."
        )

    # Drop malformed entries; keep only rules that pass validation.
    validated: list[dict] = []
    for idx, entry in enumerate(parsed):
        if not isinstance(entry, dict):
            logger.warning(f"Skipping non-dict entry at index {idx}")
            continue
        normalized = _validate_rule(entry, idx)
        if normalized:
            validated.append(normalized)
    return validated
def _validate_rule(raw_rule: dict, index: int) -> dict | None:
"""Validate and normalize a single parsed rule dict."""
valid_types = {"DOCUMENT_CHECK", "METADATA_CHECK", "CROSS_REFERENCE", "CUSTOM_NL"}
valid_intents = {"CHECK", "HIGHLIGHT"}
name = raw_rule.get("name")
if not name:
logger.warning(f"Rule at index {index} missing 'name', skipping")
return None
prompt_template = raw_rule.get("prompt_template")
if not prompt_template:
logger.warning(f"Rule '{name}' missing 'prompt_template', skipping")
return None
rule_type = str(raw_rule.get("rule_type", "CUSTOM_NL")).upper()
if rule_type not in valid_types:
rule_type = "CUSTOM_NL"
rule_intent = str(raw_rule.get("rule_intent", "CHECK")).upper()
if rule_intent not in valid_intents:
rule_intent = "CHECK"
return {
"name": str(name)[:200], # Cap length
"description": raw_rule.get("description"),
"category": raw_rule.get("category"),
"rule_type": rule_type,
"rule_intent": rule_intent,
"prompt_template": str(prompt_template),
"refinement_needed": bool(raw_rule.get("refinement_needed", False)),
"refinement_question": raw_rule.get("refinement_question"),
}

View File

@@ -1,340 +0,0 @@
"""Assembles all available text content for a proposal to pass to rule evaluation.
V1 LIMITATION: Document body text (the main text content extracted by connectors)
is stored in Vespa, not in the PostgreSQL Document table. The DB row only stores
metadata (semantic_id, link, doc_metadata, primary_owners, etc.). For Jira tickets,
the Description and Comments text are indexed into Vespa during connector runs and
are NOT accessible here without a Vespa query.
As a result, the primary source of rich text for rule evaluation in V1 is:
- Manually uploaded documents (proposal_review_document.extracted_text)
- Structured metadata from the Document row's doc_metadata JSONB column
- For Jira tickets: the connector populates doc_metadata with field values,
which often includes Description, Status, Priority, Assignee, etc.
Future improvement: add a Vespa retrieval step to fetch indexed text chunks for
the parent document and its attachments.
"""
import json
from dataclasses import dataclass
from dataclasses import field
from uuid import UUID
from sqlalchemy.orm import Session
from onyx.db.models import Document
from onyx.server.features.proposal_review.db.models import ProposalReviewDocument
from onyx.server.features.proposal_review.db.models import ProposalReviewProposal
from onyx.utils.logger import setup_logger
logger = setup_logger()
# Metadata keys from Jira connector that commonly carry useful text content.
# These are extracted from doc_metadata and presented as labeled sections to
# give the LLM more signal when evaluating rules.
# NOTE(review): presumably consumed when building the parent document's
# labeled-section text (see _build_parent_document_text) — confirm whether
# this list's order determines section order in the assembled prompt.
_JIRA_TEXT_METADATA_KEYS = [
    "description",
    "summary",
    "comment",
    "comments",
    "acceptance_criteria",
    "story_points",
    "priority",
    "status",
    "resolution",
    "issue_type",
    "labels",
    "components",
    "fix_versions",
    "affects_versions",
    "environment",
    "assignee",
    "reporter",
    "creator",
]
@dataclass
class ProposalContext:
    """All text and metadata context assembled for rule evaluation."""

    proposal_text: str  # concatenated text from all documents
    budget_text: str  # best-effort budget section extraction
    foa_text: str  # FOA content (auto-fetched or uploaded)
    metadata: dict  # structured metadata from Document.doc_metadata
    jira_key: str  # for display/reference (from Document.semantic_id)
    # Full unresolved metadata; defaults to {} so callers may omit it.
    metadata_raw: dict = field(default_factory=dict)
def get_proposal_context(
    proposal_id: UUID,
    db_session: Session,
) -> ProposalContext:
    """Assemble context for rule evaluation.

    Gathers text from three sources:
    1. Jira ticket content (from Document.semantic_id + doc_metadata)
    2. Jira attachments (child Documents linked by ID prefix convention)
    3. Manually uploaded documents (from proposal_review_document.extracted_text)

    For MVP, returns full text of everything. Future: smart section selection.

    Never raises on missing data: a missing proposal or parent Document
    degrades to an (empty-fielded) ProposalContext so evaluation can still
    proceed on whatever text is available.
    """
    # 1. Get the proposal record to find the linked document_id
    proposal = (
        db_session.query(ProposalReviewProposal)
        .filter(ProposalReviewProposal.id == proposal_id)
        .one_or_none()
    )
    if not proposal:
        logger.warning(f"Proposal {proposal_id} not found during context assembly")
        # Return an empty context rather than raising -- callers treat this
        # as "nothing to evaluate against".
        return ProposalContext(
            proposal_text="",
            budget_text="",
            foa_text="",
            metadata={},
            jira_key="",
            metadata_raw={},
        )
    # 2. Fetch the parent Document (Jira ticket)
    parent_doc = (
        db_session.query(Document)
        .filter(Document.id == proposal.document_id)
        .one_or_none()
    )
    jira_key = ""
    metadata: dict = {}
    all_text_parts: list[str] = []
    budget_parts: list[str] = []
    foa_parts: list[str] = []
    if parent_doc:
        # semantic_id is formatted by the Jira connector (typically
        # "KEY-123: summary") and doubles as the display key downstream.
        jira_key = parent_doc.semantic_id or ""
        metadata = parent_doc.doc_metadata or {}
        # Build text from DB-available fields. The actual ticket body text lives
        # in Vespa and is not accessible here. The doc_metadata JSONB column
        # often contains structured Jira fields that the connector extracted.
        parent_text = _build_parent_document_text(parent_doc)
        if parent_text:
            all_text_parts.append(parent_text)
        # 3. Look for child Documents (Jira attachments).
        # Jira attachment Documents have IDs of the form:
        #   "{parent_jira_url}/attachments/{attachment_id}"
        # We find them via ID prefix match.
        #
        # V1 LIMITATION: child document text content is in Vespa, not in the
        # DB. We can only extract metadata (filename, mime type, etc.) from
        # the Document row. The actual attachment text is not available here
        # without a Vespa query. See module docstring for details.
        child_docs = _find_child_documents(parent_doc, db_session)
        if child_docs:
            logger.info(
                f"Found {len(child_docs)} child documents for {jira_key}. "
                f"Note: their text content is in Vespa and only metadata is "
                f"available for rule evaluation."
            )
        for child_doc in child_docs:
            child_text = _build_child_document_text(child_doc)
            if child_text:
                all_text_parts.append(child_text)
                # Route attachment text into the budget/FOA buckets when the
                # name suggests it; appends only, never removes.
                _classify_child_text(child_doc, child_text, budget_parts, foa_parts)
    else:
        logger.warning(
            f"Parent Document not found for proposal {proposal_id} "
            f"(document_id={proposal.document_id}). "
            f"Context will rely on manually uploaded documents only."
        )
    # 4. Fetch manually uploaded documents from proposal_review_document.
    # This is the PRIMARY source of rich text content for V1 since the
    # extracted_text column holds the full document content.
    manual_docs = (
        db_session.query(ProposalReviewDocument)
        .filter(ProposalReviewDocument.proposal_id == proposal_id)
        .order_by(ProposalReviewDocument.created_at)
        .all()
    )
    for doc in manual_docs:
        if doc.extracted_text:
            all_text_parts.append(
                f"--- Document: {doc.file_name} (role: {doc.document_role}) ---\n"
                f"{doc.extracted_text}"
            )
            # Classify by role; filename heuristic is a fallback for budgets
            # uploaded without an explicit role.
            role_upper = (doc.document_role or "").upper()
            if role_upper == "BUDGET" or _is_budget_filename(doc.file_name):
                budget_parts.append(doc.extracted_text)
            elif role_upper == "FOA":
                foa_parts.append(doc.extracted_text)
    return ProposalContext(
        proposal_text="\n\n".join(all_text_parts) if all_text_parts else "",
        budget_text="\n\n".join(budget_parts) if budget_parts else "",
        foa_text="\n\n".join(foa_parts) if foa_parts else "",
        metadata=metadata,
        jira_key=jira_key,
        # NOTE(review): metadata and metadata_raw currently alias the SAME
        # dict object; kept as-is so mutations remain visible to both fields.
        metadata_raw=metadata,
    )
def _build_parent_document_text(doc: Document) -> str:
    """Build text representation from a parent Document row (Jira ticket).

    The Document table does NOT store the ticket body text -- that lives in Vespa.
    What we DO have access to:
    - semantic_id: typically "{ISSUE_KEY}: {summary}"
    - link: URL to the Jira ticket
    - doc_metadata: JSONB with structured fields from the connector (may include
      description, status, priority, assignee, custom fields, etc.)
    - primary_owners / secondary_owners: people associated with the document

    We extract all available metadata and present it as labeled sections to
    maximize the signal available to the LLM for rule evaluation.

    Returns an empty string when the row carries no usable fields.
    """
    parts: list[str] = []
    if doc.semantic_id:
        parts.append(f"Document: {doc.semantic_id}")
    if doc.link:
        parts.append(f"Link: {doc.link}")
    # Include owner information which may be useful for compliance checks
    if doc.primary_owners:
        parts.append(f"Primary Owners: {', '.join(doc.primary_owners)}")
    if doc.secondary_owners:
        parts.append(f"Secondary Owners: {', '.join(doc.secondary_owners)}")
    # doc_metadata contains structured data from the Jira connector.
    # Extract well-known text-bearing fields first, then include the rest.
    if doc.doc_metadata:
        metadata = doc.doc_metadata
        # BUG FIX: the well-known extraction used exact lowercase lookups
        # while the "remaining" filter below excludes keys case-insensitively,
        # so a key differing only in case (e.g. "Description") was silently
        # dropped from BOTH sections. Build a case-insensitive view so such
        # keys land in the well-known section. First occurrence wins when two
        # keys collide on their lowercase form.
        lowered: dict = {}
        for raw_key, raw_value in metadata.items():
            lowered.setdefault(raw_key.lower(), raw_value)
        # Extract well-known Jira fields as labeled sections
        for key in _JIRA_TEXT_METADATA_KEYS:
            value = lowered.get(key)
            if value is not None and value != "" and value != []:
                label = key.replace("_", " ").title()
                if isinstance(value, list):
                    parts.append(f"{label}: {', '.join(str(v) for v in value)}")
                elif isinstance(value, dict):
                    parts.append(
                        f"{label}:\n{json.dumps(value, indent=2, default=str)}"
                    )
                else:
                    parts.append(f"{label}: {value}")
        # Include any remaining metadata keys not in the well-known set,
        # so custom fields and connector-specific data are not lost.
        remaining = {
            k: v
            for k, v in metadata.items()
            if k.lower() not in _JIRA_TEXT_METADATA_KEYS
            and v is not None
            and v != ""
            and v != []
        }
        if remaining:
            parts.append(
                f"Additional Metadata:\n"
                f"{json.dumps(remaining, indent=2, default=str)}"
            )
    return "\n".join(parts) if parts else ""
def _build_child_document_text(doc: Document) -> str:
    """Render a child Document row (Jira attachment) as labeled text.

    V1 LIMITATION: the extracted text of the attachment lives in Vespa, not
    in the Document table, so only the connector-stored metadata (filename,
    mime type, size, parent ticket) can be surfaced here. The LLM therefore
    knows the attachment EXISTS but cannot read its contents. Future versions
    should add a Vespa retrieval step.
    """
    sections: list[str] = []
    if doc.semantic_id:
        sections.append(f"Attachment: {doc.semantic_id}")
    if doc.link:
        sections.append(f"Link: {doc.link}")
    # Connector metadata typically carries: parent_ticket,
    # attachment_filename, attachment_mime_type, attachment_size.
    for meta_key, meta_value in (doc.doc_metadata or {}).items():
        if meta_value is None or meta_value == "":
            continue
        label = meta_key.replace("_", " ").title()
        sections.append(f"{label}: {meta_value}")
    if not sections:
        return ""
    # Surface the limitation inline so the LLM does not assume full text.
    sections.append(
        "[Note: Full attachment text is indexed in Vespa and not available "
        "in this context. Upload the document manually for full text analysis.]"
    )
    return "\n".join(sections)
def _find_child_documents(parent_doc: Document, db_session: Session) -> list[Document]:
    """Find child Documents linked to the parent (e.g. Jira attachments).

    Jira attachments are indexed as separate Document rows whose ID follows
    the convention: "{parent_document_id}/attachments/{attachment_id}".
    The parent_document_id for Jira is the full URL to the issue, e.g.
    "https://jira.example.com/browse/PROJ-123".

    V1 LIMITATION: These child Document rows only contain metadata in the DB.
    Their actual extracted text content is stored in Vespa. To read the
    attachment text, a Vespa query would be required. This is not implemented
    in V1 -- officers should upload key documents manually for full text
    analysis.
    """
    if not parent_doc.id:
        return []
    # Child documents have IDs that start with the parent document's ID
    # followed by a path segment (e.g., /attachments/12345)
    # Escape LIKE wildcards in the document ID (URLs can contain "%" or "_")
    escaped_id = parent_doc.id.replace("%", r"\%").replace("_", r"\_")
    child_docs = (
        db_session.query(Document)
        .filter(
            # Pass the escape character explicitly. The backslash escaping
            # above only works if the backend treats "\" as the LIKE escape
            # character -- PostgreSQL happens to default to it, but declaring
            # it keeps the query correct on any backend/configuration.
            Document.id.like(f"{escaped_id}/%", escape="\\"),
            Document.id != parent_doc.id,
        )
        .all()
    )
    return child_docs
def _classify_child_text(
    doc: Document,
    text: str,
    budget_parts: list[str],
    foa_parts: list[str],
) -> None:
    """Best-effort classification of child document text into budget or FOA.

    Appends *text* to at most one of the two output lists, based on keyword
    matches against the document's lowercased semantic_id.
    """
    name = (doc.semantic_id or "").lower()
    if _is_budget_filename(name):
        budget_parts.append(text)
        return
    foa_markers = ("foa", "funding opportunity", "rfa", "solicitation", "nofo")
    if any(marker in name for marker in foa_markers):
        foa_parts.append(text)
def _is_budget_filename(filename: str) -> bool:
"""Check if a filename suggests budget content."""
lower = (filename or "").lower()
return any(term in lower for term in ["budget", "cost", "financial", "expenditure"])

View File

@@ -1,168 +0,0 @@
"""Auto-fetches Funding Opportunity Announcements using Onyx web search infrastructure."""
from uuid import UUID
from sqlalchemy.orm import Session
from onyx.server.features.proposal_review.db.models import ProposalReviewDocument
from onyx.utils.logger import setup_logger
logger = setup_logger()
# Map known opportunity ID prefixes to federal agency domains.
# Matched case-insensitively by prefix in _determine_domain; insertion order
# decides ties between overlapping prefixes (e.g. "PA" vs "PAR" -- both NIH,
# so the overlap is currently harmless).
_AGENCY_DOMAINS: dict[str, str] = {
    "RFA": "grants.nih.gov",
    "PA": "grants.nih.gov",
    "PAR": "grants.nih.gov",
    "R01": "grants.nih.gov",
    "R21": "grants.nih.gov",
    "U01": "grants.nih.gov",
    "NOT": "grants.nih.gov",
    "NSF": "nsf.gov",
    "DE-FOA": "energy.gov",
    "HRSA": "hrsa.gov",
    "W911": "grants.gov",  # DoD
    "FA": "grants.gov",  # Air Force
    "N00": "grants.gov",  # Navy
    "NOFO": "grants.gov",
}
def fetch_foa(
    opportunity_id: str,
    proposal_id: UUID,
    db_session: Session,
) -> str | None:
    """Fetch FOA content given an opportunity ID.

    1. Determine domain from ID prefix (RFA/PA -> nih.gov, NSF -> nsf.gov, etc.)
    2. Build search query
    3. Call Onyx web search provider
    4. Fetch full content from best URL
    5. Save as proposal_review_document with role=FOA
    6. Return extracted text or None

    If the web search provider is not configured, logs a warning and returns None.
    Every failure path is non-fatal (returns None) so callers can treat the
    FOA as best-effort enrichment.
    """
    if not opportunity_id or not opportunity_id.strip():
        logger.debug("No opportunity_id provided, skipping FOA fetch")
        return None
    opportunity_id = opportunity_id.strip()
    # Check if we already have an FOA document for this proposal -- avoids
    # repeated web fetches on re-runs.
    existing_foa = (
        db_session.query(ProposalReviewDocument)
        .filter(
            ProposalReviewDocument.proposal_id == proposal_id,
            ProposalReviewDocument.document_role == "FOA",
        )
        .first()
    )
    if existing_foa and existing_foa.extracted_text:
        logger.info(
            f"FOA document already exists for proposal {proposal_id}, skipping fetch"
        )
        return existing_foa.extracted_text
    # Determine search domain from opportunity ID prefix
    site_domain = _determine_domain(opportunity_id)
    # Build search query; when a domain is known, a site-restricted query
    # replaces the generic one entirely.
    search_query = f"{opportunity_id} funding opportunity announcement"
    if site_domain:
        search_query = f"site:{site_domain} {opportunity_id}"
    # Try to get the web search provider (imported lazily to avoid pulling in
    # the tool stack at module import time)
    try:
        from onyx.tools.tool_implementations.web_search.providers import (
            get_default_provider,
        )
        provider = get_default_provider()
    except Exception as e:
        logger.warning(f"Failed to load web search provider: {e}")
        provider = None
    if provider is None:
        logger.warning(
            "No web search provider configured. Cannot auto-fetch FOA. "
            "Configure a web search provider in Admin settings to enable this feature."
        )
        return None
    # Search for the FOA
    try:
        results = provider.search(search_query)
    except Exception as e:
        logger.error(f"Web search failed for FOA '{opportunity_id}': {e}")
        return None
    if not results:
        logger.info(f"No search results found for FOA '{opportunity_id}'")
        return None
    # Pick the best result URL -- the provider's top hit is trusted as-is.
    best_url = str(results[0].link)
    logger.info(f"Fetching FOA content from: {best_url}")
    # Fetch full content from the URL
    try:
        from onyx.tools.tool_implementations.open_url.onyx_web_crawler import (
            OnyxWebCrawler,
        )
        crawler = OnyxWebCrawler()
        contents = crawler.contents([best_url])
        if (
            not contents
            or not contents[0].scrape_successful
            or not contents[0].full_content
        ):
            logger.warning(f"No content extracted from FOA URL: {best_url}")
            return None
        foa_text = contents[0].full_content
    except Exception as e:
        logger.error(f"Failed to fetch FOA content from {best_url}: {e}")
        return None
    # Save as a proposal_review_document with role=FOA so subsequent runs
    # hit the early-return cache above.
    try:
        foa_doc = ProposalReviewDocument(
            proposal_id=proposal_id,
            file_name=f"FOA_{opportunity_id}.html",
            file_type="HTML",
            document_role="FOA",
            extracted_text=foa_text,
            # uploaded_by is None for auto-fetched documents
        )
        db_session.add(foa_doc)
        db_session.flush()
        logger.info(
            f"Saved FOA document for proposal {proposal_id} "
            f"(opportunity_id={opportunity_id}, {len(foa_text)} chars)"
        )
    except Exception as e:
        logger.error(f"Failed to save FOA document: {e}")
        # Still return the text even if save fails
        return foa_text
    return foa_text
def _determine_domain(opportunity_id: str) -> str | None:
    """Guess the agency domain for an opportunity ID, or None if unknown.

    First matching prefix in _AGENCY_DOMAINS wins (case-insensitive);
    purely numeric IDs (ignoring dashes) fall back to grants.gov.
    """
    normalized = opportunity_id.upper()
    matched = next(
        (
            domain
            for prefix, domain in _AGENCY_DOMAINS.items()
            if normalized.startswith(prefix)
        ),
        None,
    )
    if matched:
        return matched
    # Numeric-looking IDs resemble grants.gov opportunity numbers.
    if opportunity_id.replace("-", "").isdigit():
        return "grants.gov"
    return None

View File

@@ -1,394 +0,0 @@
"""Writes officer decisions back to Jira."""
from datetime import datetime
from datetime import timezone
from uuid import UUID
import requests
from sqlalchemy.orm import Session
from onyx.db.connector import fetch_connector_by_id
from onyx.db.connector_credential_pair import (
fetch_connector_credential_pair_for_connector,
)
from onyx.db.models import Document
from onyx.server.features.proposal_review.db import config as config_db
from onyx.server.features.proposal_review.db import decisions as decisions_db
from onyx.server.features.proposal_review.db import findings as findings_db
from onyx.server.features.proposal_review.db import proposals as proposals_db
from onyx.server.features.proposal_review.db.models import ProposalReviewFinding
from onyx.server.features.proposal_review.db.models import (
ProposalReviewProposalDecision,
)
from onyx.utils.logger import setup_logger
from shared_configs.contextvars import get_current_tenant_id
logger = setup_logger()
def sync_to_jira(
    proposal_id: UUID,
    db_session: Session,
) -> None:
    """Write the officer's final decision back to Jira.

    Performs up to 3 Jira API operations:
    1. PUT custom fields (decision, completion %)
    2. POST transition (move to configured column)
    3. POST comment (structured review summary)

    Then marks the decision as jira_synced. Idempotent: returns early if the
    latest decision is already flagged as synced. Note the three operations
    are not transactional -- if a later one fails after an earlier one
    succeeded, the decision stays unsynced and a retry repeats all three.

    Raises:
        ValueError: If required config/data is missing.
        RuntimeError: If Jira API calls fail.
    """
    tenant_id = get_current_tenant_id()
    # Load proposal and decision
    proposal = proposals_db.get_proposal(proposal_id, tenant_id, db_session)
    if not proposal:
        raise ValueError(f"Proposal {proposal_id} not found")
    latest_decision = decisions_db.get_latest_proposal_decision(proposal_id, db_session)
    if not latest_decision:
        raise ValueError(f"No decision found for proposal {proposal_id}")
    if latest_decision.jira_synced:
        logger.info(f"Decision for proposal {proposal_id} already synced to Jira")
        return
    # Load tenant config for Jira settings
    config = config_db.get_config(tenant_id, db_session)
    if not config:
        raise ValueError("Proposal review config not found for this tenant")
    if not config.jira_connector_id:
        raise ValueError(
            "No Jira connector configured. Set jira_connector_id in proposal review settings."
        )
    writeback_config = config.jira_writeback or {}
    # Get the Jira issue key from the linked Document
    parent_doc = (
        db_session.query(Document)
        .filter(Document.id == proposal.document_id)
        .one_or_none()
    )
    if not parent_doc:
        raise ValueError(f"Linked document {proposal.document_id} not found")
    # semantic_id is formatted as "KEY-123: Summary text" by the Jira connector.
    # Extract just the issue key (everything before the first colon).
    raw_id = parent_doc.semantic_id
    if not raw_id:
        raise ValueError(
            f"Document {proposal.document_id} has no semantic_id (Jira issue key)"
        )
    issue_key = raw_id.split(":")[0].strip()
    # Get Jira credentials from the connector
    jira_base_url, auth_headers = _get_jira_credentials(
        config.jira_connector_id, db_session
    )
    # Get findings from the most recent review run for the summary comment
    latest_run = findings_db.get_latest_review_run(proposal_id, db_session)
    all_findings: list[ProposalReviewFinding] = []
    if latest_run:
        all_findings = findings_db.list_findings_by_run(latest_run.id, db_session)
    # Calculate summary counts
    verdict_counts = _count_verdicts(all_findings)
    # Operation 1: Update custom fields
    _update_custom_fields(
        jira_base_url=jira_base_url,
        auth_headers=auth_headers,
        issue_key=issue_key,
        decision=latest_decision.decision,
        verdict_counts=verdict_counts,
        writeback_config=writeback_config,
    )
    # Operation 2: Transition the issue
    _transition_issue(
        jira_base_url=jira_base_url,
        auth_headers=auth_headers,
        issue_key=issue_key,
        decision=latest_decision.decision,
        writeback_config=writeback_config,
    )
    # Operation 3: Post review summary comment
    _post_comment(
        jira_base_url=jira_base_url,
        auth_headers=auth_headers,
        issue_key=issue_key,
        decision=latest_decision,
        verdict_counts=verdict_counts,
        findings=all_findings,
    )
    # Mark the decision as synced only after all three operations succeeded
    decisions_db.mark_decision_jira_synced(latest_decision.id, db_session)
    db_session.flush()
    logger.info(
        f"Successfully synced decision for proposal {proposal_id} to Jira issue {issue_key}"
    )
def _get_jira_credentials(
    connector_id: int,
    db_session: Session,
) -> tuple[str, dict[str, str]]:
    """Extract Jira base URL and auth headers from the connector's credentials.

    Auth scheme is chosen by the credential shape: if "jira_user_email" is
    present we build Basic auth (Jira Cloud: email:token), otherwise Bearer
    auth (Jira Server/DC personal access token).

    Returns:
        Tuple of (jira_base_url, auth_headers_dict).

    Raises:
        ValueError: If the connector, its credential pair, credentials, or
            base URL cannot be resolved.
    """
    connector = fetch_connector_by_id(connector_id, db_session)
    if not connector:
        raise ValueError(f"Jira connector {connector_id} not found")
    # Get the connector's credential pair
    cc_pair = fetch_connector_credential_pair_for_connector(db_session, connector_id)
    if not cc_pair:
        raise ValueError(f"No credential pair found for connector {connector_id}")
    # Extract credentials — guard against missing credential_json
    cred_json = cc_pair.credential.credential_json
    if cred_json is None:
        raise ValueError(f"No credential_json for connector {connector_id}")
    # apply_mask=False: we need the real secret, not the masked display value
    credentials = cred_json.get_value(apply_mask=False)
    if not credentials:
        raise ValueError(f"Empty credentials for connector {connector_id}")
    # Extract Jira base URL from connector config
    connector_config = connector.connector_specific_config or {}
    jira_base_url = connector_config.get("jira_base_url", "")
    if not jira_base_url:
        raise ValueError("Could not determine Jira base URL from connector config")
    # Build auth headers
    api_token = credentials.get("jira_api_token", "")
    email = credentials.get("jira_user_email")
    if email:
        # Cloud auth: Basic auth with email:token
        import base64
        auth_string = base64.b64encode(f"{email}:{api_token}".encode()).decode()
        auth_headers = {
            "Authorization": f"Basic {auth_string}",
            "Content-Type": "application/json",
        }
    else:
        # Server auth: Bearer token
        auth_headers = {
            "Authorization": f"Bearer {api_token}",
            "Content-Type": "application/json",
        }
    return jira_base_url, auth_headers
def _count_verdicts(findings: list[ProposalReviewFinding]) -> dict[str, int]:
    """Tally findings per verdict.

    All five known verdict keys are always present (possibly 0) so callers
    can index them unconditionally; unexpected verdict strings get their own
    entries. A missing/empty verdict counts as NEEDS_REVIEW.
    """
    # Seed the known verdicts up front to guarantee the keys exist.
    tally: dict[str, int] = dict.fromkeys(
        ("PASS", "FAIL", "FLAG", "NEEDS_REVIEW", "NOT_APPLICABLE"), 0
    )
    for finding in findings:
        verdict = (finding.verdict or "NEEDS_REVIEW").upper()
        tally[verdict] = tally.get(verdict, 0) + 1
    return tally
def _update_custom_fields(
    jira_base_url: str,
    auth_headers: dict[str, str],
    issue_key: str,
    decision: str,
    verdict_counts: dict[str, int],
    writeback_config: dict,
) -> None:
    """PUT custom fields on the Jira issue (decision, completion %).

    No-op when neither field ID is configured in writeback_config.
    Completion % is the share of rules with a definitive verdict, i.e.
    everything except NEEDS_REVIEW.

    Raises:
        RuntimeError: If the Jira API request fails.
    """
    decision_field = writeback_config.get("decision_field_id")
    completion_field = writeback_config.get("completion_field_id")
    if not decision_field and not completion_field:
        logger.debug("No custom field IDs configured for Jira writeback, skipping")
        return
    fields: dict = {}
    if decision_field:
        fields[decision_field] = decision
    if completion_field:
        total = sum(verdict_counts.values())
        completed = total - verdict_counts.get("NEEDS_REVIEW", 0)
        # Guard against zero rules evaluated
        pct = (completed / total * 100) if total > 0 else 0
        fields[completion_field] = round(pct, 1)
    url = f"{jira_base_url}/rest/api/3/issue/{issue_key}"
    payload = {"fields": fields}
    try:
        resp = requests.put(url, headers=auth_headers, json=payload, timeout=30)
        resp.raise_for_status()
        logger.info(f"Updated custom fields on {issue_key}")
    except requests.RequestException as e:
        logger.error(f"Failed to update custom fields on {issue_key}: {e}")
        raise RuntimeError(f"Jira field update failed: {e}") from e
def _transition_issue(
    jira_base_url: str,
    auth_headers: dict[str, str],
    issue_key: str,
    decision: str,
    writeback_config: dict,
) -> None:
    """POST a transition to move the issue to the appropriate column.

    The decision -> transition-name mapping comes from
    writeback_config["transitions"]. No-op when no transition is mapped for
    the decision; logs (rather than raises) when the mapped transition is not
    currently available on the issue, since workflow state may have moved on.

    Raises:
        RuntimeError: If a Jira API request fails.
    """
    transition_map = writeback_config.get("transitions", {})
    transition_name = transition_map.get(decision)
    if not transition_name:
        logger.debug(f"No transition configured for decision '{decision}', skipping")
        return
    # First, get available transitions -- Jira only accepts transitions valid
    # from the issue's current status.
    transitions_url = f"{jira_base_url}/rest/api/3/issue/{issue_key}/transitions"
    try:
        resp = requests.get(transitions_url, headers=auth_headers, timeout=30)
        resp.raise_for_status()
        available = resp.json().get("transitions", [])
    except requests.RequestException as e:
        logger.error(f"Failed to fetch transitions for {issue_key}: {e}")
        raise RuntimeError(f"Jira transition fetch failed: {e}") from e
    # Find the matching transition by name (case-insensitive)
    target_transition = None
    for t in available:
        if t.get("name", "").lower() == transition_name.lower():
            target_transition = t
            break
    if not target_transition:
        available_names = [t.get("name", "") for t in available]
        logger.warning(
            f"Transition '{transition_name}' not found for {issue_key}. "
            f"Available: {available_names}"
        )
        return
    # Perform the transition (by ID, as the API requires)
    payload = {"transition": {"id": target_transition["id"]}}
    try:
        resp = requests.post(
            transitions_url, headers=auth_headers, json=payload, timeout=30
        )
        resp.raise_for_status()
        logger.info(f"Transitioned {issue_key} to '{transition_name}'")
    except requests.RequestException as e:
        logger.error(f"Failed to transition {issue_key}: {e}")
        raise RuntimeError(f"Jira transition failed: {e}") from e
def _post_comment(
    jira_base_url: str,
    auth_headers: dict[str, str],
    issue_key: str,
    decision: ProposalReviewProposalDecision | None,
    verdict_counts: dict[str, int],
    findings: list[ProposalReviewFinding],
) -> None:
    """POST a structured review summary as a Jira comment.

    The summary text is built by _build_comment_text and wrapped in a minimal
    single-paragraph ADF document.

    Raises:
        RuntimeError: If the Jira API request fails.
    """
    comment_text = _build_comment_text(decision, verdict_counts, findings)
    url = f"{jira_base_url}/rest/api/3/issue/{issue_key}/comment"
    # Jira Cloud uses ADF (Atlassian Document Format) for comments
    payload = {
        "body": {
            "version": 1,
            "type": "doc",
            "content": [
                {
                    "type": "paragraph",
                    "content": [
                        {
                            "type": "text",
                            "text": comment_text,
                        }
                    ],
                }
            ],
        }
    }
    try:
        resp = requests.post(url, headers=auth_headers, json=payload, timeout=30)
        resp.raise_for_status()
        logger.info(f"Posted review summary comment on {issue_key}")
    except requests.RequestException as e:
        logger.error(f"Failed to post comment on {issue_key}: {e}")
        raise RuntimeError(f"Jira comment post failed: {e}") from e
def _build_comment_text(
    decision: ProposalReviewProposalDecision | None,
    verdict_counts: dict[str, int],
    findings: list[ProposalReviewFinding],
) -> str:
    """Build a structured review summary text for the Jira comment.

    Layout: header, final decision (+ optional notes), verdict count summary,
    then per-finding lines with explanations truncated to 200 chars, and a
    UTC timestamp footer.
    """
    lines: list[str] = []
    lines.append("=== Argus Proposal Review Summary ===")
    lines.append("")
    # Decision -- getattr so a None decision degrades to "N/A" / no notes
    decision_text = getattr(decision, "decision", "N/A")
    decision_notes = getattr(decision, "notes", None)
    lines.append(f"Final Decision: {decision_text}")
    if decision_notes:
        lines.append(f"Notes: {decision_notes}")
    lines.append("")
    # Summary counts
    total = sum(verdict_counts.values())
    lines.append(f"Review Results ({total} rules evaluated):")
    lines.append(f"  Pass: {verdict_counts.get('PASS', 0)}")
    lines.append(f"  Fail: {verdict_counts.get('FAIL', 0)}")
    lines.append(f"  Flag: {verdict_counts.get('FLAG', 0)}")
    lines.append(f"  Needs Review: {verdict_counts.get('NEEDS_REVIEW', 0)}")
    lines.append(f"  Not Applicable: {verdict_counts.get('NOT_APPLICABLE', 0)}")
    lines.append("")
    # Individual findings (truncated for readability)
    if findings:
        lines.append("--- Detailed Findings ---")
        for f in findings:
            rule_name = f.rule.name if f.rule else "Unknown Rule"
            verdict = f.verdict or "N/A"
            officer_action = ""
            if f.decision:
                officer_action = f" | Officer: {f.decision.action}"
            lines.append(f"  [{verdict}] {rule_name}{officer_action}")
            if f.explanation:
                # Truncate long explanations
                explanation = f.explanation[:200]
                if len(f.explanation) > 200:
                    explanation += "..."
                lines.append(f"    Reason: {explanation}")
    lines.append("")
    lines.append(f"Reviewed at: {datetime.now(timezone.utc).isoformat()}")
    lines.append("Generated by Argus (Onyx Proposal Review)")
    return "\n".join(lines)

View File

@@ -1,339 +0,0 @@
"""Celery tasks that orchestrate proposal review — parallel rule evaluation."""
from datetime import datetime
from datetime import timezone
from celery import shared_task
from sqlalchemy import update
from onyx.utils.logger import setup_logger
from shared_configs.contextvars import CURRENT_TENANT_ID_CONTEXTVAR
logger = setup_logger()
@shared_task(bind=True, ignore_result=True, soft_time_limit=3600, time_limit=3660)
def run_proposal_review(_self: object, review_run_id: str, tenant_id: str) -> None:
    """Parent task: orchestrates rule evaluation for a review run.

    1. Set run status=RUNNING
    2. Call get_proposal_context() once
    3. Try to auto-fetch FOA if opportunity_id in metadata and no FOA doc
    4. Get all active rules for the run's ruleset
    5. Set total_rules on the run
    6. Evaluate each rule sequentially (V1 — no Celery subtasks)
    7. After all complete: set status=COMPLETED
    8. On error: set status=FAILED

    Args:
        review_run_id: String UUID of the ProposalReviewRun to execute.
        tenant_id: Tenant whose schema/context all DB work runs under.
    """
    # Set tenant context for DB access
    CURRENT_TENANT_ID_CONTEXTVAR.set(tenant_id)
    try:
        _execute_review(review_run_id)
    except Exception as e:
        # Mark the run FAILED for the UI, then re-raise so Celery records
        # the failure as well.
        logger.error(f"Review run {review_run_id} failed: {e}", exc_info=True)
        _mark_run_failed(review_run_id)
        raise
    finally:
        # NOTE(review): resets by setting None rather than contextvar token
        # reset -- presumably fine for single-task worker processes; confirm
        # no other code expects the pre-task value to be restored.
        CURRENT_TENANT_ID_CONTEXTVAR.set(None)
def _execute_review(review_run_id: str) -> None:
    """Core review logic, separated for testability.

    Uses a fresh, short-lived DB session per step (status updates, context
    assembly, rule listing, per-rule saves) instead of one long transaction,
    so progress is visible to readers while the run executes.

    Raises:
        ValueError: If the review run does not exist.
    """
    from uuid import UUID
    from onyx.db.engine.sql_engine import get_session_with_current_tenant
    from onyx.server.features.proposal_review.db import findings as findings_db
    from onyx.server.features.proposal_review.db import rulesets as rulesets_db
    from onyx.server.features.proposal_review.engine.context_assembler import (
        get_proposal_context,
    )
    from onyx.server.features.proposal_review.engine.foa_fetcher import fetch_foa
    run_uuid = UUID(review_run_id)
    # Step 1: Set run status to RUNNING
    with get_session_with_current_tenant() as db_session:
        run = findings_db.get_review_run(run_uuid, db_session)
        if not run:
            raise ValueError(f"Review run {review_run_id} not found")
        run.status = "RUNNING"
        run.started_at = datetime.now(timezone.utc)
        db_session.commit()
        # Capture scalars before the session closes and the ORM object detaches
        proposal_id = run.proposal_id
        ruleset_id = run.ruleset_id
    # Step 2: Assemble proposal context (once; shared across all rules)
    with get_session_with_current_tenant() as db_session:
        context = get_proposal_context(proposal_id, db_session)
    # Step 3: Try to auto-fetch FOA if opportunity_id is in metadata
    opportunity_id = context.metadata.get("opportunity_id") or context.metadata.get(
        "funding_opportunity_number"
    )
    if opportunity_id and not context.foa_text:
        logger.info(f"Attempting to auto-fetch FOA for opportunity_id={opportunity_id}")
        try:
            with get_session_with_current_tenant() as db_session:
                foa_text = fetch_foa(opportunity_id, proposal_id, db_session)
                db_session.commit()
            if foa_text:
                context.foa_text = foa_text
                logger.info(f"Auto-fetched FOA: {len(foa_text)} chars")
        except Exception as e:
            # FOA is enrichment only; the review proceeds without it
            logger.warning(f"FOA auto-fetch failed (non-fatal): {e}")
    # Step 4: Get all active rules for the ruleset
    with get_session_with_current_tenant() as db_session:
        rules = rulesets_db.list_rules_by_ruleset(
            ruleset_id, db_session, active_only=True
        )
        # Detach rules from the session so we can use them outside
        rule_data = [
            {
                "id": rule.id,
                "name": rule.name,
                "prompt_template": rule.prompt_template,
                "rule_type": rule.rule_type,
                "rule_intent": rule.rule_intent,
                "is_hard_stop": rule.is_hard_stop,
                "category": rule.category,
            }
            for rule in rules
        ]
    if not rule_data:
        # Nothing to evaluate: complete immediately rather than failing
        logger.warning(f"No active rules found for ruleset {ruleset_id}")
        with get_session_with_current_tenant() as db_session:
            run = findings_db.get_review_run(run_uuid, db_session)
            if run:
                run.status = "COMPLETED"
                run.completed_at = datetime.now(timezone.utc)
                db_session.commit()
        return
    # Step 5: Update total_rules on the run
    with get_session_with_current_tenant() as db_session:
        run = findings_db.get_review_run(run_uuid, db_session)
        if run:
            run.total_rules = len(rule_data)
            db_session.commit()
    # Step 6: Evaluate each rule sequentially
    completed = 0
    for rule_info in rule_data:
        try:
            _evaluate_and_save(
                review_run_id=review_run_id,
                rule_id=str(rule_info["id"]),
                proposal_id=proposal_id,
                context=context,
            )
            completed += 1
        except Exception as e:
            logger.error(
                f"Rule '{rule_info['name']}' (id={rule_info['id']}) failed: {e}",
                exc_info=True,
            )
            # Save an error finding so the officer sees which rule failed;
            # a failed rule still counts toward progress.
            _save_error_finding(
                review_run_id=review_run_id,
                rule_id=str(rule_info["id"]),
                proposal_id=proposal_id,
                error=str(e),
            )
            completed += 1
        # Increment completed_rules counter after every rule so the UI can
        # show live progress
        with get_session_with_current_tenant() as db_session:
            run = findings_db.get_review_run(run_uuid, db_session)
            if run:
                run.completed_rules = completed
                db_session.commit()
    # Step 7: Mark run as completed
    with get_session_with_current_tenant() as db_session:
        run = findings_db.get_review_run(run_uuid, db_session)
        if run:
            run.status = "COMPLETED"
            run.completed_at = datetime.now(timezone.utc)
            run.completed_rules = completed
            db_session.commit()
    logger.info(
        f"Review run {review_run_id} completed: {completed}/{len(rule_data)} rules evaluated"
    )
def _evaluate_and_save(
    review_run_id: str,
    rule_id: str,
    proposal_id: str,
    context: object,  # ProposalContext — typed as object to avoid circular import at module level
) -> None:
    """Evaluate a single rule and save the finding to DB.

    Loads the rule, runs the LLM-backed evaluation, and persists the finding
    in one session/commit so a finding never exists without its evaluation
    result.

    Raises:
        ValueError: If the rule does not exist.
        Exception: Whatever evaluate_rule raises; callers convert failures
            into error findings.
    """
    from uuid import UUID
    from onyx.db.engine.sql_engine import get_session_with_current_tenant
    from onyx.server.features.proposal_review.db import findings as findings_db
    from onyx.server.features.proposal_review.db import rulesets as rulesets_db
    from onyx.server.features.proposal_review.engine.rule_evaluator import (
        evaluate_rule,
    )
    rule_uuid = UUID(rule_id)
    run_uuid = UUID(review_run_id)
    # Load the rule from DB
    with get_session_with_current_tenant() as db_session:
        rule = rulesets_db.get_rule(rule_uuid, db_session)
        if not rule:
            raise ValueError(f"Rule {rule_id} not found")
        # Evaluate the rule (may call out to an LLM; result is a dict with
        # at least a "verdict" key plus optional detail fields)
        result = evaluate_rule(rule, context, db_session)
        # Save finding
        findings_db.create_finding(
            proposal_id=proposal_id,
            rule_id=rule_uuid,
            review_run_id=run_uuid,
            verdict=result["verdict"],
            confidence=result.get("confidence"),
            evidence=result.get("evidence"),
            explanation=result.get("explanation"),
            suggested_action=result.get("suggested_action"),
            llm_model=result.get("llm_model"),
            llm_tokens_used=result.get("llm_tokens_used"),
            db_session=db_session,
        )
        db_session.commit()
    logger.debug(f"Rule {rule_id} evaluated: verdict={result['verdict']}")
def _save_error_finding(
    review_run_id: str,
    rule_id: str,
    proposal_id: str,
    error: str,
) -> None:
    """Save an error finding when a rule evaluation fails.

    Records a NEEDS_REVIEW/LOW-confidence finding carrying the error message
    so the officer can see which rule failed. Best-effort: persistence errors
    are logged and swallowed so one bad save cannot abort the whole run.
    """
    from uuid import UUID
    from onyx.db.engine.sql_engine import get_session_with_current_tenant
    from onyx.server.features.proposal_review.db import findings as findings_db
    try:
        with get_session_with_current_tenant() as db_session:
            findings_db.create_finding(
                proposal_id=proposal_id,
                rule_id=UUID(rule_id),
                review_run_id=UUID(review_run_id),
                verdict="NEEDS_REVIEW",
                confidence="LOW",
                evidence=None,
                explanation=f"Rule evaluation failed with error: {error}",
                suggested_action="Manual review required due to system error.",
                db_session=db_session,
            )
            db_session.commit()
    except Exception as e:
        # Deliberate swallow: this is the error path's error path
        logger.error(f"Failed to save error finding for rule {rule_id}: {e}")
def _mark_run_failed(review_run_id: str) -> None:
    """Best-effort: set a review run's status to FAILED and stamp completion time.

    Errors are logged, never raised — this runs on failure paths where a
    second exception would mask the original one.
    """
    from uuid import UUID
    from onyx.db.engine.sql_engine import get_session_with_current_tenant
    from onyx.server.features.proposal_review.db import findings as findings_db
    try:
        with get_session_with_current_tenant() as db_session:
            review_run = findings_db.get_review_run(UUID(review_run_id), db_session)
            if review_run is not None:
                review_run.status = "FAILED"
                review_run.completed_at = datetime.now(timezone.utc)
            db_session.commit()
    except Exception as exc:
        logger.error(f"Failed to mark run {review_run_id} as FAILED: {exc}")
@shared_task(bind=True, ignore_result=True, soft_time_limit=300, time_limit=330)
def evaluate_single_rule(
    _self: object, review_run_id: str, rule_id: str, tenant_id: str
) -> None:
    """Child task: evaluates one rule (for future parallel execution).

    Currently not used in V1 — rules are evaluated sequentially in
    run_proposal_review. This task exists for future migration to
    parallel execution via Celery groups.
    """
    CURRENT_TENANT_ID_CONTEXTVAR.set(tenant_id)
    try:
        from uuid import UUID
        from onyx.db.engine.sql_engine import get_session_with_current_tenant
        from onyx.server.features.proposal_review.db import findings as findings_db
        from onyx.server.features.proposal_review.engine.context_assembler import (
            get_proposal_context,
        )
        run_pk = UUID(review_run_id)
        # Resolve the proposal this run belongs to.
        with get_session_with_current_tenant() as db_session:
            review_run = findings_db.get_review_run(run_pk, db_session)
            if review_run is None:
                raise ValueError(f"Review run {review_run_id} not found")
            proposal_id = review_run.proposal_id
        # Each subtask assembles its own context so it stays independent.
        with get_session_with_current_tenant() as db_session:
            context = get_proposal_context(proposal_id, db_session)
        _evaluate_and_save(review_run_id, rule_id, proposal_id, context)
        # Bump the progress counter with a single SQL UPDATE so concurrent
        # subtasks cannot lose increments (read-modify-write would race).
        with get_session_with_current_tenant() as db_session:
            from onyx.server.features.proposal_review.db.models import (
                ProposalReviewRun,
            )
            db_session.execute(
                update(ProposalReviewRun)
                .where(ProposalReviewRun.id == run_pk)
                .values(completed_rules=ProposalReviewRun.completed_rules + 1)
            )
            db_session.commit()
    finally:
        CURRENT_TENANT_ID_CONTEXTVAR.set(None)
@shared_task(bind=True, ignore_result=True, soft_time_limit=60, time_limit=90)
def sync_decision_to_jira(_self: object, proposal_id: str, tenant_id: str) -> None:
    """Writes officer decision back to Jira.

    Dispatched from the sync-jira API endpoint. Failures are logged with a
    traceback and re-raised so Celery records the task as failed.
    """
    CURRENT_TENANT_ID_CONTEXTVAR.set(tenant_id)
    try:
        from uuid import UUID
        from onyx.db.engine.sql_engine import get_session_with_current_tenant
        from onyx.server.features.proposal_review.engine.jira_sync import sync_to_jira
        with get_session_with_current_tenant() as db_session:
            sync_to_jira(UUID(proposal_id), db_session)
            db_session.commit()
        logger.info(f"Jira sync completed for proposal {proposal_id}")
    except Exception as exc:
        logger.error(
            f"Jira sync failed for proposal {proposal_id}: {exc}", exc_info=True
        )
        raise
    finally:
        # Always clear the tenant so the worker does not leak it to other tasks.
        CURRENT_TENANT_ID_CONTEXTVAR.set(None)

View File

@@ -1,210 +0,0 @@
"""Evaluates a single rule against a proposal context via LLM."""
import json
import re
from sqlalchemy.orm import Session
from onyx.llm.factory import get_default_llm
from onyx.llm.models import SystemMessage
from onyx.llm.models import UserMessage
from onyx.llm.utils import llm_response_to_string
from onyx.server.features.proposal_review.db.models import ProposalReviewRule
from onyx.server.features.proposal_review.engine.context_assembler import (
ProposalContext,
)
from onyx.utils.logger import setup_logger
logger = setup_logger()
# System prompt shared by every rule evaluation: frames the LLM as a
# compliance reviewer and demands strict JSON output.
SYSTEM_PROMPT = """\
You are a meticulous grant proposal compliance reviewer for a university research office.
Your role is to evaluate specific aspects of grant proposals against institutional
and sponsor requirements.
You must evaluate each rule independently, focusing ONLY on the specific criterion
described. Be precise in your assessment. When in doubt, mark for human review.
Always respond with a valid JSON object in the exact format specified."""
# Appended to every rule's filled prompt. _parse_llm_response expects exactly
# this JSON shape; the verdict/confidence vocabularies here must stay in sync
# with the valid_verdicts / valid_confidences sets in _parse_llm_response.
RESPONSE_FORMAT_INSTRUCTIONS = """
Respond with ONLY a valid JSON object in the following format:
{
"verdict": "PASS | FAIL | FLAG | NEEDS_REVIEW | NOT_APPLICABLE",
"confidence": "HIGH | MEDIUM | LOW",
"evidence": "Direct quote or reference from the proposal documents that supports your verdict. If no relevant text found, state that clearly.",
"explanation": "Concise reasoning for why this verdict was reached. Reference specific requirements and how the proposal does or does not meet them.",
"suggested_action": "If verdict is FAIL or FLAG, describe what the officer or PI should do. Otherwise, null."
}
Verdict meanings:
- PASS: The proposal clearly meets this requirement.
- FAIL: The proposal clearly does NOT meet this requirement.
- FLAG: There is a potential issue that needs human attention.
- NEEDS_REVIEW: Insufficient information to make a determination.
- NOT_APPLICABLE: This rule does not apply to this proposal.
"""
def evaluate_rule(
    rule: ProposalReviewRule,
    context: ProposalContext,
    _db_session: Session | None = None,
) -> dict:
    """Evaluate one rule against proposal context via LLM.

    The rule's prompt_template has its {{...}} variables filled from the
    context, the result is wrapped with the reviewer system prompt plus
    strict JSON-output instructions, and the LLM response is parsed into
    a finding dict.

    Args:
        rule: The rule to evaluate.
        context: Assembled proposal context.
        _db_session: Unused; kept so the signature matches other engine hooks.

    Returns:
        Dict with verdict, confidence, evidence, explanation, suggested_action,
        plus llm_model and llm_tokens_used if available.
    """
    # Render the prompt and wrap it with the role/system framing.
    rendered = _fill_template(rule.prompt_template, context)
    messages = [
        SystemMessage(content=SYSTEM_PROMPT),
        UserMessage(content=f"{rendered}\n\n" f"{RESPONSE_FORMAT_INSTRUCTIONS}"),
    ]
    # Invoke the LLM; any failure collapses to a NEEDS_REVIEW finding so the
    # review run can continue with the remaining rules.
    try:
        llm = get_default_llm()
        response = llm.invoke(messages)
        raw_text = llm_response_to_string(response)
        model_name = llm.config.model_name if llm.config else None
        tokens_used = _extract_token_usage(response)
    except Exception as e:
        logger.error(f"LLM call failed for rule {rule.id} '{rule.name}': {e}")
        return {
            "verdict": "NEEDS_REVIEW",
            "confidence": "LOW",
            "evidence": None,
            "explanation": f"LLM evaluation failed: {str(e)}",
            "suggested_action": "Manual review required due to system error.",
            "llm_model": None,
            "llm_tokens_used": None,
        }
    # Parse the structured response and attach model/token metadata.
    finding = _parse_llm_response(raw_text)
    finding["llm_model"] = model_name
    finding["llm_tokens_used"] = tokens_used
    return finding
def _fill_template(template: str, context: ProposalContext) -> str:
"""Replace {{variable}} placeholders in the prompt template.
Supported variables:
- {{proposal_text}} -> context.proposal_text
- {{budget_text}} -> context.budget_text
- {{foa_text}} -> context.foa_text
- {{metadata}} -> JSON dump of context.metadata
- {{metadata.FIELD}} -> specific metadata field value
- {{jira_key}} -> context.jira_key
"""
result = template
# Direct substitutions
result = result.replace("{{proposal_text}}", context.proposal_text or "")
result = result.replace("{{budget_text}}", context.budget_text or "")
result = result.replace("{{foa_text}}", context.foa_text or "")
result = result.replace("{{jira_key}}", context.jira_key or "")
# Metadata as JSON
metadata_str = json.dumps(context.metadata, indent=2, default=str)
result = result.replace("{{metadata}}", metadata_str)
# Specific metadata fields: {{metadata.FIELD}}
metadata_field_pattern = re.compile(r"\{\{metadata\.([^}]+)\}\}")
for match in metadata_field_pattern.finditer(result):
field_name = match.group(1)
field_value = context.metadata.get(field_name, "")
if isinstance(field_value, (dict, list)):
field_value = json.dumps(field_value, default=str)
result = result.replace(match.group(0), str(field_value))
return result
def _parse_llm_response(raw_text: str) -> dict:
"""Parse the LLM response text as JSON.
Handles cases where the LLM wraps JSON in markdown code fences.
"""
text = raw_text.strip()
# Strip markdown code fences if present
if text.startswith("```"):
# Remove opening fence (with optional language tag)
text = re.sub(r"^```(?:json)?\s*\n?", "", text)
# Remove closing fence
text = re.sub(r"\n?```\s*$", "", text)
text = text.strip()
try:
parsed = json.loads(text)
except json.JSONDecodeError:
logger.warning(f"Failed to parse LLM response as JSON: {text[:200]}...")
return {
"verdict": "NEEDS_REVIEW",
"confidence": "LOW",
"evidence": None,
"explanation": f"Failed to parse LLM response. Raw output: {text[:500]}",
"suggested_action": "Manual review required due to unparseable AI response.",
}
# Validate and normalize the parsed result
valid_verdicts = {"PASS", "FAIL", "FLAG", "NEEDS_REVIEW", "NOT_APPLICABLE"}
valid_confidences = {"HIGH", "MEDIUM", "LOW"}
verdict = str(parsed.get("verdict", "NEEDS_REVIEW")).upper()
if verdict not in valid_verdicts:
verdict = "NEEDS_REVIEW"
confidence = str(parsed.get("confidence", "LOW")).upper()
if confidence not in valid_confidences:
confidence = "LOW"
return {
"verdict": verdict,
"confidence": confidence,
"evidence": parsed.get("evidence"),
"explanation": parsed.get("explanation"),
"suggested_action": parsed.get("suggested_action"),
}
def _extract_token_usage(response: object) -> int | None:
"""Best-effort extraction of token usage from the LLM response."""
try:
# litellm ModelResponse has a usage attribute
if hasattr(response, "usage") and response.usage:
usage = response.usage
total = getattr(usage, "total_tokens", None)
if total is not None:
return int(total)
# Sum prompt + completion tokens if total not available
prompt_tokens = getattr(usage, "prompt_tokens", 0) or 0
completion_tokens = getattr(usage, "completion_tokens", 0) or 0
if prompt_tokens or completion_tokens:
return prompt_tokens + completion_tokens
except Exception:
pass
return None

View File

@@ -1,87 +0,0 @@
"""Shared fixtures for proposal review integration tests.
Uses the same real-PostgreSQL pattern as the parent external_dependency_unit
conftest. Tables must already exist (via the 61ea78857c97 migration).
"""
from collections.abc import Generator
from uuid import uuid4
import pytest
from fastapi_users.password import PasswordHelper
from sqlalchemy import text
from sqlalchemy.orm import Session
from onyx.db.engine.sql_engine import get_session_with_current_tenant
from onyx.db.engine.sql_engine import SqlEngine
from onyx.db.enums import AccountType
from onyx.db.models import User
from onyx.db.models import UserRole
from shared_configs.contextvars import CURRENT_TENANT_ID_CONTEXTVAR
from tests.external_dependency_unit.constants import TEST_TENANT_ID
# Tables to clean up after each test, in dependency order (children first).
# Deleting children before parents keeps FK constraints satisfied without
# CASCADE. NOTE(review): this list must stay in sync with the tables created
# by the 61ea78857c97 migration referenced in the module docstring.
_PROPOSAL_REVIEW_TABLES = [
    "proposal_review_audit_log",
    "proposal_review_decision",
    "proposal_review_proposal_decision",
    "proposal_review_finding",
    "proposal_review_run",
    "proposal_review_document",
    "proposal_review_proposal",
    "proposal_review_rule",
    "proposal_review_ruleset",
    "proposal_review_config",
]
@pytest.fixture(scope="function")
def tenant_context() -> Generator[None, None, None]:
    """Pin the tenant contextvar to TEST_TENANT_ID for the duration of a test."""
    reset_token = CURRENT_TENANT_ID_CONTEXTVAR.set(TEST_TENANT_ID)
    try:
        yield
    finally:
        # Restore the previous value even if the test raised.
        CURRENT_TENANT_ID_CONTEXTVAR.reset(reset_token)
@pytest.fixture(scope="function")
def db_session(tenant_context: None) -> Generator[Session, None, None]:  # noqa: ARG001
    """Yield a tenant-scoped DB session, wiping proposal_review rows afterwards.

    Cleanup deletes children before parents (order of _PROPOSAL_REVIEW_TABLES)
    so FK constraints are never violated. Cleanup is best-effort: failures are
    rolled back rather than failing the test.
    """
    SqlEngine.init_engine(pool_size=10, max_overflow=5)
    with get_session_with_current_tenant() as session:
        yield session
        # Remove any proposal_review data the test created.
        try:
            for table_name in _PROPOSAL_REVIEW_TABLES:
                session.execute(text(f"DELETE FROM {table_name}"))  # noqa: S608
            session.commit()
        except Exception:
            session.rollback()
@pytest.fixture(scope="function")
def test_user(db_session: Session) -> User:
    """Create a throwaway user for FK references (triggered_by, officer_id, etc.)."""
    password_helper = PasswordHelper()
    # Random email + random password: the account is never logged into.
    new_user = User(
        id=uuid4(),
        email=f"pr_test_{uuid4().hex[:8]}@example.com",
        hashed_password=password_helper.hash(password_helper.generate()),
        is_active=True,
        is_superuser=False,
        is_verified=True,
        role=UserRole.ADMIN,
        account_type=AccountType.STANDARD,
    )
    db_session.add(new_user)
    db_session.commit()
    db_session.refresh(new_user)
    return new_user

View File

@@ -1,425 +0,0 @@
"""Integration tests for per-finding decisions, proposal decisions, config, and audit log."""
from uuid import uuid4
from sqlalchemy.orm import Session
from onyx.db.models import User
from onyx.server.features.proposal_review.db.config import get_config
from onyx.server.features.proposal_review.db.config import upsert_config
from onyx.server.features.proposal_review.db.decisions import create_audit_log
from onyx.server.features.proposal_review.db.decisions import (
create_proposal_decision,
)
from onyx.server.features.proposal_review.db.decisions import get_finding_decision
from onyx.server.features.proposal_review.db.decisions import (
get_latest_proposal_decision,
)
from onyx.server.features.proposal_review.db.decisions import list_audit_log
from onyx.server.features.proposal_review.db.decisions import (
mark_decision_jira_synced,
)
from onyx.server.features.proposal_review.db.decisions import (
upsert_finding_decision,
)
from onyx.server.features.proposal_review.db.findings import create_finding
from onyx.server.features.proposal_review.db.findings import create_review_run
from onyx.server.features.proposal_review.db.findings import get_finding
from onyx.server.features.proposal_review.db.proposals import get_or_create_proposal
from onyx.server.features.proposal_review.db.proposals import update_proposal_status
from onyx.server.features.proposal_review.db.rulesets import create_rule
from onyx.server.features.proposal_review.db.rulesets import create_ruleset
from tests.external_dependency_unit.constants import TEST_TENANT_ID
TENANT = TEST_TENANT_ID
def _make_finding(db_session: Session, test_user: User):
    """Helper: create a full chain (ruleset -> rule -> proposal -> run -> finding).

    Returns the created FAIL finding along with its proposal.
    """
    ruleset = create_ruleset(
        tenant_id=TENANT,
        name=f"RS-{uuid4().hex[:6]}",
        db_session=db_session,
        created_by=test_user.id,
    )
    review_rule = create_rule(
        ruleset_id=ruleset.id,
        name="Test Rule",
        rule_type="DOCUMENT_CHECK",
        prompt_template="{{proposal_text}}",
        db_session=db_session,
    )
    proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
    review_run = create_review_run(
        proposal_id=proposal.id,
        ruleset_id=ruleset.id,
        triggered_by=test_user.id,
        total_rules=1,
        db_session=db_session,
    )
    fail_finding = create_finding(
        proposal_id=proposal.id,
        rule_id=review_rule.id,
        review_run_id=review_run.id,
        verdict="FAIL",
        db_session=db_session,
    )
    db_session.commit()
    return fail_finding, proposal
class TestFindingDecision:
    """Integration tests for per-finding officer decisions (upsert semantics)."""

    def test_create_finding_decision(
        self, db_session: Session, test_user: User
    ) -> None:
        finding, _ = _make_finding(db_session, test_user)
        decision = upsert_finding_decision(
            finding_id=finding.id,
            officer_id=test_user.id,
            action="VERIFIED",
            db_session=db_session,
            notes="Looks good",
        )
        db_session.commit()
        assert decision.id is not None
        assert decision.finding_id == finding.id
        assert decision.action == "VERIFIED"
        assert decision.notes == "Looks good"

    def test_upsert_overwrites_previous_decision(
        self, db_session: Session, test_user: User
    ) -> None:
        finding, _ = _make_finding(db_session, test_user)
        first = upsert_finding_decision(
            finding_id=finding.id,
            officer_id=test_user.id,
            action="VERIFIED",
            db_session=db_session,
        )
        db_session.commit()
        first_id = first.id
        # A second upsert for the same finding must update in place.
        second = upsert_finding_decision(
            finding_id=finding.id,
            officer_id=test_user.id,
            action="ISSUE",
            db_session=db_session,
            notes="Actually, this is a problem",
        )
        db_session.commit()
        # Same row was updated, not a new one created
        assert second.id == first_id
        assert second.action == "ISSUE"
        assert second.notes == "Actually, this is a problem"

    def test_get_finding_decision(self, db_session: Session, test_user: User) -> None:
        finding, _ = _make_finding(db_session, test_user)
        upsert_finding_decision(
            finding_id=finding.id,
            officer_id=test_user.id,
            action="NOT_APPLICABLE",
            db_session=db_session,
        )
        db_session.commit()
        fetched = get_finding_decision(finding.id, db_session)
        assert fetched is not None
        assert fetched.action == "NOT_APPLICABLE"

    def test_get_finding_decision_returns_none_when_no_decision(
        self, db_session: Session, test_user: User
    ) -> None:
        finding, _ = _make_finding(db_session, test_user)
        assert get_finding_decision(finding.id, db_session) is None

    def test_finding_decision_accessible_via_finding_relationship(
        self, db_session: Session, test_user: User
    ) -> None:
        # The decision should also be reachable through Finding.decision.
        finding, _ = _make_finding(db_session, test_user)
        upsert_finding_decision(
            finding_id=finding.id,
            officer_id=test_user.id,
            action="OVERRIDDEN",
            db_session=db_session,
        )
        db_session.commit()
        fetched = get_finding(finding.id, db_session)
        assert fetched is not None
        assert fetched.decision is not None
        assert fetched.decision.action == "OVERRIDDEN"
class TestProposalDecision:
    """Integration tests for proposal-level decisions and Jira-sync flags."""

    def test_create_proposal_decision(
        self, db_session: Session, test_user: User
    ) -> None:
        proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        db_session.commit()
        pd = create_proposal_decision(
            proposal_id=proposal.id,
            officer_id=test_user.id,
            decision="APPROVED",
            db_session=db_session,
            notes="All checks pass",
        )
        db_session.commit()
        assert pd.id is not None
        assert pd.decision == "APPROVED"
        assert pd.notes == "All checks pass"
        # New decisions start un-synced to Jira.
        assert pd.jira_synced is False

    def test_get_latest_proposal_decision(
        self, db_session: Session, test_user: User
    ) -> None:
        proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        db_session.commit()
        # Two decisions committed in sequence; the second must win.
        create_proposal_decision(
            proposal_id=proposal.id,
            officer_id=test_user.id,
            decision="CHANGES_REQUESTED",
            db_session=db_session,
        )
        db_session.commit()
        create_proposal_decision(
            proposal_id=proposal.id,
            officer_id=test_user.id,
            decision="APPROVED",
            db_session=db_session,
        )
        db_session.commit()
        latest = get_latest_proposal_decision(proposal.id, db_session)
        assert latest is not None
        assert latest.decision == "APPROVED"

    def test_get_latest_proposal_decision_returns_none_when_empty(
        self, db_session: Session
    ) -> None:
        proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        db_session.commit()
        assert get_latest_proposal_decision(proposal.id, db_session) is None

    def test_proposal_decision_updates_proposal_status(
        self, db_session: Session, test_user: User
    ) -> None:
        """Verify that recording a decision and updating status works together."""
        proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        db_session.commit()
        create_proposal_decision(
            proposal_id=proposal.id,
            officer_id=test_user.id,
            decision="REJECTED",
            db_session=db_session,
        )
        update_proposal_status(proposal.id, TENANT, "REJECTED", db_session)
        db_session.commit()
        db_session.refresh(proposal)
        assert proposal.status == "REJECTED"

    def test_mark_decision_jira_synced(
        self, db_session: Session, test_user: User
    ) -> None:
        proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        pd = create_proposal_decision(
            proposal_id=proposal.id,
            officer_id=test_user.id,
            decision="APPROVED",
            db_session=db_session,
        )
        db_session.commit()
        assert pd.jira_synced is False
        # Marking synced must flip the flag and stamp the sync time.
        synced = mark_decision_jira_synced(pd.id, db_session)
        db_session.commit()
        assert synced is not None
        assert synced.jira_synced is True
        assert synced.jira_synced_at is not None

    def test_mark_decision_jira_synced_returns_none_for_nonexistent(
        self, db_session: Session
    ) -> None:
        assert mark_decision_jira_synced(uuid4(), db_session) is None
class TestConfig:
    """Integration tests for per-tenant proposal-review config upserts."""

    def test_create_config(self, db_session: Session) -> None:
        # Use a unique tenant to avoid collision with other tests
        tenant = f"test-tenant-{uuid4().hex[:8]}"
        config = upsert_config(
            tenant_id=tenant,
            db_session=db_session,
            jira_project_key="PROJ",
            field_mapping={"title": "summary", "budget": "customfield_10001"},
        )
        db_session.commit()
        assert config.id is not None
        assert config.tenant_id == tenant
        assert config.jira_project_key == "PROJ"
        assert config.field_mapping == {
            "title": "summary",
            "budget": "customfield_10001",
        }

    def test_upsert_config_updates_existing(self, db_session: Session) -> None:
        tenant = f"test-tenant-{uuid4().hex[:8]}"
        first = upsert_config(
            tenant_id=tenant,
            db_session=db_session,
            jira_project_key="OLD",
        )
        db_session.commit()
        first_id = first.id
        # Second upsert for the same tenant must update the same row.
        second = upsert_config(
            tenant_id=tenant,
            db_session=db_session,
            jira_project_key="NEW",
            field_mapping={"x": "y"},
        )
        db_session.commit()
        assert second.id == first_id
        assert second.jira_project_key == "NEW"
        assert second.field_mapping == {"x": "y"}

    def test_get_config_returns_correct_tenant(self, db_session: Session) -> None:
        tenant = f"test-tenant-{uuid4().hex[:8]}"
        upsert_config(
            tenant_id=tenant,
            db_session=db_session,
            jira_project_key="ABC",
            jira_writeback={"status_field": "customfield_20001"},
        )
        db_session.commit()
        fetched = get_config(tenant, db_session)
        assert fetched is not None
        assert fetched.jira_project_key == "ABC"
        assert fetched.jira_writeback == {"status_field": "customfield_20001"}

    def test_get_config_returns_none_for_unknown_tenant(
        self, db_session: Session
    ) -> None:
        assert get_config(f"nonexistent-{uuid4().hex[:8]}", db_session) is None

    def test_upsert_config_preserves_unset_fields(self, db_session: Session) -> None:
        # Upsert is a partial update: fields omitted from a later call keep
        # their previously-stored values.
        tenant = f"test-tenant-{uuid4().hex[:8]}"
        upsert_config(
            tenant_id=tenant,
            db_session=db_session,
            jira_project_key="KEEP",
            jira_connector_id=42,
        )
        db_session.commit()
        # Update only field_mapping, leave jira_project_key alone
        upsert_config(
            tenant_id=tenant,
            db_session=db_session,
            field_mapping={"a": "b"},
        )
        db_session.commit()
        fetched = get_config(tenant, db_session)
        assert fetched is not None
        assert fetched.jira_project_key == "KEEP"
        assert fetched.jira_connector_id == 42
        assert fetched.field_mapping == {"a": "b"}
class TestAuditLog:
    """Integration tests for the proposal audit log (ordering, scoping, null user)."""

    def test_create_audit_log_entry(self, db_session: Session, test_user: User) -> None:
        proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        db_session.commit()
        entry = create_audit_log(
            proposal_id=proposal.id,
            action="REVIEW_STARTED",
            db_session=db_session,
            user_id=test_user.id,
            details={"ruleset_id": str(uuid4())},
        )
        db_session.commit()
        assert entry.id is not None
        assert entry.action == "REVIEW_STARTED"
        assert entry.user_id == test_user.id

    def test_list_audit_log_ordered_by_created_at_desc(
        self, db_session: Session, test_user: User
    ) -> None:
        proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        db_session.commit()
        actions = ["REVIEW_STARTED", "FINDING_CREATED", "DECISION_MADE"]
        # Commit per entry so each row gets a distinct created_at timestamp
        # (a single transaction would give them all the same now()).
        for action in actions:
            create_audit_log(
                proposal_id=proposal.id,
                action=action,
                db_session=db_session,
                user_id=test_user.id,
            )
            db_session.commit()
        entries = list_audit_log(proposal.id, db_session)
        assert len(entries) == 3
        # Newest first
        assert entries[0].action == "DECISION_MADE"
        assert entries[1].action == "FINDING_CREATED"
        assert entries[2].action == "REVIEW_STARTED"

    def test_audit_log_entries_are_scoped_to_proposal(
        self, db_session: Session, test_user: User  # noqa: ARG002
    ) -> None:
        p1 = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        p2 = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        db_session.commit()
        create_audit_log(
            proposal_id=p1.id,
            action="ACTION_A",
            db_session=db_session,
        )
        create_audit_log(
            proposal_id=p2.id,
            action="ACTION_B",
            db_session=db_session,
        )
        db_session.commit()
        # Listing for p1 must not leak p2's entries.
        p1_entries = list_audit_log(p1.id, db_session)
        p1_actions = {e.action for e in p1_entries}
        assert "ACTION_A" in p1_actions
        assert "ACTION_B" not in p1_actions

    def test_audit_log_with_null_user_id(self, db_session: Session) -> None:
        # System-generated entries carry no user_id.
        proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        db_session.commit()
        entry = create_audit_log(
            proposal_id=proposal.id,
            action="SYSTEM_ACTION",
            db_session=db_session,
            details={"source": "automated"},
        )
        db_session.commit()
        assert entry.user_id is None
        assert entry.details == {"source": "automated"}

View File

@@ -1,159 +0,0 @@
"""Integration tests for proposal state management DB operations."""
from uuid import uuid4
from sqlalchemy.orm import Session
from onyx.server.features.proposal_review.db.proposals import count_proposals
from onyx.server.features.proposal_review.db.proposals import get_or_create_proposal
from onyx.server.features.proposal_review.db.proposals import get_proposal
from onyx.server.features.proposal_review.db.proposals import (
get_proposal_by_document_id,
)
from onyx.server.features.proposal_review.db.proposals import list_proposals
from onyx.server.features.proposal_review.db.proposals import update_proposal_status
from tests.external_dependency_unit.constants import TEST_TENANT_ID
TENANT = TEST_TENANT_ID
class TestGetOrCreateProposal:
    """Integration tests for idempotent proposal creation keyed by document_id."""

    def test_creates_proposal_on_first_call(self, db_session: Session) -> None:
        doc_id = f"doc-{uuid4().hex[:8]}"
        proposal = get_or_create_proposal(doc_id, TENANT, db_session)
        db_session.commit()
        assert proposal.id is not None
        assert proposal.document_id == doc_id
        assert proposal.tenant_id == TENANT
        # New proposals start in PENDING.
        assert proposal.status == "PENDING"

    def test_returns_same_proposal_on_second_call(self, db_session: Session) -> None:
        doc_id = f"doc-{uuid4().hex[:8]}"
        first = get_or_create_proposal(doc_id, TENANT, db_session)
        db_session.commit()
        second = get_or_create_proposal(doc_id, TENANT, db_session)
        assert second.id == first.id

    def test_different_document_ids_create_different_proposals(
        self, db_session: Session
    ) -> None:
        p1 = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        p2 = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        db_session.commit()
        assert p1.id != p2.id
class TestGetProposal:
    """Integration tests for fetching a proposal by id, including tenant scoping."""

    def test_returns_none_for_nonexistent_id(self, db_session: Session) -> None:
        result = get_proposal(uuid4(), TENANT, db_session)
        assert result is None

    def test_returns_proposal_by_id(self, db_session: Session) -> None:
        doc_id = f"doc-{uuid4().hex[:8]}"
        created = get_or_create_proposal(doc_id, TENANT, db_session)
        db_session.commit()
        fetched = get_proposal(created.id, TENANT, db_session)
        assert fetched is not None
        assert fetched.id == created.id
        assert fetched.document_id == doc_id

    def test_returns_none_for_wrong_tenant(self, db_session: Session) -> None:
        # A valid id must not be visible under another tenant.
        doc_id = f"doc-{uuid4().hex[:8]}"
        created = get_or_create_proposal(doc_id, TENANT, db_session)
        db_session.commit()
        result = get_proposal(created.id, "nonexistent_tenant", db_session)
        assert result is None
class TestGetProposalByDocumentId:
    """Integration tests for looking proposals up by their source document_id."""

    def test_returns_none_when_no_proposal_exists(self, db_session: Session) -> None:
        result = get_proposal_by_document_id("no-such-doc", TENANT, db_session)
        assert result is None

    def test_finds_proposal_by_document_id(self, db_session: Session) -> None:
        doc_id = f"doc-{uuid4().hex[:8]}"
        created = get_or_create_proposal(doc_id, TENANT, db_session)
        db_session.commit()
        fetched = get_proposal_by_document_id(doc_id, TENANT, db_session)
        assert fetched is not None
        assert fetched.id == created.id
class TestUpdateProposalStatus:
    """Integration tests for proposal status transitions."""

    def test_changes_status_correctly(self, db_session: Session) -> None:
        doc_id = f"doc-{uuid4().hex[:8]}"
        proposal = get_or_create_proposal(doc_id, TENANT, db_session)
        db_session.commit()
        assert proposal.status == "PENDING"
        updated = update_proposal_status(proposal.id, TENANT, "IN_REVIEW", db_session)
        db_session.commit()
        assert updated is not None
        assert updated.status == "IN_REVIEW"
        # Verify persisted
        refetched = get_proposal(proposal.id, TENANT, db_session)
        assert refetched is not None
        assert refetched.status == "IN_REVIEW"

    def test_returns_none_for_nonexistent_proposal(self, db_session: Session) -> None:
        result = update_proposal_status(uuid4(), TENANT, "IN_REVIEW", db_session)
        assert result is None

    def test_successive_status_updates(self, db_session: Session) -> None:
        # Each transition is committed; the last one must stick.
        doc_id = f"doc-{uuid4().hex[:8]}"
        proposal = get_or_create_proposal(doc_id, TENANT, db_session)
        db_session.commit()
        update_proposal_status(proposal.id, TENANT, "IN_REVIEW", db_session)
        db_session.commit()
        update_proposal_status(proposal.id, TENANT, "APPROVED", db_session)
        db_session.commit()
        refetched = get_proposal(proposal.id, TENANT, db_session)
        assert refetched is not None
        assert refetched.status == "APPROVED"
class TestListAndCountProposals:
    """Integration tests for listing/counting proposals with filters and paging.

    Counts use >= comparisons because the shared tenant may already contain
    rows from other tests in the same session.
    """

    def test_list_proposals_with_status_filter(self, db_session: Session) -> None:
        p1 = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        p2 = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        db_session.commit()
        update_proposal_status(p1.id, TENANT, "IN_REVIEW", db_session)
        db_session.commit()
        in_review = list_proposals(TENANT, db_session, status="IN_REVIEW")
        in_review_ids = {p.id for p in in_review}
        assert p1.id in in_review_ids
        assert p2.id not in in_review_ids

    def test_count_proposals_with_status_filter(self, db_session: Session) -> None:
        p1 = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        db_session.commit()
        update_proposal_status(p1.id, TENANT, "COMPLETED", db_session)
        db_session.commit()
        total = count_proposals(TENANT, db_session)
        completed = count_proposals(TENANT, db_session, status="COMPLETED")
        assert total >= 2
        assert completed >= 1

    def test_list_proposals_pagination(self, db_session: Session) -> None:
        for _ in range(5):
            get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        db_session.commit()
        page = list_proposals(TENANT, db_session, limit=2, offset=0)
        assert len(page) <= 2

View File

@@ -1,448 +0,0 @@
"""Integration tests for review run + findings + progress tracking."""
from uuid import uuid4
from sqlalchemy.orm import Session
from onyx.db.models import User
from onyx.server.features.proposal_review.db.findings import create_finding
from onyx.server.features.proposal_review.db.findings import create_review_run
from onyx.server.features.proposal_review.db.findings import get_finding
from onyx.server.features.proposal_review.db.findings import get_latest_review_run
from onyx.server.features.proposal_review.db.findings import get_review_run
from onyx.server.features.proposal_review.db.findings import (
list_findings_by_proposal,
)
from onyx.server.features.proposal_review.db.findings import list_findings_by_run
from onyx.server.features.proposal_review.db.proposals import get_or_create_proposal
from onyx.server.features.proposal_review.db.rulesets import create_rule
from onyx.server.features.proposal_review.db.rulesets import create_ruleset
from tests.external_dependency_unit.constants import TEST_TENANT_ID
TENANT = TEST_TENANT_ID
class TestReviewRun:
    def test_create_review_run_and_verify_status(
        self, db_session: Session, test_user: User
    ) -> None:
        """A freshly created run starts PENDING with completed_rules == 0."""
        rs = create_ruleset(
            tenant_id=TENANT,
            name=f"Review RS {uuid4().hex[:6]}",
            db_session=db_session,
            created_by=test_user.id,
        )
        create_rule(
            ruleset_id=rs.id,
            name="Rule 1",
            rule_type="DOCUMENT_CHECK",
            prompt_template="t1",
            db_session=db_session,
        )
        create_rule(
            ruleset_id=rs.id,
            name="Rule 2",
            rule_type="DOCUMENT_CHECK",
            prompt_template="t2",
            db_session=db_session,
        )
        proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        db_session.commit()
        run = create_review_run(
            proposal_id=proposal.id,
            ruleset_id=rs.id,
            triggered_by=test_user.id,
            total_rules=2,
            db_session=db_session,
        )
        db_session.commit()
        assert run.id is not None
        assert run.proposal_id == proposal.id
        assert run.ruleset_id == rs.id
        assert run.triggered_by == test_user.id
        assert run.total_rules == 2
        assert run.completed_rules == 0
        assert run.status == "PENDING"
def test_get_review_run(self, db_session: Session, test_user: User) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"RS {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
run = create_review_run(
proposal_id=proposal.id,
ruleset_id=rs.id,
triggered_by=test_user.id,
total_rules=1,
db_session=db_session,
)
db_session.commit()
fetched = get_review_run(run.id, db_session)
assert fetched is not None
assert fetched.id == run.id
def test_get_review_run_returns_none_for_nonexistent(
self, db_session: Session
) -> None:
assert get_review_run(uuid4(), db_session) is None
def test_get_latest_review_run(self, db_session: Session, test_user: User) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"RS {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
create_review_run(
proposal_id=proposal.id,
ruleset_id=rs.id,
triggered_by=test_user.id,
total_rules=1,
db_session=db_session,
)
db_session.commit()
run2 = create_review_run(
proposal_id=proposal.id,
ruleset_id=rs.id,
triggered_by=test_user.id,
total_rules=2,
db_session=db_session,
)
db_session.commit()
latest = get_latest_review_run(proposal.id, db_session)
assert latest is not None
assert latest.id == run2.id
def test_increment_completed_rules_tracks_progress(
self, db_session: Session, test_user: User
) -> None:
rs = create_ruleset(
tenant_id=TENANT,
name=f"Progress RS {uuid4().hex[:6]}",
db_session=db_session,
created_by=test_user.id,
)
proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
run = create_review_run(
proposal_id=proposal.id,
ruleset_id=rs.id,
triggered_by=test_user.id,
total_rules=3,
db_session=db_session,
)
db_session.commit()
# Simulate progress by incrementing completed_rules directly
run.completed_rules = 1
db_session.flush()
db_session.commit()
fetched = get_review_run(run.id, db_session)
assert fetched is not None
assert fetched.completed_rules == 1
assert fetched.total_rules == 3
run.completed_rules = 3
db_session.flush()
db_session.commit()
fetched = get_review_run(run.id, db_session)
assert fetched is not None
assert fetched.completed_rules == 3
class TestFindings:
    """Integration tests for finding creation, retrieval, and run-scoped filtering."""
    def test_create_finding_and_retrieve(
        self, db_session: Session, test_user: User
    ) -> None:
        """A finding round-trips verdict, confidence, evidence, and LLM stats."""
        rs = create_ruleset(
            tenant_id=TENANT,
            name=f"Findings RS {uuid4().hex[:6]}",
            db_session=db_session,
            created_by=test_user.id,
        )
        rule = create_rule(
            ruleset_id=rs.id,
            name="Budget Rule",
            rule_type="DOCUMENT_CHECK",
            prompt_template="Check budget",
            db_session=db_session,
        )
        proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        run = create_review_run(
            proposal_id=proposal.id,
            ruleset_id=rs.id,
            triggered_by=test_user.id,
            total_rules=1,
            db_session=db_session,
        )
        db_session.commit()
        finding = create_finding(
            proposal_id=proposal.id,
            rule_id=rule.id,
            review_run_id=run.id,
            verdict="PASS",
            db_session=db_session,
            confidence="HIGH",
            evidence="Budget is $500k",
            explanation="Under the $1M cap",
            llm_model="gpt-4",
            llm_tokens_used=1500,
        )
        db_session.commit()
        fetched = get_finding(finding.id, db_session)
        assert fetched is not None
        assert fetched.verdict == "PASS"
        assert fetched.confidence == "HIGH"
        assert fetched.evidence == "Budget is $500k"
        assert fetched.llm_model == "gpt-4"
        assert fetched.llm_tokens_used == 1500
        # The rule relationship is navigable from the fetched finding.
        assert fetched.rule is not None
        assert fetched.rule.name == "Budget Rule"
    def test_list_findings_by_proposal(
        self, db_session: Session, test_user: User
    ) -> None:
        """All findings across rules are returned for a proposal."""
        rs = create_ruleset(
            tenant_id=TENANT,
            name=f"List Findings RS {uuid4().hex[:6]}",
            db_session=db_session,
            created_by=test_user.id,
        )
        rule1 = create_rule(
            ruleset_id=rs.id,
            name="R1",
            rule_type="DOCUMENT_CHECK",
            prompt_template="t1",
            db_session=db_session,
        )
        rule2 = create_rule(
            ruleset_id=rs.id,
            name="R2",
            rule_type="DOCUMENT_CHECK",
            prompt_template="t2",
            db_session=db_session,
        )
        proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        run = create_review_run(
            proposal_id=proposal.id,
            ruleset_id=rs.id,
            triggered_by=test_user.id,
            total_rules=2,
            db_session=db_session,
        )
        db_session.commit()
        create_finding(
            proposal_id=proposal.id,
            rule_id=rule1.id,
            review_run_id=run.id,
            verdict="PASS",
            db_session=db_session,
        )
        create_finding(
            proposal_id=proposal.id,
            rule_id=rule2.id,
            review_run_id=run.id,
            verdict="FAIL",
            db_session=db_session,
        )
        db_session.commit()
        findings = list_findings_by_proposal(proposal.id, db_session)
        assert len(findings) == 2
        verdicts = {f.verdict for f in findings}
        assert verdicts == {"PASS", "FAIL"}
    def test_list_findings_by_run_filters_correctly(
        self, db_session: Session, test_user: User
    ) -> None:
        """Findings are partitioned per run even for the same proposal/rule."""
        rs = create_ruleset(
            tenant_id=TENANT,
            name=f"Run Filter RS {uuid4().hex[:6]}",
            db_session=db_session,
            created_by=test_user.id,
        )
        rule = create_rule(
            ruleset_id=rs.id,
            name="R",
            rule_type="DOCUMENT_CHECK",
            prompt_template="t",
            db_session=db_session,
        )
        proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        run1 = create_review_run(
            proposal_id=proposal.id,
            ruleset_id=rs.id,
            triggered_by=test_user.id,
            total_rules=1,
            db_session=db_session,
        )
        run2 = create_review_run(
            proposal_id=proposal.id,
            ruleset_id=rs.id,
            triggered_by=test_user.id,
            total_rules=1,
            db_session=db_session,
        )
        db_session.commit()
        create_finding(
            proposal_id=proposal.id,
            rule_id=rule.id,
            review_run_id=run1.id,
            verdict="PASS",
            db_session=db_session,
        )
        create_finding(
            proposal_id=proposal.id,
            rule_id=rule.id,
            review_run_id=run2.id,
            verdict="FAIL",
            db_session=db_session,
        )
        db_session.commit()
        run1_findings = list_findings_by_run(run1.id, db_session)
        assert len(run1_findings) == 1
        assert run1_findings[0].verdict == "PASS"
        run2_findings = list_findings_by_run(run2.id, db_session)
        assert len(run2_findings) == 1
        assert run2_findings[0].verdict == "FAIL"
    def test_list_findings_by_proposal_with_run_id_filter(
        self, db_session: Session, test_user: User
    ) -> None:
        """The optional review_run_id argument narrows a proposal-wide listing."""
        rs = create_ruleset(
            tenant_id=TENANT,
            name=f"Filter RS {uuid4().hex[:6]}",
            db_session=db_session,
            created_by=test_user.id,
        )
        rule = create_rule(
            ruleset_id=rs.id,
            name="R",
            rule_type="DOCUMENT_CHECK",
            prompt_template="t",
            db_session=db_session,
        )
        proposal = get_or_create_proposal(f"doc-{uuid4().hex[:8]}", TENANT, db_session)
        run1 = create_review_run(
            proposal_id=proposal.id,
            ruleset_id=rs.id,
            triggered_by=test_user.id,
            total_rules=1,
            db_session=db_session,
        )
        run2 = create_review_run(
            proposal_id=proposal.id,
            ruleset_id=rs.id,
            triggered_by=test_user.id,
            total_rules=1,
            db_session=db_session,
        )
        db_session.commit()
        create_finding(
            proposal_id=proposal.id,
            rule_id=rule.id,
            review_run_id=run1.id,
            verdict="PASS",
            db_session=db_session,
        )
        create_finding(
            proposal_id=proposal.id,
            rule_id=rule.id,
            review_run_id=run2.id,
            verdict="FAIL",
            db_session=db_session,
        )
        db_session.commit()
        # All findings for proposal
        all_findings = list_findings_by_proposal(proposal.id, db_session)
        assert len(all_findings) == 2
        # Filtered by run
        filtered = list_findings_by_proposal(
            proposal.id, db_session, review_run_id=run1.id
        )
        assert len(filtered) == 1
        assert filtered[0].verdict == "PASS"
    def test_get_finding_returns_none_for_nonexistent(
        self, db_session: Session
    ) -> None:
        """Looking up a random UUID yields None rather than raising."""
        assert get_finding(uuid4(), db_session) is None
    def test_full_review_flow_end_to_end(
        self, db_session: Session, test_user: User
    ) -> None:
        """Create ruleset with rules -> proposal -> run -> findings -> verify."""
        # Setup
        rs = create_ruleset(
            tenant_id=TENANT,
            name=f"E2E RS {uuid4().hex[:6]}",
            db_session=db_session,
            created_by=test_user.id,
        )
        rules = []
        for i in range(3):
            r = create_rule(
                ruleset_id=rs.id,
                name=f"E2E Rule {i}",
                rule_type="DOCUMENT_CHECK",
                prompt_template=f"Check {i}: {{{{proposal_text}}}}",
                db_session=db_session,
            )
            rules.append(r)
        proposal = get_or_create_proposal(
            f"doc-e2e-{uuid4().hex[:8]}", TENANT, db_session
        )
        run = create_review_run(
            proposal_id=proposal.id,
            ruleset_id=rs.id,
            triggered_by=test_user.id,
            total_rules=3,
            db_session=db_session,
        )
        db_session.commit()
        # Create findings for each rule
        verdicts = ["PASS", "FAIL", "PASS"]
        for rule, verdict in zip(rules, verdicts):
            create_finding(
                proposal_id=proposal.id,
                rule_id=rule.id,
                review_run_id=run.id,
                verdict=verdict,
                db_session=db_session,
                confidence="HIGH",
            )
            run.completed_rules += 1
            db_session.flush()
        db_session.commit()
        # Verify
        fetched_run = get_review_run(run.id, db_session)
        assert fetched_run is not None
        assert fetched_run.completed_rules == 3
        assert fetched_run.total_rules == 3
        findings = list_findings_by_run(run.id, db_session)
        assert len(findings) == 3
        # Set comparison: only the distinct verdicts are checked here.
        assert {f.verdict for f in findings} == {"PASS", "FAIL"}

View File

@@ -1,424 +0,0 @@
"""Integration tests for ruleset + rule CRUD DB operations."""
from uuid import uuid4
from sqlalchemy.orm import Session
from onyx.db.models import User
from onyx.server.features.proposal_review.db.rulesets import bulk_update_rules
from onyx.server.features.proposal_review.db.rulesets import count_active_rules
from onyx.server.features.proposal_review.db.rulesets import create_rule
from onyx.server.features.proposal_review.db.rulesets import create_ruleset
from onyx.server.features.proposal_review.db.rulesets import delete_rule
from onyx.server.features.proposal_review.db.rulesets import delete_ruleset
from onyx.server.features.proposal_review.db.rulesets import get_rule
from onyx.server.features.proposal_review.db.rulesets import get_ruleset
from onyx.server.features.proposal_review.db.rulesets import list_rules_by_ruleset
from onyx.server.features.proposal_review.db.rulesets import list_rulesets
from onyx.server.features.proposal_review.db.rulesets import update_rule
from onyx.server.features.proposal_review.db.rulesets import update_ruleset
from tests.external_dependency_unit.constants import TEST_TENANT_ID
# Shared tenant id for every ruleset/rule fixture created in this module.
TENANT = TEST_TENANT_ID
class TestRulesetCRUD:
    """Integration tests for ruleset create/list/update/delete behavior."""
    def test_create_ruleset_appears_in_list(
        self, db_session: Session, test_user: User
    ) -> None:
        """A committed ruleset shows up in the tenant-wide listing."""
        rs = create_ruleset(
            tenant_id=TENANT,
            name=f"Compliance v1 {uuid4().hex[:6]}",
            db_session=db_session,
            description="First ruleset",
            created_by=test_user.id,
        )
        db_session.commit()
        rulesets = list_rulesets(TENANT, db_session)
        ids = [r.id for r in rulesets]
        assert rs.id in ids
    def test_create_ruleset_with_rules_returned_together(
        self, db_session: Session, test_user: User
    ) -> None:
        """get_ruleset loads the ruleset together with its rules."""
        rs = create_ruleset(
            tenant_id=TENANT,
            name=f"RS with rules {uuid4().hex[:6]}",
            db_session=db_session,
            created_by=test_user.id,
        )
        create_rule(
            ruleset_id=rs.id,
            name="Rule A",
            rule_type="DOCUMENT_CHECK",
            prompt_template="Check A: {{proposal_text}}",
            db_session=db_session,
        )
        create_rule(
            ruleset_id=rs.id,
            name="Rule B",
            rule_type="METADATA_CHECK",
            prompt_template="Check B: {{proposal_text}}",
            db_session=db_session,
        )
        db_session.commit()
        fetched = get_ruleset(rs.id, TENANT, db_session)
        assert fetched is not None
        assert len(fetched.rules) == 2
        rule_names = {r.name for r in fetched.rules}
        assert rule_names == {"Rule A", "Rule B"}
    def test_list_rulesets_active_only_filter(
        self, db_session: Session, test_user: User
    ) -> None:
        """active_only=True excludes rulesets deactivated via update_ruleset."""
        rs_active = create_ruleset(
            tenant_id=TENANT,
            name=f"Active RS {uuid4().hex[:6]}",
            db_session=db_session,
            created_by=test_user.id,
        )
        rs_inactive = create_ruleset(
            tenant_id=TENANT,
            name=f"Inactive RS {uuid4().hex[:6]}",
            db_session=db_session,
            created_by=test_user.id,
        )
        update_ruleset(rs_inactive.id, TENANT, db_session, is_active=False)
        db_session.commit()
        active_rulesets = list_rulesets(TENANT, db_session, active_only=True)
        active_ids = {r.id for r in active_rulesets}
        assert rs_active.id in active_ids
        assert rs_inactive.id not in active_ids
    def test_update_ruleset_changes_persist(
        self, db_session: Session, test_user: User
    ) -> None:
        """Name/description updates are visible both on the return and on re-fetch."""
        rs = create_ruleset(
            tenant_id=TENANT,
            name=f"Original {uuid4().hex[:6]}",
            db_session=db_session,
            created_by=test_user.id,
        )
        db_session.commit()
        updated = update_ruleset(
            rs.id,
            TENANT,
            db_session,
            name="Updated Name",
            description="New desc",
        )
        db_session.commit()
        assert updated is not None
        assert updated.name == "Updated Name"
        assert updated.description == "New desc"
        refetched = get_ruleset(rs.id, TENANT, db_session)
        assert refetched is not None
        assert refetched.name == "Updated Name"
    def test_update_nonexistent_ruleset_returns_none(self, db_session: Session) -> None:
        """Updating a random UUID returns None rather than raising."""
        result = update_ruleset(uuid4(), TENANT, db_session, name="nope")
        assert result is None
    def test_delete_ruleset_returns_false_for_nonexistent(
        self, db_session: Session
    ) -> None:
        """Deleting a random UUID reports failure via False."""
        assert delete_ruleset(uuid4(), TENANT, db_session) is False
    def test_set_default_ruleset_clears_previous_default(
        self, db_session: Session, test_user: User
    ) -> None:
        """Only one ruleset per tenant may be default; the old one is demoted."""
        rs1 = create_ruleset(
            tenant_id=TENANT,
            name=f"Default 1 {uuid4().hex[:6]}",
            db_session=db_session,
            is_default=True,
            created_by=test_user.id,
        )
        db_session.commit()
        assert rs1.is_default is True
        rs2 = create_ruleset(
            tenant_id=TENANT,
            name=f"Default 2 {uuid4().hex[:6]}",
            db_session=db_session,
            is_default=True,
            created_by=test_user.id,
        )
        db_session.commit()
        # rs1 should no longer be default
        db_session.refresh(rs1)
        assert rs1.is_default is False
        assert rs2.is_default is True
    def test_delete_ruleset_cascade_deletes_rules(
        self, db_session: Session, test_user: User
    ) -> None:
        """Deleting a ruleset also removes its rules (cascade)."""
        rs = create_ruleset(
            tenant_id=TENANT,
            name=f"RS to delete {uuid4().hex[:6]}",
            db_session=db_session,
            created_by=test_user.id,
        )
        r1 = create_rule(
            ruleset_id=rs.id,
            name="Doomed Rule",
            rule_type="DOCUMENT_CHECK",
            prompt_template="{{proposal_text}}",
            db_session=db_session,
        )
        # Capture the id before delete; the ORM object is invalidated afterwards.
        rule_id = r1.id
        db_session.commit()
        assert delete_ruleset(rs.id, TENANT, db_session) is True
        db_session.commit()
        assert get_ruleset(rs.id, TENANT, db_session) is None
        assert get_rule(rule_id, db_session) is None
class TestRuleCRUD:
    """Integration tests for rule CRUD, bulk actions, and active-rule counting."""
    def test_create_and_get_rule(self, db_session: Session, test_user: User) -> None:
        """All creation attributes round-trip on a re-fetched rule."""
        rs = create_ruleset(
            tenant_id=TENANT,
            name=f"RS for rules {uuid4().hex[:6]}",
            db_session=db_session,
            created_by=test_user.id,
        )
        rule = create_rule(
            ruleset_id=rs.id,
            name="Budget Cap",
            rule_type="DOCUMENT_CHECK",
            prompt_template="Check budget cap: {{proposal_text}}",
            db_session=db_session,
            description="Verify budget < $1M",
            category="FINANCIAL",
            is_hard_stop=True,
            priority=10,
        )
        db_session.commit()
        fetched = get_rule(rule.id, db_session)
        assert fetched is not None
        assert fetched.name == "Budget Cap"
        assert fetched.rule_type == "DOCUMENT_CHECK"
        assert fetched.is_hard_stop is True
        assert fetched.priority == 10
        assert fetched.category == "FINANCIAL"
    def test_update_rule_prompt_template_and_is_active(
        self, db_session: Session, test_user: User
    ) -> None:
        """update_rule changes persist on both the return value and a re-fetch."""
        rs = create_ruleset(
            tenant_id=TENANT,
            name=f"RS {uuid4().hex[:6]}",
            db_session=db_session,
            created_by=test_user.id,
        )
        rule = create_rule(
            ruleset_id=rs.id,
            name="Rule X",
            rule_type="CUSTOM_NL",
            prompt_template="old template",
            db_session=db_session,
        )
        db_session.commit()
        # Rules default to active on creation.
        assert rule.is_active is True
        updated = update_rule(
            rule.id,
            db_session,
            prompt_template="new template: {{proposal_text}}",
            is_active=False,
        )
        db_session.commit()
        assert updated is not None
        assert updated.prompt_template == "new template: {{proposal_text}}"
        assert updated.is_active is False
        refetched = get_rule(rule.id, db_session)
        assert refetched is not None
        assert refetched.prompt_template == "new template: {{proposal_text}}"
        assert refetched.is_active is False
    def test_update_nonexistent_rule_returns_none(self, db_session: Session) -> None:
        """Updating a random UUID returns None rather than raising."""
        result = update_rule(uuid4(), db_session, name="nope")
        assert result is None
    def test_delete_rule(self, db_session: Session, test_user: User) -> None:
        """A deleted rule is no longer retrievable."""
        rs = create_ruleset(
            tenant_id=TENANT,
            name=f"RS {uuid4().hex[:6]}",
            db_session=db_session,
            created_by=test_user.id,
        )
        rule = create_rule(
            ruleset_id=rs.id,
            name="Temp Rule",
            rule_type="DOCUMENT_CHECK",
            prompt_template="{{proposal_text}}",
            db_session=db_session,
        )
        # Capture the id before delete; the ORM object is invalidated afterwards.
        rule_id = rule.id
        db_session.commit()
        assert delete_rule(rule_id, db_session) is True
        db_session.commit()
        assert get_rule(rule_id, db_session) is None
    def test_delete_nonexistent_rule_returns_false(self, db_session: Session) -> None:
        """Deleting a random UUID reports failure via False."""
        assert delete_rule(uuid4(), db_session) is False
    def test_list_rules_by_ruleset_respects_active_only(
        self, db_session: Session, test_user: User
    ) -> None:
        """active_only=True excludes rules deactivated via update_rule."""
        rs = create_ruleset(
            tenant_id=TENANT,
            name=f"RS {uuid4().hex[:6]}",
            db_session=db_session,
            created_by=test_user.id,
        )
        r_active = create_rule(
            ruleset_id=rs.id,
            name="Active",
            rule_type="DOCUMENT_CHECK",
            prompt_template="{{proposal_text}}",
            db_session=db_session,
        )
        r_inactive = create_rule(
            ruleset_id=rs.id,
            name="Inactive",
            rule_type="DOCUMENT_CHECK",
            prompt_template="{{proposal_text}}",
            db_session=db_session,
        )
        update_rule(r_inactive.id, db_session, is_active=False)
        db_session.commit()
        all_rules = list_rules_by_ruleset(rs.id, db_session)
        assert len(all_rules) == 2
        active_rules = list_rules_by_ruleset(rs.id, db_session, active_only=True)
        assert len(active_rules) == 1
        assert active_rules[0].id == r_active.id
    def test_bulk_activate_rules_only_affects_specified_rules(
        self, db_session: Session, test_user: User
    ) -> None:
        """Bulk activate touches exactly the supplied rule ids, no others."""
        rs = create_ruleset(
            tenant_id=TENANT,
            name=f"Bulk test RS {uuid4().hex[:6]}",
            db_session=db_session,
            created_by=test_user.id,
        )
        # Create 5 rules, all initially active
        rules = []
        for i in range(5):
            r = create_rule(
                ruleset_id=rs.id,
                name=f"Rule {i}",
                rule_type="DOCUMENT_CHECK",
                prompt_template=f"Template {i}",
                db_session=db_session,
            )
            rules.append(r)
        db_session.commit()
        # Deactivate all 5
        all_ids = [r.id for r in rules]
        bulk_update_rules(all_ids, "deactivate", rs.id, db_session)
        db_session.commit()
        # Verify all are inactive
        assert count_active_rules(rs.id, db_session) == 0
        # Bulk activate only the first 3
        activate_ids = [rules[0].id, rules[1].id, rules[2].id]
        count = bulk_update_rules(activate_ids, "activate", rs.id, db_session)
        db_session.commit()
        assert count == 3
        assert count_active_rules(rs.id, db_session) == 3
        # Verify exactly which are active
        active_rules = list_rules_by_ruleset(rs.id, db_session, active_only=True)
        active_ids_result = {r.id for r in active_rules}
        assert active_ids_result == set(activate_ids)
    def test_bulk_delete_rules(self, db_session: Session, test_user: User) -> None:
        """Bulk delete removes exactly the supplied rules and reports the count."""
        rs = create_ruleset(
            tenant_id=TENANT,
            name=f"Bulk delete RS {uuid4().hex[:6]}",
            db_session=db_session,
            created_by=test_user.id,
        )
        r1 = create_rule(
            ruleset_id=rs.id,
            name="Keep",
            rule_type="DOCUMENT_CHECK",
            prompt_template="keep",
            db_session=db_session,
        )
        r2 = create_rule(
            ruleset_id=rs.id,
            name="Delete 1",
            rule_type="DOCUMENT_CHECK",
            prompt_template="del1",
            db_session=db_session,
        )
        r3 = create_rule(
            ruleset_id=rs.id,
            name="Delete 2",
            rule_type="DOCUMENT_CHECK",
            prompt_template="del2",
            db_session=db_session,
        )
        db_session.commit()
        count = bulk_update_rules([r2.id, r3.id], "delete", rs.id, db_session)
        db_session.commit()
        assert count == 2
        remaining = list_rules_by_ruleset(rs.id, db_session)
        assert len(remaining) == 1
        assert remaining[0].id == r1.id
    def test_bulk_update_unknown_action_raises_error(self, db_session: Session) -> None:
        """An unrecognized bulk action string raises ValueError."""
        # NOTE(review): prefer a module-level `import pytest` over this
        # local aliased import; kept as-is here to avoid touching code.
        import pytest as _pytest
        with _pytest.raises(ValueError, match="Unknown bulk action"):
            bulk_update_rules([uuid4()], "explode", uuid4(), db_session)
    def test_count_active_rules(self, db_session: Session, test_user: User) -> None:
        """count_active_rules ignores rules that were deactivated."""
        rs = create_ruleset(
            tenant_id=TENANT,
            name=f"Count RS {uuid4().hex[:6]}",
            db_session=db_session,
            created_by=test_user.id,
        )
        create_rule(
            ruleset_id=rs.id,
            name="Active1",
            rule_type="DOCUMENT_CHECK",
            prompt_template="t1",
            db_session=db_session,
        )
        r2 = create_rule(
            ruleset_id=rs.id,
            name="Inactive1",
            rule_type="DOCUMENT_CHECK",
            prompt_template="t2",
            db_session=db_session,
        )
        update_rule(r2.id, db_session, is_active=False)
        db_session.commit()
        assert count_active_rules(rs.id, db_session) == 1

View File

@@ -1,659 +0,0 @@
"""Tests for Jira connector enhancements: custom field extraction and attachment fetching."""
from types import SimpleNamespace
from typing import Any
from unittest.mock import MagicMock
from unittest.mock import patch
from jira import JIRA
from jira.resources import CustomFieldOption
from jira.resources import User
from onyx.connectors.jira.connector import _MAX_ATTACHMENT_SIZE_BYTES
from onyx.connectors.jira.connector import JiraConnector
from onyx.connectors.jira.connector import process_jira_issue
from onyx.connectors.jira.utils import CustomFieldExtractor
from onyx.connectors.models import ConnectorFailure
from onyx.connectors.models import Document
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
class _FieldsBag:
"""A plain object whose __dict__ is exactly what we put in it.
MagicMock pollutes __dict__ with internal bookkeeping, which breaks
CustomFieldExtractor.get_issue_custom_fields (it iterates __dict__).
This class gives us full control over the attribute namespace.
"""
def __init__(self, **kwargs: Any) -> None:
for k, v in kwargs.items():
object.__setattr__(self, k, v)
def _make_mock_issue(
    key: str = "TEST-1",
    summary: str = "Test Issue",
    description: str = "Test description",
    labels: list[str] | None = None,
    extra_fields: dict[str, Any] | None = None,
    attachments: list[Any] | None = None,
) -> MagicMock:
    """Build a mock Issue with standard fields wired up.

    ``issue.fields`` is a _FieldsBag so that ``fields.__dict__`` contains
    only the Jira field attributes (no MagicMock internals). SimpleNamespace
    sub-objects return real values on attribute access instead of
    auto-generated MagicMock children.
    """
    base_fields: dict[str, Any] = {
        "description": description,
        "summary": summary,
        "labels": labels or [],
        "updated": "2024-01-01T00:00:00+0000",
        "reporter": SimpleNamespace(
            displayName="Reporter Name",
            emailAddress="reporter@example.com",
        ),
        "assignee": SimpleNamespace(
            displayName="Assignee Name",
            emailAddress="assignee@example.com",
        ),
        "priority": SimpleNamespace(name="High"),
        "status": SimpleNamespace(name="Open"),
        "resolution": None,
        "project": SimpleNamespace(key="TEST", name="Test Project"),
        "issuetype": SimpleNamespace(name="Bug"),
        "parent": None,
        "created": "2024-01-01T00:00:00+0000",
        "duedate": None,
        "resolutiondate": None,
        "comment": SimpleNamespace(comments=[]),
        "attachment": [] if attachments is None else attachments,
    }
    if extra_fields:
        base_fields.update(extra_fields)
    # The issue itself is also a _FieldsBag: a MagicMock would auto-create
    # attributes for field names like "reporter", shadowing the real
    # values on issue.fields inside process_jira_issue.
    return _FieldsBag(  # type: ignore[return-value]
        fields=_FieldsBag(**base_fields),
        key=key,
        raw={"fields": {"description": description}},
    )
def _make_attachment(
attachment_id: str = "att-1",
filename: str = "report.pdf",
size: int = 1024,
content_url: str | None = "https://jira.example.com/attachment/att-1",
mime_type: str = "application/pdf",
created: str | None = "2026-01-15T10:00:00.000+0000",
download_content: bytes = b"binary content",
download_raises: Exception | None = None,
) -> MagicMock:
"""Build a mock Jira attachment resource."""
att = MagicMock()
att.id = attachment_id
att.filename = filename
att.size = size
att.content = content_url
att.mimeType = mime_type
att.created = created
if download_raises:
att.get.side_effect = download_raises
else:
att.get.return_value = download_content
return att
# ===================================================================
# Test 1: Custom Field Extraction
# ===================================================================
class TestCustomFieldExtractorGetAllCustomFields:
    """Tests for CustomFieldExtractor.get_all_custom_fields."""

    def test_returns_only_custom_fields(self) -> None:
        """Standard fields are filtered out; only custom fields survive."""
        client = MagicMock(spec=JIRA)
        client.fields.return_value = [
            {"id": "summary", "name": "Summary", "custom": False},
            {"id": "customfield_10001", "name": "Sprint", "custom": True},
            {"id": "status", "name": "Status", "custom": False},
            {"id": "customfield_10002", "name": "Story Points", "custom": True},
        ]
        extracted = CustomFieldExtractor.get_all_custom_fields(client)
        expected = {
            "customfield_10001": "Sprint",
            "customfield_10002": "Story Points",
        }
        assert extracted == expected
        for standard_id in ("summary", "status"):
            assert standard_id not in extracted

    def test_returns_empty_dict_when_no_custom_fields(self) -> None:
        """With only standard fields present, the result is empty."""
        client = MagicMock(spec=JIRA)
        client.fields.return_value = [
            {"id": "summary", "name": "Summary", "custom": False},
        ]
        assert CustomFieldExtractor.get_all_custom_fields(client) == {}
class TestCustomFieldExtractorGetIssueCustomFields:
    """Tests for per-issue custom field value extraction and filtering."""
    def test_string_value_extracted(self) -> None:
        """String custom field values pass through as-is."""
        issue = _make_mock_issue(extra_fields={"customfield_10001": "v2024.1"})
        mapping = {"customfield_10001": "Release Version"}
        result = CustomFieldExtractor.get_issue_custom_fields(issue, mapping)
        assert result == {"Release Version": "v2024.1"}
    def test_custom_field_option_value_extracted_as_string(self) -> None:
        """CustomFieldOption objects are converted via .value."""
        option = MagicMock(spec=CustomFieldOption)
        option.value = "Critical Path"
        issue = _make_mock_issue(extra_fields={"customfield_10002": option})
        mapping = {"customfield_10002": "Category"}
        result = CustomFieldExtractor.get_issue_custom_fields(issue, mapping)
        assert result == {"Category": "Critical Path"}
    def test_user_value_extracted_as_display_name(self) -> None:
        """User objects are converted via .displayName."""
        user = MagicMock(spec=User)
        user.displayName = "Alice Johnson"
        issue = _make_mock_issue(extra_fields={"customfield_10003": user})
        mapping = {"customfield_10003": "Reviewer"}
        result = CustomFieldExtractor.get_issue_custom_fields(issue, mapping)
        assert result == {"Reviewer": "Alice Johnson"}
    def test_list_value_extracted_as_space_joined_string(self) -> None:
        """Lists of values are space-joined after individual processing."""
        opt1 = MagicMock(spec=CustomFieldOption)
        opt1.value = "Backend"
        opt2 = MagicMock(spec=CustomFieldOption)
        opt2.value = "Frontend"
        issue = _make_mock_issue(extra_fields={"customfield_10004": [opt1, opt2]})
        mapping = {"customfield_10004": "Components"}
        result = CustomFieldExtractor.get_issue_custom_fields(issue, mapping)
        assert result == {"Components": "Backend Frontend"}
    def test_none_value_excluded(self) -> None:
        """None custom field values are excluded from the result."""
        issue = _make_mock_issue(extra_fields={"customfield_10005": None})
        mapping = {"customfield_10005": "Optional Field"}
        result = CustomFieldExtractor.get_issue_custom_fields(issue, mapping)
        assert "Optional Field" not in result
    def test_value_exceeding_max_length_excluded(self) -> None:
        """Values longer than max_value_length are excluded."""
        long_value = "x" * 300  # exceeds the default 250 limit
        issue = _make_mock_issue(extra_fields={"customfield_10006": long_value})
        mapping = {"customfield_10006": "Long Description"}
        result = CustomFieldExtractor.get_issue_custom_fields(issue, mapping)
        assert "Long Description" not in result
    def test_value_at_exact_max_length_excluded(self) -> None:
        """Values at exactly max_value_length are excluded (< not <=)."""
        exact_value = "x" * 250  # exactly 250, not < 250
        issue = _make_mock_issue(extra_fields={"customfield_10007": exact_value})
        mapping = {"customfield_10007": "Edge Case"}
        result = CustomFieldExtractor.get_issue_custom_fields(issue, mapping)
        assert "Edge Case" not in result
    def test_value_just_under_max_length_included(self) -> None:
        """Values just under max_value_length are included."""
        under_value = "x" * 249
        issue = _make_mock_issue(extra_fields={"customfield_10008": under_value})
        mapping = {"customfield_10008": "Just Under"}
        result = CustomFieldExtractor.get_issue_custom_fields(issue, mapping)
        assert result == {"Just Under": under_value}
    def test_unmapped_custom_fields_ignored(self) -> None:
        """Custom fields not in the mapping dict are not included."""
        issue = _make_mock_issue(
            extra_fields={
                "customfield_10001": "mapped_value",
                "customfield_99999": "unmapped_value",
            }
        )
        mapping = {"customfield_10001": "Mapped Field"}
        result = CustomFieldExtractor.get_issue_custom_fields(issue, mapping)
        assert "Mapped Field" in result
        # Exactly one entry: the unmapped field must not leak through.
        assert len(result) == 1
class TestProcessJiraIssueWithCustomFields:
    """Tests that process_jira_issue merges custom fields into Document metadata."""
    def test_custom_fields_added_to_metadata(self) -> None:
        """When custom_fields_mapping is provided, custom fields appear in metadata."""
        option = MagicMock(spec=CustomFieldOption)
        option.value = "High Impact"
        issue = _make_mock_issue(
            extra_fields={
                "customfield_10001": "Sprint 42",
                "customfield_10002": option,
            }
        )
        mapping = {
            "customfield_10001": "Sprint",
            "customfield_10002": "Impact Level",
        }
        doc = process_jira_issue(
            jira_base_url="https://jira.example.com",
            issue=issue,
            custom_fields_mapping=mapping,
        )
        assert doc is not None
        assert doc.metadata["Sprint"] == "Sprint 42"
        assert doc.metadata["Impact Level"] == "High Impact"
        # Standard fields should still be present
        assert doc.metadata["key"] == "TEST-1"
    def test_no_custom_fields_when_mapping_is_none(self) -> None:
        """When custom_fields_mapping is None, no custom fields in metadata."""
        issue = _make_mock_issue(
            extra_fields={"customfield_10001": "should_not_appear"}
        )
        doc = process_jira_issue(
            jira_base_url="https://jira.example.com",
            issue=issue,
            custom_fields_mapping=None,
        )
        assert doc is not None
        # The custom field name should not appear since we didn't provide a mapping
        assert "customfield_10001" not in doc.metadata
    def test_custom_field_extraction_failure_does_not_break_processing(self) -> None:
        """If custom field extraction raises, the document is still returned."""
        issue = _make_mock_issue()
        mapping = {"customfield_10001": "Broken Field"}
        # Force the extractor to blow up; processing must degrade gracefully.
        with patch.object(
            CustomFieldExtractor,
            "get_issue_custom_fields",
            side_effect=RuntimeError("extraction failed"),
        ):
            doc = process_jira_issue(
                jira_base_url="https://jira.example.com",
                issue=issue,
                custom_fields_mapping=mapping,
            )
        assert doc is not None
        # The document should still have standard metadata
        assert doc.metadata["key"] == "TEST-1"
        # The broken custom field should not have leaked into metadata
        assert "Broken Field" not in doc.metadata
# ===================================================================
# Test 2: Attachment Fetching
# ===================================================================
class TestProcessAttachments:
    """Tests for JiraConnector._process_attachments."""
    def _make_connector(self, fetch_attachments: bool = True) -> JiraConnector:
        """Create a JiraConnector wired with a mock client."""
        connector = JiraConnector(
            jira_base_url="https://jira.example.com",
            project_key="TEST",
            fetch_attachments=fetch_attachments,
        )
        # Don't use spec=JIRA because _process_attachments accesses
        # the private _session attribute, which spec blocks.
        mock_client = MagicMock()
        mock_client._options = {"rest_api_version": "2"}
        mock_client.client_info.return_value = "https://jira.example.com"
        connector._jira_client = mock_client
        return connector
    @patch("onyx.connectors.jira.connector.extract_file_text")
    def test_happy_path_two_attachments(self, mock_extract: MagicMock) -> None:
        """Two normal attachments yield two Documents with correct structure."""
        # side_effect supplies one extracted text per attachment, in order.
        mock_extract.side_effect = ["Text from report", "Text from spec"]
        att1 = _make_attachment(
            attachment_id="att-1",
            filename="report.pdf",
            size=1024,
            download_content=b"report bytes",
        )
        att2 = _make_attachment(
            attachment_id="att-2",
            filename="spec.docx",
            size=2048,
            content_url="https://jira.example.com/attachment/att-2",
            mime_type="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
            download_content=b"spec bytes",
        )
        issue = _make_mock_issue(key="TEST-42", attachments=[att1, att2])
        connector = self._make_connector()
        results = list(
            connector._process_attachments(issue, parent_hierarchy_raw_node_id="TEST")
        )
        docs = [r for r in results if isinstance(r, Document)]
        assert len(docs) == 2
        # First attachment
        assert docs[0].id == "https://jira.example.com/browse/TEST-42/attachments/att-1"
        assert docs[0].title == "report.pdf"
        assert docs[0].metadata["parent_ticket"] == "TEST-42"
        assert docs[0].metadata["attachment_filename"] == "report.pdf"
        assert docs[0].metadata["attachment_size"] == "1024"
        assert docs[0].parent_hierarchy_raw_node_id == "TEST"
        assert docs[0].sections[0].text == "Text from report"
        # Second attachment
        assert docs[1].id == "https://jira.example.com/browse/TEST-42/attachments/att-2"
        assert docs[1].title == "spec.docx"
    @patch("onyx.connectors.jira.connector.extract_file_text")
    def test_large_attachment_skipped(self, mock_extract: MagicMock) -> None:
        """Attachments exceeding 50 MB are skipped silently (only warning logged)."""
        large_att = _make_attachment(
            size=_MAX_ATTACHMENT_SIZE_BYTES + 1,
            filename="huge.zip",
        )
        issue = _make_mock_issue(attachments=[large_att])
        connector = self._make_connector()
        results = list(connector._process_attachments(issue, None))
        assert len(results) == 0
        # Oversized attachments must never be downloaded or parsed.
        mock_extract.assert_not_called()
    @patch("onyx.connectors.jira.connector.extract_file_text")
    def test_no_content_url_skipped(self, mock_extract: MagicMock) -> None:
        """Attachments with no content URL are skipped gracefully."""
        att = _make_attachment(content_url=None, filename="orphan.txt")
        issue = _make_mock_issue(attachments=[att])
        connector = self._make_connector()
        results = list(connector._process_attachments(issue, None))
        assert len(results) == 0
        mock_extract.assert_not_called()
    @patch("onyx.connectors.jira.connector.extract_file_text")
    def test_download_failure_yields_connector_failure(
        self, mock_extract: MagicMock
    ) -> None:
        """If the download raises, a ConnectorFailure is yielded; other attachments continue."""
        att_bad = _make_attachment(
            attachment_id="att-bad",
            filename="broken.pdf",
            content_url="https://jira.example.com/attachment/att-bad",
            download_raises=ConnectionError("download failed"),
        )
        att_good = _make_attachment(
            attachment_id="att-good",
            filename="good.pdf",
            content_url="https://jira.example.com/attachment/att-good",
            download_content=b"good content",
        )
        issue = _make_mock_issue(attachments=[att_bad, att_good])
        connector = self._make_connector()
        mock_extract.return_value = "extracted good text"
        results = list(connector._process_attachments(issue, None))
        failures = [r for r in results if isinstance(r, ConnectorFailure)]
        docs = [r for r in results if isinstance(r, Document)]
        assert len(failures) == 1
        assert "broken.pdf" in failures[0].failure_message
        assert len(docs) == 1
        assert docs[0].title == "good.pdf"
@patch("onyx.connectors.jira.connector.extract_file_text")
def test_text_extraction_failure_skips_attachment(
self, mock_extract: MagicMock
) -> None:
"""If extract_file_text raises, the attachment is skipped (not a ConnectorFailure)."""
att = _make_attachment(
filename="bad_format.xyz", download_content=b"some bytes"
)
issue = _make_mock_issue(attachments=[att])
connector = self._make_connector()
mock_extract.side_effect = ValueError("Unsupported format")
results = list(connector._process_attachments(issue, None))
assert len(results) == 0
@patch("onyx.connectors.jira.connector.extract_file_text")
def test_empty_text_extraction_skips_attachment(
self, mock_extract: MagicMock
) -> None:
"""Attachments yielding empty text are skipped."""
att = _make_attachment(filename="empty.pdf", download_content=b"some bytes")
issue = _make_mock_issue(attachments=[att])
connector = self._make_connector()
mock_extract.return_value = ""
results = list(connector._process_attachments(issue, None))
assert len(results) == 0
@patch("onyx.connectors.jira.connector.extract_file_text")
def test_whitespace_only_text_skips_attachment(
self, mock_extract: MagicMock
) -> None:
"""Attachments yielding only whitespace are skipped."""
att = _make_attachment(filename="whitespace.txt", download_content=b" ")
issue = _make_mock_issue(attachments=[att])
connector = self._make_connector()
mock_extract.return_value = " \n\t "
results = list(connector._process_attachments(issue, None))
assert len(results) == 0
@patch("onyx.connectors.jira.connector.extract_file_text")
def test_no_attachments_on_issue(self, mock_extract: MagicMock) -> None:
"""When an issue has no attachments, nothing is yielded."""
issue = _make_mock_issue(attachments=[])
connector = self._make_connector()
results = list(connector._process_attachments(issue, None))
assert len(results) == 0
mock_extract.assert_not_called()
@patch("onyx.connectors.jira.connector.extract_file_text")
def test_attachment_field_is_none(self, mock_extract: MagicMock) -> None:
"""When the attachment field is None (not set), nothing is yielded."""
issue = _make_mock_issue()
# Override attachment to be explicitly falsy (best_effort_get_field returns None)
issue.fields.attachment = None
issue.fields.__dict__["attachment"] = None
connector = self._make_connector()
results = list(connector._process_attachments(issue, None))
assert len(results) == 0
mock_extract.assert_not_called()
class TestFetchAttachmentsFlag:
    """Verify _process_attachments is only called when fetch_attachments=True."""
    def test_fetch_attachments_false_skips_processing(self) -> None:
        """With fetch_attachments=False, _process_attachments should not be invoked
        during the load_from_checkpoint flow."""
        connector = JiraConnector(
            jira_base_url="https://jira.example.com",
            project_key="TEST",
            fetch_attachments=False,
        )
        assert connector.fetch_attachments is False
        with patch.object(connector, "_process_attachments") as mock_process:
            # Mirror the guard in _load_from_checkpoint: attachments are
            # only processed when the flag is enabled.
            if connector.fetch_attachments:
                connector._process_attachments(MagicMock(), None)
            mock_process.assert_not_called()
# ===================================================================
# Test 3: Backwards Compatibility
# ===================================================================
class TestBackwardsCompatibility:
    """Ensure the custom-fields / attachments enhancements are strictly opt-in:
    default construction, the old process_jira_issue signature, and
    load_credentials all behave exactly as before the feature was added."""
    def test_default_config_has_flags_off(self) -> None:
        """JiraConnector defaults have both new feature flags disabled."""
        connector = JiraConnector(
            jira_base_url="https://jira.example.com",
            project_key="TEST",
        )
        assert connector.extract_custom_fields is False
        assert connector.fetch_attachments is False
    def test_default_config_has_empty_custom_fields_mapping(self) -> None:
        """Before load_credentials, the custom fields mapping is empty."""
        connector = JiraConnector(
            jira_base_url="https://jira.example.com",
            project_key="TEST",
        )
        assert connector._custom_fields_mapping == {}
    def test_process_jira_issue_without_mapping_has_no_custom_fields(self) -> None:
        """Calling process_jira_issue without custom_fields_mapping produces
        the same metadata as the pre-enhancement code."""
        issue = _make_mock_issue(
            key="COMPAT-1",
            extra_fields={"customfield_10001": "should_be_ignored"},
        )
        doc = process_jira_issue(
            jira_base_url="https://jira.example.com",
            issue=issue,
        )
        assert doc is not None
        # Standard fields present
        assert doc.metadata["key"] == "COMPAT-1"
        assert doc.metadata["priority"] == "High"
        assert doc.metadata["status"] == "Open"
        # No custom field should leak through
        for key in doc.metadata:
            assert not key.startswith(
                "customfield_"
            ), f"Custom field {key} leaked into metadata without mapping"
    def test_process_jira_issue_default_params_match_old_signature(self) -> None:
        """process_jira_issue with only the required params works identically
        to the pre-enhancement signature (jira_base_url + issue)."""
        issue = _make_mock_issue()
        doc_new = process_jira_issue(
            jira_base_url="https://jira.example.com",
            issue=issue,
        )
        doc_explicit_none = process_jira_issue(
            jira_base_url="https://jira.example.com",
            issue=issue,
            custom_fields_mapping=None,
        )
        assert doc_new is not None
        assert doc_explicit_none is not None
        # Passing custom_fields_mapping=None must be indistinguishable
        # from omitting the parameter entirely.
        assert doc_new.metadata == doc_explicit_none.metadata
        assert doc_new.id == doc_explicit_none.id
    def test_load_credentials_does_not_fetch_custom_fields_when_flag_off(self) -> None:
        """When extract_custom_fields=False, load_credentials does not call
        get_all_custom_fields."""
        connector = JiraConnector(
            jira_base_url="https://jira.example.com",
            project_key="TEST",
            extract_custom_fields=False,
        )
        with patch("onyx.connectors.jira.connector.build_jira_client") as mock_build:
            mock_client = MagicMock(spec=JIRA)
            mock_build.return_value = mock_client
            connector.load_credentials({"jira_api_token": "tok"})
            mock_client.fields.assert_not_called()
        assert connector._custom_fields_mapping == {}
    def test_load_credentials_fetches_custom_fields_when_flag_on(self) -> None:
        """When extract_custom_fields=True, load_credentials populates the mapping."""
        connector = JiraConnector(
            jira_base_url="https://jira.example.com",
            project_key="TEST",
            extract_custom_fields=True,
        )
        with patch("onyx.connectors.jira.connector.build_jira_client") as mock_build:
            mock_client = MagicMock(spec=JIRA)
            # Only entries flagged custom=True should land in the mapping.
            mock_client.fields.return_value = [
                {"id": "summary", "name": "Summary", "custom": False},
                {"id": "customfield_10001", "name": "Sprint", "custom": True},
            ]
            mock_build.return_value = mock_client
            connector.load_credentials({"jira_api_token": "tok"})
        assert connector._custom_fields_mapping == {"customfield_10001": "Sprint"}
    def test_load_credentials_handles_custom_fields_fetch_failure(self) -> None:
        """If get_all_custom_fields raises, the mapping stays empty and no exception propagates."""
        connector = JiraConnector(
            jira_base_url="https://jira.example.com",
            project_key="TEST",
            extract_custom_fields=True,
        )
        with patch("onyx.connectors.jira.connector.build_jira_client") as mock_build:
            mock_client = MagicMock(spec=JIRA)
            mock_client.fields.side_effect = RuntimeError("API unavailable")
            mock_build.return_value = mock_client
            # Should not raise
            connector.load_credentials({"jira_api_token": "tok"})
        assert connector._custom_fields_mapping == {}

View File

@@ -2,6 +2,7 @@
from __future__ import annotations
from typing import Any
from unittest.mock import MagicMock
from unittest.mock import patch
from uuid import uuid4
@@ -9,7 +10,9 @@ from uuid import uuid4
from fastapi import Response
from sqlalchemy.exc import IntegrityError
from ee.onyx.server.scim.api import _check_seat_availability
from ee.onyx.server.scim.api import _scim_name_to_str
from ee.onyx.server.scim.api import _seat_lock_id_for_tenant
from ee.onyx.server.scim.api import create_user
from ee.onyx.server.scim.api import delete_user
from ee.onyx.server.scim.api import get_user
@@ -741,3 +744,80 @@ class TestEmailCasePreservation:
resource = parse_scim_user(result)
assert resource.userName == "Alice@Example.COM"
assert resource.emails[0].value == "Alice@Example.COM"
class TestSeatLock:
    """Tests for the advisory lock in _check_seat_availability."""
    @patch("ee.onyx.server.scim.api.get_current_tenant_id", return_value="tenant_abc")
    def test_acquires_advisory_lock_before_checking(
        self,
        _mock_tenant: MagicMock,
        mock_dal: MagicMock,
    ) -> None:
        """The advisory lock must be acquired before the seat check runs."""
        call_order: list[str] = []
        def track_execute(stmt: Any, _params: Any = None) -> None:
            # Record only the advisory-lock statement; any other session
            # executes are irrelevant to the ordering assertion below.
            if "pg_advisory_xact_lock" in str(stmt):
                call_order.append("lock")
        mock_dal.session.execute.side_effect = track_execute
        with patch(
            "ee.onyx.server.scim.api.fetch_ee_implementation_or_noop"
        ) as mock_fetch:
            mock_result = MagicMock()
            mock_result.available = True
            mock_fn = MagicMock(return_value=mock_result)
            mock_fetch.return_value = mock_fn
            def track_check(*_args: Any, **_kwargs: Any) -> Any:
                # The EE seat-check records its position in the same list,
                # so the final assertion proves lock-then-check ordering.
                call_order.append("check")
                return mock_result
            mock_fn.side_effect = track_check
            _check_seat_availability(mock_dal)
            assert call_order == ["lock", "check"]
    @patch("ee.onyx.server.scim.api.get_current_tenant_id", return_value="tenant_xyz")
    def test_lock_uses_tenant_scoped_key(
        self,
        _mock_tenant: MagicMock,
        mock_dal: MagicMock,
    ) -> None:
        """The lock id must be derived from the tenant via _seat_lock_id_for_tenant."""
        mock_result = MagicMock()
        mock_result.available = True
        mock_check = MagicMock(return_value=mock_result)
        with patch(
            "ee.onyx.server.scim.api.fetch_ee_implementation_or_noop",
            return_value=mock_check,
        ):
            _check_seat_availability(mock_dal)
        mock_dal.session.execute.assert_called_once()
        # execute(stmt, params): the bound params are the second positional arg.
        params = mock_dal.session.execute.call_args[0][1]
        assert params["lock_id"] == _seat_lock_id_for_tenant("tenant_xyz")
    def test_seat_lock_id_is_stable_and_tenant_scoped(self) -> None:
        """Lock id must be deterministic and differ across tenants."""
        assert _seat_lock_id_for_tenant("t1") == _seat_lock_id_for_tenant("t1")
        assert _seat_lock_id_for_tenant("t1") != _seat_lock_id_for_tenant("t2")
    def test_no_lock_when_ee_absent(
        self,
        mock_dal: MagicMock,
    ) -> None:
        """No advisory lock should be acquired when the EE check is absent."""
        with patch(
            "ee.onyx.server.scim.api.fetch_ee_implementation_or_noop",
            return_value=None,
        ):
            result = _check_seat_availability(mock_dal)
        assert result is None
        mock_dal.session.execute.assert_not_called()

View File

@@ -1,73 +0,0 @@
"""Shared fixtures for proposal review engine unit tests."""
import json
from unittest.mock import MagicMock
from uuid import UUID
from uuid import uuid4
import pytest
# ---------------------------------------------------------------------------
# Lightweight stand-in for ProposalContext (avoids importing the real one,
# which pulls in SQLAlchemy models that are irrelevant to pure-logic tests).
# The real dataclass lives in context_assembler.py; we import it directly
# where needed but provide a builder here for convenience.
# ---------------------------------------------------------------------------
@pytest.fixture
def make_proposal_context():
    """Factory fixture that builds a ProposalContext with sensible defaults."""
    # Imported inside the fixture rather than at module top (see note above).
    from onyx.server.features.proposal_review.engine.context_assembler import (
        ProposalContext,
    )
    def _build(
        proposal_text: str = "Default proposal text.",
        budget_text: str = "",
        foa_text: str = "",
        metadata: dict | None = None,
        jira_key: str = "PROJ-100",
    ) -> "ProposalContext":
        """Return a ProposalContext, defaulting any field not supplied."""
        return ProposalContext(
            proposal_text=proposal_text,
            budget_text=budget_text,
            foa_text=foa_text,
            metadata={} if metadata is None else metadata,
            jira_key=jira_key,
        )
    return _build
@pytest.fixture
def make_rule():
    """Factory fixture that builds a minimal mock ProposalReviewRule."""
    def _build(
        name: str = "Test Rule",
        prompt_template: str = "Evaluate: {{proposal_text}}",
        rule_id: UUID | None = None,
    ) -> MagicMock:
        # Only id/name/prompt_template are set explicitly; any other
        # attribute access is served by MagicMock auto-attributes.
        mock_rule = MagicMock()
        mock_rule.id = rule_id or uuid4()
        mock_rule.name = name
        mock_rule.prompt_template = prompt_template
        return mock_rule
    return _build
@pytest.fixture
def well_formed_llm_json() -> str:
    """A valid JSON string matching the expected rule-evaluator response schema."""
    payload = {
        "verdict": "PASS",
        "confidence": "HIGH",
        "evidence": "Section 4.2 states the budget is $500k.",
        "explanation": "The proposal meets the budget cap requirement.",
        "suggested_action": None,
    }
    return json.dumps(payload)

View File

@@ -1,294 +0,0 @@
"""Unit tests for the checklist importer engine component.
Tests cover:
- _parse_import_response: JSON array parsing and validation
- _validate_rule: field validation, type normalization, missing fields
- Compound decomposition (multiple rules sharing a category)
- Refinement detection (refinement_needed / refinement_question)
- Malformed response handling (invalid JSON, non-array)
- import_checklist: empty-input guard and LLM error propagation
"""
import json
from unittest.mock import MagicMock
from unittest.mock import patch
import pytest
from onyx.server.features.proposal_review.engine.checklist_importer import (
_parse_import_response,
)
from onyx.server.features.proposal_review.engine.checklist_importer import (
_validate_rule,
)
from onyx.server.features.proposal_review.engine.checklist_importer import (
import_checklist,
)
# =====================================================================
# _validate_rule -- single rule validation
# =====================================================================
class TestValidateRule:
    """Tests for _validate_rule (field validation and normalization)."""
    def test_valid_rule_passes(self):
        # A fully-populated, well-formed rule passes through unchanged.
        raw = {
            "name": "Check budget cap",
            "description": "Ensures budget is under $500k",
            "category": "IR-2: Budget",
            "rule_type": "DOCUMENT_CHECK",
            "rule_intent": "CHECK",
            "prompt_template": "Review {{budget_text}} for compliance.",
            "refinement_needed": False,
            "refinement_question": None,
        }
        result = _validate_rule(raw, 0)
        assert result is not None
        assert result["name"] == "Check budget cap"
        assert result["rule_type"] == "DOCUMENT_CHECK"
        assert result["rule_intent"] == "CHECK"
        assert result["refinement_needed"] is False
    def test_missing_name_returns_none(self):
        # "name" and "prompt_template" are the two required fields.
        raw = {"prompt_template": "something"}
        assert _validate_rule(raw, 0) is None
    def test_missing_prompt_template_returns_none(self):
        raw = {"name": "A rule"}
        assert _validate_rule(raw, 0) is None
    def test_invalid_rule_type_defaults_to_custom_nl(self):
        # Unknown enum-like values are normalized, not rejected.
        raw = {
            "name": "Test",
            "prompt_template": "t",
            "rule_type": "INVALID_TYPE",
        }
        result = _validate_rule(raw, 0)
        assert result["rule_type"] == "CUSTOM_NL"
    def test_invalid_rule_intent_defaults_to_check(self):
        raw = {
            "name": "Test",
            "prompt_template": "t",
            "rule_intent": "NOTIFY",
        }
        result = _validate_rule(raw, 0)
        assert result["rule_intent"] == "CHECK"
    def test_missing_rule_type_defaults_to_custom_nl(self):
        raw = {"name": "Test", "prompt_template": "t"}
        result = _validate_rule(raw, 0)
        assert result["rule_type"] == "CUSTOM_NL"
    def test_missing_rule_intent_defaults_to_check(self):
        raw = {"name": "Test", "prompt_template": "t"}
        result = _validate_rule(raw, 0)
        assert result["rule_intent"] == "CHECK"
    def test_name_truncated_to_200_chars(self):
        # Over-long names are clipped to 200 characters.
        raw = {"name": "x" * 300, "prompt_template": "t"}
        result = _validate_rule(raw, 0)
        assert len(result["name"]) == 200
    def test_refinement_needed_truthy_values(self):
        raw = {
            "name": "Test",
            "prompt_template": "t",
            "refinement_needed": True,
            "refinement_question": "What is the IDC rate?",
        }
        result = _validate_rule(raw, 0)
        assert result["refinement_needed"] is True
        assert result["refinement_question"] == "What is the IDC rate?"
    def test_refinement_needed_defaults_false(self):
        raw = {"name": "Test", "prompt_template": "t"}
        result = _validate_rule(raw, 0)
        assert result["refinement_needed"] is False
        assert result["refinement_question"] is None
    @pytest.mark.parametrize(
        "rule_type",
        ["DOCUMENT_CHECK", "METADATA_CHECK", "CROSS_REFERENCE", "CUSTOM_NL"],
    )
    def test_all_valid_rule_types_accepted(self, rule_type):
        raw = {"name": "Test", "prompt_template": "t", "rule_type": rule_type}
        result = _validate_rule(raw, 0)
        assert result["rule_type"] == rule_type
    @pytest.mark.parametrize("intent", ["CHECK", "HIGHLIGHT"])
    def test_all_valid_intents_accepted(self, intent):
        raw = {"name": "Test", "prompt_template": "t", "rule_intent": intent}
        result = _validate_rule(raw, 0)
        assert result["rule_intent"] == intent
# =====================================================================
# _parse_import_response -- full array parsing
# =====================================================================
class TestParseImportResponse:
    """Tests for _parse_import_response (JSON array parsing + validation)."""
    def test_parses_valid_array(self):
        rules_json = json.dumps(
            [
                {
                    "name": "Rule A",
                    "description": "Checks A",
                    "category": "Cat-1",
                    "rule_type": "DOCUMENT_CHECK",
                    "rule_intent": "CHECK",
                    "prompt_template": "Check {{proposal_text}}",
                    "refinement_needed": False,
                    "refinement_question": None,
                },
                {
                    "name": "Rule B",
                    "description": "Checks B",
                    "category": "Cat-1",
                    "rule_type": "METADATA_CHECK",
                    "rule_intent": "HIGHLIGHT",
                    "prompt_template": "Check {{metadata.sponsor}}",
                    "refinement_needed": False,
                    "refinement_question": None,
                },
            ]
        )
        result = _parse_import_response(rules_json)
        assert len(result) == 2
        assert result[0]["name"] == "Rule A"
        assert result[1]["name"] == "Rule B"
    def test_strips_markdown_code_fences(self):
        # LLM responses are often wrapped in ```json fences; the parser
        # must tolerate and strip them.
        inner = json.dumps([{"name": "R", "prompt_template": "p"}])
        raw = f"```json\n{inner}\n```"
        result = _parse_import_response(raw)
        assert len(result) == 1
        assert result[0]["name"] == "R"
    def test_invalid_json_raises_runtime_error(self):
        with pytest.raises(RuntimeError, match="invalid JSON"):
            _parse_import_response("not valid json [")
    def test_non_array_json_raises_runtime_error(self):
        # A top-level object (rather than an array of rules) is rejected.
        with pytest.raises(RuntimeError, match="non-array JSON"):
            _parse_import_response('{"name": "single rule"}')
    def test_skips_non_dict_entries(self):
        raw = json.dumps(
            [
                {"name": "Valid", "prompt_template": "p"},
                "this is a string, not a dict",
                42,
            ]
        )
        result = _parse_import_response(raw)
        assert len(result) == 1
        assert result[0]["name"] == "Valid"
    def test_skips_rules_missing_required_fields(self):
        raw = json.dumps(
            [
                {"name": "Valid", "prompt_template": "p"},
                {"description": "no name or template"},
            ]
        )
        result = _parse_import_response(raw)
        assert len(result) == 1
    def test_compound_decomposition_shared_category(self):
        """Multiple rules from the same checklist item share a category."""
        rules_json = json.dumps(
            [
                {
                    "name": "Budget under 500k",
                    "category": "IR-3: Budget Compliance",
                    "prompt_template": "Check if budget < 500k using {{budget_text}}",
                },
                {
                    "name": "Budget justification present",
                    "category": "IR-3: Budget Compliance",
                    "prompt_template": "Check for budget justification in {{proposal_text}}",
                },
                {
                    "name": "Indirect costs correct",
                    "category": "IR-3: Budget Compliance",
                    "prompt_template": "Verify IDC rates in {{budget_text}}",
                    "refinement_needed": True,
                    "refinement_question": "What is the negotiated IDC rate?",
                },
            ]
        )
        result = _parse_import_response(rules_json)
        assert len(result) == 3
        # All share the same category
        categories = {r["category"] for r in result}
        assert categories == {"IR-3: Budget Compliance"}
    def test_refinement_preserved_in_output(self):
        raw = json.dumps(
            [
                {
                    "name": "IDC Rate Check",
                    "prompt_template": "Verify {{INSTITUTION_IDC_RATES}} against {{budget_text}}",
                    "refinement_needed": True,
                    "refinement_question": "What are your institution's IDC rates?",
                }
            ]
        )
        result = _parse_import_response(raw)
        assert len(result) == 1
        assert result[0]["refinement_needed"] is True
        assert "IDC rates" in result[0]["refinement_question"]
# =====================================================================
# import_checklist -- top-level function
# =====================================================================
class TestImportChecklist:
    """Tests for the top-level import_checklist function."""
    def test_empty_text_returns_empty_list(self):
        # Blank or whitespace-only input short-circuits before any LLM call.
        assert import_checklist("") == []
        assert import_checklist(" ") == []
    def test_none_text_returns_empty_list(self):
        # The function checks `not extracted_text`, which is True for None
        assert import_checklist(None) == []  # type: ignore[arg-type]
    @patch(
        "onyx.server.features.proposal_review.engine.checklist_importer.get_default_llm"
    )
    @patch(
        "onyx.server.features.proposal_review.engine.checklist_importer.llm_response_to_string"
    )
    def test_successful_import(self, mock_to_string, mock_get_llm):
        # NOTE: @patch decorators apply bottom-up, so llm_response_to_string
        # arrives as the first mock argument and get_default_llm as the second.
        rules_json = json.dumps(
            [
                {"name": "Rule 1", "prompt_template": "Check {{proposal_text}}"},
            ]
        )
        mock_to_string.return_value = rules_json
        mock_llm = MagicMock()
        mock_get_llm.return_value = mock_llm
        result = import_checklist("Some checklist content here.")
        assert len(result) == 1
        assert result[0]["name"] == "Rule 1"
        mock_llm.invoke.assert_called_once()
    @patch(
        "onyx.server.features.proposal_review.engine.checklist_importer.get_default_llm"
    )
    def test_llm_failure_raises_runtime_error(self, mock_get_llm):
        # LLM setup failures surface as RuntimeError("Failed to parse checklist...").
        mock_get_llm.side_effect = RuntimeError("No API key")
        with pytest.raises(RuntimeError, match="Failed to parse checklist"):
            import_checklist("Some checklist content.")

View File

@@ -1,462 +0,0 @@
"""Unit tests for the context assembler engine component.
Tests cover:
- get_proposal_context: full assembly with mocked DB queries
- Budget detection by document role and filename
- FOA detection by document role
- Multiple document concatenation
- Missing documents / missing proposal handling
- _is_budget_filename helper
- _build_parent_document_text helper
- _classify_child_text helper
"""
from unittest.mock import MagicMock
from uuid import uuid4
import pytest
from onyx.server.features.proposal_review.engine.context_assembler import (
_build_parent_document_text,
)
from onyx.server.features.proposal_review.engine.context_assembler import (
_classify_child_text,
)
from onyx.server.features.proposal_review.engine.context_assembler import (
_is_budget_filename,
)
from onyx.server.features.proposal_review.engine.context_assembler import (
get_proposal_context,
)
from onyx.server.features.proposal_review.engine.context_assembler import (
ProposalContext,
)
# =====================================================================
# _is_budget_filename
# =====================================================================
class TestIsBudgetFilename:
    """Tests for _is_budget_filename helper."""
    @pytest.mark.parametrize(
        "filename",
        [
            "budget.xlsx",
            "BUDGET_justification.pdf",
            "project_budget_v2.docx",
            "cost_estimate.xlsx",
            "financial_plan.pdf",
            "annual_expenditure.csv",
        ],
    )
    def test_budget_filenames_detected(self, filename):
        # The accepted cases show matching is case-insensitive and driven by
        # budget-related keywords (budget/cost/financial/expenditure),
        # regardless of file extension.
        assert _is_budget_filename(filename) is True
    @pytest.mark.parametrize(
        "filename",
        [
            "narrative.pdf",
            "abstract.docx",
            "biosketch.pdf",
            "facilities.docx",
            "",
        ],
    )
    def test_non_budget_filenames_not_detected(self, filename):
        assert _is_budget_filename(filename) is False
    def test_none_filename_returns_false(self):
        # None is tolerated at runtime even though the annotation expects str.
        assert _is_budget_filename(None) is False  # type: ignore[arg-type]
# =====================================================================
# _build_parent_document_text
# =====================================================================
class TestBuildParentDocumentText:
    """Tests for _build_parent_document_text helper."""
    @staticmethod
    def _doc(semantic_id=None, link=None, doc_metadata=None):
        """Stub Document exposing just the fields the helper reads."""
        stub = MagicMock()
        stub.semantic_id = semantic_id
        stub.link = link
        stub.doc_metadata = doc_metadata
        return stub
    def test_includes_semantic_id(self):
        rendered = _build_parent_document_text(self._doc(semantic_id="PROJ-42"))
        assert "PROJ-42" in rendered
    def test_includes_link(self):
        rendered = _build_parent_document_text(
            self._doc(link="https://jira.example.com/PROJ-42")
        )
        assert "https://jira.example.com/PROJ-42" in rendered
    def test_includes_metadata_as_json(self):
        rendered = _build_parent_document_text(
            self._doc(
                semantic_id="PROJ-42",
                doc_metadata={"sponsor": "NIH", "pi": "Dr. Smith"},
            )
        )
        assert "NIH" in rendered
        assert "Dr. Smith" in rendered
    def test_empty_document_returns_minimal_text(self):
        stub = self._doc()
        stub.primary_owners = None
        stub.secondary_owners = None
        rendered = _build_parent_document_text(stub)
        # With no content at all, no meaningful data may appear in the
        # output (only structural headers, if anything).
        assert "NIH" not in rendered
        assert "Dr. Smith" not in rendered
# =====================================================================
# _classify_child_text
# =====================================================================
class TestClassifyChildText:
    """Tests for _classify_child_text helper."""
    @staticmethod
    def _classify(semantic_id, text):
        """Run _classify_child_text on a stub doc; return (budget, foa) lists."""
        stub = MagicMock()
        stub.semantic_id = semantic_id
        budget: list[str] = []
        foa: list[str] = []
        _classify_child_text(stub, text, budget, foa)
        return budget, foa
    def test_budget_filename_classified(self):
        budget, foa = self._classify(
            "PROJ-42/attachments/budget_v2.xlsx", "budget content"
        )
        assert "budget content" in budget
        assert foa == []
    def test_foa_filename_classified(self):
        budget, foa = self._classify(
            "PROJ-42/attachments/foa_document.pdf", "foa content"
        )
        assert foa == ["foa content"]
        assert budget == []
    def test_solicitation_keyword_classified_as_foa(self):
        budget, foa = self._classify("solicitation_2024.pdf", "solicitation text")
        assert "solicitation text" in foa
    def test_rfa_keyword_classified_as_foa(self):
        budget, foa = self._classify("RFA-AI-24-001.pdf", "rfa text")
        assert "rfa text" in foa
    def test_unrelated_filename_not_classified(self):
        budget, foa = self._classify("narrative_v3.docx", "narrative text")
        assert budget == []
        assert foa == []
    def test_none_semantic_id_not_classified(self):
        budget, foa = self._classify(None, "some text")
        assert budget == []
        assert foa == []
# =====================================================================
# get_proposal_context -- full assembly with mocked DB
# =====================================================================
def _make_mock_proposal(document_id="DOC-123"):
"""Create a mock ProposalReviewProposal."""
proposal = MagicMock()
proposal.id = uuid4()
proposal.document_id = document_id
return proposal
def _make_mock_document(
doc_id="DOC-123",
semantic_id="PROJ-42",
link=None,
doc_metadata=None,
):
"""Create a mock Document."""
doc = MagicMock()
doc.id = doc_id
doc.semantic_id = semantic_id
doc.link = link
doc.doc_metadata = doc_metadata or {}
return doc
def _make_mock_review_doc(
file_name="doc.pdf",
document_role="SUPPORTING",
extracted_text="Some text.",
):
"""Create a mock ProposalReviewDocument."""
doc = MagicMock()
doc.file_name = file_name
doc.document_role = document_role
doc.extracted_text = extracted_text
return doc
class TestGetProposalContext:
"""Tests for get_proposal_context with mocked DB session."""
def _setup_db(
self,
proposal=None,
parent_doc=None,
child_docs=None,
manual_docs=None,
):
"""Build a mock db_session with controlled query results.
The function under test does three queries:
1. ProposalReviewProposal by id
2. Document by id (parent doc)
3. Document.id.like(...) (child docs)
4. ProposalReviewDocument by proposal_id (manual docs)
We use side_effect on db_session.query() to differentiate them.
"""
db = MagicMock()
# We need to handle multiple .query() calls with different model classes.
# The function calls:
# db_session.query(ProposalReviewProposal).filter(...).one_or_none()
# db_session.query(Document).filter(...).one_or_none()
# db_session.query(Document).filter(..., ...).all()
# db_session.query(ProposalReviewDocument).filter(...).order_by(...).all()
call_count = {"n": 0}
def query_side_effect(model_cls):
call_count["n"] += 1
mock_query = MagicMock()
model_name = getattr(model_cls, "__name__", str(model_cls))
if model_name == "ProposalReviewProposal":
mock_query.filter.return_value = mock_query
mock_query.one_or_none.return_value = proposal
return mock_query
if model_name == "Document":
# First Document query is for parent (one_or_none),
# second is for children (all).
# We track via a sub-counter.
if not hasattr(query_side_effect, "_doc_calls"):
query_side_effect._doc_calls = 0
query_side_effect._doc_calls += 1
if query_side_effect._doc_calls == 1:
# Parent doc query
mock_query.filter.return_value = mock_query
mock_query.one_or_none.return_value = parent_doc
else:
# Child docs query
mock_query.filter.return_value = mock_query
mock_query.all.return_value = child_docs or []
return mock_query
if model_name == "ProposalReviewDocument":
mock_query.filter.return_value = mock_query
mock_query.order_by.return_value = mock_query
mock_query.all.return_value = manual_docs or []
return mock_query
return mock_query
# Reset the doc_calls counter if it exists from a previous test
if hasattr(query_side_effect, "_doc_calls"):
del query_side_effect._doc_calls
db.query.side_effect = query_side_effect
return db
def test_basic_assembly_with_parent_doc(self):
proposal = _make_mock_proposal()
parent_doc = _make_mock_document(
semantic_id="PROJ-42",
doc_metadata={"sponsor": "NIH", "pi": "Dr. Smith"},
)
db = self._setup_db(proposal=proposal, parent_doc=parent_doc)
ctx = get_proposal_context(proposal.id, db)
assert isinstance(ctx, ProposalContext)
assert ctx.jira_key == "PROJ-42"
assert ctx.metadata["sponsor"] == "NIH"
assert "PROJ-42" in ctx.proposal_text
def test_proposal_not_found_returns_empty_context(self):
db = self._setup_db(proposal=None)
ctx = get_proposal_context(uuid4(), db)
# When proposal is not found, returns a safe empty context
assert isinstance(ctx, ProposalContext)
assert ctx.proposal_text == ""
assert ctx.metadata == {}
def test_budget_document_by_role(self):
proposal = _make_mock_proposal()
parent_doc = _make_mock_document()
budget_doc = _make_mock_review_doc(
file_name="project_budget.xlsx",
document_role="BUDGET",
extracted_text="Total: $500k direct costs.",
)
db = self._setup_db(
proposal=proposal,
parent_doc=parent_doc,
manual_docs=[budget_doc],
)
ctx = get_proposal_context(proposal.id, db)
assert "$500k" in ctx.budget_text
# Budget text should also appear in proposal_text (all docs)
assert "$500k" in ctx.proposal_text
def test_budget_document_by_filename(self):
proposal = _make_mock_proposal()
parent_doc = _make_mock_document()
budget_doc = _make_mock_review_doc(
file_name="budget_justification.pdf",
document_role="SUPPORTING", # role is not BUDGET
extracted_text="Budget justification: $200k.",
)
db = self._setup_db(
proposal=proposal,
parent_doc=parent_doc,
manual_docs=[budget_doc],
)
ctx = get_proposal_context(proposal.id, db)
assert "$200k" in ctx.budget_text
def test_foa_document_by_role(self):
proposal = _make_mock_proposal()
parent_doc = _make_mock_document()
foa_doc = _make_mock_review_doc(
file_name="rfa-ai-24-001.html",
document_role="FOA",
extracted_text="This is the funding opportunity announcement.",
)
db = self._setup_db(
proposal=proposal,
parent_doc=parent_doc,
manual_docs=[foa_doc],
)
ctx = get_proposal_context(proposal.id, db)
assert "funding opportunity announcement" in ctx.foa_text
def test_multiple_documents_concatenated(self):
proposal = _make_mock_proposal()
parent_doc = _make_mock_document(semantic_id="PROJ-42")
doc_a = _make_mock_review_doc(
file_name="narrative.pdf",
document_role="SUPPORTING",
extracted_text="Section A content.",
)
doc_b = _make_mock_review_doc(
file_name="abstract.pdf",
document_role="SUPPORTING",
extracted_text="Section B content.",
)
db = self._setup_db(
proposal=proposal,
parent_doc=parent_doc,
manual_docs=[doc_a, doc_b],
)
ctx = get_proposal_context(proposal.id, db)
assert "Section A content" in ctx.proposal_text
assert "Section B content" in ctx.proposal_text
def test_no_documents_returns_minimal_text(self):
proposal = _make_mock_proposal()
# Parent doc exists but has no meaningful content fields
parent_doc = _make_mock_document(
semantic_id=None,
link=None,
doc_metadata=None,
)
parent_doc.primary_owners = None
parent_doc.secondary_owners = None
db = self._setup_db(
proposal=proposal,
parent_doc=parent_doc,
manual_docs=[],
)
ctx = get_proposal_context(proposal.id, db)
# No meaningful content — may contain structural headers but no real data
assert "NIH" not in ctx.proposal_text
assert ctx.budget_text == ""
assert ctx.foa_text == ""
def test_no_parent_doc_still_returns_context(self):
proposal = _make_mock_proposal()
manual_doc = _make_mock_review_doc(
file_name="narrative.pdf",
document_role="SUPPORTING",
extracted_text="Manual upload content.",
)
db = self._setup_db(
proposal=proposal,
parent_doc=None,
manual_docs=[manual_doc],
)
ctx = get_proposal_context(proposal.id, db)
assert "Manual upload content" in ctx.proposal_text
assert ctx.jira_key == ""
assert ctx.metadata == {}
def test_manual_doc_with_no_text_is_skipped(self):
proposal = _make_mock_proposal()
parent_doc = _make_mock_document()
empty_doc = _make_mock_review_doc(
file_name="empty.pdf",
document_role="SUPPORTING",
extracted_text=None,
)
db = self._setup_db(
proposal=proposal,
parent_doc=parent_doc,
manual_docs=[empty_doc],
)
ctx = get_proposal_context(proposal.id, db)
# The empty doc should not contribute to proposal_text
assert "empty.pdf" not in ctx.proposal_text

View File

@@ -1,227 +0,0 @@
"""Unit tests for the FOA fetcher engine component.
Tests cover:
- _determine_domain: opportunity ID prefix -> agency domain mapping
- fetch_foa: search flow with mocked web search provider and crawler
- Graceful failure when no web search provider is configured
- Empty / missing opportunity ID handling
"""
from unittest.mock import MagicMock
from unittest.mock import patch
from uuid import uuid4
import pytest
from onyx.server.features.proposal_review.engine.foa_fetcher import _determine_domain
from onyx.server.features.proposal_review.engine.foa_fetcher import fetch_foa
# =====================================================================
# _determine_domain -- prefix -> domain mapping
# =====================================================================
class TestDetermineDomain:
    """Coverage for _determine_domain's prefix -> agency-domain mapping."""
    @pytest.mark.parametrize(
        "opp_id, expected_domain",
        [
            ("RFA-AI-24-001", "grants.nih.gov"),
            ("PA-24-123", "grants.nih.gov"),
            ("PAR-24-100", "grants.nih.gov"),
            ("R01-AI-12345", "grants.nih.gov"),
            ("R21-GM-67890", "grants.nih.gov"),
            ("U01-CA-11111", "grants.nih.gov"),
            ("NOT-OD-24-100", "grants.nih.gov"),
            ("NSF-24-567", "nsf.gov"),
            ("DE-FOA-0003000", "energy.gov"),
            ("HRSA-25-001", "hrsa.gov"),
            ("W911NF-24-R-0001", "grants.gov"),
            ("FA8750-24-S-0001", "grants.gov"),
            ("N00014-24-S-0001", "grants.gov"),
            ("NOFO-2024-001", "grants.gov"),
        ],
    )
    def test_known_prefixes(self, opp_id, expected_domain):
        # Each recognised agency prefix must resolve to its hosting domain.
        assert _determine_domain(opp_id) == expected_domain
    def test_unknown_prefix_returns_none(self):
        # Unrecognised prefixes have no domain mapping at all.
        result = _determine_domain("UNKNOWN-123")
        assert result is None
    def test_purely_numeric_id_returns_grants_gov(self):
        # IDs that are all digits (ignoring dashes) default to grants.gov.
        result = _determine_domain("12345-67890")
        assert result == "grants.gov"
    def test_case_insensitive_matching(self):
        # Prefix detection must ignore the letter case of the ID.
        for opp_id, domain in (
            ("rfa-ai-24-001", "grants.nih.gov"),
            ("nsf-24-567", "nsf.gov"),
        ):
            assert _determine_domain(opp_id) == domain
    def test_empty_string_returns_none(self):
        # "" is not purely numeric after dash removal, so no domain applies.
        assert _determine_domain("") is None
# =====================================================================
# fetch_foa -- search flow
# =====================================================================
class TestFetchFoa:
    """Tests for fetch_foa with mocked dependencies."""
    def _mock_db_session(self, existing_foa=None):
        """Build a mock db_session that returns existing_foa for the FOA query."""
        db_session = MagicMock()
        query_mock = MagicMock()
        # query().filter(...) chains back to the same mock, so any filter
        # combination resolves to `existing_foa` via .first().
        db_session.query.return_value = query_mock
        query_mock.filter.return_value = query_mock
        query_mock.first.return_value = existing_foa
        return db_session
    def test_empty_opportunity_id_returns_none(self):
        # Blank / whitespace-only opportunity IDs short-circuit to None.
        db = MagicMock()
        assert fetch_foa("", uuid4(), db) is None
        assert fetch_foa("   ", uuid4(), db) is None
    def test_none_opportunity_id_returns_none(self):
        # None is tolerated even though the annotation expects str.
        db = MagicMock()
        assert fetch_foa(None, uuid4(), db) is None  # type: ignore[arg-type]
    def test_existing_foa_is_returned_without_search(self):
        # A previously-stored FOA is served from the DB; no web search runs.
        existing = MagicMock()
        existing.extracted_text = "Previously fetched FOA content."
        db = self._mock_db_session(existing_foa=existing)
        result = fetch_foa("RFA-AI-24-001", uuid4(), db)
        assert result == "Previously fetched FOA content."
    def test_search_flow_fetches_and_saves(self):
        """Full happy-path: search returns a URL, crawler fetches content, doc is saved."""
        # Setup: no existing FOA
        db = self._mock_db_session(existing_foa=None)
        # Mock the web search provider
        search_result = MagicMock()
        search_result.link = "https://grants.nih.gov/foa/RFA-AI-24-001"
        provider = MagicMock()
        provider.search.return_value = [search_result]
        # Mock the crawler
        content = MagicMock()
        content.scrape_successful = True
        content.full_content = "Full FOA text from NIH."
        crawler_instance = MagicMock()
        crawler_instance.contents.return_value = [content]
        # The function does lazy imports, so we patch at the module level
        # where the imports happen
        import_target_provider = (
            "onyx.tools.tool_implementations.web_search.providers.get_default_provider"
        )
        import_target_crawler = (
            "onyx.tools.tool_implementations.open_url.onyx_web_crawler.OnyxWebCrawler"
        )
        with (
            patch(import_target_provider, return_value=provider),
            patch(import_target_crawler, return_value=crawler_instance),
        ):
            result = fetch_foa("RFA-AI-24-001", uuid4(), db)
        assert result == "Full FOA text from NIH."
        # The fetched FOA must be persisted for reuse on later calls.
        db.add.assert_called_once()
        db.flush.assert_called_once()
    def test_no_provider_configured_returns_none(self):
        """If get_default_provider raises or returns None, fetch_foa returns None."""
        db = self._mock_db_session(existing_foa=None)
        import_target = (
            "onyx.tools.tool_implementations.web_search.providers.get_default_provider"
        )
        with patch(import_target, return_value=None):
            result = fetch_foa("RFA-AI-24-001", uuid4(), db)
        assert result is None
    def test_provider_import_failure_returns_none(self):
        """If the web search provider module can't be imported, returns None."""
        db = self._mock_db_session(existing_foa=None)
        import_target = (
            "onyx.tools.tool_implementations.web_search.providers.get_default_provider"
        )
        with patch(import_target, side_effect=ImportError("module not found")):
            result = fetch_foa("RFA-AI-24-001", uuid4(), db)
        assert result is None
    def test_search_returns_no_results(self):
        """If the search returns an empty list, fetch_foa returns None."""
        db = self._mock_db_session(existing_foa=None)
        provider = MagicMock()
        provider.search.return_value = []
        import_target = (
            "onyx.tools.tool_implementations.web_search.providers.get_default_provider"
        )
        with patch(import_target, return_value=provider):
            result = fetch_foa("NSF-24-567", uuid4(), db)
        assert result is None
    def test_crawler_failure_returns_none(self):
        """If the crawler raises an exception, fetch_foa returns None."""
        db = self._mock_db_session(existing_foa=None)
        search_result = MagicMock()
        search_result.link = "https://nsf.gov/foa/24-567"
        provider = MagicMock()
        provider.search.return_value = [search_result]
        import_target_provider = (
            "onyx.tools.tool_implementations.web_search.providers.get_default_provider"
        )
        import_target_crawler = (
            "onyx.tools.tool_implementations.open_url.onyx_web_crawler.OnyxWebCrawler"
        )
        with (
            patch(import_target_provider, return_value=provider),
            patch(import_target_crawler, side_effect=Exception("crawl failed")),
        ):
            result = fetch_foa("NSF-24-567", uuid4(), db)
        assert result is None
    def test_scrape_unsuccessful_returns_none(self):
        """If the crawler succeeds but scrape_successful is False, returns None."""
        db = self._mock_db_session(existing_foa=None)
        search_result = MagicMock()
        search_result.link = "https://nsf.gov/foa/24-567"
        provider = MagicMock()
        provider.search.return_value = [search_result]
        # Crawler returns a result object, but the scrape itself failed.
        content = MagicMock()
        content.scrape_successful = False
        content.full_content = None
        crawler_instance = MagicMock()
        crawler_instance.contents.return_value = [content]
        import_target_provider = (
            "onyx.tools.tool_implementations.web_search.providers.get_default_provider"
        )
        import_target_crawler = (
            "onyx.tools.tool_implementations.open_url.onyx_web_crawler.OnyxWebCrawler"
        )
        with (
            patch(import_target_provider, return_value=provider),
            patch(import_target_crawler, return_value=crawler_instance),
        ):
            result = fetch_foa("NSF-24-567", uuid4(), db)
        assert result is None

View File

@@ -1,354 +0,0 @@
"""Unit tests for the rule evaluator engine component.
Tests cover:
- Template variable substitution (_fill_template)
- LLM response parsing (_parse_llm_response)
- Malformed / missing-field responses
- Markdown code fence stripping
- Verdict and confidence validation/normalization
- Token usage extraction
"""
import json
from unittest.mock import MagicMock
from unittest.mock import patch
import pytest
from onyx.server.features.proposal_review.engine.rule_evaluator import (
_extract_token_usage,
)
from onyx.server.features.proposal_review.engine.rule_evaluator import _fill_template
from onyx.server.features.proposal_review.engine.rule_evaluator import (
_parse_llm_response,
)
from onyx.server.features.proposal_review.engine.rule_evaluator import evaluate_rule
# =====================================================================
# _fill_template -- variable substitution
# =====================================================================
class TestFillTemplate:
    """Tests for _fill_template (prompt variable substitution)."""
    def test_replaces_proposal_text(self, make_proposal_context):
        # {{proposal_text}} expands to the context's proposal body.
        ctx = make_proposal_context(proposal_text="My great proposal.")
        result = _fill_template("Review: {{proposal_text}}", ctx)
        assert result == "Review: My great proposal."
    def test_replaces_budget_text(self, make_proposal_context):
        ctx = make_proposal_context(budget_text="$100k total")
        result = _fill_template("Budget info: {{budget_text}}", ctx)
        assert result == "Budget info: $100k total"
    def test_replaces_foa_text(self, make_proposal_context):
        ctx = make_proposal_context(foa_text="NSF solicitation 24-567")
        result = _fill_template("FOA: {{foa_text}}", ctx)
        assert result == "FOA: NSF solicitation 24-567"
    def test_replaces_jira_key(self, make_proposal_context):
        ctx = make_proposal_context(jira_key="PROJ-42")
        result = _fill_template("Ticket: {{jira_key}}", ctx)
        assert result == "Ticket: PROJ-42"
    def test_replaces_metadata_as_json(self, make_proposal_context):
        # Bare {{metadata}} serialises the whole metadata dict as JSON.
        ctx = make_proposal_context(metadata={"sponsor": "NIH", "pi": "Dr. Smith"})
        result = _fill_template("Meta: {{metadata}}", ctx)
        # Should be valid JSON
        parsed = json.loads(result.replace("Meta: ", ""))
        assert parsed["sponsor"] == "NIH"
        assert parsed["pi"] == "Dr. Smith"
    def test_replaces_metadata_dot_field(self, make_proposal_context):
        # {{metadata.<key>}} drills into a single metadata entry.
        ctx = make_proposal_context(
            metadata={"sponsor": "NIH", "deadline": "2025-01-15"}
        )
        result = _fill_template(
            "Sponsor is {{metadata.sponsor}}, due {{metadata.deadline}}", ctx
        )
        assert result == "Sponsor is NIH, due 2025-01-15"
    def test_metadata_dot_field_with_dict_value(self, make_proposal_context):
        # Nested dict values are serialised as JSON when substituted.
        ctx = make_proposal_context(
            metadata={"budget_detail": {"direct": 100, "indirect": 50}}
        )
        result = _fill_template("Details: {{metadata.budget_detail}}", ctx)
        parsed = json.loads(result.replace("Details: ", ""))
        assert parsed == {"direct": 100, "indirect": 50}
    def test_metadata_dot_field_missing_returns_empty(self, make_proposal_context):
        # A missing metadata key substitutes the empty string, not an error.
        ctx = make_proposal_context(metadata={"sponsor": "NIH"})
        result = _fill_template("Agency: {{metadata.agency}}", ctx)
        assert result == "Agency: "
    def test_replaces_all_placeholders_in_one_template(self, make_proposal_context):
        # Every placeholder kind can coexist in a single template string.
        ctx = make_proposal_context(
            proposal_text="proposal body",
            budget_text="budget body",
            foa_text="foa body",
            jira_key="PROJ-99",
            metadata={"sponsor": "NSF"},
        )
        template = (
            "{{jira_key}}: {{proposal_text}} | "
            "Budget: {{budget_text}} | FOA: {{foa_text}} | "
            "Sponsor: {{metadata.sponsor}} | All: {{metadata}}"
        )
        result = _fill_template(template, ctx)
        assert "PROJ-99" in result
        assert "proposal body" in result
        assert "budget body" in result
        assert "foa body" in result
        assert "NSF" in result
    def test_none_values_replaced_with_empty_string(self, make_proposal_context):
        # None context fields must render as "" rather than "None".
        ctx = make_proposal_context(
            proposal_text=None,  # type: ignore[arg-type]
            budget_text=None,  # type: ignore[arg-type]
            foa_text=None,  # type: ignore[arg-type]
            jira_key=None,  # type: ignore[arg-type]
        )
        result = _fill_template(
            "{{proposal_text}}|{{budget_text}}|{{foa_text}}|{{jira_key}}", ctx
        )
        assert result == "|||"
# =====================================================================
# _parse_llm_response -- JSON parsing and validation
# =====================================================================
class TestParseLLMResponse:
    """Tests for _parse_llm_response (JSON parsing / verdict validation)."""
    def test_parses_well_formed_json(self, well_formed_llm_json):
        # Well-formed JSON passes through with all five fields intact.
        result = _parse_llm_response(well_formed_llm_json)
        assert result["verdict"] == "PASS"
        assert result["confidence"] == "HIGH"
        assert result["evidence"] == "Section 4.2 states the budget is $500k."
        assert result["explanation"] == "The proposal meets the budget cap requirement."
        assert result["suggested_action"] is None
    def test_strips_markdown_json_fence(self):
        # LLMs often wrap output in ```json fences; these must be stripped.
        inner = json.dumps(
            {
                "verdict": "FAIL",
                "confidence": "MEDIUM",
                "evidence": "x",
                "explanation": "y",
                "suggested_action": "Fix it.",
            }
        )
        raw = f"```json\n{inner}\n```"
        result = _parse_llm_response(raw)
        assert result["verdict"] == "FAIL"
        assert result["confidence"] == "MEDIUM"
        assert result["suggested_action"] == "Fix it."
    def test_strips_bare_code_fence(self):
        # A fence without a language tag is stripped as well.
        inner = json.dumps(
            {
                "verdict": "FLAG",
                "confidence": "LOW",
                "evidence": "e",
                "explanation": "exp",
                "suggested_action": None,
            }
        )
        raw = f"```\n{inner}\n```"
        result = _parse_llm_response(raw)
        assert result["verdict"] == "FLAG"
    def test_malformed_json_returns_needs_review(self):
        # Unparseable output degrades to a NEEDS_REVIEW / LOW result.
        result = _parse_llm_response("this is not json at all")
        assert result["verdict"] == "NEEDS_REVIEW"
        assert result["confidence"] == "LOW"
        assert result["evidence"] is None
        assert "Failed to parse" in result["explanation"]
        assert result["suggested_action"] is not None
    def test_invalid_verdict_normalised_to_needs_review(self):
        # Verdicts outside the allowed set collapse to NEEDS_REVIEW.
        raw = json.dumps(
            {
                "verdict": "MAYBE",
                "confidence": "HIGH",
                "evidence": "e",
                "explanation": "x",
                "suggested_action": None,
            }
        )
        result = _parse_llm_response(raw)
        assert result["verdict"] == "NEEDS_REVIEW"
    def test_invalid_confidence_normalised_to_low(self):
        # Confidence values outside the allowed set collapse to LOW.
        raw = json.dumps(
            {
                "verdict": "PASS",
                "confidence": "VERY_HIGH",
                "evidence": "e",
                "explanation": "x",
                "suggested_action": None,
            }
        )
        result = _parse_llm_response(raw)
        assert result["confidence"] == "LOW"
    def test_missing_verdict_defaults_to_needs_review(self):
        raw = json.dumps(
            {
                "confidence": "HIGH",
                "evidence": "e",
                "explanation": "x",
                "suggested_action": None,
            }
        )
        result = _parse_llm_response(raw)
        assert result["verdict"] == "NEEDS_REVIEW"
    def test_missing_confidence_defaults_to_low(self):
        raw = json.dumps(
            {
                "verdict": "PASS",
                "evidence": "e",
                "explanation": "x",
                "suggested_action": None,
            }
        )
        result = _parse_llm_response(raw)
        assert result["confidence"] == "LOW"
    @pytest.mark.parametrize(
        "verdict", ["PASS", "FAIL", "FLAG", "NEEDS_REVIEW", "NOT_APPLICABLE"]
    )
    def test_all_valid_verdicts_accepted(self, verdict):
        # Every member of the allowed verdict set is preserved verbatim.
        raw = json.dumps(
            {
                "verdict": verdict,
                "confidence": "HIGH",
                "evidence": "e",
                "explanation": "x",
                "suggested_action": None,
            }
        )
        result = _parse_llm_response(raw)
        assert result["verdict"] == verdict
    def test_verdict_case_insensitive(self):
        # Lowercase verdict/confidence values are upper-cased on parse.
        raw = json.dumps(
            {
                "verdict": "pass",
                "confidence": "high",
                "evidence": "e",
                "explanation": "x",
                "suggested_action": None,
            }
        )
        result = _parse_llm_response(raw)
        assert result["verdict"] == "PASS"
        assert result["confidence"] == "HIGH"
    def test_whitespace_around_json_is_tolerated(self):
        # Leading/trailing whitespace around the JSON body is ignored.
        inner = json.dumps(
            {
                "verdict": "PASS",
                "confidence": "HIGH",
                "evidence": "e",
                "explanation": "x",
                "suggested_action": None,
            }
        )
        raw = f"   \n  {inner}  \n  "
        result = _parse_llm_response(raw)
        assert result["verdict"] == "PASS"
# =====================================================================
# _extract_token_usage
# =====================================================================
class TestExtractTokenUsage:
    """Tests for _extract_token_usage (best-effort token extraction)."""
    def test_extracts_total_tokens(self):
        # When total_tokens is present it is used directly.
        resp = MagicMock()
        resp.usage.total_tokens = 1234
        assert _extract_token_usage(resp) == 1234
    def test_sums_prompt_and_completion_tokens_when_no_total(self):
        # Falls back to prompt + completion when no total is reported.
        resp = MagicMock()
        resp.usage.total_tokens = None
        resp.usage.prompt_tokens = 100
        resp.usage.completion_tokens = 50
        assert _extract_token_usage(resp) == 150
    def test_returns_none_when_no_usage(self):
        # A response object lacking a `usage` attribute yields None.
        resp = MagicMock(spec=[])
        assert _extract_token_usage(resp) is None
    def test_returns_none_when_usage_is_none(self):
        # An explicit usage=None also yields None.
        resp = MagicMock()
        resp.usage = None
        assert _extract_token_usage(resp) is None
# =====================================================================
# evaluate_rule -- integration of template + LLM call + parsing
# =====================================================================
class TestEvaluateRule:
    """Tests for the top-level evaluate_rule function with mocked LLM."""
    @patch("onyx.server.features.proposal_review.engine.rule_evaluator.get_default_llm")
    @patch(
        "onyx.server.features.proposal_review.engine.rule_evaluator.llm_response_to_string"
    )
    def test_successful_evaluation(
        self, mock_to_string, mock_get_llm, make_rule, make_proposal_context
    ):
        # Happy path: the LLM returns valid JSON and the result carries
        # the model name and token count alongside the parsed verdict.
        llm_response_json = json.dumps(
            {
                "verdict": "PASS",
                "confidence": "HIGH",
                "evidence": "Found in section 3.",
                "explanation": "Meets requirement.",
                "suggested_action": None,
            }
        )
        mock_to_string.return_value = llm_response_json
        mock_llm = MagicMock()
        mock_llm.config.model_name = "gpt-4o"
        mock_llm.invoke.return_value = MagicMock(usage=MagicMock(total_tokens=500))
        mock_get_llm.return_value = mock_llm
        rule = make_rule(prompt_template="Check: {{proposal_text}}")
        ctx = make_proposal_context(proposal_text="Grant text here.")
        result = evaluate_rule(rule, ctx)
        assert result["verdict"] == "PASS"
        assert result["confidence"] == "HIGH"
        assert result["llm_model"] == "gpt-4o"
        assert result["llm_tokens_used"] == 500
    @patch("onyx.server.features.proposal_review.engine.rule_evaluator.get_default_llm")
    def test_llm_failure_returns_needs_review(
        self, mock_get_llm, make_rule, make_proposal_context
    ):
        # If the LLM cannot be obtained at all, evaluate_rule degrades to
        # a NEEDS_REVIEW result instead of raising.
        mock_get_llm.side_effect = RuntimeError("API key expired")
        rule = make_rule()
        ctx = make_proposal_context()
        result = evaluate_rule(rule, ctx)
        assert result["verdict"] == "NEEDS_REVIEW"
        assert result["confidence"] == "LOW"
        assert "LLM evaluation failed" in result["explanation"]
        assert result["llm_model"] is None
        assert result["llm_tokens_used"] is None

View File

@@ -1,3 +1,17 @@
# OAuth callback page must be served by the web server (Next.js),
# not the MCP server. Exact match takes priority over the regex below.
location = /mcp/oauth/callback {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header Host $host;
proxy_http_version 1.1;
proxy_redirect off;
proxy_pass http://web_server;
}
# MCP Server - Model Context Protocol for LLM integrations
# Match /mcp, /mcp/, or /mcp/* but NOT /mcpserver, /mcpapi, etc.
location ~ ^/mcp(/.*)?$ {

View File

@@ -5,7 +5,7 @@ home: https://www.onyx.app/
sources:
- "https://github.com/onyx-dot-app/onyx"
type: application
version: 0.4.42
version: 0.4.43
appVersion: latest
annotations:
category: Productivity

View File

@@ -42,6 +42,22 @@ data:
client_max_body_size 5G;
{{- if .Values.mcpServer.enabled }}
# OAuth callback page must be served by the web server (Next.js),
# not the MCP server. Exact match takes priority over the regex below.
location = /mcp/oauth/callback {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header Host $host;
proxy_http_version 1.1;
proxy_redirect off;
proxy_connect_timeout {{ .Values.nginx.timeouts.connect }}s;
proxy_send_timeout {{ .Values.nginx.timeouts.send }}s;
proxy_read_timeout {{ .Values.nginx.timeouts.read }}s;
proxy_pass http://web_server;
}
# MCP Server - Model Context Protocol for LLM integrations
# Match /mcp, /mcp/, or /mcp/* but NOT /mcpserver, /mcpapi, etc.
location ~ ^/mcp(/.*)?$ {

View File

@@ -296,7 +296,7 @@ nginx:
# The ingress-nginx subchart doesn't auto-detect our custom ConfigMap changes.
# Workaround: Helm upgrade will restart if the following annotation value changes.
podAnnotations:
onyx.app/nginx-config-version: "3"
onyx.app/nginx-config-version: "4"
# Propagate DOMAIN into nginx so server_name continues to use the same env var
extraEnvs:

View File

@@ -145,7 +145,6 @@ export function Table<TData>(props: DataTableProps<TData>) {
pageSize,
initialSorting,
initialColumnVisibility,
onColumnVisibilityChange,
initialRowSelection,
initialViewSelected,
draggable,
@@ -224,7 +223,6 @@ export function Table<TData>(props: DataTableProps<TData>) {
pageSize: effectivePageSize,
initialSorting,
initialColumnVisibility,
onColumnVisibilityChange,
initialRowSelection,
initialViewSelected,
getRowId,

View File

@@ -1,7 +1,7 @@
"use client";
"use no memo";
import { useState, useEffect, useMemo, useRef, useCallback } from "react";
import { useState, useEffect, useMemo, useRef } from "react";
import {
useReactTable,
getCoreRowModel,
@@ -103,8 +103,6 @@ interface UseDataTableOptions<TData extends RowData> {
initialSorting?: SortingState;
/** Initial column visibility state. @default {} */
initialColumnVisibility?: VisibilityState;
/** Called when column visibility changes. */
onColumnVisibilityChange?: (visibility: VisibilityState) => void;
/** Initial row selection state. Keys are row IDs (from `getRowId`), values are `true`. @default {} */
initialRowSelection?: RowSelectionState;
/** When true AND `initialRowSelection` is non-empty, start in view-selected mode (filtered to selected rows). @default false */
@@ -201,7 +199,6 @@ export default function useDataTable<TData extends RowData>(
columnResizeMode = "onChange",
initialSorting = [],
initialColumnVisibility = {},
onColumnVisibilityChange: onColumnVisibilityChangeProp,
initialRowSelection = {},
initialViewSelected = false,
getRowId,
@@ -218,19 +215,9 @@ export default function useDataTable<TData extends RowData>(
const [rowSelection, setRowSelection] =
useState<RowSelectionState>(initialRowSelection);
const [columnSizing, setColumnSizing] = useState<ColumnSizingState>({});
const [columnVisibility, setColumnVisibilityRaw] = useState<VisibilityState>(
const [columnVisibility, setColumnVisibility] = useState<VisibilityState>(
initialColumnVisibility
);
const setColumnVisibility: typeof setColumnVisibilityRaw = useCallback(
(updater) => {
setColumnVisibilityRaw((prev) => {
const next = typeof updater === "function" ? updater(prev) : updater;
onColumnVisibilityChangeProp?.(next);
return next;
});
},
[onColumnVisibilityChangeProp]
);
const [pagination, setPagination] = useState<PaginationState>({
pageIndex: 0,
pageSize: pageSizeOption,

View File

@@ -146,8 +146,6 @@ export interface DataTableProps<TData> {
initialSorting?: SortingState;
/** Initial column visibility state. */
initialColumnVisibility?: VisibilityState;
/** Called when column visibility changes. Receives the full visibility state. */
onColumnVisibilityChange?: (visibility: VisibilityState) => void;
/** Initial row selection state. Keys are row IDs (from `getRowId`), values are `true`. */
initialRowSelection?: Record<string, boolean>;
/** When true AND `initialRowSelection` is non-empty, start in view-selected mode. @default false */

8
web/package-lock.json generated
View File

@@ -55,7 +55,7 @@
"js-cookie": "^3.0.5",
"katex": "^0.16.38",
"linguist-languages": "^9.3.1",
"lodash": "^4.17.23",
"lodash": "^4.18.1",
"lowlight": "^3.3.0",
"lucide-react": "^0.454.0",
"mdast-util-find-and-replace": "^3.0.1",
@@ -12926,9 +12926,9 @@
}
},
"node_modules/lodash": {
"version": "4.17.23",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz",
"integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==",
"version": "4.18.1",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.18.1.tgz",
"integrity": "sha512-dMInicTPVE8d1e5otfwmmjlxkZoUpiVLwyeTdUsi/Caj/gfzzblBcCE5sRHV/AsjuCmxWrte2TNGSYuCeCq+0Q==",
"license": "MIT"
},
"node_modules/lodash-es": {

View File

@@ -73,7 +73,7 @@
"js-cookie": "^3.0.5",
"katex": "^0.16.38",
"linguist-languages": "^9.3.1",
"lodash": "^4.17.23",
"lodash": "^4.18.1",
"lowlight": "^3.3.0",
"lucide-react": "^0.454.0",
"mdast-util-find-and-replace": "^3.0.1",

View File

@@ -1,373 +0,0 @@
"use client";
import React, { useState, useRef } from "react";
import { Text, Tag } from "@opal/components";
import { Button } from "@opal/components/buttons/button/components";
import { SvgUploadCloud } from "@opal/icons";
import { IllustrationContent } from "@opal/layouts";
import Modal from "@/refresh-components/Modal";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import Checkbox from "@/refresh-components/inputs/Checkbox";
import { toast } from "@/hooks/useToast";
import type { RuleResponse } from "@/app/admin/proposal-review/interfaces";
import { RULE_TYPE_LABELS } from "@/app/admin/proposal-review/interfaces";
interface ImportFlowProps {
open: boolean;
onClose: () => void;
rulesetId: string;
onImportComplete: () => void;
}
type ImportStep = "upload" | "processing" | "review";
function ImportFlow({
open,
onClose,
rulesetId,
onImportComplete,
}: ImportFlowProps) {
const [step, setStep] = useState<ImportStep>("upload");
const [importedRules, setImportedRules] = useState<RuleResponse[]>([]);
const [selectedRuleIds, setSelectedRuleIds] = useState<Set<string>>(
new Set()
);
const [expandedRuleId, setExpandedRuleId] = useState<string | null>(null);
const [saving, setSaving] = useState(false);
const fileInputRef = useRef<HTMLInputElement>(null);
// Restore the import wizard to its initial "upload" state.
function handleReset() {
  setStep("upload");
  setImportedRules([]);
  setSelectedRuleIds(new Set());
  setExpandedRuleId(null);
  setSaving(false);
}
// Reset local wizard state, then notify the parent to close the modal.
function handleClose() {
  handleReset();
  onClose();
}
// Upload the chosen checklist file to the import endpoint and advance to
// the review step with the generated rules; on failure, show a toast and
// fall back to the upload step.
async function handleFileUpload(e: React.ChangeEvent<HTMLInputElement>) {
  const file = e.target.files?.[0];
  if (!file) return;
  setStep("processing");
  try {
    const formData = new FormData();
    formData.append("file", file);
    const res = await fetch(
      `/api/proposal-review/rulesets/${rulesetId}/import`,
      {
        method: "POST",
        body: formData,
      }
    );
    if (!res.ok) {
      const err = await res.json();
      throw new Error(err.detail || "Failed to import checklist");
    }
    const data = await res.json();
    setImportedRules(data.rules);
    // Pre-select every generated rule for the review step.
    setSelectedRuleIds(new Set(data.rules.map((r: RuleResponse) => r.id)));
    setStep("review");
  } catch (err) {
    toast.error(err instanceof Error ? err.message : "Import failed");
    setStep("upload");
  } finally {
    // Clear the input so selecting the same file again re-fires onChange.
    if (fileInputRef.current) {
      fileInputRef.current.value = "";
    }
  }
}
// Flip a rule's membership in the selected set, producing a new Set
// instance so React registers the state change.
function toggleRule(ruleId: string) {
  setSelectedRuleIds((current) => {
    const updated = new Set(current);
    if (!updated.delete(ruleId)) {
      // delete() returned false => the rule was not selected; select it.
      updated.add(ruleId);
    }
    return updated;
  });
}
// Mark every imported rule as selected.
function handleSelectAll() {
  setSelectedRuleIds(new Set(importedRules.map((r) => r.id)));
}
// Clear the selection entirely.
function handleDeselectAll() {
  setSelectedRuleIds(new Set());
}
// Persist the user's selection: activate the selected rules, delete the
// unselected ones, then notify the parent and close the wizard. Errors
// from either bulk-update call surface as a toast.
async function handleAccept() {
  if (selectedRuleIds.size === 0) return;
  setSaving(true);
  try {
    const unselectedIds = importedRules
      .filter((r) => !selectedRuleIds.has(r.id))
      .map((r) => r.id);
    // Activate selected rules
    const activateRes = await fetch(
      `/api/proposal-review/rulesets/${rulesetId}/rules/bulk-update`,
      {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
          action: "activate",
          rule_ids: Array.from(selectedRuleIds),
        }),
      }
    );
    if (!activateRes.ok) {
      const err = await activateRes.json();
      throw new Error(err.detail || "Failed to activate rules");
    }
    // Delete unselected rules
    if (unselectedIds.length > 0) {
      const deleteRes = await fetch(
        `/api/proposal-review/rulesets/${rulesetId}/rules/bulk-update`,
        {
          method: "POST",
          headers: { "Content-Type": "application/json" },
          body: JSON.stringify({
            action: "delete",
            rule_ids: unselectedIds,
          }),
        }
      );
      if (!deleteRes.ok) {
        const err = await deleteRes.json();
        throw new Error(err.detail || "Failed to clean up unselected rules");
      }
    }
    toast.success(
      `${selectedRuleIds.size} rule${
        selectedRuleIds.size === 1 ? "" : "s"
      } imported.`
    );
    onImportComplete();
    handleClose();
  } catch (err) {
    toast.error(err instanceof Error ? err.message : "Failed to save rules");
  } finally {
    setSaving(false);
  }
}
if (!open) return null;
return (
<Modal open onOpenChange={(isOpen) => !isOpen && handleClose()}>
<Modal.Content width="sm" height="lg">
<Modal.Header
icon={SvgUploadCloud}
title="Import from Checklist"
description="Upload a checklist document to generate rules automatically."
onClose={handleClose}
/>
<Modal.Body>
{step === "upload" && (
<>
<IllustrationContent
illustration={SvgUploadCloud}
title="Upload a checklist document (.xlsx, .docx, or .pdf)"
description="The document will be analyzed to extract review rules."
/>
<input
ref={fileInputRef}
type="file"
accept=".xlsx,.docx,.pdf"
onChange={handleFileUpload}
className="hidden"
/>
<div className="flex justify-center w-full">
<Button
icon={SvgUploadCloud}
onClick={() => fileInputRef.current?.click()}
>
Choose File
</Button>
</div>
</>
)}
{step === "processing" && (
<>
<div className="flex justify-center w-full">
<SimpleLoader />
</div>
<IllustrationContent
title="Analyzing document and generating rules..."
description="This may take up to a minute."
/>
</>
)}
{step === "review" && (
<div className="flex flex-col gap-4">
{importedRules.length === 0 ? (
<div className="flex flex-col items-center gap-2 py-8">
<Text font="main-ui-body" color="text-03">
No rules were generated from the uploaded document.
</Text>
</div>
) : (
<>
<div className="flex items-center justify-between">
<Text font="main-ui-body" color="text-03">
{`${importedRules.length} rules generated - ${selectedRuleIds.size} selected`}
</Text>
<div className="flex gap-2">
<Button
prominence="tertiary"
size="sm"
onClick={handleSelectAll}
>
Select All
</Button>
<Button
prominence="tertiary"
size="sm"
onClick={handleDeselectAll}
>
Deselect All
</Button>
</div>
</div>
<div className="flex flex-col border border-border-02 rounded-08 overflow-hidden">
{/* Header row */}
<div className="flex items-center gap-3 px-4 py-2 bg-background-tint-01 border-b border-border-02">
<div className="w-6" />
<div className="flex-1">
<Text font="main-ui-action" color="text-03">
Name
</Text>
</div>
<div className="w-32">
<Text font="main-ui-action" color="text-03">
Type
</Text>
</div>
<div className="w-40">
<Text font="main-ui-action" color="text-03">
Category
</Text>
</div>
</div>
{/* Rule rows */}
<div className="max-h-[400px] overflow-y-auto">
{importedRules.map((rule) => (
<React.Fragment key={rule.id}>
<div
className="flex items-center gap-3 px-4 py-3 border-b border-border-01 cursor-pointer hover:bg-background-tint-01"
onClick={() =>
setExpandedRuleId(
expandedRuleId === rule.id ? null : rule.id
)
}
>
<div
className="w-6"
onClick={(e) => e.stopPropagation()}
>
<Checkbox
checked={selectedRuleIds.has(rule.id)}
onCheckedChange={() => toggleRule(rule.id)}
/>
</div>
<div className="flex-1">
<Text font="main-ui-body" color="text-04">
{rule.name}
</Text>
</div>
<div className="w-32">
<Tag
title={RULE_TYPE_LABELS[rule.rule_type]}
color="gray"
/>
</div>
<div className="w-40">
<Text font="secondary-body" color="text-03">
{rule.category || "-"}
</Text>
</div>
</div>
{expandedRuleId === rule.id && (
<div className="flex flex-col gap-2 px-4 py-3 bg-background-neutral-01 border-b border-border-01">
{rule.description && (
<div>
<Text font="main-ui-action" color="text-03">
Description
</Text>
<Text
font="secondary-body"
color="text-03"
as="p"
>
{rule.description}
</Text>
</div>
)}
<div>
<Text font="main-ui-action" color="text-03">
Prompt Template
</Text>
<pre className="p-2 bg-background-neutral-02 rounded-08 text-sm font-mono text-text-02 whitespace-pre-wrap overflow-x-auto">
{rule.prompt_template}
</pre>
</div>
</div>
)}
</React.Fragment>
))}
</div>
</div>
</>
)}
</div>
)}
</Modal.Body>
{step === "review" && importedRules.length > 0 && (
<Modal.Footer>
<Button
prominence="secondary"
onClick={handleClose}
disabled={saving}
>
Discard
</Button>
<Button
onClick={handleAccept}
disabled={saving || selectedRuleIds.size === 0}
>
{saving
? "Saving..."
: `Accept ${selectedRuleIds.size} Rule${
selectedRuleIds.size === 1 ? "" : "s"
}`}
</Button>
</Modal.Footer>
)}
</Modal.Content>
</Modal>
);
}
export default ImportFlow;

View File

@@ -1,359 +0,0 @@
"use client";
import { Form, Formik } from "formik";
import { Button, Text } from "@opal/components";
import { SvgEdit, SvgPlus } from "@opal/icons";
import InputTypeIn from "@/refresh-components/inputs/InputTypeIn";
import InputTextArea from "@/refresh-components/inputs/InputTextArea";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import { toast } from "@/hooks/useToast";
import { FormikField } from "@/refresh-components/form/FormikField";
import Modal from "@/refresh-components/Modal";
import { Vertical as VerticalInput } from "@/layouts/input-layouts";
import type {
RuleResponse,
RuleCreate,
RuleUpdate,
RuleType,
RuleIntent,
RuleAuthority,
} from "@/app/admin/proposal-review/interfaces";
import {
RULE_TYPE_LABELS,
RULE_INTENT_LABELS,
RULE_AUTHORITY_LABELS,
} from "@/app/admin/proposal-review/interfaces";
/** Props for the add/edit rule modal. */
interface RuleEditorProps {
open: boolean;
onClose: () => void;
// Receives the normalized payload; awaited before the modal closes.
onSave: (rule: RuleCreate | RuleUpdate) => Promise<void>;
// When set, the form is pre-filled and submit acts as an update.
existingRule?: RuleResponse | null;
}
/** Formik value shape; selects are string-backed ("none", "yes"/"no"). */
interface RuleFormValues {
name: string;
description: string;
category: string;
rule_type: RuleType;
rule_intent: RuleIntent;
authority: RuleAuthority | "none";
is_hard_stop: string; // "yes" | "no"
prompt_template: string;
}
/**
 * Modal form for creating a new rule or editing an existing one.
 *
 * Renders nothing while `open` is false. With `existingRule` the form is
 * seeded from it (update flow); otherwise defaults are used (create flow).
 * On submit the string-backed select values are normalized into the API
 * payload shape and passed to `onSave`, which is awaited before closing.
 * Outside-click and Escape dismissal are disabled to avoid losing edits.
 */
function RuleEditor({ open, onClose, onSave, existingRule }: RuleEditorProps) {
if (!open) return null;
// Seed the form from the existing rule (edit) or sensible defaults (create).
const initialValues: RuleFormValues = existingRule
? {
name: existingRule.name,
description: existingRule.description || "",
category: existingRule.category || "",
rule_type: existingRule.rule_type,
rule_intent: existingRule.rule_intent,
authority: existingRule.authority || "none",
is_hard_stop: existingRule.is_hard_stop ? "yes" : "no",
prompt_template: existingRule.prompt_template,
}
: {
name: "",
description: "",
category: "",
rule_type: "DOCUMENT_CHECK" as RuleType,
rule_intent: "CHECK" as RuleIntent,
authority: "none" as const,
is_hard_stop: "no",
prompt_template: "",
};
return (
<Modal open onOpenChange={(isOpen) => !isOpen && onClose()}>
<Modal.Content
width="md"
height="lg"
onPointerDownOutside={(e) => e.preventDefault()}
onEscapeKeyDown={(e) => e.preventDefault()}
>
<Modal.Header
icon={existingRule ? SvgEdit : SvgPlus}
title={existingRule ? "Edit Rule" : "Add Rule"}
description={
existingRule
? "Update the rule configuration."
: "Define a new rule for this ruleset."
}
onClose={onClose}
/>
<Formik
initialValues={initialValues}
onSubmit={async (values, { setSubmitting }) => {
setSubmitting(true);
try {
// Normalize select-backed strings into the API payload shape.
const ruleData = {
name: values.name.trim(),
description: values.description.trim() || undefined,
category: values.category.trim() || undefined,
rule_type: values.rule_type,
rule_intent: values.rule_intent,
prompt_template: values.prompt_template,
authority:
values.authority === "none"
? null
: (values.authority as RuleAuthority),
is_hard_stop: values.is_hard_stop === "yes",
};
await onSave(ruleData);
onClose();
} catch (err) {
toast.error(
err instanceof Error ? err.message : "Failed to save rule"
);
} finally {
setSubmitting(false);
}
}}
>
{({ isSubmitting, values }) => (
<Form className="w-full">
<Modal.Body>
<VerticalInput
name="name"
title="Name"
nonInteractive
sizePreset="main-ui"
>
<FormikField<string>
name="name"
render={(field, helper) => (
<InputTypeIn
{...field}
placeholder="Rule name"
onClear={() => helper.setValue("")}
showClearButton={false}
/>
)}
/>
</VerticalInput>
<VerticalInput
name="description"
title="Description"
nonInteractive
sizePreset="main-ui"
>
<FormikField<string>
name="description"
render={(field, helper) => (
<InputTypeIn
{...field}
placeholder="Brief description of what this rule checks"
onClear={() => helper.setValue("")}
showClearButton={false}
/>
)}
/>
</VerticalInput>
<VerticalInput
name="category"
title="Category"
nonInteractive
sizePreset="main-ui"
>
<FormikField<string>
name="category"
render={(field, helper) => (
<InputTypeIn
{...field}
placeholder="e.g., IR-2: Regulatory Compliance"
onClear={() => helper.setValue("")}
showClearButton={false}
/>
)}
/>
</VerticalInput>
{/* Rule type + intent side by side */}
<div className="flex gap-4 w-full">
<div className="flex-1 min-w-0">
<VerticalInput
name="rule_type"
title="Rule Type"
nonInteractive
sizePreset="main-ui"
>
<FormikField<string>
name="rule_type"
render={(field, helper) => (
<InputSelect
value={field.value}
onValueChange={(v) => helper.setValue(v)}
>
<InputSelect.Trigger placeholder="Select type" />
<InputSelect.Content>
{Object.entries(RULE_TYPE_LABELS).map(
([key, label]) => (
<InputSelect.Item key={key} value={key}>
{label}
</InputSelect.Item>
)
)}
</InputSelect.Content>
</InputSelect>
)}
/>
</VerticalInput>
</div>
<div className="flex-1">
<VerticalInput
name="rule_intent"
title="Intent"
nonInteractive
sizePreset="main-ui"
>
<FormikField<string>
name="rule_intent"
render={(field, helper) => (
<InputSelect
value={field.value}
onValueChange={(v) => helper.setValue(v)}
>
<InputSelect.Trigger placeholder="Select intent" />
<InputSelect.Content>
{Object.entries(RULE_INTENT_LABELS).map(
([key, label]) => (
<InputSelect.Item key={key} value={key}>
{label}
</InputSelect.Item>
)
)}
</InputSelect.Content>
</InputSelect>
)}
/>
</VerticalInput>
</div>
</div>
{/* Authority + hard-stop side by side */}
<div className="flex gap-4 w-full">
<div className="flex-1 min-w-0">
<VerticalInput
name="authority"
title="Authority"
nonInteractive
sizePreset="main-ui"
>
<FormikField<string>
name="authority"
render={(field, helper) => (
<InputSelect
value={field.value}
onValueChange={(v) => helper.setValue(v)}
>
<InputSelect.Trigger placeholder="Select authority" />
<InputSelect.Content>
<InputSelect.Item value="none">
None
</InputSelect.Item>
{Object.entries(RULE_AUTHORITY_LABELS).map(
([key, label]) => (
<InputSelect.Item key={key} value={key}>
{label}
</InputSelect.Item>
)
)}
</InputSelect.Content>
</InputSelect>
)}
/>
</VerticalInput>
</div>
<div className="flex-1">
<VerticalInput
name="is_hard_stop"
title="Hard Stop"
nonInteractive
sizePreset="main-ui"
>
<FormikField<string>
name="is_hard_stop"
render={(field, helper) => (
<InputSelect
value={field.value}
onValueChange={(v) => helper.setValue(v)}
>
<InputSelect.Trigger />
<InputSelect.Content>
<InputSelect.Item value="no">No</InputSelect.Item>
<InputSelect.Item value="yes">
Yes - Fail stops entire review
</InputSelect.Item>
</InputSelect.Content>
</InputSelect>
)}
/>
</VerticalInput>
</div>
</div>
<VerticalInput
name="prompt_template"
title="Prompt Template"
nonInteractive
sizePreset="main-ui"
>
<Text font="secondary-body" color="text-04">
{
"Available variables: {{proposal_text}}, {{metadata}}, {{foa_text}}"
}
</Text>
<FormikField<string>
name="prompt_template"
render={(field, helper) => (
<InputTextArea
value={field.value}
onChange={(e) => helper.setValue(e.target.value)}
placeholder="Enter the LLM prompt template for evaluating this rule..."
rows={8}
/>
)}
/>
</VerticalInput>
</Modal.Body>
<Modal.Footer>
<Button
prominence="secondary"
type="button"
onClick={onClose}
disabled={isSubmitting}
>
Cancel
</Button>
<Button
type="submit"
disabled={
isSubmitting ||
!values.name.trim() ||
!values.prompt_template.trim()
}
>
{isSubmitting
? "Saving..."
: existingRule
? "Update Rule"
: "Add Rule"}
</Button>
</Modal.Footer>
</Form>
)}
</Formik>
</Modal.Content>
</Modal>
);
}
export default RuleEditor;

View File

@@ -1,364 +0,0 @@
"use client";
import { useState, useEffect } from "react";
import useSWR from "swr";
import { Text } from "@opal/components";
import { Button } from "@opal/components/buttons/button/components";
import Checkbox from "@/refresh-components/inputs/Checkbox";
import InputComboBox from "@/refresh-components/inputs/InputComboBox";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import InputTypeIn from "@/refresh-components/inputs/InputTypeIn";
import Separator from "@/refresh-components/Separator";
import { toast } from "@/hooks/useToast";
import { errorHandlingFetcher } from "@/lib/fetcher";
import { SvgPlus, SvgTrash } from "@opal/icons";
import { Section } from "@/layouts/general-layouts";
import { Content } from "@opal/layouts";
import type {
ConfigResponse,
ConfigUpdate,
JiraConnectorInfo,
} from "@/app/admin/proposal-review/interfaces";
const CONNECTORS_URL = "/api/proposal-review/jira-connectors";
/** Props for the Jira-integration settings form. */
interface SettingsFormProps {
config: ConfigResponse;
// Awaited with the patched config fields on Save.
onSave: (update: ConfigUpdate) => Promise<void>;
onCancel: () => void;
}
/**
 * Settings form for the proposal-review Jira integration: connector choice,
 * visible metadata fields, and outcome-to-field write-back mapping.
 *
 * Local state is seeded from `config` and re-seeded whenever it changes.
 * Empty field/writeback selections are saved as null rather than empty
 * collections.
 */
function SettingsForm({ config, onSave, onCancel }: SettingsFormProps) {
const [connectorId, setConnectorId] = useState<number | null>(
config.jira_connector_id
);
const [visibleFields, setVisibleFields] = useState<string[]>(
config.field_mapping ?? []
);
const [jiraWriteback, setJiraWriteback] = useState<Record<string, string>>(
(config.jira_writeback as Record<string, string>) || {}
);
const [saving, setSaving] = useState(false);
const [fieldSearch, setFieldSearch] = useState("");
// Writeback add-row state
const [newWritebackKey, setNewWritebackKey] = useState("");
const [newWritebackField, setNewWritebackField] = useState("");
// Keep local state in sync when the parent refetches the config.
useEffect(() => {
setConnectorId(config.jira_connector_id);
setVisibleFields(config.field_mapping ?? []);
setJiraWriteback((config.jira_writeback as Record<string, string>) || {});
}, [config]);
// Fetch available Jira connectors
const { data: connectors, isLoading: connectorsLoading } = useSWR<
JiraConnectorInfo[]
>(CONNECTORS_URL, errorHandlingFetcher);
// Fetch metadata keys from indexed documents for the selected connector
const { data: metadataKeys, isLoading: fieldsLoading } = useSWR<string[]>(
connectorId
? `/api/proposal-review/jira-connectors/${connectorId}/metadata-keys`
: null,
errorHandlingFetcher
);
const selectedConnector = (connectors ?? []).find(
(c) => c.id === connectorId
);
// Persist the current form state; empty selections collapse to null.
async function handleSave() {
setSaving(true);
try {
await onSave({
jira_connector_id: connectorId,
jira_project_key: selectedConnector?.project_key || null,
field_mapping: visibleFields.length > 0 ? visibleFields : null,
jira_writeback:
Object.keys(jiraWriteback).length > 0 ? jiraWriteback : null,
});
toast.success("Settings saved.");
} catch {
toast.error("Failed to save settings.");
} finally {
setSaving(false);
}
}
// Toggle a metadata field's membership in the visible-fields list.
function toggleField(key: string) {
setVisibleFields((prev) =>
prev.includes(key) ? prev.filter((k) => k !== key) : [...prev, key]
);
}
// Add the pending outcome->field pair and reset the add-row inputs.
function handleAddWriteback() {
if (!newWritebackKey) return;
setJiraWriteback({
...jiraWriteback,
[newWritebackKey]: newWritebackField,
});
setNewWritebackKey("");
setNewWritebackField("");
}
const writebackEntries = Object.entries(jiraWriteback);
// Filter metadata keys by search
const allKeys = metadataKeys ?? [];
const filteredKeys = fieldSearch
? allKeys.filter((k) => k.toLowerCase().includes(fieldSearch.toLowerCase()))
: allKeys;
return (
<Section gap={2} alignItems="stretch" height="auto">
{/* Jira Connector Selection */}
<Section gap={1} alignItems="stretch" height="auto">
<Content
sizePreset="section"
variant="section"
title="Jira Connector"
description="Select which Jira connector to use for proposal sourcing."
/>
<Section gap={0.25} alignItems="start" height="auto">
<Text font="main-ui-action" color="text-04">
Connector
</Text>
{connectorsLoading ? (
<Text font="main-ui-body" color="text-03" as="p">
Loading connectors...
</Text>
) : connectors && connectors.length > 0 ? (
<InputSelect
value={connectorId != null ? String(connectorId) : undefined}
onValueChange={(val) => {
const newId = val ? Number(val) : null;
if (newId !== connectorId) {
setConnectorId(newId);
setVisibleFields([]);
}
}}
>
<InputSelect.Trigger placeholder="Select a Jira connector..." />
<InputSelect.Content>
{connectors.map((c) => (
<InputSelect.Item
key={c.id}
value={String(c.id)}
description={
c.project_key ? `Project: ${c.project_key}` : undefined
}
>
{c.name}
</InputSelect.Item>
))}
</InputSelect.Content>
</InputSelect>
) : (
<Text font="main-ui-body" color="text-03" as="p">
No Jira connectors found. Add one in the Connectors settings
first.
</Text>
)}
</Section>
</Section>
<Separator noPadding />
{/* Visible Fields Checklist */}
<Section gap={1} alignItems="stretch" height="auto">
<Content
sizePreset="section"
variant="section"
title="Visible Fields"
description="Choose which metadata fields to display in the proposal queue and review interface. If none are selected, all fields are shown."
/>
{fieldsLoading && connectorId && (
<Text font="secondary-body" color="text-03" as="p">
Loading fields...
</Text>
)}
{!fieldsLoading && connectorId && allKeys.length > 0 && (
<>
<InputTypeIn
placeholder="Filter fields..."
value={fieldSearch}
onChange={(e) => setFieldSearch(e.target.value)}
onClear={() => setFieldSearch("")}
leftSearchIcon
/>
<div className="flex flex-col gap-1 max-h-64 overflow-y-auto">
{filteredKeys.map((key) => (
<label
key={key}
className="flex items-center gap-3 px-2 py-1.5 rounded-8 cursor-pointer hover:bg-background-neutral-02"
>
<Checkbox
checked={visibleFields.includes(key)}
onCheckedChange={() => toggleField(key)}
/>
<Text font="main-ui-body" color="text-04">
{key}
</Text>
</label>
))}
</div>
{visibleFields.length > 0 && (
<Text font="secondary-body" color="text-03" as="p">
{`${visibleFields.length} field${
visibleFields.length !== 1 ? "s" : ""
} selected`}
</Text>
)}
</>
)}
{!fieldsLoading && connectorId && allKeys.length === 0 && (
<Text font="secondary-body" color="text-03" as="p">
No metadata fields found. Make sure the connector has indexed some
documents.
</Text>
)}
{!connectorId && (
<Text font="secondary-body" color="text-03" as="p">
Select a connector above to see available fields.
</Text>
)}
</Section>
<Separator noPadding />
{/* Write-back Configuration */}
<Section gap={1} alignItems="stretch" height="auto">
<Content
sizePreset="section"
variant="section"
title="Write-back Configuration"
description="Map review outcomes to Jira custom fields for automatic status sync."
/>
{writebackEntries.length > 0 && (
<Section gap={0.5} alignItems="stretch" height="auto">
<div className="flex gap-3 px-1">
<span className="flex-1">
<Text font="secondary-action" color="text-03">
Outcome
</Text>
</span>
<span className="flex-1">
<Text font="secondary-action" color="text-03">
Jira Field
</Text>
</span>
<div className="w-8" />
</div>
{writebackEntries.map(([key, value]) => (
<Section
key={key}
flexDirection="row"
gap={0.75}
alignItems="center"
height="auto"
>
<div className="flex-1">
<Text font="main-ui-body" color="text-04">
{key}
</Text>
</div>
<div className="flex-1">
<Text font="main-ui-body" color="text-04">
{value}
</Text>
</div>
<Button
icon={SvgTrash}
prominence="tertiary"
variant="danger"
size="sm"
onClick={() => {
const updated = { ...jiraWriteback };
delete updated[key];
setJiraWriteback(updated);
}}
tooltip="Remove"
/>
</Section>
))}
</Section>
)}
{connectorId && (
<Section
flexDirection="row"
gap={0.75}
alignItems="center"
height="auto"
>
<div className="flex-1">
<InputSelect
value={newWritebackKey || undefined}
onValueChange={setNewWritebackKey}
>
<InputSelect.Trigger placeholder="Select outcome..." />
<InputSelect.Content>
{["decision_field_id", "completion_field_id"].map((key) => (
<InputSelect.Item key={key} value={key}>
{key === "decision_field_id"
? "Decision Field"
: "Completion % Field"}
</InputSelect.Item>
))}
</InputSelect.Content>
</InputSelect>
</div>
<div className="flex-1">
{allKeys.length > 0 ? (
<InputComboBox
placeholder="Search fields..."
value={newWritebackField}
onValueChange={setNewWritebackField}
options={allKeys.map((key) => ({
value: key,
label: key,
}))}
strict
leftSearchIcon
/>
) : (
<Text font="secondary-body" color="text-03" as="p">
Select a connector first
</Text>
)}
</div>
<Button
icon={SvgPlus}
prominence="tertiary"
size="sm"
onClick={handleAddWriteback}
disabled={!newWritebackKey || !newWritebackField}
tooltip="Add entry"
/>
</Section>
)}
</Section>
<Separator noPadding />
{/* Actions */}
<Section flexDirection="row" gap={0.75} alignItems="center" height="auto">
<Button onClick={handleSave} disabled={saving}>
{saving ? "Saving..." : "Save"}
</Button>
<Button prominence="secondary" onClick={onCancel} disabled={saving}>
Cancel
</Button>
</Section>
</Section>
);
}

View File

@@ -1,145 +0,0 @@
/** Shared types for Proposal Review (Argus) admin pages. */
/** A ruleset as returned by the API, including its nested rules. */
export interface RulesetResponse {
id: string;
tenant_id: string;
name: string;
description: string | null;
is_default: boolean;
is_active: boolean;
created_by: string | null;
created_at: string;
updated_at: string;
rules: RuleResponse[];
}
/** A single review rule as returned by the API. */
export interface RuleResponse {
id: string;
ruleset_id: string;
name: string;
description: string | null;
category: string | null;
rule_type: RuleType;
rule_intent: RuleIntent;
prompt_template: string;
source: RuleSource;
authority: RuleAuthority | null;
is_hard_stop: boolean;
priority: number;
is_active: boolean;
created_at: string;
updated_at: string;
}
export type RuleType =
| "DOCUMENT_CHECK"
| "METADATA_CHECK"
| "CROSS_REFERENCE"
| "CUSTOM_NL";
export type RuleIntent = "CHECK" | "HIGHLIGHT";
export type RuleSource = "IMPORTED" | "MANUAL";
export type RuleAuthority = "OVERRIDE" | "RETURN";
/** Payload for creating a ruleset. */
export interface RulesetCreate {
name: string;
description?: string;
is_default?: boolean;
}
/** Partial payload for updating a ruleset. */
export interface RulesetUpdate {
name?: string;
description?: string;
is_default?: boolean;
is_active?: boolean;
}
/** Payload for creating a rule within a ruleset. */
export interface RuleCreate {
name: string;
description?: string;
category?: string;
rule_type: RuleType;
rule_intent?: RuleIntent;
prompt_template: string;
source?: RuleSource;
authority?: RuleAuthority | null;
is_hard_stop?: boolean;
priority?: number;
}
/** Partial payload for updating a rule. */
export interface RuleUpdate {
name?: string;
description?: string;
category?: string;
rule_type?: RuleType;
rule_intent?: RuleIntent;
prompt_template?: string;
authority?: RuleAuthority | null;
is_hard_stop?: boolean;
priority?: number;
is_active?: boolean;
}
/** Request body for the bulk-update rules endpoint. */
export interface BulkRuleUpdateRequest {
action: "activate" | "deactivate" | "delete";
rule_ids: string[];
}
export interface BulkRuleUpdateResponse {
updated_count: number;
}
/** Result of importing a checklist document. */
export interface ImportResponse {
rules_created: number;
rules: RuleResponse[];
}
/** Tenant-level proposal-review configuration. */
export interface ConfigResponse {
id: string;
tenant_id: string;
jira_connector_id: number | null;
jira_project_key: string | null;
field_mapping: string[] | null;
jira_writeback: Record<string, string> | null;
created_at: string;
updated_at: string;
}
/** Partial payload for updating the configuration. */
export interface ConfigUpdate {
jira_connector_id?: number | null;
jira_project_key?: string | null;
field_mapping?: string[] | null;
jira_writeback?: Record<string, string> | null;
}
/** Summary info about an available Jira connector. */
export interface JiraConnectorInfo {
id: number;
name: string;
project_key: string;
project_url: string;
}
/** Labels for display purposes. */
export const RULE_TYPE_LABELS: Record<RuleType, string> = {
DOCUMENT_CHECK: "Document Check",
METADATA_CHECK: "Metadata Check",
CROSS_REFERENCE: "Cross Reference",
CUSTOM_NL: "Custom NL",
};
export const RULE_INTENT_LABELS: Record<RuleIntent, string> = {
CHECK: "Check",
HIGHLIGHT: "Highlight",
};
export const RULE_SOURCE_LABELS: Record<RuleSource, string> = {
IMPORTED: "Imported",
MANUAL: "Manual",
};
export const RULE_AUTHORITY_LABELS: Record<string, string> = {
OVERRIDE: "Override",
RETURN: "Return",
};

View File

@@ -1,505 +0,0 @@
"use client";
import { useMemo, useState } from "react";
import { useRouter } from "next/navigation";
import useSWR, { mutate } from "swr";
import { errorHandlingFetcher } from "@/lib/fetcher";
import * as SettingsLayouts from "@/layouts/settings-layouts";
import { ADMIN_ROUTES } from "@/lib/admin-routes";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import { toast } from "@/hooks/useToast";
import { Button, Text, Tag } from "@opal/components";
import { Content, IllustrationContent } from "@opal/layouts";
import SvgNoResult from "@opal/illustrations/no-result";
import {
SvgCheckSquare,
SvgEdit,
SvgMoreHorizontal,
SvgSettings,
SvgTrash,
} from "@opal/icons";
import { Form, Formik } from "formik";
import InputTypeIn from "@/refresh-components/inputs/InputTypeIn";
import { FormikField } from "@/refresh-components/form/FormikField";
import AdminListHeader from "@/sections/admin/AdminListHeader";
import Modal from "@/refresh-components/Modal";
import Popover, { PopoverMenu } from "@/refresh-components/Popover";
import LineItem from "@/refresh-components/buttons/LineItem";
import ConfirmationModalLayout from "@/refresh-components/layouts/ConfirmationModalLayout";
import { markdown } from "@opal/utils";
import { Table } from "@opal/components";
import { createTableColumns } from "@opal/components/table/columns";
import { Vertical as VerticalInput } from "@/layouts/input-layouts";
import type {
RulesetResponse,
RulesetCreate,
RulesetUpdate,
} from "@/app/admin/proposal-review/interfaces";
const API_URL = "/api/proposal-review/rulesets";
const route = ADMIN_ROUTES.PROPOSAL_REVIEW;
const tc = createTableColumns<RulesetResponse>();
/** Format an ISO date string as e.g. "Apr 9, 2026" (US locale). */
function formatDate(dateStr: string): string {
  const displayOptions: Intl.DateTimeFormatOptions = {
    month: "short",
    day: "numeric",
    year: "numeric",
  };
  return new Date(dateStr).toLocaleDateString("en-US", displayOptions);
}
/**
 * Admin page listing proposal-review rulesets with create/edit/delete flows.
 *
 * Data is loaded via SWR from API_URL; the client-side search filters by
 * name or description. Clicking a row navigates to the ruleset detail page.
 */
function RulesetsPage() {
const router = useRouter();
const {
data: rulesets,
isLoading,
error,
} = useSWR<RulesetResponse[]>(API_URL, errorHandlingFetcher);
const [showCreateForm, setShowCreateForm] = useState(false);
const [editTarget, setEditTarget] = useState<RulesetResponse | null>(null);
const [deleteTarget, setDeleteTarget] = useState<RulesetResponse | null>(
null
);
const [search, setSearch] = useState("");
// Case-insensitive match on name or description; empty search passes all.
const filteredRulesets = (rulesets ?? []).filter(
(rs) =>
!search ||
rs.name.toLowerCase().includes(search.toLowerCase()) ||
(rs.description ?? "").toLowerCase().includes(search.toLowerCase())
);
function handleEditOpen(ruleset: RulesetResponse) {
setEditTarget(ruleset);
}
// Delete a ruleset, then revalidate the list.
async function handleDelete(ruleset: RulesetResponse) {
try {
const res = await fetch(`${API_URL}/${ruleset.id}`, {
method: "DELETE",
});
if (!res.ok && res.status !== 204) {
// NOTE(review): res.json() will throw if the error body is not JSON,
// replacing the server detail with a parse error — consider guarding.
const err = await res.json();
throw new Error(err.detail || "Failed to delete ruleset");
}
await mutate(API_URL);
setDeleteTarget(null);
toast.success("Ruleset deleted.");
} catch (err) {
toast.error(
err instanceof Error ? err.message : "Failed to delete ruleset"
);
}
}
const columns = useMemo(
() => [
tc.qualifier({
content: "icon",
getContent: () => SvgCheckSquare,
}),
tc.column("name", {
header: "Name",
weight: 30,
cell: (value, row) =>
row.description ? (
<Content
title={value}
description={row.description}
sizePreset="main-ui"
variant="section"
/>
) : (
<Content title={value} sizePreset="main-ui" variant="body" />
),
}),
tc.displayColumn({
id: "rules_count",
header: "Rules",
width: { weight: 10, minWidth: 80 },
cell: (row) => (
<Text font="main-ui-body" color="text-03">
{String(row.rules.length)}
</Text>
),
}),
tc.displayColumn({
id: "status",
header: "Status",
width: { weight: 15, minWidth: 100 },
cell: (row) => (
<Tag
title={row.is_active ? "Active" : "Inactive"}
color={row.is_active ? "green" : "gray"}
/>
),
}),
tc.displayColumn({
id: "default",
header: "Default",
width: { weight: 10, minWidth: 80 },
cell: (row) =>
row.is_default ? <Tag title="Default" color="blue" /> : null,
}),
tc.column("updated_at", {
header: "Last Modified",
weight: 15,
cell: (value) => (
<Text font="secondary-body" color="text-03">
{formatDate(value)}
</Text>
),
}),
tc.actions({
cell: (row) => (
<div className="flex flex-row gap-1">
<Popover>
<Popover.Trigger asChild>
<Button
icon={SvgMoreHorizontal}
prominence="tertiary"
tooltip="More"
/>
</Popover.Trigger>
<Popover.Content side="bottom" align="end" width="md">
<PopoverMenu>
<LineItem icon={SvgEdit} onClick={() => handleEditOpen(row)}>
Edit Ruleset
</LineItem>
<LineItem
icon={SvgTrash}
danger
onClick={() => setDeleteTarget(row)}
>
Delete Ruleset
</LineItem>
</PopoverMenu>
</Popover.Content>
</Popover>
</div>
),
}),
],
[] // eslint-disable-line react-hooks/exhaustive-deps
);
if (error) {
return (
<SettingsLayouts.Root width="lg">
<SettingsLayouts.Header
title={route.title}
icon={route.icon}
description="Manage review rulesets for automated proposal evaluation."
separator
/>
<SettingsLayouts.Body>
<IllustrationContent
illustration={SvgNoResult}
title="Failed to load rulesets."
description="Please check the console for more details."
/>
</SettingsLayouts.Body>
</SettingsLayouts.Root>
);
}
if (isLoading) {
return (
<SettingsLayouts.Root width="lg">
<SettingsLayouts.Header
title={route.title}
icon={route.icon}
description="Manage review rulesets for automated proposal evaluation."
separator
/>
<SettingsLayouts.Body>
<SimpleLoader />
</SettingsLayouts.Body>
</SettingsLayouts.Root>
);
}
const hasRulesets = (rulesets ?? []).length > 0;
return (
<SettingsLayouts.Root width="lg">
<SettingsLayouts.Header
title={route.title}
icon={route.icon}
description="Manage review rulesets for automated proposal evaluation."
separator
rightChildren={
<Button
icon={SvgSettings}
prominence="secondary"
onClick={() => router.push("/admin/proposal-review/settings")}
>
Jira Integration
</Button>
}
/>
<SettingsLayouts.Body>
<div className="flex flex-col">
<AdminListHeader
hasItems={hasRulesets}
searchQuery={search}
onSearchQueryChange={setSearch}
placeholder="Search rulesets..."
emptyStateText="Create rulesets to define automated proposal review rules."
onAction={() => setShowCreateForm(true)}
actionLabel="New Ruleset"
/>
{hasRulesets && (
<Table
data={filteredRulesets}
getRowId={(row) => row.id}
columns={columns}
searchTerm={search}
onRowClick={(row) =>
router.push(`/admin/proposal-review/rulesets/${row.id}`)
}
/>
)}
</div>
</SettingsLayouts.Body>
{/* Create Ruleset Modal */}
{showCreateForm && (
<Modal open onOpenChange={() => setShowCreateForm(false)}>
<Modal.Content width="sm" height="sm">
<Modal.Header
icon={SvgCheckSquare}
title="New Ruleset"
description="Create a new set of review rules."
onClose={() => setShowCreateForm(false)}
/>
<Formik
initialValues={{ name: "", description: "" }}
onSubmit={async (values, { setSubmitting }) => {
setSubmitting(true);
try {
const res = await fetch("/api/proposal-review/rulesets", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(values),
});
if (!res.ok) throw new Error(await res.text());
const created = await res.json();
toast.success("Ruleset created. Add rules to get started.");
setShowCreateForm(false);
router.push(`/admin/proposal-review/rulesets/${created.id}`);
} catch (err) {
toast.error(
err instanceof Error
? err.message
: "Failed to create ruleset."
);
} finally {
setSubmitting(false);
}
}}
>
{({ isSubmitting, values }) => (
<Form className="w-full">
<Modal.Body>
<VerticalInput
name="name"
title="Name"
nonInteractive
sizePreset="main-ui"
>
<FormikField<string>
name="name"
render={(field, helper) => (
<InputTypeIn
{...field}
placeholder="e.g., Institutional Review"
onClear={() => helper.setValue("")}
showClearButton={false}
/>
)}
/>
</VerticalInput>
<VerticalInput
name="description"
title="Description"
nonInteractive
sizePreset="main-ui"
>
<FormikField<string>
name="description"
render={(field, helper) => (
<InputTypeIn
{...field}
placeholder="Optional description"
onClear={() => helper.setValue("")}
showClearButton={false}
/>
)}
/>
</VerticalInput>
</Modal.Body>
<Modal.Footer>
<Button
prominence="secondary"
type="button"
onClick={() => setShowCreateForm(false)}
disabled={isSubmitting}
>
Cancel
</Button>
<Button
type="submit"
disabled={isSubmitting || !values.name.trim()}
>
{isSubmitting ? "Creating..." : "Create"}
</Button>
</Modal.Footer>
</Form>
)}
</Formik>
</Modal.Content>
</Modal>
)}
{/* Edit Ruleset Modal */}
{editTarget && (
<Modal open onOpenChange={() => setEditTarget(null)}>
<Modal.Content width="sm" height="sm">
<Modal.Header
icon={SvgEdit}
title="Edit Ruleset"
description="Update the ruleset name and description."
onClose={() => setEditTarget(null)}
/>
<Formik
initialValues={{
name: editTarget.name,
description: editTarget.description || "",
}}
onSubmit={async (values, { setSubmitting }) => {
setSubmitting(true);
try {
const res = await fetch(
`/api/proposal-review/rulesets/${editTarget.id}`,
{
method: "PUT",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(values),
}
);
if (!res.ok) throw new Error(await res.text());
toast.success("Ruleset updated.");
mutate(API_URL);
setEditTarget(null);
} catch (err) {
toast.error(
err instanceof Error
? err.message
: "Failed to update ruleset."
);
} finally {
setSubmitting(false);
}
}}
>
{({ isSubmitting, values }) => (
<Form className="w-full">
<Modal.Body>
<VerticalInput
name="name"
title="Name"
nonInteractive
sizePreset="main-ui"
>
<FormikField<string>
name="name"
render={(field, helper) => (
<InputTypeIn
{...field}
placeholder="Ruleset name"
onClear={() => helper.setValue("")}
showClearButton={false}
/>
)}
/>
</VerticalInput>
<VerticalInput
name="description"
title="Description"
nonInteractive
sizePreset="main-ui"
>
<FormikField<string>
name="description"
render={(field, helper) => (
<InputTypeIn
{...field}
placeholder="Optional description"
onClear={() => helper.setValue("")}
showClearButton={false}
/>
)}
/>
</VerticalInput>
</Modal.Body>
<Modal.Footer>
<Button
prominence="secondary"
type="button"
onClick={() => setEditTarget(null)}
disabled={isSubmitting}
>
Cancel
</Button>
<Button
type="submit"
disabled={isSubmitting || !values.name.trim()}
>
{isSubmitting ? "Saving..." : "Save"}
</Button>
</Modal.Footer>
</Form>
)}
</Formik>
</Modal.Content>
</Modal>
)}
{/* Delete Confirmation */}
{deleteTarget && (
<ConfirmationModalLayout
icon={SvgTrash}
title="Delete Ruleset"
onClose={() => setDeleteTarget(null)}
submit={
<Button
variant="danger"
onClick={async () => {
const target = deleteTarget;
setDeleteTarget(null);
await handleDelete(target);
}}
>
Delete
</Button>
}
>
<Text as="p" color="text-03">
{markdown(
`Are you sure you want to delete *${deleteTarget.name}*? All rules within this ruleset will also be deleted. This action cannot be undone.`
)}
</Text>
</ConfirmationModalLayout>
)}
</SettingsLayouts.Root>
);
}
// Next.js route entry point: simply renders the rulesets admin page.
const Page = () => <RulesetsPage />;

export default Page;

View File

@@ -1,577 +0,0 @@
"use client";
import { useState, useMemo } from "react";
import { useParams } from "next/navigation";
import useSWR, { mutate } from "swr";
import { errorHandlingFetcher } from "@/lib/fetcher";
import * as SettingsLayouts from "@/layouts/settings-layouts";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import { toast } from "@/hooks/useToast";
import { Button, Text, Tag, Table } from "@opal/components";
import { Content, IllustrationContent } from "@opal/layouts";
import SvgNoResult from "@opal/illustrations/no-result";
import { createTableColumns } from "@opal/components/table/columns";
import {
SvgCheckSquare,
SvgEdit,
SvgMoreHorizontal,
SvgPlus,
SvgTrash,
SvgUploadCloud,
} from "@opal/icons";
import Popover, { PopoverMenu } from "@/refresh-components/Popover";
import LineItem from "@/refresh-components/buttons/LineItem";
import ConfirmationModalLayout from "@/refresh-components/layouts/ConfirmationModalLayout";
import { markdown } from "@opal/utils";
import RuleEditor from "@/app/admin/proposal-review/components/RuleEditor";
import ImportFlow from "@/app/admin/proposal-review/components/ImportFlow";
import type {
RulesetResponse,
RulesetUpdate,
RuleResponse,
RuleCreate,
RuleUpdate,
BulkRuleUpdateRequest,
RuleIntent,
} from "@/app/admin/proposal-review/interfaces";
import {
RULE_TYPE_LABELS,
RULE_INTENT_LABELS,
} from "@/app/admin/proposal-review/interfaces";
import type { TagColor } from "@opal/components";
const tc = createTableColumns<RuleResponse>();
/** Tag color for a rule intent: CHECK renders green, anything else purple. */
function intentColor(intent: RuleIntent): TagColor {
  switch (intent) {
    case "CHECK":
      return "green";
    default:
      return "purple";
  }
}
/**
 * Admin detail page for a single proposal-review ruleset.
 *
 * Loads the ruleset (including its rules) via SWR, then provides:
 *  - ruleset-level toggles (active / default),
 *  - per-rule CRUD through the RuleEditor modal,
 *  - bulk activate / deactivate / delete over a checkbox selection,
 *  - checklist import through the ImportFlow modal.
 *
 * Every mutation revalidates the SWR cache at `apiUrl` so the table stays
 * in sync with the backend.
 */
function RulesetDetailPage() {
  const params = useParams();
  const rulesetId = params.id as string;
  // Single source of truth for this ruleset's REST endpoint and SWR cache key.
  const apiUrl = `/api/proposal-review/rulesets/${rulesetId}`;
  const {
    data: ruleset,
    isLoading,
    error,
  } = useSWR<RulesetResponse>(apiUrl, errorHandlingFetcher);
  // Modal states
  const [showRuleEditor, setShowRuleEditor] = useState(false);
  const [editingRule, setEditingRule] = useState<RuleResponse | null>(null);
  const [showImportFlow, setShowImportFlow] = useState(false);
  const [deleteTarget, setDeleteTarget] = useState<RuleResponse | null>(null);
  // Batch selection
  const [selectedRuleIds, setSelectedRuleIds] = useState<Set<string>>(
    new Set()
  );
  const [batchSaving, setBatchSaving] = useState(false);
  // Toggle handlers
  // Flip the ruleset's is_active flag; failures are surfaced via toast only.
  async function handleToggleActive() {
    if (!ruleset) return;
    try {
      const body: RulesetUpdate = { is_active: !ruleset.is_active };
      const res = await fetch(apiUrl, {
        method: "PUT",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(body),
      });
      if (!res.ok) {
        // Tolerate non-JSON error bodies (proxies, plain text).
        const err = await res.json().catch(() => ({}));
        throw new Error(err.detail || "Failed to toggle active status");
      }
      await mutate(apiUrl);
      toast.success(
        ruleset.is_active ? "Ruleset deactivated." : "Ruleset activated."
      );
    } catch (err) {
      toast.error(
        err instanceof Error ? err.message : "Failed to toggle active status"
      );
    }
  }
  // Flip the ruleset's is_default flag; mirrors handleToggleActive.
  async function handleToggleDefault() {
    if (!ruleset) return;
    try {
      const body: RulesetUpdate = { is_default: !ruleset.is_default };
      const res = await fetch(apiUrl, {
        method: "PUT",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(body),
      });
      if (!res.ok) {
        const err = await res.json().catch(() => ({}));
        throw new Error(err.detail || "Failed to toggle default status");
      }
      await mutate(apiUrl);
      toast.success(
        ruleset.is_default
          ? "Removed default status."
          : "Set as default ruleset."
      );
    } catch (err) {
      toast.error(
        err instanceof Error ? err.message : "Failed to toggle default status"
      );
    }
  }
  // Flip a single rule's is_active flag from the row's overflow menu.
  async function handleToggleRuleActive(rule: RuleResponse) {
    try {
      const update: RuleUpdate = { is_active: !rule.is_active };
      const res = await fetch(`/api/proposal-review/rules/${rule.id}`, {
        method: "PUT",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(update),
      });
      if (!res.ok) {
        const err = await res.json().catch(() => ({}));
        throw new Error(err.detail || "Failed to toggle rule active status");
      }
      await mutate(apiUrl);
      toast.success(rule.is_active ? "Rule deactivated." : "Rule activated.");
    } catch (err) {
      toast.error(
        err instanceof Error
          ? err.message
          : "Failed to toggle rule active status"
      );
    }
  }
  // Rule CRUD
  // Create or update a rule depending on whether a rule is being edited.
  // Throws on failure so the RuleEditor modal can display the error itself.
  async function handleSaveRule(ruleData: RuleCreate | RuleUpdate) {
    if (editingRule) {
      const res = await fetch(`/api/proposal-review/rules/${editingRule.id}`, {
        method: "PUT",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(ruleData),
      });
      if (!res.ok) {
        const err = await res.json();
        throw new Error(err.detail || "Failed to update rule");
      }
      toast.success("Rule updated.");
    } else {
      const res = await fetch(
        `/api/proposal-review/rulesets/${rulesetId}/rules`,
        {
          method: "POST",
          headers: { "Content-Type": "application/json" },
          body: JSON.stringify(ruleData),
        }
      );
      if (!res.ok) {
        const err = await res.json();
        throw new Error(err.detail || "Failed to create rule");
      }
      toast.success("Rule created.");
    }
    await mutate(apiUrl);
  }
  // Delete one rule, dropping it from the batch selection if present.
  async function handleDeleteRule(rule: RuleResponse) {
    try {
      const res = await fetch(`/api/proposal-review/rules/${rule.id}`, {
        method: "DELETE",
      });
      if (!res.ok && res.status !== 204) {
        const err = await res.json();
        throw new Error(err.detail || "Failed to delete rule");
      }
      // Keep the selection consistent with the now-deleted row.
      setSelectedRuleIds((prev) => {
        const next = new Set(prev);
        next.delete(rule.id);
        return next;
      });
      await mutate(apiUrl);
      toast.success("Rule deleted.");
    } catch (err) {
      toast.error(err instanceof Error ? err.message : "Failed to delete rule");
    }
  }
  // Batch operations
  // Apply activate/deactivate/delete to every selected rule in one request.
  async function handleBulkAction(action: BulkRuleUpdateRequest["action"]) {
    if (selectedRuleIds.size === 0) return;
    setBatchSaving(true);
    try {
      const body: BulkRuleUpdateRequest = {
        action,
        rule_ids: Array.from(selectedRuleIds),
      };
      const res = await fetch(
        `/api/proposal-review/rulesets/${rulesetId}/rules/bulk-update`,
        {
          method: "POST",
          headers: { "Content-Type": "application/json" },
          body: JSON.stringify(body),
        }
      );
      if (!res.ok) {
        const err = await res.json();
        throw new Error(err.detail || "Bulk operation failed");
      }
      // Deleted rows no longer exist, so the selection must be cleared;
      // activate/deactivate keep the selection for follow-up actions.
      if (action === "delete") {
        setSelectedRuleIds(new Set());
      }
      await mutate(apiUrl);
      toast.success(
        `Bulk ${action} completed for ${selectedRuleIds.size} rule${
          selectedRuleIds.size === 1 ? "" : "s"
        }.`
      );
    } catch (err) {
      toast.error(err instanceof Error ? err.message : "Bulk operation failed");
    } finally {
      setBatchSaving(false);
    }
  }
  // Group rules by category
  // Rules without a category fall into an "Uncategorized" bucket.
  const groupedRules = useMemo(() => {
    if (!ruleset?.rules) return {};
    const groups: Record<string, RuleResponse[]> = {};
    for (const rule of ruleset.rules) {
      const cat = rule.category || "Uncategorized";
      if (!groups[cat]) groups[cat] = [];
      groups[cat].push(rule);
    }
    return groups;
  }, [ruleset?.rules]);
  const allRuleIds = useMemo(
    () => new Set(ruleset?.rules.map((r) => r.id) || []),
    [ruleset?.rules]
  );
  const allSelected =
    allRuleIds.size > 0 && selectedRuleIds.size === allRuleIds.size;
  // Select-all toggles between every rule and an empty selection.
  function toggleSelectAll() {
    if (allSelected) {
      setSelectedRuleIds(new Set());
    } else {
      setSelectedRuleIds(new Set(allRuleIds));
    }
  }
  // Toggle a single rule's membership in the batch selection.
  function toggleSelectRule(ruleId: string) {
    setSelectedRuleIds((prev) => {
      const next = new Set(prev);
      if (next.has(ruleId)) {
        next.delete(ruleId);
      } else {
        next.add(ruleId);
      }
      return next;
    });
  }
  // Column definitions for the per-category rules tables. Memoized once;
  // the eslint-disable below shows the deps omission is intentional, so the
  // cells capture the handlers from the initial render.
  const ruleColumns = useMemo(
    () => [
      tc.qualifier({
        content: "icon",
        getContent: () => SvgCheckSquare,
      }),
      tc.column("name", {
        header: "Name",
        weight: 25,
        cell: (value, row) =>
          row.description ? (
            <Content
              title={value}
              description={row.description}
              sizePreset="main-ui"
              variant="section"
            />
          ) : (
            <Content title={value} sizePreset="main-ui" variant="body" />
          ),
      }),
      tc.column("rule_type", {
        header: "Type",
        weight: 15,
        cell: (value) => <Tag title={RULE_TYPE_LABELS[value]} color="gray" />,
      }),
      tc.column("rule_intent", {
        header: "Intent",
        weight: 10,
        cell: (value) => (
          <Tag title={RULE_INTENT_LABELS[value]} color={intentColor(value)} />
        ),
      }),
      tc.displayColumn({
        id: "source",
        header: "Source",
        width: { weight: 10, minWidth: 80 },
        cell: (row) => (
          <Tag
            title={row.source === "IMPORTED" ? "Imported" : "Manual"}
            color={row.source === "IMPORTED" ? "blue" : "gray"}
          />
        ),
      }),
      tc.displayColumn({
        id: "hard_stop",
        header: "Hard Stop",
        width: { weight: 10, minWidth: 80 },
        cell: (row) =>
          row.is_hard_stop ? <Tag title="Hard Stop" color="amber" /> : null,
      }),
      tc.displayColumn({
        id: "active",
        header: "Active",
        width: { weight: 8, minWidth: 60 },
        cell: (row) => (
          <Tag
            title={row.is_active ? "Yes" : "No"}
            color={row.is_active ? "green" : "gray"}
          />
        ),
      }),
      tc.actions({
        cell: (row) => (
          <div className="flex flex-row gap-1">
            <Popover>
              <Popover.Trigger asChild>
                <Button
                  icon={SvgMoreHorizontal}
                  prominence="tertiary"
                  tooltip="More"
                />
              </Popover.Trigger>
              <Popover.Content side="bottom" align="end" width="md">
                <PopoverMenu>
                  <LineItem
                    icon={SvgEdit}
                    onClick={() => {
                      setEditingRule(row);
                      setShowRuleEditor(true);
                    }}
                  >
                    Edit Rule
                  </LineItem>
                  <LineItem onClick={() => handleToggleRuleActive(row)}>
                    {row.is_active ? "Deactivate" : "Activate"}
                  </LineItem>
                  <LineItem
                    icon={SvgTrash}
                    danger
                    onClick={() => setDeleteTarget(row)}
                  >
                    Delete Rule
                  </LineItem>
                </PopoverMenu>
              </Popover.Content>
            </Popover>
          </div>
        ),
      }),
    ],
    [] // eslint-disable-line react-hooks/exhaustive-deps
  );
  // Loading skeleton while the ruleset is being fetched.
  if (isLoading) {
    return (
      <SettingsLayouts.Root width="lg">
        <SettingsLayouts.Header
          icon={SvgCheckSquare}
          title="Loading..."
          backButton
          separator
        />
        <SettingsLayouts.Body>
          <SimpleLoader />
        </SettingsLayouts.Body>
      </SettingsLayouts.Root>
    );
  }
  // Error / not-found state.
  if (error || !ruleset) {
    return (
      <SettingsLayouts.Root width="lg">
        <SettingsLayouts.Header
          icon={SvgCheckSquare}
          title="Ruleset"
          backButton
          separator
        />
        <SettingsLayouts.Body>
          <IllustrationContent
            illustration={SvgNoResult}
            title="Failed to load ruleset."
            description={
              error?.info?.message ||
              error?.info?.detail ||
              "Ruleset not found."
            }
          />
        </SettingsLayouts.Body>
      </SettingsLayouts.Root>
    );
  }
  return (
    <SettingsLayouts.Root width="lg">
      <SettingsLayouts.Header
        icon={SvgCheckSquare}
        title={ruleset.name}
        description={ruleset.description || undefined}
        backButton
        rightChildren={
          <div className="flex items-center gap-2">
            <Button
              prominence="secondary"
              icon={SvgUploadCloud}
              onClick={() => setShowImportFlow(true)}
            >
              Import
            </Button>
            <Button
              icon={SvgPlus}
              onClick={() => {
                setEditingRule(null);
                setShowRuleEditor(true);
              }}
            >
              Add Rule
            </Button>
          </div>
        }
        separator
      />
      <SettingsLayouts.Body>
        {/* Ruleset toggles — only show when rules exist */}
        {ruleset.rules.length > 0 && (
          <div className="flex items-center gap-4 pb-2">
            <Button
              prominence={ruleset.is_active ? "primary" : "secondary"}
              size="sm"
              onClick={handleToggleActive}
            >
              {ruleset.is_active ? "Active" : "Inactive"}
            </Button>
            <Button
              prominence={ruleset.is_default ? "primary" : "secondary"}
              size="sm"
              onClick={handleToggleDefault}
            >
              {ruleset.is_default ? "Default Ruleset" : "Not Default"}
            </Button>
          </div>
        )}
        {/* Batch action bar */}
        {selectedRuleIds.size > 0 && (
          <div className="flex items-center gap-3 p-3 bg-background-neutral-02 rounded-08">
            <Text font="main-ui-action" color="text-03">
              {`${selectedRuleIds.size} selected`}
            </Text>
            <Button
              prominence="secondary"
              size="sm"
              onClick={() => handleBulkAction("activate")}
              disabled={batchSaving}
            >
              Activate
            </Button>
            <Button
              prominence="secondary"
              size="sm"
              onClick={() => handleBulkAction("deactivate")}
              disabled={batchSaving}
            >
              Deactivate
            </Button>
            <Button
              variant="danger"
              prominence="secondary"
              size="sm"
              onClick={() => handleBulkAction("delete")}
              disabled={batchSaving}
            >
              Delete
            </Button>
          </div>
        )}
        {/* Rules table grouped by category */}
        {ruleset.rules.length === 0 ? (
          <IllustrationContent
            illustration={SvgNoResult}
            title="No rules yet"
            description="Add rules manually or import from a checklist."
          />
        ) : (
          Object.entries(groupedRules).map(([category, rules]) => (
            <div key={category} className="flex flex-col gap-2">
              <Text font="main-ui-action" color="text-03">
                {category}
              </Text>
              <Table
                data={rules}
                getRowId={(row) => row.id}
                columns={ruleColumns}
              />
            </div>
          ))
        )}
      </SettingsLayouts.Body>
      {/* Rule Editor Modal */}
      <RuleEditor
        open={showRuleEditor}
        onClose={() => {
          setShowRuleEditor(false);
          setEditingRule(null);
        }}
        onSave={handleSaveRule}
        existingRule={editingRule}
      />
      {/* Import Flow Modal */}
      <ImportFlow
        open={showImportFlow}
        onClose={() => setShowImportFlow(false)}
        rulesetId={rulesetId}
        onImportComplete={() => mutate(apiUrl)}
      />
      {/* Delete Rule Confirmation */}
      {deleteTarget && (
        <ConfirmationModalLayout
          icon={SvgTrash}
          title="Delete Rule"
          onClose={() => setDeleteTarget(null)}
          submit={
            <Button
              variant="danger"
              onClick={async () => {
                const target = deleteTarget;
                setDeleteTarget(null);
                await handleDeleteRule(target);
              }}
            >
              Delete
            </Button>
          }
        >
          <Text as="p" color="text-03">
            {markdown(
              `Are you sure you want to delete *${deleteTarget.name}*? This action cannot be undone.`
            )}
          </Text>
        </ConfirmationModalLayout>
      )}
    </SettingsLayouts.Root>
  );
}
// Next.js route entry point: simply renders the ruleset detail page.
const Page = () => <RulesetDetailPage />;

export default Page;

View File

@@ -1,75 +0,0 @@
"use client";
import { useRouter } from "next/navigation";
import useSWR, { mutate } from "swr";
import { IllustrationContent } from "@opal/layouts";
import SvgNoResult from "@opal/illustrations/no-result";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import * as SettingsLayouts from "@/layouts/settings-layouts";
import { errorHandlingFetcher } from "@/lib/fetcher";
import { SvgSettings } from "@opal/icons";
import SettingsForm from "@/app/admin/proposal-review/components/SettingsForm";
import type {
ConfigResponse,
ConfigUpdate,
} from "@/app/admin/proposal-review/interfaces";
const API_URL = "/api/proposal-review/config";
/**
 * Admin settings page for the proposal-review Jira integration.
 *
 * Loads the current config via SWR and delegates editing to SettingsForm;
 * saving PUTs the update back and revalidates the SWR cache.
 */
function ProposalReviewSettingsPage() {
  const router = useRouter();
  const {
    data: config,
    isLoading,
    error,
  } = useSWR<ConfigResponse>(API_URL, errorHandlingFetcher);
  // Persist the updated config. Throws on failure so SettingsForm can
  // surface the error to the user.
  async function handleSave(update: ConfigUpdate) {
    const res = await fetch(API_URL, {
      method: "PUT",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(update),
    });
    if (!res.ok) {
      // Tolerate non-JSON error bodies (proxy errors, plain text) instead of
      // letting res.json() raise a SyntaxError that masks the real failure —
      // matches the `.catch(() => ({}))` pattern used by the other pages.
      const err = await res.json().catch(() => ({}));
      throw new Error(err.detail || "Failed to save settings");
    }
    await mutate(API_URL);
  }
  return (
    <SettingsLayouts.Root>
      <SettingsLayouts.Header
        icon={SvgSettings}
        title="Jira Integration"
        description="Configure which Jira connector to use and how fields are mapped."
        separator
        backButton
        onBack={() => router.push("/admin/proposal-review")}
      />
      <SettingsLayouts.Body>
        {isLoading && <SimpleLoader />}
        {error && (
          <IllustrationContent
            illustration={SvgNoResult}
            title="Error loading settings"
            description={
              error?.info?.message || error?.info?.detail || "An error occurred"
            }
          />
        )}
        {config && (
          <SettingsForm
            config={config}
            onSave={handleSave}
            onCancel={() => router.push("/admin/proposal-review")}
          />
        )}
      </SettingsLayouts.Body>
    </SettingsLayouts.Root>
  );
}
// Next.js route entry point: simply renders the settings page.
const Page = () => <ProposalReviewSettingsPage />;

export default Page;

View File

@@ -1,258 +0,0 @@
"use client";
import { useEffect, useCallback, useState, useRef } from "react";
import { Button, Text } from "@opal/components";
import { SvgPlayCircle, SvgChevronDown, SvgChevronUp } from "@opal/icons";
import { IllustrationContent } from "@opal/layouts";
import SvgEmpty from "@opal/illustrations/empty";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import {
Collapsible,
CollapsibleTrigger,
CollapsibleContent,
} from "@/refresh-components/Collapsible";
import RulesetSelector from "@/app/proposal-review/components/RulesetSelector";
import ReviewProgress from "@/app/proposal-review/components/ReviewProgress";
import FindingCard from "@/app/proposal-review/components/FindingCard";
import { useFindings } from "@/app/proposal-review/hooks/useFindings";
import { useReviewStatus } from "@/app/proposal-review/hooks/useReviewStatus";
import { useProposalReviewContext } from "@/app/proposal-review/contexts/ProposalReviewContext";
import { triggerReview } from "@/app/proposal-review/services/apiServices";
// ---------------------------------------------------------------------------
// Props
// ---------------------------------------------------------------------------
/** Props for ChecklistPanel. */
interface ChecklistPanelProps {
  // ID of the proposal whose review checklist is shown; changing it resets
  // the in-flight review state.
  proposalId: string;
}
// ---------------------------------------------------------------------------
// Component
// ---------------------------------------------------------------------------
/**
 * Side panel that drives a proposal review run.
 *
 * Lets the user pick a ruleset and trigger a review, polls the review
 * status while it runs (via useReviewStatus), and renders the resulting
 * findings grouped by category once the run completes. Review run state
 * is shared through ProposalReviewContext so sibling panels stay in sync.
 */
export default function ChecklistPanel({ proposalId }: ChecklistPanelProps) {
  const {
    selectedRulesetId,
    isReviewRunning,
    setIsReviewRunning,
    setCurrentReviewRunId,
    findingsLoaded,
    setFindingsLoaded,
    resetReviewState,
  } = useProposalReviewContext();
  const [triggerError, setTriggerError] = useState<string | null>(null);
  // Reset review state when navigating to a different proposal
  const prevProposalIdRef = useRef(proposalId);
  useEffect(() => {
    if (prevProposalIdRef.current !== proposalId) {
      prevProposalIdRef.current = proposalId;
      resetReviewState();
      setTriggerError(null);
    }
  }, [proposalId, resetReviewState]);
  // Poll review status while running
  const { reviewStatus } = useReviewStatus(proposalId, isReviewRunning);
  // Fetch findings
  const {
    findingsByCategory,
    isLoading: findingsLoading,
    mutate: mutateFindings,
    findings,
  } = useFindings(proposalId);
  // When review completes, stop polling and load findings
  useEffect(() => {
    if (!reviewStatus) return;
    if (
      reviewStatus.status === "COMPLETED" ||
      reviewStatus.status === "FAILED"
    ) {
      setIsReviewRunning(false);
      // Only refresh findings on success; a FAILED run has nothing new.
      if (reviewStatus.status === "COMPLETED") {
        setFindingsLoaded(true);
        mutateFindings();
      }
    }
  }, [reviewStatus, setIsReviewRunning, setFindingsLoaded, mutateFindings]);
  // On mount, if there are existing findings, mark as loaded
  useEffect(() => {
    if (findings.length > 0 && !findingsLoaded) {
      setFindingsLoaded(true);
    }
  }, [findings.length, findingsLoaded, setFindingsLoaded]);
  // Kick off a review run; on trigger failure, roll back the running flag
  // and surface the error inline.
  const handleRunReview = useCallback(async () => {
    if (!selectedRulesetId) return;
    setTriggerError(null);
    setIsReviewRunning(true);
    try {
      const result = await triggerReview(proposalId, selectedRulesetId);
      setCurrentReviewRunId(result.id);
    } catch (err) {
      setIsReviewRunning(false);
      setTriggerError(
        err instanceof Error ? err.message : "Failed to start review"
      );
    }
  }, [
    proposalId,
    selectedRulesetId,
    setIsReviewRunning,
    setCurrentReviewRunId,
  ]);
  return (
    <div className="flex flex-col h-full overflow-hidden">
      {/* Top bar: ruleset selector + run button */}
      <div className="flex items-center gap-3 p-4 border-b border-border-01 shrink-0">
        <div className="flex-1 max-w-[200px]">
          <RulesetSelector />
        </div>
        <Button
          variant="action"
          prominence="primary"
          icon={SvgPlayCircle}
          disabled={!selectedRulesetId || isReviewRunning}
          onClick={handleRunReview}
        >
          {isReviewRunning ? "Running..." : "Run Review"}
        </Button>
      </div>
      {triggerError && (
        <div className="px-4 pt-2">
          <Text font="secondary-body" color="text-03">
            {triggerError}
          </Text>
        </div>
      )}
      {/* Review progress */}
      {isReviewRunning && reviewStatus && (
        <ReviewProgress reviewStatus={reviewStatus} />
      )}
      {/* Loading spinner while review is starting */}
      {isReviewRunning && !reviewStatus && (
        <div className="flex items-center justify-center py-8">
          <SimpleLoader className="h-6 w-6" />
        </div>
      )}
      {/* Findings list */}
      <div className="flex-1 overflow-y-auto">
        {!isReviewRunning && findingsLoading && (
          <div className="flex items-center justify-center py-8">
            <SimpleLoader className="h-6 w-6" />
          </div>
        )}
        {!isReviewRunning && !findingsLoading && findings.length === 0 && (
          <div className="flex items-center justify-center py-12 px-4">
            <IllustrationContent
              illustration={SvgEmpty}
              title="No review results"
              description="Select a ruleset and click Run Review to evaluate this proposal."
            />
          </div>
        )}
        {!isReviewRunning && findingsByCategory.length > 0 && (
          <div className="flex flex-col gap-3 p-4">
            {findingsByCategory.map((group) => (
              <CategoryGroup
                key={group.category}
                category={group.category}
                findings={group.findings}
                onDecisionSaved={() => mutateFindings()}
              />
            ))}
          </div>
        )}
      </div>
    </div>
  );
}
// ---------------------------------------------------------------------------
// CategoryGroup: collapsible group of findings
// ---------------------------------------------------------------------------
/** Props for CategoryGroup. */
interface CategoryGroupProps {
  // Category heading shown on the collapsible trigger row.
  category: string;
  // Findings belonging to this category.
  findings: import("@/app/proposal-review/types").Finding[];
  // Invoked after a finding decision is persisted so the parent can refetch.
  onDecisionSaved: () => void;
}
/**
 * Collapsible group of findings for a single category.
 *
 * The header summarizes issue count (FAIL/FLAG verdicts) and review
 * progress (findings with a decision). Groups containing issues start
 * expanded so reviewers see problems first.
 */
function CategoryGroup({
  category,
  findings,
  onDecisionSaved,
}: CategoryGroupProps) {
  // FAIL and FLAG verdicts both count as "issues" in the header summary.
  const failCount = findings.filter(
    (f) => f.verdict === "FAIL" || f.verdict === "FLAG"
  ).length;
  // NOTE: a passCount was previously computed here but never rendered;
  // it has been removed as dead code.
  const decidedCount = findings.filter((f) => f.decision !== null).length;
  // Default open if there are failures/flags
  const [isOpen, setIsOpen] = useState(failCount > 0);
  return (
    <Collapsible open={isOpen} onOpenChange={setIsOpen}>
      <CollapsibleTrigger asChild>
        <div
          role="button"
          tabIndex={0}
          className="flex items-center justify-between w-full py-2 px-3 rounded-08 hover:bg-background-neutral-02 cursor-pointer"
          onKeyDown={(e) => {
            // Keyboard accessibility: Enter/Space toggles like a click.
            if (e.key === "Enter" || e.key === " ") {
              e.preventDefault();
              setIsOpen((prev) => !prev);
            }
          }}
        >
          <div className="flex items-center gap-2">
            {isOpen ? (
              <SvgChevronUp className="h-4 w-4 text-text-03" />
            ) : (
              <SvgChevronDown className="h-4 w-4 text-text-03" />
            )}
            <Text font="main-ui-action" color="text-04">
              {category}
            </Text>
          </div>
          <div className="flex items-center gap-3">
            {failCount > 0 && (
              <Text font="secondary-body" color="text-03">
                {`${failCount} issue${failCount !== 1 ? "s" : ""}`}
              </Text>
            )}
            <Text font="secondary-body" color="text-03">
              {`${decidedCount}/${findings.length} reviewed`}
            </Text>
          </div>
        </div>
      </CollapsibleTrigger>
      <CollapsibleContent>
        <div className="flex flex-col gap-2 pt-2 pl-6">
          {findings.map((finding) => (
            <FindingCard
              key={finding.id}
              finding={finding}
              onDecisionSaved={onDecisionSaved}
            />
          ))}
        </div>
      </CollapsibleContent>
    </Collapsible>
  );
}

View File

@@ -1,204 +0,0 @@
"use client";
import { useState, useCallback } from "react";
import { Button, Text, Card } from "@opal/components";
import {
SvgCheckCircle,
SvgAlertTriangle,
SvgXCircle,
SvgRefreshCw,
} from "@opal/icons";
import { Section } from "@/layouts/general-layouts";
import InputTextArea from "@/refresh-components/inputs/InputTextArea";
import { toast } from "@/hooks/useToast";
import {
submitProposalDecision,
syncToJira,
} from "@/app/proposal-review/services/apiServices";
import type {
ProposalDecisionOutcome,
Finding,
} from "@/app/proposal-review/types";
// ---------------------------------------------------------------------------
// Props
// ---------------------------------------------------------------------------
/** Props for DecisionPanel. */
interface DecisionPanelProps {
  // Proposal the decision applies to.
  proposalId: string;
  // All findings for the proposal; used to block approval on unresolved
  // hard stops.
  findings: Finding[];
  // Invoked after a decision is successfully submitted.
  onDecisionSubmitted: () => void;
}
// ---------------------------------------------------------------------------
// Component
// ---------------------------------------------------------------------------
/**
 * Final-decision card for a proposal review.
 *
 * Offers Approve / Request Changes / Reject with optional notes. Approval
 * is blocked while any hard-stop rule has a failing/flagged finding that
 * has not been resolved by a reviewer decision. After a decision is saved,
 * the result can be pushed to Jira.
 */
export default function DecisionPanel({
  proposalId,
  findings,
  onDecisionSubmitted,
}: DecisionPanelProps) {
  const [selectedDecision, setSelectedDecision] =
    useState<ProposalDecisionOutcome | null>(null);
  const [notes, setNotes] = useState("");
  const [isSubmitting, setIsSubmitting] = useState(false);
  const [isSyncing, setIsSyncing] = useState(false);
  const [submitError, setSubmitError] = useState<string | null>(null);
  // Becomes true once a decision has been persisted; gates the Jira sync.
  const [decisionSaved, setDecisionSaved] = useState(false);
  // Check for unresolved hard stops
  // A hard stop is "unresolved" when it failed/flagged and the reviewer
  // either hasn't decided on it or marked it as an ISSUE.
  const unresolvedHardStops = findings.filter(
    (f) =>
      f.rule_is_hard_stop &&
      (f.verdict === "FAIL" || f.verdict === "FLAG") &&
      (!f.decision || f.decision.action === "ISSUE")
  );
  const hasUnresolvedHardStops = unresolvedHardStops.length > 0;
  // Submit the selected decision with optional notes; errors are shown
  // inline below the buttons.
  const handleSubmit = useCallback(async () => {
    if (!selectedDecision) return;
    setIsSubmitting(true);
    setSubmitError(null);
    try {
      await submitProposalDecision(
        proposalId,
        selectedDecision,
        notes || undefined
      );
      setDecisionSaved(true);
      onDecisionSubmitted();
    } catch (err) {
      setSubmitError(
        err instanceof Error ? err.message : "Failed to submit decision"
      );
    } finally {
      setIsSubmitting(false);
    }
  }, [proposalId, selectedDecision, notes, onDecisionSubmitted]);
  // Best-effort push to Jira; failures only raise a toast.
  const handleSync = useCallback(async () => {
    setIsSyncing(true);
    try {
      await syncToJira(proposalId);
    } catch {
      toast.error("Failed to sync to Jira");
    } finally {
      setIsSyncing(false);
    }
  }, [proposalId]);
  return (
    <Card padding="md" border="solid" background="light">
      <Section
        gap={0.75}
        height="auto"
        justifyContent="start"
        alignItems="start"
      >
        <Text font="main-ui-action" color="text-04">
          Final Decision
        </Text>
        {/* Decision buttons */}
        <Section
          gap={0.5}
          height="auto"
          justifyContent="start"
          alignItems="start"
        >
          <Button
            variant={selectedDecision === "APPROVED" ? "action" : "default"}
            prominence={
              selectedDecision === "APPROVED" ? "primary" : "secondary"
            }
            icon={SvgCheckCircle}
            disabled={hasUnresolvedHardStops || isSubmitting}
            onClick={() => setSelectedDecision("APPROVED")}
          >
            Approve
          </Button>
          {hasUnresolvedHardStops && (
            <Text font="secondary-body" color="text-03">
              {`Cannot approve: ${
                unresolvedHardStops.length
              } unresolved hard stop${
                unresolvedHardStops.length !== 1 ? "s" : ""
              }`}
            </Text>
          )}
          <Button
            variant={
              selectedDecision === "CHANGES_REQUESTED" ? "action" : "default"
            }
            prominence={
              selectedDecision === "CHANGES_REQUESTED" ? "primary" : "secondary"
            }
            icon={SvgAlertTriangle}
            disabled={isSubmitting}
            onClick={() => setSelectedDecision("CHANGES_REQUESTED")}
          >
            Request Changes
          </Button>
          <Button
            variant={selectedDecision === "REJECTED" ? "danger" : "default"}
            prominence={
              selectedDecision === "REJECTED" ? "primary" : "secondary"
            }
            icon={SvgXCircle}
            disabled={isSubmitting}
            onClick={() => setSelectedDecision("REJECTED")}
          >
            Reject
          </Button>
        </Section>
        {/* Notes */}
        <InputTextArea
          placeholder="Decision notes (optional)"
          value={notes}
          onChange={(e) => setNotes(e.target.value)}
          rows={3}
        />
        {/* Submit + Sync */}
        <Section
          gap={0.5}
          height="auto"
          justifyContent="start"
          alignItems="start"
        >
          <Button
            variant="action"
            prominence="primary"
            disabled={!selectedDecision || isSubmitting}
            onClick={handleSubmit}
          >
            {isSubmitting ? "Submitting..." : "Submit Decision"}
          </Button>
          <Button
            variant="default"
            prominence="secondary"
            icon={SvgRefreshCw}
            disabled={!decisionSaved || isSyncing}
            onClick={handleSync}
          >
            {isSyncing ? "Syncing..." : "Sync to Jira"}
          </Button>
        </Section>
        {submitError && (
          <Text font="secondary-body" color="text-03">
            {submitError}
          </Text>
        )}
      </Section>
    </Card>
  );
}

View File

@@ -1,99 +0,0 @@
"use client";
import { useState, useRef } from "react";
import { Button, Text } from "@opal/components";
import { SvgUploadCloud } from "@opal/icons";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import { uploadDocument } from "@/app/proposal-review/services/apiServices";
import type { DocumentRole } from "@/app/proposal-review/types";
/** Props for DocumentUpload. */
interface DocumentUploadProps {
  // Proposal the uploaded document is attached to.
  proposalId: string;
  // Invoked after a successful upload so the parent can refresh its list.
  onUploadComplete: () => void;
}
// Selectable document roles for the upload form: each DocumentRole variant
// paired with its human-readable label. "OTHER" is the component's default.
const DOCUMENT_ROLES: { value: DocumentRole; label: string }[] = [
  { value: "PROPOSAL", label: "Proposal" },
  { value: "BUDGET", label: "Budget" },
  { value: "FOA", label: "FOA" },
  { value: "INTERNAL", label: "Internal" },
  { value: "SOW", label: "Scope of Work" },
  { value: "OTHER", label: "Other" },
];
/**
 * Role-tagged document uploader for a proposal.
 *
 * The user picks a DocumentRole, then chooses a file via a hidden
 * <input type="file"> triggered by the Upload button. Upload errors are
 * rendered inline; on success the parent is notified via onUploadComplete.
 */
export default function DocumentUpload({
  proposalId,
  onUploadComplete,
}: DocumentUploadProps) {
  const fileInputRef = useRef<HTMLInputElement>(null);
  const [selectedRole, setSelectedRole] = useState<DocumentRole>("OTHER");
  const [isUploading, setIsUploading] = useState(false);
  const [uploadError, setUploadError] = useState<string | null>(null);

  // Upload the chosen file under the currently selected role.
  async function onFileChosen(event: React.ChangeEvent<HTMLInputElement>) {
    const chosen = event.target.files?.[0];
    if (!chosen) {
      return;
    }
    setIsUploading(true);
    setUploadError(null);
    try {
      await uploadDocument(proposalId, chosen, selectedRole);
      onUploadComplete();
    } catch (err) {
      setUploadError(err instanceof Error ? err.message : "Upload failed");
    } finally {
      setIsUploading(false);
      // Clear the native input so re-selecting the same file fires onChange.
      const input = fileInputRef.current;
      if (input) {
        input.value = "";
      }
    }
  }

  return (
    <div className="flex flex-col gap-2">
      <div className="flex items-center gap-2">
        <div className="flex-1">
          <InputSelect
            value={selectedRole}
            onValueChange={(v) => setSelectedRole(v as DocumentRole)}
          >
            <InputSelect.Trigger placeholder="Document role" />
            <InputSelect.Content>
              {DOCUMENT_ROLES.map((role) => (
                <InputSelect.Item key={role.value} value={role.value}>
                  {role.label}
                </InputSelect.Item>
              ))}
            </InputSelect.Content>
          </InputSelect>
        </div>
        <Button
          variant="default"
          prominence="secondary"
          icon={SvgUploadCloud}
          disabled={isUploading}
          onClick={() => fileInputRef.current?.click()}
        >
          {isUploading ? "Uploading..." : "Upload"}
        </Button>
      </div>
      {/* Hidden file input; the Upload button above proxies clicks to it. */}
      <input
        ref={fileInputRef}
        type="file"
        className="hidden"
        accept=".pdf,.docx,.xlsx,.html,.txt"
        onChange={onFileChosen}
      />
      {uploadError && (
        <Text font="secondary-body" color="text-03">
          {uploadError}
        </Text>
      )}
    </div>
  );
}

View File

@@ -1,272 +0,0 @@
"use client";
import { useState, useCallback } from "react";
import { Button, Tag, Text, Card } from "@opal/components";
import {
SvgCheckCircle,
SvgAlertTriangle,
SvgAlertCircle,
SvgShield,
} from "@opal/icons";
import { cn } from "@/lib/utils";
import { Section } from "@/layouts/general-layouts";
import InputTextArea from "@/refresh-components/inputs/InputTextArea";
import { toast } from "@/hooks/useToast";
import { submitFindingDecision } from "@/app/proposal-review/services/apiServices";
import type {
Finding,
FindingVerdict,
DecisionAction,
} from "@/app/proposal-review/types";
import type { TagColor } from "@opal/components";
// ---------------------------------------------------------------------------
// Verdict → Tag mapping
// ---------------------------------------------------------------------------
// Maps each finding verdict to the Tag color and label used to render it.
// FAIL and FLAG intentionally share the amber treatment.
const VERDICT_CONFIG: Record<
  FindingVerdict,
  { color: TagColor; label: string }
> = {
  PASS: { color: "green", label: "Pass" },
  FAIL: { color: "amber", label: "Fail" },
  FLAG: { color: "amber", label: "Flag" },
  NEEDS_REVIEW: { color: "blue", label: "Needs Review" },
  NOT_APPLICABLE: { color: "gray", label: "N/A" },
};
// ---------------------------------------------------------------------------
// Props
// ---------------------------------------------------------------------------
interface FindingCardProps {
finding: Finding;
onDecisionSaved: () => void;
}
// ---------------------------------------------------------------------------
// Component
// ---------------------------------------------------------------------------
/**
 * Card for a single rule finding.
 *
 * The header row (verdict tag + rule name) toggles an expandable body that
 * shows the explanation, quoted evidence, suggested action and — for
 * actionable or needs-review verdicts — decision buttons plus an optional
 * notes field. Decisions and notes are persisted immediately via
 * `submitFindingDecision`; `onDecisionSaved` lets the parent revalidate.
 */
export default function FindingCard({
  finding,
  onDecisionSaved,
}: FindingCardProps) {
  const {
    rule_name,
    rule_is_hard_stop,
    verdict,
    explanation,
    evidence,
    suggested_action,
    decision,
  } = finding;
  // Verdict groupings drive default expansion and which controls are shown.
  const isActionable = verdict === "FAIL" || verdict === "FLAG";
  const isNeedsReview = verdict === "NEEDS_REVIEW";
  const isPass = verdict === "PASS" || verdict === "NOT_APPLICABLE";
  // Default expansion: FAIL/FLAG/NEEDS_REVIEW expanded, PASS collapsed
  const [isExpanded, setIsExpanded] = useState(!isPass);
  // Local state is seeded from any previously saved decision on the finding.
  const [notes, setNotes] = useState(decision?.notes ?? "");
  const [currentAction, setCurrentAction] = useState<DecisionAction | null>(
    decision?.action ?? null
  );
  const [isSaving, setIsSaving] = useState(false);
  const verdictConfig = VERDICT_CONFIG[verdict];
  // Persist a decision action (with the current notes) for this finding.
  // Empty notes are sent as undefined rather than "".
  const handleDecision = useCallback(
    async (action: DecisionAction) => {
      setIsSaving(true);
      try {
        await submitFindingDecision(finding.id, action, notes || undefined);
        setCurrentAction(action);
        onDecisionSaved();
      } catch (err) {
        toast.error(
          err instanceof Error ? err.message : "Failed to save finding decision"
        );
      } finally {
        setIsSaving(false);
      }
    },
    [finding.id, notes, onDecisionSaved]
  );
  // Re-save notes when the textarea loses focus, but only if an action has
  // already been chosen and the notes differ from what was last persisted.
  const handleNotesBlur = useCallback(async () => {
    if (currentAction && notes !== (decision?.notes ?? "")) {
      setIsSaving(true);
      try {
        await submitFindingDecision(
          finding.id,
          currentAction,
          notes || undefined
        );
        onDecisionSaved();
      } catch (err) {
        toast.error(
          err instanceof Error ? err.message : "Failed to save notes"
        );
      } finally {
        setIsSaving(false);
      }
    }
  }, [currentAction, notes, decision?.notes, finding.id, onDecisionSaved]);
  return (
    <Card
      padding="md"
      border="solid"
      background={rule_is_hard_stop && isActionable ? "heavy" : "light"}
    >
      <Section
        gap={0.75}
        height="auto"
        justifyContent="start"
        alignItems="start"
        className={cn(
          rule_is_hard_stop &&
            isActionable &&
            "border-l-2 border-status-error-03 pl-3"
        )}
      >
        {/* Header row: verdict tag + rule name; keyboard-accessible toggle */}
        <div
          role="button"
          tabIndex={0}
          className="flex items-center gap-2 text-left w-full cursor-pointer"
          onClick={() => setIsExpanded((prev) => !prev)}
          onKeyDown={(e) => {
            if (e.key === "Enter" || e.key === " ") {
              e.preventDefault();
              setIsExpanded((prev) => !prev);
            }
          }}
        >
          <Tag title={verdictConfig.label} color={verdictConfig.color} />
          <Text font="main-ui-action" color="text-04" as="span">
            {rule_name ?? "Unnamed Rule"}
          </Text>
          {rule_is_hard_stop && isActionable && (
            <div className="flex items-center gap-1 pl-2">
              <SvgShield className="h-4 w-4 text-status-error-03" />
              <Text font="secondary-body" color="text-03">
                Hard Stop
              </Text>
            </div>
          )}
        </div>
        {/* Expanded content */}
        {isExpanded && (
          <Section
            gap={0.75}
            height="auto"
            justifyContent="start"
            alignItems="start"
            className="pl-2"
          >
            {/* Explanation */}
            {explanation && (
              <Text font="main-ui-body" color="text-03" as="p">
                {explanation}
              </Text>
            )}
            {/* Evidence, rendered as a quoted excerpt */}
            {evidence && (
              <Card padding="sm" rounding="sm" background="heavy">
                <Text font="secondary-body" color="text-03" as="p">
                  Evidence:
                </Text>
                <Text font="main-ui-body" color="text-03" as="p">
                  {`\u201C${evidence}\u201D`}
                </Text>
              </Card>
            )}
            {/* Suggested action */}
            {suggested_action && (
              <div className="flex items-start gap-2">
                <SvgAlertCircle className="h-4 w-4 text-status-warning-03 shrink-0 mt-0.5" />
                <Text font="secondary-body" color="text-03" as="p">
                  {suggested_action}
                </Text>
              </div>
            )}
            {/* Action buttons + notes */}
            {(isActionable || isNeedsReview) && (
              <Section
                gap={0.5}
                height="auto"
                justifyContent="start"
                alignItems="start"
                className="pt-2 border-t border-border-01"
              >
                <Section
                  flexDirection="row"
                  gap={0.5}
                  height="auto"
                  justifyContent="start"
                  alignItems="center"
                >
                  <Button
                    variant={
                      currentAction === "VERIFIED" ? "action" : "default"
                    }
                    prominence={
                      currentAction === "VERIFIED" ? "primary" : "secondary"
                    }
                    size="sm"
                    icon={SvgCheckCircle}
                    disabled={isSaving}
                    onClick={() => handleDecision("VERIFIED")}
                  >
                    Verify
                  </Button>
                  <Button
                    variant={currentAction === "ISSUE" ? "danger" : "default"}
                    prominence={
                      currentAction === "ISSUE" ? "primary" : "secondary"
                    }
                    size="sm"
                    icon={SvgAlertTriangle}
                    disabled={isSaving}
                    onClick={() => handleDecision("ISSUE")}
                  >
                    Issue
                  </Button>
                  <Button
                    variant={
                      currentAction === "NOT_APPLICABLE" ? "action" : "default"
                    }
                    prominence={
                      currentAction === "NOT_APPLICABLE"
                        ? "primary"
                        : "secondary"
                    }
                    size="sm"
                    disabled={isSaving}
                    onClick={() => handleDecision("NOT_APPLICABLE")}
                  >
                    N/A
                  </Button>
                </Section>
                <InputTextArea
                  placeholder="Notes (optional)"
                  value={notes}
                  onChange={(e) => setNotes(e.target.value)}
                  onBlur={handleNotesBlur}
                  rows={2}
                />
              </Section>
            )}
          </Section>
        )}
      </Section>
    </Card>
  );
}

View File

@@ -1,218 +0,0 @@
"use client";
import { useState } from "react";
import useSWR from "swr";
import { Text, Card, Tag } from "@opal/components";
import { Button } from "@opal/components/buttons/button/components";
import { SvgExternalLink, SvgFileText, SvgX } from "@opal/icons";
import { errorHandlingFetcher } from "@/lib/fetcher";
import { IllustrationContent } from "@opal/layouts";
import SvgEmpty from "@opal/illustrations/empty";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import DocumentUpload from "@/app/proposal-review/components/DocumentUpload";
import type {
Proposal,
ProposalDocument,
ProposalStatus,
} from "@/app/proposal-review/types";
import type { TagColor } from "@opal/components";
// ---------------------------------------------------------------------------
// Status → Tag
// ---------------------------------------------------------------------------
/** Maps a proposal's review status to the Tag color/label shown in the details header. */
const STATUS_TAG: Record<ProposalStatus, { color: TagColor; label: string }> = {
  PENDING: { color: "gray", label: "Pending" },
  IN_REVIEW: { color: "blue", label: "In Review" },
  APPROVED: { color: "green", label: "Approved" },
  CHANGES_REQUESTED: { color: "amber", label: "Changes Requested" },
  REJECTED: { color: "amber", label: "Rejected" },
};
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
interface MetadataRowProps {
  // Field name shown on the left.
  label: string;
  // Field value; arrays are joined with ", ". The row renders nothing when empty.
  value: string | string[] | undefined;
}
/** A single label/value line in the metadata list; hidden entirely when the value is empty. */
function MetadataRow({ label, value }: MetadataRowProps) {
  if (!value) {
    return null;
  }
  let rendered: string;
  if (Array.isArray(value)) {
    rendered = value.join(", ");
  } else {
    rendered = value;
  }
  return (
    <div className="flex justify-between gap-2 py-1">
      <Text font="secondary-body" color="text-03" nowrap>
        {label}
      </Text>
      <Text font="main-ui-body" color="text-04" as="span">
        {rendered}
      </Text>
    </div>
  );
}
// ---------------------------------------------------------------------------
// Props
// ---------------------------------------------------------------------------
interface ProposalInfoPanelProps {
  // Proposal whose metadata, status and documents are displayed.
  proposal: Proposal;
}
// ---------------------------------------------------------------------------
// Component
// ---------------------------------------------------------------------------
/**
 * Left-hand panel on the proposal review page.
 *
 * Shows proposal metadata (with a Jira link when one is available), the list
 * of uploaded documents with an inline extracted-text viewer, and an upload
 * control that revalidates the document list once an upload completes.
 */
export default function ProposalInfoPanel({
  proposal,
}: ProposalInfoPanelProps) {
  const { metadata, status, id: proposalId } = proposal;
  const statusConfig = STATUS_TAG[status];
  // Document currently open in the text viewer; clicking it again closes it.
  const [selectedDoc, setSelectedDoc] = useState<ProposalDocument | null>(null);
  // Fetch documents for this proposal; mutateDocs revalidates after uploads.
  const {
    data: documents,
    isLoading: docsLoading,
    mutate: mutateDocs,
  } = useSWR<ProposalDocument[]>(
    `/api/proposal-review/proposals/${proposalId}/documents`,
    errorHandlingFetcher
  );
  return (
    <div className="flex flex-col gap-4 h-full overflow-y-auto p-4">
      {/* Proposal metadata card */}
      <Card padding="md" border="solid" background="light">
        <div className="flex flex-col gap-3">
          <div className="flex items-center justify-between">
            <Text font="main-ui-action" color="text-04">
              Proposal Details
            </Text>
            <Tag title={statusConfig.label} color={statusConfig.color} />
          </div>
          <div className="flex flex-col">
            {/* Jira key — link out if URL available */}
            {metadata.jira_key && (
              <div className="flex justify-between gap-2 py-1">
                <Text font="secondary-body" color="text-03" nowrap>
                  Jira Key
                </Text>
                {metadata.link ? (
                  <a
                    href={metadata.link}
                    target="_blank"
                    rel="noopener noreferrer"
                    className="flex items-center gap-1 text-action-link-01 hover:text-action-link-02"
                  >
                    <Text font="main-ui-body" color="inherit" as="span">
                      {metadata.jira_key}
                    </Text>
                    <SvgExternalLink className="h-3 w-3" />
                  </a>
                ) : (
                  <Text font="main-ui-body" color="text-04" as="span">
                    {metadata.jira_key}
                  </Text>
                )}
              </div>
            )}
            {/* Remaining metadata fields, minus the ones handled above */}
            {Object.entries(metadata)
              .filter(
                ([key]) =>
                  key !== "title" && key !== "link" && key !== "jira_key"
              )
              .map(([key, value]) => (
                <MetadataRow key={key} label={key} value={value} />
              ))}
          </div>
        </div>
      </Card>
      {/* Documents section */}
      <Card padding="md" border="solid" background="light">
        <div className="flex flex-col gap-3">
          <Text font="main-ui-action" color="text-04">
            Documents
          </Text>
          {docsLoading && (
            <div className="flex items-center justify-center py-4">
              <SimpleLoader />
            </div>
          )}
          {!docsLoading && (!documents || documents.length === 0) && (
            <IllustrationContent
              illustration={SvgEmpty}
              title="No documents"
              description="Upload a document to get started."
            />
          )}
          {documents && documents.length > 0 && (
            <div className="flex flex-col gap-1">
              {documents.map((doc) => (
                <div
                  key={doc.id}
                  className="flex items-center gap-2 py-2 px-2 rounded-08 hover:bg-background-neutral-02 cursor-pointer"
                  onClick={() =>
                    setSelectedDoc(selectedDoc?.id === doc.id ? null : doc)
                  }
                >
                  <SvgFileText className="h-4 w-4 text-text-03 shrink-0" />
                  <div className="flex-1 min-w-0">
                    <Text font="main-ui-body" color="text-04" nowrap>
                      {doc.file_name}
                    </Text>
                  </div>
                  <Tag title={doc.document_role} color="gray" size="sm" />
                </div>
              ))}
            </div>
          )}
          {/* Document text viewer */}
          {selectedDoc && (
            <Card padding="md" border="dashed" background="light">
              <div className="flex flex-col gap-2">
                <div className="flex items-center justify-between">
                  <Text font="secondary-action" color="text-03">
                    {selectedDoc.file_name}
                  </Text>
                  <Button
                    variant="default"
                    prominence="tertiary"
                    size="xs"
                    icon={SvgX}
                    onClick={() => setSelectedDoc(null)}
                  />
                </div>
                <div className="max-h-[300px] overflow-y-auto rounded-08 bg-background-neutral-01 p-3">
                  {selectedDoc.extracted_text ? (
                    <Text font="secondary-mono" color="text-03" as="p">
                      {selectedDoc.extracted_text}
                    </Text>
                  ) : (
                    <Text font="secondary-body" color="text-03" as="p">
                      No extracted text available for this document.
                    </Text>
                  )}
                </div>
              </div>
            </Card>
          )}
          <DocumentUpload
            proposalId={proposalId}
            onUploadComplete={() => mutateDocs()}
          />
        </div>
      </Card>
    </div>
  );
}

View File

@@ -1,382 +0,0 @@
"use client";
import { useState, useMemo, useCallback } from "react";
import { useRouter } from "next/navigation";
import { Text, Tag, Table } from "@opal/components";
import { createTableColumns } from "@opal/components/table/columns";
import { IllustrationContent } from "@opal/layouts";
import SvgNoResult from "@opal/illustrations/no-result";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import InputSearch from "@/refresh-components/inputs/InputSearch";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import { Button } from "@opal/components/buttons/button/components";
import { useProposals } from "@/app/proposal-review/hooks/useProposals";
import type { Proposal, ProposalStatus } from "@/app/proposal-review/types";
import type { TagColor } from "@opal/components";
// ---------------------------------------------------------------------------
// Status configuration
// ---------------------------------------------------------------------------
/** Maps a proposal's review status to the Tag color/label used in the "Review" column. */
const STATUS_TAG: Record<ProposalStatus, { color: TagColor; label: string }> = {
  PENDING: { color: "gray", label: "Pending" },
  IN_REVIEW: { color: "blue", label: "In Review" },
  APPROVED: { color: "green", label: "Approved" },
  CHANGES_REQUESTED: { color: "amber", label: "Changes Requested" },
  REJECTED: { color: "amber", label: "Rejected" },
};
// Options for the "Review" status filter dropdown ("ALL" disables the filter).
const STATUS_OPTIONS: { value: string; label: string }[] = [
  { value: "ALL", label: "All statuses" },
  { value: "PENDING", label: "Pending" },
  { value: "IN_REVIEW", label: "In Review" },
  { value: "APPROVED", label: "Approved" },
  { value: "CHANGES_REQUESTED", label: "Changes Requested" },
  { value: "REJECTED", label: "Rejected" },
];
// Keys that are used for fixed columns or are internal — not shown as dynamic columns
const RESERVED_KEYS = new Set([
  "jira_key",
  "title",
  "link",
  "key",
  "status",
  "project",
  "project_name",
  "issuetype",
  "priority",
  "created",
  "updated",
  "reporter",
  "reporter_email",
  "Rank",
  "resolution",
  "resolution_date",
  "[CHART] Time in Status",
]);
// Jira statuses that mean "finished" — excluded by the default "Open" filter
const DONE_STATUSES = new Set(["Done", "Closed", "Resolved"]);
// Keys to show by default when no prior column visibility state exists
const DEFAULT_VISIBLE_KEYS = new Set([
  "PI Name",
  "Sponsor",
  "Sponsor Deadline",
  "Review Team",
]);
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/** True when the string begins with an ISO-style date prefix (YYYY-MM-DD). */
function isDateLike(value: string): boolean {
  const isoDatePrefix = /^\d{4}-\d{2}-\d{2}/;
  return isoDatePrefix.test(value);
}
/** Render a metadata value as display text: arrays become comma lists, missing values "--". */
function formatCellValue(value: string | string[] | undefined): string {
  // Loose equality covers both undefined and null in one check.
  if (value == null) {
    return "--";
  }
  return Array.isArray(value) ? value.join(", ") : String(value);
}
// ---------------------------------------------------------------------------
// Component
// ---------------------------------------------------------------------------
// Typed column-builder for the Proposal table, created once at module scope.
const tc = createTableColumns<Proposal>();
/**
 * Queue view listing all proposals in a filterable, searchable table.
 *
 * Columns are partly fixed (Jira Key, Title, Review status) and partly
 * discovered at runtime from the union of metadata keys across proposals.
 * Column visibility is persisted to localStorage. Clicking a row navigates
 * to that proposal's review page.
 */
export default function ProposalQueue() {
  const router = useRouter();
  const { proposals, isLoading, error, configMissing } = useProposals();
  // "ALL" disables the review filter; "OPEN" (the default) hides done Jira tickets.
  const [reviewFilter, setReviewFilter] = useState("ALL");
  const [jiraStatusFilter, setJiraStatusFilter] = useState("OPEN");
  const [searchQuery, setSearchQuery] = useState("");
  // Discover unique Jira ticket statuses from the data
  const jiraStatuses = useMemo(() => {
    const statuses = new Set<string>();
    for (const p of proposals) {
      const s = p.metadata.status;
      if (typeof s === "string" && s) statuses.add(s);
    }
    return Array.from(statuses).sort();
  }, [proposals]);
  // Discover all unique metadata keys across proposals (excluding reserved ones)
  const dynamicKeys = useMemo(() => {
    const keys = new Set<string>();
    for (const p of proposals) {
      for (const k of Object.keys(p.metadata)) {
        if (!RESERVED_KEYS.has(k)) {
          keys.add(k);
        }
      }
    }
    return Array.from(keys).sort();
  }, [proposals]);
  // Build columns: fixed (Jira Key, Title) + dynamic + fixed (Status) + actions
  const columns = useMemo(() => {
    const cols = [
      tc.displayColumn({
        id: "jira_key",
        header: "Jira Key",
        width: { weight: 10, minWidth: 100 },
        cell: (row) => (
          <Text font="main-ui-body" color="text-04" nowrap>
            {row.metadata.jira_key ?? "--"}
          </Text>
        ),
      }),
      tc.displayColumn({
        id: "title",
        header: "Title",
        width: { weight: 25, minWidth: 150 },
        cell: (row) => (
          <Text font="main-ui-body" color="text-04">
            {row.metadata.title ?? "Untitled"}
          </Text>
        ),
      }),
      // Dynamic metadata columns
      ...dynamicKeys.map((key) =>
        tc.displayColumn({
          id: `meta_${key}`,
          header: key,
          width: { weight: 12, minWidth: 100 },
          cell: (row) => {
            const value = row.metadata[key];
            // Render dates with locale formatting.
            // NOTE(review): date-only "YYYY-MM-DD" strings parse as UTC
            // midnight, so toLocaleDateString can show the previous day in
            // timezones behind UTC — confirm this is acceptable.
            if (typeof value === "string" && isDateLike(value)) {
              return (
                <Text font="main-ui-body" color="text-03" nowrap>
                  {new Date(value).toLocaleDateString()}
                </Text>
              );
            }
            return (
              <Text font="main-ui-body" color="text-03" nowrap>
                {formatCellValue(value)}
              </Text>
            );
          },
        })
      ),
      tc.displayColumn({
        id: "review_status",
        header: "Review",
        width: { weight: 10, minWidth: 120 },
        cell: (row) => {
          const statusConfig = STATUS_TAG[row.status];
          return (
            <Tag
              title={statusConfig.label}
              color={statusConfig.color}
              size="sm"
            />
          );
        },
      }),
      tc.actions({ showColumnVisibility: true }),
    ];
    return cols;
  }, [dynamicKeys]);
  // Load saved visibility from localStorage, falling back to defaults
  const STORAGE_KEY = "argus-queue-columns";
  const initialColumnVisibility = useMemo(() => {
    try {
      const saved = localStorage.getItem(STORAGE_KEY);
      if (saved) return JSON.parse(saved) as Record<string, boolean>;
    } catch {
      // ignore parse errors
    }
    // Default: show DEFAULT_VISIBLE_KEYS, hide the rest
    const vis: Record<string, boolean> = {};
    for (const key of dynamicKeys) {
      vis[`meta_${key}`] = DEFAULT_VISIBLE_KEYS.has(key);
    }
    return vis;
  }, [dynamicKeys]);
  // Persist visibility whenever the user toggles a column.
  const handleColumnVisibilityChange = useCallback(
    (visibility: Record<string, boolean>) => {
      try {
        localStorage.setItem(STORAGE_KEY, JSON.stringify(visibility));
      } catch {
        // localStorage full or unavailable — silently ignore
      }
    },
    []
  );
  // Filter proposals: Jira ticket status, then review status, then free-text search.
  const filteredProposals = useMemo(() => {
    let result = proposals;
    // Jira ticket status filter
    if (jiraStatusFilter === "OPEN") {
      result = result.filter((p) => {
        const s =
          typeof p.metadata.status === "string" ? p.metadata.status : "";
        return !DONE_STATUSES.has(s);
      });
    } else if (jiraStatusFilter !== "ALL") {
      result = result.filter((p) => p.metadata.status === jiraStatusFilter);
    }
    // Review status filter
    if (reviewFilter !== "ALL") {
      result = result.filter((p) => p.status === reviewFilter);
    }
    // Search filter — case-insensitive substring match across all metadata values
    if (searchQuery.trim()) {
      const q = searchQuery.toLowerCase();
      result = result.filter((p) => {
        const m = p.metadata;
        return Object.values(m).some((v) => {
          if (!v) return false;
          const str = Array.isArray(v) ? v.join(" ") : String(v);
          return str.toLowerCase().includes(q);
        });
      });
    }
    return result;
  }, [proposals, jiraStatusFilter, reviewFilter, searchQuery]);
  // Navigate to the review page for the clicked proposal.
  function handleRowClick(proposal: Proposal) {
    router.push(`/proposal-review/proposals/${proposal.id}`);
  }
  if (isLoading) {
    return (
      <div className="flex items-center justify-center py-16">
        <SimpleLoader className="h-8 w-8" />
      </div>
    );
  }
  if (error) {
    return (
      <div className="flex items-center justify-center py-16 px-4">
        <IllustrationContent
          illustration={SvgNoResult}
          title="Failed to load proposals"
          description="Please try refreshing the page."
        />
      </div>
    );
  }
  return (
    <div className="flex flex-col gap-4">
      {/* Filters row */}
      <div className="flex items-center gap-4 flex-nowrap">
        <div className="w-[280px] shrink-0">
          <InputSearch
            placeholder="Search proposals..."
            value={searchQuery}
            onChange={(e) => setSearchQuery(e.target.value)}
          />
        </div>
        <div className="flex items-center gap-2 shrink-0">
          <Text font="secondary-action" color="text-03">
            Ticket:
          </Text>
          <InputSelect
            value={jiraStatusFilter}
            onValueChange={setJiraStatusFilter}
          >
            <InputSelect.Trigger placeholder="Ticket Status" />
            <InputSelect.Content>
              <InputSelect.Group>
                <InputSelect.Item value="ALL">All</InputSelect.Item>
                <InputSelect.Item value="OPEN">Open</InputSelect.Item>
              </InputSelect.Group>
              <InputSelect.Separator />
              <InputSelect.Group>
                <InputSelect.Label>Jira Statuses</InputSelect.Label>
                {jiraStatuses.map((s) => (
                  <InputSelect.Item key={s} value={s}>
                    {s}
                  </InputSelect.Item>
                ))}
              </InputSelect.Group>
            </InputSelect.Content>
          </InputSelect>
        </div>
        <div className="flex items-center gap-2 shrink-0">
          <Text font="secondary-action" color="text-03">
            Review:
          </Text>
          <InputSelect value={reviewFilter} onValueChange={setReviewFilter}>
            <InputSelect.Trigger placeholder="Review Status" />
            <InputSelect.Content>
              {STATUS_OPTIONS.map((opt) => (
                <InputSelect.Item key={opt.value} value={opt.value}>
                  {opt.label}
                </InputSelect.Item>
              ))}
            </InputSelect.Content>
          </InputSelect>
        </div>
      </div>
      {/* Empty state — config missing */}
      {filteredProposals.length === 0 && configMissing && (
        <div className="flex flex-col items-center justify-center gap-4 py-12">
          <IllustrationContent
            illustration={SvgNoResult}
            title="No proposals yet"
            description="Configure a Jira connector in Settings to start seeing proposals."
          />
          <Button
            href="/admin/proposal-review/settings"
            variant="default"
            prominence="primary"
          >
            Go to Settings
          </Button>
        </div>
      )}
      {/* Empty state — filtered or no data */}
      {filteredProposals.length === 0 && !configMissing && (
        <div className="flex items-center justify-center py-12">
          <IllustrationContent
            illustration={SvgNoResult}
            title="No proposals found"
            description={
              searchQuery ||
              reviewFilter !== "ALL" ||
              jiraStatusFilter !== "OPEN"
                ? "Try adjusting your search or filters."
                : "Proposals from Jira will appear here once synced."
            }
          />
        </div>
      )}
      {/* Table — wrapper adds pointer cursor since onRowClick doesn't set it */}
      {filteredProposals.length > 0 && (
        <div className="[&_.tbl-row]:cursor-pointer [&_.tbl-row:hover_td]:bg-background-tint-02">
          <Table
            key={dynamicKeys.join(",")}
            data={filteredProposals}
            getRowId={(row) => row.id}
            columns={columns}
            initialColumnVisibility={initialColumnVisibility}
            onColumnVisibilityChange={handleColumnVisibilityChange}
            onRowClick={(row) => handleRowClick(row)}
          />
        </div>
      )}
    </div>
  );
}

View File

@@ -1,108 +0,0 @@
"use client";
import { useCallback } from "react";
import { Text, Button } from "@opal/components";
import { SvgArrowLeft } from "@opal/icons";
import { IllustrationContent } from "@opal/layouts";
import SvgNotFound from "@opal/illustrations/not-found";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import { useProposal } from "@/app/proposal-review/hooks/useProposal";
import { useProposalReviewContext } from "@/app/proposal-review/contexts/ProposalReviewContext";
import ProposalInfoPanel from "@/app/proposal-review/components/ProposalInfoPanel";
import ChecklistPanel from "@/app/proposal-review/components/ChecklistPanel";
import ReviewSidebar from "@/app/proposal-review/components/ReviewSidebar";
// ---------------------------------------------------------------------------
// Props
// ---------------------------------------------------------------------------
interface ProposalReviewProps {
  // ID of the proposal being reviewed; used to fetch the proposal and drive the panels.
  proposalId: string;
}
// ---------------------------------------------------------------------------
// Component
// ---------------------------------------------------------------------------
/**
 * Full-page review screen for a single proposal.
 *
 * Renders a header plus a three-panel layout: proposal info (left), rule
 * checklist (center), and review summary/decision sidebar (right). Loading
 * and not-found states are handled before the panels render.
 */
export default function ProposalReview({ proposalId }: ProposalReviewProps) {
  const { proposal, isLoading, error, mutate } = useProposal(proposalId);
  useProposalReviewContext(); // ensure we're inside the provider
  // Refetch the proposal after a decision so the header/status stay current.
  const handleDecisionSubmitted = useCallback(() => {
    mutate();
  }, [mutate]);
  // --- Loading ---
  if (isLoading) {
    return (
      <div className="flex items-center justify-center h-full w-full">
        <SimpleLoader className="h-8 w-8" />
      </div>
    );
  }
  // --- Error / not found ---
  if (error || !proposal) {
    return (
      <div className="flex flex-col items-center justify-center h-full w-full gap-4 p-8">
        <IllustrationContent
          illustration={SvgNotFound}
          title="Proposal not found"
          description="This proposal may have been removed or you may not have access."
        />
        <Button
          variant="default"
          prominence="secondary"
          href="/proposal-review"
        >
          Back to queue
        </Button>
      </div>
    );
  }
  return (
    <div className="flex flex-col h-full w-full">
      {/* Top header: back button, title, Jira key */}
      <div className="flex items-center gap-3 px-4 py-3 border-b border-border-01 shrink-0">
        <Button
          variant="default"
          prominence="tertiary"
          icon={SvgArrowLeft}
          size="sm"
          href="/proposal-review"
        />
        <Text font="main-ui-action" color="text-04">
          {proposal.metadata.title ?? "Untitled Proposal"}
        </Text>
        {proposal.metadata.jira_key && (
          <Text font="secondary-body" color="text-03">
            {proposal.metadata.jira_key}
          </Text>
        )}
      </div>
      {/* Three-panel layout */}
      <div className="flex flex-1 min-h-0">
        {/* Left panel: Proposal info */}
        <div className="w-[300px] shrink-0 border-r border-border-01 overflow-y-auto">
          <ProposalInfoPanel proposal={proposal} />
        </div>
        {/* Center panel: Checklist */}
        <div className="flex-1 min-w-0 overflow-hidden">
          <ChecklistPanel proposalId={proposalId} />
        </div>
        {/* Right panel: Review sidebar */}
        <div className="w-[320px] shrink-0 border-l border-border-01 overflow-y-auto">
          <ReviewSidebar
            proposalId={proposalId}
            onDecisionSubmitted={handleDecisionSubmitted}
          />
        </div>
      </div>
    </div>
  );
}

View File

@@ -1,67 +0,0 @@
"use client";
import { memo } from "react";
import { usePathname } from "next/navigation";
import { SvgArrowLeft, SvgCheckSquare, SvgSettings } from "@opal/icons";
import { SidebarTab } from "@opal/components";
import * as SidebarLayouts from "@/layouts/sidebar-layouts";
import { useSidebarState, useSidebarFolded } from "@/layouts/sidebar-layouts";
import AccountPopover from "@/sections/sidebar/AccountPopover";
// ============================================================================
// Sidebar Content
// ============================================================================
/**
 * Memoized sidebar body: navigation tabs plus footer links and the account
 * popover. Memoized so fold-state changes in the parent don't re-render it.
 */
const MemoizedSidebarContent = memo(function ProposalReviewSidebarContent() {
  const pathname = usePathname();
  const folded = useSidebarFolded();
  // Keep "Proposals" highlighted on both the queue and individual proposal pages.
  const isProposalsActive =
    pathname === "/proposal-review" ||
    pathname.startsWith("/proposal-review/proposals");
  return (
    <>
      <SidebarLayouts.Body scrollKey="proposal-review-sidebar">
        <div className="flex flex-col gap-0.5">
          <SidebarTab
            icon={SvgCheckSquare}
            folded={folded}
            href="/proposal-review"
            selected={isProposalsActive}
          >
            Proposals
          </SidebarTab>
        </div>
      </SidebarLayouts.Body>
      <SidebarLayouts.Footer>
        <SidebarTab
          icon={SvgSettings}
          folded={folded}
          href="/admin/proposal-review"
          selected={pathname.startsWith("/admin/proposal-review")}
        >
          Settings
        </SidebarTab>
        <SidebarTab icon={SvgArrowLeft} folded={folded} href="/app">
          Back to Onyx
        </SidebarTab>
        <AccountPopover folded={folded} />
      </SidebarLayouts.Footer>
    </>
  );
});
// ============================================================================
// Sidebar (Main Export)
// ============================================================================
export default function ProposalReviewSidebar() {
const { folded, setFolded } = useSidebarState();
return (
<SidebarLayouts.Root folded={folded} onFoldChange={setFolded} foldable>
<MemoizedSidebarContent />
</SidebarLayouts.Root>
);
}

View File

@@ -1,49 +0,0 @@
"use client";
import { Text } from "@opal/components";
import { cn } from "@/lib/utils";
import type { ReviewRun } from "@/app/proposal-review/types";
interface ReviewProgressProps {
  // Current run state: status plus completed/total rule counts.
  reviewStatus: ReviewRun;
}
/**
 * Linear progress indicator for an in-flight (or failed) review run.
 *
 * Shows completed/total rule counts and a colored bar: red on failure,
 * green when complete, theme color while still running.
 */
export default function ReviewProgress({ reviewStatus }: ReviewProgressProps) {
  const { total_rules, completed_rules, status } = reviewStatus;
  const isFailed = status === "FAILED";
  // Guard against division by zero when no rules have been registered yet.
  const percent =
    total_rules > 0 ? Math.round((completed_rules / total_rules) * 100) : 0;
  // Resolve the bar color once instead of nesting ternaries inside cn().
  let barColor = "bg-theme-primary-03";
  if (isFailed) {
    barColor = "bg-status-error-03";
  } else if (percent === 100) {
    barColor = "bg-status-success-03";
  }
  return (
    <div className="flex flex-col gap-2 p-4">
      <div className="flex items-center justify-between">
        <Text font="main-ui-action" color="text-04">
          {isFailed ? "Review failed" : "Evaluating rules..."}
        </Text>
        <Text font="secondary-body" color="text-03">
          {`${completed_rules} / ${total_rules} rules`}
        </Text>
      </div>
      <div className="h-2 w-full rounded-08 bg-background-neutral-03 overflow-hidden">
        <div
          className={cn(
            "h-full rounded-08 transition-all duration-300",
            barColor
          )}
          style={{ width: `${percent}%` }}
        />
      </div>
      {isFailed && (
        <Text font="secondary-body" color="text-03">
          The review encountered an error. Please try again.
        </Text>
      )}
    </div>
  );
}

View File

@@ -1,372 +0,0 @@
"use client";
import { useMemo } from "react";
import useSWR from "swr";
import { Text, Tag, Card } from "@opal/components";
import {
SvgAlertCircle,
SvgCheckCircle,
SvgAlertTriangle,
SvgShield,
} from "@opal/icons";
import { Section } from "@/layouts/general-layouts";
import { errorHandlingFetcher } from "@/lib/fetcher";
import { useFindings } from "@/app/proposal-review/hooks/useFindings";
import DecisionPanel from "@/app/proposal-review/components/DecisionPanel";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import type {
Finding,
FindingsByCategory,
AuditLogEntry,
} from "@/app/proposal-review/types";
// ---------------------------------------------------------------------------
// Props
// ---------------------------------------------------------------------------
interface ReviewSidebarProps {
  // Proposal whose findings and audit log are summarized.
  proposalId: string;
  // Called after an overall decision is submitted so the parent can refetch.
  onDecisionSubmitted: () => void;
}
// ---------------------------------------------------------------------------
// Component
// ---------------------------------------------------------------------------
/**
 * Right-hand panel on the proposal review page.
 *
 * Summarizes finding verdict counts, per-category decision progress,
 * hard-stop findings, unresolved FAIL/FLAG findings and the audit trail,
 * then renders the overall DecisionPanel at the bottom.
 */
export default function ReviewSidebar({
  proposalId,
  onDecisionSubmitted,
}: ReviewSidebarProps) {
  const { findings, findingsByCategory } = useFindings(proposalId);
  const { data: auditLog, isLoading: auditLoading } = useSWR<AuditLogEntry[]>(
    `/api/proposal-review/proposals/${proposalId}/audit-log`,
    errorHandlingFetcher
  );
  // Derived counts over the findings list, recomputed only when findings change.
  const stats = useMemo(() => {
    const failCount = findings.filter((f) => f.verdict === "FAIL").length;
    const flagCount = findings.filter((f) => f.verdict === "FLAG").length;
    const passCount = findings.filter((f) => f.verdict === "PASS").length;
    const naCount = findings.filter(
      (f) => f.verdict === "NOT_APPLICABLE"
    ).length;
    const needsReviewCount = findings.filter(
      (f) => f.verdict === "NEEDS_REVIEW"
    ).length;
    // Hard stops: hard-stop rules whose verdict is actionable (FAIL/FLAG).
    const hardStops = findings.filter(
      (f) =>
        f.rule_is_hard_stop && (f.verdict === "FAIL" || f.verdict === "FLAG")
    );
    // Unresolved: actionable findings with no reviewer decision yet.
    const unresolvedFindings = findings.filter(
      (f) => (f.verdict === "FAIL" || f.verdict === "FLAG") && !f.decision
    );
    return {
      failCount,
      flagCount,
      passCount,
      naCount,
      needsReviewCount,
      hardStops,
      unresolvedFindings,
      total: findings.length,
    };
  }, [findings]);
  // No findings yet — prompt the user to run a review.
  if (findings.length === 0) {
    return (
      <div className="flex items-center justify-center h-full p-4">
        <Text font="secondary-body" color="text-03">
          Run a review to see results here.
        </Text>
      </div>
    );
  }
  return (
    <div className="flex flex-col gap-4 h-full overflow-y-auto p-4">
      {/* Summary counts */}
      <Card padding="md" border="solid" background="light">
        <Section
          gap={0.5}
          height="auto"
          justifyContent="start"
          alignItems="start"
        >
          <Text font="main-ui-action" color="text-04">
            Summary
          </Text>
          <div className="grid grid-cols-3 gap-2">
            <SummaryCount
              icon={SvgAlertCircle}
              count={stats.failCount}
              label="Failures"
              iconClass="text-status-error-03"
            />
            <SummaryCount
              icon={SvgAlertTriangle}
              count={stats.flagCount}
              label="Flags"
              iconClass="text-status-warning-03"
            />
            <SummaryCount
              icon={SvgCheckCircle}
              count={stats.passCount}
              label="Passes"
              iconClass="text-status-success-03"
            />
          </div>
        </Section>
      </Card>
      {/* Progress by category */}
      <Card padding="md" border="solid" background="light">
        <Section
          gap={0.5}
          height="auto"
          justifyContent="start"
          alignItems="start"
        >
          <Text font="main-ui-action" color="text-04">
            Progress
          </Text>
          {findingsByCategory.map((group) => (
            <CategoryProgress key={group.category} group={group} />
          ))}
        </Section>
      </Card>
      {/* Hard stops */}
      {stats.hardStops.length > 0 && (
        <Card padding="md" border="solid" background="heavy">
          <Section
            gap={0.5}
            height="auto"
            justifyContent="start"
            alignItems="start"
          >
            <Section
              flexDirection="row"
              gap={0.5}
              height="auto"
              justifyContent="start"
              alignItems="center"
            >
              <SvgShield className="h-4 w-4 text-status-error-03" />
              <Text font="main-ui-action" color="text-04">
                {`Hard Stops (${stats.hardStops.length})`}
              </Text>
            </Section>
            {stats.hardStops.map((finding) => (
              <div key={finding.id} className="flex items-center gap-2 py-1">
                <Text font="secondary-body" color="text-03">
                  {finding.rule_name ?? "Unnamed Rule"}
                </Text>
                {finding.decision ? (
                  <Tag
                    title={finding.decision.action}
                    color={
                      finding.decision.action === "VERIFIED" ? "green" : "amber"
                    }
                    size="sm"
                  />
                ) : (
                  <Tag title="Unresolved" color="amber" size="sm" />
                )}
              </div>
            ))}
          </Section>
        </Card>
      )}
      {/* Open flags / unresolved items */}
      {stats.unresolvedFindings.length > 0 && (
        <Card padding="md" border="solid" background="light">
          <Section
            gap={0.5}
            height="auto"
            justifyContent="start"
            alignItems="start"
          >
            <Text font="main-ui-action" color="text-04">
              {`Unresolved (${stats.unresolvedFindings.length})`}
            </Text>
            {stats.unresolvedFindings.map((finding) => (
              <div
                key={finding.id}
                className="flex items-center gap-2 py-1 px-2 rounded-08 hover:bg-background-neutral-02 cursor-pointer"
              >
                <Tag
                  title={finding.verdict}
                  color={finding.verdict === "FAIL" ? "amber" : "blue"}
                  size="sm"
                />
                <Text font="secondary-body" color="text-03" nowrap>
                  {finding.rule_name ?? "Unnamed Rule"}
                </Text>
              </div>
            ))}
          </Section>
        </Card>
      )}
      {/* Audit trail, newest entries first */}
      <Card padding="md" border="solid" background="light">
        <Section
          gap={0.5}
          height="auto"
          justifyContent="start"
          alignItems="start"
        >
          <Text font="main-ui-action" color="text-04">
            Audit Trail
          </Text>
          {auditLoading && (
            <div className="flex items-center justify-center py-2">
              <SimpleLoader />
            </div>
          )}
          {!auditLoading && (!auditLog || auditLog.length === 0) && (
            <Text font="secondary-body" color="text-03">
              No activity recorded yet.
            </Text>
          )}
          {auditLog && auditLog.length > 0 && (
            <Section
              gap={0.25}
              height="auto"
              justifyContent="start"
              alignItems="start"
              className="max-h-[200px] overflow-y-auto"
            >
              {/* Copy before sorting so the SWR cache array isn't mutated */}
              {[...auditLog]
                .sort(
                  (a, b) =>
                    new Date(b.created_at).getTime() -
                    new Date(a.created_at).getTime()
                )
                .map((entry) => (
                  <AuditEntry key={entry.id} entry={entry} />
                ))}
            </Section>
          )}
        </Section>
      </Card>
      {/* Decision panel at the bottom */}
      <DecisionPanel
        proposalId={proposalId}
        findings={findings}
        onDecisionSubmitted={onDecisionSubmitted}
      />
    </div>
  );
}
// ---------------------------------------------------------------------------
// Summary count pill
// ---------------------------------------------------------------------------
interface SummaryCountProps {
  // Icon component rendered above the count; receives className for sizing/color.
  icon: React.FunctionComponent<{ className?: string }>;
  // Numeric value displayed in the pill.
  count: number;
  // Short caption rendered under the count.
  label: string;
  // Extra CSS classes appended to the icon's className (used for coloring).
  iconClass: string;
}
function SummaryCount({
  icon: Icon,
  count,
  label,
  iconClass,
}: SummaryCountProps) {
  // Vertical "icon / number / label" pill used in the summary header.
  const displayCount = String(count);
  return (
    <Section
      gap={0.25}
      height="auto"
      padding={0.5}
      alignItems="center"
      justifyContent="center"
    >
      <Icon className={`h-5 w-5 ${iconClass}`} />
      <Text font="main-ui-action" color="text-04">
        {displayCount}
      </Text>
      <Text font="secondary-body" color="text-03">
        {label}
      </Text>
    </Section>
  );
}
// ---------------------------------------------------------------------------
// Category progress row
// ---------------------------------------------------------------------------
interface CategoryProgressProps {
  // One category bucket of findings whose decision progress is summarized.
  group: FindingsByCategory;
}
function CategoryProgress({ group }: CategoryProgressProps) {
  // Row showing "decided/total" for one rule category; a check mark appears
  // once every finding in the category carries an officer decision.
  const { category, findings } = group;
  const total = findings.length;
  let decided = 0;
  for (const finding of findings) {
    if (finding.decision !== null) {
      decided += 1;
    }
  }
  const complete = decided === total;
  return (
    <div className="flex items-center justify-between py-1">
      <Text font="secondary-body" color="text-03" nowrap>
        {category}
      </Text>
      <div className="flex items-center gap-1">
        <Text font="secondary-body" color={complete ? "text-01" : "text-03"}>
          {`${decided}/${total}`}
        </Text>
        {complete && (
          <SvgCheckCircle className="h-3.5 w-3.5 text-status-success-03" />
        )}
      </div>
    </div>
  );
}
// ---------------------------------------------------------------------------
// Audit log entry
// ---------------------------------------------------------------------------
// Maps backend audit action identifiers to human-readable labels for the
// audit trail. Unknown actions fall back to the raw identifier (see the
// `|| entry.action` lookup where this table is consumed).
const AUDIT_ACTION_LABELS: Record<string, string> = {
  review_triggered: "Review triggered",
  finding_decided: "Finding decided",
  decision_submitted: "Decision submitted",
  jira_synced: "Jira synced",
  document_uploaded: "Document uploaded",
};
interface AuditEntryProps {
  // Single audit-log row returned by the backend.
  entry: AuditLogEntry;
}
function AuditEntry({ entry }: AuditEntryProps) {
  // Fall back to the raw action identifier when no friendly label exists.
  const label = AUDIT_ACTION_LABELS[entry.action] || entry.action;
  // Locale-formatted timestamp in the user's local timezone.
  const when = new Date(entry.created_at).toLocaleString();
  return (
    <div className="flex items-start justify-between gap-2 py-1">
      <div className="flex flex-col gap-0.5">
        <Text font="secondary-body" color="text-03">
          {label}
        </Text>
        {entry.user_id && (
          <Text font="secondary-body" color="text-03">
            {`User: ${entry.user_id.slice(0, 8)}...`}
          </Text>
        )}
      </div>
      <Text font="secondary-body" color="text-03" nowrap>
        {when}
      </Text>
    </div>
  );
}

View File

@@ -1,52 +0,0 @@
"use client";
import { useEffect } from "react";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import { useRulesets } from "@/app/proposal-review/hooks/useRulesets";
import { useProposalReviewContext } from "@/app/proposal-review/contexts/ProposalReviewContext";
import { Text } from "@opal/components";
export default function RulesetSelector() {
  const { rulesets, defaultRuleset, isLoading } = useRulesets();
  const { selectedRulesetId, setSelectedRulesetId } =
    useProposalReviewContext();
  // Pre-select the default ruleset on first load, once the list arrives and
  // nothing has been chosen yet.
  useEffect(() => {
    if (defaultRuleset && !selectedRulesetId) {
      setSelectedRulesetId(defaultRuleset.id);
    }
  }, [defaultRuleset, selectedRulesetId, setSelectedRulesetId]);
  if (isLoading) {
    return (
      <Text font="secondary-body" color="text-03">
        Loading rulesets...
      </Text>
    );
  }
  if (!rulesets.length) {
    return (
      <Text font="secondary-body" color="text-03">
        No rulesets available
      </Text>
    );
  }
  const options = rulesets.map((ruleset) => (
    <InputSelect.Item key={ruleset.id} value={ruleset.id}>
      {ruleset.name}
    </InputSelect.Item>
  ));
  return (
    <InputSelect
      value={selectedRulesetId ?? undefined}
      onValueChange={setSelectedRulesetId}
    >
      <InputSelect.Trigger placeholder="Select ruleset" />
      <InputSelect.Content>{options}</InputSelect.Content>
    </InputSelect>
  );
}

View File

@@ -1,111 +0,0 @@
"use client";
import {
createContext,
useContext,
useState,
useCallback,
useMemo,
type ReactNode,
} from "react";
// ---------------------------------------------------------------------------
// Types
// ---------------------------------------------------------------------------
/** Shared review-session state exposed to all proposal-review components. */
interface ProposalReviewContextValue {
  /** Currently selected ruleset ID for the review run. */
  selectedRulesetId: string | null;
  setSelectedRulesetId: (id: string) => void;
  /** Whether an AI review is currently running. */
  isReviewRunning: boolean;
  setIsReviewRunning: (running: boolean) => void;
  /** ID of the current review run (set after triggering). */
  currentReviewRunId: string | null;
  setCurrentReviewRunId: (id: string | null) => void;
  /** Whether findings have been loaded after a completed review. */
  findingsLoaded: boolean;
  setFindingsLoaded: (loaded: boolean) => void;
  /** Reset review state (for starting a new review). */
  resetReviewState: () => void;
}
// ---------------------------------------------------------------------------
// Context
// ---------------------------------------------------------------------------
// Null until a ProposalReviewProvider mounts; consumers must go through the
// useProposalReviewContext hook, which throws when no provider is present.
const ProposalReviewContext = createContext<ProposalReviewContextValue | null>(
  null
);
// ---------------------------------------------------------------------------
// Provider
// ---------------------------------------------------------------------------
interface ProposalReviewProviderProps {
  // Subtree that gains access to the proposal-review context.
  children: ReactNode;
}
export function ProposalReviewProvider({
  children,
}: ProposalReviewProviderProps) {
  // Ruleset chosen for the next/current review run.
  const [selectedRulesetId, setSelectedRulesetId] = useState<string | null>(
    null
  );
  const [isReviewRunning, setIsReviewRunning] = useState(false);
  const [currentReviewRunId, setCurrentReviewRunId] = useState<string | null>(
    null
  );
  const [findingsLoaded, setFindingsLoaded] = useState(false);
  // Clears all run-scoped state so a fresh review can start; the selected
  // ruleset is intentionally preserved across resets.
  const resetReviewState = useCallback(() => {
    setIsReviewRunning(false);
    setCurrentReviewRunId(null);
    setFindingsLoaded(false);
  }, []);
  // Memoized so consumers only re-render when a state slice changes; the
  // useState setters are referentially stable and need not be dependencies.
  const value = useMemo<ProposalReviewContextValue>(
    () => ({
      selectedRulesetId,
      setSelectedRulesetId,
      isReviewRunning,
      setIsReviewRunning,
      currentReviewRunId,
      setCurrentReviewRunId,
      findingsLoaded,
      setFindingsLoaded,
      resetReviewState,
    }),
    [
      selectedRulesetId,
      isReviewRunning,
      currentReviewRunId,
      findingsLoaded,
      resetReviewState,
    ]
  );
  return (
    <ProposalReviewContext.Provider value={value}>
      {children}
    </ProposalReviewContext.Provider>
  );
}
// ---------------------------------------------------------------------------
// Hook
// ---------------------------------------------------------------------------
export function useProposalReviewContext() {
const context = useContext(ProposalReviewContext);
if (!context) {
throw new Error(
"useProposalReviewContext must be used within a ProposalReviewProvider"
);
}
return context;
}

View File

@@ -1,43 +0,0 @@
"use client";
import useSWR from "swr";
import { errorHandlingFetcher } from "@/lib/fetcher";
import type { Finding, FindingsByCategory } from "@/app/proposal-review/types";
export function useFindings(proposalId: string | null) {
  // A null proposalId yields a null key, which disables the SWR fetch.
  const swrKey = proposalId
    ? `/api/proposal-review/proposals/${proposalId}/findings`
    : null;
  const { data, error, isLoading, mutate } = useSWR<Finding[]>(
    swrKey,
    errorHandlingFetcher
  );
  const findings = data ?? [];
  // Bucket findings by rule category; findings without a category land under
  // "Uncategorized". Map preserves first-seen category order.
  const buckets = new Map<string, Finding[]>();
  for (const finding of findings) {
    const category = finding.rule_category ?? "Uncategorized";
    const bucket = buckets.get(category);
    if (bucket === undefined) {
      buckets.set(category, [finding]);
    } else {
      bucket.push(finding);
    }
  }
  const findingsByCategory: FindingsByCategory[] = Array.from(
    buckets,
    ([category, catFindings]) => ({ category, findings: catFindings })
  );
  return {
    findings,
    findingsByCategory,
    error,
    isLoading,
    mutate,
  };
}

View File

@@ -1,19 +0,0 @@
"use client";
import useSWR from "swr";
import { errorHandlingFetcher } from "@/lib/fetcher";
import type { Proposal } from "@/app/proposal-review/types";
export function useProposal(proposalId: string | null) {
  // Null id → null key → SWR skips the request entirely.
  const key = proposalId
    ? `/api/proposal-review/proposals/${proposalId}`
    : null;
  const { data, error, isLoading, mutate } = useSWR<Proposal>(
    key,
    errorHandlingFetcher
  );
  return {
    proposal: data ?? null,
    error,
    isLoading,
    mutate,
  };
}

View File

@@ -1,29 +0,0 @@
"use client";
import useSWR from "swr";
import { errorHandlingFetcher } from "@/lib/fetcher";
import type { Proposal } from "@/app/proposal-review/types";
const PROPOSALS_URL = "/api/proposal-review/proposals";
interface ProposalListResponse {
  // Proposals returned by the backend list endpoint.
  proposals: Proposal[];
  // Total proposal count reported by the backend.
  total_count: number;
  // Backend flag indicating required configuration is missing —
  // TODO(review): confirm exact semantics against the backend handler.
  config_missing: boolean;
}
export function useProposals() {
const { data, error, isLoading, mutate } = useSWR<ProposalListResponse>(
PROPOSALS_URL,
errorHandlingFetcher
);
return {
proposals: data?.proposals ?? [],
totalCount: data?.total_count ?? 0,
configMissing: data?.config_missing ?? false,
error,
isLoading,
mutate,
};
}

View File

@@ -1,35 +0,0 @@
"use client";
import useSWR from "swr";
import { errorHandlingFetcher } from "@/lib/fetcher";
import type { ReviewRun } from "@/app/proposal-review/types";
/**
* Polls the review status endpoint every 2.5 seconds while a review is running.
* Stops polling once the status is COMPLETED or FAILED.
*
* The backend returns a full ReviewRunResponse (mapped to ReviewRun on the
* frontend). Only a subset of fields (status, total_rules, completed_rules)
* is typically consumed by callers.
*/
export function useReviewStatus(
  proposalId: string | null,
  isReviewRunning: boolean
) {
  // Only poll while a review is actually in flight; a null key disables SWR.
  const key =
    isReviewRunning && proposalId
      ? `/api/proposal-review/proposals/${proposalId}/review-status`
      : null;
  const { data, error, isLoading } = useSWR<ReviewRun>(
    key,
    errorHandlingFetcher,
    {
      refreshInterval: isReviewRunning ? 2500 : 0,
      revalidateOnFocus: false,
    }
  );
  return {
    reviewStatus: data ?? null,
    error,
    isLoading,
  };
}

View File

@@ -1,22 +0,0 @@
"use client";
import useSWR from "swr";
import { errorHandlingFetcher } from "@/lib/fetcher";
import type { Ruleset } from "@/app/proposal-review/types";
export function useRulesets() {
  const { data, error, isLoading } = useSWR<Ruleset[]>(
    "/api/proposal-review/rulesets",
    errorHandlingFetcher
  );
  const rulesets = data ?? [];
  // Prefer the ruleset flagged as default; otherwise fall back to the first
  // entry (if any).
  let defaultRuleset: Ruleset | undefined;
  for (const candidate of rulesets) {
    if (candidate.is_default) {
      defaultRuleset = candidate;
      break;
    }
  }
  if (defaultRuleset === undefined) {
    defaultRuleset = rulesets[0];
  }
  return {
    rulesets,
    defaultRuleset: defaultRuleset ?? null,
    error,
    isLoading,
  };
}

View File

@@ -1,27 +0,0 @@
"use client";
import { StateProvider as SidebarStateProvider } from "@/layouts/sidebar-layouts";
import { ProposalReviewProvider } from "@/app/proposal-review/contexts/ProposalReviewContext";
import ProposalReviewSidebar from "@/app/proposal-review/components/ProposalReviewSidebar";
/**
* Proposal Review Layout
*
* Follows the Craft pattern: custom sidebar on the left, content on the right.
* Sidebar provides navigation back to main app and to admin settings.
* SidebarStateProvider shares fold state with the main app via cookies.
*/
export default function Layout({ children }: { children: React.ReactNode }) {
  // Fixed-width sidebar on the left; the content pane fills the remaining
  // width and clips its own overflow so pages manage their own scrolling.
  return (
    <SidebarStateProvider>
      <ProposalReviewProvider>
        <div className="flex flex-row w-full h-full">
          <ProposalReviewSidebar />
          <div className="relative flex-1 h-full overflow-hidden">
            {children}
          </div>
        </div>
      </ProposalReviewProvider>
    </SidebarStateProvider>
  );
}

View File

@@ -1,33 +0,0 @@
"use client";
import { SvgShield } from "@opal/icons";
import { Content } from "@opal/layouts";
import ProposalQueue from "@/app/proposal-review/components/ProposalQueue";
/**
* Proposal Review Queue Page
*
* Main landing page for officers. Shows a filterable, sortable table
* of proposals imported from Jira.
*/
export default function ProposalReviewPage() {
  // Static shell: data fetching and table state live inside ProposalQueue.
  return (
    <div className="flex flex-col h-full w-full">
      {/* Header */}
      <div className="flex items-center gap-3 px-6 py-4 border-b border-border-01 shrink-0">
        <Content
          sizePreset="section"
          variant="heading"
          icon={SvgShield}
          title="Proposal Review"
          description="Review and evaluate grant proposals"
        />
      </div>
      {/* Body */}
      <div className="flex-1 overflow-y-auto px-6 py-4">
        <ProposalQueue />
      </div>
    </div>
  );
}

View File

@@ -1,19 +0,0 @@
"use client";
import { useParams } from "next/navigation";
import ProposalReview from "@/app/proposal-review/components/ProposalReview";
/**
* Proposal Review Detail Page
*
* Three-panel layout for reviewing a single proposal:
* - Left: Proposal info + documents
* - Center: AI review checklist with findings
* - Right: Summary counts + decision panel
*/
export default function ProposalReviewDetailPage() {
  // Route is /proposal-review/[id]; pull the dynamic segment from the URL.
  const { id } = useParams<{ id: string }>();
  return <ProposalReview proposalId={id} />;
}

View File

@@ -1,82 +0,0 @@
// ---------------------------------------------------------------------------
// Proposal Review API Services
//
// All mutation (POST) calls for the proposal-review feature.
// GET requests are handled by SWR hooks — see hooks/.
// ---------------------------------------------------------------------------
// Shared URL prefix for every proposal-review backend endpoint.
const BASE = "/api/proposal-review";
/**
 * Normalize a fetch Response: resolve with the parsed JSON body on success,
 * otherwise reject with an Error carrying the backend-provided message.
 */
async function handleResponse<T>(res: Response): Promise<T> {
  if (!res.ok) {
    // Error bodies may be empty or non-JSON; fall back to an empty object.
    const body = await res.json().catch(() => ({}));
    const detail = body.detail ?? body.message;
    // FastAPI-style backends can return a structured (non-string) `detail`,
    // e.g. a validation-error array. Stringify it so Error.message is never
    // the useless "[object Object]".
    throw new Error(
      typeof detail === "string"
        ? detail
        : detail != null
          ? JSON.stringify(detail)
          : "Request failed"
    );
  }
  return res.json();
}
/** Trigger an AI review for a proposal with a given ruleset. */
export async function triggerReview(
  proposalId: string,
  rulesetId: string
): Promise<{ id: string }> {
  const url = `${BASE}/proposals/${proposalId}/review`;
  const payload = { ruleset_id: rulesetId };
  const response = await fetch(url, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(payload),
  });
  // Resolves with the newly created review run's ID.
  return handleResponse(response);
}
/** Record an officer decision on an individual finding. */
export async function submitFindingDecision(
  findingId: string,
  action: string,
  notes?: string
): Promise<void> {
  const url = `${BASE}/findings/${findingId}/decision`;
  // Omitted notes are sent as an explicit null.
  const payload = { action, notes: notes ?? null };
  const response = await fetch(url, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(payload),
  });
  return handleResponse(response);
}
/** Record the final proposal-level decision. */
export async function submitProposalDecision(
  proposalId: string,
  decision: string,
  notes?: string
): Promise<void> {
  const url = `${BASE}/proposals/${proposalId}/decision`;
  // Omitted notes are sent as an explicit null.
  const payload = { decision, notes: notes ?? null };
  const response = await fetch(url, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(payload),
  });
  return handleResponse(response);
}
/** Sync the proposal decision to Jira. */
export async function syncToJira(proposalId: string): Promise<void> {
  // Body-less POST; the proposal ID in the path is the only input.
  const response = await fetch(`${BASE}/proposals/${proposalId}/sync-jira`, {
    method: "POST",
  });
  return handleResponse(response);
}
/** Upload a document for a proposal. */
export async function uploadDocument(
  proposalId: string,
  file: File,
  documentRole: string
): Promise<void> {
  // multipart/form-data upload; the browser sets the Content-Type boundary
  // itself, so no explicit header is passed.
  const form = new FormData();
  form.append("file", file);
  form.append("document_role", documentRole);
  const response = await fetch(`${BASE}/proposals/${proposalId}/documents`, {
    method: "POST",
    body: form,
  });
  return handleResponse(response);
}

View File

@@ -1,192 +0,0 @@
// ---------------------------------------------------------------------------
// Proposal Review Types
//
// TypeScript interfaces matching backend response schemas for the
// proposal-review feature (Argus).
// ---------------------------------------------------------------------------
// --- Enums / Literal Unions ---
// Lifecycle states of a proposal in the review queue.
export type ProposalStatus =
  | "PENDING"
  | "IN_REVIEW"
  | "APPROVED"
  | "CHANGES_REQUESTED"
  | "REJECTED";
// Lifecycle states of an AI review run.
export type ReviewRunStatus = "PENDING" | "RUNNING" | "COMPLETED" | "FAILED";
// AI verdict for a single rule evaluation.
export type FindingVerdict =
  | "PASS"
  | "FAIL"
  | "FLAG"
  | "NEEDS_REVIEW"
  | "NOT_APPLICABLE";
// Model-reported confidence bucket for a finding.
export type FindingConfidence = "HIGH" | "MEDIUM" | "LOW";
// Officer action recorded on an individual finding.
export type DecisionAction =
  | "VERIFIED"
  | "ISSUE"
  | "NOT_APPLICABLE"
  | "OVERRIDDEN";
// Final proposal-level outcome chosen by the officer.
export type ProposalDecisionOutcome =
  | "APPROVED"
  | "CHANGES_REQUESTED"
  | "REJECTED";
// How a rule is evaluated by the review engine.
export type RuleType =
  | "DOCUMENT_CHECK"
  | "METADATA_CHECK"
  | "CROSS_REFERENCE"
  | "CUSTOM_NL";
export type RuleIntent = "CHECK" | "HIGHLIGHT";
export type RuleAuthority = "OVERRIDE" | "RETURN" | null;
// Role a document plays within a proposal's document set.
export type DocumentRole =
  | "PROPOSAL"
  | "BUDGET"
  | "FOA"
  | "INTERNAL"
  | "SOW"
  | "OTHER";
// Action identifiers recorded in the audit trail.
export type AuditAction =
  | "review_triggered"
  | "finding_decided"
  | "decision_submitted"
  | "jira_synced"
  | "document_uploaded";
// --- Core Interfaces ---
// Free-form metadata carried from the source system (e.g. Jira fields).
export interface ProposalMetadata {
  jira_key?: string;
  title?: string;
  link?: string;
  [key: string]: string | string[] | undefined;
}
export interface Proposal {
  id: string;
  document_id: string;
  tenant_id: string;
  status: ProposalStatus;
  metadata: ProposalMetadata;
  // ISO timestamps serialized as strings by the backend.
  created_at: string;
  updated_at: string;
}
export interface Ruleset {
  id: string;
  name: string;
  description: string | null;
  // At most one ruleset is expected to be the default — TODO confirm
  // backend enforces this.
  is_default: boolean;
  is_active: boolean;
  created_at: string;
  updated_at: string;
}
export interface Rule {
  id: string;
  ruleset_id: string;
  name: string;
  description: string | null;
  category: string | null;
  rule_type: RuleType;
  rule_intent: RuleIntent;
  authority: RuleAuthority;
  is_hard_stop: boolean;
  priority: number;
  is_active: boolean;
}
export interface ReviewRun {
  id: string;
  proposal_id: string;
  ruleset_id: string;
  triggered_by: string;
  status: ReviewRunStatus;
  // Progress counters consumed by the polling UI.
  total_rules: number;
  completed_rules: number;
  started_at: string | null;
  completed_at: string | null;
  created_at: string;
}
// Slimmed-down progress view of a ReviewRun.
export interface ReviewStatus {
  status: ReviewRunStatus;
  total_rules: number;
  completed_rules: number;
}
export interface FindingDecision {
  id: string;
  finding_id: string;
  officer_id: string;
  action: DecisionAction;
  notes: string | null;
  created_at: string;
  updated_at: string;
}
export interface Finding {
  id: string;
  proposal_id: string;
  rule_id: string;
  review_run_id: string;
  verdict: FindingVerdict;
  confidence: FindingConfidence | null;
  evidence: string | null;
  explanation: string | null;
  suggested_action: string | null;
  llm_model: string | null;
  llm_tokens_used: number | null;
  created_at: string;
  // Flattened rule info from the backend FindingResponse
  rule_name: string | null;
  rule_category: string | null;
  rule_is_hard_stop: boolean | null;
  // Null until an officer records a decision on this finding.
  decision: FindingDecision | null;
}
export interface ProposalDocument {
  id: string;
  proposal_id: string;
  file_name: string;
  file_type: string | null;
  document_role: DocumentRole;
  uploaded_by: string | null;
  extracted_text: string | null;
  created_at: string;
}
export interface ProposalDecision {
  id: string;
  proposal_id: string;
  officer_id: string;
  decision: ProposalDecisionOutcome;
  notes: string | null;
  // Jira sync is recorded separately from the decision itself.
  jira_synced: boolean;
  jira_synced_at: string | null;
  created_at: string;
}
export interface AuditLogEntry {
  id: string;
  proposal_id: string;
  // Null for system-initiated actions — TODO confirm against backend.
  user_id: string | null;
  action: AuditAction;
  details: Record<string, unknown> | null;
  created_at: string;
}
// --- Grouped findings by category ---
export interface FindingsByCategory {
  category: string;
  findings: Finding[];
}

View File

@@ -8,7 +8,6 @@ import {
SvgBarChart,
SvgBookOpen,
SvgBubbleText,
SvgCheckSquare,
SvgClipboard,
SvgCpu,
SvgDownload,
@@ -235,12 +234,6 @@ export const ADMIN_ROUTES = {
title: "Hook Extensions",
sidebarLabel: "Hook Extensions",
},
PROPOSAL_REVIEW: {
path: "/admin/proposal-review",
icon: SvgCheckSquare,
title: "Proposal Review",
sidebarLabel: "Proposal Review",
},
SCIM: {
path: "/admin/scim",
icon: SvgUserSync,

View File

@@ -742,25 +742,6 @@ export const connectorConfigs: Record<
"This is generally useful to ignore certain bots. Add user emails which comments should NOT be indexed.",
optional: true,
},
{
type: "checkbox",
query: "Extract custom fields?",
label: "Extract Custom Fields",
name: "extract_custom_fields",
description:
"Include custom Jira fields in document metadata with human-readable names",
optional: true,
default: false,
},
{
type: "checkbox",
query: "Fetch attachments?",
label: "Fetch Attachments",
name: "fetch_attachments",
description: "Download and index file attachments from Jira tickets",
optional: true,
default: false,
},
],
advanced_values: [],
},
@@ -1985,8 +1966,6 @@ export interface JiraConfig {
project_key?: string;
comment_email_blacklist?: string[];
jql_query?: string;
extract_custom_fields?: boolean;
fetch_attachments?: boolean;
}
export interface SalesforceConfig {

View File

@@ -102,7 +102,6 @@ function buildItems(
// 2. Agents & Actions
add(SECTIONS.AGENTS_AND_ACTIONS, ADMIN_ROUTES.AGENTS);
add(SECTIONS.AGENTS_AND_ACTIONS, ADMIN_ROUTES.PROPOSAL_REVIEW);
add(SECTIONS.AGENTS_AND_ACTIONS, ADMIN_ROUTES.MCP_ACTIONS);
add(SECTIONS.AGENTS_AND_ACTIONS, ADMIN_ROUTES.OPENAPI_ACTIONS);

View File

@@ -59,7 +59,6 @@ import useAppFocus from "@/hooks/useAppFocus";
import { useCreateModal } from "@/refresh-components/contexts/ModalContext";
import { useModalContext } from "@/components/context/ModalContext";
import {
SvgCheckSquare,
SvgDevKit,
SvgEditBig,
SvgFolderPlus,
@@ -532,21 +531,6 @@ const MemoizedAppSidebarInner = memo(function AppSidebarInner() {
[folded, posthog]
);
const proposalReviewButton = useMemo(
() => (
<div data-testid="AppSidebar/proposal-review">
<SidebarTab
icon={SvgCheckSquare}
folded={folded}
href="/proposal-review"
>
Argus
</SidebarTab>
</div>
),
[folded]
);
const searchChatsButton = useMemo(
() => (
<ChatSearchCommandMenu
@@ -687,10 +671,6 @@ const MemoizedAppSidebarInner = memo(function AppSidebarInner() {
{newSessionButton}
{searchChatsButton}
{isOnyxCraftEnabled && buildButton}
{/* TODO: gate behind a dedicated ENABLE_PROPOSAL_REVIEW feature flag
once it is exposed to the frontend settings. For now, reuse the
Craft flag so it does not render unconditionally. */}
{isOnyxCraftEnabled && proposalReviewButton}
{folded && moreAgentsButton}
{folded && newProjectButton}
</div>