mirror of
https://github.com/onyx-dot-app/onyx.git
synced 2026-04-10 17:32:43 +00:00
Compare commits
24 Commits
jamison/wo
...
v3.2.1
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
711651276c | ||
|
|
3731110cf9 | ||
|
|
8fb7a8718e | ||
|
|
c4f8d5370b | ||
|
|
9e434f6a5a | ||
|
|
67dc819319 | ||
|
|
2d12274050 | ||
|
|
c727ba13ee | ||
|
|
6193dd5326 | ||
|
|
387a7d1cea | ||
|
|
869578eeed | ||
|
|
e68648ab74 | ||
|
|
da01002099 | ||
|
|
f5d66f389c | ||
|
|
82d89f78c6 | ||
|
|
6f49c5e32c | ||
|
|
41f2bd2f19 | ||
|
|
bfa2f672f9 | ||
|
|
a823c3ead1 | ||
|
|
bd7d378a9a | ||
|
|
dcec0c8ef3 | ||
|
|
6456b51dcf | ||
|
|
7cfe27e31e | ||
|
|
3c5f77f5a4 |
4
.github/workflows/deployment.yml
vendored
4
.github/workflows/deployment.yml
vendored
@@ -13,7 +13,7 @@ permissions:
|
||||
id-token: write # zizmor: ignore[excessive-permissions]
|
||||
|
||||
env:
|
||||
EDGE_TAG: ${{ startsWith(github.ref_name, 'nightly-latest') }}
|
||||
EDGE_TAG: ${{ startsWith(github.ref_name, 'nightly-latest') || github.ref_name == 'edge' }}
|
||||
|
||||
jobs:
|
||||
# Determine which components to build based on the tag
|
||||
@@ -156,7 +156,7 @@ jobs:
|
||||
check-version-tag:
|
||||
runs-on: ubuntu-slim
|
||||
timeout-minutes: 10
|
||||
if: ${{ !startsWith(github.ref_name, 'nightly-latest') && github.event_name != 'workflow_dispatch' }}
|
||||
if: ${{ !startsWith(github.ref_name, 'nightly-latest') && github.ref_name != 'edge' && github.event_name != 'workflow_dispatch' }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # ratchet:actions/checkout@v6
|
||||
|
||||
@@ -9,7 +9,6 @@ repos:
|
||||
rev: d30b4298e4fb63ce8609e29acdbcf4c9018a483c
|
||||
hooks:
|
||||
- id: uv-sync
|
||||
args: ["--locked", "--all-extras"]
|
||||
- id: uv-lock
|
||||
- id: uv-export
|
||||
name: uv-export default.txt
|
||||
@@ -18,7 +17,7 @@ repos:
|
||||
"--no-emit-project",
|
||||
"--no-default-groups",
|
||||
"--no-hashes",
|
||||
"--extra",
|
||||
"--group",
|
||||
"backend",
|
||||
"-o",
|
||||
"backend/requirements/default.txt",
|
||||
@@ -31,7 +30,7 @@ repos:
|
||||
"--no-emit-project",
|
||||
"--no-default-groups",
|
||||
"--no-hashes",
|
||||
"--extra",
|
||||
"--group",
|
||||
"dev",
|
||||
"-o",
|
||||
"backend/requirements/dev.txt",
|
||||
@@ -44,7 +43,7 @@ repos:
|
||||
"--no-emit-project",
|
||||
"--no-default-groups",
|
||||
"--no-hashes",
|
||||
"--extra",
|
||||
"--group",
|
||||
"ee",
|
||||
"-o",
|
||||
"backend/requirements/ee.txt",
|
||||
@@ -57,7 +56,7 @@ repos:
|
||||
"--no-emit-project",
|
||||
"--no-default-groups",
|
||||
"--no-hashes",
|
||||
"--extra",
|
||||
"--group",
|
||||
"model_server",
|
||||
"-o",
|
||||
"backend/requirements/model_server.txt",
|
||||
|
||||
3
.vscode/launch.json
vendored
3
.vscode/launch.json
vendored
@@ -531,8 +531,7 @@
|
||||
"request": "launch",
|
||||
"runtimeExecutable": "uv",
|
||||
"runtimeArgs": [
|
||||
"sync",
|
||||
"--all-extras"
|
||||
"sync"
|
||||
],
|
||||
"cwd": "${workspaceFolder}",
|
||||
"console": "integratedTerminal",
|
||||
|
||||
@@ -117,7 +117,7 @@ If using PowerShell, the command slightly differs:
|
||||
Install the required Python dependencies:
|
||||
|
||||
```bash
|
||||
uv sync --all-extras
|
||||
uv sync
|
||||
```
|
||||
|
||||
Install Playwright for Python (headless browser required by the Web Connector):
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import time
|
||||
from collections.abc import Generator
|
||||
from collections.abc import Iterator
|
||||
from collections.abc import Sequence
|
||||
@@ -30,6 +31,8 @@ from onyx.connectors.models import HierarchyNode
|
||||
from onyx.connectors.models import SlimDocument
|
||||
from onyx.httpx.httpx_pool import HttpxPool
|
||||
from onyx.indexing.indexing_heartbeat import IndexingHeartbeatInterface
|
||||
from onyx.server.metrics.pruning_metrics import inc_pruning_rate_limit_error
|
||||
from onyx.server.metrics.pruning_metrics import observe_pruning_enumeration_duration
|
||||
from onyx.utils.logger import setup_logger
|
||||
|
||||
|
||||
@@ -130,6 +133,7 @@ def _extract_from_batch(
|
||||
def extract_ids_from_runnable_connector(
|
||||
runnable_connector: BaseConnector,
|
||||
callback: IndexingHeartbeatInterface | None = None,
|
||||
connector_type: str = "unknown",
|
||||
) -> SlimConnectorExtractionResult:
|
||||
"""
|
||||
Extract document IDs and hierarchy nodes from a runnable connector.
|
||||
@@ -179,21 +183,38 @@ def extract_ids_from_runnable_connector(
|
||||
)
|
||||
|
||||
# process raw batches to extract both IDs and hierarchy nodes
|
||||
for doc_list in raw_batch_generator:
|
||||
if callback and callback.should_stop():
|
||||
raise RuntimeError(
|
||||
"extract_ids_from_runnable_connector: Stop signal detected"
|
||||
)
|
||||
enumeration_start = time.monotonic()
|
||||
try:
|
||||
for doc_list in raw_batch_generator:
|
||||
if callback and callback.should_stop():
|
||||
raise RuntimeError(
|
||||
"extract_ids_from_runnable_connector: Stop signal detected"
|
||||
)
|
||||
|
||||
batch_result = _extract_from_batch(doc_list)
|
||||
batch_ids = batch_result.raw_id_to_parent
|
||||
batch_nodes = batch_result.hierarchy_nodes
|
||||
doc_batch_processing_func(batch_ids)
|
||||
all_raw_id_to_parent.update(batch_ids)
|
||||
all_hierarchy_nodes.extend(batch_nodes)
|
||||
batch_result = _extract_from_batch(doc_list)
|
||||
batch_ids = batch_result.raw_id_to_parent
|
||||
batch_nodes = batch_result.hierarchy_nodes
|
||||
doc_batch_processing_func(batch_ids)
|
||||
all_raw_id_to_parent.update(batch_ids)
|
||||
all_hierarchy_nodes.extend(batch_nodes)
|
||||
|
||||
if callback:
|
||||
callback.progress("extract_ids_from_runnable_connector", len(batch_ids))
|
||||
if callback:
|
||||
callback.progress("extract_ids_from_runnable_connector", len(batch_ids))
|
||||
except Exception as e:
|
||||
# Best-effort rate limit detection via string matching.
|
||||
# Connectors surface rate limits inconsistently — some raise HTTP 429,
|
||||
# some use SDK-specific exceptions (e.g. google.api_core.exceptions.ResourceExhausted)
|
||||
# that may or may not include "rate limit" or "429" in the message.
|
||||
# TODO(Bo): replace with a standard ConnectorRateLimitError exception that all
|
||||
# connectors raise when rate limited, making this check precise.
|
||||
error_str = str(e)
|
||||
if "rate limit" in error_str.lower() or "429" in error_str:
|
||||
inc_pruning_rate_limit_error(connector_type)
|
||||
raise
|
||||
finally:
|
||||
observe_pruning_enumeration_duration(
|
||||
time.monotonic() - enumeration_start, connector_type
|
||||
)
|
||||
|
||||
return SlimConnectorExtractionResult(
|
||||
raw_id_to_parent=all_raw_id_to_parent,
|
||||
|
||||
@@ -72,6 +72,7 @@ from onyx.redis.redis_hierarchy import get_source_node_id_from_cache
|
||||
from onyx.redis.redis_hierarchy import HierarchyNodeCacheEntry
|
||||
from onyx.redis.redis_pool import get_redis_client
|
||||
from onyx.redis.redis_pool import get_redis_replica_client
|
||||
from onyx.server.metrics.pruning_metrics import observe_pruning_diff_duration
|
||||
from onyx.server.runtime.onyx_runtime import OnyxRuntime
|
||||
from onyx.server.utils import make_short_id
|
||||
from onyx.utils.logger import format_error_for_logging
|
||||
@@ -570,8 +571,9 @@ def connector_pruning_generator_task(
|
||||
)
|
||||
|
||||
# Extract docs and hierarchy nodes from the source
|
||||
connector_type = cc_pair.connector.source.value
|
||||
extraction_result = extract_ids_from_runnable_connector(
|
||||
runnable_connector, callback
|
||||
runnable_connector, callback, connector_type=connector_type
|
||||
)
|
||||
all_connector_doc_ids = extraction_result.raw_id_to_parent
|
||||
|
||||
@@ -636,40 +638,46 @@ def connector_pruning_generator_task(
|
||||
commit=True,
|
||||
)
|
||||
|
||||
# a list of docs in our local index
|
||||
all_indexed_document_ids = {
|
||||
doc.id
|
||||
for doc in get_documents_for_connector_credential_pair(
|
||||
db_session=db_session,
|
||||
connector_id=connector_id,
|
||||
credential_id=credential_id,
|
||||
diff_start = time.monotonic()
|
||||
try:
|
||||
# a list of docs in our local index
|
||||
all_indexed_document_ids = {
|
||||
doc.id
|
||||
for doc in get_documents_for_connector_credential_pair(
|
||||
db_session=db_session,
|
||||
connector_id=connector_id,
|
||||
credential_id=credential_id,
|
||||
)
|
||||
}
|
||||
|
||||
# generate list of docs to remove (no longer in the source)
|
||||
doc_ids_to_remove = list(
|
||||
all_indexed_document_ids - all_connector_doc_ids.keys()
|
||||
)
|
||||
}
|
||||
|
||||
# generate list of docs to remove (no longer in the source)
|
||||
doc_ids_to_remove = list(
|
||||
all_indexed_document_ids - all_connector_doc_ids.keys()
|
||||
)
|
||||
task_logger.info(
|
||||
"Pruning set collected: "
|
||||
f"cc_pair={cc_pair_id} "
|
||||
f"connector_source={cc_pair.connector.source} "
|
||||
f"docs_to_remove={len(doc_ids_to_remove)}"
|
||||
)
|
||||
|
||||
task_logger.info(
|
||||
"Pruning set collected: "
|
||||
f"cc_pair={cc_pair_id} "
|
||||
f"connector_source={cc_pair.connector.source} "
|
||||
f"docs_to_remove={len(doc_ids_to_remove)}"
|
||||
)
|
||||
task_logger.info(
|
||||
f"RedisConnector.prune.generate_tasks starting. cc_pair={cc_pair_id}"
|
||||
)
|
||||
tasks_generated = redis_connector.prune.generate_tasks(
|
||||
set(doc_ids_to_remove), self.app, db_session, None
|
||||
)
|
||||
if tasks_generated is None:
|
||||
return None
|
||||
|
||||
task_logger.info(
|
||||
f"RedisConnector.prune.generate_tasks starting. cc_pair={cc_pair_id}"
|
||||
)
|
||||
tasks_generated = redis_connector.prune.generate_tasks(
|
||||
set(doc_ids_to_remove), self.app, db_session, None
|
||||
)
|
||||
if tasks_generated is None:
|
||||
return None
|
||||
|
||||
task_logger.info(
|
||||
f"RedisConnector.prune.generate_tasks finished. cc_pair={cc_pair_id} tasks_generated={tasks_generated}"
|
||||
)
|
||||
task_logger.info(
|
||||
f"RedisConnector.prune.generate_tasks finished. cc_pair={cc_pair_id} tasks_generated={tasks_generated}"
|
||||
)
|
||||
finally:
|
||||
observe_pruning_diff_duration(
|
||||
time.monotonic() - diff_start, connector_type
|
||||
)
|
||||
|
||||
redis_connector.prune.generator_complete = tasks_generated
|
||||
|
||||
|
||||
@@ -60,8 +60,10 @@ logger = setup_logger()
|
||||
|
||||
ONE_HOUR = 3600
|
||||
|
||||
_MAX_RESULTS_FETCH_IDS = 5000 # 5000
|
||||
_MAX_RESULTS_FETCH_IDS = 5000
|
||||
_JIRA_FULL_PAGE_SIZE = 50
|
||||
# https://developer.atlassian.com/cloud/jira/platform/rest/v3/api-group-issues/
|
||||
_JIRA_BULK_FETCH_LIMIT = 100
|
||||
|
||||
# Constants for Jira field names
|
||||
_FIELD_REPORTER = "reporter"
|
||||
@@ -255,15 +257,13 @@ def _bulk_fetch_request(
|
||||
return resp.json()["issues"]
|
||||
|
||||
|
||||
def bulk_fetch_issues(
|
||||
jira_client: JIRA, issue_ids: list[str], fields: str | None = None
|
||||
) -> list[Issue]:
|
||||
# TODO(evan): move away from this jira library if they continue to not support
|
||||
# the endpoints we need. Using private fields is not ideal, but
|
||||
# is likely fine for now since we pin the library version
|
||||
|
||||
def _bulk_fetch_batch(
|
||||
jira_client: JIRA, issue_ids: list[str], fields: str | None
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Fetch a single batch (must be <= _JIRA_BULK_FETCH_LIMIT).
|
||||
On JSONDecodeError, recursively bisects until it succeeds or reaches size 1."""
|
||||
try:
|
||||
raw_issues = _bulk_fetch_request(jira_client, issue_ids, fields)
|
||||
return _bulk_fetch_request(jira_client, issue_ids, fields)
|
||||
except requests.exceptions.JSONDecodeError:
|
||||
if len(issue_ids) <= 1:
|
||||
logger.exception(
|
||||
@@ -277,12 +277,25 @@ def bulk_fetch_issues(
|
||||
f"Jira bulk-fetch JSON decode failed for batch of {len(issue_ids)} issues. "
|
||||
f"Splitting into sub-batches of {mid} and {len(issue_ids) - mid}."
|
||||
)
|
||||
left = bulk_fetch_issues(jira_client, issue_ids[:mid], fields)
|
||||
right = bulk_fetch_issues(jira_client, issue_ids[mid:], fields)
|
||||
left = _bulk_fetch_batch(jira_client, issue_ids[:mid], fields)
|
||||
right = _bulk_fetch_batch(jira_client, issue_ids[mid:], fields)
|
||||
return left + right
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching issues: {e}")
|
||||
raise
|
||||
|
||||
|
||||
def bulk_fetch_issues(
|
||||
jira_client: JIRA, issue_ids: list[str], fields: str | None = None
|
||||
) -> list[Issue]:
|
||||
# TODO(evan): move away from this jira library if they continue to not support
|
||||
# the endpoints we need. Using private fields is not ideal, but
|
||||
# is likely fine for now since we pin the library version
|
||||
|
||||
raw_issues: list[dict[str, Any]] = []
|
||||
for batch in chunked(issue_ids, _JIRA_BULK_FETCH_LIMIT):
|
||||
try:
|
||||
raw_issues.extend(_bulk_fetch_batch(jira_client, list(batch), fields))
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching issues: {e}")
|
||||
raise
|
||||
|
||||
return [
|
||||
Issue(jira_client._options, jira_client._session, raw=issue)
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime
|
||||
from typing import TypedDict
|
||||
|
||||
@@ -6,6 +7,14 @@ from pydantic import BaseModel
|
||||
from onyx.onyxbot.slack.models import ChannelType
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class DirectThreadFetch:
|
||||
"""Request to fetch a Slack thread directly by channel and timestamp."""
|
||||
|
||||
channel_id: str
|
||||
thread_ts: str
|
||||
|
||||
|
||||
class ChannelMetadata(TypedDict):
|
||||
"""Type definition for cached channel metadata."""
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ from onyx.configs.chat_configs import DOC_TIME_DECAY
|
||||
from onyx.connectors.models import IndexingDocument
|
||||
from onyx.connectors.models import TextSection
|
||||
from onyx.context.search.federated.models import ChannelMetadata
|
||||
from onyx.context.search.federated.models import DirectThreadFetch
|
||||
from onyx.context.search.federated.models import SlackMessage
|
||||
from onyx.context.search.federated.slack_search_utils import ALL_CHANNEL_TYPES
|
||||
from onyx.context.search.federated.slack_search_utils import build_channel_query_filter
|
||||
@@ -49,7 +50,6 @@ from onyx.server.federated.models import FederatedConnectorDetail
|
||||
from onyx.utils.logger import setup_logger
|
||||
from onyx.utils.threadpool_concurrency import run_functions_tuples_in_parallel
|
||||
from onyx.utils.timing import log_function_time
|
||||
from shared_configs.configs import DOC_EMBEDDING_CONTEXT_SIZE
|
||||
|
||||
logger = setup_logger()
|
||||
|
||||
@@ -58,7 +58,6 @@ HIGHLIGHT_END_CHAR = "\ue001"
|
||||
|
||||
CHANNEL_METADATA_CACHE_TTL = 60 * 60 * 24 # 24 hours
|
||||
USER_PROFILE_CACHE_TTL = 60 * 60 * 24 # 24 hours
|
||||
SLACK_THREAD_CONTEXT_WINDOW = 3 # Number of messages before matched message to include
|
||||
CHANNEL_METADATA_MAX_RETRIES = 3 # Maximum retry attempts for channel metadata fetching
|
||||
CHANNEL_METADATA_RETRY_DELAY = 1 # Initial retry delay in seconds (exponential backoff)
|
||||
|
||||
@@ -421,6 +420,94 @@ class SlackQueryResult(BaseModel):
|
||||
filtered_channels: list[str] # Channels filtered out during this query
|
||||
|
||||
|
||||
def _fetch_thread_from_url(
|
||||
thread_fetch: DirectThreadFetch,
|
||||
access_token: str,
|
||||
channel_metadata_dict: dict[str, ChannelMetadata] | None = None,
|
||||
) -> SlackQueryResult:
|
||||
"""Fetch a thread directly from a Slack URL via conversations.replies."""
|
||||
channel_id = thread_fetch.channel_id
|
||||
thread_ts = thread_fetch.thread_ts
|
||||
|
||||
slack_client = WebClient(token=access_token)
|
||||
try:
|
||||
response = slack_client.conversations_replies(
|
||||
channel=channel_id,
|
||||
ts=thread_ts,
|
||||
)
|
||||
response.validate()
|
||||
messages: list[dict[str, Any]] = response.get("messages", [])
|
||||
except SlackApiError as e:
|
||||
logger.warning(
|
||||
f"Failed to fetch thread from URL (channel={channel_id}, ts={thread_ts}): {e}"
|
||||
)
|
||||
return SlackQueryResult(messages=[], filtered_channels=[])
|
||||
|
||||
if not messages:
|
||||
logger.warning(
|
||||
f"No messages found for URL override (channel={channel_id}, ts={thread_ts})"
|
||||
)
|
||||
return SlackQueryResult(messages=[], filtered_channels=[])
|
||||
|
||||
# Build thread text from all messages
|
||||
thread_text = _build_thread_text(messages, access_token, None, slack_client)
|
||||
|
||||
# Get channel name from metadata cache or API
|
||||
channel_name = "unknown"
|
||||
if channel_metadata_dict and channel_id in channel_metadata_dict:
|
||||
channel_name = channel_metadata_dict[channel_id].get("name", "unknown")
|
||||
else:
|
||||
try:
|
||||
ch_response = slack_client.conversations_info(channel=channel_id)
|
||||
ch_response.validate()
|
||||
channel_info: dict[str, Any] = ch_response.get("channel", {})
|
||||
channel_name = channel_info.get("name", "unknown")
|
||||
except SlackApiError:
|
||||
pass
|
||||
|
||||
# Build the SlackMessage
|
||||
parent_msg = messages[0]
|
||||
message_ts = parent_msg.get("ts", thread_ts)
|
||||
username = parent_msg.get("user", "unknown_user")
|
||||
parent_text = parent_msg.get("text", "")
|
||||
snippet = (
|
||||
parent_text[:50].rstrip() + "..." if len(parent_text) > 50 else parent_text
|
||||
).replace("\n", " ")
|
||||
|
||||
doc_time = datetime.fromtimestamp(float(message_ts))
|
||||
decay_factor = DOC_TIME_DECAY
|
||||
doc_age_years = (datetime.now() - doc_time).total_seconds() / (365 * 24 * 60 * 60)
|
||||
recency_bias = max(1 / (1 + decay_factor * doc_age_years), 0.75)
|
||||
|
||||
permalink = (
|
||||
f"https://slack.com/archives/{channel_id}/p{message_ts.replace('.', '')}"
|
||||
)
|
||||
|
||||
slack_message = SlackMessage(
|
||||
document_id=f"{channel_id}_{message_ts}",
|
||||
channel_id=channel_id,
|
||||
message_id=message_ts,
|
||||
thread_id=None, # Prevent double-enrichment in thread context fetch
|
||||
link=permalink,
|
||||
metadata={
|
||||
"channel": channel_name,
|
||||
"time": doc_time.isoformat(),
|
||||
},
|
||||
timestamp=doc_time,
|
||||
recency_bias=recency_bias,
|
||||
semantic_identifier=f"{username} in #{channel_name}: {snippet}",
|
||||
text=thread_text,
|
||||
highlighted_texts=set(),
|
||||
slack_score=100000.0, # High priority — user explicitly asked for this thread
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"URL override: fetched thread from channel={channel_id}, ts={thread_ts}, {len(messages)} messages"
|
||||
)
|
||||
|
||||
return SlackQueryResult(messages=[slack_message], filtered_channels=[])
|
||||
|
||||
|
||||
def query_slack(
|
||||
query_string: str,
|
||||
access_token: str,
|
||||
@@ -432,7 +519,6 @@ def query_slack(
|
||||
available_channels: list[str] | None = None,
|
||||
channel_metadata_dict: dict[str, ChannelMetadata] | None = None,
|
||||
) -> SlackQueryResult:
|
||||
|
||||
# Check if query has channel override (user specified channels in query)
|
||||
has_channel_override = query_string.startswith("__CHANNEL_OVERRIDE__")
|
||||
|
||||
@@ -662,7 +748,6 @@ def _fetch_thread_context(
|
||||
"""
|
||||
channel_id = message.channel_id
|
||||
thread_id = message.thread_id
|
||||
message_id = message.message_id
|
||||
|
||||
# If not a thread, return original text as success
|
||||
if thread_id is None:
|
||||
@@ -695,62 +780,37 @@ def _fetch_thread_context(
|
||||
if len(messages) <= 1:
|
||||
return ThreadContextResult.success(message.text)
|
||||
|
||||
# Build thread text from thread starter + context window around matched message
|
||||
thread_text = _build_thread_text(
|
||||
messages, message_id, thread_id, access_token, team_id, slack_client
|
||||
)
|
||||
# Build thread text from thread starter + all replies
|
||||
thread_text = _build_thread_text(messages, access_token, team_id, slack_client)
|
||||
return ThreadContextResult.success(thread_text)
|
||||
|
||||
|
||||
def _build_thread_text(
|
||||
messages: list[dict[str, Any]],
|
||||
message_id: str,
|
||||
thread_id: str,
|
||||
access_token: str,
|
||||
team_id: str | None,
|
||||
slack_client: WebClient,
|
||||
) -> str:
|
||||
"""Build the thread text from messages."""
|
||||
"""Build thread text including all replies.
|
||||
|
||||
Includes the thread parent message followed by all replies in order.
|
||||
"""
|
||||
msg_text = messages[0].get("text", "")
|
||||
msg_sender = messages[0].get("user", "")
|
||||
thread_text = f"<@{msg_sender}>: {msg_text}"
|
||||
|
||||
# All messages after index 0 are replies
|
||||
replies = messages[1:]
|
||||
if not replies:
|
||||
return thread_text
|
||||
|
||||
logger.debug(f"Thread {messages[0].get('ts')}: {len(replies)} replies included")
|
||||
thread_text += "\n\nReplies:"
|
||||
if thread_id == message_id:
|
||||
message_id_idx = 0
|
||||
else:
|
||||
message_id_idx = next(
|
||||
(i for i, msg in enumerate(messages) if msg.get("ts") == message_id), 0
|
||||
)
|
||||
if not message_id_idx:
|
||||
return thread_text
|
||||
|
||||
start_idx = max(1, message_id_idx - SLACK_THREAD_CONTEXT_WINDOW)
|
||||
|
||||
if start_idx > 1:
|
||||
thread_text += "\n..."
|
||||
|
||||
for i in range(start_idx, message_id_idx):
|
||||
msg_text = messages[i].get("text", "")
|
||||
msg_sender = messages[i].get("user", "")
|
||||
thread_text += f"\n\n<@{msg_sender}>: {msg_text}"
|
||||
|
||||
msg_text = messages[message_id_idx].get("text", "")
|
||||
msg_sender = messages[message_id_idx].get("user", "")
|
||||
thread_text += f"\n\n<@{msg_sender}>: {msg_text}"
|
||||
|
||||
# Add following replies
|
||||
len_replies = 0
|
||||
for msg in messages[message_id_idx + 1 :]:
|
||||
for msg in replies:
|
||||
msg_text = msg.get("text", "")
|
||||
msg_sender = msg.get("user", "")
|
||||
reply = f"\n\n<@{msg_sender}>: {msg_text}"
|
||||
thread_text += reply
|
||||
|
||||
len_replies += len(reply)
|
||||
if len_replies >= DOC_EMBEDDING_CONTEXT_SIZE * 4:
|
||||
thread_text += "\n..."
|
||||
break
|
||||
thread_text += f"\n\n<@{msg_sender}>: {msg_text}"
|
||||
|
||||
# Replace user IDs with names using cached lookups
|
||||
userids: set[str] = set(re.findall(r"<@([A-Z0-9]+)>", thread_text))
|
||||
@@ -976,7 +1036,16 @@ def slack_retrieval(
|
||||
|
||||
# Query slack with entity filtering
|
||||
llm = get_default_llm()
|
||||
query_strings = build_slack_queries(query, llm, entities, available_channels)
|
||||
query_items = build_slack_queries(query, llm, entities, available_channels)
|
||||
|
||||
# Partition into direct thread fetches and search query strings
|
||||
direct_fetches: list[DirectThreadFetch] = []
|
||||
query_strings: list[str] = []
|
||||
for item in query_items:
|
||||
if isinstance(item, DirectThreadFetch):
|
||||
direct_fetches.append(item)
|
||||
else:
|
||||
query_strings.append(item)
|
||||
|
||||
# Determine filtering based on entities OR context (bot)
|
||||
include_dm = False
|
||||
@@ -993,8 +1062,16 @@ def slack_retrieval(
|
||||
f"Private channel context: will only allow messages from {allowed_private_channel} + public channels"
|
||||
)
|
||||
|
||||
# Build search tasks
|
||||
search_tasks = [
|
||||
# Build search tasks — direct thread fetches + keyword searches
|
||||
search_tasks: list[tuple] = [
|
||||
(
|
||||
_fetch_thread_from_url,
|
||||
(fetch, access_token, channel_metadata_dict),
|
||||
)
|
||||
for fetch in direct_fetches
|
||||
]
|
||||
|
||||
search_tasks.extend(
|
||||
(
|
||||
query_slack,
|
||||
(
|
||||
@@ -1010,7 +1087,7 @@ def slack_retrieval(
|
||||
),
|
||||
)
|
||||
for query_string in query_strings
|
||||
]
|
||||
)
|
||||
|
||||
# If include_dm is True AND we're not already searching all channels,
|
||||
# add additional searches without channel filters.
|
||||
|
||||
@@ -10,6 +10,7 @@ from pydantic import ValidationError
|
||||
|
||||
from onyx.configs.app_configs import MAX_SLACK_QUERY_EXPANSIONS
|
||||
from onyx.context.search.federated.models import ChannelMetadata
|
||||
from onyx.context.search.federated.models import DirectThreadFetch
|
||||
from onyx.context.search.models import ChunkIndexRequest
|
||||
from onyx.federated_connectors.slack.models import SlackEntities
|
||||
from onyx.llm.interfaces import LLM
|
||||
@@ -638,12 +639,38 @@ def expand_query_with_llm(query_text: str, llm: LLM) -> list[str]:
|
||||
return [query_text]
|
||||
|
||||
|
||||
SLACK_URL_PATTERN = re.compile(
|
||||
r"https?://[a-z0-9-]+\.slack\.com/archives/([A-Z0-9]+)/p(\d{16})"
|
||||
)
|
||||
|
||||
|
||||
def extract_slack_message_urls(
|
||||
query_text: str,
|
||||
) -> list[tuple[str, str]]:
|
||||
"""Extract Slack message URLs from query text.
|
||||
|
||||
Parses URLs like:
|
||||
https://onyx-company.slack.com/archives/C097NBWMY8Y/p1775491616524769
|
||||
|
||||
Returns list of (channel_id, thread_ts) tuples.
|
||||
The 16-digit timestamp is converted to Slack ts format (with dot).
|
||||
"""
|
||||
results = []
|
||||
for match in SLACK_URL_PATTERN.finditer(query_text):
|
||||
channel_id = match.group(1)
|
||||
raw_ts = match.group(2)
|
||||
# Convert p1775491616524769 -> 1775491616.524769
|
||||
thread_ts = f"{raw_ts[:10]}.{raw_ts[10:]}"
|
||||
results.append((channel_id, thread_ts))
|
||||
return results
|
||||
|
||||
|
||||
def build_slack_queries(
|
||||
query: ChunkIndexRequest,
|
||||
llm: LLM,
|
||||
entities: dict[str, Any] | None = None,
|
||||
available_channels: list[str] | None = None,
|
||||
) -> list[str]:
|
||||
) -> list[str | DirectThreadFetch]:
|
||||
"""Build Slack query strings with date filtering and query expansion."""
|
||||
default_search_days = 30
|
||||
if entities:
|
||||
@@ -668,6 +695,15 @@ def build_slack_queries(
|
||||
cutoff_date = datetime.now(timezone.utc) - timedelta(days=days_back)
|
||||
time_filter = f" after:{cutoff_date.strftime('%Y-%m-%d')}"
|
||||
|
||||
# Check for Slack message URLs — if found, add direct fetch requests
|
||||
url_fetches: list[DirectThreadFetch] = []
|
||||
slack_urls = extract_slack_message_urls(query.query)
|
||||
for channel_id, thread_ts in slack_urls:
|
||||
url_fetches.append(
|
||||
DirectThreadFetch(channel_id=channel_id, thread_ts=thread_ts)
|
||||
)
|
||||
logger.info(f"Detected Slack URL: channel={channel_id}, ts={thread_ts}")
|
||||
|
||||
# ALWAYS extract channel references from the query (not just for recency queries)
|
||||
channel_references = extract_channel_references_from_query(query.query)
|
||||
|
||||
@@ -684,7 +720,9 @@ def build_slack_queries(
|
||||
|
||||
# If valid channels detected, use ONLY those channels with NO keywords
|
||||
# Return query with ONLY time filter + channel filter (no keywords)
|
||||
return [build_channel_override_query(channel_references, time_filter)]
|
||||
return url_fetches + [
|
||||
build_channel_override_query(channel_references, time_filter)
|
||||
]
|
||||
except ValueError as e:
|
||||
# If validation fails, log the error and continue with normal flow
|
||||
logger.warning(f"Channel reference validation failed: {e}")
|
||||
@@ -702,7 +740,8 @@ def build_slack_queries(
|
||||
rephrased_queries = expand_query_with_llm(query.query, llm)
|
||||
|
||||
# Build final query strings with time filters
|
||||
return [
|
||||
search_queries = [
|
||||
rephrased_query.strip() + time_filter
|
||||
for rephrased_query in rephrased_queries[:MAX_SLACK_QUERY_EXPANSIONS]
|
||||
]
|
||||
return url_fetches + search_queries
|
||||
|
||||
@@ -66,7 +66,7 @@ PROVIDER_DISPLAY_NAMES: dict[str, str] = {
|
||||
LlmProviderNames.LM_STUDIO: "LM Studio",
|
||||
LlmProviderNames.LITELLM_PROXY: "LiteLLM Proxy",
|
||||
LlmProviderNames.BIFROST: "Bifrost",
|
||||
LlmProviderNames.OPENAI_COMPATIBLE: "OpenAI Compatible",
|
||||
LlmProviderNames.OPENAI_COMPATIBLE: "OpenAI-Compatible",
|
||||
"groq": "Groq",
|
||||
"anyscale": "Anyscale",
|
||||
"deepseek": "DeepSeek",
|
||||
@@ -87,6 +87,44 @@ PROVIDER_DISPLAY_NAMES: dict[str, str] = {
|
||||
"gemini": "Gemini",
|
||||
"stability": "Stability",
|
||||
"writer": "Writer",
|
||||
# Custom provider display names (used in the custom provider picker)
|
||||
"aiml": "AI/ML",
|
||||
"assemblyai": "AssemblyAI",
|
||||
"aws_polly": "AWS Polly",
|
||||
"azure_ai": "Azure AI",
|
||||
"chatgpt": "ChatGPT",
|
||||
"cohere_chat": "Cohere Chat",
|
||||
"datarobot": "DataRobot",
|
||||
"deepgram": "Deepgram",
|
||||
"deepinfra": "DeepInfra",
|
||||
"elevenlabs": "ElevenLabs",
|
||||
"fal_ai": "fal.ai",
|
||||
"featherless_ai": "Featherless AI",
|
||||
"fireworks_ai": "Fireworks AI",
|
||||
"friendliai": "FriendliAI",
|
||||
"gigachat": "GigaChat",
|
||||
"github_copilot": "GitHub Copilot",
|
||||
"gradient_ai": "Gradient AI",
|
||||
"huggingface": "HuggingFace",
|
||||
"jina_ai": "Jina AI",
|
||||
"lambda_ai": "Lambda AI",
|
||||
"llamagate": "LlamaGate",
|
||||
"meta_llama": "Meta Llama",
|
||||
"minimax": "MiniMax",
|
||||
"nlp_cloud": "NLP Cloud",
|
||||
"nvidia_nim": "NVIDIA NIM",
|
||||
"oci": "OCI",
|
||||
"ovhcloud": "OVHcloud",
|
||||
"palm": "PaLM",
|
||||
"publicai": "PublicAI",
|
||||
"runwayml": "RunwayML",
|
||||
"sambanova": "SambaNova",
|
||||
"together_ai": "Together AI",
|
||||
"vercel_ai_gateway": "Vercel AI Gateway",
|
||||
"volcengine": "Volcengine",
|
||||
"wandb": "W&B",
|
||||
"watsonx": "IBM watsonx",
|
||||
"zai": "ZAI",
|
||||
}
|
||||
|
||||
# Map vendors to their brand names (used for provider_display_name generation)
|
||||
|
||||
@@ -338,7 +338,7 @@ def get_provider_display_name(provider_name: str) -> str:
|
||||
VERTEXAI_PROVIDER_NAME: "Google Vertex AI",
|
||||
OPENROUTER_PROVIDER_NAME: "OpenRouter",
|
||||
LITELLM_PROXY_PROVIDER_NAME: "LiteLLM Proxy",
|
||||
OPENAI_COMPATIBLE_PROVIDER_NAME: "OpenAI Compatible",
|
||||
OPENAI_COMPATIBLE_PROVIDER_NAME: "OpenAI-Compatible",
|
||||
}
|
||||
|
||||
if provider_name in _ONYX_PROVIDER_DISPLAY_NAMES:
|
||||
|
||||
@@ -58,7 +58,7 @@ docker buildx build --platform linux/amd64,linux/arm64 \
|
||||
|
||||
1. **Build and push** the new image (see above)
|
||||
|
||||
2. **Update the ConfigMap** in `cloud-deployment-yamls/danswer/configmap/env-configmap.yaml`:
|
||||
2. **Update the ConfigMap** in in the internal repo
|
||||
```yaml
|
||||
SANDBOX_CONTAINER_IMAGE: "onyxdotapp/sandbox:v0.1.x"
|
||||
```
|
||||
|
||||
@@ -40,6 +40,8 @@ from onyx.db.models import User
|
||||
from onyx.db.persona import user_can_access_persona
|
||||
from onyx.error_handling.error_codes import OnyxErrorCode
|
||||
from onyx.error_handling.exceptions import OnyxError
|
||||
from onyx.llm.constants import PROVIDER_DISPLAY_NAMES
|
||||
from onyx.llm.constants import WELL_KNOWN_PROVIDER_NAMES
|
||||
from onyx.llm.factory import get_default_llm
|
||||
from onyx.llm.factory import get_llm
|
||||
from onyx.llm.factory import get_max_input_tokens_from_llm_provider
|
||||
@@ -60,6 +62,7 @@ from onyx.server.manage.llm.models import BedrockFinalModelResponse
|
||||
from onyx.server.manage.llm.models import BedrockModelsRequest
|
||||
from onyx.server.manage.llm.models import BifrostFinalModelResponse
|
||||
from onyx.server.manage.llm.models import BifrostModelsRequest
|
||||
from onyx.server.manage.llm.models import CustomProviderOption
|
||||
from onyx.server.manage.llm.models import DefaultModel
|
||||
from onyx.server.manage.llm.models import LitellmFinalModelResponse
|
||||
from onyx.server.manage.llm.models import LitellmModelDetails
|
||||
@@ -108,6 +111,43 @@ def _mask_string(value: str) -> str:
|
||||
return value[:4] + "****" + value[-4:]
|
||||
|
||||
|
||||
def _resolve_api_key(
|
||||
api_key: str | None,
|
||||
provider_name: str | None,
|
||||
api_base: str | None,
|
||||
db_session: Session,
|
||||
) -> str | None:
|
||||
"""Return the real API key for model-fetch endpoints.
|
||||
|
||||
When editing an existing provider the form value is masked (e.g.
|
||||
``sk-a****b1c2``). If *provider_name* is supplied we can look up
|
||||
the unmasked key from the database so the external request succeeds.
|
||||
|
||||
The stored key is only returned when the request's *api_base*
|
||||
matches the value stored in the database.
|
||||
"""
|
||||
if not provider_name:
|
||||
return api_key
|
||||
|
||||
existing_provider = fetch_existing_llm_provider(
|
||||
name=provider_name, db_session=db_session
|
||||
)
|
||||
if existing_provider and existing_provider.api_key:
|
||||
# Normalise both URLs before comparing so trailing-slash
|
||||
# differences don't cause a false mismatch.
|
||||
stored_base = (existing_provider.api_base or "").strip().rstrip("/")
|
||||
request_base = (api_base or "").strip().rstrip("/")
|
||||
if stored_base != request_base:
|
||||
return api_key
|
||||
|
||||
stored_key = existing_provider.api_key.get_value(apply_mask=False)
|
||||
# Only resolve when the incoming value is the masked form of the
|
||||
# stored key — i.e. the user hasn't typed a new key.
|
||||
if api_key and api_key == _mask_string(stored_key):
|
||||
return stored_key
|
||||
return api_key
|
||||
|
||||
|
||||
def _sync_fetched_models(
|
||||
db_session: Session,
|
||||
provider_name: str,
|
||||
@@ -250,6 +290,29 @@ def _validate_llm_provider_change(
|
||||
)
|
||||
|
||||
|
||||
@admin_router.get("/custom-provider-names")
|
||||
def fetch_custom_provider_names(
|
||||
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
|
||||
) -> list[CustomProviderOption]:
|
||||
"""Returns the sorted list of LiteLLM provider names that can be used
|
||||
with the custom provider modal (i.e. everything that is not already
|
||||
covered by a well-known provider modal)."""
|
||||
import litellm
|
||||
|
||||
well_known = {p.value for p in WELL_KNOWN_PROVIDER_NAMES}
|
||||
return sorted(
|
||||
(
|
||||
CustomProviderOption(
|
||||
value=name,
|
||||
label=PROVIDER_DISPLAY_NAMES.get(name, name.replace("_", " ").title()),
|
||||
)
|
||||
for name in litellm.models_by_provider.keys()
|
||||
if name not in well_known
|
||||
),
|
||||
key=lambda o: o.label.lower(),
|
||||
)
|
||||
|
||||
|
||||
@admin_router.get("/built-in/options")
|
||||
def fetch_llm_options(
|
||||
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
|
||||
@@ -1148,16 +1211,17 @@ def get_ollama_available_models(
|
||||
return sorted_results
|
||||
|
||||
|
||||
def _get_openrouter_models_response(api_base: str, api_key: str) -> dict:
|
||||
def _get_openrouter_models_response(api_base: str, api_key: str | None) -> dict:
|
||||
"""Perform GET to OpenRouter /models and return parsed JSON."""
|
||||
cleaned_api_base = api_base.strip().rstrip("/")
|
||||
url = f"{cleaned_api_base}/models"
|
||||
headers = {
|
||||
"Authorization": f"Bearer {api_key}",
|
||||
headers: dict[str, str] = {
|
||||
# Optional headers recommended by OpenRouter for attribution
|
||||
"HTTP-Referer": "https://onyx.app",
|
||||
"X-Title": "Onyx",
|
||||
}
|
||||
if api_key:
|
||||
headers["Authorization"] = f"Bearer {api_key}"
|
||||
try:
|
||||
response = httpx.get(url, headers=headers, timeout=10.0)
|
||||
response.raise_for_status()
|
||||
@@ -1180,8 +1244,12 @@ def get_openrouter_available_models(
|
||||
Parses id, name (display), context_length, and architecture.input_modalities.
|
||||
"""
|
||||
|
||||
api_key = _resolve_api_key(
|
||||
request.api_key, request.provider_name, request.api_base, db_session
|
||||
)
|
||||
|
||||
response_json = _get_openrouter_models_response(
|
||||
api_base=request.api_base, api_key=request.api_key
|
||||
api_base=request.api_base, api_key=api_key
|
||||
)
|
||||
|
||||
data = response_json.get("data", [])
|
||||
@@ -1274,13 +1342,18 @@ def get_lm_studio_available_models(
|
||||
|
||||
# If provider_name is given and the api_key hasn't been changed by the user,
|
||||
# fall back to the stored API key from the database (the form value is masked).
|
||||
# Only do so when the api_base matches what is stored.
|
||||
api_key = request.api_key
|
||||
if request.provider_name and not request.api_key_changed:
|
||||
existing_provider = fetch_existing_llm_provider(
|
||||
name=request.provider_name, db_session=db_session
|
||||
)
|
||||
if existing_provider and existing_provider.custom_config:
|
||||
api_key = existing_provider.custom_config.get(LM_STUDIO_API_KEY_CONFIG_KEY)
|
||||
stored_base = (existing_provider.api_base or "").strip().rstrip("/")
|
||||
if stored_base == cleaned_api_base:
|
||||
api_key = existing_provider.custom_config.get(
|
||||
LM_STUDIO_API_KEY_CONFIG_KEY
|
||||
)
|
||||
|
||||
url = f"{cleaned_api_base}/api/v1/models"
|
||||
headers: dict[str, str] = {}
|
||||
@@ -1364,8 +1437,12 @@ def get_litellm_available_models(
|
||||
db_session: Session = Depends(get_session),
|
||||
) -> list[LitellmFinalModelResponse]:
|
||||
"""Fetch available models from Litellm proxy /v1/models endpoint."""
|
||||
api_key = _resolve_api_key(
|
||||
request.api_key, request.provider_name, request.api_base, db_session
|
||||
)
|
||||
|
||||
response_json = _get_litellm_models_response(
|
||||
api_key=request.api_key, api_base=request.api_base
|
||||
api_key=api_key, api_base=request.api_base
|
||||
)
|
||||
|
||||
models = response_json.get("data", [])
|
||||
@@ -1422,7 +1499,7 @@ def get_litellm_available_models(
|
||||
return sorted_results
|
||||
|
||||
|
||||
def _get_litellm_models_response(api_key: str, api_base: str) -> dict:
|
||||
def _get_litellm_models_response(api_key: str | None, api_base: str) -> dict:
|
||||
"""Perform GET to Litellm proxy /api/v1/models and return parsed JSON."""
|
||||
cleaned_api_base = api_base.strip().rstrip("/")
|
||||
url = f"{cleaned_api_base}/v1/models"
|
||||
@@ -1497,8 +1574,12 @@ def get_bifrost_available_models(
|
||||
db_session: Session = Depends(get_session),
|
||||
) -> list[BifrostFinalModelResponse]:
|
||||
"""Fetch available models from Bifrost gateway /v1/models endpoint."""
|
||||
api_key = _resolve_api_key(
|
||||
request.api_key, request.provider_name, request.api_base, db_session
|
||||
)
|
||||
|
||||
response_json = _get_bifrost_models_response(
|
||||
api_base=request.api_base, api_key=request.api_key
|
||||
api_base=request.api_base, api_key=api_key
|
||||
)
|
||||
|
||||
models = response_json.get("data", [])
|
||||
@@ -1587,8 +1668,12 @@ def get_openai_compatible_server_available_models(
|
||||
db_session: Session = Depends(get_session),
|
||||
) -> list[OpenAICompatibleFinalModelResponse]:
|
||||
"""Fetch available models from a generic OpenAI-compatible /v1/models endpoint."""
|
||||
api_key = _resolve_api_key(
|
||||
request.api_key, request.provider_name, request.api_base, db_session
|
||||
)
|
||||
|
||||
response_json = _get_openai_compatible_server_response(
|
||||
api_base=request.api_base, api_key=request.api_key
|
||||
api_base=request.api_base, api_key=api_key
|
||||
)
|
||||
|
||||
models = response_json.get("data", [])
|
||||
@@ -1648,7 +1733,7 @@ def get_openai_compatible_server_available_models(
|
||||
)
|
||||
for r in sorted_results
|
||||
],
|
||||
source_label="OpenAI Compatible",
|
||||
source_label="OpenAI-Compatible",
|
||||
)
|
||||
|
||||
return sorted_results
|
||||
@@ -1667,6 +1752,6 @@ def _get_openai_compatible_server_response(
|
||||
|
||||
return _get_openai_compatible_models_response(
|
||||
url=url,
|
||||
source_name="OpenAI Compatible",
|
||||
source_name="OpenAI-Compatible",
|
||||
api_key=api_key,
|
||||
)
|
||||
|
||||
@@ -28,6 +28,13 @@ if TYPE_CHECKING:
|
||||
T = TypeVar("T", "LLMProviderDescriptor", "LLMProviderView", "VisionProviderResponse")
|
||||
|
||||
|
||||
class CustomProviderOption(BaseModel):
|
||||
"""A provider slug + human-friendly label for the custom-provider picker."""
|
||||
|
||||
value: str
|
||||
label: str
|
||||
|
||||
|
||||
class TestLLMRequest(BaseModel):
|
||||
# provider level
|
||||
id: int | None = None
|
||||
|
||||
72
backend/onyx/server/metrics/pruning_metrics.py
Normal file
72
backend/onyx/server/metrics/pruning_metrics.py
Normal file
@@ -0,0 +1,72 @@
|
||||
"""Pruning-specific Prometheus metrics.
|
||||
|
||||
Tracks three pruning pipeline phases for connector_pruning_generator_task:
|
||||
1. Document ID enumeration duration (extract_ids_from_runnable_connector)
|
||||
2. Diff + dispatch duration (DB lookup, set diff, generate_tasks)
|
||||
3. Rate limit errors during enumeration
|
||||
|
||||
All metrics are labeled by connector_type to identify which connector sources
|
||||
are the most expensive to prune. cc_pair_id is intentionally excluded to avoid
|
||||
unbounded cardinality.
|
||||
|
||||
Usage:
|
||||
from onyx.server.metrics.pruning_metrics import (
|
||||
observe_pruning_enumeration_duration,
|
||||
observe_pruning_diff_duration,
|
||||
inc_pruning_rate_limit_error,
|
||||
)
|
||||
"""
|
||||
|
||||
from prometheus_client import Counter
|
||||
from prometheus_client import Histogram
|
||||
|
||||
from onyx.utils.logger import setup_logger
|
||||
|
||||
logger = setup_logger()
|
||||
|
||||
PRUNING_ENUMERATION_DURATION = Histogram(
|
||||
"onyx_pruning_enumeration_duration_seconds",
|
||||
"Duration of document ID enumeration from the source connector during pruning",
|
||||
["connector_type"],
|
||||
buckets=[1, 5, 15, 30, 60, 120, 300, 600, 1800, 3600],
|
||||
)
|
||||
|
||||
PRUNING_DIFF_DURATION = Histogram(
|
||||
"onyx_pruning_diff_duration_seconds",
|
||||
"Duration of diff computation and subtask dispatch during pruning",
|
||||
["connector_type"],
|
||||
buckets=[1, 5, 15, 30, 60, 120, 300, 600, 1800, 3600],
|
||||
)
|
||||
|
||||
PRUNING_RATE_LIMIT_ERRORS = Counter(
|
||||
"onyx_pruning_rate_limit_errors_total",
|
||||
"Total rate limit errors encountered during pruning document ID enumeration",
|
||||
["connector_type"],
|
||||
)
|
||||
|
||||
|
||||
def observe_pruning_enumeration_duration(
|
||||
duration_seconds: float, connector_type: str
|
||||
) -> None:
|
||||
try:
|
||||
PRUNING_ENUMERATION_DURATION.labels(connector_type=connector_type).observe(
|
||||
duration_seconds
|
||||
)
|
||||
except Exception:
|
||||
logger.debug("Failed to record pruning enumeration duration", exc_info=True)
|
||||
|
||||
|
||||
def observe_pruning_diff_duration(duration_seconds: float, connector_type: str) -> None:
|
||||
try:
|
||||
PRUNING_DIFF_DURATION.labels(connector_type=connector_type).observe(
|
||||
duration_seconds
|
||||
)
|
||||
except Exception:
|
||||
logger.debug("Failed to record pruning diff duration", exc_info=True)
|
||||
|
||||
|
||||
def inc_pruning_rate_limit_error(connector_type: str) -> None:
|
||||
try:
|
||||
PRUNING_RATE_LIMIT_ERRORS.labels(connector_type=connector_type).inc()
|
||||
except Exception:
|
||||
logger.debug("Failed to record pruning rate limit error", exc_info=True)
|
||||
@@ -1,10 +0,0 @@
|
||||
[project]
|
||||
name = "onyx-backend"
|
||||
version = "0.0.0"
|
||||
requires-python = ">=3.11"
|
||||
dependencies = [
|
||||
"onyx[backend,dev,ee]",
|
||||
]
|
||||
|
||||
[tool.uv.sources]
|
||||
onyx = { workspace = true }
|
||||
@@ -46,11 +46,11 @@ curl -LsSf https://astral.py/uv/install.sh | sh
|
||||
|
||||
1. Edit `pyproject.toml`
|
||||
2. Add/update/remove dependencies in the appropriate section:
|
||||
- `[dependency-groups]` for dev tools
|
||||
- `[project.dependencies]` for **shared** dependencies (used by both backend and model_server)
|
||||
- `[project.optional-dependencies.backend]` for backend-only dependencies
|
||||
- `[project.optional-dependencies.model_server]` for model_server-only dependencies (ML packages)
|
||||
- `[project.optional-dependencies.ee]` for EE features
|
||||
- `[dependency-groups.backend]` for backend-only dependencies
|
||||
- `[dependency-groups.dev]` for dev tools
|
||||
- `[dependency-groups.ee]` for EE features
|
||||
- `[dependency-groups.model_server]` for model_server-only dependencies (ML packages)
|
||||
3. Commit your changes - pre-commit hooks will automatically regenerate the lock file and requirements
|
||||
|
||||
### 3. Generating Lock File and Requirements
|
||||
@@ -64,10 +64,10 @@ To manually regenerate:
|
||||
|
||||
```bash
|
||||
uv lock
|
||||
uv export --no-emit-project --no-default-groups --no-hashes --extra backend -o backend/requirements/default.txt
|
||||
uv export --no-emit-project --no-default-groups --no-hashes --group backend -o backend/requirements/default.txt
|
||||
uv export --no-emit-project --no-default-groups --no-hashes --group dev -o backend/requirements/dev.txt
|
||||
uv export --no-emit-project --no-default-groups --no-hashes --extra ee -o backend/requirements/ee.txt
|
||||
uv export --no-emit-project --no-default-groups --no-hashes --extra model_server -o backend/requirements/model_server.txt
|
||||
uv export --no-emit-project --no-default-groups --no-hashes --group ee -o backend/requirements/ee.txt
|
||||
uv export --no-emit-project --no-default-groups --no-hashes --group model_server -o backend/requirements/model_server.txt
|
||||
```
|
||||
|
||||
### 4. Installing Dependencies
|
||||
@@ -76,30 +76,14 @@ If enabled, all packages are installed automatically by the `uv-sync` pre-commit
|
||||
branches or pulling new changes.
|
||||
|
||||
```bash
|
||||
# For everything (most common)
|
||||
uv sync --all-extras
|
||||
# For development (most common) — installs shared + backend + dev + ee
|
||||
uv sync
|
||||
|
||||
# For backend production (shared + backend dependencies)
|
||||
uv sync --extra backend
|
||||
|
||||
# For backend development (shared + backend + dev tools)
|
||||
uv sync --extra backend --extra dev
|
||||
|
||||
# For backend with EE (shared + backend + ee)
|
||||
uv sync --extra backend --extra ee
|
||||
# For backend production only (shared + backend dependencies)
|
||||
uv sync --no-default-groups --group backend
|
||||
|
||||
# For model server (shared + model_server, NO backend deps!)
|
||||
uv sync --extra model_server
|
||||
```
|
||||
|
||||
`uv` aggressively [ignores active virtual environments](https://docs.astral.sh/uv/concepts/projects/config/#project-environment-path) and prefers the root virtual environment.
|
||||
When working in workspace packages, be sure to pass `--active` when syncing the virtual environment:
|
||||
|
||||
```bash
|
||||
cd backend/
|
||||
source .venv/bin/activate
|
||||
uv sync --active
|
||||
uv run --active ...
|
||||
uv sync --no-default-groups --group model_server
|
||||
```
|
||||
|
||||
### 5. Upgrading Dependencies
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# This file was autogenerated by uv via the following command:
|
||||
# uv export --no-emit-project --no-default-groups --no-hashes --extra backend -o backend/requirements/default.txt
|
||||
# uv export --no-emit-project --no-default-groups --no-hashes --group backend -o backend/requirements/default.txt
|
||||
agent-client-protocol==0.7.1
|
||||
# via onyx
|
||||
aioboto3==15.1.0
|
||||
@@ -19,7 +19,6 @@ aiohttp==3.13.4
|
||||
# aiobotocore
|
||||
# discord-py
|
||||
# litellm
|
||||
# onyx
|
||||
# voyageai
|
||||
aioitertools==0.13.0
|
||||
# via aiobotocore
|
||||
@@ -28,7 +27,6 @@ aiolimiter==1.2.1
|
||||
aiosignal==1.4.0
|
||||
# via aiohttp
|
||||
alembic==1.10.4
|
||||
# via onyx
|
||||
amqp==5.3.1
|
||||
# via kombu
|
||||
annotated-doc==0.0.4
|
||||
@@ -51,13 +49,10 @@ argon2-cffi==23.1.0
|
||||
argon2-cffi-bindings==25.1.0
|
||||
# via argon2-cffi
|
||||
asana==5.0.8
|
||||
# via onyx
|
||||
async-timeout==5.0.1 ; python_full_version < '3.11.3'
|
||||
# via redis
|
||||
asyncpg==0.30.0
|
||||
# via onyx
|
||||
atlassian-python-api==3.41.16
|
||||
# via onyx
|
||||
attrs==25.4.0
|
||||
# via
|
||||
# aiohttp
|
||||
@@ -68,7 +63,6 @@ attrs==25.4.0
|
||||
authlib==1.6.9
|
||||
# via fastmcp
|
||||
azure-cognitiveservices-speech==1.38.0
|
||||
# via onyx
|
||||
babel==2.17.0
|
||||
# via courlan
|
||||
backoff==2.2.1
|
||||
@@ -86,7 +80,6 @@ beautifulsoup4==4.12.3
|
||||
# atlassian-python-api
|
||||
# markdownify
|
||||
# markitdown
|
||||
# onyx
|
||||
# unstructured
|
||||
billiard==4.2.3
|
||||
# via celery
|
||||
@@ -94,9 +87,7 @@ boto3==1.39.11
|
||||
# via
|
||||
# aiobotocore
|
||||
# cohere
|
||||
# onyx
|
||||
boto3-stubs==1.39.11
|
||||
# via onyx
|
||||
botocore==1.39.11
|
||||
# via
|
||||
# aiobotocore
|
||||
@@ -105,7 +96,6 @@ botocore==1.39.11
|
||||
botocore-stubs==1.40.74
|
||||
# via boto3-stubs
|
||||
braintrust==0.3.9
|
||||
# via onyx
|
||||
brotli==1.2.0
|
||||
# via onyx
|
||||
bytecode==0.17.0
|
||||
@@ -115,7 +105,6 @@ cachetools==6.2.2
|
||||
caio==0.9.25
|
||||
# via aiofile
|
||||
celery==5.5.1
|
||||
# via onyx
|
||||
certifi==2025.11.12
|
||||
# via
|
||||
# asana
|
||||
@@ -134,7 +123,6 @@ cffi==2.0.0
|
||||
# pynacl
|
||||
# zstandard
|
||||
chardet==5.2.0
|
||||
# via onyx
|
||||
charset-normalizer==3.4.4
|
||||
# via
|
||||
# htmldate
|
||||
@@ -146,7 +134,6 @@ charset-normalizer==3.4.4
|
||||
chevron==0.14.0
|
||||
# via braintrust
|
||||
chonkie==1.0.10
|
||||
# via onyx
|
||||
claude-agent-sdk==0.1.19
|
||||
# via onyx
|
||||
click==8.3.1
|
||||
@@ -201,15 +188,12 @@ cryptography==46.0.6
|
||||
cyclopts==4.2.4
|
||||
# via fastmcp
|
||||
dask==2026.1.1
|
||||
# via
|
||||
# distributed
|
||||
# onyx
|
||||
# via distributed
|
||||
dataclasses-json==0.6.7
|
||||
# via unstructured
|
||||
dateparser==1.2.2
|
||||
# via htmldate
|
||||
ddtrace==3.10.0
|
||||
# via onyx
|
||||
decorator==5.2.1
|
||||
# via retry
|
||||
defusedxml==0.7.1
|
||||
@@ -223,7 +207,6 @@ deprecated==1.3.1
|
||||
discord-py==2.4.0
|
||||
# via onyx
|
||||
distributed==2026.1.1
|
||||
# via onyx
|
||||
distro==1.9.0
|
||||
# via
|
||||
# openai
|
||||
@@ -235,7 +218,6 @@ docstring-parser==0.17.0
|
||||
docutils==0.22.3
|
||||
# via rich-rst
|
||||
dropbox==12.0.2
|
||||
# via onyx
|
||||
durationpy==0.10
|
||||
# via kubernetes
|
||||
email-validator==2.2.0
|
||||
@@ -251,7 +233,6 @@ et-xmlfile==2.0.0
|
||||
events==0.5
|
||||
# via opensearch-py
|
||||
exa-py==1.15.4
|
||||
# via onyx
|
||||
exceptiongroup==1.3.0
|
||||
# via
|
||||
# braintrust
|
||||
@@ -262,23 +243,16 @@ fastapi==0.133.1
|
||||
# fastapi-users
|
||||
# onyx
|
||||
fastapi-limiter==0.1.6
|
||||
# via onyx
|
||||
fastapi-users==15.0.4
|
||||
# via
|
||||
# fastapi-users-db-sqlalchemy
|
||||
# onyx
|
||||
# via fastapi-users-db-sqlalchemy
|
||||
fastapi-users-db-sqlalchemy==7.0.0
|
||||
# via onyx
|
||||
fastavro==1.12.1
|
||||
# via cohere
|
||||
fastmcp==3.2.0
|
||||
# via onyx
|
||||
fastuuid==0.14.0
|
||||
# via litellm
|
||||
filelock==3.20.3
|
||||
# via
|
||||
# huggingface-hub
|
||||
# onyx
|
||||
# via huggingface-hub
|
||||
filetype==1.2.0
|
||||
# via unstructured
|
||||
flatbuffers==25.9.23
|
||||
@@ -298,7 +272,6 @@ gitpython==3.1.45
|
||||
google-api-core==2.28.1
|
||||
# via google-api-python-client
|
||||
google-api-python-client==2.86.0
|
||||
# via onyx
|
||||
google-auth==2.48.0
|
||||
# via
|
||||
# google-api-core
|
||||
@@ -308,11 +281,8 @@ google-auth==2.48.0
|
||||
# google-genai
|
||||
# kubernetes
|
||||
google-auth-httplib2==0.1.0
|
||||
# via
|
||||
# google-api-python-client
|
||||
# onyx
|
||||
# via google-api-python-client
|
||||
google-auth-oauthlib==1.0.0
|
||||
# via onyx
|
||||
google-genai==1.52.0
|
||||
# via onyx
|
||||
googleapis-common-protos==1.72.0
|
||||
@@ -340,7 +310,6 @@ htmldate==1.9.1
|
||||
httpcore==1.0.9
|
||||
# via
|
||||
# httpx
|
||||
# onyx
|
||||
# unstructured-client
|
||||
httplib2==0.31.0
|
||||
# via
|
||||
@@ -357,21 +326,16 @@ httpx==0.28.1
|
||||
# langsmith
|
||||
# litellm
|
||||
# mcp
|
||||
# onyx
|
||||
# openai
|
||||
# unstructured-client
|
||||
httpx-oauth==0.15.1
|
||||
# via onyx
|
||||
httpx-sse==0.4.3
|
||||
# via
|
||||
# cohere
|
||||
# mcp
|
||||
hubspot-api-client==11.1.0
|
||||
# via onyx
|
||||
huggingface-hub==0.35.3
|
||||
# via
|
||||
# onyx
|
||||
# tokenizers
|
||||
# via tokenizers
|
||||
humanfriendly==10.0
|
||||
# via coloredlogs
|
||||
hyperframe==6.1.0
|
||||
@@ -390,9 +354,7 @@ importlib-metadata==8.7.0
|
||||
# litellm
|
||||
# opentelemetry-api
|
||||
inflection==0.5.1
|
||||
# via
|
||||
# onyx
|
||||
# pyairtable
|
||||
# via pyairtable
|
||||
iniconfig==2.3.0
|
||||
# via pytest
|
||||
isodate==0.7.2
|
||||
@@ -414,7 +376,6 @@ jinja2==3.1.6
|
||||
# distributed
|
||||
# litellm
|
||||
jira==3.10.5
|
||||
# via onyx
|
||||
jiter==0.12.0
|
||||
# via openai
|
||||
jmespath==1.0.1
|
||||
@@ -430,9 +391,7 @@ jsonpatch==1.33
|
||||
jsonpointer==3.0.0
|
||||
# via jsonpatch
|
||||
jsonref==1.1.0
|
||||
# via
|
||||
# fastmcp
|
||||
# onyx
|
||||
# via fastmcp
|
||||
jsonschema==4.25.1
|
||||
# via
|
||||
# litellm
|
||||
@@ -450,15 +409,12 @@ kombu==5.5.4
|
||||
kubernetes==31.0.0
|
||||
# via onyx
|
||||
langchain-core==1.2.22
|
||||
# via onyx
|
||||
langdetect==1.0.9
|
||||
# via unstructured
|
||||
langfuse==3.10.0
|
||||
# via onyx
|
||||
langsmith==0.3.45
|
||||
# via langchain-core
|
||||
lazy-imports==1.0.1
|
||||
# via onyx
|
||||
legacy-cgi==2.6.4 ; python_full_version >= '3.13'
|
||||
# via ddtrace
|
||||
litellm==1.81.6
|
||||
@@ -473,7 +429,6 @@ lxml==5.3.0
|
||||
# justext
|
||||
# lxml-html-clean
|
||||
# markitdown
|
||||
# onyx
|
||||
# python-docx
|
||||
# python-pptx
|
||||
# python3-saml
|
||||
@@ -488,9 +443,7 @@ magika==0.6.3
|
||||
makefun==1.16.0
|
||||
# via fastapi-users
|
||||
mako==1.2.4
|
||||
# via
|
||||
# alembic
|
||||
# onyx
|
||||
# via alembic
|
||||
mammoth==1.11.0
|
||||
# via markitdown
|
||||
markdown-it-py==4.0.0
|
||||
@@ -498,7 +451,6 @@ markdown-it-py==4.0.0
|
||||
markdownify==1.2.2
|
||||
# via markitdown
|
||||
markitdown==0.1.2
|
||||
# via onyx
|
||||
markupsafe==3.0.3
|
||||
# via
|
||||
# jinja2
|
||||
@@ -512,11 +464,9 @@ mcp==1.26.0
|
||||
# via
|
||||
# claude-agent-sdk
|
||||
# fastmcp
|
||||
# onyx
|
||||
mdurl==0.1.2
|
||||
# via markdown-it-py
|
||||
mistune==3.2.0
|
||||
# via onyx
|
||||
more-itertools==10.8.0
|
||||
# via
|
||||
# jaraco-classes
|
||||
@@ -525,13 +475,10 @@ more-itertools==10.8.0
|
||||
mpmath==1.3.0
|
||||
# via sympy
|
||||
msal==1.34.0
|
||||
# via
|
||||
# office365-rest-python-client
|
||||
# onyx
|
||||
# via office365-rest-python-client
|
||||
msgpack==1.1.2
|
||||
# via distributed
|
||||
msoffcrypto-tool==5.4.2
|
||||
# via onyx
|
||||
multidict==6.7.0
|
||||
# via
|
||||
# aiobotocore
|
||||
@@ -548,7 +495,6 @@ mypy-extensions==1.0.0
|
||||
# mypy
|
||||
# typing-inspect
|
||||
nest-asyncio==1.6.0
|
||||
# via onyx
|
||||
nltk==3.9.4
|
||||
# via unstructured
|
||||
numpy==2.4.1
|
||||
@@ -563,10 +509,8 @@ oauthlib==3.2.2
|
||||
# via
|
||||
# atlassian-python-api
|
||||
# kubernetes
|
||||
# onyx
|
||||
# requests-oauthlib
|
||||
office365-rest-python-client==2.6.2
|
||||
# via onyx
|
||||
olefile==0.47
|
||||
# via
|
||||
# msoffcrypto-tool
|
||||
@@ -582,15 +526,11 @@ openai==2.14.0
|
||||
openapi-pydantic==0.5.1
|
||||
# via fastmcp
|
||||
openinference-instrumentation==0.1.42
|
||||
# via onyx
|
||||
openinference-semantic-conventions==0.1.25
|
||||
# via openinference-instrumentation
|
||||
openpyxl==3.0.10
|
||||
# via
|
||||
# markitdown
|
||||
# onyx
|
||||
# via markitdown
|
||||
opensearch-py==3.0.0
|
||||
# via onyx
|
||||
opentelemetry-api==1.39.1
|
||||
# via
|
||||
# ddtrace
|
||||
@@ -606,7 +546,6 @@ opentelemetry-exporter-otlp-proto-http==1.39.1
|
||||
# via langfuse
|
||||
opentelemetry-proto==1.39.1
|
||||
# via
|
||||
# onyx
|
||||
# opentelemetry-exporter-otlp-proto-common
|
||||
# opentelemetry-exporter-otlp-proto-http
|
||||
opentelemetry-sdk==1.39.1
|
||||
@@ -640,7 +579,6 @@ parameterized==0.9.0
|
||||
partd==1.4.2
|
||||
# via dask
|
||||
passlib==1.7.4
|
||||
# via onyx
|
||||
pathable==0.4.4
|
||||
# via jsonschema-path
|
||||
pdfminer-six==20251107
|
||||
@@ -652,9 +590,7 @@ platformdirs==4.5.0
|
||||
# fastmcp
|
||||
# zeep
|
||||
playwright==1.55.0
|
||||
# via
|
||||
# onyx
|
||||
# pytest-playwright
|
||||
# via pytest-playwright
|
||||
pluggy==1.6.0
|
||||
# via pytest
|
||||
ply==3.11
|
||||
@@ -684,12 +620,9 @@ protobuf==6.33.5
|
||||
psutil==7.1.3
|
||||
# via
|
||||
# distributed
|
||||
# onyx
|
||||
# unstructured
|
||||
psycopg2-binary==2.9.9
|
||||
# via onyx
|
||||
puremagic==1.28
|
||||
# via onyx
|
||||
pwdlib==0.3.0
|
||||
# via fastapi-users
|
||||
py==1.11.0
|
||||
@@ -697,7 +630,6 @@ py==1.11.0
|
||||
py-key-value-aio==0.4.4
|
||||
# via fastmcp
|
||||
pyairtable==3.0.1
|
||||
# via onyx
|
||||
pyasn1==0.6.3
|
||||
# via
|
||||
# pyasn1-modules
|
||||
@@ -707,7 +639,6 @@ pyasn1-modules==0.4.2
|
||||
pycparser==2.23 ; implementation_name != 'PyPy'
|
||||
# via cffi
|
||||
pycryptodome==3.19.1
|
||||
# via onyx
|
||||
pydantic==2.11.7
|
||||
# via
|
||||
# agent-client-protocol
|
||||
@@ -734,7 +665,6 @@ pydantic-settings==2.12.0
|
||||
pyee==13.0.0
|
||||
# via playwright
|
||||
pygithub==2.5.0
|
||||
# via onyx
|
||||
pygments==2.20.0
|
||||
# via rich
|
||||
pyjwt==2.12.0
|
||||
@@ -745,17 +675,13 @@ pyjwt==2.12.0
|
||||
# pygithub
|
||||
# simple-salesforce
|
||||
pympler==1.1
|
||||
# via onyx
|
||||
pynacl==1.6.2
|
||||
# via pygithub
|
||||
pypandoc-binary==1.16.2
|
||||
# via onyx
|
||||
pyparsing==3.2.5
|
||||
# via httplib2
|
||||
pypdf==6.9.2
|
||||
# via
|
||||
# onyx
|
||||
# unstructured-client
|
||||
# via unstructured-client
|
||||
pyperclip==1.11.0
|
||||
# via fastmcp
|
||||
pyreadline3==3.5.4 ; sys_platform == 'win32'
|
||||
@@ -768,9 +694,7 @@ pytest==8.3.5
|
||||
pytest-base-url==2.1.0
|
||||
# via pytest-playwright
|
||||
pytest-mock==3.12.0
|
||||
# via onyx
|
||||
pytest-playwright==0.7.0
|
||||
# via onyx
|
||||
python-dateutil==2.8.2
|
||||
# via
|
||||
# aiobotocore
|
||||
@@ -781,11 +705,9 @@ python-dateutil==2.8.2
|
||||
# htmldate
|
||||
# hubspot-api-client
|
||||
# kubernetes
|
||||
# onyx
|
||||
# opensearch-py
|
||||
# pandas
|
||||
python-docx==1.1.2
|
||||
# via onyx
|
||||
python-dotenv==1.1.1
|
||||
# via
|
||||
# braintrust
|
||||
@@ -793,10 +715,8 @@ python-dotenv==1.1.1
|
||||
# litellm
|
||||
# magika
|
||||
# mcp
|
||||
# onyx
|
||||
# pydantic-settings
|
||||
python-gitlab==5.6.0
|
||||
# via onyx
|
||||
python-http-client==3.3.7
|
||||
# via sendgrid
|
||||
python-iso639==2025.11.16
|
||||
@@ -807,19 +727,15 @@ python-multipart==0.0.22
|
||||
# via
|
||||
# fastapi-users
|
||||
# mcp
|
||||
# onyx
|
||||
python-oxmsg==0.0.2
|
||||
# via unstructured
|
||||
python-pptx==0.6.23
|
||||
# via
|
||||
# markitdown
|
||||
# onyx
|
||||
# via markitdown
|
||||
python-slugify==8.0.4
|
||||
# via
|
||||
# braintrust
|
||||
# pytest-playwright
|
||||
python3-saml==1.15.0
|
||||
# via onyx
|
||||
pytz==2025.2
|
||||
# via
|
||||
# dateparser
|
||||
@@ -827,7 +743,6 @@ pytz==2025.2
|
||||
# pandas
|
||||
# zeep
|
||||
pywikibot==9.0.0
|
||||
# via onyx
|
||||
pywin32==311 ; sys_platform == 'win32'
|
||||
# via
|
||||
# mcp
|
||||
@@ -844,13 +759,9 @@ pyyaml==6.0.3
|
||||
# kubernetes
|
||||
# langchain-core
|
||||
rapidfuzz==3.13.0
|
||||
# via
|
||||
# onyx
|
||||
# unstructured
|
||||
# via unstructured
|
||||
redis==5.0.8
|
||||
# via
|
||||
# fastapi-limiter
|
||||
# onyx
|
||||
# via fastapi-limiter
|
||||
referencing==0.36.2
|
||||
# via
|
||||
# jsonschema
|
||||
@@ -881,7 +792,6 @@ requests==2.33.0
|
||||
# matrix-client
|
||||
# msal
|
||||
# office365-rest-python-client
|
||||
# onyx
|
||||
# opensearch-py
|
||||
# opentelemetry-exporter-otlp-proto-http
|
||||
# pyairtable
|
||||
@@ -907,7 +817,6 @@ requests-oauthlib==1.3.1
|
||||
# google-auth-oauthlib
|
||||
# jira
|
||||
# kubernetes
|
||||
# onyx
|
||||
requests-toolbelt==1.0.0
|
||||
# via
|
||||
# jira
|
||||
@@ -918,7 +827,6 @@ requests-toolbelt==1.0.0
|
||||
retry==0.9.2
|
||||
# via onyx
|
||||
rfc3986==1.5.0
|
||||
# via onyx
|
||||
rich==14.2.0
|
||||
# via
|
||||
# cyclopts
|
||||
@@ -938,15 +846,12 @@ s3transfer==0.13.1
|
||||
secretstorage==3.5.0 ; sys_platform == 'linux'
|
||||
# via keyring
|
||||
sendgrid==6.12.5
|
||||
# via onyx
|
||||
sentry-sdk==2.14.0
|
||||
# via onyx
|
||||
shapely==2.0.6
|
||||
# via onyx
|
||||
shellingham==1.5.4
|
||||
# via typer
|
||||
simple-salesforce==1.12.6
|
||||
# via onyx
|
||||
six==1.17.0
|
||||
# via
|
||||
# asana
|
||||
@@ -961,7 +866,6 @@ six==1.17.0
|
||||
# python-dateutil
|
||||
# stone
|
||||
slack-sdk==3.20.2
|
||||
# via onyx
|
||||
smmap==5.0.2
|
||||
# via gitdb
|
||||
sniffio==1.3.1
|
||||
@@ -976,7 +880,6 @@ sqlalchemy==2.0.15
|
||||
# via
|
||||
# alembic
|
||||
# fastapi-users-db-sqlalchemy
|
||||
# onyx
|
||||
sse-starlette==3.0.3
|
||||
# via mcp
|
||||
sseclient-py==1.8.0
|
||||
@@ -985,14 +888,11 @@ starlette==0.49.3
|
||||
# via
|
||||
# fastapi
|
||||
# mcp
|
||||
# onyx
|
||||
# prometheus-fastapi-instrumentator
|
||||
stone==3.3.1
|
||||
# via dropbox
|
||||
stripe==10.12.0
|
||||
# via onyx
|
||||
supervisor==4.3.0
|
||||
# via onyx
|
||||
sympy==1.14.0
|
||||
# via onnxruntime
|
||||
tblib==3.2.2
|
||||
@@ -1005,11 +905,8 @@ tenacity==9.1.2
|
||||
text-unidecode==1.3
|
||||
# via python-slugify
|
||||
tiktoken==0.7.0
|
||||
# via
|
||||
# litellm
|
||||
# onyx
|
||||
# via litellm
|
||||
timeago==1.0.16
|
||||
# via onyx
|
||||
tld==0.13.1
|
||||
# via courlan
|
||||
tokenizers==0.21.4
|
||||
@@ -1033,13 +930,11 @@ tqdm==4.67.1
|
||||
# openai
|
||||
# unstructured
|
||||
trafilatura==1.12.2
|
||||
# via onyx
|
||||
typer==0.20.0
|
||||
# via mcp
|
||||
types-awscrt==0.28.4
|
||||
# via botocore-stubs
|
||||
types-openpyxl==3.0.4.7
|
||||
# via onyx
|
||||
types-requests==2.32.0.20250328
|
||||
# via cohere
|
||||
types-s3transfer==0.14.0
|
||||
@@ -1105,11 +1000,8 @@ tzlocal==5.3.1
|
||||
uncalled-for==0.2.0
|
||||
# via fastmcp
|
||||
unstructured==0.18.27
|
||||
# via onyx
|
||||
unstructured-client==0.42.6
|
||||
# via
|
||||
# onyx
|
||||
# unstructured
|
||||
# via unstructured
|
||||
uritemplate==4.2.0
|
||||
# via google-api-python-client
|
||||
urllib3==2.6.3
|
||||
@@ -1121,7 +1013,6 @@ urllib3==2.6.3
|
||||
# htmldate
|
||||
# hubspot-api-client
|
||||
# kubernetes
|
||||
# onyx
|
||||
# opensearch-py
|
||||
# pyairtable
|
||||
# pygithub
|
||||
@@ -1171,9 +1062,7 @@ xlrd==2.0.2
|
||||
xlsxwriter==3.2.9
|
||||
# via python-pptx
|
||||
xmlsec==1.3.14
|
||||
# via
|
||||
# onyx
|
||||
# python3-saml
|
||||
# via python3-saml
|
||||
xmltodict==1.0.2
|
||||
# via ddtrace
|
||||
yarl==1.22.0
|
||||
@@ -1187,4 +1076,3 @@ zipp==3.23.0
|
||||
zstandard==0.23.0
|
||||
# via langsmith
|
||||
zulip==0.8.2
|
||||
# via onyx
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# This file was autogenerated by uv via the following command:
|
||||
# uv export --no-emit-project --no-default-groups --no-hashes --extra dev -o backend/requirements/dev.txt
|
||||
# uv export --no-emit-project --no-default-groups --no-hashes --group dev -o backend/requirements/dev.txt
|
||||
agent-client-protocol==0.7.1
|
||||
# via onyx
|
||||
aioboto3==15.1.0
|
||||
@@ -47,7 +47,6 @@ attrs==25.4.0
|
||||
# jsonschema
|
||||
# referencing
|
||||
black==25.1.0
|
||||
# via onyx
|
||||
boto3==1.39.11
|
||||
# via
|
||||
# aiobotocore
|
||||
@@ -60,7 +59,6 @@ botocore==1.39.11
|
||||
brotli==1.2.0
|
||||
# via onyx
|
||||
celery-types==0.19.0
|
||||
# via onyx
|
||||
certifi==2025.11.12
|
||||
# via
|
||||
# httpcore
|
||||
@@ -122,7 +120,6 @@ execnet==2.1.2
|
||||
executing==2.2.1
|
||||
# via stack-data
|
||||
faker==40.1.2
|
||||
# via onyx
|
||||
fastapi==0.133.1
|
||||
# via
|
||||
# onyx
|
||||
@@ -156,7 +153,6 @@ h11==0.16.0
|
||||
# httpcore
|
||||
# uvicorn
|
||||
hatchling==1.28.0
|
||||
# via onyx
|
||||
hf-xet==1.2.0 ; platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'
|
||||
# via huggingface-hub
|
||||
httpcore==1.0.9
|
||||
@@ -187,7 +183,6 @@ importlib-metadata==8.7.0
|
||||
iniconfig==2.3.0
|
||||
# via pytest
|
||||
ipykernel==6.29.5
|
||||
# via onyx
|
||||
ipython==9.7.0
|
||||
# via ipykernel
|
||||
ipython-pygments-lexers==1.1.1
|
||||
@@ -224,13 +219,11 @@ litellm==1.81.6
|
||||
mako==1.2.4
|
||||
# via alembic
|
||||
manygo==0.2.0
|
||||
# via onyx
|
||||
markupsafe==3.0.3
|
||||
# via
|
||||
# jinja2
|
||||
# mako
|
||||
matplotlib==3.10.8
|
||||
# via onyx
|
||||
matplotlib-inline==0.2.1
|
||||
# via
|
||||
# ipykernel
|
||||
@@ -243,12 +236,10 @@ multidict==6.7.0
|
||||
# aiohttp
|
||||
# yarl
|
||||
mypy==1.13.0
|
||||
# via onyx
|
||||
mypy-extensions==1.0.0
|
||||
# via
|
||||
# black
|
||||
# mypy
|
||||
# onyx
|
||||
nest-asyncio==1.6.0
|
||||
# via ipykernel
|
||||
nodeenv==1.9.1
|
||||
@@ -263,16 +254,13 @@ oauthlib==3.2.2
|
||||
# via
|
||||
# kubernetes
|
||||
# requests-oauthlib
|
||||
onyx-devtools==0.7.2
|
||||
# via onyx
|
||||
onyx-devtools==0.7.3
|
||||
openai==2.14.0
|
||||
# via
|
||||
# litellm
|
||||
# onyx
|
||||
openapi-generator-cli==7.17.0
|
||||
# via
|
||||
# onyx
|
||||
# onyx-devtools
|
||||
# via onyx-devtools
|
||||
packaging==24.2
|
||||
# via
|
||||
# black
|
||||
@@ -282,7 +270,6 @@ packaging==24.2
|
||||
# matplotlib
|
||||
# pytest
|
||||
pandas-stubs==2.3.3.251201
|
||||
# via onyx
|
||||
parameterized==0.9.0
|
||||
# via cohere
|
||||
parso==0.8.5
|
||||
@@ -305,7 +292,6 @@ pluggy==1.6.0
|
||||
# hatchling
|
||||
# pytest
|
||||
pre-commit==3.2.2
|
||||
# via onyx
|
||||
prometheus-client==0.23.1
|
||||
# via
|
||||
# onyx
|
||||
@@ -359,22 +345,16 @@ pyparsing==3.2.5
|
||||
# via matplotlib
|
||||
pytest==8.3.5
|
||||
# via
|
||||
# onyx
|
||||
# pytest-alembic
|
||||
# pytest-asyncio
|
||||
# pytest-dotenv
|
||||
# pytest-repeat
|
||||
# pytest-xdist
|
||||
pytest-alembic==0.12.1
|
||||
# via onyx
|
||||
pytest-asyncio==1.3.0
|
||||
# via onyx
|
||||
pytest-dotenv==0.5.2
|
||||
# via onyx
|
||||
pytest-repeat==0.9.4
|
||||
# via onyx
|
||||
pytest-xdist==3.8.0
|
||||
# via onyx
|
||||
python-dateutil==2.8.2
|
||||
# via
|
||||
# aiobotocore
|
||||
@@ -407,9 +387,7 @@ referencing==0.36.2
|
||||
regex==2025.11.3
|
||||
# via tiktoken
|
||||
release-tag==0.5.2
|
||||
# via onyx
|
||||
reorder-python-imports-black==3.14.0
|
||||
# via onyx
|
||||
requests==2.33.0
|
||||
# via
|
||||
# cohere
|
||||
@@ -430,7 +408,6 @@ rpds-py==0.29.0
|
||||
rsa==4.9.1
|
||||
# via google-auth
|
||||
ruff==0.12.0
|
||||
# via onyx
|
||||
s3transfer==0.13.1
|
||||
# via boto3
|
||||
sentry-sdk==2.14.0
|
||||
@@ -484,39 +461,22 @@ traitlets==5.14.3
|
||||
trove-classifiers==2025.12.1.14
|
||||
# via hatchling
|
||||
types-beautifulsoup4==4.12.0.3
|
||||
# via onyx
|
||||
types-html5lib==1.1.11.13
|
||||
# via
|
||||
# onyx
|
||||
# types-beautifulsoup4
|
||||
# via types-beautifulsoup4
|
||||
types-oauthlib==3.2.0.9
|
||||
# via onyx
|
||||
types-passlib==1.7.7.20240106
|
||||
# via onyx
|
||||
types-pillow==10.2.0.20240822
|
||||
# via onyx
|
||||
types-psutil==7.1.3.20251125
|
||||
# via onyx
|
||||
types-psycopg2==2.9.21.10
|
||||
# via onyx
|
||||
types-python-dateutil==2.8.19.13
|
||||
# via onyx
|
||||
types-pytz==2023.3.1.1
|
||||
# via
|
||||
# onyx
|
||||
# pandas-stubs
|
||||
# via pandas-stubs
|
||||
types-pyyaml==6.0.12.11
|
||||
# via onyx
|
||||
types-regex==2023.3.23.1
|
||||
# via onyx
|
||||
types-requests==2.32.0.20250328
|
||||
# via
|
||||
# cohere
|
||||
# onyx
|
||||
# via cohere
|
||||
types-retry==0.9.9.3
|
||||
# via onyx
|
||||
types-setuptools==68.0.0.3
|
||||
# via onyx
|
||||
typing-extensions==4.15.0
|
||||
# via
|
||||
# aiosignal
|
||||
@@ -574,4 +534,3 @@ yarl==1.22.0
|
||||
zipp==3.23.0
|
||||
# via importlib-metadata
|
||||
zizmor==1.18.0
|
||||
# via onyx
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# This file was autogenerated by uv via the following command:
|
||||
# uv export --no-emit-project --no-default-groups --no-hashes --extra ee -o backend/requirements/ee.txt
|
||||
# uv export --no-emit-project --no-default-groups --no-hashes --group ee -o backend/requirements/ee.txt
|
||||
agent-client-protocol==0.7.1
|
||||
# via onyx
|
||||
aioboto3==15.1.0
|
||||
@@ -182,7 +182,6 @@ packaging==24.2
|
||||
parameterized==0.9.0
|
||||
# via cohere
|
||||
posthog==3.7.4
|
||||
# via onyx
|
||||
prometheus-client==0.23.1
|
||||
# via
|
||||
# onyx
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
# This file was autogenerated by uv via the following command:
|
||||
# uv export --no-emit-project --no-default-groups --no-hashes --extra model_server -o backend/requirements/model_server.txt
|
||||
# uv export --no-emit-project --no-default-groups --no-hashes --group model_server -o backend/requirements/model_server.txt
|
||||
accelerate==1.6.0
|
||||
# via onyx
|
||||
agent-client-protocol==0.7.1
|
||||
# via onyx
|
||||
aioboto3==15.1.0
|
||||
@@ -105,7 +104,6 @@ distro==1.9.0
|
||||
durationpy==0.10
|
||||
# via kubernetes
|
||||
einops==0.8.1
|
||||
# via onyx
|
||||
fastapi==0.133.1
|
||||
# via
|
||||
# onyx
|
||||
@@ -207,7 +205,6 @@ networkx==3.5
|
||||
numpy==2.4.1
|
||||
# via
|
||||
# accelerate
|
||||
# onyx
|
||||
# scikit-learn
|
||||
# scipy
|
||||
# transformers
|
||||
@@ -363,7 +360,6 @@ s3transfer==0.13.1
|
||||
safetensors==0.5.3
|
||||
# via
|
||||
# accelerate
|
||||
# onyx
|
||||
# transformers
|
||||
scikit-learn==1.7.2
|
||||
# via sentence-transformers
|
||||
@@ -372,7 +368,6 @@ scipy==1.16.3
|
||||
# scikit-learn
|
||||
# sentence-transformers
|
||||
sentence-transformers==4.0.2
|
||||
# via onyx
|
||||
sentry-sdk==2.14.0
|
||||
# via onyx
|
||||
setuptools==80.9.0 ; python_full_version >= '3.12'
|
||||
@@ -411,7 +406,6 @@ tokenizers==0.21.4
|
||||
torch==2.9.1
|
||||
# via
|
||||
# accelerate
|
||||
# onyx
|
||||
# sentence-transformers
|
||||
tqdm==4.67.1
|
||||
# via
|
||||
@@ -420,9 +414,7 @@ tqdm==4.67.1
|
||||
# sentence-transformers
|
||||
# transformers
|
||||
transformers==4.53.0
|
||||
# via
|
||||
# onyx
|
||||
# sentence-transformers
|
||||
# via sentence-transformers
|
||||
triton==3.5.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
|
||||
# via torch
|
||||
types-requests==2.32.0.20250328
|
||||
|
||||
0
backend/tests/unit/background/__init__.py
Normal file
0
backend/tests/unit/background/__init__.py
Normal file
0
backend/tests/unit/background/celery/__init__.py
Normal file
0
backend/tests/unit/background/celery/__init__.py
Normal file
149
backend/tests/unit/background/celery/test_celery_utils.py
Normal file
149
backend/tests/unit/background/celery/test_celery_utils.py
Normal file
@@ -0,0 +1,149 @@
|
||||
"""Unit tests for extract_ids_from_runnable_connector metrics instrumentation."""
|
||||
|
||||
from collections.abc import Iterator
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import pytest
|
||||
|
||||
from onyx.background.celery.celery_utils import extract_ids_from_runnable_connector
|
||||
from onyx.connectors.interfaces import SlimConnector
|
||||
from onyx.connectors.models import SlimDocument
|
||||
from onyx.server.metrics.pruning_metrics import PRUNING_ENUMERATION_DURATION
|
||||
from onyx.server.metrics.pruning_metrics import PRUNING_RATE_LIMIT_ERRORS
|
||||
|
||||
|
||||
def _make_slim_connector(doc_ids: list[str]) -> SlimConnector:
|
||||
"""Mock SlimConnector that yields the given doc IDs in one batch."""
|
||||
connector = MagicMock(spec=SlimConnector)
|
||||
docs = [
|
||||
MagicMock(spec=SlimDocument, id=doc_id, parent_hierarchy_raw_node_id=None)
|
||||
for doc_id in doc_ids
|
||||
]
|
||||
connector.retrieve_all_slim_docs.return_value = iter([docs])
|
||||
return connector
|
||||
|
||||
|
||||
def _raising_connector(message: str) -> SlimConnector:
|
||||
"""Mock SlimConnector whose generator raises with the given message."""
|
||||
connector = MagicMock(spec=SlimConnector)
|
||||
|
||||
def raising_iter() -> Iterator:
|
||||
raise Exception(message)
|
||||
yield
|
||||
|
||||
connector.retrieve_all_slim_docs.return_value = raising_iter()
|
||||
return connector
|
||||
|
||||
|
||||
class TestEnumerationDuration:
|
||||
def test_recorded_on_success(self) -> None:
|
||||
connector = _make_slim_connector(["doc1"])
|
||||
before = PRUNING_ENUMERATION_DURATION.labels(
|
||||
connector_type="google_drive"
|
||||
)._sum.get()
|
||||
|
||||
extract_ids_from_runnable_connector(connector, connector_type="google_drive")
|
||||
|
||||
after = PRUNING_ENUMERATION_DURATION.labels(
|
||||
connector_type="google_drive"
|
||||
)._sum.get()
|
||||
assert after >= before # duration observed (non-negative)
|
||||
|
||||
def test_recorded_on_exception(self) -> None:
|
||||
connector = _raising_connector("unexpected error")
|
||||
before = PRUNING_ENUMERATION_DURATION.labels(
|
||||
connector_type="confluence"
|
||||
)._sum.get()
|
||||
|
||||
with pytest.raises(Exception):
|
||||
extract_ids_from_runnable_connector(connector, connector_type="confluence")
|
||||
|
||||
after = PRUNING_ENUMERATION_DURATION.labels(
|
||||
connector_type="confluence"
|
||||
)._sum.get()
|
||||
assert after >= before # duration observed even on exception
|
||||
|
||||
|
||||
class TestRateLimitDetection:
|
||||
def test_increments_on_rate_limit_message(self) -> None:
|
||||
connector = _raising_connector("rate limit exceeded")
|
||||
before = PRUNING_RATE_LIMIT_ERRORS.labels(
|
||||
connector_type="google_drive"
|
||||
)._value.get()
|
||||
|
||||
with pytest.raises(Exception, match="rate limit exceeded"):
|
||||
extract_ids_from_runnable_connector(
|
||||
connector, connector_type="google_drive"
|
||||
)
|
||||
|
||||
after = PRUNING_RATE_LIMIT_ERRORS.labels(
|
||||
connector_type="google_drive"
|
||||
)._value.get()
|
||||
assert after == before + 1
|
||||
|
||||
def test_increments_on_429_in_message(self) -> None:
|
||||
connector = _raising_connector("HTTP 429 Too Many Requests")
|
||||
before = PRUNING_RATE_LIMIT_ERRORS.labels(
|
||||
connector_type="confluence"
|
||||
)._value.get()
|
||||
|
||||
with pytest.raises(Exception, match="429"):
|
||||
extract_ids_from_runnable_connector(connector, connector_type="confluence")
|
||||
|
||||
after = PRUNING_RATE_LIMIT_ERRORS.labels(
|
||||
connector_type="confluence"
|
||||
)._value.get()
|
||||
assert after == before + 1
|
||||
|
||||
def test_does_not_increment_on_non_rate_limit_exception(self) -> None:
|
||||
connector = _raising_connector("connection timeout")
|
||||
before = PRUNING_RATE_LIMIT_ERRORS.labels(connector_type="slack")._value.get()
|
||||
|
||||
with pytest.raises(Exception, match="connection timeout"):
|
||||
extract_ids_from_runnable_connector(connector, connector_type="slack")
|
||||
|
||||
after = PRUNING_RATE_LIMIT_ERRORS.labels(connector_type="slack")._value.get()
|
||||
assert after == before
|
||||
|
||||
def test_rate_limit_detection_is_case_insensitive(self) -> None:
|
||||
connector = _raising_connector("RATE LIMIT exceeded")
|
||||
before = PRUNING_RATE_LIMIT_ERRORS.labels(connector_type="jira")._value.get()
|
||||
|
||||
with pytest.raises(Exception):
|
||||
extract_ids_from_runnable_connector(connector, connector_type="jira")
|
||||
|
||||
after = PRUNING_RATE_LIMIT_ERRORS.labels(connector_type="jira")._value.get()
|
||||
assert after == before + 1
|
||||
|
||||
def test_connector_type_label_matches_input(self) -> None:
|
||||
connector = _raising_connector("rate limit exceeded")
|
||||
before_gd = PRUNING_RATE_LIMIT_ERRORS.labels(
|
||||
connector_type="google_drive"
|
||||
)._value.get()
|
||||
before_jira = PRUNING_RATE_LIMIT_ERRORS.labels(
|
||||
connector_type="jira"
|
||||
)._value.get()
|
||||
|
||||
with pytest.raises(Exception):
|
||||
extract_ids_from_runnable_connector(
|
||||
connector, connector_type="google_drive"
|
||||
)
|
||||
|
||||
assert (
|
||||
PRUNING_RATE_LIMIT_ERRORS.labels(connector_type="google_drive")._value.get()
|
||||
== before_gd + 1
|
||||
)
|
||||
assert (
|
||||
PRUNING_RATE_LIMIT_ERRORS.labels(connector_type="jira")._value.get()
|
||||
== before_jira
|
||||
)
|
||||
|
||||
def test_defaults_to_unknown_connector_type(self) -> None:
|
||||
connector = _raising_connector("rate limit exceeded")
|
||||
before = PRUNING_RATE_LIMIT_ERRORS.labels(connector_type="unknown")._value.get()
|
||||
|
||||
with pytest.raises(Exception):
|
||||
extract_ids_from_runnable_connector(connector)
|
||||
|
||||
after = PRUNING_RATE_LIMIT_ERRORS.labels(connector_type="unknown")._value.get()
|
||||
assert after == before + 1
|
||||
@@ -6,6 +6,7 @@ import requests
|
||||
from jira import JIRA
|
||||
from jira.resources import Issue
|
||||
|
||||
from onyx.connectors.jira.connector import _JIRA_BULK_FETCH_LIMIT
|
||||
from onyx.connectors.jira.connector import bulk_fetch_issues
|
||||
|
||||
|
||||
@@ -145,3 +146,29 @@ def test_bulk_fetch_recursive_splitting_raises_on_bad_issue() -> None:
|
||||
|
||||
with pytest.raises(requests.exceptions.JSONDecodeError):
|
||||
bulk_fetch_issues(client, ["1", "2", bad_id, "3", "4", "5"])
|
||||
|
||||
|
||||
def test_bulk_fetch_respects_api_batch_limit() -> None:
|
||||
"""Requests to the bulkfetch endpoint never exceed _JIRA_BULK_FETCH_LIMIT IDs."""
|
||||
client = _mock_jira_client()
|
||||
total_issues = _JIRA_BULK_FETCH_LIMIT * 3 + 7
|
||||
all_ids = [str(i) for i in range(total_issues)]
|
||||
|
||||
batch_sizes: list[int] = []
|
||||
|
||||
def _post_side_effect(url: str, json: dict[str, Any]) -> MagicMock: # noqa: ARG001
|
||||
ids = json["issueIdsOrKeys"]
|
||||
batch_sizes.append(len(ids))
|
||||
resp = MagicMock()
|
||||
resp.json.return_value = {"issues": [_make_raw_issue(i) for i in ids]}
|
||||
return resp
|
||||
|
||||
client._session.post.side_effect = _post_side_effect
|
||||
|
||||
result = bulk_fetch_issues(client, all_ids)
|
||||
|
||||
assert len(result) == total_issues
|
||||
# keeping this hardcoded because it's the documented limit
|
||||
# https://developer.atlassian.com/cloud/jira/platform/rest/v3/api-group-issues/
|
||||
assert all(size <= 100 for size in batch_sizes)
|
||||
assert len(batch_sizes) == 4
|
||||
|
||||
@@ -0,0 +1,67 @@
|
||||
"""Tests for _build_thread_text function."""
|
||||
|
||||
from unittest.mock import MagicMock
|
||||
from unittest.mock import patch
|
||||
|
||||
from onyx.context.search.federated.slack_search import _build_thread_text
|
||||
|
||||
|
||||
def _make_msg(user: str, text: str, ts: str) -> dict[str, str]:
|
||||
return {"user": user, "text": text, "ts": ts}
|
||||
|
||||
|
||||
class TestBuildThreadText:
|
||||
"""Verify _build_thread_text includes full thread replies up to cap."""
|
||||
|
||||
@patch("onyx.context.search.federated.slack_search.batch_get_user_profiles")
|
||||
def test_includes_all_replies(self, mock_profiles: MagicMock) -> None:
|
||||
"""All replies within cap are included in output."""
|
||||
mock_profiles.return_value = {}
|
||||
messages = [
|
||||
_make_msg("U1", "parent msg", "1000.0"),
|
||||
_make_msg("U2", "reply 1", "1001.0"),
|
||||
_make_msg("U3", "reply 2", "1002.0"),
|
||||
_make_msg("U4", "reply 3", "1003.0"),
|
||||
]
|
||||
result = _build_thread_text(messages, "token", "T123", MagicMock())
|
||||
assert "parent msg" in result
|
||||
assert "reply 1" in result
|
||||
assert "reply 2" in result
|
||||
assert "reply 3" in result
|
||||
assert "..." not in result
|
||||
|
||||
@patch("onyx.context.search.federated.slack_search.batch_get_user_profiles")
|
||||
def test_non_thread_returns_parent_only(self, mock_profiles: MagicMock) -> None:
|
||||
"""Single message (no replies) returns just the parent text."""
|
||||
mock_profiles.return_value = {}
|
||||
messages = [_make_msg("U1", "just a message", "1000.0")]
|
||||
result = _build_thread_text(messages, "token", "T123", MagicMock())
|
||||
assert "just a message" in result
|
||||
assert "Replies:" not in result
|
||||
|
||||
@patch("onyx.context.search.federated.slack_search.batch_get_user_profiles")
|
||||
def test_parent_always_first(self, mock_profiles: MagicMock) -> None:
|
||||
"""Thread parent message is always the first line of output."""
|
||||
mock_profiles.return_value = {}
|
||||
messages = [
|
||||
_make_msg("U1", "I am the parent", "1000.0"),
|
||||
_make_msg("U2", "I am a reply", "1001.0"),
|
||||
]
|
||||
result = _build_thread_text(messages, "token", "T123", MagicMock())
|
||||
parent_pos = result.index("I am the parent")
|
||||
reply_pos = result.index("I am a reply")
|
||||
assert parent_pos < reply_pos
|
||||
|
||||
@patch("onyx.context.search.federated.slack_search.batch_get_user_profiles")
|
||||
def test_user_profiles_resolved(self, mock_profiles: MagicMock) -> None:
|
||||
"""User IDs in thread text are replaced with display names."""
|
||||
mock_profiles.return_value = {"U1": "Alice", "U2": "Bob"}
|
||||
messages = [
|
||||
_make_msg("U1", "hello", "1000.0"),
|
||||
_make_msg("U2", "world", "1001.0"),
|
||||
]
|
||||
result = _build_thread_text(messages, "token", "T123", MagicMock())
|
||||
assert "Alice" in result
|
||||
assert "Bob" in result
|
||||
assert "<@U1>" not in result
|
||||
assert "<@U2>" not in result
|
||||
@@ -0,0 +1,108 @@
|
||||
"""Tests for Slack URL parsing and direct thread fetch via URL override."""
|
||||
|
||||
from unittest.mock import MagicMock
|
||||
from unittest.mock import patch
|
||||
|
||||
from onyx.context.search.federated.models import DirectThreadFetch
|
||||
from onyx.context.search.federated.slack_search import _fetch_thread_from_url
|
||||
from onyx.context.search.federated.slack_search_utils import extract_slack_message_urls
|
||||
|
||||
|
||||
class TestExtractSlackMessageUrls:
|
||||
"""Verify URL parsing extracts channel_id and timestamp correctly."""
|
||||
|
||||
def test_standard_url(self) -> None:
|
||||
query = "summarize https://mycompany.slack.com/archives/C097NBWMY8Y/p1775491616524769"
|
||||
results = extract_slack_message_urls(query)
|
||||
assert len(results) == 1
|
||||
assert results[0] == ("C097NBWMY8Y", "1775491616.524769")
|
||||
|
||||
def test_multiple_urls(self) -> None:
|
||||
query = (
|
||||
"compare https://co.slack.com/archives/C111/p1234567890123456 "
|
||||
"and https://co.slack.com/archives/C222/p9876543210987654"
|
||||
)
|
||||
results = extract_slack_message_urls(query)
|
||||
assert len(results) == 2
|
||||
assert results[0] == ("C111", "1234567890.123456")
|
||||
assert results[1] == ("C222", "9876543210.987654")
|
||||
|
||||
def test_no_urls(self) -> None:
|
||||
query = "what happened in #general last week?"
|
||||
results = extract_slack_message_urls(query)
|
||||
assert len(results) == 0
|
||||
|
||||
def test_non_slack_url_ignored(self) -> None:
|
||||
query = "check https://google.com/archives/C111/p1234567890123456"
|
||||
results = extract_slack_message_urls(query)
|
||||
assert len(results) == 0
|
||||
|
||||
def test_timestamp_conversion(self) -> None:
|
||||
"""p prefix removed, dot inserted after 10th digit."""
|
||||
query = "https://x.slack.com/archives/CABC123/p1775491616524769"
|
||||
results = extract_slack_message_urls(query)
|
||||
channel_id, ts = results[0]
|
||||
assert channel_id == "CABC123"
|
||||
assert ts == "1775491616.524769"
|
||||
assert not ts.startswith("p")
|
||||
assert "." in ts
|
||||
|
||||
|
||||
class TestFetchThreadFromUrl:
|
||||
"""Verify _fetch_thread_from_url calls conversations.replies and returns SlackMessage."""
|
||||
|
||||
@patch("onyx.context.search.federated.slack_search._build_thread_text")
|
||||
@patch("onyx.context.search.federated.slack_search.WebClient")
|
||||
def test_successful_fetch(
|
||||
self, mock_webclient_cls: MagicMock, mock_build_thread: MagicMock
|
||||
) -> None:
|
||||
mock_client = MagicMock()
|
||||
mock_webclient_cls.return_value = mock_client
|
||||
|
||||
# Mock conversations_replies
|
||||
mock_response = MagicMock()
|
||||
mock_response.get.return_value = [
|
||||
{"user": "U1", "text": "parent", "ts": "1775491616.524769"},
|
||||
{"user": "U2", "text": "reply 1", "ts": "1775491617.000000"},
|
||||
{"user": "U3", "text": "reply 2", "ts": "1775491618.000000"},
|
||||
]
|
||||
mock_client.conversations_replies.return_value = mock_response
|
||||
|
||||
# Mock channel info
|
||||
mock_ch_response = MagicMock()
|
||||
mock_ch_response.get.return_value = {"name": "general"}
|
||||
mock_client.conversations_info.return_value = mock_ch_response
|
||||
|
||||
mock_build_thread.return_value = (
|
||||
"U1: parent\n\nReplies:\n\nU2: reply 1\n\nU3: reply 2"
|
||||
)
|
||||
|
||||
fetch = DirectThreadFetch(
|
||||
channel_id="C097NBWMY8Y", thread_ts="1775491616.524769"
|
||||
)
|
||||
result = _fetch_thread_from_url(fetch, "xoxp-token")
|
||||
|
||||
assert len(result.messages) == 1
|
||||
msg = result.messages[0]
|
||||
assert msg.channel_id == "C097NBWMY8Y"
|
||||
assert msg.thread_id is None # Prevents double-enrichment
|
||||
assert msg.slack_score == 100000.0
|
||||
assert "parent" in msg.text
|
||||
mock_client.conversations_replies.assert_called_once_with(
|
||||
channel="C097NBWMY8Y", ts="1775491616.524769"
|
||||
)
|
||||
|
||||
@patch("onyx.context.search.federated.slack_search.WebClient")
|
||||
def test_api_error_returns_empty(self, mock_webclient_cls: MagicMock) -> None:
|
||||
from slack_sdk.errors import SlackApiError
|
||||
|
||||
mock_client = MagicMock()
|
||||
mock_webclient_cls.return_value = mock_client
|
||||
mock_client.conversations_replies.side_effect = SlackApiError(
|
||||
message="channel_not_found",
|
||||
response=MagicMock(status_code=404),
|
||||
)
|
||||
|
||||
fetch = DirectThreadFetch(channel_id="CBAD", thread_ts="1234567890.123456")
|
||||
result = _fetch_thread_from_url(fetch, "xoxp-token")
|
||||
assert len(result.messages) == 0
|
||||
@@ -505,6 +505,7 @@ class TestGetLMStudioAvailableModels:
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_provider = MagicMock()
|
||||
mock_provider.api_base = "http://localhost:1234"
|
||||
mock_provider.custom_config = {"LM_STUDIO_API_KEY": "stored-secret"}
|
||||
|
||||
response = {
|
||||
|
||||
128
backend/tests/unit/server/metrics/test_pruning_metrics.py
Normal file
128
backend/tests/unit/server/metrics/test_pruning_metrics.py
Normal file
@@ -0,0 +1,128 @@
|
||||
"""Tests for pruning-specific Prometheus metrics."""
|
||||
|
||||
import pytest
|
||||
|
||||
from onyx.server.metrics.pruning_metrics import inc_pruning_rate_limit_error
|
||||
from onyx.server.metrics.pruning_metrics import observe_pruning_diff_duration
|
||||
from onyx.server.metrics.pruning_metrics import observe_pruning_enumeration_duration
|
||||
from onyx.server.metrics.pruning_metrics import PRUNING_DIFF_DURATION
|
||||
from onyx.server.metrics.pruning_metrics import PRUNING_ENUMERATION_DURATION
|
||||
from onyx.server.metrics.pruning_metrics import PRUNING_RATE_LIMIT_ERRORS
|
||||
|
||||
|
||||
class TestObservePruningEnumerationDuration:
|
||||
def test_observes_duration(self) -> None:
|
||||
before = PRUNING_ENUMERATION_DURATION.labels(
|
||||
connector_type="google_drive"
|
||||
)._sum.get()
|
||||
|
||||
observe_pruning_enumeration_duration(10.0, "google_drive")
|
||||
|
||||
after = PRUNING_ENUMERATION_DURATION.labels(
|
||||
connector_type="google_drive"
|
||||
)._sum.get()
|
||||
assert after == pytest.approx(before + 10.0)
|
||||
|
||||
def test_labels_by_connector_type(self) -> None:
|
||||
before_gd = PRUNING_ENUMERATION_DURATION.labels(
|
||||
connector_type="google_drive"
|
||||
)._sum.get()
|
||||
before_conf = PRUNING_ENUMERATION_DURATION.labels(
|
||||
connector_type="confluence"
|
||||
)._sum.get()
|
||||
|
||||
observe_pruning_enumeration_duration(5.0, "google_drive")
|
||||
|
||||
after_gd = PRUNING_ENUMERATION_DURATION.labels(
|
||||
connector_type="google_drive"
|
||||
)._sum.get()
|
||||
after_conf = PRUNING_ENUMERATION_DURATION.labels(
|
||||
connector_type="confluence"
|
||||
)._sum.get()
|
||||
|
||||
assert after_gd == pytest.approx(before_gd + 5.0)
|
||||
assert after_conf == pytest.approx(before_conf)
|
||||
|
||||
def test_does_not_raise_on_exception(self, monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
monkeypatch.setattr(
|
||||
PRUNING_ENUMERATION_DURATION,
|
||||
"labels",
|
||||
lambda **_: (_ for _ in ()).throw(RuntimeError("boom")),
|
||||
)
|
||||
observe_pruning_enumeration_duration(1.0, "google_drive")
|
||||
|
||||
|
||||
class TestObservePruningDiffDuration:
|
||||
def test_observes_duration(self) -> None:
|
||||
before = PRUNING_DIFF_DURATION.labels(connector_type="confluence")._sum.get()
|
||||
|
||||
observe_pruning_diff_duration(3.0, "confluence")
|
||||
|
||||
after = PRUNING_DIFF_DURATION.labels(connector_type="confluence")._sum.get()
|
||||
assert after == pytest.approx(before + 3.0)
|
||||
|
||||
def test_labels_by_connector_type(self) -> None:
|
||||
before_conf = PRUNING_DIFF_DURATION.labels(
|
||||
connector_type="confluence"
|
||||
)._sum.get()
|
||||
before_slack = PRUNING_DIFF_DURATION.labels(connector_type="slack")._sum.get()
|
||||
|
||||
observe_pruning_diff_duration(2.0, "confluence")
|
||||
|
||||
after_conf = PRUNING_DIFF_DURATION.labels(
|
||||
connector_type="confluence"
|
||||
)._sum.get()
|
||||
after_slack = PRUNING_DIFF_DURATION.labels(connector_type="slack")._sum.get()
|
||||
|
||||
assert after_conf == pytest.approx(before_conf + 2.0)
|
||||
assert after_slack == pytest.approx(before_slack)
|
||||
|
||||
def test_does_not_raise_on_exception(self, monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
monkeypatch.setattr(
|
||||
PRUNING_DIFF_DURATION,
|
||||
"labels",
|
||||
lambda **_: (_ for _ in ()).throw(RuntimeError("boom")),
|
||||
)
|
||||
observe_pruning_diff_duration(1.0, "confluence")
|
||||
|
||||
|
||||
class TestIncPruningRateLimitError:
|
||||
def test_increments_counter(self) -> None:
|
||||
before = PRUNING_RATE_LIMIT_ERRORS.labels(
|
||||
connector_type="google_drive"
|
||||
)._value.get()
|
||||
|
||||
inc_pruning_rate_limit_error("google_drive")
|
||||
|
||||
after = PRUNING_RATE_LIMIT_ERRORS.labels(
|
||||
connector_type="google_drive"
|
||||
)._value.get()
|
||||
assert after == before + 1
|
||||
|
||||
def test_labels_by_connector_type(self) -> None:
|
||||
before_gd = PRUNING_RATE_LIMIT_ERRORS.labels(
|
||||
connector_type="google_drive"
|
||||
)._value.get()
|
||||
before_jira = PRUNING_RATE_LIMIT_ERRORS.labels(
|
||||
connector_type="jira"
|
||||
)._value.get()
|
||||
|
||||
inc_pruning_rate_limit_error("google_drive")
|
||||
|
||||
after_gd = PRUNING_RATE_LIMIT_ERRORS.labels(
|
||||
connector_type="google_drive"
|
||||
)._value.get()
|
||||
after_jira = PRUNING_RATE_LIMIT_ERRORS.labels(
|
||||
connector_type="jira"
|
||||
)._value.get()
|
||||
|
||||
assert after_gd == before_gd + 1
|
||||
assert after_jira == before_jira
|
||||
|
||||
def test_does_not_raise_on_exception(self, monkeypatch: pytest.MonkeyPatch) -> None:
|
||||
monkeypatch.setattr(
|
||||
PRUNING_RATE_LIMIT_ERRORS,
|
||||
"labels",
|
||||
lambda **_: (_ for _ in ()).throw(RuntimeError("boom")),
|
||||
)
|
||||
inc_pruning_rate_limit_error("google_drive")
|
||||
@@ -19,6 +19,6 @@ dependencies:
|
||||
version: 5.4.0
|
||||
- name: code-interpreter
|
||||
repository: https://onyx-dot-app.github.io/python-sandbox/
|
||||
version: 0.3.2
|
||||
digest: sha256:74908ea45ace2b4be913ff762772e6d87e40bab64e92c6662aa51730eaeb9d87
|
||||
generated: "2026-04-06T15:34:02.597166-07:00"
|
||||
version: 0.3.3
|
||||
digest: sha256:a57f29088b1624a72f6c70e4c3ccc2f2aad675e4624278c4e9be92083d6d5dad
|
||||
generated: "2026-04-08T16:47:29.33368-07:00"
|
||||
|
||||
@@ -45,6 +45,6 @@ dependencies:
|
||||
repository: https://charts.min.io/
|
||||
condition: minio.enabled
|
||||
- name: code-interpreter
|
||||
version: 0.3.2
|
||||
version: 0.3.3
|
||||
repository: https://onyx-dot-app.github.io/python-sandbox/
|
||||
condition: codeInterpreter.enabled
|
||||
|
||||
@@ -0,0 +1,26 @@
|
||||
{{- /* Metrics port must match the default in metrics_server.py (_DEFAULT_PORTS).
|
||||
Do NOT use PROMETHEUS_METRICS_PORT env var in Helm — each worker needs its own port. */ -}}
|
||||
{{- if gt (int .Values.celery_worker_heavy.replicaCount) 0 }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "onyx.fullname" . }}-celery-worker-heavy-metrics
|
||||
labels:
|
||||
{{- include "onyx.labels" . | nindent 4 }}
|
||||
{{- if .Values.celery_worker_heavy.deploymentLabels }}
|
||||
{{- toYaml .Values.celery_worker_heavy.deploymentLabels | nindent 4 }}
|
||||
{{- end }}
|
||||
metrics: "true"
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- port: 9094
|
||||
targetPort: metrics
|
||||
protocol: TCP
|
||||
name: metrics
|
||||
selector:
|
||||
{{- include "onyx.selectorLabels" . | nindent 4 }}
|
||||
{{- if .Values.celery_worker_heavy.deploymentLabels }}
|
||||
{{- toYaml .Values.celery_worker_heavy.deploymentLabels | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -70,6 +70,10 @@ spec:
|
||||
"-Q",
|
||||
"connector_pruning,connector_doc_permissions_sync,connector_external_group_sync,csv_generation,sandbox",
|
||||
]
|
||||
ports:
|
||||
- name: metrics
|
||||
containerPort: 9094
|
||||
protocol: TCP
|
||||
resources:
|
||||
{{- toYaml .Values.celery_worker_heavy.resources | nindent 12 }}
|
||||
envFrom:
|
||||
|
||||
@@ -28,7 +28,7 @@ dependencies = [
|
||||
"kubernetes>=31.0.0",
|
||||
]
|
||||
|
||||
[project.optional-dependencies]
|
||||
[dependency-groups]
|
||||
# Main backend application dependencies
|
||||
backend = [
|
||||
"aiohttp==3.13.4",
|
||||
@@ -148,7 +148,7 @@ dev = [
|
||||
"matplotlib==3.10.8",
|
||||
"mypy-extensions==1.0.0",
|
||||
"mypy==1.13.0",
|
||||
"onyx-devtools==0.7.2",
|
||||
"onyx-devtools==0.7.3",
|
||||
"openapi-generator-cli==7.17.0",
|
||||
"pandas-stubs~=2.3.3",
|
||||
"pre-commit==3.2.2",
|
||||
@@ -195,6 +195,9 @@ model_server = [
|
||||
"sentry-sdk[fastapi,celery,starlette]==2.14.0",
|
||||
]
|
||||
|
||||
[tool.uv]
|
||||
default-groups = ["backend", "dev", "ee", "model_server"]
|
||||
|
||||
[tool.mypy]
|
||||
plugins = "sqlalchemy.ext.mypy.plugin"
|
||||
mypy_path = "backend"
|
||||
@@ -230,7 +233,7 @@ follow_imports = "skip"
|
||||
ignore_errors = true
|
||||
|
||||
[tool.uv.workspace]
|
||||
members = ["backend", "tools/ods"]
|
||||
members = ["tools/ods"]
|
||||
|
||||
[tool.basedpyright]
|
||||
include = ["backend"]
|
||||
|
||||
19
tools/ods/cmd/deploy.go
Normal file
19
tools/ods/cmd/deploy.go
Normal file
@@ -0,0 +1,19 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// NewDeployCommand creates the parent `ods deploy` command. Subcommands hang
|
||||
// off it (e.g. `ods deploy edge`) and represent ad-hoc deployment workflows.
|
||||
func NewDeployCommand() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "deploy",
|
||||
Short: "Trigger ad-hoc deployments",
|
||||
Long: "Trigger ad-hoc deployments to Onyx-managed environments.",
|
||||
}
|
||||
|
||||
cmd.AddCommand(NewDeployEdgeCommand())
|
||||
|
||||
return cmd
|
||||
}
|
||||
353
tools/ods/cmd/deploy_edge.go
Normal file
353
tools/ods/cmd/deploy_edge.go
Normal file
@@ -0,0 +1,353 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/onyx-dot-app/onyx/tools/ods/internal/config"
|
||||
"github.com/onyx-dot-app/onyx/tools/ods/internal/git"
|
||||
"github.com/onyx-dot-app/onyx/tools/ods/internal/paths"
|
||||
"github.com/onyx-dot-app/onyx/tools/ods/internal/prompt"
|
||||
)
|
||||
|
||||
const (
|
||||
onyxRepo = "onyx-dot-app/onyx"
|
||||
deploymentWorkflowFile = "deployment.yml"
|
||||
edgeTagName = "edge"
|
||||
|
||||
// Polling configuration. Build runs typically take 20-30 minutes; deploys
|
||||
// are much shorter. The "discover" phase polls fast for a short window
|
||||
// because the run usually appears within seconds of pushing the tag /
|
||||
// dispatching the workflow.
|
||||
runDiscoveryInterval = 5 * time.Second
|
||||
runDiscoveryTimeout = 2 * time.Minute
|
||||
runProgressInterval = 30 * time.Second
|
||||
buildPollTimeout = 60 * time.Minute
|
||||
deployPollTimeout = 30 * time.Minute
|
||||
)
|
||||
|
||||
// DeployEdgeOptions holds options for the deploy edge command.
type DeployEdgeOptions struct {
	// TargetRepo is the GitHub repo (owner/name) hosting the deploy workflow;
	// when non-empty it overrides the value saved in the ods config.
	TargetRepo string
	// TargetWorkflow is the deploy workflow filename within TargetRepo;
	// when non-empty it overrides the value saved in the ods config.
	TargetWorkflow string
	// DryRun skips the tag push and workflow dispatch; read-only gh queries
	// and the git fetch still run.
	DryRun bool
	// Yes skips the interactive confirmation prompt.
	Yes bool
	// NoWaitDeploy dispatches the deploy workflow without waiting for it to
	// finish.
	NoWaitDeploy bool
}
|
||||
|
||||
// NewDeployEdgeCommand creates the `ods deploy edge` command.
func NewDeployEdgeCommand() *cobra.Command {
	// Options are bound to flags below and read later by deployEdge.
	opts := &DeployEdgeOptions{}

	cmd := &cobra.Command{
		Use:   "edge",
		Short: "Build edge images off main and deploy to the configured target",
		Long: `Build edge images off origin/main and dispatch the configured deploy workflow.

This command will:
1. Force-push the 'edge' tag to origin/main, triggering the build
2. Wait for the build workflow to finish
3. Dispatch the configured deploy workflow with version_tag=edge
4. Wait for the deploy workflow to finish

All GitHub operations run through the gh CLI, so authorization is enforced
by your gh credentials and GitHub's repo/workflow permissions.

On first run, you'll be prompted for the deploy target repo and workflow
filename. These are saved to the ods config file (~/.config/onyx-dev/config.json
on Linux/macOS) and reused on subsequent runs. Pass --target-repo or
--target-workflow to override the saved values.

Example usage:

  $ ods deploy edge`,
		Args: cobra.NoArgs,
		Run: func(cmd *cobra.Command, args []string) {
			deployEdge(opts)
		},
	}

	// Flag values take precedence over the saved config (see resolveDeployTarget).
	cmd.Flags().StringVar(&opts.TargetRepo, "target-repo", "", "GitHub repo (owner/name) hosting the deploy workflow; overrides saved config")
	cmd.Flags().StringVar(&opts.TargetWorkflow, "target-workflow", "", "Filename of the deploy workflow within the target repo; overrides saved config")
	cmd.Flags().BoolVar(&opts.DryRun, "dry-run", false, "Perform local operations only; skip pushing the tag and dispatching workflows")
	cmd.Flags().BoolVar(&opts.Yes, "yes", false, "Skip the confirmation prompt")
	cmd.Flags().BoolVar(&opts.NoWaitDeploy, "no-wait-deploy", false, "Do not wait for the deploy workflow to finish after dispatching it")

	return cmd
}
|
||||
|
||||
// deployEdge implements `ods deploy edge`: it force-pushes the 'edge' tag to
// origin/main (which triggers the build workflow in onyx-dot-app/onyx), waits
// for that build to succeed, then dispatches the configured deploy workflow
// with version_tag=edge and — unless --no-wait-deploy is set — waits for the
// deploy to finish. Any failure terminates the process via log.Fatalf.
func deployEdge(opts *DeployEdgeOptions) {
	git.CheckGitHubCLI()

	deployRepo, deployWorkflow := resolveDeployTarget(opts)

	if opts.DryRun {
		log.Warning("=== DRY RUN MODE: tag push and workflow dispatch will be skipped (read-only gh and git fetch still run) ===")
	}

	if !opts.Yes {
		msg := "About to force-push tag 'edge' to origin/main and trigger an ad-hoc deploy. Continue? (Y/n): "
		if !prompt.Confirm(msg) {
			log.Info("Exiting...")
			return
		}
	}

	// Capture the most recent existing edge build run id BEFORE pushing, so we
	// can reliably identify the new run we trigger and not pick up a stale one.
	priorBuildRunID, err := latestWorkflowRunID(onyxRepo, deploymentWorkflowFile, "push", edgeTagName)
	if err != nil {
		log.Fatalf("Failed to query existing deployment runs: %v", err)
	}
	log.Debugf("Most recent prior edge build run id: %d", priorBuildRunID)

	// Fetch first so the local 'edge' tag is moved to the current tip of main.
	log.Info("Fetching origin/main...")
	if err := git.RunCommand("fetch", "origin", "main"); err != nil {
		log.Fatalf("Failed to fetch origin/main: %v", err)
	}

	// Dry-run stops here: everything above was read-only.
	if opts.DryRun {
		log.Warnf("[DRY RUN] Would move local '%s' tag to origin/main", edgeTagName)
		log.Warnf("[DRY RUN] Would force-push tag '%s' to origin", edgeTagName)
		log.Warn("[DRY RUN] Would wait for build then dispatch the configured deploy workflow")
		return
	}

	log.Infof("Moving local '%s' tag to origin/main...", edgeTagName)
	if err := git.RunCommand("tag", "-f", edgeTagName, "origin/main"); err != nil {
		log.Fatalf("Failed to move local tag: %v", err)
	}

	log.Infof("Force-pushing tag '%s' to origin...", edgeTagName)
	if err := git.RunCommand("push", "-f", "origin", edgeTagName); err != nil {
		log.Fatalf("Failed to push edge tag: %v", err)
	}

	// Find the new build run, then poll it to completion.
	log.Info("Waiting for build workflow to start...")
	buildRun, err := waitForNewRun(onyxRepo, deploymentWorkflowFile, "push", edgeTagName, priorBuildRunID)
	if err != nil {
		log.Fatalf("Failed to find triggered build run: %v", err)
	}
	log.Infof("Build run started: %s", buildRun.URL)

	if err := waitForRunCompletion(onyxRepo, buildRun.DatabaseID, buildPollTimeout, "build"); err != nil {
		log.Fatalf("Build did not complete successfully: %v", err)
	}
	log.Info("Build completed successfully.")

	// Dispatch the deploy workflow. Same prior-run-id trick as the build above,
	// since workflow_dispatch gives us no run id back directly.
	priorDeployRunID, err := latestWorkflowRunID(deployRepo, deployWorkflow, "workflow_dispatch", "")
	if err != nil {
		log.Fatalf("Failed to query existing deploy runs: %v", err)
	}
	log.Debugf("Most recent prior deploy run id: %d", priorDeployRunID)

	log.Info("Dispatching deploy workflow with version_tag=edge...")
	if err := dispatchWorkflow(deployRepo, deployWorkflow, map[string]string{"version_tag": edgeTagName}); err != nil {
		log.Fatalf("Failed to dispatch deploy workflow: %v", err)
	}

	deployRun, err := waitForNewRun(deployRepo, deployWorkflow, "workflow_dispatch", "", priorDeployRunID)
	if err != nil {
		log.Fatalf("Failed to find dispatched deploy run: %v", err)
	}
	log.Infof("Deploy run started: %s", deployRun.URL)
	log.Info("A kickoff Slack message will appear in #monitor-deployments.")

	if opts.NoWaitDeploy {
		log.Info("--no-wait-deploy set; not waiting for deploy completion.")
		return
	}

	if err := waitForRunCompletion(deployRepo, deployRun.DatabaseID, deployPollTimeout, "deploy"); err != nil {
		log.Fatalf("Deploy did not complete successfully: %v", err)
	}
	log.Info("Deploy completed successfully.")
}
|
||||
|
||||
// resolveDeployTarget returns the deploy target repo and workflow to use,
|
||||
// preferring explicit flags, then saved config, then prompting the user on
|
||||
// first-time setup. Any newly-prompted values are persisted back to the
|
||||
// config file so subsequent runs are non-interactive.
|
||||
func resolveDeployTarget(opts *DeployEdgeOptions) (string, string) {
|
||||
cfg, err := config.Load()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to load ods config: %v", err)
|
||||
}
|
||||
|
||||
repo := opts.TargetRepo
|
||||
if repo == "" {
|
||||
repo = cfg.DeployEdge.TargetRepo
|
||||
}
|
||||
workflow := opts.TargetWorkflow
|
||||
if workflow == "" {
|
||||
workflow = cfg.DeployEdge.TargetWorkflow
|
||||
}
|
||||
|
||||
prompted := false
|
||||
if repo == "" {
|
||||
log.Infof("First-time setup: ods will save your deploy target to %s", paths.ConfigFilePath())
|
||||
repo = prompt.String("Deploy target repo (owner/name): ")
|
||||
prompted = true
|
||||
}
|
||||
if workflow == "" {
|
||||
workflow = prompt.String("Deploy workflow filename (e.g. some-workflow.yml): ")
|
||||
prompted = true
|
||||
}
|
||||
|
||||
if prompted {
|
||||
cfg.DeployEdge.TargetRepo = repo
|
||||
cfg.DeployEdge.TargetWorkflow = workflow
|
||||
if err := config.Save(cfg); err != nil {
|
||||
log.Fatalf("Failed to save ods config: %v", err)
|
||||
}
|
||||
log.Infof("Saved deploy target to %s", paths.ConfigFilePath())
|
||||
}
|
||||
|
||||
return repo, workflow
|
||||
}
|
||||
|
||||
// workflowRun is a partial representation of a `gh run list` JSON entry.
type workflowRun struct {
	// DatabaseID is GitHub's numeric run id; higher means newer.
	DatabaseID int64 `json:"databaseId"`
	// Status is the run lifecycle state (e.g. "queued", "in_progress", "completed").
	Status string `json:"status"`
	// Conclusion is the terminal outcome once Status is "completed" (e.g. "success").
	Conclusion string `json:"conclusion"`
	// URL is the run's web URL, surfaced in logs and error messages.
	URL string `json:"url"`
	// Event is the triggering event (e.g. "push", "workflow_dispatch").
	Event string `json:"event"`
	// HeadBranch is the branch or tag the run was triggered for.
	HeadBranch string `json:"headBranch"`
}
|
||||
|
||||
// latestWorkflowRunID returns the highest databaseId for runs of the given
|
||||
// workflow filtered by event (and optional branch). Returns 0 if no runs
|
||||
// exist yet, which is a valid state.
|
||||
func latestWorkflowRunID(repo, workflowFile, event, branch string) (int64, error) {
|
||||
runs, err := listWorkflowRuns(repo, workflowFile, event, branch, 10)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
var maxID int64
|
||||
for _, r := range runs {
|
||||
if r.DatabaseID > maxID {
|
||||
maxID = r.DatabaseID
|
||||
}
|
||||
}
|
||||
return maxID, nil
|
||||
}
|
||||
|
||||
func listWorkflowRuns(repo, workflowFile, event, branch string, limit int) ([]workflowRun, error) {
|
||||
args := []string{
|
||||
"run", "list",
|
||||
"-R", repo,
|
||||
"--workflow", workflowFile,
|
||||
"--limit", fmt.Sprintf("%d", limit),
|
||||
"--json", "databaseId,status,conclusion,url,event,headBranch",
|
||||
}
|
||||
if event != "" {
|
||||
args = append(args, "--event", event)
|
||||
}
|
||||
if branch != "" {
|
||||
args = append(args, "--branch", branch)
|
||||
}
|
||||
cmd := exec.Command("gh", args...)
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
return nil, fmt.Errorf("gh run list failed: %w: %s", err, string(exitErr.Stderr))
|
||||
}
|
||||
return nil, fmt.Errorf("gh run list failed: %w", err)
|
||||
}
|
||||
var runs []workflowRun
|
||||
if err := json.Unmarshal(output, &runs); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse gh run list output: %w", err)
|
||||
}
|
||||
// Sort newest-first by databaseId for predictable iteration.
|
||||
sort.Slice(runs, func(i, j int) bool { return runs[i].DatabaseID > runs[j].DatabaseID })
|
||||
return runs, nil
|
||||
}
|
||||
|
||||
// waitForNewRun polls until a workflow run with databaseId > priorRunID
|
||||
// appears, or the discovery timeout fires.
|
||||
func waitForNewRun(repo, workflowFile, event, branch string, priorRunID int64) (*workflowRun, error) {
|
||||
deadline := time.Now().Add(runDiscoveryTimeout)
|
||||
for {
|
||||
runs, err := listWorkflowRuns(repo, workflowFile, event, branch, 5)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, r := range runs {
|
||||
if r.DatabaseID > priorRunID {
|
||||
return &r, nil
|
||||
}
|
||||
}
|
||||
if time.Now().After(deadline) {
|
||||
return nil, fmt.Errorf("no new run appeared within %s", runDiscoveryTimeout)
|
||||
}
|
||||
time.Sleep(runDiscoveryInterval)
|
||||
}
|
||||
}
|
||||
|
||||
// waitForRunCompletion polls a specific run until it reaches a terminal
|
||||
// status. Returns an error if the run does not conclude with success or the
|
||||
// timeout fires.
|
||||
func waitForRunCompletion(repo string, runID int64, timeout time.Duration, label string) error {
|
||||
deadline := time.Now().Add(timeout)
|
||||
for {
|
||||
run, err := getRun(repo, runID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Infof("[%s] run %d status=%s conclusion=%s", label, runID, run.Status, run.Conclusion)
|
||||
if run.Status == "completed" {
|
||||
if run.Conclusion == "success" {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("%s run %d concluded with status %q (see %s)", label, runID, run.Conclusion, run.URL)
|
||||
}
|
||||
if time.Now().After(deadline) {
|
||||
return fmt.Errorf("%s run %d did not complete within %s (see %s)", label, runID, timeout, run.URL)
|
||||
}
|
||||
time.Sleep(runProgressInterval)
|
||||
}
|
||||
}
|
||||
|
||||
func getRun(repo string, runID int64) (*workflowRun, error) {
|
||||
cmd := exec.Command(
|
||||
"gh", "run", "view", fmt.Sprintf("%d", runID),
|
||||
"-R", repo,
|
||||
"--json", "databaseId,status,conclusion,url,event,headBranch",
|
||||
)
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
return nil, fmt.Errorf("gh run view failed: %w: %s", err, string(exitErr.Stderr))
|
||||
}
|
||||
return nil, fmt.Errorf("gh run view failed: %w", err)
|
||||
}
|
||||
var run workflowRun
|
||||
if err := json.Unmarshal(output, &run); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse gh run view output: %w", err)
|
||||
}
|
||||
return &run, nil
|
||||
}
|
||||
|
||||
// dispatchWorkflow fires a workflow_dispatch event for the given workflow with
// the supplied string inputs.
//
// Input flags are emitted in sorted key order: Go's map iteration order is
// randomized, so without sorting the generated gh command line (and anything
// logging it) would differ between runs. Sorting makes dispatches
// deterministic and reproducible; gh treats -f inputs as an unordered set, so
// this does not change what the workflow receives.
func dispatchWorkflow(repo, workflowFile string, inputs map[string]string) error {
	args := []string{"workflow", "run", workflowFile, "-R", repo}

	keys := make([]string, 0, len(inputs))
	for k := range inputs {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		args = append(args, "-f", fmt.Sprintf("%s=%s", k, inputs[k]))
	}

	cmd := exec.Command("gh", args...)
	// CombinedOutput: gh writes useful failure detail to both streams.
	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("gh workflow run failed: %w: %s", err, string(output))
	}
	return nil
}
|
||||
@@ -45,6 +45,7 @@ func NewRootCommand() *cobra.Command {
|
||||
cmd.AddCommand(NewCheckLazyImportsCommand())
|
||||
cmd.AddCommand(NewCherryPickCommand())
|
||||
cmd.AddCommand(NewDBCommand())
|
||||
cmd.AddCommand(NewDeployCommand())
|
||||
cmd.AddCommand(NewOpenAPICommand())
|
||||
cmd.AddCommand(NewComposeCommand())
|
||||
cmd.AddCommand(NewLogsCommand())
|
||||
|
||||
56
tools/ods/internal/config/config.go
Normal file
56
tools/ods/internal/config/config.go
Normal file
@@ -0,0 +1,56 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/onyx-dot-app/onyx/tools/ods/internal/paths"
|
||||
)
|
||||
|
||||
// DeployEdgeConfig holds the persisted settings for `ods deploy edge`.
type DeployEdgeConfig struct {
	// TargetRepo is the GitHub repo (owner/name) hosting the deploy workflow.
	TargetRepo string `json:"target_repo,omitempty"`
	// TargetWorkflow is the deploy workflow filename within TargetRepo.
	TargetWorkflow string `json:"target_workflow,omitempty"`
}

// Config is the top-level on-disk schema for ~/.config/onyx-dev/config.json.
// New per-command sections should be added as additional fields.
type Config struct {
	// DeployEdge stores the saved deploy target used by `ods deploy edge`.
	DeployEdge DeployEdgeConfig `json:"deploy_edge,omitempty"`
}
|
||||
|
||||
// Load reads the config file. Returns a zero-valued Config if the file does
|
||||
// not exist (a fresh first-run state, not an error).
|
||||
func Load() (*Config, error) {
|
||||
path := paths.ConfigFilePath()
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return &Config{}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("failed to read config file %s: %w", path, err)
|
||||
}
|
||||
var cfg Config
|
||||
if err := json.Unmarshal(data, &cfg); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse config file %s: %w", path, err)
|
||||
}
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
// Save persists the config to disk, creating the parent directory if needed.
|
||||
func Save(cfg *Config) error {
|
||||
if err := paths.EnsureConfigDir(); err != nil {
|
||||
return fmt.Errorf("failed to create config directory: %w", err)
|
||||
}
|
||||
data, err := json.MarshalIndent(cfg, "", " ")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal config: %w", err)
|
||||
}
|
||||
path := paths.ConfigFilePath()
|
||||
if err := os.WriteFile(path, data, 0644); err != nil {
|
||||
return fmt.Errorf("failed to write config file %s: %w", path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -47,6 +47,43 @@ func DataDir() string {
|
||||
return filepath.Join(base, "onyx-dev")
|
||||
}
|
||||
|
||||
// ConfigDir returns the per-user config directory for onyx-dev tools.
|
||||
// On Linux/macOS: ~/.config/onyx-dev/ (respects XDG_CONFIG_HOME)
|
||||
// On Windows: %APPDATA%/onyx-dev/
|
||||
func ConfigDir() string {
|
||||
var base string
|
||||
if runtime.GOOS == "windows" {
|
||||
base = os.Getenv("APPDATA")
|
||||
if base == "" {
|
||||
base = os.Getenv("USERPROFILE")
|
||||
if base == "" {
|
||||
log.Fatalf("Cannot determine config directory: APPDATA and USERPROFILE are not set")
|
||||
}
|
||||
base = filepath.Join(base, "AppData", "Roaming")
|
||||
}
|
||||
} else {
|
||||
base = os.Getenv("XDG_CONFIG_HOME")
|
||||
if base == "" {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil || home == "" {
|
||||
log.Fatalf("Cannot determine config directory: XDG_CONFIG_HOME not set and home directory unknown: %v", err)
|
||||
}
|
||||
base = filepath.Join(home, ".config")
|
||||
}
|
||||
}
|
||||
return filepath.Join(base, "onyx-dev")
|
||||
}
|
||||
|
||||
// ConfigFilePath returns the path to the ods config file.
|
||||
func ConfigFilePath() string {
|
||||
return filepath.Join(ConfigDir(), "config.json")
|
||||
}
|
||||
|
||||
// EnsureConfigDir creates the config directory if it doesn't exist.
|
||||
func EnsureConfigDir() error {
|
||||
return os.MkdirAll(ConfigDir(), 0755)
|
||||
}
|
||||
|
||||
// SnapshotsDir returns the directory for database snapshots.
|
||||
func SnapshotsDir() string {
|
||||
return filepath.Join(DataDir(), "snapshots")
|
||||
|
||||
@@ -12,6 +12,23 @@ import (
|
||||
// reader is the input reader, can be replaced for testing
|
||||
var reader = bufio.NewReader(os.Stdin)
|
||||
|
||||
// String prompts the user for a free-form line of input. Re-prompts until a
|
||||
// non-empty value is entered.
|
||||
func String(prompt string) string {
|
||||
for {
|
||||
fmt.Print(prompt)
|
||||
response, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to read input: %v", err)
|
||||
}
|
||||
response = strings.TrimSpace(response)
|
||||
if response != "" {
|
||||
return response
|
||||
}
|
||||
fmt.Println("Value cannot be empty.")
|
||||
}
|
||||
}
|
||||
|
||||
// Confirm prompts the user with a yes/no question and returns true for yes, false for no.
|
||||
// It will keep prompting until a valid response is given.
|
||||
// Empty input (just pressing Enter) defaults to yes.
|
||||
|
||||
324
uv.lock
generated
324
uv.lock
generated
@@ -14,12 +14,6 @@ resolution-markers = [
|
||||
"python_full_version < '3.12' and sys_platform != 'win32'",
|
||||
]
|
||||
|
||||
[manifest]
|
||||
members = [
|
||||
"onyx",
|
||||
"onyx-backend",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "accelerate"
|
||||
version = "1.6.0"
|
||||
@@ -4234,7 +4228,7 @@ dependencies = [
|
||||
{ name = "voyageai" },
|
||||
]
|
||||
|
||||
[package.optional-dependencies]
|
||||
[package.dev-dependencies]
|
||||
backend = [
|
||||
{ name = "aiohttp" },
|
||||
{ name = "alembic" },
|
||||
@@ -4388,195 +4382,191 @@ model-server = [
|
||||
|
||||
[package.metadata]
|
||||
requires-dist = [
|
||||
{ name = "accelerate", marker = "extra == 'model-server'", specifier = "==1.6.0" },
|
||||
{ name = "agent-client-protocol", specifier = ">=0.7.1" },
|
||||
{ name = "aioboto3", specifier = "==15.1.0" },
|
||||
{ name = "aiohttp", marker = "extra == 'backend'", specifier = "==3.13.4" },
|
||||
{ name = "alembic", marker = "extra == 'backend'", specifier = "==1.10.4" },
|
||||
{ name = "asana", marker = "extra == 'backend'", specifier = "==5.0.8" },
|
||||
{ name = "asyncpg", marker = "extra == 'backend'", specifier = "==0.30.0" },
|
||||
{ name = "atlassian-python-api", marker = "extra == 'backend'", specifier = "==3.41.16" },
|
||||
{ name = "azure-cognitiveservices-speech", marker = "extra == 'backend'", specifier = "==1.38.0" },
|
||||
{ name = "beautifulsoup4", marker = "extra == 'backend'", specifier = "==4.12.3" },
|
||||
{ name = "black", marker = "extra == 'dev'", specifier = "==25.1.0" },
|
||||
{ name = "boto3", marker = "extra == 'backend'", specifier = "==1.39.11" },
|
||||
{ name = "boto3-stubs", extras = ["s3"], marker = "extra == 'backend'", specifier = "==1.39.11" },
|
||||
{ name = "braintrust", marker = "extra == 'backend'", specifier = "==0.3.9" },
|
||||
{ name = "brotli", specifier = ">=1.2.0" },
|
||||
{ name = "celery", marker = "extra == 'backend'", specifier = "==5.5.1" },
|
||||
{ name = "celery-types", marker = "extra == 'dev'", specifier = "==0.19.0" },
|
||||
{ name = "chardet", marker = "extra == 'backend'", specifier = "==5.2.0" },
|
||||
{ name = "chonkie", marker = "extra == 'backend'", specifier = "==1.0.10" },
|
||||
{ name = "claude-agent-sdk", specifier = ">=0.1.19" },
|
||||
{ name = "cohere", specifier = "==5.6.1" },
|
||||
{ name = "dask", marker = "extra == 'backend'", specifier = "==2026.1.1" },
|
||||
{ name = "ddtrace", marker = "extra == 'backend'", specifier = "==3.10.0" },
|
||||
{ name = "discord-py", specifier = "==2.4.0" },
|
||||
{ name = "discord-py", marker = "extra == 'backend'", specifier = "==2.4.0" },
|
||||
{ name = "distributed", marker = "extra == 'backend'", specifier = "==2026.1.1" },
|
||||
{ name = "dropbox", marker = "extra == 'backend'", specifier = "==12.0.2" },
|
||||
{ name = "einops", marker = "extra == 'model-server'", specifier = "==0.8.1" },
|
||||
{ name = "exa-py", marker = "extra == 'backend'", specifier = "==1.15.4" },
|
||||
{ name = "faker", marker = "extra == 'dev'", specifier = "==40.1.2" },
|
||||
{ name = "fastapi", specifier = "==0.133.1" },
|
||||
{ name = "fastapi-limiter", marker = "extra == 'backend'", specifier = "==0.1.6" },
|
||||
{ name = "fastapi-users", marker = "extra == 'backend'", specifier = "==15.0.4" },
|
||||
{ name = "fastapi-users-db-sqlalchemy", marker = "extra == 'backend'", specifier = "==7.0.0" },
|
||||
{ name = "fastmcp", marker = "extra == 'backend'", specifier = "==3.2.0" },
|
||||
{ name = "filelock", marker = "extra == 'backend'", specifier = "==3.20.3" },
|
||||
{ name = "google-api-python-client", marker = "extra == 'backend'", specifier = "==2.86.0" },
|
||||
{ name = "google-auth-httplib2", marker = "extra == 'backend'", specifier = "==0.1.0" },
|
||||
{ name = "google-auth-oauthlib", marker = "extra == 'backend'", specifier = "==1.0.0" },
|
||||
{ name = "google-genai", specifier = "==1.52.0" },
|
||||
{ name = "hatchling", marker = "extra == 'dev'", specifier = "==1.28.0" },
|
||||
{ name = "httpcore", marker = "extra == 'backend'", specifier = "==1.0.9" },
|
||||
{ name = "httpx", extras = ["http2"], marker = "extra == 'backend'", specifier = "==0.28.1" },
|
||||
{ name = "httpx-oauth", marker = "extra == 'backend'", specifier = "==0.15.1" },
|
||||
{ name = "hubspot-api-client", marker = "extra == 'backend'", specifier = "==11.1.0" },
|
||||
{ name = "huggingface-hub", marker = "extra == 'backend'", specifier = "==0.35.3" },
|
||||
{ name = "inflection", marker = "extra == 'backend'", specifier = "==0.5.1" },
|
||||
{ name = "ipykernel", marker = "extra == 'dev'", specifier = "==6.29.5" },
|
||||
{ name = "jira", marker = "extra == 'backend'", specifier = "==3.10.5" },
|
||||
{ name = "jsonref", marker = "extra == 'backend'", specifier = "==1.1.0" },
|
||||
{ name = "kubernetes", specifier = ">=31.0.0" },
|
||||
{ name = "kubernetes", marker = "extra == 'backend'", specifier = "==31.0.0" },
|
||||
{ name = "langchain-core", marker = "extra == 'backend'", specifier = "==1.2.22" },
|
||||
{ name = "langfuse", marker = "extra == 'backend'", specifier = "==3.10.0" },
|
||||
{ name = "lazy-imports", marker = "extra == 'backend'", specifier = "==1.0.1" },
|
||||
{ name = "litellm", specifier = "==1.81.6" },
|
||||
{ name = "lxml", marker = "extra == 'backend'", specifier = "==5.3.0" },
|
||||
{ name = "mako", marker = "extra == 'backend'", specifier = "==1.2.4" },
|
||||
{ name = "manygo", marker = "extra == 'dev'", specifier = "==0.2.0" },
|
||||
{ name = "markitdown", extras = ["pdf", "docx", "pptx", "xlsx", "xls"], marker = "extra == 'backend'", specifier = "==0.1.2" },
|
||||
{ name = "matplotlib", marker = "extra == 'dev'", specifier = "==3.10.8" },
|
||||
{ name = "mcp", extras = ["cli"], marker = "extra == 'backend'", specifier = "==1.26.0" },
|
||||
{ name = "mistune", marker = "extra == 'backend'", specifier = "==3.2.0" },
|
||||
{ name = "msal", marker = "extra == 'backend'", specifier = "==1.34.0" },
|
||||
{ name = "msoffcrypto-tool", marker = "extra == 'backend'", specifier = "==5.4.2" },
|
||||
{ name = "mypy", marker = "extra == 'dev'", specifier = "==1.13.0" },
|
||||
{ name = "mypy-extensions", marker = "extra == 'dev'", specifier = "==1.0.0" },
|
||||
{ name = "nest-asyncio", marker = "extra == 'backend'", specifier = "==1.6.0" },
|
||||
{ name = "numpy", marker = "extra == 'model-server'", specifier = "==2.4.1" },
|
||||
{ name = "oauthlib", marker = "extra == 'backend'", specifier = "==3.2.2" },
|
||||
{ name = "office365-rest-python-client", marker = "extra == 'backend'", specifier = "==2.6.2" },
|
||||
{ name = "onyx-devtools", marker = "extra == 'dev'", specifier = "==0.7.2" },
|
||||
{ name = "openai", specifier = "==2.14.0" },
|
||||
{ name = "openapi-generator-cli", marker = "extra == 'dev'", specifier = "==7.17.0" },
|
||||
{ name = "openinference-instrumentation", marker = "extra == 'backend'", specifier = "==0.1.42" },
|
||||
{ name = "openpyxl", marker = "extra == 'backend'", specifier = "==3.0.10" },
|
||||
{ name = "opensearch-py", marker = "extra == 'backend'", specifier = "==3.0.0" },
|
||||
{ name = "opentelemetry-proto", marker = "extra == 'backend'", specifier = ">=1.39.0" },
|
||||
{ name = "pandas-stubs", marker = "extra == 'dev'", specifier = "~=2.3.3" },
|
||||
{ name = "passlib", marker = "extra == 'backend'", specifier = "==1.7.4" },
|
||||
{ name = "playwright", marker = "extra == 'backend'", specifier = "==1.55.0" },
|
||||
{ name = "posthog", marker = "extra == 'ee'", specifier = "==3.7.4" },
|
||||
{ name = "pre-commit", marker = "extra == 'dev'", specifier = "==3.2.2" },
|
||||
{ name = "prometheus-client", specifier = ">=0.21.1" },
|
||||
{ name = "prometheus-fastapi-instrumentator", specifier = "==7.1.0" },
|
||||
{ name = "psutil", marker = "extra == 'backend'", specifier = "==7.1.3" },
|
||||
{ name = "psycopg2-binary", marker = "extra == 'backend'", specifier = "==2.9.9" },
|
||||
{ name = "puremagic", marker = "extra == 'backend'", specifier = "==1.28" },
|
||||
{ name = "pyairtable", marker = "extra == 'backend'", specifier = "==3.0.1" },
|
||||
{ name = "pycryptodome", marker = "extra == 'backend'", specifier = "==3.19.1" },
|
||||
{ name = "pydantic", specifier = "==2.11.7" },
|
||||
{ name = "pygithub", marker = "extra == 'backend'", specifier = "==2.5.0" },
|
||||
{ name = "pympler", marker = "extra == 'backend'", specifier = "==1.1" },
|
||||
{ name = "pypandoc-binary", marker = "extra == 'backend'", specifier = "==1.16.2" },
|
||||
{ name = "pypdf", marker = "extra == 'backend'", specifier = "==6.9.2" },
|
||||
{ name = "pytest", marker = "extra == 'dev'", specifier = "==8.3.5" },
|
||||
{ name = "pytest-alembic", marker = "extra == 'dev'", specifier = "==0.12.1" },
|
||||
{ name = "pytest-asyncio", marker = "extra == 'dev'", specifier = "==1.3.0" },
|
||||
{ name = "pytest-dotenv", marker = "extra == 'dev'", specifier = "==0.5.2" },
|
||||
{ name = "pytest-mock", marker = "extra == 'backend'", specifier = "==3.12.0" },
|
||||
{ name = "pytest-playwright", marker = "extra == 'backend'", specifier = "==0.7.0" },
|
||||
{ name = "pytest-repeat", marker = "extra == 'dev'", specifier = "==0.9.4" },
|
||||
{ name = "pytest-xdist", marker = "extra == 'dev'", specifier = "==3.8.0" },
|
||||
{ name = "python-dateutil", marker = "extra == 'backend'", specifier = "==2.8.2" },
|
||||
{ name = "python-docx", marker = "extra == 'backend'", specifier = "==1.1.2" },
|
||||
{ name = "python-dotenv", marker = "extra == 'backend'", specifier = "==1.1.1" },
|
||||
{ name = "python-gitlab", marker = "extra == 'backend'", specifier = "==5.6.0" },
|
||||
{ name = "python-multipart", marker = "extra == 'backend'", specifier = "==0.0.22" },
|
||||
{ name = "python-pptx", marker = "extra == 'backend'", specifier = "==0.6.23" },
|
||||
{ name = "python3-saml", marker = "extra == 'backend'", specifier = "==1.15.0" },
|
||||
{ name = "pywikibot", marker = "extra == 'backend'", specifier = "==9.0.0" },
|
||||
{ name = "rapidfuzz", marker = "extra == 'backend'", specifier = "==3.13.0" },
|
||||
{ name = "redis", marker = "extra == 'backend'", specifier = "==5.0.8" },
|
||||
{ name = "release-tag", marker = "extra == 'dev'", specifier = "==0.5.2" },
|
||||
{ name = "reorder-python-imports-black", marker = "extra == 'dev'", specifier = "==3.14.0" },
|
||||
{ name = "requests", marker = "extra == 'backend'", specifier = "==2.33.0" },
|
||||
{ name = "requests-oauthlib", marker = "extra == 'backend'", specifier = "==1.3.1" },
|
||||
{ name = "retry", specifier = "==0.9.2" },
|
||||
{ name = "rfc3986", marker = "extra == 'backend'", specifier = "==1.5.0" },
|
||||
{ name = "ruff", marker = "extra == 'dev'", specifier = "==0.12.0" },
|
||||
{ name = "safetensors", marker = "extra == 'model-server'", specifier = "==0.5.3" },
|
||||
{ name = "sendgrid", marker = "extra == 'backend'", specifier = "==6.12.5" },
|
||||
{ name = "sentence-transformers", marker = "extra == 'model-server'", specifier = "==4.0.2" },
|
||||
{ name = "sentry-sdk", specifier = "==2.14.0" },
|
||||
{ name = "sentry-sdk", extras = ["fastapi", "celery", "starlette"], marker = "extra == 'model-server'", specifier = "==2.14.0" },
|
||||
{ name = "shapely", marker = "extra == 'backend'", specifier = "==2.0.6" },
|
||||
{ name = "simple-salesforce", marker = "extra == 'backend'", specifier = "==1.12.6" },
|
||||
{ name = "slack-sdk", marker = "extra == 'backend'", specifier = "==3.20.2" },
|
||||
{ name = "sqlalchemy", extras = ["mypy"], marker = "extra == 'backend'", specifier = "==2.0.15" },
|
||||
{ name = "starlette", marker = "extra == 'backend'", specifier = "==0.49.3" },
|
||||
{ name = "stripe", marker = "extra == 'backend'", specifier = "==10.12.0" },
|
||||
{ name = "supervisor", marker = "extra == 'backend'", specifier = "==4.3.0" },
|
||||
{ name = "tiktoken", marker = "extra == 'backend'", specifier = "==0.7.0" },
|
||||
{ name = "timeago", marker = "extra == 'backend'", specifier = "==1.0.16" },
|
||||
{ name = "torch", marker = "extra == 'model-server'", specifier = "==2.9.1" },
|
||||
{ name = "trafilatura", marker = "extra == 'backend'", specifier = "==1.12.2" },
|
||||
{ name = "transformers", marker = "extra == 'model-server'", specifier = "==4.53.0" },
|
||||
{ name = "types-beautifulsoup4", marker = "extra == 'dev'", specifier = "==4.12.0.3" },
|
||||
{ name = "types-html5lib", marker = "extra == 'dev'", specifier = "==1.1.11.13" },
|
||||
{ name = "types-oauthlib", marker = "extra == 'dev'", specifier = "==3.2.0.9" },
|
||||
{ name = "types-openpyxl", marker = "extra == 'backend'", specifier = "==3.0.4.7" },
|
||||
{ name = "types-passlib", marker = "extra == 'dev'", specifier = "==1.7.7.20240106" },
|
||||
{ name = "types-pillow", marker = "extra == 'dev'", specifier = "==10.2.0.20240822" },
|
||||
{ name = "types-psutil", marker = "extra == 'dev'", specifier = "==7.1.3.20251125" },
|
||||
{ name = "types-psycopg2", marker = "extra == 'dev'", specifier = "==2.9.21.10" },
|
||||
{ name = "types-python-dateutil", marker = "extra == 'dev'", specifier = "==2.8.19.13" },
|
||||
{ name = "types-pytz", marker = "extra == 'dev'", specifier = "==2023.3.1.1" },
|
||||
{ name = "types-pyyaml", marker = "extra == 'dev'", specifier = "==6.0.12.11" },
|
||||
{ name = "types-regex", marker = "extra == 'dev'", specifier = "==2023.3.23.1" },
|
||||
{ name = "types-requests", marker = "extra == 'dev'", specifier = "==2.32.0.20250328" },
|
||||
{ name = "types-retry", marker = "extra == 'dev'", specifier = "==0.9.9.3" },
|
||||
{ name = "types-setuptools", marker = "extra == 'dev'", specifier = "==68.0.0.3" },
|
||||
{ name = "unstructured", marker = "extra == 'backend'", specifier = "==0.18.27" },
|
||||
{ name = "unstructured-client", marker = "extra == 'backend'", specifier = "==0.42.6" },
|
||||
{ name = "urllib3", marker = "extra == 'backend'", specifier = "==2.6.3" },
|
||||
{ name = "uvicorn", specifier = "==0.35.0" },
|
||||
{ name = "voyageai", specifier = "==0.2.3" },
|
||||
{ name = "xmlsec", marker = "extra == 'backend'", specifier = "==1.3.14" },
|
||||
{ name = "zizmor", marker = "extra == 'dev'", specifier = "==1.18.0" },
|
||||
{ name = "zulip", marker = "extra == 'backend'", specifier = "==0.8.2" },
|
||||
]
|
||||
provides-extras = ["backend", "dev", "ee", "model-server"]
|
||||
|
||||
[[package]]
|
||||
name = "onyx-backend"
|
||||
version = "0.0.0"
|
||||
source = { virtual = "backend" }
|
||||
dependencies = [
|
||||
{ name = "onyx", extra = ["backend", "dev", "ee"] },
|
||||
]
|
||||
|
||||
[package.metadata]
|
||||
requires-dist = [{ name = "onyx", extras = ["backend", "dev", "ee"], editable = "." }]
|
||||
[package.metadata.requires-dev]
|
||||
backend = [
|
||||
{ name = "aiohttp", specifier = "==3.13.4" },
|
||||
{ name = "alembic", specifier = "==1.10.4" },
|
||||
{ name = "asana", specifier = "==5.0.8" },
|
||||
{ name = "asyncpg", specifier = "==0.30.0" },
|
||||
{ name = "atlassian-python-api", specifier = "==3.41.16" },
|
||||
{ name = "azure-cognitiveservices-speech", specifier = "==1.38.0" },
|
||||
{ name = "beautifulsoup4", specifier = "==4.12.3" },
|
||||
{ name = "boto3", specifier = "==1.39.11" },
|
||||
{ name = "boto3-stubs", extras = ["s3"], specifier = "==1.39.11" },
|
||||
{ name = "braintrust", specifier = "==0.3.9" },
|
||||
{ name = "celery", specifier = "==5.5.1" },
|
||||
{ name = "chardet", specifier = "==5.2.0" },
|
||||
{ name = "chonkie", specifier = "==1.0.10" },
|
||||
{ name = "dask", specifier = "==2026.1.1" },
|
||||
{ name = "ddtrace", specifier = "==3.10.0" },
|
||||
{ name = "discord-py", specifier = "==2.4.0" },
|
||||
{ name = "distributed", specifier = "==2026.1.1" },
|
||||
{ name = "dropbox", specifier = "==12.0.2" },
|
||||
{ name = "exa-py", specifier = "==1.15.4" },
|
||||
{ name = "fastapi-limiter", specifier = "==0.1.6" },
|
||||
{ name = "fastapi-users", specifier = "==15.0.4" },
|
||||
{ name = "fastapi-users-db-sqlalchemy", specifier = "==7.0.0" },
|
||||
{ name = "fastmcp", specifier = "==3.2.0" },
|
||||
{ name = "filelock", specifier = "==3.20.3" },
|
||||
{ name = "google-api-python-client", specifier = "==2.86.0" },
|
||||
{ name = "google-auth-httplib2", specifier = "==0.1.0" },
|
||||
{ name = "google-auth-oauthlib", specifier = "==1.0.0" },
|
||||
{ name = "httpcore", specifier = "==1.0.9" },
|
||||
{ name = "httpx", extras = ["http2"], specifier = "==0.28.1" },
|
||||
{ name = "httpx-oauth", specifier = "==0.15.1" },
|
||||
{ name = "hubspot-api-client", specifier = "==11.1.0" },
|
||||
{ name = "huggingface-hub", specifier = "==0.35.3" },
|
||||
{ name = "inflection", specifier = "==0.5.1" },
|
||||
{ name = "jira", specifier = "==3.10.5" },
|
||||
{ name = "jsonref", specifier = "==1.1.0" },
|
||||
{ name = "kubernetes", specifier = "==31.0.0" },
|
||||
{ name = "langchain-core", specifier = "==1.2.22" },
|
||||
{ name = "langfuse", specifier = "==3.10.0" },
|
||||
{ name = "lazy-imports", specifier = "==1.0.1" },
|
||||
{ name = "lxml", specifier = "==5.3.0" },
|
||||
{ name = "mako", specifier = "==1.2.4" },
|
||||
{ name = "markitdown", extras = ["pdf", "docx", "pptx", "xlsx", "xls"], specifier = "==0.1.2" },
|
||||
{ name = "mcp", extras = ["cli"], specifier = "==1.26.0" },
|
||||
{ name = "mistune", specifier = "==3.2.0" },
|
||||
{ name = "msal", specifier = "==1.34.0" },
|
||||
{ name = "msoffcrypto-tool", specifier = "==5.4.2" },
|
||||
{ name = "nest-asyncio", specifier = "==1.6.0" },
|
||||
{ name = "oauthlib", specifier = "==3.2.2" },
|
||||
{ name = "office365-rest-python-client", specifier = "==2.6.2" },
|
||||
{ name = "openinference-instrumentation", specifier = "==0.1.42" },
|
||||
{ name = "openpyxl", specifier = "==3.0.10" },
|
||||
{ name = "opensearch-py", specifier = "==3.0.0" },
|
||||
{ name = "opentelemetry-proto", specifier = ">=1.39.0" },
|
||||
{ name = "passlib", specifier = "==1.7.4" },
|
||||
{ name = "playwright", specifier = "==1.55.0" },
|
||||
{ name = "psutil", specifier = "==7.1.3" },
|
||||
{ name = "psycopg2-binary", specifier = "==2.9.9" },
|
||||
{ name = "puremagic", specifier = "==1.28" },
|
||||
{ name = "pyairtable", specifier = "==3.0.1" },
|
||||
{ name = "pycryptodome", specifier = "==3.19.1" },
|
||||
{ name = "pygithub", specifier = "==2.5.0" },
|
||||
{ name = "pympler", specifier = "==1.1" },
|
||||
{ name = "pypandoc-binary", specifier = "==1.16.2" },
|
||||
{ name = "pypdf", specifier = "==6.9.2" },
|
||||
{ name = "pytest-mock", specifier = "==3.12.0" },
|
||||
{ name = "pytest-playwright", specifier = "==0.7.0" },
|
||||
{ name = "python-dateutil", specifier = "==2.8.2" },
|
||||
{ name = "python-docx", specifier = "==1.1.2" },
|
||||
{ name = "python-dotenv", specifier = "==1.1.1" },
|
||||
{ name = "python-gitlab", specifier = "==5.6.0" },
|
||||
{ name = "python-multipart", specifier = "==0.0.22" },
|
||||
{ name = "python-pptx", specifier = "==0.6.23" },
|
||||
{ name = "python3-saml", specifier = "==1.15.0" },
|
||||
{ name = "pywikibot", specifier = "==9.0.0" },
|
||||
{ name = "rapidfuzz", specifier = "==3.13.0" },
|
||||
{ name = "redis", specifier = "==5.0.8" },
|
||||
{ name = "requests", specifier = "==2.33.0" },
|
||||
{ name = "requests-oauthlib", specifier = "==1.3.1" },
|
||||
{ name = "rfc3986", specifier = "==1.5.0" },
|
||||
{ name = "sendgrid", specifier = "==6.12.5" },
|
||||
{ name = "shapely", specifier = "==2.0.6" },
|
||||
{ name = "simple-salesforce", specifier = "==1.12.6" },
|
||||
{ name = "slack-sdk", specifier = "==3.20.2" },
|
||||
{ name = "sqlalchemy", extras = ["mypy"], specifier = "==2.0.15" },
|
||||
{ name = "starlette", specifier = "==0.49.3" },
|
||||
{ name = "stripe", specifier = "==10.12.0" },
|
||||
{ name = "supervisor", specifier = "==4.3.0" },
|
||||
{ name = "tiktoken", specifier = "==0.7.0" },
|
||||
{ name = "timeago", specifier = "==1.0.16" },
|
||||
{ name = "trafilatura", specifier = "==1.12.2" },
|
||||
{ name = "types-openpyxl", specifier = "==3.0.4.7" },
|
||||
{ name = "unstructured", specifier = "==0.18.27" },
|
||||
{ name = "unstructured-client", specifier = "==0.42.6" },
|
||||
{ name = "urllib3", specifier = "==2.6.3" },
|
||||
{ name = "xmlsec", specifier = "==1.3.14" },
|
||||
{ name = "zulip", specifier = "==0.8.2" },
|
||||
]
|
||||
dev = [
|
||||
{ name = "black", specifier = "==25.1.0" },
|
||||
{ name = "celery-types", specifier = "==0.19.0" },
|
||||
{ name = "faker", specifier = "==40.1.2" },
|
||||
{ name = "hatchling", specifier = "==1.28.0" },
|
||||
{ name = "ipykernel", specifier = "==6.29.5" },
|
||||
{ name = "manygo", specifier = "==0.2.0" },
|
||||
{ name = "matplotlib", specifier = "==3.10.8" },
|
||||
{ name = "mypy", specifier = "==1.13.0" },
|
||||
{ name = "mypy-extensions", specifier = "==1.0.0" },
|
||||
{ name = "onyx-devtools", specifier = "==0.7.3" },
|
||||
{ name = "openapi-generator-cli", specifier = "==7.17.0" },
|
||||
{ name = "pandas-stubs", specifier = "~=2.3.3" },
|
||||
{ name = "pre-commit", specifier = "==3.2.2" },
|
||||
{ name = "pytest", specifier = "==8.3.5" },
|
||||
{ name = "pytest-alembic", specifier = "==0.12.1" },
|
||||
{ name = "pytest-asyncio", specifier = "==1.3.0" },
|
||||
{ name = "pytest-dotenv", specifier = "==0.5.2" },
|
||||
{ name = "pytest-repeat", specifier = "==0.9.4" },
|
||||
{ name = "pytest-xdist", specifier = "==3.8.0" },
|
||||
{ name = "release-tag", specifier = "==0.5.2" },
|
||||
{ name = "reorder-python-imports-black", specifier = "==3.14.0" },
|
||||
{ name = "ruff", specifier = "==0.12.0" },
|
||||
{ name = "types-beautifulsoup4", specifier = "==4.12.0.3" },
|
||||
{ name = "types-html5lib", specifier = "==1.1.11.13" },
|
||||
{ name = "types-oauthlib", specifier = "==3.2.0.9" },
|
||||
{ name = "types-passlib", specifier = "==1.7.7.20240106" },
|
||||
{ name = "types-pillow", specifier = "==10.2.0.20240822" },
|
||||
{ name = "types-psutil", specifier = "==7.1.3.20251125" },
|
||||
{ name = "types-psycopg2", specifier = "==2.9.21.10" },
|
||||
{ name = "types-python-dateutil", specifier = "==2.8.19.13" },
|
||||
{ name = "types-pytz", specifier = "==2023.3.1.1" },
|
||||
{ name = "types-pyyaml", specifier = "==6.0.12.11" },
|
||||
{ name = "types-regex", specifier = "==2023.3.23.1" },
|
||||
{ name = "types-requests", specifier = "==2.32.0.20250328" },
|
||||
{ name = "types-retry", specifier = "==0.9.9.3" },
|
||||
{ name = "types-setuptools", specifier = "==68.0.0.3" },
|
||||
{ name = "zizmor", specifier = "==1.18.0" },
|
||||
]
|
||||
ee = [{ name = "posthog", specifier = "==3.7.4" }]
|
||||
model-server = [
|
||||
{ name = "accelerate", specifier = "==1.6.0" },
|
||||
{ name = "einops", specifier = "==0.8.1" },
|
||||
{ name = "numpy", specifier = "==2.4.1" },
|
||||
{ name = "safetensors", specifier = "==0.5.3" },
|
||||
{ name = "sentence-transformers", specifier = "==4.0.2" },
|
||||
{ name = "sentry-sdk", extras = ["fastapi", "celery", "starlette"], specifier = "==2.14.0" },
|
||||
{ name = "torch", specifier = "==2.9.1" },
|
||||
{ name = "transformers", specifier = "==4.53.0" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "onyx-devtools"
|
||||
version = "0.7.2"
|
||||
version = "0.7.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "fastapi" },
|
||||
{ name = "openapi-generator-cli" },
|
||||
]
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/22/b0/765ed49157470e8ccc8ab89e6a896ade50cde3aa2a494662ad4db92a48c4/onyx_devtools-0.7.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:553a2b5e61b29b7913c991c8d5aed78f930f0f81a0f42229c6a8de2b1e8ff57e", size = 4203859, upload-time = "2026-03-27T15:09:49.63Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f7/9d/bba0a44a16d2fc27e5441aaf10727e10514e7a49bce70eca02bced566eb9/onyx_devtools-0.7.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:5cf0782dca8b3d861de9e18e65e990cfce5161cd559df44d8fabd3fefd54fdcd", size = 3879750, upload-time = "2026-03-27T15:09:42.413Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4d/d8/c5725e8af14c74fe0aeed29e4746400bb3c0a078fd1240df729dc6432b84/onyx_devtools-0.7.2-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:9a0d67373e16b4fbb38a5290c0d9dfd4cfa837e5da0c165b32841b9d37f7455b", size = 3743529, upload-time = "2026-03-27T15:09:44.546Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1a/82/b7c398a21dbc3e14fd7a29e49caa86b1bc0f8d7c75c051514785441ab779/onyx_devtools-0.7.2-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:794af14b2de575d0ae41b94551399eca8f8ba9b950c5db7acb7612767fd228f9", size = 4166562, upload-time = "2026-03-27T15:09:49.471Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/26/76/be129e2baafc91fe792d919b1f4d73fc943ba9c2b728a60f1fb98e0c115a/onyx_devtools-0.7.2-py3-none-win_amd64.whl", hash = "sha256:83b3eb84df58d865e4f714222a5fab3ea464836e2c8690569454a940bbb651ff", size = 4282270, upload-time = "2026-03-27T15:09:44.676Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3b/72/29b8c8dbcf069c56475f00511f04c4aaa5ba3faba1dfc8276107d4b3ef7f/onyx_devtools-0.7.2-py3-none-win_arm64.whl", hash = "sha256:62f0836624ee6a5b31e64fd93162e7fce142ac8a4f959607e411824bc2b88174", size = 3823053, upload-time = "2026-03-27T15:09:43.546Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/72/64/c75be8ab325896cc64bccd0e1e139a03ce305bf05598967922d380fc4694/onyx_devtools-0.7.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:675e2fdbd8d291fba4b8a6dfcf2bc94c56d22d11f395a9f0d0c3c0e5b39d7f9b", size = 4220613, upload-time = "2026-04-09T00:04:36.624Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ae/1f/589ff6bd446c4498f5bcdfd2a315709e91fc15edf5440c91ff64cbf0800f/onyx_devtools-0.7.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:bf3993de8ba02d6c2f1ab12b5b9b965e005040b37502f97db8a7d88d9b0cde4b", size = 3897867, upload-time = "2026-04-09T00:04:40.781Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/10/c0/53c9173eefc13218707282c5b99753960d039684994c3b3caf90ce286094/onyx_devtools-0.7.3-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:6138a94084bed05c674ad210a0bc4006c43bc4384e8eb54d469233de85c72bd7", size = 3762408, upload-time = "2026-04-09T00:04:41.592Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d2/37/69fadb65112854a596d200f704da94b837817d4dd0f46cb4482dc0309c94/onyx_devtools-0.7.3-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:90dac91b0cdc32eb8861f6e83545009a34c439fd3c41fc7dd499acd0105b660e", size = 4184427, upload-time = "2026-04-09T00:04:41.525Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bd/45/91c829ccb45f1a15e7c9641eccc6dd154adb540e03c7dee2a8f28cea24d0/onyx_devtools-0.7.3-py3-none-win_amd64.whl", hash = "sha256:abc68d70bec06e349481beec4b212de28a1a8b7ed6ef3b41daf7093ee10b44f3", size = 4299935, upload-time = "2026-04-09T00:04:40.262Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/cc/30/c5adcb8e3b46b71d8d92c3f9ee0c1d0bc5e2adc9f46e93931f21b36a3ee4/onyx_devtools-0.7.3-py3-none-win_arm64.whl", hash = "sha256:9e4411cadc5e81fabc9ed991402e3b4b40f02800681299c277b2142e5af0dcee", size = 3840228, upload-time = "2026-04-09T00:04:39.708Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
@@ -14,21 +14,23 @@ All scripts in this directory should be run from the **opal package root** (`web
|
||||
web/lib/opal/
|
||||
├── scripts/ # SVG conversion tooling (this directory)
|
||||
│ ├── convert-svg.sh # Converts SVGs into React components
|
||||
│   └── icon-template.js    # Shared SVGR template (used for icons, logos, and illustrations)
|
||||
├── src/
|
||||
│ ├── icons/ # Small, single-colour icons (stroke = currentColor)
|
||||
│ ├── logos/ # Brand/vendor logos (original colours preserved)
|
||||
│ └── illustrations/ # Larger, multi-colour illustrations (colours preserved)
|
||||
└── package.json
|
||||
```
|
||||
|
||||
## Icons vs Logos vs Illustrations
|
||||
|
||||
| | Icons | Logos | Illustrations |
|---|---|---|---|
| **Import path** | `@opal/icons` | `@opal/logos` | `@opal/illustrations` |
| **Location** | `src/icons/` | `src/logos/` | `src/illustrations/` |
| **Colour** | Overridable via `currentColor` | Fixed — original brand colours preserved | Fixed — original SVG colours preserved |
| **Script flag** | (none) | `--logo` | `--illustration` |
| **Use case** | UI elements, actions, navigation | Provider logos, platform logos, brand marks | Empty states, error pages, placeholders |
|
||||
|
||||
## Files in This Directory
|
||||
|
||||
@@ -49,12 +51,19 @@ Converts an SVG into a React component. Behaviour depends on the mode:
|
||||
- Adds `width={size}`, `height={size}`, and `stroke="currentColor"`
|
||||
- Result is colour-overridable via CSS `color` property
|
||||
|
||||
**Logo mode** (`--logo`):
|
||||
- Strips only `width` and `height` attributes (all colours preserved)
|
||||
- Adds `width={size}` and `height={size}`
|
||||
- Does **not** add `stroke="currentColor"` — logos keep their original brand colours
|
||||
|
||||
**Illustration mode** (`--illustration`):
|
||||
- Strips only `width` and `height` attributes (all colours preserved)
|
||||
- Adds `width={size}` and `height={size}`
|
||||
- Does **not** add `stroke="currentColor"` — illustrations keep their original colours
|
||||
|
||||
Both `--logo` and `--illustration` produce the same output — the distinction is purely organizational (different directories, different barrel exports).

All modes automatically delete the source SVG file after successful conversion.
|
||||
|
||||
## Adding New SVGs
|
||||
|
||||
@@ -70,6 +79,18 @@ Then add the export to `src/icons/index.ts`:
|
||||
export { default as SvgMyIcon } from "@opal/icons/my-icon";
|
||||
```
|
||||
|
||||
### Logos
|
||||
|
||||
```sh
|
||||
# From web/lib/opal/
|
||||
./scripts/convert-svg.sh --logo src/logos/my-logo.svg
|
||||
```
|
||||
|
||||
Then add the export to `src/logos/index.ts`:
|
||||
```ts
|
||||
export { default as SvgMyLogo } from "@opal/logos/my-logo";
|
||||
```
|
||||
|
||||
### Illustrations
|
||||
|
||||
```sh
|
||||
@@ -91,7 +112,7 @@ If you prefer to run the SVGR command directly:
|
||||
bunx @svgr/cli <file>.svg --typescript --svgo-config '{"plugins":[{"name":"removeAttrs","params":{"attrs":["stroke","stroke-opacity","width","height"]}}]}' --template scripts/icon-template.js > <file>.tsx
|
||||
```
|
||||
|
||||
**For logos and illustrations** (preserves colours):
|
||||
```sh
|
||||
bunx @svgr/cli <file>.svg --typescript --svgo-config '{"plugins":[{"name":"removeAttrs","params":{"attrs":["width","height"]}}]}' --template scripts/icon-template.js > <file>.tsx
|
||||
```
|
||||
|
||||
@@ -4,30 +4,36 @@
|
||||
#
|
||||
# By default, converts to a colour-overridable icon (stroke colours stripped, replaced with currentColor).
|
||||
# With --illustration, converts to a fixed-colour illustration (all original colours preserved).
|
||||
# With --logo, converts to a fixed-colour logo (all original colours preserved, same as illustration).
|
||||
#
|
||||
# Usage (from the opal package root — web/lib/opal/):
|
||||
# ./scripts/convert-svg.sh src/icons/<filename.svg>
|
||||
# ./scripts/convert-svg.sh --illustration src/illustrations/<filename.svg>
|
||||
# ./scripts/convert-svg.sh --logo src/logos/<filename.svg>
|
||||
|
||||
# Conversion mode: "icon" (default), "illustration", or "logo".
# Icons get stroke colours stripped and replaced with currentColor;
# illustrations and logos keep their original colours.
MODE="icon"

# Parse flags
while [[ "$1" == --* ]]; do
  case "$1" in
    --illustration)
      MODE="illustration"
      shift
      ;;
    --logo)
      MODE="logo"
      shift
      ;;
    *)
      echo "Unknown flag: $1" >&2
      echo "Usage: ./scripts/convert-svg.sh [--illustration | --logo] <filename.svg>" >&2
      exit 1
      ;;
  esac
done
|
||||
|
||||
# A filename argument is required after any flags.
if [ -z "$1" ]; then
  echo "Usage: ./scripts/convert-svg.sh [--illustration | --logo] <filename.svg>" >&2
  exit 1
fi
|
||||
|
||||
@@ -49,12 +55,12 @@ fi
|
||||
BASE_NAME="${SVG_FILE%.svg}"
|
||||
|
||||
# Build the SVGO config based on mode
if [ "$MODE" = "icon" ]; then
  # Icons: strip stroke, stroke-opacity, width, and height
  SVGO_CONFIG='{"plugins":[{"name":"removeAttrs","params":{"attrs":["stroke","stroke-opacity","width","height"]}}]}'
else
  # Illustrations and logos: only strip width and height (preserve all colours)
  SVGO_CONFIG='{"plugins":[{"name":"removeAttrs","params":{"attrs":["width","height"]}}]}'
fi
|
||||
|
||||
# Resolve the template path relative to this script (not the caller's CWD)
|
||||
@@ -85,7 +91,7 @@ if [ $? -eq 0 ]; then
|
||||
fi
|
||||
|
||||
# Icons additionally get stroke="currentColor"
|
||||
if [ "$ILLUSTRATION" = false ]; then
|
||||
if [ "$MODE" = "icon" ]; then
|
||||
perl -i -pe 's/\{\.\.\.props\}/stroke="currentColor" {...props}/g' "${BASE_NAME}.tsx"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Failed to add stroke attribute" >&2
|
||||
@@ -106,7 +112,7 @@ if [ $? -eq 0 ]; then
|
||||
fi
|
||||
|
||||
# For icons, also verify stroke="currentColor" was added
|
||||
if [ "$ILLUSTRATION" = false ]; then
|
||||
if [ "$MODE" = "icon" ]; then
|
||||
if ! grep -q 'stroke="currentColor"' "${BASE_NAME}.tsx"; then
|
||||
echo "Error: Post-processing did not add stroke=\"currentColor\"" >&2
|
||||
exit 1
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
import type { IconProps } from "@opal/types";
|
||||
const SvgDiscordMono = ({ size, ...props }: IconProps) => (
|
||||
<svg
|
||||
width={size}
|
||||
height={size}
|
||||
viewBox="0 0 52 52"
|
||||
fill="currentColor"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
stroke="currentColor"
|
||||
{...props}
|
||||
>
|
||||
<path d="M32.7571 7.80005C32.288 8.63286 31.8668 9.4944 31.4839 10.3751C27.8463 9.82945 24.1417 9.82945 20.4946 10.3751C20.1213 9.4944 19.6905 8.63286 19.2214 7.80005C15.804 8.384 12.4727 9.40825 9.31379 10.8537C3.05329 20.1296 1.35894 29.1661 2.20134 38.0782C5.86763 40.7872 9.97429 42.8549 14.349 44.1759C15.3349 42.8549 16.2061 41.4477 16.9527 39.9831C15.536 39.4566 14.1671 38.7961 12.8556 38.0303C13.2002 37.7814 13.5353 37.523 13.8608 37.2741C21.5476 40.8925 30.4501 40.8925 38.1465 37.2741C38.4719 37.5421 38.807 37.8006 39.1516 38.0303C37.8401 38.8057 36.4713 39.4566 35.0449 39.9927C35.7916 41.4573 36.6627 42.8645 37.6487 44.1855C42.0233 42.8645 46.1299 40.8064 49.7965 38.0973C50.7918 27.7589 48.0924 18.799 42.6646 10.8633C39.5154 9.41784 36.1841 8.39355 32.7666 7.81919L32.7571 7.80005ZM18.0248 32.5931C15.6604 32.5931 13.698 30.4488 13.698 27.7972C13.698 25.1456 15.5838 22.9918 18.0153 22.9918C20.4468 22.9918 22.3804 25.1552 22.3421 27.7972C22.3038 30.4393 20.4372 32.5931 18.0248 32.5931ZM33.9728 32.5931C31.5988 32.5931 29.6556 30.4488 29.6556 27.7972C29.6556 25.1456 31.5414 22.9918 33.9728 22.9918C36.4043 22.9918 38.3284 25.1552 38.29 27.7972C38.2518 30.4393 36.3851 32.5931 33.9728 32.5931Z" />
|
||||
</svg>
|
||||
);
|
||||
export default SvgDiscordMono;
|
||||
19
web/lib/opal/src/icons/discord.tsx
Normal file
19
web/lib/opal/src/icons/discord.tsx
Normal file
@@ -0,0 +1,19 @@
|
||||
import type { IconProps } from "@opal/types";
|
||||
|
||||
const SvgDiscord = ({ size, ...props }: IconProps) => (
|
||||
<svg
|
||||
width={size}
|
||||
height={size}
|
||||
viewBox="0 0 16 16"
|
||||
fill="none"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
stroke="currentColor"
|
||||
strokeWidth={1.5}
|
||||
strokeLinecap="round"
|
||||
strokeLinejoin="round"
|
||||
{...props}
|
||||
>
|
||||
<path d="M5.5 12.5C5.5 12.5 4.88936 13.6396 4.60178 13.9974C3.32584 13.6396 2.12806 13.0796 1.05872 12.3459C0.813023 9.93224 1.30721 6.33924 3.13319 3.82703C4.05454 3.43555 5.00325 3.15815 6 3C6.1368 3.22555 6.39111 3.76148 6.5 4C7.56375 3.85223 8.43903 3.85223 9.5 4C9.61167 3.76148 9.86319 3.22555 10 3C10.9968 3.15556 11.942 3.43815 12.8605 3.82963C14.4436 5.97887 15.2309 9.55113 14.9406 12.3511C13.8712 13.0848 12.6735 13.6422 11.3975 14C11.11 13.6422 10.5 12.5 10.5 12.5M5.5 12.5C5.14663 12.3965 4.25 12 4.25 12M5.5 12.5C7.12611 12.9761 8.87249 12.9759 10.5 12.5M10.5 12.5C10.854 12.3965 11.75 12 11.75 12M5.66002 10C5.02612 10 4.5 9.44167 4.5 8.75125C4.5 8.06083 5.00558 7.5 5.65746 7.5C6.30934 7.5 6.82775 8.06331 6.81749 8.75125C6.80722 9.43918 6.30677 10 5.66002 10ZM10.3424 10C9.70591 10 9.18493 9.44167 9.18493 8.75125C9.18493 8.06083 9.69052 7.5 10.3424 7.5C10.9943 7.5 11.5101 8.06331 11.4998 8.75125C11.4896 9.43918 10.9891 10 10.3424 10Z" />
|
||||
</svg>
|
||||
);
|
||||
export default SvgDiscord;
|
||||
44
web/lib/opal/src/icons/icons.stories.tsx
Normal file
44
web/lib/opal/src/icons/icons.stories.tsx
Normal file
@@ -0,0 +1,44 @@
|
||||
import React from "react";
|
||||
import type { Meta, StoryObj } from "@storybook/react";
|
||||
import * as Icons from "@opal/icons";
|
||||
|
||||
const icons = Object.entries(Icons).map(([name, Component]) => ({
|
||||
name: name.replace(/^Svg/, ""),
|
||||
Component,
|
||||
}));
|
||||
|
||||
const meta: Meta = {
|
||||
title: "opal/icons/All Icons",
|
||||
tags: ["autodocs"],
|
||||
};
|
||||
|
||||
export default meta;
|
||||
type Story = StoryObj;
|
||||
|
||||
export const AllIcons: Story = {
|
||||
render: () => (
|
||||
<div
|
||||
style={{
|
||||
display: "grid",
|
||||
gridTemplateColumns: "repeat(auto-fill, 100px)",
|
||||
gap: 16,
|
||||
}}
|
||||
>
|
||||
{icons.map(({ name, Component }) => (
|
||||
<div
|
||||
key={name}
|
||||
style={{
|
||||
display: "flex",
|
||||
flexDirection: "column",
|
||||
alignItems: "center",
|
||||
gap: 8,
|
||||
padding: 8,
|
||||
}}
|
||||
>
|
||||
<Component size={24} />
|
||||
<span style={{ fontSize: 11, textAlign: "center" }}>{name}</span>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
),
|
||||
};
|
||||
@@ -19,12 +19,9 @@ export { default as SvgArrowUpRight } from "@opal/icons/arrow-up-right";
|
||||
export { default as SvgArrowWallRight } from "@opal/icons/arrow-wall-right";
|
||||
export { default as SvgAudio } from "@opal/icons/audio";
|
||||
export { default as SvgAudioEqSmall } from "@opal/icons/audio-eq-small";
|
||||
export { default as SvgAws } from "@opal/icons/aws";
|
||||
export { default as SvgAzure } from "@opal/icons/azure";
|
||||
export { default as SvgBarChart } from "@opal/icons/bar-chart";
|
||||
export { default as SvgBarChartSmall } from "@opal/icons/bar-chart-small";
|
||||
export { default as SvgBell } from "@opal/icons/bell";
|
||||
export { default as SvgBifrost } from "@opal/icons/bifrost";
|
||||
export { default as SvgBlocks } from "@opal/icons/blocks";
|
||||
export { default as SvgBookOpen } from "@opal/icons/book-open";
|
||||
export { default as SvgBookmark } from "@opal/icons/bookmark";
|
||||
@@ -45,7 +42,6 @@ export { default as SvgChevronRight } from "@opal/icons/chevron-right";
|
||||
export { default as SvgChevronUp } from "@opal/icons/chevron-up";
|
||||
export { default as SvgChevronUpSmall } from "@opal/icons/chevron-up-small";
|
||||
export { default as SvgCircle } from "@opal/icons/circle";
|
||||
export { default as SvgClaude } from "@opal/icons/claude";
|
||||
export { default as SvgClipboard } from "@opal/icons/clipboard";
|
||||
export { default as SvgClock } from "@opal/icons/clock";
|
||||
export { default as SvgClockHandsSmall } from "@opal/icons/clock-hands-small";
|
||||
@@ -59,8 +55,8 @@ export { default as SvgCurate } from "@opal/icons/curate";
|
||||
export { default as SvgCreditCard } from "@opal/icons/credit-card";
|
||||
export { default as SvgDashboard } from "@opal/icons/dashboard";
|
||||
export { default as SvgDevKit } from "@opal/icons/dev-kit";
|
||||
export { default as SvgDiscord } from "@opal/icons/discord";
|
||||
export { default as SvgDownload } from "@opal/icons/download";
|
||||
export { default as SvgDiscordMono } from "@opal/icons/DiscordMono";
|
||||
export { default as SvgDownloadCloud } from "@opal/icons/download-cloud";
|
||||
export { default as SvgEdit } from "@opal/icons/edit";
|
||||
export { default as SvgEditBig } from "@opal/icons/edit-big";
|
||||
@@ -84,7 +80,6 @@ export { default as SvgFolderIn } from "@opal/icons/folder-in";
|
||||
export { default as SvgFolderOpen } from "@opal/icons/folder-open";
|
||||
export { default as SvgFolderPartialOpen } from "@opal/icons/folder-partial-open";
|
||||
export { default as SvgFolderPlus } from "@opal/icons/folder-plus";
|
||||
export { default as SvgGemini } from "@opal/icons/gemini";
|
||||
export { default as SvgGlobe } from "@opal/icons/globe";
|
||||
export { default as SvgHandle } from "@opal/icons/handle";
|
||||
export { default as SvgHardDrive } from "@opal/icons/hard-drive";
|
||||
@@ -105,8 +100,6 @@ export { default as SvgLightbulbSimple } from "@opal/icons/lightbulb-simple";
|
||||
export { default as SvgLineChartUp } from "@opal/icons/line-chart-up";
|
||||
export { default as SvgLink } from "@opal/icons/link";
|
||||
export { default as SvgLinkedDots } from "@opal/icons/linked-dots";
|
||||
export { default as SvgLitellm } from "@opal/icons/litellm";
|
||||
export { default as SvgLmStudio } from "@opal/icons/lm-studio";
|
||||
export { default as SvgLoader } from "@opal/icons/loader";
|
||||
export { default as SvgLock } from "@opal/icons/lock";
|
||||
export { default as SvgLogOut } from "@opal/icons/log-out";
|
||||
@@ -122,13 +115,7 @@ export { default as SvgMoreHorizontal } from "@opal/icons/more-horizontal";
|
||||
export { default as SvgMusicSmall } from "@opal/icons/music-small";
|
||||
export { default as SvgNetworkGraph } from "@opal/icons/network-graph";
|
||||
export { default as SvgNotificationBubble } from "@opal/icons/notification-bubble";
|
||||
export { default as SvgOllama } from "@opal/icons/ollama";
|
||||
export { default as SvgOnyxLogo } from "@opal/icons/onyx-logo";
|
||||
export { default as SvgOnyxLogoTyped } from "@opal/icons/onyx-logo-typed";
|
||||
export { default as SvgOnyxOctagon } from "@opal/icons/onyx-octagon";
|
||||
export { default as SvgOnyxTyped } from "@opal/icons/onyx-typed";
|
||||
export { default as SvgOpenai } from "@opal/icons/openai";
|
||||
export { default as SvgOpenrouter } from "@opal/icons/openrouter";
|
||||
export { default as SvgOrganization } from "@opal/icons/organization";
|
||||
export { default as SvgPaintBrush } from "@opal/icons/paint-brush";
|
||||
export { default as SvgPaperclip } from "@opal/icons/paperclip";
|
||||
@@ -184,6 +171,7 @@ export { default as SvgTrash } from "@opal/icons/trash";
|
||||
export { default as SvgTwoLineSmall } from "@opal/icons/two-line-small";
|
||||
export { default as SvgUnplug } from "@opal/icons/unplug";
|
||||
export { default as SvgUploadCloud } from "@opal/icons/upload-cloud";
|
||||
export { default as SvgUploadSquare } from "@opal/icons/upload-square";
|
||||
export { default as SvgUser } from "@opal/icons/user";
|
||||
export { default as SvgUserCheck } from "@opal/icons/user-check";
|
||||
export { default as SvgUserEdit } from "@opal/icons/user-edit";
|
||||
|
||||
@@ -8,63 +8,19 @@ const SvgSlack = ({ size, ...props }: IconProps) => (
|
||||
fill="none"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
stroke="currentColor"
|
||||
strokeWidth={1.5}
|
||||
strokeLinecap="round"
|
||||
strokeLinejoin="round"
|
||||
{...props}
|
||||
>
|
||||
<g clipPath="url(#clip0_259_269)">
|
||||
<path
|
||||
d="M9.66666 6.66665C9.11333 6.66665 8.66666 6.21998 8.66666 5.66665V2.33331C8.66666 1.77998 9.11333 1.33331 9.66666 1.33331C10.22 1.33331 10.6667 1.77998 10.6667 2.33331V5.66665C10.6667 6.21998 10.22 6.66665 9.66666 6.66665Z"
|
||||
strokeWidth={1.5}
|
||||
strokeLinecap="round"
|
||||
strokeLinejoin="round"
|
||||
/>
|
||||
<path
|
||||
d="M13.6667 6.66665H12.6667V5.66665C12.6667 5.11331 13.1133 4.66665 13.6667 4.66665C14.22 4.66665 14.6667 5.11331 14.6667 5.66665C14.6667 6.21998 14.22 6.66665 13.6667 6.66665Z"
|
||||
strokeWidth={1.5}
|
||||
strokeLinecap="round"
|
||||
strokeLinejoin="round"
|
||||
/>
|
||||
<path
|
||||
d="M6.33333 9.33331C6.88666 9.33331 7.33333 9.77998 7.33333 10.3333V13.6666C7.33333 14.22 6.88666 14.6666 6.33333 14.6666C5.78 14.6666 5.33333 14.22 5.33333 13.6666V10.3333C5.33333 9.77998 5.78 9.33331 6.33333 9.33331Z"
|
||||
strokeWidth={1.5}
|
||||
strokeLinecap="round"
|
||||
strokeLinejoin="round"
|
||||
/>
|
||||
<path
|
||||
d="M2.33333 9.33331H3.33333V10.3333C3.33333 10.8866 2.88666 11.3333 2.33333 11.3333C1.77999 11.3333 1.33333 10.8866 1.33333 10.3333C1.33333 9.77998 1.77999 9.33331 2.33333 9.33331Z"
|
||||
strokeWidth={1.5}
|
||||
strokeLinecap="round"
|
||||
strokeLinejoin="round"
|
||||
/>
|
||||
<path
|
||||
d="M9.33333 9.66665C9.33333 9.11331 9.78 8.66665 10.3333 8.66665H13.6667C14.22 8.66665 14.6667 9.11331 14.6667 9.66665C14.6667 10.22 14.22 10.6666 13.6667 10.6666H10.3333C9.78 10.6666 9.33333 10.22 9.33333 9.66665Z"
|
||||
strokeWidth={1.5}
|
||||
strokeLinecap="round"
|
||||
strokeLinejoin="round"
|
||||
/>
|
||||
<path
|
||||
d="M10.3333 12.6666H9.33333V13.6666C9.33333 14.22 9.78 14.6666 10.3333 14.6666C10.8867 14.6666 11.3333 14.22 11.3333 13.6666C11.3333 13.1133 10.8867 12.6666 10.3333 12.6666Z"
|
||||
strokeWidth={1.5}
|
||||
strokeLinecap="round"
|
||||
strokeLinejoin="round"
|
||||
/>
|
||||
<path
|
||||
d="M6.66666 6.33331C6.66666 5.77998 6.22 5.33331 5.66666 5.33331H2.33333C1.77999 5.33331 1.33333 5.77998 1.33333 6.33331C1.33333 6.88665 1.77999 7.33331 2.33333 7.33331H5.66666C6.22 7.33331 6.66666 6.88665 6.66666 6.33331Z"
|
||||
strokeWidth={1.5}
|
||||
strokeLinecap="round"
|
||||
strokeLinejoin="round"
|
||||
/>
|
||||
<path
|
||||
d="M5.66666 3.33331H6.66666V2.33331C6.66666 1.77998 6.22 1.33331 5.66666 1.33331C5.11333 1.33331 4.66666 1.77998 4.66666 2.33331C4.66666 2.88665 5.11333 3.33331 5.66666 3.33331Z"
|
||||
strokeWidth={1.5}
|
||||
strokeLinecap="round"
|
||||
strokeLinejoin="round"
|
||||
/>
|
||||
</g>
|
||||
<defs>
|
||||
<clipPath id="clip0_259_269">
|
||||
<rect width={16} height={16} fill="white" />
|
||||
</clipPath>
|
||||
</defs>
|
||||
<path d="M9.66668 6.66671C9.11334 6.66671 8.66668 6.22004 8.66668 5.66671V2.33337C8.66668 1.78004 9.11334 1.33337 9.66668 1.33337C10.22 1.33337 10.6667 1.78004 10.6667 2.33337V5.66671C10.6667 6.22004 10.22 6.66671 9.66668 6.66671Z" />
|
||||
<path d="M13.6667 6.66671H12.6667V5.66671C12.6667 5.11337 13.1133 4.66671 13.6667 4.66671C14.22 4.66671 14.6667 5.11337 14.6667 5.66671C14.6667 6.22004 14.22 6.66671 13.6667 6.66671Z" />
|
||||
<path d="M6.33334 9.33337C6.88668 9.33337 7.33334 9.78004 7.33334 10.3334V13.6667C7.33334 14.22 6.88668 14.6667 6.33334 14.6667C5.78001 14.6667 5.33334 14.22 5.33334 13.6667V10.3334C5.33334 9.78004 5.78001 9.33337 6.33334 9.33337Z" />
|
||||
<path d="M2.33334 9.33337H3.33334V10.3334C3.33334 10.8867 2.88668 11.3334 2.33334 11.3334C1.78001 11.3334 1.33334 10.8867 1.33334 10.3334C1.33334 9.78004 1.78001 9.33337 2.33334 9.33337Z" />
|
||||
<path d="M9.33334 9.66671C9.33334 9.11337 9.78001 8.66671 10.3333 8.66671H13.6667C14.22 8.66671 14.6667 9.11337 14.6667 9.66671C14.6667 10.22 14.22 10.6667 13.6667 10.6667H10.3333C9.78001 10.6667 9.33334 10.22 9.33334 9.66671Z" />
|
||||
<path d="M10.3333 12.6667H9.33334V13.6667C9.33334 14.22 9.78001 14.6667 10.3333 14.6667C10.8867 14.6667 11.3333 14.22 11.3333 13.6667C11.3333 13.1134 10.8867 12.6667 10.3333 12.6667Z" />
|
||||
<path d="M6.66668 6.33337C6.66668 5.78004 6.22001 5.33337 5.66668 5.33337H2.33334C1.78001 5.33337 1.33334 5.78004 1.33334 6.33337C1.33334 6.88671 1.78001 7.33337 2.33334 7.33337H5.66668C6.22001 7.33337 6.66668 6.88671 6.66668 6.33337Z" />
|
||||
<path d="M5.66668 3.33337H6.66668V2.33337C6.66668 1.78004 6.22001 1.33337 5.66668 1.33337C5.11334 1.33337 4.66668 1.78004 4.66668 2.33337C4.66668 2.88671 5.11334 3.33337 5.66668 3.33337Z" />
|
||||
</svg>
|
||||
);
|
||||
export default SvgSlack;
|
||||
|
||||
22
web/lib/opal/src/icons/upload-square.tsx
Normal file
22
web/lib/opal/src/icons/upload-square.tsx
Normal file
@@ -0,0 +1,22 @@
|
||||
import type { IconProps } from "@opal/types";
|
||||
|
||||
const SvgUploadSquare = ({ size, ...props }: IconProps) => (
|
||||
<svg
|
||||
width={size}
|
||||
height={size}
|
||||
viewBox="0 0 16 16"
|
||||
fill="none"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
stroke="currentColor"
|
||||
{...props}
|
||||
>
|
||||
<path
|
||||
d="M11 14H12.6667C13.3929 14 14 13.3929 14 12.6667V3.33333C14 2.60711 13.3929 2 12.6667 2H3.33333C2.60711 2 2 2.60711 2 3.33333V12.6667C2 13.3929 2.60711 14 3.33333 14H5M10.6666 8.16667L7.99998 5.5M7.99998 5.5L5.33331 8.16667M7.99998 5.5V14"
|
||||
strokeWidth={1.5}
|
||||
strokeLinecap="round"
|
||||
strokeLinejoin="round"
|
||||
/>
|
||||
</svg>
|
||||
);
|
||||
|
||||
export default SvgUploadSquare;
|
||||
46
web/lib/opal/src/illustrations/illustrations.stories.tsx
Normal file
46
web/lib/opal/src/illustrations/illustrations.stories.tsx
Normal file
@@ -0,0 +1,46 @@
|
||||
import React from "react";
|
||||
import type { Meta, StoryObj } from "@storybook/react";
|
||||
import * as Illustrations from "@opal/illustrations";
|
||||
|
||||
const illustrations = Object.entries(Illustrations).map(
|
||||
([name, Component]) => ({
|
||||
name: name.replace(/^Svg/, ""),
|
||||
Component,
|
||||
})
|
||||
);
|
||||
|
||||
const meta: Meta = {
|
||||
title: "opal/illustrations/All Illustrations",
|
||||
tags: ["autodocs"],
|
||||
};
|
||||
|
||||
export default meta;
|
||||
type Story = StoryObj;
|
||||
|
||||
export const AllIllustrations: Story = {
|
||||
render: () => (
|
||||
<div
|
||||
style={{
|
||||
display: "grid",
|
||||
gridTemplateColumns: "repeat(auto-fill, 140px)",
|
||||
gap: 24,
|
||||
}}
|
||||
>
|
||||
{illustrations.map(({ name, Component }) => (
|
||||
<div
|
||||
key={name}
|
||||
style={{
|
||||
display: "flex",
|
||||
flexDirection: "column",
|
||||
alignItems: "center",
|
||||
gap: 8,
|
||||
padding: 8,
|
||||
}}
|
||||
>
|
||||
<Component size={80} />
|
||||
<span style={{ fontSize: 11, textAlign: "center" }}>{name}</span>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
),
|
||||
};
|
||||
17
web/lib/opal/src/logos/anthropic.tsx
Normal file
17
web/lib/opal/src/logos/anthropic.tsx
Normal file
@@ -0,0 +1,17 @@
|
||||
import type { IconProps } from "@opal/types";
|
||||
const SvgAnthropic = ({ size, ...props }: IconProps) => (
|
||||
<svg
|
||||
width={size}
|
||||
height={size}
|
||||
viewBox="0 0 52 52"
|
||||
fill="none"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
{...props}
|
||||
>
|
||||
<path
|
||||
d="M36.1779 9.78003H29.1432L41.9653 42.2095H49L36.1779 9.78003ZM15.8221 9.78003L3 42.2095H10.1844L12.8286 35.4243H26.2495L28.8438 42.2095H36.0282L23.2061 9.78003H15.8221ZM15.1236 29.3874L19.5141 18.0121L23.9046 29.3874H15.1236Z"
|
||||
fill="currentColor"
|
||||
/>
|
||||
</svg>
|
||||
);
|
||||
export default SvgAnthropic;
|
||||
17
web/lib/opal/src/logos/deepseek.tsx
Normal file
17
web/lib/opal/src/logos/deepseek.tsx
Normal file
@@ -0,0 +1,17 @@
|
||||
import type { IconProps } from "@opal/types";

// DeepSeek whale logo on a 52x52 viewBox, drawn as a single fixed-color
// (#4D6BFE) path. `size` sets both width and height; remaining props are
// spread onto the <svg> element after the defaults so callers can override
// them (e.g. className, aria-* attributes).
// NOTE(review): the `d` datum below was line-wrapped by the diff renderer;
// re-verify it against the generated source before relying on this copy.
const SvgDeepseek = ({ size, ...props }: IconProps) => (
  <svg
    width={size}
    height={size}
    viewBox="0 0 52 52"
    fill="none"
    xmlns="http://www.w3.org/2000/svg"
    {...props}
  >
    <path
      d="M50.4754 11.5882C49.9458 11.3287 49.7177 11.8233 49.408 12.0745C49.3021 12.1556 49.2125 12.2609 49.1228 12.3581C48.3487 13.1848 47.4443 13.728 46.2628 13.6631C44.5354 13.5657 43.0605 14.1089 41.7568 15.4302C41.4797 13.801 40.559 12.8281 39.1575 12.2041C38.4243 11.8799 37.6828 11.5556 37.1693 10.8504C36.8108 10.348 36.713 9.78867 36.5338 9.23745C36.4196 8.90517 36.3056 8.56462 35.9226 8.50786C35.507 8.44309 35.3441 8.79147 35.1811 9.08346C34.5292 10.275 34.2767 11.5882 34.301 12.9175C34.3581 15.9085 35.6211 18.2914 38.1307 19.9856C38.4159 20.1801 38.4892 20.3745 38.3996 20.6584C38.2285 21.242 38.0247 21.8093 37.8456 22.3929C37.7314 22.7659 37.5602 22.8469 37.1611 22.6846C35.784 22.1093 34.5943 21.2581 33.5432 20.2288C31.7587 18.5023 30.1454 16.5975 28.1328 15.106C27.6602 14.7574 27.1876 14.4333 26.6986 14.1251C24.6452 12.1313 26.9676 10.4938 27.5053 10.2994C28.0675 10.0968 27.7009 9.39954 25.8838 9.40782C24.0667 9.41582 22.4045 10.0238 20.286 10.8344C19.9764 10.956 19.6503 11.045 19.3163 11.118C17.3933 10.7533 15.397 10.6721 13.311 10.9073C9.38354 11.3449 6.24657 13.2012 3.94062 16.3705C1.17028 20.1801 0.518441 24.5084 1.31698 29.0233C2.15627 33.7812 4.58437 37.7206 8.31632 40.8008C12.1868 43.9943 16.6439 45.5587 21.7284 45.2588C24.8166 45.0806 28.2551 44.6672 32.1337 41.3845C33.1114 41.8708 34.1382 42.0652 35.8412 42.2111C37.1531 42.3327 38.4161 42.1463 39.3938 41.9438C40.9257 41.6195 40.8198 40.201 40.2657 39.9416C35.7761 37.8504 36.7619 38.7015 35.8657 38.0124C38.1471 35.3134 41.5858 32.5087 42.9302 23.4222C43.0361 22.7009 42.9465 22.2469 42.9302 21.6633C42.922 21.3068 43.0035 21.1691 43.411 21.1284C44.5354 20.9987 45.6272 20.6908 46.6295 20.1396C49.5385 18.5509 50.7117 15.9409 50.9888 12.8121C51.0296 12.3338 50.9807 11.8393 50.4754 11.5882ZM25.1262 39.747C20.775 36.3266 18.6647 35.1998 17.7928 35.2484C16.978 35.2971 17.1247 36.2292 17.3038 36.8372C17.4913 37.4369 17.7358 37.8504 18.0779 38.3772C18.3142 38.7258 18.4773 39.2444 17.8417 39.6336C16.4402 40.501 14.0038 39.3418 13.8897 39.2851C11.0542 37.6152 8.68303 35.4104 7.01255 32.3953C5.39919 29.4933 4.46222 26.3809 4.30733 23.0576C4.26659 22.2552 4.50288 21.9713 5.30142 21.8256C6.35253 21.631 7.43629 21.5904 8.4874 21.7444C12.9283 22.3929 16.709 24.3788 19.8786 27.5238C21.6875 29.315 23.0564 31.4551 24.4662 33.5463C25.9654 35.7671 27.5787 37.8828 29.6321 39.6174C30.3573 40.2253 30.9358 40.6872 31.4899 41.0278C29.8196 41.2142 27.0329 41.2548 25.1262 39.747ZM27.2121 26.3323C27.2121 25.9756 27.4973 25.692 27.856 25.692C27.9374 25.692 28.0108 25.708 28.076 25.7323C28.1656 25.7648 28.2471 25.8135 28.3123 25.8863C28.4264 25.9999 28.4915 26.1619 28.4915 26.3322C28.4915 26.6888 28.2064 26.9724 27.8479 26.9724C27.4895 26.9724 27.2121 26.6889 27.2121 26.3323ZM33.69 29.6556C33.2745 29.8259 32.8589 29.9716 32.4597 29.9879C31.8404 30.0203 31.1641 29.7689 30.7975 29.461C30.2271 28.9827 29.8197 28.7153 29.6486 27.8804C29.5752 27.5238 29.6159 26.9725 29.6812 26.6565C29.8278 25.9756 29.6649 25.538 29.1842 25.1407C28.793 24.8164 28.296 24.7273 27.75 24.7273C27.5463 24.7273 27.359 24.6381 27.2204 24.5652C26.9922 24.4517 26.8049 24.168 26.9841 23.8195C27.0411 23.7061 27.3183 23.4304 27.3835 23.3819C28.125 22.9603 28.9805 23.0981 29.7709 23.4142C30.5043 23.7141 31.0584 24.2654 31.8568 25.0434C32.6717 25.9836 32.8183 26.2432 33.2828 26.9482C33.6496 27.4995 33.9836 28.0668 34.2117 28.7153C34.3502 29.1207 34.1708 29.4529 33.69 29.6556Z"
      fill="#4D6BFE"
    />
  </svg>
);

export default SvgDeepseek;
|
||||
17
web/lib/opal/src/logos/discord.tsx
Normal file
17
web/lib/opal/src/logos/discord.tsx
Normal file
@@ -0,0 +1,17 @@
|
||||
import type { IconProps } from "@opal/types";
|
||||
const SvgDiscordMono = ({ size, ...props }: IconProps) => (
|
||||
<svg
|
||||
width={size}
|
||||
height={size}
|
||||
viewBox="0 0 52 52"
|
||||
fill="none"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
{...props}
|
||||
>
|
||||
<path
|
||||
d="M32.7571 7.80005C32.288 8.63286 31.8668 9.4944 31.4839 10.3751C27.8463 9.82945 24.1417 9.82945 20.4946 10.3751C20.1213 9.4944 19.6905 8.63286 19.2214 7.80005C15.804 8.384 12.4727 9.40825 9.31379 10.8537C3.05329 20.1296 1.35894 29.1661 2.20134 38.0782C5.86763 40.7872 9.97429 42.8549 14.349 44.1759C15.3349 42.8549 16.2061 41.4477 16.9527 39.9831C15.536 39.4566 14.1671 38.7961 12.8556 38.0303C13.2002 37.7814 13.5353 37.523 13.8608 37.2741C21.5476 40.8925 30.4501 40.8925 38.1465 37.2741C38.4719 37.5421 38.807 37.8006 39.1516 38.0303C37.8401 38.8057 36.4713 39.4566 35.0449 39.9927C35.7916 41.4573 36.6627 42.8645 37.6487 44.1855C42.0233 42.8645 46.1299 40.8064 49.7965 38.0973C50.7918 27.7589 48.0924 18.799 42.6646 10.8633C39.5154 9.41784 36.1841 8.39355 32.7666 7.81919L32.7571 7.80005ZM18.0248 32.5931C15.6604 32.5931 13.698 30.4488 13.698 27.7972C13.698 25.1456 15.5838 22.9918 18.0153 22.9918C20.4468 22.9918 22.3804 25.1552 22.3421 27.7972C22.3038 30.4393 20.4372 32.5931 18.0248 32.5931ZM33.9728 32.5931C31.5988 32.5931 29.6556 30.4488 29.6556 27.7972C29.6556 25.1456 31.5414 22.9918 33.9728 22.9918C36.4043 22.9918 38.3284 25.1552 38.29 27.7972C38.2518 30.4393 36.3851 32.5931 33.9728 32.5931Z"
|
||||
fill="#5865F2"
|
||||
/>
|
||||
</svg>
|
||||
);
|
||||
export default SvgDiscordMono;
|
||||
33
web/lib/opal/src/logos/google.tsx
Normal file
33
web/lib/opal/src/logos/google.tsx
Normal file
@@ -0,0 +1,33 @@
|
||||
import type { IconProps } from "@opal/types";
|
||||
const SvgGoogle = ({ size, ...props }: IconProps) => (
|
||||
<svg
|
||||
width={size}
|
||||
height={size}
|
||||
viewBox="0 0 52 52"
|
||||
fill="none"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
{...props}
|
||||
>
|
||||
<path
|
||||
d="M26.4758 21.5474H48.9506C49.2333 23.1186 49.4 24.8471 49.4 26.5493C49.4 33.5757 47.0924 39.5382 42.3041 44.2244C38.0758 48.3709 32.7153 50 26.4758 50C19.4411 50 13.3274 47.1642 8.85118 42.6473C4.41266 38.1688 2.6 32.1174 2.6 26C2.6 19.8826 4.41266 13.8312 8.85118 9.35266C13.3274 4.83583 19.4411 2 26.4758 2C32.9333 2 38.3468 4.36633 42.4994 8.27151L35.6726 15.1286C33.0902 12.6396 29.9845 11.5201 26.4871 11.5201C20.2689 11.5201 14.9692 15.7628 13.0825 21.4432C12.6042 22.8835 12.3419 24.4084 12.3419 26.0015C12.3419 27.5946 12.6042 29.1195 13.0825 30.5598C14.9692 36.2402 20.2689 40.4829 26.4871 40.4829C29.6527 40.4829 32.3416 39.7368 34.4897 38.4527C37.0882 36.8883 38.8266 34.4622 39.3995 31.5747H26.4758V21.5474Z"
|
||||
fill="#4285F4"
|
||||
/>
|
||||
<path
|
||||
d="M26.4758 21.5474V31.5747H39.3995C38.8266 34.4622 37.0882 36.8883 34.4897 38.4527L34.49 38.4525L42.3041 44.2244C47.0924 39.5382 49.4 33.5757 49.4 26.5493C49.4 24.8471 49.2333 23.1186 48.9506 21.5474H26.4758Z"
|
||||
fill="#4285F4"
|
||||
/>
|
||||
<path
|
||||
d="M13.0825 30.5598C12.6042 29.1195 12.3419 27.5946 12.3419 26.0015C12.3419 24.4084 12.6042 22.8835 13.0825 21.4432L5.12352 15.2415C3.49286 18.5013 2.6 22.1138 2.6 26C2.6 29.8862 3.49286 33.4987 5.12352 36.7585L13.0825 30.5598Z"
|
||||
fill="#FBBC05"
|
||||
/>
|
||||
<path
|
||||
d="M26.4758 11.5201C29.9845 11.5201 33.0902 12.6396 35.6726 15.1286L42.4994 8.27151C38.3468 4.36633 32.9333 2 26.4758 2C19.4411 2 13.3274 4.83583 8.85118 9.35266C4.41266 13.8312 2.6 19.8826 2.6 26C2.6 22.1138 3.49286 18.5013 5.12352 15.2415L13.0825 21.4432C14.9692 15.7628 20.2689 11.5201 26.4871 11.5201H26.4758Z"
|
||||
fill="#EA4335"
|
||||
/>
|
||||
<path
|
||||
d="M13.0825 30.5598L5.12352 36.7585C6.87664 40.2742 9.47433 43.3014 12.6417 45.5624C16.5474 48.3504 21.3134 50 26.4758 50C32.7153 50 38.0758 48.3709 42.3041 44.2244L34.49 38.4525C32.3416 39.7368 29.6527 40.4829 26.4871 40.4829C20.2689 40.4829 14.9692 36.2402 13.0825 30.5598Z"
|
||||
fill="#34A853"
|
||||
/>
|
||||
</svg>
|
||||
);
|
||||
export default SvgGoogle;
|
||||
21
web/lib/opal/src/logos/index.ts
Normal file
21
web/lib/opal/src/logos/index.ts
Normal file
@@ -0,0 +1,21 @@
|
||||
// Barrel file for the opal logo components: re-exports each module's
// default export under an `Svg*` name so consumers can do
// `import { SvgX } from "@opal/logos"`.
export { default as SvgAnthropic } from "@opal/logos/anthropic";
export { default as SvgAws } from "@opal/logos/aws";
export { default as SvgAzure } from "@opal/logos/azure";
export { default as SvgBifrost } from "@opal/logos/bifrost";
export { default as SvgClaude } from "@opal/logos/claude";
export { default as SvgDeepseek } from "@opal/logos/deepseek";
export { default as SvgDiscord } from "@opal/logos/discord";
export { default as SvgGemini } from "@opal/logos/gemini";
export { default as SvgGoogle } from "@opal/logos/google";
export { default as SvgLitellm } from "@opal/logos/litellm";
export { default as SvgLmStudio } from "@opal/logos/lm-studio";
export { default as SvgMicrosoft } from "@opal/logos/microsoft";
export { default as SvgMistral } from "@opal/logos/mistral";
export { default as SvgOllama } from "@opal/logos/ollama";
export { default as SvgOnyxLogo } from "@opal/logos/onyx-logo";
export { default as SvgOnyxLogoTyped } from "@opal/logos/onyx-logo-typed";
export { default as SvgOnyxTyped } from "@opal/logos/onyx-typed";
export { default as SvgOpenai } from "@opal/logos/openai";
export { default as SvgOpenrouter } from "@opal/logos/openrouter";
export { default as SvgQwen } from "@opal/logos/qwen";
export { default as SvgSlack } from "@opal/logos/slack";
|
||||
44
web/lib/opal/src/logos/logos.stories.tsx
Normal file
44
web/lib/opal/src/logos/logos.stories.tsx
Normal file
@@ -0,0 +1,44 @@
|
||||
import React from "react";
|
||||
import type { Meta, StoryObj } from "@storybook/react";
|
||||
import * as Logos from "@opal/logos";
|
||||
|
||||
const logos = Object.entries(Logos).map(([name, Component]) => ({
|
||||
name: name.replace(/^Svg/, ""),
|
||||
Component,
|
||||
}));
|
||||
|
||||
const meta: Meta = {
|
||||
title: "opal/logos/All Logos",
|
||||
tags: ["autodocs"],
|
||||
};
|
||||
|
||||
export default meta;
|
||||
type Story = StoryObj;
|
||||
|
||||
export const AllLogos: Story = {
|
||||
render: () => (
|
||||
<div
|
||||
style={{
|
||||
display: "grid",
|
||||
gridTemplateColumns: "repeat(auto-fill, 120px)",
|
||||
gap: 16,
|
||||
}}
|
||||
>
|
||||
{logos.map(({ name, Component }) => (
|
||||
<div
|
||||
key={name}
|
||||
style={{
|
||||
display: "flex",
|
||||
flexDirection: "column",
|
||||
alignItems: "center",
|
||||
gap: 8,
|
||||
padding: 8,
|
||||
}}
|
||||
>
|
||||
<Component size={32} />
|
||||
<span style={{ fontSize: 11, textAlign: "center" }}>{name}</span>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
),
|
||||
};
|
||||
17
web/lib/opal/src/logos/microsoft.tsx
Normal file
17
web/lib/opal/src/logos/microsoft.tsx
Normal file
@@ -0,0 +1,17 @@
|
||||
import type { IconProps } from "@opal/types";
|
||||
const SvgMicrosoft = ({ size, ...props }: IconProps) => (
|
||||
<svg
|
||||
width={size}
|
||||
height={size}
|
||||
viewBox="0 0 52 52"
|
||||
fill="none"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
{...props}
|
||||
>
|
||||
<path d="M5 5H25V25H5V5Z" fill="#F35325" />
|
||||
<path d="M27 5H47V25H27V5Z" fill="#81BC06" />
|
||||
<path d="M5 27H25V47H5V27Z" fill="#05A6F0" />
|
||||
<path d="M27 27H47V47H27V27Z" fill="#FFBA08" />
|
||||
</svg>
|
||||
);
|
||||
export default SvgMicrosoft;
|
||||
29
web/lib/opal/src/logos/mistral.tsx
Normal file
29
web/lib/opal/src/logos/mistral.tsx
Normal file
@@ -0,0 +1,29 @@
|
||||
import type { IconProps } from "@opal/types";
|
||||
const SvgMistral = ({ size, ...props }: IconProps) => (
|
||||
<svg
|
||||
width={size}
|
||||
height={size}
|
||||
viewBox="0 0 52 52"
|
||||
fill="none"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
{...props}
|
||||
>
|
||||
<path d="M15.5004 8H8.50043V15H15.5004V8Z" fill="#FFD800" />
|
||||
<path d="M43.5004 8H36.5004L36.5001 15H43.5004V8Z" fill="#FFD800" />
|
||||
<path d="M22.5004 15H15.5004H8.50043V22H22.5004V15Z" fill="#FFAF00" />
|
||||
<path d="M43.5004 15H36.5001H29.4998V22H43.5004V15Z" fill="#FFAF00" />
|
||||
<path
|
||||
d="M43.5004 22H29.4998H22.5004H8.50043V29H15.5004H22.5004H29.4998H36.5001H43.5004V22Z"
|
||||
fill="#FF8205"
|
||||
/>
|
||||
<path d="M15.5004 29H8.50043L8.50021 36H15.5004V29Z" fill="#FA500F" />
|
||||
<path d="M29.4998 29H22.5004V36H29.4998V29Z" fill="#FA500F" />
|
||||
<path
|
||||
d="M43.5004 29H36.5001L36.5004 36H43.5002L43.5004 29Z"
|
||||
fill="#FA500F"
|
||||
/>
|
||||
<path d="M22.5004 36H15.5004H8.50021H1.5V43H22.5004V36Z" fill="#E10500" />
|
||||
<path d="M50.5 36H43.5002H36.5004H29.4998V43H50.5V36Z" fill="#E10500" />
|
||||
</svg>
|
||||
);
|
||||
export default SvgMistral;
|
||||
@@ -1,5 +1,5 @@
|
||||
import SvgOnyxLogo from "@opal/icons/onyx-logo";
|
||||
import SvgOnyxTyped from "@opal/icons/onyx-typed";
|
||||
import SvgOnyxLogo from "@opal/logos/onyx-logo";
|
||||
import SvgOnyxTyped from "@opal/logos/onyx-typed";
|
||||
import { cn } from "@opal/utils";
|
||||
|
||||
interface OnyxLogoTypedProps {
|
||||
36
web/lib/opal/src/logos/qwen.tsx
Normal file
36
web/lib/opal/src/logos/qwen.tsx
Normal file
@@ -0,0 +1,36 @@
|
||||
import type { IconProps } from "@opal/types";

// Qwen logo (52x52 viewBox): a gradient-filled geometric star shape with a
// white overlay path that carves out the interior, plus the linearGradient
// definition used by the first path. `size` sets width/height; remaining
// props are spread onto the <svg> element after the defaults so callers can
// override them.
// NOTE(review): the path data below was line-wrapped by the diff renderer;
// re-verify it against the generated source before relying on this copy.
// NOTE(review): the gradient's x2={4802} is far outside the 52-unit
// viewBox — confirm this value against the design source (possibly 48.02).
const SvgQwen = ({ size, ...props }: IconProps) => (
  <svg
    width={size}
    height={size}
    viewBox="0 0 52 52"
    fill="none"
    xmlns="http://www.w3.org/2000/svg"
    {...props}
  >
    <path
      d="M27.3186 2.74182C28.176 4.24727 29.0291 5.75708 29.88 7.26908C29.9488 7.39001 30.0834 7.46804 30.2225 7.46763H42.3358C42.7155 7.46763 43.0384 7.70762 43.3089 8.18108L46.4812 13.7883C46.8958 14.5236 47.0049 14.8312 46.5336 15.6145C45.9663 16.5527 45.4143 17.4996 44.8754 18.4509L44.0747 19.8865C43.8434 20.3141 43.5882 20.4974 43.9874 21.0036L49.7735 31.1207C50.1488 31.7774 50.0157 32.1985 49.6797 32.8007C48.7263 34.5134 47.7554 36.213 46.767 37.9061C46.4201 38.4996 45.999 38.7243 45.2834 38.7134C43.5882 38.6785 41.8973 38.6916 40.2064 38.7483C40.1339 38.752 40.0655 38.7942 40.0297 38.8574C38.0788 42.314 36.1115 45.7613 34.1279 49.1992C33.7592 49.8385 33.2988 49.9912 32.5461 49.9934C30.3709 49.9999 28.1782 50.0021 25.9637 49.9977C25.5513 49.9966 25.1534 49.7647 24.9491 49.4065L22.0364 44.3381C22.0026 44.2718 21.9298 44.2288 21.8554 44.2312H10.689C10.0671 44.2967 9.48242 44.229 8.93261 44.0305L5.4352 37.9868C5.22833 37.629 5.22663 37.1681 5.43084 36.8087L8.06426 32.1832C8.13928 32.0524 8.13928 31.8842 8.06426 31.7534C6.10931 28.3688 4.17585 24.972 2.24979 21.5709C1.9007 20.8945 1.87234 20.4887 2.45706 19.4654C3.47159 17.6916 4.47958 15.92 5.4832 14.1505C5.7712 13.64 6.14647 13.4218 6.75737 13.4196C8.64024 13.4117 10.5231 13.411 12.406 13.4174C12.5011 13.4167 12.5927 13.3628 12.6395 13.28L18.8008 2.53205C18.9864 2.20711 19.3474 2.00139 19.7216 2.00003L25.3549 2C26.0989 1.99996 26.9346 2.06982 27.3186 2.74182ZM19.7172 3.68654L13.4642 14.6283C13.4041 14.7315 13.289 14.798 13.1696 14.7985H6.91664C6.79446 14.7985 6.76391 14.8531 6.82718 14.96L19.5034 37.1185C19.5579 37.2101 19.5317 37.2538 19.4292 37.2559L13.3311 37.2887C13.1528 37.2827 12.9781 37.384 12.8947 37.5418L10.0148 42.5817C9.91878 42.7519 9.96896 42.8392 10.1631 42.8392L22.6343 42.8567C22.7346 42.8567 22.8088 42.9003 22.8612 42.9897L25.9222 48.3439C26.0226 48.5206 26.1229 48.5228 26.2255 48.3439L37.1475 29.2312L38.8559 26.216C38.9007 26.1358 39.0205 26.1358 39.0653 26.216L42.1722 31.7359C42.2188 31.8186 42.3108 31.8718 42.4056 31.8712L48.4339 31.8276C48.4954 31.8281 48.5403 31.7503 48.5103 31.6967L42.1831 20.6C42.1376 20.5258 42.1376 20.4276 42.1831 20.3534L42.8224 19.2472L45.266 14.9338C45.3183 14.8443 45.2921 14.7985 45.1896 14.7985H19.8917C19.763 14.7985 19.7325 14.7418 19.7979 14.6305L22.9266 9.16508C22.9501 9.12783 22.9625 9.08472 22.9625 9.04071C22.9625 8.99671 22.9501 8.95359 22.9266 8.91635L19.9463 3.68872C19.898 3.60117 19.7672 3.60001 19.7172 3.68654ZM33.5564 21.1192C33.6549 21.1199 33.6803 21.1635 33.6283 21.2501L26.112 34.4501C26.1013 34.4696 26.0855 34.4858 26.0663 34.4969C26.0471 34.5081 26.0252 34.5138 26.0029 34.5134C25.9808 34.5133 25.9591 34.5074 25.94 34.4963C25.9208 34.4852 25.9049 34.4693 25.8939 34.4501L18.3601 21.2894C18.3165 21.2152 18.3383 21.176 18.4212 21.1716L33.5564 21.1192Z"
      fill="url(#paint0_linear_3448_616)"
    />
    <path
      fillRule="evenodd"
      clipRule="evenodd"
      d="M19.7172 3.68654L13.4642 14.6283C13.4041 14.7315 13.289 14.798 13.1696 14.7985H6.91664C6.79446 14.7985 6.76391 14.8531 6.82718 14.96L19.5034 37.1185C19.5579 37.2101 19.5317 37.2538 19.4292 37.2559L13.3311 37.2887C13.1528 37.2827 12.9781 37.384 12.8947 37.5418L10.0148 42.5817C9.91878 42.7519 9.96896 42.8392 10.1631 42.8392L22.6343 42.8567C22.7346 42.8567 22.8088 42.9003 22.8612 42.9897L25.9222 48.3439C26.0226 48.5206 26.1229 48.5228 26.2255 48.3439L37.1475 29.2312L38.8559 26.216C38.9007 26.1358 39.0205 26.1358 39.0653 26.216L42.1722 31.7359C42.2188 31.8186 42.3108 31.8718 42.4056 31.8712L48.4339 31.8276C48.4954 31.8281 48.5403 31.7503 48.5103 31.6967L42.1831 20.6C42.1376 20.5258 42.1376 20.4276 42.1831 20.3534L42.8224 19.2472L45.266 14.9338C45.3183 14.8443 45.2921 14.7985 45.1896 14.7985H19.8917C19.763 14.7985 19.7325 14.7418 19.7979 14.6305L22.9266 9.16508C22.9501 9.12783 22.9625 9.08472 22.9625 9.04071C22.9625 8.99671 22.9501 8.95359 22.9266 8.91635L19.9463 3.68872C19.898 3.60117 19.7672 3.60001 19.7172 3.68654ZM33.5564 21.1192C33.5556 21.1192 33.5549 21.1192 33.5541 21.1192L18.4212 21.1716C18.3383 21.176 18.3165 21.2152 18.3601 21.2894L25.8939 34.4501C25.9049 34.4693 25.9208 34.4852 25.94 34.4963C25.9591 34.5074 25.9808 34.5133 26.0029 34.5134C26.0252 34.5138 26.0471 34.5081 26.0663 34.4969C26.0855 34.4858 26.1013 34.4696 26.112 34.4501L33.6283 21.2501C33.6803 21.1635 33.6549 21.1199 33.5564 21.1192Z"
      fill="white"
    />
    <defs>
      <linearGradient
        id="paint0_linear_3448_616"
        x1={2}
        y1={1.99971}
        x2={4802}
        y2={1.99971}
        gradientUnits="userSpaceOnUse"
      >
        <stop stopColor="#6336E7" stopOpacity={0.84} />
        <stop offset={1} stopColor="#6F69F7" stopOpacity={0.84} />
      </linearGradient>
    </defs>
  </svg>
);

export default SvgQwen;
|
||||
29
web/lib/opal/src/logos/slack.tsx
Normal file
29
web/lib/opal/src/logos/slack.tsx
Normal file
@@ -0,0 +1,29 @@
|
||||
import type { IconProps } from "@opal/types";
|
||||
const SvgSlack = ({ size, ...props }: IconProps) => (
|
||||
<svg
|
||||
width={size}
|
||||
height={size}
|
||||
viewBox="0 0 52 52"
|
||||
fill="none"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
{...props}
|
||||
>
|
||||
<path
|
||||
d="M12.6977 32.0796C12.6977 34.7532 10.5386 36.914 7.86714 36.914C5.1957 36.914 3.0366 34.7532 3.0366 32.0796C3.0366 29.406 5.1957 27.2452 7.86714 27.2452H12.6977V32.0796ZM15.113 32.0796C15.113 29.406 17.2721 27.2452 19.9435 27.2452C22.615 27.2452 24.7741 29.406 24.7741 32.0796V44.1656C24.7741 46.8392 22.615 49 19.9435 49C17.2721 49 15.113 46.8392 15.113 44.1656V32.0796Z"
|
||||
fill="#E01E5A"
|
||||
/>
|
||||
<path
|
||||
d="M19.9435 12.6688C17.2721 12.6688 15.113 10.508 15.113 7.83439C15.113 5.16083 17.2721 3 19.9435 3C22.615 3 24.7741 5.16083 24.7741 7.83439V12.6688H19.9435ZM19.9435 15.1226C22.615 15.1226 24.7741 17.2834 24.7741 19.957C24.7741 22.6306 22.615 24.7914 19.9435 24.7914H7.83055C5.15911 24.7914 3 22.6306 3 19.957C3 17.2834 5.15911 15.1226 7.83055 15.1226H19.9435Z"
|
||||
fill="#36C5F0"
|
||||
/>
|
||||
<path
|
||||
d="M39.3023 19.957C39.3023 17.2834 41.4614 15.1226 44.1329 15.1226C46.8043 15.1226 48.9634 17.2834 48.9634 19.957C48.9634 22.6306 46.8043 24.7914 44.1329 24.7914H39.3023V19.957ZM36.887 19.957C36.887 22.6306 34.7279 24.7914 32.0565 24.7914C29.385 24.7914 27.2259 22.6306 27.2259 19.957V7.83439C27.2259 5.16083 29.385 3 32.0565 3C34.7279 3 36.887 5.16083 36.887 7.83439V19.957Z"
|
||||
fill="#2EB67D"
|
||||
/>
|
||||
<path
|
||||
d="M32.0565 39.3312C34.7279 39.3312 36.887 41.492 36.887 44.1656C36.887 46.8392 34.7279 49 32.0565 49C29.385 49 27.2259 46.8392 27.2259 44.1656V39.3312H32.0565ZM32.0565 36.914C29.385 36.914 27.2259 34.7532 27.2259 32.0796C27.2259 29.406 29.385 27.2452 32.0565 27.2452H44.1694C46.8409 27.2452 49 29.406 49 32.0796C49 34.7532 46.8409 36.914 44.1694 36.914H32.0565Z"
|
||||
fill="#ECB22E"
|
||||
/>
|
||||
</svg>
|
||||
);
|
||||
export default SvgSlack;
|
||||
@@ -5,7 +5,7 @@ import { useRouter } from "next/navigation";
|
||||
import { useState } from "react";
|
||||
import { SlackTokensForm } from "./SlackTokensForm";
|
||||
import * as SettingsLayouts from "@/layouts/settings-layouts";
|
||||
import { SvgSlack } from "@opal/icons";
|
||||
import { SvgSlack } from "@opal/logos";
|
||||
|
||||
export function NewSlackBotForm() {
|
||||
const [formValues] = useState({
|
||||
|
||||
@@ -5,7 +5,7 @@ import { SlackChannelConfigCreationForm } from "@/app/admin/bots/[bot-id]/channe
|
||||
import { ErrorCallout } from "@/components/ErrorCallout";
|
||||
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
|
||||
import * as SettingsLayouts from "@/layouts/settings-layouts";
|
||||
import { SvgSlack } from "@opal/icons";
|
||||
import { SvgSlack } from "@opal/logos";
|
||||
import { useSlackChannelConfigs } from "@/app/admin/bots/[bot-id]/hooks";
|
||||
import { useDocumentSets } from "@/app/admin/documents/sets/hooks";
|
||||
import { useAgents } from "@/hooks/useAgents";
|
||||
|
||||
@@ -5,7 +5,7 @@ import { SlackChannelConfigCreationForm } from "@/app/admin/bots/[bot-id]/channe
|
||||
import { ErrorCallout } from "@/components/ErrorCallout";
|
||||
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
|
||||
import * as SettingsLayouts from "@/layouts/settings-layouts";
|
||||
import { SvgSlack } from "@opal/icons";
|
||||
import { SvgSlack } from "@opal/logos";
|
||||
import { useDocumentSets } from "@/app/admin/documents/sets/hooks";
|
||||
import { useAgents } from "@/hooks/useAgents";
|
||||
import { useStandardAnswerCategories } from "@/app/ee/admin/standard-answer/hooks";
|
||||
|
||||
@@ -7,7 +7,7 @@ import SlackChannelConfigsTable from "./SlackChannelConfigsTable";
|
||||
import { useSlackBot, useSlackChannelConfigsByBot } from "./hooks";
|
||||
import { ExistingSlackBotForm } from "../SlackBotUpdateForm";
|
||||
import * as SettingsLayouts from "@/layouts/settings-layouts";
|
||||
import { SvgSlack } from "@opal/icons";
|
||||
import { SvgSlack } from "@opal/logos";
|
||||
import { getErrorMsg } from "@/lib/error";
|
||||
|
||||
function SlackBotEditContent({ botId }: { botId: string }) {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { defaultTailwindCSS } from "@/components/icons/icons";
|
||||
import { getModelIcon } from "@/lib/llmConfig/providers";
|
||||
import { getModelIcon } from "@/lib/llmConfig";
|
||||
import { IconProps } from "@opal/types";
|
||||
|
||||
export interface ModelIconProps extends IconProps {
|
||||
|
||||
@@ -1 +1 @@
|
||||
export { default } from "@/refresh-pages/admin/LLMProviderConfigurationPage";
|
||||
export { default } from "@/refresh-pages/admin/LLMConfigurationPage";
|
||||
|
||||
@@ -17,7 +17,8 @@ import InputComboBox from "@/refresh-components/inputs/InputComboBox";
|
||||
import { FormField } from "@/refresh-components/form/FormField";
|
||||
import { Vertical, Horizontal } from "@/layouts/input-layouts";
|
||||
import { Section } from "@/layouts/general-layouts";
|
||||
import { SvgArrowExchange, SvgOnyxLogo } from "@opal/icons";
|
||||
import { SvgArrowExchange } from "@opal/icons";
|
||||
import { SvgOnyxLogo } from "@opal/logos";
|
||||
import { Disabled } from "@opal/core";
|
||||
import type { IconProps } from "@opal/types";
|
||||
import { VoiceProviderView } from "@/hooks/useVoiceProviders";
|
||||
@@ -401,7 +402,7 @@ export default function VoiceProviderSetupModal({
|
||||
options={existingApiKeyOptions}
|
||||
separatorLabel="Reuse OpenAI API Keys"
|
||||
strict={false}
|
||||
showAddPrefix
|
||||
createPrefix="Add"
|
||||
/>
|
||||
) : (
|
||||
<PasswordInputTypeIn
|
||||
|
||||
@@ -5,7 +5,7 @@ import { Button } from "@opal/components";
|
||||
import { Text } from "@opal/components";
|
||||
import { ContentAction } from "@opal/layouts";
|
||||
import { SvgEyeOff, SvgX } from "@opal/icons";
|
||||
import { getModelIcon } from "@/lib/llmConfig/providers";
|
||||
import { getModelIcon } from "@/lib/llmConfig";
|
||||
import AgentMessage, {
|
||||
AgentMessageProps,
|
||||
} from "@/app/app/message/messageComponents/AgentMessage";
|
||||
@@ -28,6 +28,8 @@ export interface MultiModelPanelProps {
|
||||
isNonPreferredInSelection: boolean;
|
||||
/** Callback when user clicks this panel to select as preferred */
|
||||
onSelect: () => void;
|
||||
/** Callback to deselect this panel as preferred */
|
||||
onDeselect?: () => void;
|
||||
/** Callback to hide/show this panel */
|
||||
onToggleVisibility: () => void;
|
||||
/** Props to pass through to AgentMessage */
|
||||
@@ -63,6 +65,7 @@ export default function MultiModelPanel({
|
||||
isHidden,
|
||||
isNonPreferredInSelection,
|
||||
onSelect,
|
||||
onDeselect,
|
||||
onToggleVisibility,
|
||||
agentMessageProps,
|
||||
errorMessage,
|
||||
@@ -93,11 +96,25 @@ export default function MultiModelPanel({
|
||||
rightChildren={
|
||||
<div className="flex items-center gap-1 px-2">
|
||||
{isPreferred && (
|
||||
<span className="text-action-link-05 shrink-0">
|
||||
<Text font="secondary-body" color="inherit" nowrap>
|
||||
Preferred Response
|
||||
</Text>
|
||||
</span>
|
||||
<>
|
||||
<span className="text-action-link-05 shrink-0">
|
||||
<Text font="secondary-body" color="inherit" nowrap>
|
||||
Preferred Response
|
||||
</Text>
|
||||
</span>
|
||||
{onDeselect && (
|
||||
<Button
|
||||
prominence="tertiary"
|
||||
icon={SvgX}
|
||||
size="sm"
|
||||
onClick={(e) => {
|
||||
e.stopPropagation();
|
||||
onDeselect();
|
||||
}}
|
||||
tooltip="Deselect preferred response"
|
||||
/>
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
{!isPreferred && (
|
||||
<Button
|
||||
|
||||
@@ -30,7 +30,7 @@ const SELECTION_PANEL_W = 400;
|
||||
// Compact width for hidden panels in the carousel track
|
||||
const HIDDEN_PANEL_W = 220;
|
||||
// Generation-mode panel widths (from Figma)
|
||||
const GEN_PANEL_W_2 = 640; // 2 panels side-by-side
|
||||
const GEN_PANEL_W_2 = 720; // 2 panels side-by-side
|
||||
const GEN_PANEL_W_3 = 436; // 3 panels side-by-side
|
||||
// Gap between panels — matches CSS gap-6 (24px)
|
||||
const PANEL_GAP = 24;
|
||||
@@ -64,14 +64,31 @@ export default function MultiModelResponseView({
|
||||
onMessageSelection,
|
||||
onHiddenPanelsChange,
|
||||
}: MultiModelResponseViewProps) {
|
||||
const [preferredIndex, setPreferredIndex] = useState<number | null>(null);
|
||||
// Initialize preferredIndex from the backend's preferred_response_id when
|
||||
// loading an existing conversation.
|
||||
const [preferredIndex, setPreferredIndex] = useState<number | null>(() => {
|
||||
if (!parentMessage?.preferredResponseId) return null;
|
||||
const match = responses.find(
|
||||
(r) => r.messageId === parentMessage.preferredResponseId
|
||||
);
|
||||
return match?.modelIndex ?? null;
|
||||
});
|
||||
const [hiddenPanels, setHiddenPanels] = useState<Set<number>>(new Set());
|
||||
// Controls animation: false = panels at start position, true = panels at peek position
|
||||
const [selectionEntered, setSelectionEntered] = useState(false);
|
||||
const [selectionEntered, setSelectionEntered] = useState(
|
||||
() => preferredIndex !== null
|
||||
);
|
||||
// Tracks the deselect animation timeout so it can be cancelled if the user
|
||||
// re-selects a panel during the 450ms animation window.
|
||||
const deselectTimeoutRef = useRef<ReturnType<typeof setTimeout> | null>(null);
|
||||
// True while the reverse animation is playing (deselect → back to equal panels)
|
||||
const [selectionExiting, setSelectionExiting] = useState(false);
|
||||
// Measures the overflow-hidden carousel container for responsive preferred-panel sizing.
|
||||
const [trackContainerW, setTrackContainerW] = useState(0);
|
||||
const roRef = useRef<ResizeObserver | null>(null);
|
||||
const trackContainerElRef = useRef<HTMLDivElement | null>(null);
|
||||
const trackContainerRef = useCallback((el: HTMLDivElement | null) => {
|
||||
trackContainerElRef.current = el;
|
||||
if (roRef.current) {
|
||||
roRef.current.disconnect();
|
||||
roRef.current = null;
|
||||
@@ -90,6 +107,9 @@ export default function MultiModelResponseView({
|
||||
number | null
|
||||
>(null);
|
||||
const preferredRoRef = useRef<ResizeObserver | null>(null);
|
||||
// Refs to each panel wrapper for height animation on deselect
|
||||
const panelElsRef = useRef<Map<number, HTMLDivElement>>(new Map());
|
||||
|
||||
// Tracks which non-preferred panels overflow the preferred height cap
|
||||
const [overflowingPanels, setOverflowingPanels] = useState<Set<number>>(
|
||||
new Set()
|
||||
@@ -152,12 +172,43 @@ export default function MultiModelResponseView({
|
||||
const handleSelectPreferred = useCallback(
|
||||
(modelIndex: number) => {
|
||||
if (isGenerating) return;
|
||||
|
||||
// Cancel any pending deselect animation so it doesn't overwrite this selection
|
||||
if (deselectTimeoutRef.current !== null) {
|
||||
clearTimeout(deselectTimeoutRef.current);
|
||||
deselectTimeoutRef.current = null;
|
||||
setSelectionExiting(false);
|
||||
}
|
||||
|
||||
// Only freeze scroll when entering selection mode for the first time.
|
||||
// When switching preferred within selection mode, panels are already
|
||||
// capped and the track just slides — no height changes to worry about.
|
||||
const alreadyInSelection = preferredIndex !== null;
|
||||
if (!alreadyInSelection) {
|
||||
const scrollContainer = trackContainerElRef.current?.closest(
|
||||
"[data-chat-scroll]"
|
||||
) as HTMLElement | null;
|
||||
const scrollTop = scrollContainer?.scrollTop ?? 0;
|
||||
if (scrollContainer) scrollContainer.style.overflow = "hidden";
|
||||
|
||||
setTimeout(() => {
|
||||
if (scrollContainer) {
|
||||
scrollContainer.scrollTop = scrollTop;
|
||||
requestAnimationFrame(() => {
|
||||
requestAnimationFrame(() => {
|
||||
if (scrollContainer) {
|
||||
scrollContainer.scrollTop = scrollTop;
|
||||
scrollContainer.style.overflow = "";
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
}, 450);
|
||||
}
|
||||
|
||||
setPreferredIndex(modelIndex);
|
||||
const response = responses.find((r) => r.modelIndex === modelIndex);
|
||||
if (!response) return;
|
||||
if (onMessageSelection) {
|
||||
onMessageSelection(response.nodeId);
|
||||
}
|
||||
|
||||
// Persist preferred response to backend + update local tree so the
|
||||
// input bar unblocks (awaitingPreferredSelection clears).
|
||||
@@ -185,17 +236,111 @@ export default function MultiModelResponseView({
|
||||
[
|
||||
isGenerating,
|
||||
responses,
|
||||
onMessageSelection,
|
||||
preferredIndex,
|
||||
parentMessage,
|
||||
currentSessionId,
|
||||
updateSessionMessageTree,
|
||||
]
|
||||
);
|
||||
|
||||
// NOTE: Deselect only clears the local tree — no backend call to clear
|
||||
// preferred_response_id. The SetPreferredResponseRequest model doesn't
|
||||
// accept null. A backend endpoint for clearing preference would be needed
|
||||
// if deselect should persist across reloads.
|
||||
const handleDeselectPreferred = useCallback(() => {
|
||||
const scrollContainer = trackContainerElRef.current?.closest(
|
||||
"[data-chat-scroll]"
|
||||
) as HTMLElement | null;
|
||||
|
||||
// Animate panels back to equal positions, then clear preferred after transition
|
||||
setSelectionExiting(true);
|
||||
setSelectionEntered(false);
|
||||
deselectTimeoutRef.current = setTimeout(() => {
|
||||
deselectTimeoutRef.current = null;
|
||||
const scrollTop = scrollContainer?.scrollTop ?? 0;
|
||||
if (scrollContainer) scrollContainer.style.overflow = "hidden";
|
||||
|
||||
// Before clearing state, animate each capped panel's height from
|
||||
// its current clientHeight to its natural scrollHeight.
|
||||
const animations: Animation[] = [];
|
||||
panelElsRef.current.forEach((el, modelIndex) => {
|
||||
if (modelIndex === preferredIndex) return;
|
||||
if (hiddenPanels.has(modelIndex)) return;
|
||||
const from = el.clientHeight;
|
||||
const to = el.scrollHeight;
|
||||
if (to <= from) return;
|
||||
// Lock current height, remove maxHeight cap, then animate
|
||||
el.style.maxHeight = `${from}px`;
|
||||
el.style.overflow = "hidden";
|
||||
const anim = el.animate(
|
||||
[{ maxHeight: `${from}px` }, { maxHeight: `${to}px` }],
|
||||
{
|
||||
duration: 350,
|
||||
easing: "cubic-bezier(0.2, 0, 0, 1)",
|
||||
fill: "forwards",
|
||||
}
|
||||
);
|
||||
animations.push(anim);
|
||||
anim.onfinish = () => {
|
||||
el.style.maxHeight = "";
|
||||
el.style.overflow = "";
|
||||
};
|
||||
});
|
||||
|
||||
setSelectionExiting(false);
|
||||
setPreferredIndex(null);
|
||||
|
||||
// Restore scroll after animations + React settle
|
||||
const restoreScroll = () => {
|
||||
requestAnimationFrame(() => {
|
||||
if (scrollContainer) {
|
||||
scrollContainer.scrollTop = scrollTop;
|
||||
scrollContainer.style.overflow = "";
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
if (animations.length > 0) {
|
||||
Promise.all(animations.map((a) => a.finished))
|
||||
.then(restoreScroll)
|
||||
.catch(restoreScroll);
|
||||
} else {
|
||||
restoreScroll();
|
||||
}
|
||||
|
||||
// Clear preferredResponseId in the local tree so input bar re-gates
|
||||
if (parentMessage && currentSessionId) {
|
||||
const tree = useChatSessionStore
|
||||
.getState()
|
||||
.sessions.get(currentSessionId)?.messageTree;
|
||||
if (tree) {
|
||||
const userMsg = tree.get(parentMessage.nodeId);
|
||||
if (userMsg) {
|
||||
const updated = new Map(tree);
|
||||
updated.set(parentMessage.nodeId, {
|
||||
...userMsg,
|
||||
preferredResponseId: undefined,
|
||||
});
|
||||
updateSessionMessageTree(currentSessionId, updated);
|
||||
}
|
||||
}
|
||||
}
|
||||
}, 450);
|
||||
}, [
|
||||
parentMessage,
|
||||
currentSessionId,
|
||||
updateSessionMessageTree,
|
||||
preferredIndex,
|
||||
hiddenPanels,
|
||||
]);
|
||||
|
||||
// Clear preferred selection when generation starts
|
||||
// Reset selection state when generation restarts
|
||||
useEffect(() => {
|
||||
if (isGenerating) {
|
||||
setPreferredIndex(null);
|
||||
setHasEnteredSelection(false);
|
||||
setSelectionExiting(false);
|
||||
}
|
||||
}, [isGenerating]);
|
||||
|
||||
@@ -204,22 +349,39 @@ export default function MultiModelResponseView({
|
||||
(r) => r.modelIndex === preferredIndex
|
||||
);
|
||||
|
||||
// Selection mode when preferred is set, found in responses, not generating, and at least 2 visible panels
|
||||
const showSelectionMode =
|
||||
// Track whether selection mode was ever entered — once it has been,
|
||||
// we stay in the selection layout (even after deselect) to avoid a
|
||||
// jarring DOM swap between the two layout strategies.
|
||||
const [hasEnteredSelection, setHasEnteredSelection] = useState(
|
||||
() => preferredIndex !== null
|
||||
);
|
||||
|
||||
const isActivelySelected =
|
||||
preferredIndex !== null &&
|
||||
preferredIdx !== -1 &&
|
||||
!isGenerating &&
|
||||
visibleResponses.length > 1;
|
||||
|
||||
// Trigger the slide-out animation one frame after entering selection mode
|
||||
useEffect(() => {
|
||||
if (!showSelectionMode) {
|
||||
setSelectionEntered(false);
|
||||
if (isActivelySelected) setHasEnteredSelection(true);
|
||||
}, [isActivelySelected]);
|
||||
|
||||
// Use the selection layout once a preferred response has been chosen,
|
||||
// even after deselect. Only fall through to generation layout before
|
||||
// the first selection or during active streaming.
|
||||
const showSelectionMode = isActivelySelected || hasEnteredSelection;
|
||||
|
||||
// Trigger the slide-out animation one frame after a preferred panel is selected.
|
||||
// Uses isActivelySelected (not showSelectionMode) so re-selecting after a
|
||||
// deselect still triggers the animation.
|
||||
useEffect(() => {
|
||||
if (!isActivelySelected) {
|
||||
// Don't reset selectionEntered here — handleDeselectPreferred manages it
|
||||
return;
|
||||
}
|
||||
const raf = requestAnimationFrame(() => setSelectionEntered(true));
|
||||
return () => cancelAnimationFrame(raf);
|
||||
}, [showSelectionMode]);
|
||||
}, [isActivelySelected]);
|
||||
|
||||
// Build panel props — isHidden reflects actual hidden state
|
||||
const buildPanelProps = useCallback(
|
||||
@@ -231,6 +393,7 @@ export default function MultiModelResponseView({
|
||||
isHidden: hiddenPanels.has(response.modelIndex),
|
||||
isNonPreferredInSelection: isNonPreferred,
|
||||
onSelect: () => handleSelectPreferred(response.modelIndex),
|
||||
onDeselect: handleDeselectPreferred,
|
||||
onToggleVisibility: () => toggleVisibility(response.modelIndex),
|
||||
agentMessageProps: {
|
||||
rawPackets: response.packets,
|
||||
@@ -255,6 +418,7 @@ export default function MultiModelResponseView({
|
||||
preferredIndex,
|
||||
hiddenPanels,
|
||||
handleSelectPreferred,
|
||||
handleDeselectPreferred,
|
||||
toggleVisibility,
|
||||
chatState,
|
||||
llmManager,
|
||||
@@ -310,25 +474,30 @@ export default function MultiModelResponseView({
|
||||
<div
|
||||
ref={trackContainerRef}
|
||||
className="w-full overflow-hidden"
|
||||
style={{
|
||||
maskImage: `linear-gradient(to right, transparent 0px, black ${PEEK_W}px, black calc(100% - ${PEEK_W}px), transparent 100%)`,
|
||||
WebkitMaskImage: `linear-gradient(to right, transparent 0px, black ${PEEK_W}px, black calc(100% - ${PEEK_W}px), transparent 100%)`,
|
||||
}}
|
||||
style={
|
||||
isActivelySelected
|
||||
? {
|
||||
maskImage: `linear-gradient(to right, transparent 0px, black ${PEEK_W}px, black calc(100% - ${PEEK_W}px), transparent 100%)`,
|
||||
WebkitMaskImage: `linear-gradient(to right, transparent 0px, black ${PEEK_W}px, black calc(100% - ${PEEK_W}px), transparent 100%)`,
|
||||
}
|
||||
: undefined
|
||||
}
|
||||
>
|
||||
<div
|
||||
className="flex items-start"
|
||||
style={{
|
||||
gap: `${PANEL_GAP}px`,
|
||||
transition: selectionEntered
|
||||
? "transform 0.45s cubic-bezier(0.2, 0, 0, 1)"
|
||||
: "none",
|
||||
transition:
|
||||
selectionEntered || selectionExiting
|
||||
? "transform 0.45s cubic-bezier(0.2, 0, 0, 1)"
|
||||
: "none",
|
||||
transform: trackTransform,
|
||||
}}
|
||||
>
|
||||
{responses.map((r, i) => {
|
||||
const isHidden = hiddenPanels.has(r.modelIndex);
|
||||
const isPref = r.modelIndex === preferredIndex;
|
||||
const isNonPref = !isHidden && !isPref;
|
||||
const isNonPref = !isHidden && !isPref && preferredIndex !== null;
|
||||
const finalW = selectionWidths[i]!;
|
||||
const startW = isHidden ? HIDDEN_PANEL_W : SELECTION_PANEL_W;
|
||||
const capped = isNonPref && preferredPanelHeight != null;
|
||||
@@ -337,6 +506,11 @@ export default function MultiModelResponseView({
|
||||
<div
|
||||
key={r.modelIndex}
|
||||
ref={(el) => {
|
||||
if (el) {
|
||||
panelElsRef.current.set(r.modelIndex, el);
|
||||
} else {
|
||||
panelElsRef.current.delete(r.modelIndex);
|
||||
}
|
||||
if (isPref) preferredPanelRef(el);
|
||||
if (capped && el) {
|
||||
const doesOverflow = el.scrollHeight > el.clientHeight;
|
||||
@@ -353,9 +527,10 @@ export default function MultiModelResponseView({
|
||||
style={{
|
||||
width: `${selectionEntered ? finalW : startW}px`,
|
||||
flexShrink: 0,
|
||||
transition: selectionEntered
|
||||
? "width 0.45s cubic-bezier(0.2, 0, 0, 1)"
|
||||
: "none",
|
||||
transition:
|
||||
selectionEntered || selectionExiting
|
||||
? "width 0.45s cubic-bezier(0.2, 0, 0, 1)"
|
||||
: "none",
|
||||
maxHeight: capped ? preferredPanelHeight : undefined,
|
||||
overflow: capped ? "hidden" : undefined,
|
||||
position: capped ? "relative" : undefined,
|
||||
@@ -388,7 +563,7 @@ export default function MultiModelResponseView({
|
||||
|
||||
return (
|
||||
<div className="overflow-x-auto">
|
||||
<div className="flex gap-6 items-start w-full">
|
||||
<div className="flex gap-6 items-start justify-center w-full">
|
||||
{responses.map((r) => {
|
||||
const isHidden = hiddenPanels.has(r.modelIndex);
|
||||
return (
|
||||
|
||||
@@ -18,7 +18,7 @@ import {
|
||||
isRecommendedModel,
|
||||
} from "@/app/craft/onboarding/constants";
|
||||
import { ToggleWarningModal } from "./ToggleWarningModal";
|
||||
import { getModelIcon } from "@/lib/llmConfig/providers";
|
||||
import { getModelIcon } from "@/lib/llmConfig";
|
||||
import { Section } from "@/layouts/general-layouts";
|
||||
import {
|
||||
Accordion,
|
||||
|
||||
@@ -48,7 +48,7 @@ import NotAllowedModal from "@/app/craft/onboarding/components/NotAllowedModal";
|
||||
import { useOnboarding } from "@/app/craft/onboarding/BuildOnboardingProvider";
|
||||
import { useLLMProviders } from "@/hooks/useLLMProviders";
|
||||
import { useUser } from "@/providers/UserProvider";
|
||||
import { getModelIcon } from "@/lib/llmConfig/providers";
|
||||
import { getModelIcon } from "@/lib/llmConfig";
|
||||
import {
|
||||
getBuildUserPersona,
|
||||
getPersonaInfo,
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
:root {
|
||||
--app-page-main-content-width: 52.5rem;
|
||||
--app-page-main-content-width: 45rem;
|
||||
--block-width-form-input-min: 10rem;
|
||||
|
||||
--container-sm: 42rem;
|
||||
|
||||
@@ -45,6 +45,9 @@ import { personaIncludesRetrieval } from "@/app/app/services/lib";
|
||||
import { useQueryController } from "@/providers/QueryControllerProvider";
|
||||
import { eeGated } from "@/ce";
|
||||
import EESearchUI from "@/ee/sections/SearchUI";
|
||||
import useMultiModelChat from "@/hooks/useMultiModelChat";
|
||||
import ModelSelector from "@/refresh-components/popovers/ModelSelector";
|
||||
import { Section } from "@/layouts/general-layouts";
|
||||
|
||||
const SearchUI = eeGated(EESearchUI);
|
||||
|
||||
@@ -105,6 +108,20 @@ export default function NRFPage({ isSidePanel = false }: NRFPageProps) {
|
||||
// If no LLM provider is configured (e.g., fresh signup), the input bar is
|
||||
// disabled and a "Set up an LLM" button is shown (see bottom of component).
|
||||
const llmManager = useLlmManager(undefined, liveAgent ?? undefined);
|
||||
const multiModel = useMultiModelChat(llmManager);
|
||||
|
||||
// Sync single-model selection to llmManager so the submission path
|
||||
// uses the correct provider/version (mirrors AppPage behaviour).
|
||||
useEffect(() => {
|
||||
if (multiModel.selectedModels.length === 1) {
|
||||
const model = multiModel.selectedModels[0]!;
|
||||
llmManager.updateCurrentLlm({
|
||||
name: model.name,
|
||||
provider: model.provider,
|
||||
modelName: model.modelName,
|
||||
});
|
||||
}
|
||||
}, [multiModel.selectedModels]);
|
||||
|
||||
// Deep research toggle
|
||||
const { deepResearchEnabled, toggleDeepResearch } = useDeepResearchToggle({
|
||||
@@ -295,12 +312,17 @@ export default function NRFPage({ isSidePanel = false }: NRFPageProps) {
|
||||
|
||||
// If we already have messages (chat session started), always use chat mode
|
||||
// (matches AppPage behavior where existing sessions bypass classification)
|
||||
const selectedModels = multiModel.isMultiModelActive
|
||||
? multiModel.selectedModels
|
||||
: undefined;
|
||||
|
||||
if (hasMessages) {
|
||||
onSubmit({
|
||||
message: submittedMessage,
|
||||
currentMessageFiles: currentMessageFiles,
|
||||
deepResearch: deepResearchEnabled,
|
||||
additionalContext,
|
||||
selectedModels,
|
||||
});
|
||||
return;
|
||||
}
|
||||
@@ -312,6 +334,7 @@ export default function NRFPage({ isSidePanel = false }: NRFPageProps) {
|
||||
currentMessageFiles: currentMessageFiles,
|
||||
deepResearch: deepResearchEnabled,
|
||||
additionalContext,
|
||||
selectedModels,
|
||||
});
|
||||
};
|
||||
|
||||
@@ -328,6 +351,8 @@ export default function NRFPage({ isSidePanel = false }: NRFPageProps) {
|
||||
submitQuery,
|
||||
tabReadingEnabled,
|
||||
currentTabUrl,
|
||||
multiModel.isMultiModelActive,
|
||||
multiModel.selectedModels,
|
||||
]
|
||||
);
|
||||
|
||||
@@ -456,6 +481,7 @@ export default function NRFPage({ isSidePanel = false }: NRFPageProps) {
|
||||
onResubmit={handleResubmitLastMessage}
|
||||
deepResearchEnabled={deepResearchEnabled}
|
||||
anchorNodeId={anchorNodeId}
|
||||
selectedModels={multiModel.selectedModels}
|
||||
/>
|
||||
</ChatScrollContainer>
|
||||
</>
|
||||
@@ -464,7 +490,23 @@ export default function NRFPage({ isSidePanel = false }: NRFPageProps) {
|
||||
{/* Welcome message - centered when no messages and not in search mode */}
|
||||
{!hasMessages && !isSearch && (
|
||||
<div className="relative w-full flex-1 flex flex-col items-center justify-end">
|
||||
<WelcomeMessage isDefaultAgent />
|
||||
<Section
|
||||
flexDirection="row"
|
||||
justifyContent="between"
|
||||
alignItems="end"
|
||||
className="max-w-[var(--app-page-main-content-width)]"
|
||||
>
|
||||
<WelcomeMessage isDefaultAgent />
|
||||
{liveAgent && !llmManager.isLoadingProviders && (
|
||||
<ModelSelector
|
||||
llmManager={llmManager}
|
||||
selectedModels={multiModel.selectedModels}
|
||||
onAdd={multiModel.addModel}
|
||||
onRemove={multiModel.removeModel}
|
||||
onReplace={multiModel.replaceModel}
|
||||
/>
|
||||
)}
|
||||
</Section>
|
||||
<Spacer rem={1.5} />
|
||||
</div>
|
||||
)}
|
||||
@@ -478,6 +520,17 @@ export default function NRFPage({ isSidePanel = false }: NRFPageProps) {
|
||||
"max-w-[var(--app-page-main-content-width)] px-4"
|
||||
)}
|
||||
>
|
||||
{hasMessages && liveAgent && !llmManager.isLoadingProviders && (
|
||||
<div className="pb-1">
|
||||
<ModelSelector
|
||||
llmManager={llmManager}
|
||||
selectedModels={multiModel.selectedModels}
|
||||
onAdd={multiModel.addModel}
|
||||
onRemove={multiModel.removeModel}
|
||||
onReplace={multiModel.replaceModel}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
<AppInputBar
|
||||
ref={chatInputBarRef}
|
||||
deepResearchEnabled={deepResearchEnabled}
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
* Tests logo icons to ensure they render correctly with proper accessibility
|
||||
* and support various display sizes.
|
||||
*/
|
||||
import { SvgBifrost } from "@opal/icons";
|
||||
import { SvgBifrost } from "@opal/logos";
|
||||
import { render } from "@tests/setup/test-utils";
|
||||
import { GithubIcon, GitbookIcon, ConfluenceIcon } from "./icons";
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
import { useMemo } from "react";
|
||||
import { parseLlmDescriptor, structureValue } from "@/lib/llmConfig/utils";
|
||||
import { DefaultModel, LLMProviderDescriptor } from "@/interfaces/llm";
|
||||
import { getModelIcon } from "@/lib/llmConfig/providers";
|
||||
import { getModelIcon } from "@/lib/llmConfig";
|
||||
import InputSelect from "@/refresh-components/inputs/InputSelect";
|
||||
import { createIcon } from "@/components/icons/icons";
|
||||
|
||||
|
||||
@@ -144,7 +144,9 @@ export function useAdminLLMProviders() {
|
||||
*/
|
||||
export function useWellKnownLLMProvider(providerName: LLMProviderName) {
|
||||
const { data, error, isLoading } = useSWR<WellKnownLLMProviderDescriptor>(
|
||||
providerName ? SWR_KEYS.wellKnownLlmProvider(providerName) : null,
|
||||
providerName && providerName !== LLMProviderName.CUSTOM
|
||||
? SWR_KEYS.wellKnownLlmProvider(providerName)
|
||||
: null,
|
||||
errorHandlingFetcher,
|
||||
{
|
||||
revalidateOnFocus: false,
|
||||
@@ -160,6 +162,35 @@ export function useWellKnownLLMProvider(providerName: LLMProviderName) {
|
||||
};
|
||||
}
|
||||
|
||||
export interface CustomProviderOption {
|
||||
value: string;
|
||||
label: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetches the list of LiteLLM provider names available for custom provider
|
||||
* configuration (i.e. providers that don't have a dedicated well-known modal).
|
||||
*
|
||||
* Hits `GET /api/admin/llm/custom-provider-names`.
|
||||
*/
|
||||
export function useCustomProviderNames() {
|
||||
const { data, error, isLoading } = useSWR<CustomProviderOption[]>(
|
||||
SWR_KEYS.customProviderNames,
|
||||
errorHandlingFetcher,
|
||||
{
|
||||
revalidateOnFocus: false,
|
||||
revalidateIfStale: false,
|
||||
dedupingInterval: 60000,
|
||||
}
|
||||
);
|
||||
|
||||
return {
|
||||
customProviderNames: data ?? null,
|
||||
isLoading,
|
||||
error,
|
||||
};
|
||||
}
|
||||
|
||||
export function useWellKnownLLMProviders() {
|
||||
const {
|
||||
data: wellKnownLLMProviders,
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
"use client";
|
||||
|
||||
import { useState, useCallback, useEffect, useMemo, useRef } from "react";
|
||||
import { useState, useCallback, useMemo } from "react";
|
||||
import {
|
||||
MAX_MODELS,
|
||||
SelectedModel,
|
||||
@@ -40,7 +40,6 @@ export default function useMultiModelChat(
|
||||
llmManager: LlmManager
|
||||
): UseMultiModelChatReturn {
|
||||
const [selectedModels, setSelectedModels] = useState<SelectedModel[]>([]);
|
||||
const [defaultInitialized, setDefaultInitialized] = useState(false);
|
||||
|
||||
// Initialize with the default model from llmManager once providers load
|
||||
const llmOptions = useMemo(
|
||||
@@ -49,89 +48,99 @@ export default function useMultiModelChat(
|
||||
[llmManager.llmProviders]
|
||||
);
|
||||
|
||||
// Sync selectedModels[0] with llmManager.currentLlm when in single-model
|
||||
// mode. This handles both initial load and session override changes (e.g.
|
||||
// page reload restores the persisted model after providers load).
|
||||
// Skip when user has manually added multiple models (multi-model mode).
|
||||
const selectedModelsRef = useRef(selectedModels);
|
||||
selectedModelsRef.current = selectedModels;
|
||||
|
||||
useEffect(() => {
|
||||
if (llmOptions.length === 0) return;
|
||||
// In single-model mode, derive the displayed model directly from
|
||||
// llmManager.currentLlm so it always stays in sync (no stale state).
|
||||
// Only use the selectedModels state array when the user has manually
|
||||
// added multiple models (multi-model mode).
|
||||
const currentLlmModel = useMemo((): SelectedModel | null => {
|
||||
if (llmOptions.length === 0) return null;
|
||||
const { currentLlm } = llmManager;
|
||||
if (!currentLlm.modelName) return;
|
||||
|
||||
const current = selectedModelsRef.current;
|
||||
|
||||
// Don't override multi-model selections
|
||||
if (current.length > 1) return;
|
||||
|
||||
// Skip if already showing the correct model
|
||||
if (
|
||||
current.length === 1 &&
|
||||
current[0]!.provider === currentLlm.provider &&
|
||||
current[0]!.modelName === currentLlm.modelName
|
||||
) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!currentLlm.modelName) return null;
|
||||
const match = llmOptions.find(
|
||||
(opt) =>
|
||||
opt.provider === currentLlm.provider &&
|
||||
opt.modelName === currentLlm.modelName
|
||||
);
|
||||
if (match) {
|
||||
setSelectedModels([
|
||||
{
|
||||
name: match.name,
|
||||
provider: match.provider,
|
||||
modelName: match.modelName,
|
||||
displayName: match.displayName,
|
||||
},
|
||||
]);
|
||||
setDefaultInitialized(true);
|
||||
}
|
||||
if (!match) return null;
|
||||
return {
|
||||
name: match.name,
|
||||
provider: match.provider,
|
||||
modelName: match.modelName,
|
||||
displayName: match.displayName,
|
||||
};
|
||||
}, [llmOptions, llmManager.currentLlm]);
|
||||
|
||||
const isMultiModelActive = selectedModels.length > 1;
|
||||
|
||||
const addModel = useCallback((model: SelectedModel) => {
|
||||
setSelectedModels((prev) => {
|
||||
if (prev.length >= MAX_MODELS) return prev;
|
||||
if (
|
||||
prev.some(
|
||||
(m) =>
|
||||
m.provider === model.provider && m.modelName === model.modelName
|
||||
)
|
||||
) {
|
||||
return prev;
|
||||
}
|
||||
return [...prev, model];
|
||||
});
|
||||
}, []);
|
||||
// Expose the effective selection: multi-model state when active,
|
||||
// otherwise the single model derived from llmManager.
|
||||
const effectiveSelectedModels = useMemo(
|
||||
() =>
|
||||
isMultiModelActive
|
||||
? selectedModels
|
||||
: currentLlmModel
|
||||
? [currentLlmModel]
|
||||
: [],
|
||||
[isMultiModelActive, selectedModels, currentLlmModel]
|
||||
);
|
||||
|
||||
const addModel = useCallback(
|
||||
(model: SelectedModel) => {
|
||||
setSelectedModels((prev) => {
|
||||
// When in effective single-model mode (prev <= 1), always re-seed from
|
||||
// the current derived model so stale state from a prior remove doesn't persist.
|
||||
const base =
|
||||
prev.length <= 1 && currentLlmModel ? [currentLlmModel] : prev;
|
||||
if (base.length >= MAX_MODELS) return base;
|
||||
if (
|
||||
base.some(
|
||||
(m) =>
|
||||
m.provider === model.provider && m.modelName === model.modelName
|
||||
)
|
||||
) {
|
||||
return base;
|
||||
}
|
||||
return [...base, model];
|
||||
});
|
||||
},
|
||||
[currentLlmModel]
|
||||
);
|
||||
|
||||
const removeModel = useCallback((index: number) => {
|
||||
setSelectedModels((prev) => prev.filter((_, i) => i !== index));
|
||||
}, []);
|
||||
|
||||
const replaceModel = useCallback((index: number, model: SelectedModel) => {
|
||||
setSelectedModels((prev) => {
|
||||
// Don't replace with a model that's already selected elsewhere
|
||||
if (
|
||||
prev.some(
|
||||
(m, i) =>
|
||||
i !== index &&
|
||||
m.provider === model.provider &&
|
||||
m.modelName === model.modelName
|
||||
)
|
||||
) {
|
||||
return prev;
|
||||
const replaceModel = useCallback(
|
||||
(index: number, model: SelectedModel) => {
|
||||
// In single-model mode, update llmManager directly so currentLlm
|
||||
// (and thus effectiveSelectedModels) reflects the change immediately.
|
||||
if (!isMultiModelActive) {
|
||||
llmManager.updateCurrentLlm({
|
||||
name: model.name,
|
||||
provider: model.provider,
|
||||
modelName: model.modelName,
|
||||
});
|
||||
return;
|
||||
}
|
||||
const next = [...prev];
|
||||
next[index] = model;
|
||||
return next;
|
||||
});
|
||||
}, []);
|
||||
setSelectedModels((prev) => {
|
||||
// Don't replace with a model that's already selected elsewhere
|
||||
if (
|
||||
prev.some(
|
||||
(m, i) =>
|
||||
i !== index &&
|
||||
m.provider === model.provider &&
|
||||
m.modelName === model.modelName
|
||||
)
|
||||
) {
|
||||
return prev;
|
||||
}
|
||||
const next = [...prev];
|
||||
next[index] = model;
|
||||
return next;
|
||||
});
|
||||
},
|
||||
[isMultiModelActive, llmManager]
|
||||
);
|
||||
|
||||
const clearModels = useCallback(() => {
|
||||
setSelectedModels([]);
|
||||
@@ -161,7 +170,6 @@ export default function useMultiModelChat(
|
||||
}
|
||||
if (restored.length >= 2) {
|
||||
setSelectedModels(restored.slice(0, MAX_MODELS));
|
||||
setDefaultInitialized(true);
|
||||
}
|
||||
},
|
||||
[llmOptions]
|
||||
@@ -191,15 +199,15 @@ export default function useMultiModelChat(
|
||||
);
|
||||
|
||||
const buildLlmOverrides = useCallback((): LLMOverride[] => {
|
||||
return selectedModels.map((m) => ({
|
||||
return effectiveSelectedModels.map((m) => ({
|
||||
model_provider: m.name,
|
||||
model_version: m.modelName,
|
||||
display_name: m.displayName,
|
||||
}));
|
||||
}, [selectedModels]);
|
||||
}, [effectiveSelectedModels]);
|
||||
|
||||
return {
|
||||
selectedModels,
|
||||
selectedModels: effectiveSelectedModels,
|
||||
isMultiModelActive,
|
||||
addModel,
|
||||
removeModel,
|
||||
|
||||
@@ -10,7 +10,6 @@ import {
|
||||
SvgBubbleText,
|
||||
SvgClipboard,
|
||||
SvgCpu,
|
||||
SvgDiscordMono,
|
||||
SvgDownload,
|
||||
SvgEmpty,
|
||||
SvgFileText,
|
||||
@@ -24,7 +23,6 @@ import {
|
||||
SvgPaintBrush,
|
||||
SvgProgressBars,
|
||||
SvgSearchMenu,
|
||||
SvgSlack,
|
||||
SvgTerminal,
|
||||
SvgThumbsUp,
|
||||
SvgUploadCloud,
|
||||
@@ -34,6 +32,8 @@ import {
|
||||
SvgUsers,
|
||||
SvgWallet,
|
||||
SvgZoomIn,
|
||||
SvgDiscord,
|
||||
SvgSlack,
|
||||
} from "@opal/icons";
|
||||
|
||||
export interface AdminRouteEntry {
|
||||
@@ -92,7 +92,7 @@ export const ADMIN_ROUTES = {
|
||||
},
|
||||
DISCORD_BOTS: {
|
||||
path: "/admin/discord-bot",
|
||||
icon: SvgDiscordMono,
|
||||
icon: SvgDiscord,
|
||||
title: "Discord Integration",
|
||||
sidebarLabel: "Discord Integration",
|
||||
},
|
||||
|
||||
@@ -671,7 +671,8 @@ export function useLlmManager(
|
||||
const [userHasManuallyOverriddenLLM, setUserHasManuallyOverriddenLLM] =
|
||||
useState(false);
|
||||
const [chatSession, setChatSession] = useState<ChatSession | null>(null);
|
||||
const [currentLlm, setCurrentLlm] = useState<LlmDescriptor>({
|
||||
// Manual override value — only used when userHasManuallyOverriddenLLM is true
|
||||
const [manualLlm, setManualLlm] = useState<LlmDescriptor>({
|
||||
name: "",
|
||||
provider: "",
|
||||
modelName: "",
|
||||
@@ -693,55 +694,77 @@ export function useLlmManager(
|
||||
prevAgentIdRef.current = liveAgent?.id;
|
||||
}, [liveAgent?.id]);
|
||||
|
||||
const llmUpdate = () => {
|
||||
/* Should be called when the live assistant or current chat session changes */
|
||||
|
||||
// Don't update if providers haven't loaded yet (undefined/null)
|
||||
// Empty arrays are valid (user has no provider access for this assistant)
|
||||
if (llmProviders === undefined || llmProviders === null) {
|
||||
return;
|
||||
}
|
||||
|
||||
// separate function so we can `return` to break out
|
||||
const _llmUpdate = () => {
|
||||
// if the user has overridden in this session and just switched to a brand
|
||||
// new session, use their manually specified model
|
||||
if (userHasManuallyOverriddenLLM && !currentChatSession) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (currentChatSession?.current_alternate_model) {
|
||||
setCurrentLlm(
|
||||
getValidLlmDescriptor(currentChatSession.current_alternate_model)
|
||||
);
|
||||
} else if (liveAgent?.llm_model_version_override) {
|
||||
setCurrentLlm(
|
||||
getValidLlmDescriptor(liveAgent.llm_model_version_override)
|
||||
);
|
||||
} else if (userHasManuallyOverriddenLLM) {
|
||||
// if the user has an override and there's nothing special about the
|
||||
// current chat session, use the override
|
||||
return;
|
||||
} else if (user?.preferences?.default_model) {
|
||||
setCurrentLlm(getValidLlmDescriptor(user.preferences.default_model));
|
||||
} else {
|
||||
const defaultLlm = getDefaultLlmDescriptor(llmProviders, defaultText);
|
||||
if (defaultLlm) {
|
||||
setCurrentLlm(defaultLlm);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
_llmUpdate();
|
||||
setChatSession(currentChatSession || null);
|
||||
};
|
||||
|
||||
function getValidLlmDescriptor(
|
||||
modelName: string | null | undefined
|
||||
): LlmDescriptor {
|
||||
return getValidLlmDescriptorForProviders(modelName, llmProviders);
|
||||
}
|
||||
|
||||
// Compute the resolved LLM synchronously so it's never one render behind.
|
||||
// This replaces the old llmUpdate() effect for model resolution.
|
||||
// Wrapped with a ref for referential stability — returns the same object
|
||||
// when the resolved name/provider/modelName haven't actually changed,
|
||||
// preventing unnecessary re-creation of downstream callbacks (e.g. onSubmit).
|
||||
const prevLlmRef = useRef<LlmDescriptor>({
|
||||
name: "",
|
||||
provider: "",
|
||||
modelName: "",
|
||||
});
|
||||
const currentLlm = useMemo((): LlmDescriptor => {
|
||||
let resolved: LlmDescriptor;
|
||||
|
||||
if (llmProviders === undefined || llmProviders === null) {
|
||||
resolved = manualLlm;
|
||||
} else if (userHasManuallyOverriddenLLM && !currentChatSession) {
|
||||
// User has overridden in this session and switched to a new session
|
||||
resolved = manualLlm;
|
||||
} else if (currentChatSession?.current_alternate_model) {
|
||||
resolved = getValidLlmDescriptorForProviders(
|
||||
currentChatSession.current_alternate_model,
|
||||
llmProviders
|
||||
);
|
||||
} else if (liveAgent?.llm_model_version_override) {
|
||||
resolved = getValidLlmDescriptorForProviders(
|
||||
liveAgent.llm_model_version_override,
|
||||
llmProviders
|
||||
);
|
||||
} else if (userHasManuallyOverriddenLLM) {
|
||||
resolved = manualLlm;
|
||||
} else if (user?.preferences?.default_model) {
|
||||
resolved = getValidLlmDescriptorForProviders(
|
||||
user.preferences.default_model,
|
||||
llmProviders
|
||||
);
|
||||
} else {
|
||||
resolved =
|
||||
getDefaultLlmDescriptor(llmProviders, defaultText) ?? manualLlm;
|
||||
}
|
||||
|
||||
const prev = prevLlmRef.current;
|
||||
if (
|
||||
prev.name === resolved.name &&
|
||||
prev.provider === resolved.provider &&
|
||||
prev.modelName === resolved.modelName
|
||||
) {
|
||||
return prev;
|
||||
}
|
||||
prevLlmRef.current = resolved;
|
||||
return resolved;
|
||||
}, [
|
||||
llmProviders,
|
||||
defaultText,
|
||||
currentChatSession,
|
||||
liveAgent?.llm_model_version_override,
|
||||
userHasManuallyOverriddenLLM,
|
||||
manualLlm,
|
||||
user?.preferences?.default_model,
|
||||
]);
|
||||
|
||||
// Keep chatSession state in sync (used by temperature effect)
|
||||
useEffect(() => {
|
||||
setChatSession(currentChatSession || null);
|
||||
}, [currentChatSession]);
|
||||
|
||||
const [imageFilesPresent, setImageFilesPresent] = useState(false);
|
||||
|
||||
const updateImageFilesPresent = (present: boolean) => {
|
||||
@@ -750,18 +773,18 @@ export function useLlmManager(
|
||||
|
||||
// Manually set the LLM
|
||||
const updateCurrentLlm = (newLlm: LlmDescriptor) => {
|
||||
setCurrentLlm(newLlm);
|
||||
setManualLlm(newLlm);
|
||||
setUserHasManuallyOverriddenLLM(true);
|
||||
};
|
||||
|
||||
const updateCurrentLlmToModelName = (modelName: string) => {
|
||||
setCurrentLlm(getValidLlmDescriptor(modelName));
|
||||
setManualLlm(getValidLlmDescriptor(modelName));
|
||||
setUserHasManuallyOverriddenLLM(true);
|
||||
};
|
||||
|
||||
const updateModelOverrideBasedOnChatSession = (chatSession?: ChatSession) => {
|
||||
if (chatSession && chatSession.current_alternate_model?.length > 0) {
|
||||
setCurrentLlm(getValidLlmDescriptor(chatSession.current_alternate_model));
|
||||
setManualLlm(getValidLlmDescriptor(chatSession.current_alternate_model));
|
||||
}
|
||||
};
|
||||
|
||||
@@ -811,8 +834,6 @@ export function useLlmManager(
|
||||
}, [currentLlm]);
|
||||
|
||||
useEffect(() => {
|
||||
llmUpdate();
|
||||
|
||||
if (!chatSession && currentChatSession) {
|
||||
if (temperature) {
|
||||
updateTemperatureOverrideForChatSession(
|
||||
|
||||
251
web/src/lib/llmConfig/index.ts
Normal file
251
web/src/lib/llmConfig/index.ts
Normal file
@@ -0,0 +1,251 @@
|
||||
import type { IconFunctionComponent } from "@opal/types";
|
||||
import { SvgCpu, SvgPlug, SvgServer } from "@opal/icons";
|
||||
import {
|
||||
SvgBifrost,
|
||||
SvgOpenai,
|
||||
SvgClaude,
|
||||
SvgOllama,
|
||||
SvgAws,
|
||||
SvgOpenrouter,
|
||||
SvgAzure,
|
||||
SvgGemini,
|
||||
SvgLitellm,
|
||||
SvgLmStudio,
|
||||
SvgMicrosoft,
|
||||
SvgMistral,
|
||||
SvgDeepseek,
|
||||
SvgQwen,
|
||||
SvgGoogle,
|
||||
} from "@opal/logos";
|
||||
import { ZAIIcon } from "@/components/icons/icons";
|
||||
import { LLMProviderFormProps, LLMProviderName } from "@/interfaces/llm";
|
||||
import type { LLMProviderView } from "@/interfaces/llm";
|
||||
import OpenAIModal from "@/sections/modals/llmConfig/OpenAIModal";
|
||||
import AnthropicModal from "@/sections/modals/llmConfig/AnthropicModal";
|
||||
import OllamaModal from "@/sections/modals/llmConfig/OllamaModal";
|
||||
import AzureModal from "@/sections/modals/llmConfig/AzureModal";
|
||||
import BedrockModal from "@/sections/modals/llmConfig/BedrockModal";
|
||||
import VertexAIModal from "@/sections/modals/llmConfig/VertexAIModal";
|
||||
import OpenRouterModal from "@/sections/modals/llmConfig/OpenRouterModal";
|
||||
import CustomModal from "@/sections/modals/llmConfig/CustomModal";
|
||||
import LMStudioModal from "@/sections/modals/llmConfig/LMStudioModal";
|
||||
import LiteLLMProxyModal from "@/sections/modals/llmConfig/LiteLLMProxyModal";
|
||||
import BifrostModal from "@/sections/modals/llmConfig/BifrostModal";
|
||||
import OpenAICompatibleModal from "@/sections/modals/llmConfig/OpenAICompatibleModal";
|
||||
|
||||
// ─── Text (LLM) providers ────────────────────────────────────────────────────
|
||||
|
||||
export interface ProviderEntry {
|
||||
icon: IconFunctionComponent;
|
||||
productName: string;
|
||||
companyName: string;
|
||||
Modal: React.ComponentType<LLMProviderFormProps>;
|
||||
}
|
||||
|
||||
const PROVIDERS: Record<string, ProviderEntry> = {
|
||||
[LLMProviderName.OPENAI]: {
|
||||
icon: SvgOpenai,
|
||||
productName: "GPT",
|
||||
companyName: "OpenAI",
|
||||
Modal: OpenAIModal,
|
||||
},
|
||||
[LLMProviderName.ANTHROPIC]: {
|
||||
icon: SvgClaude,
|
||||
productName: "Claude",
|
||||
companyName: "Anthropic",
|
||||
Modal: AnthropicModal,
|
||||
},
|
||||
[LLMProviderName.VERTEX_AI]: {
|
||||
icon: SvgGemini,
|
||||
productName: "Gemini",
|
||||
companyName: "Google Cloud Vertex AI",
|
||||
Modal: VertexAIModal,
|
||||
},
|
||||
[LLMProviderName.BEDROCK]: {
|
||||
icon: SvgAws,
|
||||
productName: "Amazon Bedrock",
|
||||
companyName: "AWS",
|
||||
Modal: BedrockModal,
|
||||
},
|
||||
[LLMProviderName.AZURE]: {
|
||||
icon: SvgAzure,
|
||||
productName: "Azure OpenAI",
|
||||
companyName: "Microsoft Azure",
|
||||
Modal: AzureModal,
|
||||
},
|
||||
[LLMProviderName.LITELLM]: {
|
||||
icon: SvgLitellm,
|
||||
productName: "LiteLLM",
|
||||
companyName: "LiteLLM",
|
||||
Modal: CustomModal,
|
||||
},
|
||||
[LLMProviderName.LITELLM_PROXY]: {
|
||||
icon: SvgLitellm,
|
||||
productName: "LiteLLM Proxy",
|
||||
companyName: "LiteLLM Proxy",
|
||||
Modal: LiteLLMProxyModal,
|
||||
},
|
||||
[LLMProviderName.OLLAMA_CHAT]: {
|
||||
icon: SvgOllama,
|
||||
productName: "Ollama",
|
||||
companyName: "Ollama",
|
||||
Modal: OllamaModal,
|
||||
},
|
||||
[LLMProviderName.OPENROUTER]: {
|
||||
icon: SvgOpenrouter,
|
||||
productName: "OpenRouter",
|
||||
companyName: "OpenRouter",
|
||||
Modal: OpenRouterModal,
|
||||
},
|
||||
[LLMProviderName.LM_STUDIO]: {
|
||||
icon: SvgLmStudio,
|
||||
productName: "LM Studio",
|
||||
companyName: "LM Studio",
|
||||
Modal: LMStudioModal,
|
||||
},
|
||||
[LLMProviderName.BIFROST]: {
|
||||
icon: SvgBifrost,
|
||||
productName: "Bifrost",
|
||||
companyName: "Bifrost",
|
||||
Modal: BifrostModal,
|
||||
},
|
||||
[LLMProviderName.OPENAI_COMPATIBLE]: {
|
||||
icon: SvgPlug,
|
||||
productName: "OpenAI-Compatible",
|
||||
companyName: "OpenAI-Compatible",
|
||||
Modal: OpenAICompatibleModal,
|
||||
},
|
||||
[LLMProviderName.CUSTOM]: {
|
||||
icon: SvgServer,
|
||||
productName: "Custom Models",
|
||||
companyName: "models from other LiteLLM-compatible providers",
|
||||
Modal: CustomModal,
|
||||
},
|
||||
};
|
||||
|
||||
const DEFAULT_ENTRY: ProviderEntry = {
|
||||
icon: SvgCpu,
|
||||
productName: "",
|
||||
companyName: "",
|
||||
Modal: CustomModal,
|
||||
};
|
||||
|
||||
// Providers that don't use custom_config themselves — if custom_config is
|
||||
// present it means the provider was originally created via CustomModal.
|
||||
const CUSTOM_CONFIG_OVERRIDES = new Set<string>([
|
||||
LLMProviderName.OPENAI,
|
||||
LLMProviderName.ANTHROPIC,
|
||||
LLMProviderName.AZURE,
|
||||
LLMProviderName.OPENROUTER,
|
||||
]);
|
||||
|
||||
export function getProvider(
|
||||
providerName: string,
|
||||
existingProvider?: LLMProviderView
|
||||
): ProviderEntry {
|
||||
const entry = PROVIDERS[providerName] ?? {
|
||||
...DEFAULT_ENTRY,
|
||||
productName: providerName,
|
||||
companyName: providerName,
|
||||
};
|
||||
|
||||
if (
|
||||
existingProvider?.custom_config != null &&
|
||||
CUSTOM_CONFIG_OVERRIDES.has(providerName)
|
||||
) {
|
||||
return { ...entry, Modal: CustomModal };
|
||||
}
|
||||
|
||||
return entry;
|
||||
}
|
||||
|
||||
// ─── Aggregator providers ────────────────────────────────────────────────────
|
||||
// Providers that host models from multiple vendors (e.g. Bedrock hosts Claude,
|
||||
// Llama, etc.) Used by the model-icon resolver to prioritise vendor icons.
|
||||
|
||||
export const AGGREGATOR_PROVIDERS = new Set([
|
||||
LLMProviderName.BEDROCK,
|
||||
"bedrock_converse",
|
||||
LLMProviderName.OPENROUTER,
|
||||
LLMProviderName.OLLAMA_CHAT,
|
||||
LLMProviderName.LM_STUDIO,
|
||||
LLMProviderName.LITELLM_PROXY,
|
||||
LLMProviderName.BIFROST,
|
||||
LLMProviderName.OPENAI_COMPATIBLE,
|
||||
LLMProviderName.VERTEX_AI,
|
||||
]);
|
||||
|
||||
// ─── Model-aware icon resolver ───────────────────────────────────────────────
|
||||
|
||||
const MODEL_ICON_MAP: Record<string, IconFunctionComponent> = {
|
||||
[LLMProviderName.OPENAI]: SvgOpenai,
|
||||
[LLMProviderName.ANTHROPIC]: SvgClaude,
|
||||
[LLMProviderName.OLLAMA_CHAT]: SvgOllama,
|
||||
[LLMProviderName.LM_STUDIO]: SvgLmStudio,
|
||||
[LLMProviderName.OPENROUTER]: SvgOpenrouter,
|
||||
[LLMProviderName.VERTEX_AI]: SvgGemini,
|
||||
[LLMProviderName.BEDROCK]: SvgAws,
|
||||
[LLMProviderName.LITELLM_PROXY]: SvgLitellm,
|
||||
[LLMProviderName.BIFROST]: SvgBifrost,
|
||||
[LLMProviderName.OPENAI_COMPATIBLE]: SvgPlug,
|
||||
|
||||
amazon: SvgAws,
|
||||
phi: SvgMicrosoft,
|
||||
mistral: SvgMistral,
|
||||
ministral: SvgMistral,
|
||||
llama: SvgCpu,
|
||||
ollama: SvgOllama,
|
||||
gemini: SvgGemini,
|
||||
deepseek: SvgDeepseek,
|
||||
claude: SvgClaude,
|
||||
azure: SvgAzure,
|
||||
microsoft: SvgMicrosoft,
|
||||
meta: SvgCpu,
|
||||
google: SvgGoogle,
|
||||
qwen: SvgQwen,
|
||||
qwq: SvgQwen,
|
||||
zai: ZAIIcon,
|
||||
bedrock_converse: SvgAws,
|
||||
};
|
||||
|
||||
/**
|
||||
* Model-aware icon resolver that checks both provider name and model name
|
||||
* to pick the most specific icon (e.g. Claude icon for a Bedrock Claude model).
|
||||
*/
|
||||
export function getModelIcon(
|
||||
providerName: string,
|
||||
modelName?: string
|
||||
): IconFunctionComponent {
|
||||
const lowerProviderName = providerName.toLowerCase();
|
||||
|
||||
// For aggregator providers, prioritise showing the vendor icon based on model name
|
||||
if (AGGREGATOR_PROVIDERS.has(lowerProviderName) && modelName) {
|
||||
const lowerModelName = modelName.toLowerCase();
|
||||
for (const [key, icon] of Object.entries(MODEL_ICON_MAP)) {
|
||||
if (lowerModelName.includes(key)) {
|
||||
return icon;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check if provider name directly matches an icon
|
||||
if (lowerProviderName in MODEL_ICON_MAP) {
|
||||
const icon = MODEL_ICON_MAP[lowerProviderName];
|
||||
if (icon) {
|
||||
return icon;
|
||||
}
|
||||
}
|
||||
|
||||
// For non-aggregator providers, check if model name contains any of the keys
|
||||
if (modelName) {
|
||||
const lowerModelName = modelName.toLowerCase();
|
||||
for (const [key, icon] of Object.entries(MODEL_ICON_MAP)) {
|
||||
if (lowerModelName.includes(key)) {
|
||||
return icon;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to CPU icon if no matches
|
||||
return SvgCpu;
|
||||
}
|
||||
@@ -1,180 +0,0 @@
|
||||
import type { IconFunctionComponent } from "@opal/types";
|
||||
import {
|
||||
SvgBifrost,
|
||||
SvgCpu,
|
||||
SvgOpenai,
|
||||
SvgClaude,
|
||||
SvgOllama,
|
||||
SvgAws,
|
||||
SvgOpenrouter,
|
||||
SvgPlug,
|
||||
SvgServer,
|
||||
SvgAzure,
|
||||
SvgGemini,
|
||||
SvgLitellm,
|
||||
SvgLmStudio,
|
||||
} from "@opal/icons";
|
||||
import {
|
||||
MicrosoftIconSVG,
|
||||
MistralIcon,
|
||||
MetaIcon,
|
||||
DeepseekIcon,
|
||||
QwenIcon,
|
||||
ZAIIcon,
|
||||
} from "@/components/icons/icons";
|
||||
import { LLMProviderName } from "@/interfaces/llm";
|
||||
|
||||
export const AGGREGATOR_PROVIDERS = new Set([
|
||||
LLMProviderName.BEDROCK,
|
||||
"bedrock_converse",
|
||||
LLMProviderName.OPENROUTER,
|
||||
LLMProviderName.OLLAMA_CHAT,
|
||||
LLMProviderName.LM_STUDIO,
|
||||
LLMProviderName.LITELLM_PROXY,
|
||||
LLMProviderName.BIFROST,
|
||||
LLMProviderName.OPENAI_COMPATIBLE,
|
||||
LLMProviderName.VERTEX_AI,
|
||||
]);
|
||||
|
||||
const PROVIDER_ICONS: Record<string, IconFunctionComponent> = {
|
||||
[LLMProviderName.OPENAI]: SvgOpenai,
|
||||
[LLMProviderName.ANTHROPIC]: SvgClaude,
|
||||
[LLMProviderName.VERTEX_AI]: SvgGemini,
|
||||
[LLMProviderName.BEDROCK]: SvgAws,
|
||||
[LLMProviderName.AZURE]: SvgAzure,
|
||||
[LLMProviderName.LITELLM]: SvgLitellm,
|
||||
[LLMProviderName.LITELLM_PROXY]: SvgLitellm,
|
||||
[LLMProviderName.OLLAMA_CHAT]: SvgOllama,
|
||||
[LLMProviderName.OPENROUTER]: SvgOpenrouter,
|
||||
[LLMProviderName.LM_STUDIO]: SvgLmStudio,
|
||||
[LLMProviderName.BIFROST]: SvgBifrost,
|
||||
[LLMProviderName.OPENAI_COMPATIBLE]: SvgPlug,
|
||||
|
||||
// fallback
|
||||
[LLMProviderName.CUSTOM]: SvgServer,
|
||||
};
|
||||
|
||||
const PROVIDER_PRODUCT_NAMES: Record<string, string> = {
|
||||
[LLMProviderName.OPENAI]: "GPT",
|
||||
[LLMProviderName.ANTHROPIC]: "Claude",
|
||||
[LLMProviderName.VERTEX_AI]: "Gemini",
|
||||
[LLMProviderName.BEDROCK]: "Amazon Bedrock",
|
||||
[LLMProviderName.AZURE]: "Azure OpenAI",
|
||||
[LLMProviderName.LITELLM]: "LiteLLM",
|
||||
[LLMProviderName.LITELLM_PROXY]: "LiteLLM Proxy",
|
||||
[LLMProviderName.OLLAMA_CHAT]: "Ollama",
|
||||
[LLMProviderName.OPENROUTER]: "OpenRouter",
|
||||
[LLMProviderName.LM_STUDIO]: "LM Studio",
|
||||
[LLMProviderName.BIFROST]: "Bifrost",
|
||||
[LLMProviderName.OPENAI_COMPATIBLE]: "OpenAI Compatible",
|
||||
|
||||
// fallback
|
||||
[LLMProviderName.CUSTOM]: "Custom Models",
|
||||
};
|
||||
|
||||
const PROVIDER_DISPLAY_NAMES: Record<string, string> = {
|
||||
[LLMProviderName.OPENAI]: "OpenAI",
|
||||
[LLMProviderName.ANTHROPIC]: "Anthropic",
|
||||
[LLMProviderName.VERTEX_AI]: "Google Cloud Vertex AI",
|
||||
[LLMProviderName.BEDROCK]: "AWS",
|
||||
[LLMProviderName.AZURE]: "Microsoft Azure",
|
||||
[LLMProviderName.LITELLM]: "LiteLLM",
|
||||
[LLMProviderName.LITELLM_PROXY]: "LiteLLM Proxy",
|
||||
[LLMProviderName.OLLAMA_CHAT]: "Ollama",
|
||||
[LLMProviderName.OPENROUTER]: "OpenRouter",
|
||||
[LLMProviderName.LM_STUDIO]: "LM Studio",
|
||||
[LLMProviderName.BIFROST]: "Bifrost",
|
||||
[LLMProviderName.OPENAI_COMPATIBLE]: "OpenAI Compatible",
|
||||
|
||||
// fallback
|
||||
[LLMProviderName.CUSTOM]: "Other providers or self-hosted",
|
||||
};
|
||||
|
||||
export function getProviderProductName(providerName: string): string {
|
||||
return PROVIDER_PRODUCT_NAMES[providerName] ?? providerName;
|
||||
}
|
||||
|
||||
export function getProviderDisplayName(providerName: string): string {
|
||||
return PROVIDER_DISPLAY_NAMES[providerName] ?? providerName;
|
||||
}
|
||||
|
||||
export function getProviderIcon(providerName: string): IconFunctionComponent {
|
||||
return PROVIDER_ICONS[providerName] ?? SvgCpu;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Model-aware icon resolver (legacy icon set)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
const MODEL_ICON_MAP: Record<string, IconFunctionComponent> = {
|
||||
[LLMProviderName.OPENAI]: SvgOpenai,
|
||||
[LLMProviderName.ANTHROPIC]: SvgClaude,
|
||||
[LLMProviderName.OLLAMA_CHAT]: SvgOllama,
|
||||
[LLMProviderName.LM_STUDIO]: SvgLmStudio,
|
||||
[LLMProviderName.OPENROUTER]: SvgOpenrouter,
|
||||
[LLMProviderName.VERTEX_AI]: SvgGemini,
|
||||
[LLMProviderName.BEDROCK]: SvgAws,
|
||||
[LLMProviderName.LITELLM_PROXY]: SvgLitellm,
|
||||
[LLMProviderName.BIFROST]: SvgBifrost,
|
||||
[LLMProviderName.OPENAI_COMPATIBLE]: SvgPlug,
|
||||
|
||||
amazon: SvgAws,
|
||||
phi: MicrosoftIconSVG,
|
||||
mistral: MistralIcon,
|
||||
ministral: MistralIcon,
|
||||
llama: MetaIcon,
|
||||
ollama: SvgOllama,
|
||||
gemini: SvgGemini,
|
||||
deepseek: DeepseekIcon,
|
||||
claude: SvgClaude,
|
||||
azure: SvgAzure,
|
||||
microsoft: MicrosoftIconSVG,
|
||||
meta: MetaIcon,
|
||||
google: SvgGemini,
|
||||
qwen: QwenIcon,
|
||||
qwq: QwenIcon,
|
||||
zai: ZAIIcon,
|
||||
bedrock_converse: SvgAws,
|
||||
};
|
||||
|
||||
/**
|
||||
* Model-aware icon resolver that checks both provider name and model name
|
||||
* to pick the most specific icon (e.g. Claude icon for a Bedrock Claude model).
|
||||
*/
|
||||
export const getModelIcon = (
|
||||
providerName: string,
|
||||
modelName?: string
|
||||
): IconFunctionComponent => {
|
||||
const lowerProviderName = providerName.toLowerCase();
|
||||
|
||||
// For aggregator providers, prioritise showing the vendor icon based on model name
|
||||
if (AGGREGATOR_PROVIDERS.has(lowerProviderName) && modelName) {
|
||||
const lowerModelName = modelName.toLowerCase();
|
||||
for (const [key, icon] of Object.entries(MODEL_ICON_MAP)) {
|
||||
if (lowerModelName.includes(key)) {
|
||||
return icon;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check if provider name directly matches an icon
|
||||
if (lowerProviderName in MODEL_ICON_MAP) {
|
||||
const icon = MODEL_ICON_MAP[lowerProviderName];
|
||||
if (icon) {
|
||||
return icon;
|
||||
}
|
||||
}
|
||||
|
||||
// For non-aggregator providers, check if model name contains any of the keys
|
||||
if (modelName) {
|
||||
const lowerModelName = modelName.toLowerCase();
|
||||
for (const [key, icon] of Object.entries(MODEL_ICON_MAP)) {
|
||||
if (lowerModelName.includes(key)) {
|
||||
return icon;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to CPU icon if no matches
|
||||
return SvgCpu;
|
||||
};
|
||||
@@ -44,7 +44,7 @@ export function getFinalLLM(
|
||||
return [provider, model];
|
||||
}
|
||||
|
||||
export function getLLMProviderOverrideForPersona(
|
||||
export function getProviderOverrideForPersona(
|
||||
liveAgent: MinimalPersonaSnapshot,
|
||||
llmProviders: LLMProviderDescriptor[]
|
||||
): LlmDescriptor | null {
|
||||
@@ -144,7 +144,7 @@ export function getDisplayName(
|
||||
agent: MinimalPersonaSnapshot,
|
||||
llmProviders: LLMProviderDescriptor[]
|
||||
): string | undefined {
|
||||
const llmDescriptor = getLLMProviderOverrideForPersona(
|
||||
const llmDescriptor = getProviderOverrideForPersona(
|
||||
agent,
|
||||
llmProviders ?? []
|
||||
);
|
||||
|
||||
@@ -32,6 +32,7 @@ export const SWR_KEYS = {
|
||||
`/api/llm/persona/${personaId}/providers`,
|
||||
adminLlmProviders: "/api/admin/llm/provider",
|
||||
llmProvidersWithImageGen: "/api/admin/llm/provider?include_image_gen=true",
|
||||
customProviderNames: "/api/admin/llm/custom-provider-names",
|
||||
wellKnownLlmProviders: "/api/admin/llm/built-in/options",
|
||||
wellKnownLlmProvider: (providerEndpoint: string) =>
|
||||
`/api/admin/llm/built-in/options/${providerEndpoint}`,
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import React, { memo } from "react";
|
||||
import { SvgArrowExchange, SvgOnyxLogo } from "@opal/icons";
|
||||
import { SvgArrowExchange } from "@opal/icons";
|
||||
import { SvgOnyxLogo } from "@opal/logos";
|
||||
|
||||
type ConnectionProviderIconProps = {
|
||||
icon: React.ReactNode;
|
||||
|
||||
@@ -9,7 +9,7 @@ import { cn } from "@/lib/utils";
|
||||
import Text from "@/refresh-components/texts/Text";
|
||||
import Truncated from "@/refresh-components/texts/Truncated";
|
||||
import { useMemo } from "react";
|
||||
import { SvgOnyxLogo, SvgOnyxLogoTyped } from "@opal/icons";
|
||||
import { SvgOnyxLogo, SvgOnyxLogoTyped } from "@opal/logos";
|
||||
|
||||
export interface LogoProps {
|
||||
folded?: boolean;
|
||||
|
||||
@@ -129,8 +129,9 @@ const InputComboBox = ({
|
||||
leftSearchIcon = false,
|
||||
rightSection,
|
||||
separatorLabel = "Other options",
|
||||
showAddPrefix = false,
|
||||
createPrefix,
|
||||
showOtherOptions = false,
|
||||
dropdownMaxHeight,
|
||||
...rest
|
||||
}: WithoutStyles<InputComboBoxProps>) => {
|
||||
const inputRef = useRef<HTMLInputElement>(null);
|
||||
@@ -446,7 +447,8 @@ const InputComboBox = ({
|
||||
inputValue={inputValue}
|
||||
allowCreate={!strict}
|
||||
showCreateOption={showCreateOption}
|
||||
showAddPrefix={showAddPrefix}
|
||||
createPrefix={createPrefix}
|
||||
dropdownMaxHeight={dropdownMaxHeight}
|
||||
/>
|
||||
</>
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user