Compare commits


4 Commits

Author        SHA1        Message      Date
pablodanswer  1225a8f117  nit          2025-01-24 11:22:05 -08:00
pablodanswer  c4f100f47b  quick nit    2025-01-24 10:39:18 -08:00
pablodanswer  f8fb2e4411  quick nit    2025-01-24 10:34:35 -08:00
pablodanswer  9dfd57fbaa  unzip files  2025-01-24 10:31:54 -08:00
59 changed files with 724 additions and 1622 deletions

View File

@@ -9,9 +9,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check PR body for Linear link or override
env:
PR_BODY: ${{ github.event.pull_request.body }}
run: |
PR_BODY="${{ github.event.pull_request.body }}"
# Looking for "https://linear.app" in the body
if echo "$PR_BODY" | grep -qE "https://linear\.app"; then
echo "Found a Linear link. Check passed."

View File

@@ -9,10 +9,8 @@ founders@onyx.app for more information. Please visit https://github.com/onyx-dot
# Default ONYX_VERSION, typically overridden during builds by GitHub Actions.
ARG ONYX_VERSION=0.8-dev
# DO_NOT_TRACK is used to disable telemetry for Unstructured
ENV ONYX_VERSION=${ONYX_VERSION} \
DANSWER_RUNNING_IN_DOCKER="true" \
DO_NOT_TRACK="true"
DANSWER_RUNNING_IN_DOCKER="true"
RUN echo "ONYX_VERSION: ${ONYX_VERSION}"
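DO_NOT_TRACK is a common cross-tool opt-out convention. A tiny sketch of how a library might honor it (assumed behavior, not Unstructured's actual implementation):

import os

def telemetry_enabled() -> bool:
    # Assumed convention: any non-empty DO_NOT_TRACK value opts out.
    return not os.environ.get("DO_NOT_TRACK")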

View File

@@ -32,7 +32,6 @@ def perform_ttl_management_task(
@celery_app.task(
name="check_ttl_management_task",
ignore_result=True,
soft_time_limit=JOB_TIMEOUT,
)
def check_ttl_management_task(*, tenant_id: str | None) -> None:
@@ -57,7 +56,6 @@ def check_ttl_management_task(*, tenant_id: str | None) -> None:
@celery_app.task(
name="autogenerate_usage_report_task",
ignore_result=True,
soft_time_limit=JOB_TIMEOUT,
)
def autogenerate_usage_report_task(*, tenant_id: str | None) -> None:
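This hunk (and several like it below) drops soft_time_limit=JOB_TIMEOUT from beat tasks. A sketch of what the option does, assuming a standalone Celery app:

from celery import Celery
from celery.exceptions import SoftTimeLimitExceeded

celery_app = Celery("example")

@celery_app.task(name="example_task", ignore_result=True, soft_time_limit=60)
def example_task() -> None:
    # After 60s Celery raises SoftTimeLimitExceeded inside the task body,
    # letting it clean up before the hard time limit kills it outright.
    try:
        ...  # long-running work
    except SoftTimeLimitExceeded:
        pass  # exit gracefully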

View File

@@ -42,22 +42,24 @@ def _fetch_permissions_for_permission_ids(
if not permission_info or not doc_id:
return []
# Check cache first for all permission IDs
permissions = [
_PERMISSION_ID_PERMISSION_MAP[pid]
for pid in permission_ids
if pid in _PERMISSION_ID_PERMISSION_MAP
]
# If we found all permissions in cache, return them
if len(permissions) == len(permission_ids):
return permissions
owner_email = permission_info.get("owner_email")
drive_service = get_drive_service(
creds=google_drive_connector.creds,
user_email=(owner_email or google_drive_connector.primary_admin_email),
)
# Otherwise, fetch all permissions and update cache
fetched_permissions = execute_paginated_retrieval(
retrieval_function=drive_service.permissions().list,
list_key="permissions",
@@ -67,6 +69,7 @@ def _fetch_permissions_for_permission_ids(
)
permissions_for_doc_id = []
# Update cache and return all permissions
for permission in fetched_permissions:
permissions_for_doc_id.append(permission)
_PERMISSION_ID_PERMISSION_MAP[permission["id"]] = permission
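A condensed sketch of the cache-first flow added above, with a hypothetical fetch_fn standing in for the paginated Drive permissions call:

from collections.abc import Callable, Iterable

_CACHE: dict[str, dict] = {}

def fetch_permissions(
    permission_ids: list[str],
    fetch_fn: Callable[[], Iterable[dict]],
) -> list[dict]:
    # Serve entirely from cache when every requested ID is present.
    cached = [_CACHE[pid] for pid in permission_ids if pid in _CACHE]
    if len(cached) == len(permission_ids):
        return cached
    # Otherwise fetch once and refresh the cache for future calls.
    fetched = list(fetch_fn())
    for permission in fetched:
        _CACHE[permission["id"]] = permission
    return fetched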

View File

@@ -1,5 +1,6 @@
from datetime import timedelta
from typing import Any
from typing import cast
from celery import Celery
from celery import signals
@@ -7,6 +8,7 @@ from celery.beat import PersistentScheduler # type: ignore
from celery.signals import beat_init
import onyx.background.celery.apps.app_base as app_base
from onyx.configs.constants import ONYX_CLOUD_CELERY_TASK_PREFIX
from onyx.configs.constants import POSTGRES_CELERY_BEAT_APP_NAME
from onyx.db.engine import get_all_tenant_ids
from onyx.db.engine import SqlEngine
@@ -130,25 +132,21 @@ class DynamicTenantScheduler(PersistentScheduler):
# get current schedule and extract current tenants
current_schedule = self.schedule.items()
# there are no more per tenant beat tasks, so comment this out
# NOTE: we may not actually need this scheduler any more and should
# test reverting to a regular beat schedule implementation
current_tenants = set()
for task_name, _ in current_schedule:
task_name = cast(str, task_name)
if task_name.startswith(ONYX_CLOUD_CELERY_TASK_PREFIX):
continue
# current_tenants = set()
# for task_name, _ in current_schedule:
# task_name = cast(str, task_name)
# if task_name.startswith(ONYX_CLOUD_CELERY_TASK_PREFIX):
# continue
if "_" in task_name:
# example: "check-for-condition-tenant_12345678-abcd-efgh-ijkl-12345678"
# -> "12345678-abcd-efgh-ijkl-12345678"
current_tenants.add(task_name.split("_")[-1])
logger.info(f"Found {len(current_tenants)} existing items in schedule")
# if "_" in task_name:
# # example: "check-for-condition-tenant_12345678-abcd-efgh-ijkl-12345678"
# # -> "12345678-abcd-efgh-ijkl-12345678"
# current_tenants.add(task_name.split("_")[-1])
# logger.info(f"Found {len(current_tenants)} existing items in schedule")
# for tenant_id in tenant_ids:
# if tenant_id not in current_tenants:
# logger.info(f"Processing new tenant: {tenant_id}")
for tenant_id in tenant_ids:
if tenant_id not in current_tenants:
logger.info(f"Processing new tenant: {tenant_id}")
new_schedule = self._generate_schedule(tenant_ids)
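The commented-out block relied on per-tenant beat task names carrying the tenant id after the final underscore. A small sketch of that extraction, relevant if the scheduler is ever reverted as the NOTE suggests:

def extract_tenant_id(task_name: str) -> str | None:
    # "check-for-condition-tenant_1234-abcd" -> "1234-abcd"
    return task_name.split("_")[-1] if "_" in task_name else None

assert extract_tenant_id("check-for-condition-tenant_1234-abcd") == "1234-abcd"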

View File

@@ -16,10 +16,6 @@ from shared_configs.configs import MULTI_TENANT
# it's only important that they run relatively regularly
BEAT_EXPIRES_DEFAULT = 15 * 60 # 15 minutes (in seconds)
# hack to slow down task dispatch in the cloud until
# we have a better implementation (backpressure, etc)
CLOUD_BEAT_SCHEDULE_MULTIPLIER = 8
# tasks that only run in the cloud
# the name attribute must start with ONYX_CLOUD_CELERY_TASK_PREFIX = "cloud" to be filtered
# by the DynamicTenantScheduler
@@ -28,7 +24,7 @@ cloud_tasks_to_schedule = [
{
"name": f"{ONYX_CLOUD_CELERY_TASK_PREFIX}_check-alembic",
"task": OnyxCeleryTask.CLOUD_CHECK_ALEMBIC,
"schedule": timedelta(hours=1 * CLOUD_BEAT_SCHEDULE_MULTIPLIER),
"schedule": timedelta(hours=1),
"options": {
"queue": OnyxCeleryQueues.MONITORING,
"priority": OnyxCeleryPriority.HIGH,
@@ -39,7 +35,7 @@ cloud_tasks_to_schedule = [
{
"name": f"{ONYX_CLOUD_CELERY_TASK_PREFIX}_check-for-indexing",
"task": OnyxCeleryTask.CLOUD_BEAT_TASK_GENERATOR,
"schedule": timedelta(seconds=15 * CLOUD_BEAT_SCHEDULE_MULTIPLIER),
"schedule": timedelta(seconds=15),
"options": {
"priority": OnyxCeleryPriority.HIGHEST,
"expires": BEAT_EXPIRES_DEFAULT,
@@ -51,7 +47,7 @@ cloud_tasks_to_schedule = [
{
"name": f"{ONYX_CLOUD_CELERY_TASK_PREFIX}_check-for-connector-deletion",
"task": OnyxCeleryTask.CLOUD_BEAT_TASK_GENERATOR,
"schedule": timedelta(seconds=20 * CLOUD_BEAT_SCHEDULE_MULTIPLIER),
"schedule": timedelta(seconds=20),
"options": {
"priority": OnyxCeleryPriority.HIGHEST,
"expires": BEAT_EXPIRES_DEFAULT,
@@ -63,7 +59,7 @@ cloud_tasks_to_schedule = [
{
"name": f"{ONYX_CLOUD_CELERY_TASK_PREFIX}_check-for-vespa-sync",
"task": OnyxCeleryTask.CLOUD_BEAT_TASK_GENERATOR,
"schedule": timedelta(seconds=20 * CLOUD_BEAT_SCHEDULE_MULTIPLIER),
"schedule": timedelta(seconds=20),
"options": {
"priority": OnyxCeleryPriority.HIGHEST,
"expires": BEAT_EXPIRES_DEFAULT,
@@ -75,7 +71,7 @@ cloud_tasks_to_schedule = [
{
"name": f"{ONYX_CLOUD_CELERY_TASK_PREFIX}_check-for-prune",
"task": OnyxCeleryTask.CLOUD_BEAT_TASK_GENERATOR,
"schedule": timedelta(seconds=15 * CLOUD_BEAT_SCHEDULE_MULTIPLIER),
"schedule": timedelta(seconds=15),
"options": {
"priority": OnyxCeleryPriority.HIGHEST,
"expires": BEAT_EXPIRES_DEFAULT,
@@ -87,7 +83,7 @@ cloud_tasks_to_schedule = [
{
"name": f"{ONYX_CLOUD_CELERY_TASK_PREFIX}_monitor-vespa-sync",
"task": OnyxCeleryTask.CLOUD_BEAT_TASK_GENERATOR,
"schedule": timedelta(seconds=15 * CLOUD_BEAT_SCHEDULE_MULTIPLIER),
"schedule": timedelta(seconds=5),
"options": {
"priority": OnyxCeleryPriority.HIGHEST,
"expires": BEAT_EXPIRES_DEFAULT,
@@ -99,7 +95,7 @@ cloud_tasks_to_schedule = [
{
"name": f"{ONYX_CLOUD_CELERY_TASK_PREFIX}_check-for-doc-permissions-sync",
"task": OnyxCeleryTask.CLOUD_BEAT_TASK_GENERATOR,
"schedule": timedelta(seconds=30 * CLOUD_BEAT_SCHEDULE_MULTIPLIER),
"schedule": timedelta(seconds=30),
"options": {
"priority": OnyxCeleryPriority.HIGHEST,
"expires": BEAT_EXPIRES_DEFAULT,
@@ -111,7 +107,7 @@ cloud_tasks_to_schedule = [
{
"name": f"{ONYX_CLOUD_CELERY_TASK_PREFIX}_check-for-external-group-sync",
"task": OnyxCeleryTask.CLOUD_BEAT_TASK_GENERATOR,
"schedule": timedelta(seconds=20 * CLOUD_BEAT_SCHEDULE_MULTIPLIER),
"schedule": timedelta(seconds=20),
"options": {
"priority": OnyxCeleryPriority.HIGHEST,
"expires": BEAT_EXPIRES_DEFAULT,
@@ -123,7 +119,7 @@ cloud_tasks_to_schedule = [
{
"name": f"{ONYX_CLOUD_CELERY_TASK_PREFIX}_monitor-background-processes",
"task": OnyxCeleryTask.CLOUD_BEAT_TASK_GENERATOR,
"schedule": timedelta(minutes=5 * CLOUD_BEAT_SCHEDULE_MULTIPLIER),
"schedule": timedelta(minutes=5),
"options": {
"priority": OnyxCeleryPriority.HIGHEST,
"expires": BEAT_EXPIRES_DEFAULT,
@@ -141,9 +137,7 @@ if LLM_MODEL_UPDATE_API_URL:
{
"name": f"{ONYX_CLOUD_CELERY_TASK_PREFIX}_check-for-llm-model-update",
"task": OnyxCeleryTask.CLOUD_BEAT_TASK_GENERATOR,
"schedule": timedelta(
hours=1 * CLOUD_BEAT_SCHEDULE_MULTIPLIER
), # Check every hour
"schedule": timedelta(hours=1), # Check every hour
"options": {
"priority": OnyxCeleryPriority.HIGHEST,
"expires": BEAT_EXPIRES_DEFAULT,
@@ -227,7 +221,7 @@ if not MULTI_TENANT:
{
"name": "monitor-background-processes",
"task": OnyxCeleryTask.MONITOR_BACKGROUND_PROCESSES,
"schedule": timedelta(minutes=15),
"schedule": timedelta(minutes=5),
"options": {
"priority": OnyxCeleryPriority.LOW,
"expires": BEAT_EXPIRES_DEFAULT,

View File

@@ -33,7 +33,6 @@ class TaskDependencyError(RuntimeError):
@shared_task(
name=OnyxCeleryTask.CHECK_FOR_CONNECTOR_DELETION,
ignore_result=True,
soft_time_limit=JOB_TIMEOUT,
trail=False,
bind=True,
@@ -140,6 +139,13 @@ def try_generate_document_cc_pair_cleanup_tasks(
submitted=datetime.now(timezone.utc),
)
# create before setting fence to avoid race condition where the monitoring
# task updates the sync record before it is created
insert_sync_record(
db_session=db_session,
entity_id=cc_pair_id,
sync_type=SyncType.CONNECTOR_DELETION,
)
redis_connector.delete.set_fence(fence_payload)
try:
@@ -178,13 +184,6 @@ def try_generate_document_cc_pair_cleanup_tasks(
)
if tasks_generated is None:
raise ValueError("RedisConnectorDeletion.generate_tasks returned None")
insert_sync_record(
db_session=db_session,
entity_id=cc_pair_id,
sync_type=SyncType.CONNECTOR_DELETION,
)
except TaskDependencyError:
redis_connector.delete.set_fence(None)
raise
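The move in this hunk is an ordering fix: the sync record must exist before the deletion fence is published, or the monitoring task can see the fence and try to update a record that is not there yet. A toy sketch of the invariant (in-memory stand-ins, not the real Redis/DB calls):

records: set[int] = set()
fence: set[int] = set()

def start_deletion(cc_pair_id: int) -> None:
    records.add(cc_pair_id)  # 1. create the sync record first...
    fence.add(cc_pair_id)    # 2. ...then set the fence

def monitor(cc_pair_id: int) -> None:
    if cc_pair_id in fence:
        # Anything that sees the fence is now guaranteed to find the record.
        assert cc_pair_id in records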

View File

@@ -91,7 +91,6 @@ def _is_external_doc_permissions_sync_due(cc_pair: ConnectorCredentialPair) -> b
@shared_task(
name=OnyxCeleryTask.CHECK_FOR_DOC_PERMISSIONS_SYNC,
ignore_result=True,
soft_time_limit=JOB_TIMEOUT,
bind=True,
)

View File

@@ -91,7 +91,6 @@ def _is_external_group_sync_due(cc_pair: ConnectorCredentialPair) -> bool:
@shared_task(
name=OnyxCeleryTask.CHECK_FOR_EXTERNAL_GROUP_SYNC,
ignore_result=True,
soft_time_limit=JOB_TIMEOUT,
bind=True,
)

View File

@@ -45,7 +45,6 @@ from onyx.natural_language_processing.search_nlp_models import EmbeddingModel
from onyx.natural_language_processing.search_nlp_models import warm_up_bi_encoder
from onyx.redis.redis_connector import RedisConnector
from onyx.redis.redis_pool import get_redis_client
from onyx.redis.redis_pool import get_redis_replica_client
from onyx.redis.redis_pool import redis_lock_dump
from onyx.utils.logger import setup_logger
from onyx.utils.variable_functionality import global_version
@@ -70,7 +69,6 @@ def check_for_indexing(self: Task, *, tenant_id: str | None) -> int | None:
tasks_created = 0
locked = False
redis_client = get_redis_client(tenant_id=tenant_id)
redis_client_replica = get_redis_replica_client(tenant_id=tenant_id)
# we need to use celery's redis client to access its redis data
# (which lives on a different db number)
@@ -229,7 +227,7 @@ def check_for_indexing(self: Task, *, tenant_id: str | None) -> int | None:
# or be currently executing
try:
validate_indexing_fences(
tenant_id, redis_client_replica, redis_client_celery, lock_beat
tenant_id, self.app, redis_client, redis_client_celery, lock_beat
)
except Exception:
task_logger.exception("Exception while validating indexing fences")

View File

@@ -291,20 +291,17 @@ def validate_indexing_fence(
def validate_indexing_fences(
tenant_id: str | None,
r_replica: Redis,
celery_app: Celery,
r: Redis,
r_celery: Redis,
lock_beat: RedisLock,
) -> None:
"""Validates all indexing fences for this tenant ... aka makes sure
indexing tasks sent to celery are still in flight.
"""
reserved_indexing_tasks = celery_get_unacked_task_ids(
OnyxCeleryQueues.CONNECTOR_INDEXING, r_celery
)
# Use replica for this because the worst thing that happens
# is that we don't run the validation on this pass
for key_bytes in r_replica.scan_iter(
# validate all existing indexing jobs
for key_bytes in r.scan_iter(
RedisConnectorIndex.FENCE_PREFIX + "*", count=SCAN_ITER_COUNT_DEFAULT
):
lock_beat.reacquire()

View File

@@ -14,16 +14,8 @@ from onyx.db.models import LLMProvider
def _process_model_list_response(model_list_json: Any) -> list[str]:
# Handle case where response is wrapped in a "data" field
if isinstance(model_list_json, dict):
if "data" in model_list_json:
model_list_json = model_list_json["data"]
elif "models" in model_list_json:
model_list_json = model_list_json["models"]
else:
raise ValueError(
"Invalid response from API - expected dict with 'data' or "
f"'models' field, got {type(model_list_json)}"
)
if isinstance(model_list_json, dict) and "data" in model_list_json:
model_list_json = model_list_json["data"]
if not isinstance(model_list_json, list):
raise ValueError(
@@ -35,18 +27,11 @@ def _process_model_list_response(model_list_json: Any) -> list[str]:
for item in model_list_json:
if isinstance(item, str):
model_names.append(item)
elif isinstance(item, dict):
if "model_name" in item:
model_names.append(item["model_name"])
elif "id" in item:
model_names.append(item["id"])
else:
raise ValueError(
f"Invalid item in model list - expected dict with model_name or id, got {type(item)}"
)
elif isinstance(item, dict) and "model_name" in item:
model_names.append(item["model_name"])
else:
raise ValueError(
f"Invalid item in model list - expected string or dict, got {type(item)}"
f"Invalid item in model list - expected string or dict with model_name, got {type(item)}"
)
return model_names
@@ -54,7 +39,6 @@ def _process_model_list_response(model_list_json: Any) -> list[str]:
@shared_task(
name=OnyxCeleryTask.CHECK_FOR_LLM_MODEL_UPDATE,
ignore_result=True,
soft_time_limit=JOB_TIMEOUT,
trail=False,
bind=True,
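A condensed, runnable sketch of the simplified parser in this file: unwrap an optional "data" envelope, then accept plain strings or dicts carrying "model_name":

from typing import Any

def parse_models(payload: Any) -> list[str]:
    if isinstance(payload, dict) and "data" in payload:
        payload = payload["data"]  # unwrap the {"data": [...]} envelope
    if not isinstance(payload, list):
        raise ValueError(f"expected a list of models, got {type(payload)}")
    names: list[str] = []
    for item in payload:
        if isinstance(item, str):
            names.append(item)
        elif isinstance(item, dict) and "model_name" in item:
            names.append(item["model_name"])
        else:
            raise ValueError(f"unexpected item in model list: {item!r}")
    return names

assert parse_models({"data": ["gpt-4o", {"model_name": "llama3"}]}) == ["gpt-4o", "llama3"]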

View File

@@ -4,7 +4,6 @@ from collections.abc import Callable
from datetime import timedelta
from itertools import islice
from typing import Any
from typing import Literal
from celery import shared_task
from celery import Task
@@ -27,7 +26,6 @@ from onyx.db.engine import get_all_tenant_ids
from onyx.db.engine import get_db_current_time
from onyx.db.engine import get_session_with_tenant
from onyx.db.enums import IndexingStatus
from onyx.db.enums import SyncStatus
from onyx.db.enums import SyncType
from onyx.db.models import ConnectorCredentialPair
from onyx.db.models import DocumentSet
@@ -40,7 +38,6 @@ from onyx.redis.redis_pool import redis_lock_dump
from onyx.utils.telemetry import optional_telemetry
from onyx.utils.telemetry import RecordType
_MONITORING_SOFT_TIME_LIMIT = 60 * 5 # 5 minutes
_MONITORING_TIME_LIMIT = _MONITORING_SOFT_TIME_LIMIT + 60 # 6 minutes
@@ -52,12 +49,6 @@ _CONNECTOR_INDEX_ATTEMPT_RUN_SUCCESS_KEY_FMT = (
"monitoring_connector_index_attempt_run_success:{cc_pair_id}:{index_attempt_id}"
)
_FINAL_METRIC_KEY_FMT = "sync_final_metrics:{sync_type}:{entity_id}:{sync_record_id}"
_SYNC_START_LATENCY_KEY_FMT = (
"sync_start_latency:{sync_type}:{entity_id}:{sync_record_id}"
)
def _mark_metric_as_emitted(redis_std: Redis, key: str) -> None:
"""Mark a metric as having been emitted by setting a Redis key with expiration"""
@@ -120,7 +111,6 @@ class Metric(BaseModel):
}.items()
if v is not None
}
task_logger.info(f"Emitting metric: {data}")
optional_telemetry(
record_type=RecordType.METRIC,
data=data,
@@ -199,371 +189,239 @@ def _build_connector_start_latency_metric(
f"Start latency for index attempt {recent_attempt.id}: {start_latency:.2f}s "
f"(desired: {desired_start_time}, actual: {recent_attempt.time_started})"
)
job_id = build_job_id("connector", str(cc_pair.id), str(recent_attempt.id))
return Metric(
key=metric_key,
name="connector_start_latency",
value=start_latency,
tags={
"job_id": job_id,
"connector_id": str(cc_pair.connector.id),
"source": str(cc_pair.connector.source),
},
tags={},
)
def _build_connector_final_metrics(
def _build_run_success_metrics(
cc_pair: ConnectorCredentialPair,
recent_attempts: list[IndexAttempt],
redis_std: Redis,
) -> list[Metric]:
"""
Final metrics for connector index attempts:
- Boolean success/fail metric
- If success, emit:
* duration (seconds)
* doc_count
"""
metrics = []
for attempt in recent_attempts:
metric_key = _CONNECTOR_INDEX_ATTEMPT_RUN_SUCCESS_KEY_FMT.format(
cc_pair_id=cc_pair.id,
index_attempt_id=attempt.id,
)
if _has_metric_been_emitted(redis_std, metric_key):
task_logger.info(
f"Skipping final metrics for connector {cc_pair.connector.id} "
f"index attempt {attempt.id}, already emitted."
f"Skipping metric for connector {cc_pair.connector.id} "
f"index attempt {attempt.id} because it has already been "
"emitted"
)
continue
# We only emit final metrics if the attempt is in a terminal state
if attempt.status not in [
if attempt.status in [
IndexingStatus.SUCCESS,
IndexingStatus.FAILED,
IndexingStatus.CANCELED,
]:
# Not finished; skip
continue
job_id = build_job_id("connector", str(cc_pair.id), str(attempt.id))
success = attempt.status == IndexingStatus.SUCCESS
metrics.append(
Metric(
key=metric_key, # We'll mark the same key for any final metrics
name="connector_run_succeeded",
value=success,
tags={
"job_id": job_id,
"connector_id": str(cc_pair.connector.id),
"source": str(cc_pair.connector.source),
"status": attempt.status.value,
},
task_logger.info(
f"Adding run success metric for index attempt {attempt.id} with status {attempt.status}"
)
)
if success:
# Make sure we have valid time_started
if attempt.time_started and attempt.time_updated:
duration_seconds = (
attempt.time_updated - attempt.time_started
).total_seconds()
metrics.append(
Metric(
key=None, # No need for a new key, or you can reuse the same if you prefer
name="connector_index_duration_seconds",
value=duration_seconds,
tags={
"job_id": job_id,
"connector_id": str(cc_pair.connector.id),
"source": str(cc_pair.connector.source),
},
)
)
else:
task_logger.error(
f"Index attempt {attempt.id} succeeded but has missing time "
f"(time_started={attempt.time_started}, time_updated={attempt.time_updated})."
)
# For doc counts, choose whichever field is more relevant
doc_count = attempt.total_docs_indexed or 0
metrics.append(
Metric(
key=None,
name="connector_index_doc_count",
value=doc_count,
tags={
"job_id": job_id,
"connector_id": str(cc_pair.connector.id),
"source": str(cc_pair.connector.source),
},
key=metric_key,
name="connector_run_succeeded",
value=attempt.status == IndexingStatus.SUCCESS,
tags={"source": str(cc_pair.connector.source)},
)
)
_mark_metric_as_emitted(redis_std, metric_key)
return metrics
def _collect_connector_metrics(db_session: Session, redis_std: Redis) -> list[Metric]:
"""Collect metrics about connector runs from the past hour"""
# NOTE: use get_db_current_time since the IndexAttempt times are set based on DB time
one_hour_ago = get_db_current_time(db_session) - timedelta(hours=1)
# Get all connector credential pairs
cc_pairs = db_session.scalars(select(ConnectorCredentialPair)).all()
# Might be more than one search setting, or just one
active_search_settings = get_active_search_settings(db_session)
active_search_settings = get_active_search_settings(db_session)
metrics = []
# If you want to process each cc_pair against each search setting:
for cc_pair in cc_pairs:
for search_settings in active_search_settings:
recent_attempts = (
db_session.query(IndexAttempt)
.filter(
IndexAttempt.connector_credential_pair_id == cc_pair.id,
IndexAttempt.search_settings_id == search_settings.id,
)
.order_by(IndexAttempt.time_created.desc())
.limit(2)
.all()
for cc_pair, search_settings in zip(cc_pairs, active_search_settings):
recent_attempts = (
db_session.query(IndexAttempt)
.filter(
IndexAttempt.connector_credential_pair_id == cc_pair.id,
IndexAttempt.search_settings_id == search_settings.id,
)
.order_by(IndexAttempt.time_created.desc())
.limit(2)
.all()
)
if not recent_attempts:
continue
if not recent_attempts:
continue
most_recent_attempt = recent_attempts[0]
second_most_recent_attempt = (
recent_attempts[1] if len(recent_attempts) > 1 else None
)
most_recent_attempt = recent_attempts[0]
second_most_recent_attempt = (
recent_attempts[1] if len(recent_attempts) > 1 else None
)
if one_hour_ago > most_recent_attempt.time_created:
continue
if one_hour_ago > most_recent_attempt.time_created:
continue
# Connector start latency
start_latency_metric = _build_connector_start_latency_metric(
cc_pair, most_recent_attempt, second_most_recent_attempt, redis_std
)
if start_latency_metric:
metrics.append(start_latency_metric)
# Connector start latency
start_latency_metric = _build_connector_start_latency_metric(
cc_pair, most_recent_attempt, second_most_recent_attempt, redis_std
)
if start_latency_metric:
metrics.append(start_latency_metric)
# Connector run success/failure
final_metrics = _build_connector_final_metrics(
cc_pair, recent_attempts, redis_std
)
metrics.extend(final_metrics)
# Connector run success/failure
run_success_metrics = _build_run_success_metrics(
cc_pair, recent_attempts, redis_std
)
metrics.extend(run_success_metrics)
return metrics
def _collect_sync_metrics(db_session: Session, redis_std: Redis) -> list[Metric]:
"""
Collect metrics for document set and group syncing:
- Success/failure status
- Start latency (always)
- Duration & doc count (only if success)
- Throughput (docs/min) (only if success)
"""
"""Collect metrics about document set and group syncing speed"""
# NOTE: use get_db_current_time since the SyncRecord times are set based on DB time
one_hour_ago = get_db_current_time(db_session) - timedelta(hours=1)
# Get all sync records that ended in the last hour
# Get all sync records from the last hour
recent_sync_records = db_session.scalars(
select(SyncRecord)
.where(SyncRecord.sync_end_time.isnot(None))
.where(SyncRecord.sync_end_time >= one_hour_ago)
.order_by(SyncRecord.sync_end_time.desc())
.where(SyncRecord.sync_start_time >= one_hour_ago)
.order_by(SyncRecord.sync_start_time.desc())
).all()
task_logger.info(
f"Collecting sync metrics for {len(recent_sync_records)} sync records"
)
metrics = []
for sync_record in recent_sync_records:
# Build a job_id for correlation
job_id = build_job_id("sync_record", str(sync_record.id))
# Skip if no end time (sync still in progress)
if not sync_record.sync_end_time:
continue
# Emit a SUCCESS/FAIL boolean metric
# Use a single Redis key to avoid re-emitting final metrics
final_metric_key = _FINAL_METRIC_KEY_FMT.format(
sync_type=sync_record.sync_type,
entity_id=sync_record.entity_id,
sync_record_id=sync_record.id,
# Check if we already emitted a metric for this sync record
metric_key = (
f"sync_speed:{sync_record.sync_type}:"
f"{sync_record.entity_id}:{sync_record.id}"
)
if not _has_metric_been_emitted(redis_std, final_metric_key):
# Evaluate success
sync_succeeded = sync_record.sync_status == SyncStatus.SUCCESS
metrics.append(
Metric(
key=final_metric_key,
name="sync_run_succeeded",
value=sync_succeeded,
tags={
"job_id": job_id,
"sync_type": str(sync_record.sync_type),
"status": str(sync_record.sync_status),
},
)
if _has_metric_been_emitted(redis_std, metric_key):
task_logger.info(
f"Skipping metric for sync record {sync_record.id} "
"because it has already been emitted"
)
continue
# If successful, emit additional metrics
if sync_succeeded:
if sync_record.sync_end_time and sync_record.sync_start_time:
duration_seconds = (
sync_record.sync_end_time - sync_record.sync_start_time
).total_seconds()
else:
task_logger.error(
f"Invalid times for sync record {sync_record.id}: "
f"start={sync_record.sync_start_time}, end={sync_record.sync_end_time}"
)
duration_seconds = None
# Calculate sync duration in minutes
sync_duration_mins = (
sync_record.sync_end_time - sync_record.sync_start_time
).total_seconds() / 60.0
doc_count = sync_record.num_docs_synced or 0
sync_speed = None
if duration_seconds and duration_seconds > 0:
duration_mins = duration_seconds / 60.0
sync_speed = (
doc_count / duration_mins if duration_mins > 0 else None
)
# Emit duration, doc count, speed
if duration_seconds is not None:
metrics.append(
Metric(
key=None,
name="sync_duration_seconds",
value=duration_seconds,
tags={
"job_id": job_id,
"sync_type": str(sync_record.sync_type),
},
)
)
else:
task_logger.error(
f"Invalid sync record {sync_record.id} with no duration"
)
metrics.append(
Metric(
key=None,
name="sync_doc_count",
value=doc_count,
tags={
"job_id": job_id,
"sync_type": str(sync_record.sync_type),
},
)
)
if sync_speed is not None:
metrics.append(
Metric(
key=None,
name="sync_speed_docs_per_min",
value=sync_speed,
tags={
"job_id": job_id,
"sync_type": str(sync_record.sync_type),
},
)
)
else:
task_logger.error(
f"Invalid sync record {sync_record.id} with no duration"
)
# Mark final metrics as emitted so we don't re-emit
_mark_metric_as_emitted(redis_std, final_metric_key)
# Emit start latency
start_latency_key = _SYNC_START_LATENCY_KEY_FMT.format(
sync_type=sync_record.sync_type,
entity_id=sync_record.entity_id,
sync_record_id=sync_record.id,
# Calculate sync speed (docs/min) - avoid division by zero
sync_speed = (
sync_record.num_docs_synced / sync_duration_mins
if sync_duration_mins > 0
else None
)
if not _has_metric_been_emitted(redis_std, start_latency_key):
# Get the entity's last update time based on sync type
entity: DocumentSet | UserGroup | None = None
if sync_record.sync_type == SyncType.DOCUMENT_SET:
entity = db_session.scalar(
select(DocumentSet).where(DocumentSet.id == sync_record.entity_id)
)
elif sync_record.sync_type == SyncType.USER_GROUP:
entity = db_session.scalar(
select(UserGroup).where(UserGroup.id == sync_record.entity_id)
)
else:
task_logger.info(
f"Skipping sync record {sync_record.id} of type {sync_record.sync_type}."
)
continue
if entity is None:
task_logger.error(
f"Could not find entity for sync record {sync_record.id} "
f"(type={sync_record.sync_type}, id={sync_record.entity_id})."
)
continue
if sync_speed is None:
task_logger.error(
f"Something went wrong with sync speed calculation. "
f"Sync record: {sync_record.id}, duration: {sync_duration_mins}, "
f"docs synced: {sync_record.num_docs_synced}"
)
continue
# Calculate start latency in seconds:
# (actual sync start) - (last modified time)
if entity.time_last_modified_by_user and sync_record.sync_start_time:
start_latency = (
sync_record.sync_start_time - entity.time_last_modified_by_user
).total_seconds()
task_logger.info(
f"Calculated sync speed for record {sync_record.id}: {sync_speed} docs/min"
)
metrics.append(
Metric(
key=metric_key,
name="sync_speed_docs_per_min",
value=sync_speed,
tags={
"sync_type": str(sync_record.sync_type),
"status": str(sync_record.sync_status),
},
)
)
if start_latency < 0:
task_logger.error(
f"Negative start latency for sync record {sync_record.id} "
f"(start={sync_record.sync_start_time}, entity_modified={entity.time_last_modified_by_user})"
)
continue
# Add sync start latency metric
start_latency_key = (
f"sync_start_latency:{sync_record.sync_type}"
f":{sync_record.entity_id}:{sync_record.id}"
)
if _has_metric_been_emitted(redis_std, start_latency_key):
task_logger.info(
f"Skipping start latency metric for sync record {sync_record.id} "
"because it has already been emitted"
)
continue
metrics.append(
Metric(
key=start_latency_key,
name="sync_start_latency_seconds",
value=start_latency,
tags={
"job_id": job_id,
"sync_type": str(sync_record.sync_type),
},
)
)
# Get the entity's last update time based on sync type
entity: DocumentSet | UserGroup | None = None
if sync_record.sync_type == SyncType.DOCUMENT_SET:
entity = db_session.scalar(
select(DocumentSet).where(DocumentSet.id == sync_record.entity_id)
)
elif sync_record.sync_type == SyncType.USER_GROUP:
entity = db_session.scalar(
select(UserGroup).where(UserGroup.id == sync_record.entity_id)
)
else:
# Skip other sync types
task_logger.info(
f"Skipping sync record {sync_record.id} "
f"with type {sync_record.sync_type} "
f"and id {sync_record.entity_id} "
"because it is not a document set or user group"
)
continue
_mark_metric_as_emitted(redis_std, start_latency_key)
if entity is None:
task_logger.error(
f"Could not find entity for sync record {sync_record.id} "
f"with type {sync_record.sync_type} and id {sync_record.entity_id}"
)
continue
# Calculate start latency in seconds
start_latency = (
sync_record.sync_start_time - entity.time_last_modified_by_user
).total_seconds()
task_logger.info(
f"Calculated start latency for sync record {sync_record.id}: {start_latency} seconds"
)
if start_latency < 0:
task_logger.error(
f"Start latency is negative for sync record {sync_record.id} "
f"with type {sync_record.sync_type} and id {sync_record.entity_id}. "
f"Sync start time: {sync_record.sync_start_time}, "
f"Entity last modified: {entity.time_last_modified_by_user}"
)
continue
metrics.append(
Metric(
key=start_latency_key,
name="sync_start_latency_seconds",
value=start_latency,
tags={
"sync_type": str(sync_record.sync_type),
},
)
)
return metrics
def build_job_id(
job_type: Literal["connector", "sync_record"],
primary_id: str,
secondary_id: str | None = None,
) -> str:
if job_type == "connector":
if secondary_id is None:
raise ValueError(
"secondary_id (attempt_id) is required for connector job_type"
)
return f"connector:{primary_id}:attempt:{secondary_id}"
elif job_type == "sync_record":
return f"sync_record:{primary_id}"
@shared_task(
name=OnyxCeleryTask.MONITOR_BACKGROUND_PROCESSES,
ignore_result=True,
soft_time_limit=_MONITORING_SOFT_TIME_LIMIT,
time_limit=_MONITORING_TIME_LIMIT,
queue=OnyxCeleryQueues.MONITORING,
@@ -601,7 +459,6 @@ def monitor_background_processes(self: Task, *, tenant_id: str | None) -> None:
lambda: _collect_connector_metrics(db_session, redis_std),
lambda: _collect_sync_metrics(db_session, redis_std),
]
# Collect and log each metric
with get_session_with_tenant(tenant_id) as db_session:
for metric_fn in metric_functions:
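The collectors above dedupe across beat runs by marking emitted metrics in Redis. A minimal sketch of that emit-once pattern, assuming a local Redis and illustrative key/TTL choices:

from collections.abc import Callable

import redis

r = redis.Redis()
ONE_DAY_SECONDS = 24 * 60 * 60

def emit_once(key: str, emit_fn: Callable[[], None]) -> None:
    if r.exists(key):
        return  # already emitted for this attempt/record
    emit_fn()
    r.set(key, 1, ex=ONE_DAY_SECONDS)  # mark as emitted, with an expiry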

View File

@@ -78,7 +78,6 @@ def _is_pruning_due(cc_pair: ConnectorCredentialPair) -> bool:
@shared_task(
name=OnyxCeleryTask.CHECK_FOR_PRUNING,
ignore_result=True,
soft_time_limit=JOB_TIMEOUT,
bind=True,
)

View File

@@ -33,7 +33,6 @@ from onyx.document_index.interfaces import VespaDocumentFields
from onyx.redis.redis_pool import get_redis_client
from onyx.redis.redis_pool import redis_lock_dump
from onyx.server.documents.models import ConnectorCredentialPairIdentifier
from shared_configs.configs import IGNORED_SYNCING_TENANT_LIST
DOCUMENT_BY_CC_PAIR_CLEANUP_MAX_RETRIES = 3
@@ -214,7 +213,6 @@ def document_by_cc_pair_cleanup_task(
@shared_task(
name=OnyxCeleryTask.CLOUD_BEAT_TASK_GENERATOR,
ignore_result=True,
trail=False,
bind=True,
)
@@ -249,10 +247,6 @@ def cloud_beat_task_generator(
lock_beat.reacquire()
last_lock_time = current_time
# needed in the cloud
if IGNORED_SYNCING_TENANT_LIST and tenant_id in IGNORED_SYNCING_TENANT_LIST:
continue
self.app.send_task(
task_name,
kwargs=dict(

View File

@@ -78,7 +78,6 @@ from onyx.redis.redis_connector_index import RedisConnectorIndex
from onyx.redis.redis_connector_prune import RedisConnectorPrune
from onyx.redis.redis_document_set import RedisDocumentSet
from onyx.redis.redis_pool import get_redis_client
from onyx.redis.redis_pool import get_redis_replica_client
from onyx.redis.redis_pool import redis_lock_dump
from onyx.redis.redis_pool import SCAN_ITER_COUNT_DEFAULT
from onyx.redis.redis_usergroup import RedisUserGroup
@@ -98,7 +97,6 @@ logger = setup_logger()
# which bloats the result metadata considerably. trail=False prevents this.
@shared_task(
name=OnyxCeleryTask.CHECK_FOR_VESPA_SYNC_TASK,
ignore_result=True,
soft_time_limit=JOB_TIMEOUT,
trail=False,
bind=True,
@@ -873,12 +871,7 @@ def monitor_ccpair_indexing_taskset(
redis_connector_index.reset()
@shared_task(
name=OnyxCeleryTask.MONITOR_VESPA_SYNC,
ignore_result=True,
soft_time_limit=300,
bind=True,
)
@shared_task(name=OnyxCeleryTask.MONITOR_VESPA_SYNC, soft_time_limit=300, bind=True)
def monitor_vespa_sync(self: Task, tenant_id: str | None) -> bool | None:
"""This is a celery beat task that monitors and finalizes various long running tasks.
@@ -902,17 +895,6 @@ def monitor_vespa_sync(self: Task, tenant_id: str | None) -> bool | None:
r = get_redis_client(tenant_id=tenant_id)
# Replica usage notes
#
# False negatives are OK. (aka fail to see a key that exists on the master).
# We simply skip the monitoring work and it will be caught on the next pass.
#
# False positives are not OK, and are possible if we clear a fence on the master and
# then read from the replica. In this case, monitoring work could be done on a fence
# that no longer exists. To avoid this, we scan from the replica, but double check
# the result on the master.
r_replica = get_redis_replica_client(tenant_id=tenant_id)
lock_beat: RedisLock = r.lock(
OnyxRedisLocks.MONITOR_VESPA_SYNC_BEAT_LOCK,
timeout=CELERY_VESPA_SYNC_BEAT_LOCK_TIMEOUT,
@@ -972,19 +954,17 @@ def monitor_vespa_sync(self: Task, tenant_id: str | None) -> bool | None:
# scan and monitor activity to completion
phase_start = time.monotonic()
lock_beat.reacquire()
if r_replica.exists(RedisConnectorCredentialPair.get_fence_key()):
if r.exists(RedisConnectorCredentialPair.get_fence_key()):
monitor_connector_taskset(r)
if r.exists(RedisConnectorCredentialPair.get_fence_key()):
monitor_connector_taskset(r)
timings["connector"] = time.monotonic() - phase_start
timings["connector_ttl"] = r.ttl(OnyxRedisLocks.MONITOR_VESPA_SYNC_BEAT_LOCK)
phase_start = time.monotonic()
lock_beat.reacquire()
for key_bytes in r_replica.scan_iter(
for key_bytes in r.scan_iter(
RedisConnectorDelete.FENCE_PREFIX + "*", count=SCAN_ITER_COUNT_DEFAULT
):
if r.exists(key_bytes):
monitor_connector_deletion_taskset(tenant_id, key_bytes, r)
monitor_connector_deletion_taskset(tenant_id, key_bytes, r)
lock_beat.reacquire()
timings["connector_deletion"] = time.monotonic() - phase_start
@@ -994,74 +974,66 @@ def monitor_vespa_sync(self: Task, tenant_id: str | None) -> bool | None:
phase_start = time.monotonic()
lock_beat.reacquire()
for key_bytes in r_replica.scan_iter(
for key_bytes in r.scan_iter(
RedisDocumentSet.FENCE_PREFIX + "*", count=SCAN_ITER_COUNT_DEFAULT
):
if r.exists(key_bytes):
with get_session_with_tenant(tenant_id) as db_session:
monitor_document_set_taskset(tenant_id, key_bytes, r, db_session)
with get_session_with_tenant(tenant_id) as db_session:
monitor_document_set_taskset(tenant_id, key_bytes, r, db_session)
lock_beat.reacquire()
timings["documentset"] = time.monotonic() - phase_start
timings["documentset_ttl"] = r.ttl(OnyxRedisLocks.MONITOR_VESPA_SYNC_BEAT_LOCK)
phase_start = time.monotonic()
lock_beat.reacquire()
for key_bytes in r_replica.scan_iter(
for key_bytes in r.scan_iter(
RedisUserGroup.FENCE_PREFIX + "*", count=SCAN_ITER_COUNT_DEFAULT
):
if r.exists(key_bytes):
monitor_usergroup_taskset = (
fetch_versioned_implementation_with_fallback(
"onyx.background.celery.tasks.vespa.tasks",
"monitor_usergroup_taskset",
noop_fallback,
)
)
with get_session_with_tenant(tenant_id) as db_session:
monitor_usergroup_taskset(tenant_id, key_bytes, r, db_session)
monitor_usergroup_taskset = fetch_versioned_implementation_with_fallback(
"onyx.background.celery.tasks.vespa.tasks",
"monitor_usergroup_taskset",
noop_fallback,
)
with get_session_with_tenant(tenant_id) as db_session:
monitor_usergroup_taskset(tenant_id, key_bytes, r, db_session)
lock_beat.reacquire()
timings["usergroup"] = time.monotonic() - phase_start
timings["usergroup_ttl"] = r.ttl(OnyxRedisLocks.MONITOR_VESPA_SYNC_BEAT_LOCK)
phase_start = time.monotonic()
lock_beat.reacquire()
for key_bytes in r_replica.scan_iter(
for key_bytes in r.scan_iter(
RedisConnectorPrune.FENCE_PREFIX + "*", count=SCAN_ITER_COUNT_DEFAULT
):
if r.exists(key_bytes):
with get_session_with_tenant(tenant_id) as db_session:
monitor_ccpair_pruning_taskset(tenant_id, key_bytes, r, db_session)
with get_session_with_tenant(tenant_id) as db_session:
monitor_ccpair_pruning_taskset(tenant_id, key_bytes, r, db_session)
lock_beat.reacquire()
timings["pruning"] = time.monotonic() - phase_start
timings["pruning_ttl"] = r.ttl(OnyxRedisLocks.MONITOR_VESPA_SYNC_BEAT_LOCK)
phase_start = time.monotonic()
lock_beat.reacquire()
for key_bytes in r_replica.scan_iter(
for key_bytes in r.scan_iter(
RedisConnectorIndex.FENCE_PREFIX + "*", count=SCAN_ITER_COUNT_DEFAULT
):
if r.exists(key_bytes):
with get_session_with_tenant(tenant_id) as db_session:
monitor_ccpair_indexing_taskset(tenant_id, key_bytes, r, db_session)
with get_session_with_tenant(tenant_id) as db_session:
monitor_ccpair_indexing_taskset(tenant_id, key_bytes, r, db_session)
lock_beat.reacquire()
timings["indexing"] = time.monotonic() - phase_start
timings["indexing_ttl"] = r.ttl(OnyxRedisLocks.MONITOR_VESPA_SYNC_BEAT_LOCK)
phase_start = time.monotonic()
lock_beat.reacquire()
for key_bytes in r_replica.scan_iter(
for key_bytes in r.scan_iter(
RedisConnectorPermissionSync.FENCE_PREFIX + "*",
count=SCAN_ITER_COUNT_DEFAULT,
):
if r.exists(key_bytes):
with get_session_with_tenant(tenant_id) as db_session:
monitor_ccpair_permissions_taskset(
tenant_id, key_bytes, r, db_session
)
with get_session_with_tenant(tenant_id) as db_session:
monitor_ccpair_permissions_taskset(tenant_id, key_bytes, r, db_session)
lock_beat.reacquire()
timings["permissions"] = time.monotonic() - phase_start
timings["permissions_ttl"] = r.ttl(OnyxRedisLocks.MONITOR_VESPA_SYNC_BEAT_LOCK)
except SoftTimeLimitExceeded:
task_logger.info(
"Soft time limit exceeded, task is being terminated gracefully."

View File

@@ -15,7 +15,6 @@ from onyx.llm.models import PreviousMessage
from onyx.llm.utils import build_content_with_imgs
from onyx.llm.utils import check_message_tokens
from onyx.llm.utils import message_to_prompt_and_imgs
from onyx.llm.utils import model_supports_image_input
from onyx.natural_language_processing.utils import get_tokenizer
from onyx.prompts.chat_prompts import CHAT_USER_CONTEXT_FREE_PROMPT
from onyx.prompts.direct_qa_prompts import HISTORY_BLOCK
@@ -91,7 +90,6 @@ class AnswerPromptBuilder:
provider_type=llm_config.model_provider,
model_name=llm_config.model_name,
)
self.llm_config = llm_config
self.llm_tokenizer_encode_func = cast(
Callable[[str], list[int]], llm_tokenizer.encode
)
@@ -100,21 +98,12 @@ class AnswerPromptBuilder:
(
self.message_history,
self.history_token_cnts,
) = translate_history_to_basemessages(
message_history,
exclude_images=not model_supports_image_input(
self.llm_config.model_name,
self.llm_config.model_provider,
),
)
) = translate_history_to_basemessages(message_history)
self.system_message_and_token_cnt: tuple[SystemMessage, int] | None = None
self.user_message_and_token_cnt = (
user_message,
check_message_tokens(
user_message,
self.llm_tokenizer_encode_func,
),
check_message_tokens(user_message, self.llm_tokenizer_encode_func),
)
self.new_messages_and_token_cnts: list[tuple[BaseMessage, int]] = []

View File

@@ -11,7 +11,6 @@ from onyx.llm.utils import build_content_with_imgs
def translate_onyx_msg_to_langchain(
msg: ChatMessage | PreviousMessage,
exclude_images: bool = False,
) -> BaseMessage:
files: list[InMemoryChatFile] = []
@@ -19,9 +18,7 @@ def translate_onyx_msg_to_langchain(
# attached. Just ignore them for now.
if not isinstance(msg, ChatMessage):
files = msg.files
content = build_content_with_imgs(
msg.message, files, message_type=msg.message_type, exclude_images=exclude_images
)
content = build_content_with_imgs(msg.message, files, message_type=msg.message_type)
if msg.message_type == MessageType.SYSTEM:
raise ValueError("System messages are not currently part of history")
@@ -35,12 +32,9 @@ def translate_onyx_msg_to_langchain(
def translate_history_to_basemessages(
history: list[ChatMessage] | list["PreviousMessage"],
exclude_images: bool = False,
) -> tuple[list[BaseMessage], list[int]]:
history_basemessages = [
translate_onyx_msg_to_langchain(msg, exclude_images)
for msg in history
if msg.token_count != 0
translate_onyx_msg_to_langchain(msg) for msg in history if msg.token_count != 0
]
history_token_counts = [msg.token_count for msg in history if msg.token_count != 0]
return history_basemessages, history_token_counts

View File

@@ -200,8 +200,6 @@ REDIS_HOST = os.environ.get("REDIS_HOST") or "localhost"
REDIS_PORT = int(os.environ.get("REDIS_PORT", 6379))
REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD") or ""
# this assumes that other redis settings remain the same as the primary
REDIS_REPLICA_HOST = os.environ.get("REDIS_REPLICA_HOST") or REDIS_HOST
REDIS_AUTH_KEY_PREFIX = "fastapi_users_token:"

View File

@@ -232,29 +232,20 @@ class ConfluenceConnector(LoadConnector, PollConnector, SlimConnector):
}
# Get labels
label_dicts = (
confluence_object.get("metadata", {}).get("labels", {}).get("results", [])
)
page_labels = [label.get("name") for label in label_dicts if label.get("name")]
label_dicts = confluence_object["metadata"]["labels"]["results"]
page_labels = [label["name"] for label in label_dicts]
if page_labels:
doc_metadata["labels"] = page_labels
# Get last modified and author email
version_dict = confluence_object.get("version", {})
last_modified = (
datetime_from_string(version_dict.get("when"))
if version_dict.get("when")
else None
)
author_email = version_dict.get("by", {}).get("email")
title = confluence_object.get("title", "Untitled Document")
last_modified = datetime_from_string(confluence_object["version"]["when"])
author_email = confluence_object["version"].get("by", {}).get("email")
return Document(
id=object_url,
sections=[Section(link=object_url, text=object_text)],
source=DocumentSource.CONFLUENCE,
semantic_identifier=title,
semantic_identifier=confluence_object["title"],
doc_updated_at=last_modified,
primary_owners=(
[BasicExpertInfo(email=author_email)] if author_email else None
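The two sides of this hunk trade defensive .get() chains for direct indexing. The difference on a sparse payload, as a runnable sketch:

confluence_object: dict = {"title": "Page", "metadata": {}}

# Chained .get() calls degrade to defaults when Confluence omits fields:
labels = (
    confluence_object.get("metadata", {}).get("labels", {}).get("results", [])
)  # -> []
title = confluence_object.get("title", "Untitled Document")  # -> "Page"

# Direct indexing raises on the same payload:
# confluence_object["metadata"]["labels"]["results"]  # KeyError: 'labels'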

View File

@@ -6,7 +6,6 @@ from datetime import datetime
from datetime import timezone
from typing import Any
from typing import Optional
from urllib.parse import unquote
import msal # type: ignore
from office365.graph_client import GraphClient # type: ignore
@@ -83,13 +82,8 @@ class SharepointConnector(LoadConnector, PollConnector):
sites_index = parts.index("sites")
site_url = "/".join(parts[: sites_index + 2])
folder = (
"/".join(unquote(part) for part in parts[sites_index + 2 :])
if len(parts) > sites_index + 2
else None
parts[sites_index + 2] if len(parts) > sites_index + 2 else None
)
# Handling for new URL structure
if folder and folder.startswith("Shared Documents/"):
folder = folder[len("Shared Documents/") :]
site_data_list.append(
SiteData(url=site_url, folder=folder, sites=[], driveitems=[])
)
@@ -117,19 +111,11 @@ class SharepointConnector(LoadConnector, PollConnector):
query = query.filter(filter_str)
driveitems = query.execute_query()
if element.folder:
expected_path = f"/root:/{element.folder}"
filtered_driveitems = [
item
for item in driveitems
if item.parent_reference.path.endswith(expected_path)
if element.folder in item.parent_reference.path
]
if len(filtered_driveitems) == 0:
all_paths = [
item.parent_reference.path for item in driveitems
]
logger.warning(
f"Nothing found for folder '{expected_path}' in any of valid paths: {all_paths}"
)
element.driveitems.extend(filtered_driveitems)
else:
element.driveitems.extend(driveitems)

View File

@@ -193,13 +193,13 @@ def fetch_input_prompts_by_user(
"""
Returns all prompts belonging to the user or public prompts,
excluding those the user has specifically disabled.
Also, if `user_id` is None and AUTH_TYPE is DISABLED, then all prompts are returned.
"""
# Start with a basic query for InputPrompt
query = select(InputPrompt)
# If we have a user, left join to InputPrompt__User so we can check "disabled"
if user_id is not None:
# If we have a user, left join to InputPrompt__User to check "disabled"
IPU = aliased(InputPrompt__User)
query = query.join(
IPU,
@@ -208,30 +208,25 @@ def fetch_input_prompts_by_user(
)
# Exclude disabled prompts
# i.e. keep only those where (IPU.disabled is NULL or False)
query = query.where(or_(IPU.disabled.is_(None), IPU.disabled.is_(False)))
if include_public:
# Return both user-owned and public prompts
# user-owned or public
query = query.where(
or_(
InputPrompt.user_id == user_id,
InputPrompt.is_public,
)
(InputPrompt.user_id == user_id) | (InputPrompt.is_public)
)
else:
# Return only user-owned prompts
# only user-owned prompts
query = query.where(InputPrompt.user_id == user_id)
else:
# user_id is None
if AUTH_TYPE == AuthType.DISABLED:
# If auth is disabled, return all prompts
query = query.where(True) # type: ignore
elif include_public:
# Anonymous usage
query = query.where(InputPrompt.is_public)
# If no user is logged in, get all prompts (public and private)
if user_id is None and AUTH_TYPE == AuthType.DISABLED:
query = query.where(True) # type: ignore
# Default to returning all prompts
# If no user is logged in but we want to include public prompts
elif include_public:
query = query.where(InputPrompt.is_public)
if active is not None:
query = query.where(InputPrompt.active == active)
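A compact, in-memory sketch of the visibility rules this query implements (hypothetical dict records standing in for InputPrompt rows; the per-user "disabled" join is omitted):

def prompt_visible(
    prompt: dict, user_id: int | None, include_public: bool, auth_disabled: bool
) -> bool:
    if user_id is not None:
        if prompt["user_id"] == user_id:
            return True  # always see your own prompts
        return include_public and prompt["is_public"]
    if auth_disabled:
        return True  # no login + auth disabled: every prompt is visible
    return include_public and prompt["is_public"]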

View File

@@ -8,64 +8,20 @@ from sqlalchemy.orm import Session
from onyx.db.enums import SyncStatus
from onyx.db.enums import SyncType
from onyx.db.models import SyncRecord
from onyx.setup import setup_logger
logger = setup_logger()
def insert_sync_record(
db_session: Session,
entity_id: int,
entity_id: int | None,
sync_type: SyncType,
) -> SyncRecord:
"""Insert a new sync record into the database, cancelling any existing in-progress records.
"""Insert a new sync record into the database.
Args:
db_session: The database session to use
entity_id: The ID of the entity being synced (document set ID, user group ID, etc.)
sync_type: The type of sync operation
"""
# If an existing in-progress sync record exists, mark as cancelled
existing_in_progress_sync_record = fetch_latest_sync_record(
db_session, entity_id, sync_type, sync_status=SyncStatus.IN_PROGRESS
)
if existing_in_progress_sync_record is not None:
logger.info(
f"Cancelling existing in-progress sync record {existing_in_progress_sync_record.id} "
f"for entity_id={entity_id} sync_type={sync_type}"
)
mark_sync_records_as_cancelled(db_session, entity_id, sync_type)
return _create_sync_record(db_session, entity_id, sync_type)
def mark_sync_records_as_cancelled(
db_session: Session,
entity_id: int | None,
sync_type: SyncType,
) -> None:
stmt = (
update(SyncRecord)
.where(
and_(
SyncRecord.entity_id == entity_id,
SyncRecord.sync_type == sync_type,
SyncRecord.sync_status == SyncStatus.IN_PROGRESS,
)
)
.values(sync_status=SyncStatus.CANCELED)
)
db_session.execute(stmt)
db_session.commit()
def _create_sync_record(
db_session: Session,
entity_id: int | None,
sync_type: SyncType,
) -> SyncRecord:
"""Create and insert a new sync record into the database."""
sync_record = SyncRecord(
entity_id=entity_id,
sync_type=sync_type,
@@ -83,7 +39,6 @@ def fetch_latest_sync_record(
db_session: Session,
entity_id: int,
sync_type: SyncType,
sync_status: SyncStatus | None = None,
) -> SyncRecord | None:
"""Fetch the most recent sync record for a given entity ID and status.
@@ -104,9 +59,6 @@ def fetch_latest_sync_record(
.limit(1)
)
if sync_status is not None:
stmt = stmt.where(SyncRecord.sync_status == sync_status)
result = db_session.execute(stmt)
return result.scalar_one_or_none()

View File

@@ -142,7 +142,6 @@ def build_content_with_imgs(
img_urls: list[str] | None = None,
b64_imgs: list[str] | None = None,
message_type: MessageType = MessageType.USER,
exclude_images: bool = False,
) -> str | list[str | dict[str, Any]]: # matching Langchain's BaseMessage content type
files = files or []
@@ -158,7 +157,7 @@ def build_content_with_imgs(
message_main_content = _build_content(message, files)
if exclude_images or (not img_files and not img_urls):
if not img_files and not img_urls:
return message_main_content
return cast(
@@ -383,19 +382,9 @@ def _strip_colon_from_model_name(model_name: str) -> str:
return ":".join(model_name.split(":")[:-1]) if ":" in model_name else model_name
def _find_model_obj(model_map: dict, provider: str, model_name: str) -> dict | None:
stripped_model_name = _strip_extra_provider_from_model_name(model_name)
model_names = [
model_name,
_strip_extra_provider_from_model_name(model_name),
# Remove leading extra provider. Usually for cases where user has a
# custom model proxy which appends another prefix
# remove :XXXX from the end, if present. Needed for ollama.
_strip_colon_from_model_name(model_name),
_strip_colon_from_model_name(stripped_model_name),
]
def _find_model_obj(
model_map: dict, provider: str, model_names: list[str | None]
) -> dict | None:
# Filter out None values and deduplicate model names
filtered_model_names = [name for name in model_names if name]
@@ -428,10 +417,21 @@ def get_llm_max_tokens(
return GEN_AI_MAX_TOKENS
try:
extra_provider_stripped_model_name = _strip_extra_provider_from_model_name(
model_name
)
model_obj = _find_model_obj(
model_map,
model_provider,
model_name,
[
model_name,
# Remove leading extra provider. Usually for cases where user has a
# custom model proxy which appends another prefix
extra_provider_stripped_model_name,
# remove :XXXX from the end, if present. Needed for ollama.
_strip_colon_from_model_name(model_name),
_strip_colon_from_model_name(extra_provider_stripped_model_name),
],
)
if not model_obj:
raise RuntimeError(
@@ -523,23 +523,3 @@ def get_max_input_tokens(
raise RuntimeError("No tokens for input for the LLM given settings")
return input_toks
def model_supports_image_input(model_name: str, model_provider: str) -> bool:
model_map = get_model_map()
try:
model_obj = _find_model_obj(
model_map,
model_provider,
model_name,
)
if not model_obj:
raise RuntimeError(
f"No litellm entry found for {model_provider}/{model_name}"
)
return model_obj.get("supports_vision", False)
except Exception:
logger.exception(
f"Failed to get model object for {model_provider}/{model_name}"
)
return False

View File

@@ -21,7 +21,6 @@ from onyx.configs.app_configs import REDIS_HOST
from onyx.configs.app_configs import REDIS_PASSWORD
from onyx.configs.app_configs import REDIS_POOL_MAX_CONNECTIONS
from onyx.configs.app_configs import REDIS_PORT
from onyx.configs.app_configs import REDIS_REPLICA_HOST
from onyx.configs.app_configs import REDIS_SSL
from onyx.configs.app_configs import REDIS_SSL_CA_CERTS
from onyx.configs.app_configs import REDIS_SSL_CERT_REQS
@@ -133,32 +132,23 @@ class RedisPool:
_instance: Optional["RedisPool"] = None
_lock: threading.Lock = threading.Lock()
_pool: redis.BlockingConnectionPool
_replica_pool: redis.BlockingConnectionPool
def __new__(cls) -> "RedisPool":
if not cls._instance:
with cls._lock:
if not cls._instance:
cls._instance = super(RedisPool, cls).__new__(cls)
cls._instance._init_pools()
cls._instance._init_pool()
return cls._instance
def _init_pools(self) -> None:
def _init_pool(self) -> None:
self._pool = RedisPool.create_pool(ssl=REDIS_SSL)
self._replica_pool = RedisPool.create_pool(
host=REDIS_REPLICA_HOST, ssl=REDIS_SSL
)
def get_client(self, tenant_id: str | None) -> Redis:
if tenant_id is None:
tenant_id = "public"
return TenantRedis(tenant_id, connection_pool=self._pool)
def get_replica_client(self, tenant_id: str | None) -> Redis:
if tenant_id is None:
tenant_id = "public"
return TenantRedis(tenant_id, connection_pool=self._replica_pool)
@staticmethod
def create_pool(
host: str = REDIS_HOST,
@@ -222,10 +212,6 @@ def get_redis_client(*, tenant_id: str | None) -> Redis:
return redis_pool.get_client(tenant_id)
def get_redis_replica_client(*, tenant_id: str | None) -> Redis:
return redis_pool.get_replica_client(tenant_id)
SSL_CERT_REQS_MAP = {
"none": ssl.CERT_NONE,
"optional": ssl.CERT_OPTIONAL,

View File

@@ -6184,7 +6184,7 @@
"chunk_ind": 0
},
{
"url": "https://docs.onyx.app/more/use_cases/support",
"url": "https://docs.onyx.app/more/use_cases/customer_support",
"title": "Customer Support",
"content": "Help your customer support team instantly answer any question across your entire product.\n\nAI Enabled Support\nCustomer support agents have one of the highest breadth jobs. They field requests that cover the entire surface area of the product and need to help your users find success on extremely short timelines. Because they're not the same people who designed or built the system, they often lack the depth of understanding needed - resulting in delays and escalations to other teams. Modern teams are leveraging AI to help their CS team optimize the speed and quality of these critical customer-facing interactions.\n\nThe Importance of Context\nThere are two critical components of AI copilots for customer support. The first is that the AI system needs to be connected with as much information as possible (not just support tools like Zendesk or Intercom) and that the knowledge needs to be as fresh as possible. Sometimes a fix might even be in places rarely checked by CS such as pull requests in a code repository. The second critical component is the ability of the AI system to break down difficult concepts and convoluted processes into more digestible descriptions and for your team members to be able to chat back and forth with the system to build a better understanding.\n\nOnyx takes care of both of these. The system connects up to over 30+ different applications and the knowledge is pulled in constantly so that the information access is always up to date.",
"title_embedding": [

View File

@@ -24,7 +24,7 @@
"chunk_ind": 0
},
{
"url": "https://docs.onyx.app/more/use_cases/support",
"url": "https://docs.onyx.app/more/use_cases/customer_support",
"title": "Customer Support",
"content": "Help your customer support team instantly answer any question across your entire product.\n\nAI Enabled Support\nCustomer support agents have one of the highest breadth jobs. They field requests that cover the entire surface area of the product and need to help your users find success on extremely short timelines. Because they're not the same people who designed or built the system, they often lack the depth of understanding needed - resulting in delays and escalations to other teams. Modern teams are leveraging AI to help their CS team optimize the speed and quality of these critical customer-facing interactions.\n\nThe Importance of Context\nThere are two critical components of AI copilots for customer support. The first is that the AI system needs to be connected with as much information as possible (not just support tools like Zendesk or Intercom) and that the knowledge needs to be as fresh as possible. Sometimes a fix might even be in places rarely checked by CS such as pull requests in a code repository. The second critical component is the ability of the AI system to break down difficult concepts and convoluted processes into more digestible descriptions and for your team members to be able to chat back and forth with the system to build a better understanding.\n\nOnyx takes care of both of these. The system connects up to over 30+ different applications and the knowledge is pulled in constantly so that the information access is always up to date.",
"chunk_ind": 0

View File

@@ -16,7 +16,6 @@ from onyx.llm.interfaces import LLM
from onyx.llm.models import PreviousMessage
from onyx.llm.utils import build_content_with_imgs
from onyx.llm.utils import message_to_string
from onyx.llm.utils import model_supports_image_input
from onyx.prompts.constants import GENERAL_SEP_PAT
from onyx.tools.message import ToolCallSummary
from onyx.tools.models import ToolResponse
@@ -317,22 +316,12 @@ class ImageGenerationTool(Tool):
for img in img_generation_response
if img.image_data is not None
]
user_prompt = build_image_generation_user_prompt(
query=prompt_builder.get_user_message_content(),
supports_image_input=model_supports_image_input(
prompt_builder.llm_config.model_name,
prompt_builder.llm_config.model_provider,
),
prompts=[
prompt
for response in img_generation_response
for prompt in response.revised_prompt
],
img_urls=img_urls,
b64_imgs=b64_imgs,
prompt_builder.update_user_prompt(
build_image_generation_user_prompt(
query=prompt_builder.get_user_message_content(),
img_urls=img_urls,
b64_imgs=b64_imgs,
)
)
prompt_builder.update_user_prompt(user_prompt)
return prompt_builder

View File

@@ -9,34 +9,16 @@ You have just created the attached images in response to the following query: "{
Can you please summarize them in a sentence or two? Do NOT include image urls or bulleted lists.
"""
IMG_GENERATION_SUMMARY_PROMPT_NO_IMAGES = """
You have generated images based on the following query: "{query}".
The prompts used to create these images were: {prompts}
Describe the two images you generated, summarizing the key elements and content in a sentence or two.
Be specific about what was generated and respond as if you have seen them,
without including any disclaimers or speculations.
"""
def build_image_generation_user_prompt(
query: str,
supports_image_input: bool,
img_urls: list[str] | None = None,
b64_imgs: list[str] | None = None,
prompts: list[str] | None = None,
) -> HumanMessage:
if supports_image_input:
return HumanMessage(
content=build_content_with_imgs(
message=IMG_GENERATION_SUMMARY_PROMPT.format(query=query).strip(),
b64_imgs=b64_imgs,
img_urls=img_urls,
)
)
else:
return HumanMessage(
content=IMG_GENERATION_SUMMARY_PROMPT_NO_IMAGES.format(
query=query, prompts=prompts
).strip()
)
return HumanMessage(
content=build_content_with_imgs(
message=IMG_GENERATION_SUMMARY_PROMPT.format(query=query).strip(),
b64_imgs=b64_imgs,
img_urls=img_urls,
)
)

View File

@@ -123,7 +123,6 @@ def optional_telemetry(
headers={"Content-Type": "application/json"},
json=payload,
)
except Exception:
# This way it silences all thread level logging as well
pass

View File

@@ -197,7 +197,7 @@ ai_platform_doc = SeedPresaveDocument(
)
customer_support_doc = SeedPresaveDocument(
url="https://docs.onyx.app/more/use_cases/support",
url="https://docs.onyx.app/more/use_cases/customer_support",
title=customer_support_title,
content=customer_support,
title_embedding=model.encode(f"search_document: {customer_support_title}"),

View File

@@ -21,144 +21,35 @@ Options:
--doc-id : Document ID
--fields : Fields to update (JSON)
Example:
Example: (gets docs for a given tenant id and connector id)
python vespa_debug_tool.py --action list_docs --tenant-id my_tenant --connector-id 1 --n 5
"""
import argparse
import json
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from uuid import UUID
from pydantic import BaseModel
from sqlalchemy import and_
from onyx.configs.constants import INDEX_SEPARATOR
from onyx.context.search.models import IndexFilters
from onyx.context.search.models import SearchRequest
from onyx.db.connector_credential_pair import get_connector_credential_pair_from_id
from onyx.db.engine import get_session_with_tenant
from onyx.db.models import ConnectorCredentialPair
from onyx.db.models import Document
from onyx.db.models import DocumentByConnectorCredentialPair
from onyx.db.search_settings import get_current_search_settings
from onyx.document_index.document_index_utils import get_document_chunk_ids
from onyx.document_index.interfaces import EnrichedDocumentIndexingInfo
from onyx.document_index.vespa.index import VespaIndex
from onyx.document_index.vespa.shared_utils.utils import get_vespa_http_client
from onyx.document_index.vespa_constants import ACCESS_CONTROL_LIST
from onyx.document_index.vespa_constants import DOC_UPDATED_AT
from onyx.document_index.vespa_constants import DOCUMENT_ID_ENDPOINT
from onyx.document_index.vespa_constants import DOCUMENT_SETS
from onyx.document_index.vespa_constants import HIDDEN
from onyx.document_index.vespa_constants import METADATA_LIST
from onyx.document_index.vespa_constants import SEARCH_ENDPOINT
from onyx.document_index.vespa_constants import SOURCE_TYPE
from onyx.document_index.vespa_constants import TENANT_ID
from onyx.document_index.vespa_constants import VESPA_APP_CONTAINER_URL
from onyx.document_index.vespa_constants import VESPA_APPLICATION_ENDPOINT
from onyx.utils.logger import setup_logger
from shared_configs.configs import MULTI_TENANT
from shared_configs.configs import POSTGRES_DEFAULT_SCHEMA
logger = setup_logger()
class DocumentFilter(BaseModel):
# Document filter for link matching.
link: str | None = None
def build_vespa_filters(
filters: IndexFilters,
*,
include_hidden: bool = False,
remove_trailing_and: bool = False,
) -> str:
# Build a combined Vespa filter string from the given IndexFilters.
def _build_or_filters(key: str, vals: list[str] | None) -> str:
if vals is None:
return ""
valid_vals = [val for val in vals if val]
if not key or not valid_vals:
return ""
eq_elems = [f'{key} contains "{elem}"' for elem in valid_vals]
or_clause = " or ".join(eq_elems)
return f"({or_clause})"
def _build_time_filter(
cutoff: datetime | None,
untimed_doc_cutoff: timedelta = timedelta(days=92),
) -> str:
if not cutoff:
return ""
include_untimed = datetime.now(timezone.utc) - untimed_doc_cutoff > cutoff
cutoff_secs = int(cutoff.timestamp())
if include_untimed:
return f"!({DOC_UPDATED_AT} < {cutoff_secs})"
return f"({DOC_UPDATED_AT} >= {cutoff_secs})"
filter_str = ""
if not include_hidden:
filter_str += f"AND !({HIDDEN}=true) "
if filters.tenant_id and MULTI_TENANT:
filter_str += f'AND ({TENANT_ID} contains "{filters.tenant_id}") '
if filters.access_control_list is not None:
acl_str = _build_or_filters(ACCESS_CONTROL_LIST, filters.access_control_list)
if acl_str:
filter_str += f"AND {acl_str} "
source_strs = (
[s.value for s in filters.source_type] if filters.source_type else None
)
source_str = _build_or_filters(SOURCE_TYPE, source_strs)
if source_str:
filter_str += f"AND {source_str} "
tags = filters.tags
if tags:
tag_attributes = [tag.tag_key + INDEX_SEPARATOR + tag.tag_value for tag in tags]
else:
tag_attributes = None
tag_str = _build_or_filters(METADATA_LIST, tag_attributes)
if tag_str:
filter_str += f"AND {tag_str} "
doc_set_str = _build_or_filters(DOCUMENT_SETS, filters.document_set)
if doc_set_str:
filter_str += f"AND {doc_set_str} "
time_filter = _build_time_filter(filters.time_cutoff)
if time_filter:
filter_str += f"AND {time_filter} "
if remove_trailing_and:
while filter_str.endswith(" and "):
filter_str = filter_str[:-5]
while filter_str.endswith("AND "):
filter_str = filter_str[:-4]
return filter_str.strip()
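# Rough sketch of the clause this builds, assuming MULTI_TENANT is enabled and
# the Vespa field constants resolve to their lowercase field names (illustrative only):
#   filters = IndexFilters(tenant_id="t1", access_control_list=["user:alice@example.com"])
#   build_vespa_filters(filters, remove_trailing_and=True)
#   -> 'AND !(hidden=true) AND (tenant_id contains "t1") AND (access_control_list contains "user:alice@example.com")'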
# Print Vespa configuration URLs
def print_vespa_config() -> None:
# Print Vespa configuration.
logger.info("Printing Vespa configuration.")
print(f"Vespa Application Endpoint: {VESPA_APPLICATION_ENDPOINT}")
print(f"Vespa App Container URL: {VESPA_APP_CONTAINER_URL}")
print(f"Vespa Search Endpoint: {SEARCH_ENDPOINT}")
print(f"Vespa Document ID Endpoint: {DOCUMENT_ID_ENDPOINT}")
# Check connectivity to Vespa endpoints
def check_vespa_connectivity() -> None:
# Check connectivity to Vespa endpoints.
logger.info("Checking Vespa connectivity.")
endpoints = [
f"{VESPA_APPLICATION_ENDPOINT}/ApplicationStatus",
f"{VESPA_APPLICATION_ENDPOINT}/tenant",
@@ -170,21 +61,17 @@ def check_vespa_connectivity() -> None:
try:
with get_vespa_http_client() as client:
response = client.get(endpoint)
logger.info(
f"Connected to Vespa at {endpoint}, status code {response.status_code}"
)
print(f"Successfully connected to Vespa at {endpoint}")
print(f"Status code: {response.status_code}")
print(f"Response: {response.text[:200]}...")
except Exception as e:
logger.error(f"Failed to connect to Vespa at {endpoint}: {str(e)}")
print(f"Failed to connect to Vespa at {endpoint}: {str(e)}")
print("Vespa connectivity check completed.")
# Get info about the default Vespa application
def get_vespa_info() -> Dict[str, Any]:
# Get info about the default Vespa application.
url = f"{VESPA_APPLICATION_ENDPOINT}/tenant/default/application/default"
with get_vespa_http_client() as client:
response = client.get(url)
@@ -192,298 +79,121 @@ def get_vespa_info() -> Dict[str, Any]:
return response.json()
def get_index_name(tenant_id: str) -> str:
# Return the index name for a given tenant.
# Get index name for a tenant and connector pair
def get_index_name(tenant_id: str, connector_id: int) -> str:
with get_session_with_tenant(tenant_id=tenant_id) as db_session:
cc_pair = get_connector_credential_pair_from_id(db_session, connector_id)
if not cc_pair:
raise ValueError(f"No connector found for id {connector_id}")
search_settings = get_current_search_settings(db_session)
if not search_settings:
raise ValueError(f"No search settings found for tenant {tenant_id}")
return search_settings.index_name
return search_settings.index_name if search_settings else "public"
def query_vespa(
yql: str, tenant_id: Optional[str] = None, limit: int = 10
) -> List[Dict[str, Any]]:
# Perform a Vespa query using YQL syntax.
filters = IndexFilters(tenant_id=tenant_id, access_control_list=[])
filter_string = build_vespa_filters(filters, remove_trailing_and=True)
full_yql = yql.strip()
if filter_string:
full_yql = f"{full_yql} {filter_string}"
full_yql = f"{full_yql} limit {limit}"
params = {"yql": full_yql, "timeout": "10s"}
search_request = SearchRequest(query="", limit=limit, offset=0)
params.update(search_request.model_dump())
logger.info(f"Executing Vespa query: {full_yql}")
# Perform a Vespa query using YQL syntax
def query_vespa(yql: str) -> List[Dict[str, Any]]:
params = {
"yql": yql,
"timeout": "10s",
}
with get_vespa_http_client() as client:
response = client.get(SEARCH_ENDPOINT, params=params)
response.raise_for_status()
result = response.json()
documents = result.get("root", {}).get("children", [])
logger.info(f"Found {len(documents)} documents from query.")
return documents
return response.json()["root"]["children"]
# Get first N documents
def get_first_n_documents(n: int = 10) -> List[Dict[str, Any]]:
# Get the first n documents from any source.
yql = "select * from sources * where true"
return query_vespa(yql, limit=n)
yql = f"select * from sources * where true limit {n};"
return query_vespa(yql)
# Pretty-print a list of documents
def print_documents(documents: List[Dict[str, Any]]) -> None:
# Pretty-print a list of documents.
for doc in documents:
print(json.dumps(doc, indent=2))
print("-" * 80)
# Get and print documents for a specific tenant and connector
def get_documents_for_tenant_connector(
tenant_id: str, connector_id: int, n: int = 10
) -> None:
# Get and print documents for a specific tenant and connector.
index_name = get_index_name(tenant_id)
logger.info(
f"Fetching documents for tenant={tenant_id}, connector_id={connector_id}"
)
yql = f"select * from sources {index_name} where true"
documents = query_vespa(yql, tenant_id, limit=n)
print(
f"First {len(documents)} documents for tenant {tenant_id}, connector {connector_id}:"
)
get_index_name(tenant_id, connector_id)
documents = get_first_n_documents(n)
print(f"First {n} documents for tenant {tenant_id}, connector {connector_id}:")
print_documents(documents)
# Search documents for a specific tenant and connector
def search_documents(
tenant_id: str, connector_id: int, query: str, n: int = 10
) -> None:
# Search documents for a specific tenant and connector.
index_name = get_index_name(tenant_id)
logger.info(
f"Searching documents for tenant={tenant_id}, connector_id={connector_id}, query='{query}'"
)
yql = f"select * from sources {index_name} where userInput(@query)"
documents = query_vespa(yql, tenant_id, limit=n)
print(f"Search results for query '{query}' in tenant {tenant_id}:")
index_name = get_index_name(tenant_id, connector_id)
yql = f"select * from sources {index_name} where userInput(@query) limit {n};"
documents = query_vespa(yql)
print(f"Search results for query '{query}':")
print_documents(documents)
# Update a specific document
def update_document(
tenant_id: str, connector_id: int, doc_id: str, fields: Dict[str, Any]
) -> None:
# Update a specific document.
index_name = get_index_name(tenant_id)
logger.info(
f"Updating document doc_id={doc_id} in tenant={tenant_id}, connector_id={connector_id}"
)
index_name = get_index_name(tenant_id, connector_id)
url = DOCUMENT_ID_ENDPOINT.format(index_name=index_name) + f"/{doc_id}"
update_request = {"fields": {k: {"assign": v} for k, v in fields.items()}}
with get_vespa_http_client() as client:
response = client.put(url, json=update_request)
response.raise_for_status()
logger.info(f"Document {doc_id} updated successfully.")
print(f"Document {doc_id} updated successfully")
# Delete a specific document
def delete_document(tenant_id: str, connector_id: int, doc_id: str) -> None:
# Delete a specific document.
index_name = get_index_name(tenant_id)
logger.info(
f"Deleting document doc_id={doc_id} in tenant={tenant_id}, connector_id={connector_id}"
)
index_name = get_index_name(tenant_id, connector_id)
url = DOCUMENT_ID_ENDPOINT.format(index_name=index_name) + f"/{doc_id}"
with get_vespa_http_client() as client:
response = client.delete(url)
response.raise_for_status()
logger.info(f"Document {doc_id} deleted successfully.")
print(f"Document {doc_id} deleted successfully")
def list_documents(n: int = 10, tenant_id: Optional[str] = None) -> None:
# List documents from any source, filtered by tenant if provided.
logger.info(f"Listing up to {n} documents for tenant={tenant_id or 'ALL'}")
yql = "select * from sources * where true"
if tenant_id:
yql += f" and tenant_id contains '{tenant_id}'"
documents = query_vespa(yql, tenant_id=tenant_id, limit=n)
print(f"Total documents found: {len(documents)}")
logger.info(f"Total documents found: {len(documents)}")
print(f"First {min(n, len(documents))} documents:")
for doc in documents[:n]:
print(json.dumps(doc, indent=2))
# List documents from any source
def list_documents(n: int = 10) -> None:
yql = f"select * from sources * where true limit {n};"
url = f"{VESPA_APP_CONTAINER_URL}/search/"
params = {
"yql": yql,
"timeout": "10s",
}
try:
with get_vespa_http_client() as client:
response = client.get(url, params=params)
response.raise_for_status()
documents = response.json()["root"]["children"]
print(f"First {n} documents:")
print_documents(documents)
except Exception as e:
print(f"Failed to list documents: {str(e)}")
# Get and print ACLs for documents of a specific tenant and connector
def get_document_acls(tenant_id: str, connector_id: int, n: int = 10) -> None:
index_name = get_index_name(tenant_id, connector_id)
yql = f"select documentid, access_control_list from sources {index_name} where true limit {n};"
documents = query_vespa(yql)
print(f"ACLs for {n} documents from tenant {tenant_id}, connector {connector_id}:")
for doc in documents:
print(f"Document ID: {doc['fields']['documentid']}")
print(
f"ACL: {json.dumps(doc['fields'].get('access_control_list', {}), indent=2)}"
)
print("-" * 80)
def get_document_and_chunk_counts(
tenant_id: str, cc_pair_id: int, filter_doc: DocumentFilter | None = None
) -> Dict[str, int]:
# Return a dict mapping each document ID to its chunk count for a given connector.
with get_session_with_tenant(tenant_id=tenant_id) as session:
doc_ids_data = (
session.query(DocumentByConnectorCredentialPair.id, Document.link)
.join(
ConnectorCredentialPair,
and_(
DocumentByConnectorCredentialPair.connector_id
== ConnectorCredentialPair.connector_id,
DocumentByConnectorCredentialPair.credential_id
== ConnectorCredentialPair.credential_id,
),
)
.join(Document, DocumentByConnectorCredentialPair.id == Document.id)
.filter(ConnectorCredentialPair.id == cc_pair_id)
.distinct()
.all()
)
doc_ids = []
for doc_id, link in doc_ids_data:
if filter_doc and filter_doc.link:
if link and filter_doc.link.lower() in link.lower():
doc_ids.append(doc_id)
else:
doc_ids.append(doc_id)
chunk_counts_data = (
session.query(Document.id, Document.chunk_count)
.filter(Document.id.in_(doc_ids))
.all()
)
return {
doc_id: chunk_count
for doc_id, chunk_count in chunk_counts_data
if chunk_count is not None
}
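# Illustrative return value (hypothetical document IDs and chunk counts):
#   {"confluence_page_123": 12, "drive_doc_456": 3}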
def get_chunk_ids_for_connector(
tenant_id: str,
cc_pair_id: int,
index_name: str,
filter_doc: DocumentFilter | None = None,
) -> List[UUID]:
# Return chunk IDs for a given connector.
doc_id_to_new_chunk_cnt = get_document_and_chunk_counts(
tenant_id, cc_pair_id, filter_doc
)
doc_infos: List[EnrichedDocumentIndexingInfo] = [
VespaIndex.enrich_basic_chunk_info(
index_name=index_name,
http_client=get_vespa_http_client(),
document_id=doc_id,
previous_chunk_count=doc_id_to_new_chunk_cnt.get(doc_id, 0),
new_chunk_count=0,
)
for doc_id in doc_id_to_new_chunk_cnt.keys()
]
chunk_ids = get_document_chunk_ids(
enriched_document_info_list=doc_infos,
tenant_id=tenant_id,
large_chunks_enabled=False,
)
if not isinstance(chunk_ids, list):
raise ValueError(f"Expected list of chunk IDs, got {type(chunk_ids)}")
return chunk_ids
def get_document_acls(
tenant_id: str,
cc_pair_id: int,
n: int | None = 10,
filter_doc: DocumentFilter | None = None,
) -> None:
# Fetch document ACLs for the given tenant and connector pair.
index_name = get_index_name(tenant_id)
logger.info(
f"Fetching document ACLs for tenant={tenant_id}, cc_pair_id={cc_pair_id}"
)
chunk_ids: List[UUID] = get_chunk_ids_for_connector(
tenant_id, cc_pair_id, index_name, filter_doc
)
vespa_client = get_vespa_http_client()
target_ids = chunk_ids if n is None else chunk_ids[:n]
logger.info(
f"Found {len(chunk_ids)} chunk IDs, showing ACLs for {len(target_ids)}."
)
for doc_chunk_id in target_ids:
document_url = (
f"{DOCUMENT_ID_ENDPOINT.format(index_name=index_name)}/{str(doc_chunk_id)}"
)
response = vespa_client.get(document_url)
if response.status_code == 200:
fields = response.json().get("fields", {})
document_id = fields.get("document_id") or fields.get(
"documentid", "Unknown"
)
acls = fields.get("access_control_list", {})
title = fields.get("title", "")
source_type = fields.get("source_type", "")
source_links_raw = fields.get("source_links", "{}")
try:
source_links = json.loads(source_links_raw)
except json.JSONDecodeError:
source_links = {}
print(f"Document Chunk ID: {doc_chunk_id}")
print(f"Document ID: {document_id}")
print(f"ACLs:\n{json.dumps(acls, indent=2)}")
print(f"Source Links: {source_links}")
print(f"Title: {title}")
print(f"Source Type: {source_type}")
if MULTI_TENANT:
print(f"Tenant ID: {fields.get('tenant_id', 'N/A')}")
print("-" * 80)
else:
logger.error(f"Failed to fetch document for chunk ID: {doc_chunk_id}")
print(f"Failed to fetch document for chunk ID: {doc_chunk_id}")
print(f"Status Code: {response.status_code}")
print("-" * 80)
class VespaDebugging:
# Class for managing Vespa debugging actions.
def __init__(self, tenant_id: str | None = None):
self.tenant_id = POSTGRES_DEFAULT_SCHEMA if not tenant_id else tenant_id
def print_config(self) -> None:
# Print Vespa config.
print_vespa_config()
def check_connectivity(self) -> None:
# Check Vespa connectivity.
check_vespa_connectivity()
def list_documents(self, n: int = 10) -> None:
# List documents for a tenant.
list_documents(n, self.tenant_id)
def search_documents(self, connector_id: int, query: str, n: int = 10) -> None:
# Search documents for a tenant and connector.
search_documents(self.tenant_id, connector_id, query, n)
def update_document(
self, connector_id: int, doc_id: str, fields: Dict[str, Any]
) -> None:
# Update a document.
update_document(self.tenant_id, connector_id, doc_id, fields)
def delete_document(self, connector_id: int, doc_id: str) -> None:
# Delete a document.
delete_document(self.tenant_id, connector_id, doc_id)
def acls_by_link(self, cc_pair_id: int, link: str) -> None:
# Get ACLs for a document matching a link.
get_document_acls(
self.tenant_id, cc_pair_id, n=None, filter_doc=DocumentFilter(link=link)
)
def acls(self, cc_pair_id: int, n: int | None = 10) -> None:
# Get ACLs for a connector.
get_document_acls(self.tenant_id, cc_pair_id, n)
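# Minimal sketch of interactive use from a Python shell, assuming a tenant
# "my_tenant" and a connector-credential pair with ID 1 exist (example values):
#   vespa_debug = VespaDebugging("my_tenant")
#   vespa_debug.check_connectivity()
#   vespa_debug.acls(cc_pair_id=1, n=5)
#   vespa_debug.acls_by_link(cc_pair_id=1, link="docs.onyx.app")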
def main() -> None:
# Main CLI entry point.
parser = argparse.ArgumentParser(description="Vespa debugging tool")
parser.add_argument(
"--action",
@@ -499,45 +209,60 @@ def main() -> None:
required=True,
help="Action to perform",
)
parser.add_argument("--tenant-id", help="Tenant ID")
parser.add_argument("--connector-id", type=int, help="Connector ID")
parser.add_argument(
"--n", type=int, default=10, help="Number of documents to retrieve"
"--tenant-id", help="Tenant ID (for update, delete, and get_acls actions)"
)
parser.add_argument(
"--connector-id",
type=int,
help="Connector ID (for update, delete, and get_acls actions)",
)
parser.add_argument(
"--n",
type=int,
default=10,
help="Number of documents to retrieve (for list_docs, search, update, and get_acls actions)",
)
parser.add_argument("--query", help="Search query (for search action)")
parser.add_argument("--doc-id", help="Document ID (for update and delete actions)")
parser.add_argument(
"--fields", help="Fields to update, in JSON format (for update)"
"--fields", help="Fields to update, in JSON format (for update action)"
)
args = parser.parse_args()
vespa_debug = VespaDebugging(args.tenant_id)
if args.action == "config":
vespa_debug.print_config()
print_vespa_config()
elif args.action == "connect":
vespa_debug.check_connectivity()
check_vespa_connectivity()
elif args.action == "list_docs":
vespa_debug.list_documents(args.n)
elif args.action == "search":
if not args.query or args.connector_id is None:
parser.error("--query and --connector-id are required for search action")
vespa_debug.search_documents(args.connector_id, args.query, args.n)
elif args.action == "update":
if not args.doc_id or not args.fields or args.connector_id is None:
parser.error(
"--doc-id, --fields, and --connector-id are required for update action"
# If tenant_id and connector_id are provided, list docs for that tenant/connector.
# Otherwise, list documents from any source.
if args.tenant_id and args.connector_id:
get_documents_for_tenant_connector(
args.tenant_id, args.connector_id, args.n
)
else:
list_documents(args.n)
elif args.action == "search":
if not args.query:
parser.error("--query is required for search action")
search_documents(args.tenant_id, args.connector_id, args.query, args.n)
elif args.action == "update":
if not args.doc_id or not args.fields:
parser.error("--doc-id and --fields are required for update action")
fields = json.loads(args.fields)
vespa_debug.update_document(args.connector_id, args.doc_id, fields)
update_document(args.tenant_id, args.connector_id, args.doc_id, fields)
elif args.action == "delete":
if not args.doc_id or args.connector_id is None:
parser.error("--doc-id and --connector-id are required for delete action")
vespa_debug.delete_document(args.connector_id, args.doc_id)
if not args.doc_id:
parser.error("--doc-id is required for delete action")
delete_document(args.tenant_id, args.connector_id, args.doc_id)
elif args.action == "get_acls":
if args.connector_id is None:
parser.error("--connector-id is required for get_acls action")
vespa_debug.acls(args.connector_id, args.n)
if not args.tenant_id or args.connector_id is None:
parser.error(
"--tenant-id and --connector-id are required for get_acls action"
)
get_document_acls(args.tenant_id, args.connector_id, args.n)
if __name__ == "__main__":

backend/test (new file, 0 lines)
View File

View File

@@ -1,92 +0,0 @@
import pytest
from onyx.background.celery.tasks.llm_model_update.tasks import (
_process_model_list_response,
)
@pytest.mark.parametrize(
"input_data,expected_result,expected_error,error_match",
[
# Success cases
(
["gpt-4", "gpt-3.5-turbo", "claude-2"],
["gpt-4", "gpt-3.5-turbo", "claude-2"],
None,
None,
),
(
[
{"model_name": "gpt-4", "other_field": "value"},
{"model_name": "gpt-3.5-turbo", "other_field": "value"},
],
["gpt-4", "gpt-3.5-turbo"],
None,
None,
),
(
[
{"id": "gpt-4", "other_field": "value"},
{"id": "gpt-3.5-turbo", "other_field": "value"},
],
["gpt-4", "gpt-3.5-turbo"],
None,
None,
),
(
{"data": ["gpt-4", "gpt-3.5-turbo"]},
["gpt-4", "gpt-3.5-turbo"],
None,
None,
),
(
{"models": ["gpt-4", "gpt-3.5-turbo"]},
["gpt-4", "gpt-3.5-turbo"],
None,
None,
),
(
{"models": [{"id": "gpt-4"}, {"id": "gpt-3.5-turbo"}]},
["gpt-4", "gpt-3.5-turbo"],
None,
None,
),
# Error cases
(
"not a list",
None,
ValueError,
"Invalid response from API - expected list",
),
(
{"wrong_field": []},
None,
ValueError,
"Invalid response from API - expected dict with 'data' or 'models' field",
),
(
[{"wrong_field": "value"}],
None,
ValueError,
"Invalid item in model list - expected dict with model_name or id",
),
(
[42],
None,
ValueError,
"Invalid item in model list - expected string or dict",
),
],
)
def test_process_model_list_response(
input_data: dict | list,
expected_result: list[str] | None,
expected_error: type[Exception] | None,
error_match: str | None,
) -> None:
if expected_error:
with pytest.raises(expected_error, match=error_match):
_process_model_list_response(input_data)
else:
result = _process_model_list_response(input_data)
assert result == expected_result
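# A minimal sketch of the normalization behavior these cases encode (an
# illustration inferred from the assertions above, not the actual onyx
# implementation; the helper name below is hypothetical):
def _sketch_process_model_list_response(data: dict | list) -> list[str]:
    # Unwrap {"data": [...]} or {"models": [...]} envelopes
    if isinstance(data, dict):
        inner = data.get("data", data.get("models"))
        if inner is None:
            raise ValueError(
                "Invalid response from API - expected dict with 'data' or 'models' field"
            )
        data = inner
    if not isinstance(data, list):
        raise ValueError("Invalid response from API - expected list")
    names: list[str] = []
    for item in data:
        if isinstance(item, str):
            names.append(item)
        elif isinstance(item, dict):
            # Prefer model_name, fall back to id
            name = item.get("model_name", item.get("id"))
            if name is None:
                raise ValueError(
                    "Invalid item in model list - expected dict with model_name or id"
                )
            names.append(name)
        else:
            raise ValueError("Invalid item in model list - expected string or dict")
    return names
# e.g. _sketch_process_model_list_response({"models": [{"id": "gpt-4"}]}) -> ["gpt-4"]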

View File

@@ -18,9 +18,6 @@ FROM base AS builder
RUN apk add --no-cache libc6-compat
WORKDIR /app
# Add NODE_OPTIONS argument
ARG NODE_OPTIONS
# pull in source code / package.json / package-lock.json
COPY . .
@@ -81,8 +78,7 @@ ENV NEXT_PUBLIC_GTM_ENABLED=${NEXT_PUBLIC_GTM_ENABLED}
ARG NEXT_PUBLIC_FORGOT_PASSWORD_ENABLED
ENV NEXT_PUBLIC_FORGOT_PASSWORD_ENABLED=${NEXT_PUBLIC_FORGOT_PASSWORD_ENABLED}
# Use NODE_OPTIONS in the build command
RUN NODE_OPTIONS="${NODE_OPTIONS}" npx next build
RUN npx next build
# Step 2. Production image, copy all the files and run next
FROM base AS runner

View File

@@ -86,16 +86,14 @@ const sentryWebpackPluginOptions = {
authToken: process.env.SENTRY_AUTH_TOKEN,
silent: !sentryEnabled, // Silence output when Sentry is disabled
dryRun: !sentryEnabled, // Don't upload source maps when Sentry is disabled
...(sentryEnabled && {
sourceMaps: {
include: ["./.next"],
ignore: ["node_modules"],
urlPrefix: "~/_next",
stripPrefix: ["webpack://_N_E/"],
validate: true,
cleanArtifacts: true,
},
}),
sourceMaps: {
include: ["./.next"],
ignore: ["node_modules"],
urlPrefix: "~/_next",
stripPrefix: ["webpack://_N_E/"],
validate: true,
cleanArtifacts: true,
},
};
// Export the module with conditional Sentry configuration

View File

@@ -444,10 +444,26 @@ export function AssistantEditor({
let enabledTools = Object.keys(values.enabled_tools_map)
.map((toolId) => Number(toolId))
.filter((toolId) => values.enabled_tools_map[toolId]);
const searchToolEnabled = searchTool
? enabledTools.includes(searchTool.id)
: false;
const imageGenerationToolEnabled = imageGenerationTool
? enabledTools.includes(imageGenerationTool.id)
: false;
if (imageGenerationToolEnabled) {
if (
// model must support image input for image generation
// to work
!checkLLMSupportsImageInput(
values.llm_model_version_override || defaultModelName || ""
)
) {
enabledTools = enabledTools.filter(
(toolId) => toolId !== imageGenerationTool!.id
);
}
}
// if disable_retrieval is set, set num_chunks to 0
// to tell the backend to not fetch any documents
@@ -890,20 +906,52 @@ export function AssistantEditor({
{imageGenerationTool && (
<>
<div className="flex items-center content-start mb-2">
<BooleanFormField
name={`enabled_tools_map.${imageGenerationTool.id}`}
label={imageGenerationTool.display_name}
subtext="Generate and manipulate images using AI-powered tools"
disabled={
!currentLLMSupportsImageOutput ||
!isImageGenerationAvailable
}
disabledTooltip={
!currentLLMSupportsImageOutput
? "To use Image Generation, select GPT-4 or another image compatible model as the default model for this Assistant."
: "Image Generation requires an OpenAI or Azure Dall-E configuration."
}
/>
<TooltipProvider>
<Tooltip>
<TooltipTrigger>
<CheckboxField
size="sm"
id={`enabled_tools_map.${imageGenerationTool.id}`}
name={`enabled_tools_map.${imageGenerationTool.id}`}
onCheckedChange={() => {
if (
currentLLMSupportsImageOutput &&
isImageGenerationAvailable
) {
toggleToolInValues(
imageGenerationTool.id
);
}
}}
className={
!currentLLMSupportsImageOutput ||
!isImageGenerationAvailable
? "opacity-50 cursor-not-allowed"
: ""
}
/>
</TooltipTrigger>
{(!currentLLMSupportsImageOutput ||
!isImageGenerationAvailable) && (
<TooltipContent side="top" align="center">
<p className="bg-background-900 max-w-[200px] mb-1 text-sm rounded-lg p-1.5 text-white">
{!currentLLMSupportsImageOutput
? "To use Image Generation, select GPT-4 or another image compatible model as the default model for this Assistant."
: "Image Generation requires an OpenAI or Azure Dalle configuration."}
</p>
</TooltipContent>
)}
</Tooltip>
</TooltipProvider>
<div className="flex flex-col ml-2">
<span className="text-sm">
{imageGenerationTool.display_name}
</span>
<span className="text-xs text-subtle">
Generate and manipulate images using AI-powered
tools
</span>
</div>
</div>
</>
)}
@@ -937,12 +985,23 @@ export function AssistantEditor({
{customTools.length > 0 &&
customTools.map((tool) => (
<BooleanFormField
key={tool.id}
name={`enabled_tools_map.${tool.id}`}
label={tool.display_name}
subtext={tool.description}
/>
<React.Fragment key={tool.id}>
<div className="flex items-center content-start mb-2">
<Checkbox
size="sm"
id={`enabled_tools_map.${tool.id}`}
checked={values.enabled_tools_map[tool.id]}
onCheckedChange={() => {
toggleToolInValues(tool.id);
}}
/>
<div className="ml-2">
<span className="text-sm">
{tool.display_name}
</span>
</div>
</div>
</React.Fragment>
))}
</div>
</div>
@@ -1295,6 +1354,7 @@ export function AssistantEditor({
<BooleanFormField
small
removeIndent
alignTop
name="llm_relevance_filter"
label="AI Relevance Filter"
subtext="If enabled, the LLM will filter out documents that are not useful for answering the user query prior to generating a response. This typically improves the quality of the response but incurs slightly higher cost."
@@ -1303,6 +1363,7 @@ export function AssistantEditor({
<BooleanFormField
small
removeIndent
alignTop
name="include_citations"
label="Citations"
subtext="Response will include citations ([1], [2], etc.) for documents referenced by the LLM. In general, we recommend to leave this enabled in order to increase trust in the LLM answer."
@@ -1315,6 +1376,7 @@ export function AssistantEditor({
<BooleanFormField
small
removeIndent
alignTop
name="datetime_aware"
label="Date and Time Aware"
subtext='Toggle this option to let the assistant know the current date and time (formatted like: "Thursday Jan 1, 1970 00:01"). To inject it in a specific place in the prompt, use the pattern [[CURRENT_DATETIME]]'

View File

@@ -50,7 +50,7 @@ export const rerankingModels: RerankingModel[] = [
cloud: true,
displayName: "LiteLLM",
description: "Host your own reranker or router with LiteLLM proxy",
link: "https://docs.litellm.ai/docs/simple_proxy",
link: "https://docs.litellm.ai/docs/proxy",
},
{
rerank_provider_type: null,
@@ -82,7 +82,7 @@ export const rerankingModels: RerankingModel[] = [
modelName: "rerank-english-v3.0",
displayName: "Cohere English",
description: "High-performance English-focused reranking model.",
link: "https://docs.cohere.com/v2/reference/rerank",
link: "https://docs.cohere.com/docs/rerank",
},
{
cloud: true,
@@ -90,7 +90,7 @@ export const rerankingModels: RerankingModel[] = [
modelName: "rerank-multilingual-v3.0",
displayName: "Cohere Multilingual",
description: "Powerful multilingual reranking model.",
link: "https://docs.cohere.com/v2/reference/rerank",
link: "https://docs.cohere.com/docs/rerank",
},
];

View File

@@ -1,13 +1,14 @@
"use client";
import React, { useMemo, useState } from "react";
import { Persona } from "@/app/admin/assistants/interfaces";
import { useRouter } from "next/navigation";
import { Modal } from "@/components/Modal";
import AssistantCard from "./AssistantCard";
import { useAssistants } from "@/components/context/AssistantsContext";
import { useUser } from "@/components/user/UserProvider";
import { FilterIcon } from "lucide-react";
import { checkUserOwnsAssistant } from "@/lib/assistants/checkOwnership";
import { Dialog, DialogContent } from "@/components/ui/dialog";
export const AssistantBadgeSelector = ({
text,
@@ -20,12 +21,11 @@ export const AssistantBadgeSelector = ({
}) => {
return (
<div
className={`
select-none ${
selected
? "bg-neutral-900 text-white"
: "bg-transparent text-neutral-900"
} w-12 h-5 text-center px-1 py-0.5 rounded-lg cursor-pointer text-[12px] font-normal leading-[10px] border border-black justify-center items-center gap-1 inline-flex`}
className={`${
selected
? "bg-neutral-900 text-white"
: "bg-transparent text-neutral-900"
} w-12 h-5 text-center px-1 py-0.5 rounded-lg cursor-pointer text-[12px] font-normal leading-[10px] border border-black justify-center items-center gap-1 inline-flex`}
onClick={toggleFilter}
>
{text}
@@ -60,15 +60,11 @@ const useAssistantFilter = () => {
return { assistantFilters, toggleAssistantFilter, setAssistantFilters };
};
interface AssistantModalProps {
hideModal: () => void;
modalHeight?: string;
}
export function AssistantModal({
export default function AssistantModal({
hideModal,
modalHeight,
}: AssistantModalProps) {
}: {
hideModal: () => void;
}) {
const { assistants, pinnedAssistants } = useAssistants();
const { assistantFilters, toggleAssistantFilter } = useAssistantFilter();
const router = useRouter();
@@ -90,11 +86,11 @@ export function AssistantModal({
!assistantFilters[AssistantFilter.Private] || !assistant.is_public;
const pinnedFilter =
!assistantFilters[AssistantFilter.Pinned] ||
(pinnedAssistants.map((a) => a.id).includes(assistant.id) ?? false);
(user?.preferences?.pinned_assistants?.includes(assistant.id) ?? false);
const mineFilter =
!assistantFilters[AssistantFilter.Mine] ||
checkUserOwnsAssistant(user, assistant);
assistants.map((a: Persona) => checkUserOwnsAssistant(user, a));
return (
(nameMatches || labelMatches) &&
@@ -115,145 +111,142 @@ export function AssistantModal({
(assistant) => !assistant.builtin_persona && !assistant.is_default_persona
);
const maxHeight = 900;
const calculatedHeight = Math.min(
Math.ceil(assistants.length / 2) * 170 + 200,
window.innerHeight * 0.8
);
const height = Math.min(calculatedHeight, maxHeight);
return (
<Dialog open={true} onOpenChange={(open) => !open && hideModal()}>
<DialogContent
className="p-0 overflow-hidden max-h-[80vh] max-w-none w-[95%] bg-background rounded-sm shadow-2xl transform transition-all duration-300 ease-in-out relative w-11/12 max-w-4xl pt-10 pb-10 px-10 overflow-hidden flex flex-col max-w-4xl"
style={{
position: "fixed",
top: "10vh",
left: "50%",
transform: "translateX(-50%)",
margin: 0,
}}
>
<div className="flex overflow-hidden flex-col h-full">
<div className="flex flex-col sticky top-0 z-10">
<div className="flex px-2 justify-between items-center gap-x-2 mb-0">
<div className="h-12 w-full rounded-lg flex-col justify-center items-start gap-2.5 inline-flex">
<div className="h-12 rounded-md w-full shadow-[0px_0px_2px_0px_rgba(0,0,0,0.25)] border border-[#dcdad4] flex items-center px-3">
{!isSearchFocused && (
<svg
xmlns="http://www.w3.org/2000/svg"
className="h-5 w-5 text-gray-400"
fill="none"
viewBox="0 0 24 24"
stroke="currentColor"
>
<path
strokeLinecap="round"
strokeLinejoin="round"
strokeWidth={2}
d="M21 21l-6-6m2-5a7 7 0 11-14 0 7 7 0 0114 0z"
/>
</svg>
)}
<input
value={searchQuery}
onChange={(e) => setSearchQuery(e.target.value)}
onFocus={() => setIsSearchFocused(true)}
onBlur={() => setIsSearchFocused(false)}
type="text"
className="w-full h-full bg-transparent outline-none text-black"
<Modal
heightOverride={`${height}px`}
onOutsideClick={hideModal}
removeBottomPadding
className={`max-w-4xl max-h-[90vh] ${height} w-[95%] overflow-hidden`}
>
<div className="flex flex-col h-full">
<div className="flex bg-background flex-col sticky top-0 z-10">
<div className="flex px-2 justify-between items-center gap-x-2 mb-0">
<div className="h-12 w-full rounded-lg flex-col justify-center items-start gap-2.5 inline-flex">
<div className="h-12 rounded-md w-full shadow-[0px_0px_2px_0px_rgba(0,0,0,0.25)] border border-[#dcdad4] flex items-center px-3">
{!isSearchFocused && (
<svg
xmlns="http://www.w3.org/2000/svg"
className="h-5 w-5 text-gray-400"
fill="none"
viewBox="0 0 24 24"
stroke="currentColor"
>
<path
strokeLinecap="round"
strokeLinejoin="round"
strokeWidth={2}
d="M21 21l-6-6m2-5a7 7 0 11-14 0 7 7 0 0114 0z"
/>
</svg>
)}
<input
value={searchQuery}
onChange={(e) => setSearchQuery(e.target.value)}
onFocus={() => setIsSearchFocused(true)}
onBlur={() => setIsSearchFocused(false)}
type="text"
className="w-full h-full bg-transparent outline-none text-black"
/>
</div>
</div>
<button
onClick={() => router.push("/assistants/new")}
className="h-10 cursor-pointer px-6 py-3 bg-black rounded-md border border-black justify-center items-center gap-2.5 inline-flex"
>
<div className="text-[#fffcf4] text-lg font-normal leading-normal">
Create
</div>
</button>
</div>
<div className="px-2 flex py-4 items-center gap-x-2 flex-wrap">
<FilterIcon size={16} />
<AssistantBadgeSelector
text="Pinned"
selected={assistantFilters[AssistantFilter.Pinned]}
toggleFilter={() => toggleAssistantFilter(AssistantFilter.Pinned)}
/>
<AssistantBadgeSelector
text="Mine"
selected={assistantFilters[AssistantFilter.Mine]}
toggleFilter={() => toggleAssistantFilter(AssistantFilter.Mine)}
/>
<AssistantBadgeSelector
text="Private"
selected={assistantFilters[AssistantFilter.Private]}
toggleFilter={() =>
toggleAssistantFilter(AssistantFilter.Private)
}
/>
<AssistantBadgeSelector
text="Public"
selected={assistantFilters[AssistantFilter.Public]}
toggleFilter={() => toggleAssistantFilter(AssistantFilter.Public)}
/>
</div>
<div className="w-full border-t border-neutral-200" />
</div>
<div className="flex-grow overflow-y-auto">
<h2 className="text-2xl font-semibold text-gray-800 mb-2 px-4 py-2">
Featured Assistants
</h2>
<div className="w-full px-2 pb-2 grid grid-cols-1 md:grid-cols-2 gap-x-6 gap-y-6">
{featuredAssistants.length > 0 ? (
featuredAssistants.map((assistant, index) => (
<div key={index}>
<AssistantCard
pinned={pinnedAssistants
.map((a) => a.id)
.includes(assistant.id)}
persona={assistant}
closeModal={hideModal}
/>
</div>
))
) : (
<div className="col-span-2 text-center text-gray-500">
No featured assistants match filters
</div>
<button
onClick={() => router.push("/assistants/new")}
className="h-10 cursor-pointer px-6 py-3 bg-black rounded-md border border-black justify-center items-center gap-2.5 inline-flex"
>
<div className="text-[#fffcf4] text-lg font-normal leading-normal">
Create
</div>
</button>
</div>
<div className="px-2 flex py-4 items-center gap-x-2 flex-wrap">
<FilterIcon size={16} />
<AssistantBadgeSelector
text="Pinned"
selected={assistantFilters[AssistantFilter.Pinned]}
toggleFilter={() =>
toggleAssistantFilter(AssistantFilter.Pinned)
}
/>
<AssistantBadgeSelector
text="Mine"
selected={assistantFilters[AssistantFilter.Mine]}
toggleFilter={() => toggleAssistantFilter(AssistantFilter.Mine)}
/>
<AssistantBadgeSelector
text="Private"
selected={assistantFilters[AssistantFilter.Private]}
toggleFilter={() =>
toggleAssistantFilter(AssistantFilter.Private)
}
/>
<AssistantBadgeSelector
text="Public"
selected={assistantFilters[AssistantFilter.Public]}
toggleFilter={() =>
toggleAssistantFilter(AssistantFilter.Public)
}
/>
</div>
<div className="w-full border-t border-neutral-200" />
</div>
<div className="flex-grow overflow-y-auto">
<h2 className="text-2xl font-semibold text-gray-800 mb-2 px-4 py-2">
Featured Assistants
</h2>
<div className="w-full px-2 pb-2 grid grid-cols-1 md:grid-cols-2 gap-x-6 gap-y-6">
{featuredAssistants.length > 0 ? (
featuredAssistants.map((assistant, index) => (
<div key={index}>
<AssistantCard
pinned={pinnedAssistants
.map((a) => a.id)
.includes(assistant.id)}
persona={assistant}
closeModal={hideModal}
/>
</div>
))
) : (
<div className="col-span-2 text-center text-gray-500">
No featured assistants match filters
</div>
)}
</div>
{allAssistants && allAssistants.length > 0 && (
<>
<h2 className="text-2xl font-semibold text-gray-800 mt-4 mb-2 px-4 py-2">
All Assistants
</h2>
<div className="w-full mt-2 px-2 pb-2 grid grid-cols-1 md:grid-cols-2 gap-x-6 gap-y-6">
{allAssistants
.sort((a, b) => b.id - a.id)
.map((assistant, index) => (
<div key={index}>
<AssistantCard
pinned={
user?.preferences?.pinned_assistants?.includes(
assistant.id
) ?? false
}
persona={assistant}
closeModal={hideModal}
/>
</div>
))}
</div>
</>
)}
</div>
{allAssistants && allAssistants.length > 0 && (
<>
<h2 className="text-2xl font-semibold text-gray-800 mt-4 mb-2 px-4 py-2">
All Assistants
</h2>
<div className="w-full mt-2 px-2 pb-2 grid grid-cols-1 md:grid-cols-2 gap-x-6 gap-y-6">
{allAssistants
.sort((a, b) => b.id - a.id)
.map((assistant, index) => (
<div key={index}>
<AssistantCard
pinned={
user?.preferences?.pinned_assistants?.includes(
assistant.id
) ?? false
}
persona={assistant}
closeModal={hideModal}
/>
</div>
))}
</div>
</>
)}
</div>
</DialogContent>
</Dialog>
</div>
</Modal>
);
}
export default AssistantModal;

View File

@@ -49,7 +49,6 @@ import {
useContext,
useEffect,
useLayoutEffect,
useMemo,
useRef,
useState,
} from "react";
@@ -293,22 +292,19 @@ export function ChatPage({
);
};
const llmOverrideManager = useLlmOverride(
llmProviders,
user?.preferences.default_model,
selectedChatSession
);
const [alternativeAssistant, setAlternativeAssistant] =
useState<Persona | null>(null);
const [presentingDocument, setPresentingDocument] =
useState<OnyxDocument | null>(null);
const { recentAssistants, refreshRecentAssistants, assistants } =
useAssistants();
const llmOverrideManager = useLlmOverride(
llmProviders,
user?.preferences.default_model,
selectedChatSession,
undefined,
assistants
);
const { recentAssistants, refreshRecentAssistants } = useAssistants();
const liveAssistant: Persona | undefined =
alternativeAssistant ||
@@ -339,7 +335,7 @@ export function ChatPage({
);
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [liveAssistant, user?.preferences.default_model, selectedChatSession]);
}, [liveAssistant, user?.preferences.default_model]);
const stopGenerating = () => {
const currentSession = currentSessionId();
@@ -1627,7 +1623,7 @@ export function ChatPage({
setPopup({
type: "error",
message:
"The current model does not support image input. Please select a model with Vision support.",
"The current Assistant does not support image input. Please select an assistant with Vision support.",
});
return;
}
@@ -1845,14 +1841,6 @@ export function ChatPage({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [messageHistory]);
const imageFileInMessageHistory = useMemo(() => {
return messageHistory
.filter((message) => message.type === "user")
.some((message) =>
message.files.some((file) => file.type === ChatFileType.IMAGE)
);
}, [messageHistory]);
const currentVisibleRange = visibleRange.get(currentSessionId()) || {
start: 0,
end: 0,
@@ -1933,10 +1921,6 @@ export function ChatPage({
handleSlackChatRedirect();
}, [searchParams, router]);
useEffect(() => {
llmOverrideManager.updateImageFilesPresent(imageFileInMessageHistory);
}, [imageFileInMessageHistory]);
useEffect(() => {
const handleKeyDown = (event: KeyboardEvent) => {
if (event.metaKey || event.ctrlKey) {
@@ -2602,7 +2586,6 @@ export function ChatPage({
});
return;
}
onSubmit({
messageIdToResend:
previousMessage.messageId,

View File

@@ -147,6 +147,24 @@ export const DocumentResults = forwardRef<HTMLDivElement, DocumentResultsProps>(
)}
</div>
</div>
<div
className={`sticky bottom-4 w-full left-0 flex justify-center transition-opacity duration-300 ${
hasSelectedDocuments
? "opacity-100"
: "opacity-0 pointer-events-none"
}`}
>
<button
className="text-sm font-medium py-2 px-4 rounded-full transition-colors bg-neutral-900 text-white"
onClick={clearSelectedDocuments}
>
{`Remove ${
delayedSelectedDocumentCount > 0
? delayedSelectedDocumentCount
: ""
} Source${delayedSelectedDocumentCount > 1 ? "s" : ""}`}
</button>
</div>
</div>
</div>
</>

View File

@@ -694,7 +694,6 @@ export function ChatInputBar({
flexPriority="stiff"
name="Filters"
Icon={FiFilter}
toggle
tooltipContent="Filter your search"
/>
}

View File

@@ -5,6 +5,7 @@ import {
PopoverTrigger,
} from "@/components/ui/popover";
import { ChatInputOption } from "./ChatInputOption";
import { AnthropicSVG } from "@/components/icons/icons";
import { getDisplayNameForModel } from "@/lib/hooks";
import {
checkLLMSupportsImageInput,
@@ -18,14 +19,6 @@ import {
import { Persona } from "@/app/admin/assistants/interfaces";
import { LlmOverrideManager } from "@/lib/hooks";
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "@/components/ui/tooltip";
import { FiAlertTriangle } from "react-icons/fi";
interface LLMPopoverProps {
llmProviders: LLMProviderDescriptor[];
llmOverrideManager: LlmOverrideManager;
@@ -146,22 +139,6 @@ export default function LLMPopover({
);
}
})()}
{llmOverrideManager.imageFilesPresent &&
!checkLLMSupportsImageInput(name) && (
<TooltipProvider>
<Tooltip delayDuration={0}>
<TooltipTrigger className="my-auto flex items-center ml-auto">
<FiAlertTriangle className="text-alert" size={16} />
</TooltipTrigger>
<TooltipContent>
<p className="text-xs">
This LLM is not vision-capable and cannot process
image files present in your chat session.
</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
)}
</button>
);
}

View File

@@ -22,7 +22,7 @@ export const MemoizedAnchor = memo(
const index = parseInt(match[1], 10) - 1;
const associatedDoc = docs?.[index];
if (!associatedDoc) {
return <a href={children as string}>{children}</a>;
return <>{children}</>;
}
let icon: React.ReactNode = null;
@@ -77,24 +77,9 @@ export const MemoizedLink = memo((props: any) => {
);
}
const handleMouseDown = () => {
let url = rest.href || rest.children?.toString();
if (url && !url.startsWith("http://") && !url.startsWith("https://")) {
// Try to construct a valid URL
const httpsUrl = `https://${url}`;
try {
new URL(httpsUrl);
url = httpsUrl;
} catch {
// If not a valid URL, don't modify original url
}
}
window.open(url, "_blank");
};
return (
<a
onMouseDown={handleMouseDown}
onMouseDown={() => rest.href && window.open(rest.href, "_blank")}
className="cursor-pointer text-link hover:text-link-hover"
>
{rest.children}

View File

@@ -375,11 +375,7 @@ export const AIMessage = ({
return (
<>
<div
style={{
position: "absolute",
left: "-9999px",
display: "none",
}}
style={{ position: "absolute", left: "-9999px" }}
dangerouslySetInnerHTML={{ __html: htmlContent }}
/>
<ReactMarkdown

View File

@@ -198,7 +198,7 @@ export function SearchSummary({
) : null;
return (
<div className="flex group w-fit items-center">
<div className="flex items-center">
{isEditing ? (
editInput
) : (
@@ -225,7 +225,7 @@ export function SearchSummary({
<Tooltip>
<TooltipTrigger asChild>
<button
className="ml-2 -my-2 mobile:hidden hover:bg-hover p-1 rounded flex-shrink-0 group-hover:opacity-100 opacity-0"
className="ml-2 -my-2 mobile:hidden hover:bg-hover p-1 rounded flex-shrink-0"
onClick={() => {
setIsEditing(true);
}}

View File

@@ -6,17 +6,8 @@ import React, {
useContext,
useState,
useCallback,
useLayoutEffect,
useRef,
} from "react";
import Link from "next/link";
import {
Tooltip,
TooltipTrigger,
TooltipContent,
TooltipProvider,
} from "@/components/ui/tooltip";
import { useRouter, useSearchParams } from "next/navigation";
import { ChatSession } from "../interfaces";
import { NEXT_PUBLIC_NEW_CHAT_DIRECTS_TO_SAME_PERSONA } from "@/lib/constants";
@@ -53,7 +44,6 @@ import {
import { useSortable } from "@dnd-kit/sortable";
import { CSS } from "@dnd-kit/utilities";
import { CircleX } from "lucide-react";
import { restrictToVerticalAxis } from "@dnd-kit/modifiers";
interface HistorySidebarProps {
page: pageType;
@@ -100,24 +90,6 @@ const SortableAssistant: React.FC<SortableAssistantProps> = ({
...(isDragging ? { zIndex: 1000, position: "relative" as const } : {}),
};
const nameRef = useRef<HTMLParagraphElement>(null);
const hiddenNameRef = useRef<HTMLSpanElement>(null);
const [isNameTruncated, setIsNameTruncated] = useState(false);
useLayoutEffect(() => {
const checkTruncation = () => {
if (nameRef.current && hiddenNameRef.current) {
const visibleWidth = nameRef.current.offsetWidth;
const fullTextWidth = hiddenNameRef.current.offsetWidth;
setIsNameTruncated(fullTextWidth > visibleWidth);
}
};
checkTruncation();
window.addEventListener("resize", checkTruncation);
return () => window.removeEventListener("resize", checkTruncation);
}, [assistant.name]);
return (
<div
ref={setNodeRef}
@@ -143,28 +115,10 @@ const SortableAssistant: React.FC<SortableAssistantProps> = ({
: ""
} relative flex items-center gap-x-2 py-1 px-2 rounded-md`}
>
<AssistantIcon assistant={assistant} size={20} className="flex-none" />
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<p
ref={nameRef}
className="text-base text-left w-fit line-clamp-1 text-ellipsis text-black"
>
{assistant.name}
</p>
</TooltipTrigger>
{isNameTruncated && (
<TooltipContent>{assistant.name}</TooltipContent>
)}
</Tooltip>
</TooltipProvider>
<span
ref={hiddenNameRef}
className="absolute left-[-9999px] whitespace-nowrap"
>
<AssistantIcon assistant={assistant} size={16} className="flex-none" />
<p className="text-base text-left w-fit line-clamp-1 text-ellipsis text-black">
{assistant.name}
</span>
</p>
<button
onClick={(e) => {
e.stopPropagation();
@@ -341,7 +295,7 @@ export const HistorySidebar = forwardRef<HTMLDivElement, HistorySidebarProps>(
</div>
)}
<div className="h-full relative overflow-x-hidden overflow-y-auto">
<div className="h-full relative overflow-y-auto">
<div className="flex px-4 font-normal text-sm gap-x-2 leading-normal text-[#6c6c6c]/80 items-center font-normal leading-normal">
Assistants
</div>
@@ -349,7 +303,6 @@ export const HistorySidebar = forwardRef<HTMLDivElement, HistorySidebarProps>(
sensors={sensors}
collisionDetection={closestCenter}
onDragEnd={handleDragEnd}
modifiers={[restrictToVerticalAxis]}
>
<SortableContext
items={pinnedAssistants.map((a) =>

View File

@@ -13,7 +13,7 @@ import { FiPlus, FiTrash2, FiCheck, FiX } from "react-icons/fi";
import { NEXT_PUBLIC_DELETE_ALL_CHATS_ENABLED } from "@/lib/constants";
import { FolderDropdown } from "../folders/FolderDropdown";
import { ChatSessionDisplay } from "./ChatSessionDisplay";
import { useState, useCallback, useRef, useContext, useEffect } from "react";
import { useState, useCallback, useRef, useContext } from "react";
import { Caret } from "@/components/icons/icons";
import { groupSessionsByDateRange } from "../lib";
import React from "react";
@@ -36,7 +36,6 @@ import { useSortable } from "@dnd-kit/sortable";
import { CSS } from "@dnd-kit/utilities";
import { useChatContext } from "@/components/context/ChatContext";
import { SettingsContext } from "@/components/settings/SettingsProvider";
import { restrictToVerticalAxis } from "@dnd-kit/modifiers";
interface SortableFolderProps {
folder: Folder;
@@ -54,41 +53,34 @@ interface SortableFolderProps {
const SortableFolder: React.FC<SortableFolderProps> = (props) => {
const settings = useContext(SettingsContext);
const mobile = settings?.isMobile;
const [isDragging, setIsDragging] = useState(false);
const {
attributes,
listeners,
setNodeRef,
transform,
transition,
isDragging: isDraggingDndKit,
} = useSortable({
id: props.folder.folder_id?.toString() ?? "",
disabled: mobile,
});
const { attributes, listeners, setNodeRef, transform, transition } =
useSortable({
id: props.folder.folder_id?.toString() ?? "",
data: {
activationConstraint: {
distance: 8,
},
},
disabled: mobile,
});
const ref = useRef<HTMLDivElement>(null);
const style: React.CSSProperties = {
const style = {
transform: CSS.Transform.toString(transform),
transition,
zIndex: isDragging ? 1000 : "auto",
position: isDragging ? "relative" : "static",
opacity: isDragging ? 0.6 : 1,
};
useEffect(() => {
setIsDragging(isDraggingDndKit);
}, [isDraggingDndKit]);
return (
<div
ref={setNodeRef}
className="pr-3 ml-4 overflow-visible flex items-start"
style={style}
{...attributes}
{...listeners}
>
<FolderDropdown ref={ref} {...props} />
<FolderDropdown
ref={ref}
{...props}
{...(mobile ? {} : attributes)}
{...(mobile ? {} : listeners)}
/>
</div>
);
};
@@ -367,7 +359,6 @@ export function PagesTab({
{folders && folders.length > 0 && (
<DndContext
modifiers={[restrictToVerticalAxis]}
sensors={sensors}
collisionDetection={closestCenter}
onDragEnd={handleDragEnd}
@@ -445,7 +436,7 @@ export function PagesTab({
)}
{isHistoryEmpty && (!folders || folders.length === 0) && (
<p className="text-sm max-w-full mt-2 w-[250px]">
<p className="text-sm mt-2 w-[250px]">
Try sending a message! Your chat history will appear here.
</p>
)}

View File

@@ -287,53 +287,11 @@
overflow-x: hidden;
}
.scrollbar {
width: 100%;
height: 100%;
}
/* Styling for textarea scrollbar */
textarea::-webkit-scrollbar {
width: 8px;
}
textarea::-webkit-scrollbar-track {
background: var(--scrollbar-track);
border-radius: 4px;
}
textarea::-webkit-scrollbar-thumb {
background: var(--scrollbar-thumb);
border-radius: 4px;
}
textarea::-webkit-scrollbar-thumb:hover {
background: var(--scrollbar-thumb-hover);
}
/* Styling for textarea resize handle */
textarea {
resize: vertical;
}
/* For Firefox */
textarea {
scrollbar-width: thin;
scrollbar-color: var(--scrollbar-thumb) var(--scrollbar-track);
}
.inputscroll::-webkit-scrollbar-track {
background: #e5e7eb;
scrollbar-width: none;
}
::-webkit-scrollbar {
width: 0px;
/* Vertical scrollbar width */
height: 8px;
/* Horizontal scrollbar height */
}
::-webkit-scrollbar-track {
background: transparent;
/* background: theme("colors.scrollbar.track"); */

View File

@@ -23,7 +23,6 @@ import PostHogPageView from "./PostHogPageView";
import Script from "next/script";
import { LogoType } from "@/components/logo/Logo";
import { Hanken_Grotesk } from "next/font/google";
import { WebVitals } from "./web-vitals";
const inter = Inter({
subsets: ["latin"],
@@ -207,7 +206,6 @@ export default async function RootLayout({
<PostHogPageView />
</Suspense>
{children}
{process.env.NEXT_PUBLIC_POSTHOG_KEY && <WebVitals />}
</AppProvider>
);
}

View File

@@ -1,5 +1,5 @@
import { redirect } from "next/navigation";
export default async function Page() {
redirect("/chat");
redirect("/auth/login");
}

View File

@@ -1,12 +0,0 @@
"use client";
import { useReportWebVitals } from "next/web-vitals";
import { usePostHog } from "posthog-js/react";
export function WebVitals() {
const posthog = usePostHog();
useReportWebVitals((metric) => {
posthog.capture(metric.name, metric);
});
return <></>;
}

View File

@@ -63,7 +63,7 @@ export function Modal({
<div
onMouseDown={handleMouseDown}
className={cn(
`fixed inset-0 bg-black border boder-border bg-opacity-10 backdrop-blur-sm h-full
`fixed inset-0 bg-black bg-opacity-25 backdrop-blur-sm h-full
flex items-center justify-center z-[9999] transition-opacity duration-300 ease-in-out`
)}
>

View File

@@ -25,13 +25,11 @@ import {
} from "@/components/ui/tooltip";
import ReactMarkdown from "react-markdown";
import { FaMarkdown } from "react-icons/fa";
import { useRef, useState, useCallback } from "react";
import { useRef, useState } from "react";
import remarkGfm from "remark-gfm";
import { EditIcon } from "@/components/icons/icons";
import { Button } from "@/components/ui/button";
import Link from "next/link";
import { CheckboxField } from "@/components/ui/checkbox";
import { CheckedState } from "@radix-ui/react-checkbox";
export function SectionHeader({
children,
@@ -53,7 +51,7 @@ export function Label({
return (
<div
className={`block font-medium base ${className} ${
small ? "text-xs" : "text-sm"
small ? "text-sm" : "text-base"
}`}
>
{children}
@@ -77,9 +75,7 @@ export function LabelWithTooltip({
}
export function SubLabel({ children }: { children: string | JSX.Element }) {
return (
<div className="text-xs text-subtle whitespace-pre-line">{children}</div>
);
return <div className="text-sm text-subtle mb-2">{children}</div>;
}
export function ManualErrorMessage({ children }: { children: string }) {
@@ -443,62 +439,53 @@ interface BooleanFormFieldProps {
name: string;
label: string;
subtext?: string | JSX.Element;
onChange?: (e: React.ChangeEvent<HTMLInputElement>) => void;
removeIndent?: boolean;
small?: boolean;
alignTop?: boolean;
noLabel?: boolean;
disabled?: boolean;
checked?: boolean;
optional?: boolean;
tooltip?: string;
disabledTooltip?: string;
}
export const BooleanFormField = ({
name,
label,
subtext,
onChange,
removeIndent,
noLabel,
optional,
small,
disabled,
alignTop,
checked,
tooltip,
disabledTooltip,
}: BooleanFormFieldProps) => {
const { setFieldValue } = useFormikContext<any>();
const [field, meta, helpers] = useField<boolean>(name);
const { setValue } = helpers;
const handleChange = useCallback(
(checked: CheckedState) => {
if (!disabled) {
setFieldValue(name, checked);
}
},
[disabled, name, setFieldValue]
);
const handleChange = (e: React.ChangeEvent<HTMLInputElement>) => {
setValue(e.target.checked);
if (onChange) {
onChange(e);
}
};
return (
<div>
<label className="flex items-center text-sm cursor-pointer">
<TooltipProvider>
<Tooltip>
<TooltipTrigger>
<CheckboxField
name={name}
size="sm"
className={`
${disabled ? "opacity-50" : ""}
${removeIndent ? "mr-2" : "mx-3"}`}
onCheckedChange={handleChange}
/>
</TooltipTrigger>
{disabled && disabledTooltip && (
<TooltipContent side="top" align="center">
<p className="bg-background-900 max-w-[200px] mb-1 text-sm rounded-lg p-1.5 text-white">
{disabledTooltip}
</p>
</TooltipContent>
)}
</Tooltip>
</TooltipProvider>
<label className="flex text-sm">
<Field
type="checkbox"
{...field}
checked={checked !== undefined ? checked : field.value}
disabled={disabled}
onChange={handleChange}
className={`${removeIndent ? "mr-2" : "mx-3"}
px-5 w-3.5 h-3.5 ${alignTop ? "mt-1" : "my-auto"}`}
/>
{!noLabel && (
<div>
<div className="flex items-center gap-x-2">

View File

@@ -1,8 +1,7 @@
import { useRef, useState } from "react";
import { cva, type VariantProps } from "class-variance-authority";
import { cn } from "@/lib/utils";
import { Check, CheckCircle, XCircle } from "lucide-react";
import { Warning } from "@phosphor-icons/react";
import { CheckCircle, XCircle } from "lucide-react";
const popupVariants = cva(
"fixed bottom-4 left-4 p-4 rounded-lg shadow-xl text-white z-[10000] flex items-center space-x-3 transition-all duration-300 ease-in-out",
{
@@ -27,9 +26,9 @@ export interface PopupSpec extends VariantProps<typeof popupVariants> {
export const Popup: React.FC<PopupSpec> = ({ message, type }) => (
<div className={cn(popupVariants({ type }))}>
{type === "success" ? (
<Check className="w-6 h-6" />
<CheckCircle className="w-6 h-6 animate-pulse" />
) : type === "error" ? (
<Warning className="w-6 h-6 " />
<XCircle className="w-6 h-6 animate-pulse" />
) : type === "info" ? (
<svg
className="w-6 h-6"
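For reference, a sketch of each variant at a call site (the messages are placeholders):

// Hypothetical showcase; `type` selects one of the cva variants defined above.
export function PopupShowcase() {
  return (
    <>
      <Popup message="Connector saved" type="success" />
      <Popup message="Failed to reach the server" type="error" />
      <Popup message="Indexing in progress" type="info" />
    </>
  );
}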

View File

@@ -139,7 +139,7 @@ export function AssistantIcon({
alt={assistant.name}
src={buildImgUrl(assistant.uploaded_image_id)}
loading="lazy"
className={`h-[${dimension}px] w-[${dimension}px] rounded-full object-cover object-center transition-opacity duration-300 ${wrapperClass}`}
className={`h-[${dimension}px] w-[${dimension}px] object-cover object-center rounded-sm transition-opacity duration-300 ${wrapperClass}`}
style={style}
/>
) : (

View File

@@ -21,7 +21,7 @@ const DialogOverlay = React.forwardRef<
<DialogPrimitive.Overlay
ref={ref}
className={cn(
"fixed inset-0 z-50 bg-black/50 data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0",
"fixed inset-0 z-50 bg-black/80 data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0",
className
)}
{...props}

View File

@@ -481,9 +481,7 @@ Hint: Use the singular form of the object name (e.g., 'Opportunity' instead of '
name: "sites",
optional: true,
description: `• If no sites are specified, all sites in your organization will be indexed (Sites.Read.All permission required).
• For example, specifying 'https://onyxai.sharepoint.com/sites/support' will only index documents within that site.
• Specifying 'https://onyxai.sharepoint.com/sites/support/subfolder' will likewise only index documents within that folder.
`,
},
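To make the description above concrete, a hypothetical value for the sites field (both URLs are taken from the examples in the description itself):

// Index one site plus one sub-folder; an empty list indexes every site in the org.
const sites: string[] = [
  "https://onyxai.sharepoint.com/sites/support",
  "https://onyxai.sharepoint.com/sites/support/subfolder",
];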

View File

@@ -13,16 +13,12 @@ import { errorHandlingFetcher } from "./fetcher";
import { useContext, useEffect, useState } from "react";
import { DateRangePickerValue } from "@/app/ee/admin/performance/DateRangeSelector";
import { Filters, SourceMetadata } from "./search/interfaces";
import {
destructureValue,
getLLMProviderOverrideForPersona,
structureValue,
} from "./llm/utils";
import { destructureValue, structureValue } from "./llm/utils";
import { ChatSession } from "@/app/chat/interfaces";
import { AllUsersResponse } from "./types";
import { Credential } from "./connectors/credentials";
import { SettingsContext } from "@/components/settings/SettingsProvider";
import { Persona, PersonaLabel } from "@/app/admin/assistants/interfaces";
import { PersonaLabel } from "@/app/admin/assistants/interfaces";
import { LLMProviderDescriptor } from "@/app/admin/configuration/llm/interfaces";
import { isAnthropic } from "@/app/admin/configuration/llm/interfaces";
import { getSourceMetadata } from "./sources";
@@ -364,15 +360,12 @@ export interface LlmOverrideManager {
temperature: number | null;
updateTemperature: (temperature: number | null) => void;
updateModelOverrideForChatSession: (chatSession?: ChatSession) => void;
imageFilesPresent: boolean;
updateImageFilesPresent: (present: boolean) => void;
}
export function useLlmOverride(
llmProviders: LLMProviderDescriptor[],
globalModel?: string | null,
currentChatSession?: ChatSession,
defaultTemperature?: number,
assistants?: Persona[]
defaultTemperature?: number
): LlmOverrideManager {
const getValidLlmOverride = (
overrideModel: string | null | undefined
@@ -390,11 +383,6 @@ export function useLlmOverride(
}
return { name: "", provider: "", modelName: "" };
};
const [imageFilesPresent, setImageFilesPresent] = useState(false);
const updateImageFilesPresent = (present: boolean) => {
setImageFilesPresent(present);
};
const [globalDefault, setGlobalDefault] = useState<LlmOverride>(
getValidLlmOverride(globalModel)
@@ -429,32 +417,6 @@ export function useLlmOverride(
defaultTemperature !== undefined ? defaultTemperature : 0
);
useEffect(() => {
const currentPersona = assistants?.find(
(a) => a.id === currentChatSession?.persona_id
);
const personaDefault = currentPersona
? getLLMProviderOverrideForPersona(currentPersona, llmProviders)
: undefined;
if (personaDefault) {
updateLLMOverride(personaDefault);
} else {
updateLLMOverride(globalDefault);
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [currentChatSession]);
useEffect(() => {
if (currentChatSession?.current_alternate_model) {
setLlmOverride(
getValidLlmOverride(currentChatSession.current_alternate_model)
);
} else {
setLlmOverride(globalDefault);
}
}, [currentChatSession]);
useEffect(() => {
setGlobalDefault(getValidLlmOverride(globalModel));
}, [globalModel, llmProviders]);
@@ -485,8 +447,6 @@ export function useLlmOverride(
setGlobalDefault,
temperature,
updateTemperature,
imageFilesPresent,
updateImageFilesPresent,
};
}
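With the persona plumbing removed, call sites shrink to the four remaining parameters; a sketch of the reduced signature (the surrounding variables are assumptions, not shown in this diff):

// Hypothetical call site for useLlmOverride after the simplification above.
const llmManager = useLlmOverride(
  llmProviders,       // LLMProviderDescriptor[]
  defaultModel,       // string | null, a hypothetical app-level default
  currentChatSession, // optional ChatSession
  0.7                 // optional default temperature
);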