Compare commits

18 Commits

Author SHA1 Message Date
pablonyx 730c0d12a8 k 2025-04-01 11:36:52 -07:00
pablonyx 1027fbe845 validation 2025-04-01 11:34:01 -07:00
pablonyx 3ec738caa5 update 2025-04-01 11:18:31 -07:00
pablonyx 4fbdbbbfa0 k 2025-04-01 10:59:12 -07:00
pablonyx e655b4b248 fix build 2025-04-01 10:55:06 -07:00
pablonyx 7535280e6e stop using sharp 2025-04-01 10:51:54 -07:00
pablonyx 9aa4037434 nit 2025-04-01 10:35:04 -07:00
pablonyx fe70cae942 k 2025-04-01 10:30:00 -07:00
pablonyx e978e4e26f k 2025-04-01 10:18:11 -07:00
pablonyx 995d4feafd k 2025-04-01 10:16:05 -07:00
pablonyx 999af49687 k 2025-04-01 10:08:38 -07:00
pablonyx 6df124f593 k# Please enter the commit message for your changes. Lines starting 2025-04-01 10:04:08 -07:00
pablonyx 12f8627799 very minor standardization 2025-04-01 09:41:48 -07:00
pablonyx 2f3020a4d3 Update migration (#4410) 2025-04-01 09:10:24 -07:00
SubashMohan 4bae1318bb refactor tests for Highspot connector to use mocking for API key retrieval (#4346) 2025-04-01 02:39:05 +00:00
Weves 11c3f44c76 Init engine in slackbot 2025-03-31 17:04:20 -07:00
rkuo-danswer cb38ac8a97 also set permission upsert to medium priority (#4405) (Co-authored-by: Richard Kuo (Onyx) <rkuo@onyx.app>) 2025-03-31 14:59:31 -07:00
pablonyx b2120b9f39 add user files (#4152) 2025-03-31 21:06:59 +00:00
35 changed files with 349 additions and 478 deletions

View File

@@ -1,50 +0,0 @@
"""add prompt length limit
Revision ID: f71470ba9274
Revises: 6a804aeb4830
Create Date: 2025-04-01 15:07:14.977435
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "f71470ba9274"
down_revision = "6a804aeb4830"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.alter_column(
"prompt",
"system_prompt",
existing_type=sa.TEXT(),
type_=sa.String(length=8000),
existing_nullable=False,
)
op.alter_column(
"prompt",
"task_prompt",
existing_type=sa.TEXT(),
type_=sa.String(length=8000),
existing_nullable=False,
)
def downgrade() -> None:
op.alter_column(
"prompt",
"system_prompt",
existing_type=sa.String(length=8000),
type_=sa.TEXT(),
existing_nullable=False,
)
op.alter_column(
"prompt",
"task_prompt",
existing_type=sa.String(length=8000),
type_=sa.TEXT(),
existing_nullable=False,
)
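
For context, a minimal runnable sketch of the difference this migration toggles between Text and String(8000) on the prompt columns. The model below is a hypothetical stand-in, not the real Prompt model, and SQLite is used only for convenience (it ignores VARCHAR lengths, whereas PostgreSQL enforces them):

from sqlalchemy import Column, Integer, String, Text, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class PromptDemo(Base):  # hypothetical stand-in for the Prompt model
    __tablename__ = "prompt_demo"
    id = Column(Integer, primary_key=True)
    system_prompt = Column(String(8000))  # limited, as after this migration's upgrade()
    task_prompt = Column(Text)            # unlimited, as after downgrade()

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(PromptDemo(system_prompt="x" * 10_000, task_prompt="x" * 10_000))
    session.commit()  # passes on SQLite; PostgreSQL would reject system_prompt as too long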

View File

@@ -1,77 +0,0 @@
"""updated constraints for ccpairs
Revision ID: f7505c5b0284
Revises: f71470ba9274
Create Date: 2025-04-01 17:50:42.504818
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "f7505c5b0284"
down_revision = "f71470ba9274"
branch_labels = None
depends_on = None
def upgrade() -> None:
# 1) Drop the old foreign-key constraints
op.drop_constraint(
"document_by_connector_credential_pair_connector_id_fkey",
"document_by_connector_credential_pair",
type_="foreignkey",
)
op.drop_constraint(
"document_by_connector_credential_pair_credential_id_fkey",
"document_by_connector_credential_pair",
type_="foreignkey",
)
# 2) Re-add them with ondelete='CASCADE'
op.create_foreign_key(
"document_by_connector_credential_pair_connector_id_fkey",
source_table="document_by_connector_credential_pair",
referent_table="connector",
local_cols=["connector_id"],
remote_cols=["id"],
ondelete="CASCADE",
)
op.create_foreign_key(
"document_by_connector_credential_pair_credential_id_fkey",
source_table="document_by_connector_credential_pair",
referent_table="credential",
local_cols=["credential_id"],
remote_cols=["id"],
ondelete="CASCADE",
)
def downgrade() -> None:
# Reverse the changes for rollback
op.drop_constraint(
"document_by_connector_credential_pair_connector_id_fkey",
"document_by_connector_credential_pair",
type_="foreignkey",
)
op.drop_constraint(
"document_by_connector_credential_pair_credential_id_fkey",
"document_by_connector_credential_pair",
type_="foreignkey",
)
# Recreate without CASCADE
op.create_foreign_key(
"document_by_connector_credential_pair_connector_id_fkey",
"document_by_connector_credential_pair",
"connector",
["connector_id"],
["id"],
)
op.create_foreign_key(
"document_by_connector_credential_pair_credential_id_fkey",
"document_by_connector_credential_pair",
"credential",
["credential_id"],
["id"],
)
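
A minimal sketch of the behavior ondelete="CASCADE" adds here, using a toy schema (table and column names below are illustrative, not the real Onyx tables); deleting the parent row silently removes its child rows instead of failing with a foreign-key violation:

from sqlalchemy import create_engine, event, text

engine = create_engine("sqlite://")
# SQLite only enforces foreign keys when the pragma is enabled per connection.
event.listen(engine, "connect", lambda dbapi_conn, _: dbapi_conn.execute("PRAGMA foreign_keys=ON"))

with engine.begin() as conn:
    conn.execute(text("CREATE TABLE parent_connector (id INTEGER PRIMARY KEY)"))
    conn.execute(text(
        "CREATE TABLE doc_by_cc_pair ("
        "doc_id TEXT, "
        "connector_id INTEGER REFERENCES parent_connector(id) ON DELETE CASCADE)"
    ))
    conn.execute(text("INSERT INTO parent_connector VALUES (1)"))
    conn.execute(text("INSERT INTO doc_by_cc_pair VALUES ('doc-1', 1)"))
    conn.execute(text("DELETE FROM parent_connector WHERE id = 1"))
    remaining = conn.execute(text("SELECT COUNT(*) FROM doc_by_cc_pair")).scalar()
    print(remaining)  # 0 -- the child row was cascaded away with its parent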

View File

@@ -58,7 +58,6 @@ def _get_objects_access_for_user_email_from_salesforce(
f"Time taken to get Salesforce user ID: {end_time - start_time} seconds"
)
if user_id is None:
logger.warning(f"User '{user_email}' not found in Salesforce")
return None
# This is the only query that is not cached in the function
@@ -66,7 +65,6 @@ def _get_objects_access_for_user_email_from_salesforce(
object_id_to_access = get_objects_access_for_user_id(
salesforce_client, user_id, list(object_ids)
)
logger.debug(f"Object ID to access: {object_id_to_access}")
return object_id_to_access

View File

@@ -23,7 +23,6 @@ from onyx.utils.url import add_url_params
from onyx.utils.variable_functionality import fetch_versioned_implementation
from shared_configs.configs import MULTI_TENANT
HTML_EMAIL_TEMPLATE = """\
<!DOCTYPE html>
<html lang="en">

View File

@@ -56,7 +56,6 @@ from httpx_oauth.oauth2 import OAuth2Token
from pydantic import BaseModel
from sqlalchemy.ext.asyncio import AsyncSession
from ee.onyx.configs.app_configs import ANONYMOUS_USER_COOKIE_NAME
from onyx.auth.api_key import get_hashed_api_key_from_request
from onyx.auth.email_utils import send_forgot_password_email
from onyx.auth.email_utils import send_user_verification_email
@@ -514,25 +513,6 @@ class UserManager(UUIDIDMixin, BaseUserManager[User, uuid.UUID]):
return user
async def on_after_login(
self,
user: User,
request: Optional[Request] = None,
response: Optional[Response] = None,
) -> None:
try:
if response and request and ANONYMOUS_USER_COOKIE_NAME in request.cookies:
response.delete_cookie(
ANONYMOUS_USER_COOKIE_NAME,
# Ensure cookie deletion doesn't override other cookies by setting the same path/domain
path="/",
domain=None,
secure=WEB_DOMAIN.startswith("https"),
)
logger.debug(f"Deleted anonymous user cookie for user {user.email}")
except Exception:
logger.exception("Error deleting anonymous user cookie")
async def on_after_register(
self, user: User, request: Optional[Request] = None
) -> None:
@@ -1322,7 +1302,6 @@ def get_oauth_router(
# Login user
response = await backend.login(strategy, user)
await user_manager.on_after_login(user, request, response)
# Prepare redirect response
if tenant_id is None:
# Use URL utility to add parameters
@@ -1332,14 +1311,9 @@ def get_oauth_router(
# No parameters to add
redirect_response = RedirectResponse(next_url, status_code=302)
# Copy headers from auth response to redirect response, with special handling for Set-Cookie
# Copy headers and other attributes from 'response' to 'redirect_response'
for header_name, header_value in response.headers.items():
# FastAPI can have multiple Set-Cookie headers as a list
if header_name.lower() == "set-cookie" and isinstance(header_value, list):
for cookie_value in header_value:
redirect_response.headers.append(header_name, cookie_value)
else:
redirect_response.headers[header_name] = header_value
redirect_response.headers[header_name] = header_value
if hasattr(response, "body"):
redirect_response.body = response.body
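
A standalone sketch of the cookie-deletion pattern removed above, assuming plain FastAPI; the route and cookie name are placeholders, not Onyx's actual endpoints:

from fastapi import FastAPI, Request, Response

app = FastAPI()

@app.post("/login")
def login(request: Request, response: Response) -> dict[str, bool]:
    # If the client still carries the anonymous-session cookie, clear it on login.
    if "anonymous_user" in request.cookies:  # placeholder cookie name
        response.delete_cookie("anonymous_user", path="/", domain=None)
    return {"ok": True}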

View File

@@ -1,6 +1,5 @@
import logging
import multiprocessing
import os
import time
from typing import Any
from typing import cast
@@ -306,7 +305,7 @@ def wait_for_db(sender: Any, **kwargs: Any) -> None:
def on_secondary_worker_init(sender: Any, **kwargs: Any) -> None:
logger.info(f"Running as a secondary celery worker: pid={os.getpid()}")
logger.info("Running as a secondary celery worker.")
# Set up variables for waiting on primary worker
WAIT_INTERVAL = 5

View File

@@ -1,7 +0,0 @@
from celery import Celery
import onyx.background.celery.apps.app_base as app_base
celery_app = Celery(__name__)
celery_app.config_from_object("onyx.background.celery.configs.client")
celery_app.Task = app_base.TenantAwareTask # type: ignore [misc]
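
For reference, a minimal sketch of the client-only Celery pattern this deleted stub provided: an app that never starts a worker and only dispatches tasks by name. The broker URL and task name below are placeholders rather than Onyx's real configuration:

from celery import Celery

# A pure "client" app only needs broker settings; it does not import any task code.
client = Celery("client", broker="redis://localhost:6379/0")  # placeholder broker URL

# send_task dispatches by name, so the worker resolves the task implementation.
client.send_task(
    "check_for_indexing",           # hypothetical task name
    kwargs={"tenant_id": "public"},
    priority=0,                     # passed through to the broker if it supports priorities
)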

View File

@@ -1,5 +1,4 @@
import logging
import os
from typing import Any
from typing import cast
@@ -96,7 +95,7 @@ def on_worker_init(sender: Worker, **kwargs: Any) -> None:
app_base.wait_for_db(sender, **kwargs)
app_base.wait_for_vespa_or_shutdown(sender, **kwargs)
logger.info(f"Running as the primary celery worker: pid={os.getpid()}")
logger.info("Running as the primary celery worker.")
# Less startup checks in multi-tenant case
if MULTI_TENANT:

View File

@@ -1,16 +0,0 @@
import onyx.background.celery.configs.base as shared_config
broker_url = shared_config.broker_url
broker_connection_retry_on_startup = shared_config.broker_connection_retry_on_startup
broker_pool_limit = shared_config.broker_pool_limit
broker_transport_options = shared_config.broker_transport_options
redis_socket_keepalive = shared_config.redis_socket_keepalive
redis_retry_on_timeout = shared_config.redis_retry_on_timeout
redis_backend_health_check_interval = shared_config.redis_backend_health_check_interval
result_backend = shared_config.result_backend
result_expires = shared_config.result_expires # 86400 seconds is the default
task_default_priority = shared_config.task_default_priority
task_acks_late = shared_config.task_acks_late

View File

@@ -886,8 +886,11 @@ def monitor_ccpair_permissions_taskset(
record_type=RecordType.PERMISSION_SYNC_PROGRESS,
data={
"cc_pair_id": cc_pair_id,
"total_docs_synced": initial if initial is not None else 0,
"remaining_docs_to_sync": remaining,
"id": payload.id if payload else None,
"total_docs": initial if initial is not None else 0,
"remaining_docs": remaining,
"synced_docs": (initial - remaining) if initial is not None else 0,
"is_complete": remaining == 0,
},
tenant_id=tenant_id,
)
@@ -903,13 +906,6 @@ def monitor_ccpair_permissions_taskset(
f"num_synced={initial}"
)
# Add telemetry for permission syncing complete
optional_telemetry(
record_type=RecordType.PERMISSION_SYNC_COMPLETE,
data={"cc_pair_id": cc_pair_id},
tenant_id=tenant_id,
)
update_sync_record_status(
db_session=db_session,
entity_id=cc_pair_id,

View File

@@ -1,20 +0,0 @@
"""Factory stub for running celery worker / celery beat.
This code is different from the primary/beat stubs because there is no EE version to
fetch. Port over the code in those files if we add an EE version of this worker.
This is an app stub purely for sending tasks as a client.
"""
from celery import Celery
from onyx.utils.variable_functionality import set_is_ee_based_on_env_variable
set_is_ee_based_on_env_variable()
def get_app() -> Celery:
from onyx.background.celery.apps.client import celery_app
return celery_app
app = get_app()

View File

@@ -56,6 +56,7 @@ from onyx.indexing.indexing_pipeline import build_indexing_pipeline
from onyx.natural_language_processing.search_nlp_models import (
InformationContentClassificationModel,
)
from onyx.redis.redis_connector import RedisConnector
from onyx.utils.logger import setup_logger
from onyx.utils.logger import TaskAttemptSingleton
from onyx.utils.telemetry import create_milestone_and_report
@@ -577,8 +578,11 @@ def _run_indexing(
data={
"index_attempt_id": index_attempt_id,
"cc_pair_id": ctx.cc_pair_id,
"current_docs_indexed": document_count,
"current_chunks_indexed": chunk_count,
"connector_id": ctx.connector_id,
"credential_id": ctx.credential_id,
"total_docs_indexed": document_count,
"total_chunks": chunk_count,
"batch_num": batch_num,
"source": ctx.source.value,
},
tenant_id=tenant_id,
@@ -599,15 +603,26 @@ def _run_indexing(
checkpoint=checkpoint,
)
# Add telemetry for completed indexing
redis_connector = RedisConnector(tenant_id, ctx.cc_pair_id)
redis_connector_index = redis_connector.new_index(
index_attempt_start.search_settings_id
)
final_progress = redis_connector_index.get_progress() or 0
optional_telemetry(
record_type=RecordType.INDEXING_COMPLETE,
data={
"index_attempt_id": index_attempt_id,
"cc_pair_id": ctx.cc_pair_id,
"connector_id": ctx.connector_id,
"credential_id": ctx.credential_id,
"total_docs_indexed": document_count,
"total_chunks": chunk_count,
"batch_count": batch_num,
"time_elapsed_seconds": time.monotonic() - start_time,
"source": ctx.source.value,
"redis_progress": final_progress,
},
tenant_id=tenant_id,
)

View File

@@ -13,7 +13,6 @@ from typing import TYPE_CHECKING
from typing import TypeVar
from urllib.parse import parse_qs
from urllib.parse import quote
from urllib.parse import urljoin
from urllib.parse import urlparse
import requests
@@ -343,14 +342,9 @@ def build_confluence_document_id(
Returns:
str: The document id
"""
# NOTE: urljoin is tricky and will drop the last segment of the base if it doesn't
# end with "/" because it believes that makes it a file.
final_url = base_url.rstrip("/") + "/"
if is_cloud and not final_url.endswith("/wiki/"):
final_url = urljoin(final_url, "wiki") + "/"
final_url = urljoin(final_url, content_url.lstrip("/"))
return final_url
if is_cloud and not base_url.endswith("/wiki"):
base_url += "/wiki"
return f"{base_url}{content_url}"
def datetime_from_string(datetime_string: str) -> datetime:
@@ -460,19 +454,6 @@ def _handle_http_error(e: requests.HTTPError, attempt: int) -> int:
logger.warning("HTTPError with `None` as response or as headers")
raise e
# Confluence Server returns 403 when rate limited
if e.response.status_code == 403:
FORBIDDEN_MAX_RETRY_ATTEMPTS = 7
FORBIDDEN_RETRY_DELAY = 10
if attempt < FORBIDDEN_MAX_RETRY_ATTEMPTS:
logger.warning(
"403 error. This sometimes happens when we hit "
f"Confluence rate limits. Retrying in {FORBIDDEN_RETRY_DELAY} seconds..."
)
return FORBIDDEN_RETRY_DELAY
raise e
if (
e.response.status_code != 429
and RATE_LIMIT_MESSAGE_LOWERCASE not in e.response.text.lower()
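
The removed NOTE above refers to urljoin's trailing-slash behavior; a minimal sketch of the difference between the old urljoin-based construction and the plain concatenation that replaces it (URLs are illustrative):

from urllib.parse import urljoin

base = "https://example.atlassian.net/wiki"   # illustrative base URL
content = "/spaces/ENG/pages/123"

# Without a trailing "/", urljoin treats "wiki" as a file and drops it.
print(urljoin(base, content.lstrip("/")))        # https://example.atlassian.net/spaces/ENG/pages/123
# With a trailing "/", the base path is preserved.
print(urljoin(base + "/", content.lstrip("/")))  # https://example.atlassian.net/wiki/spaces/ENG/pages/123

# The replacement concatenates directly, which keeps the base path as-is.
print(f"{base}{content}")                        # https://example.atlassian.net/wiki/spaces/ENG/pages/123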

View File

@@ -5,13 +5,11 @@ from typing import cast
from sqlalchemy.orm import Session
from onyx.chat.models import ContextualPruningConfig
from onyx.chat.models import PromptConfig
from onyx.chat.models import SectionRelevancePiece
from onyx.chat.prune_and_merge import _merge_sections
from onyx.chat.prune_and_merge import ChunkRange
from onyx.chat.prune_and_merge import merge_chunk_intervals
from onyx.chat.prune_and_merge import prune_and_merge_sections
from onyx.configs.chat_configs import DISABLE_LLM_DOC_RELEVANCE
from onyx.context.search.enums import LLMEvaluationType
from onyx.context.search.enums import QueryFlow
@@ -63,7 +61,6 @@ class SearchPipeline:
| None = None,
rerank_metrics_callback: Callable[[RerankMetricsContainer], None] | None = None,
prompt_config: PromptConfig | None = None,
contextual_pruning_config: ContextualPruningConfig | None = None,
):
# NOTE: The Search Request contains a lot of fields that are overrides, many of them can be None
# and typically are None. The preprocessing will fetch default values to replace these empty overrides.
@@ -80,9 +77,6 @@ class SearchPipeline:
self.search_settings = get_current_search_settings(db_session)
self.document_index = get_default_document_index(self.search_settings, None)
self.prompt_config: PromptConfig | None = prompt_config
self.contextual_pruning_config: ContextualPruningConfig | None = (
contextual_pruning_config
)
# Preprocessing steps generate this
self._search_query: SearchQuery | None = None
@@ -426,26 +420,7 @@ class SearchPipeline:
if self._final_context_sections is not None:
return self._final_context_sections
if (
self.contextual_pruning_config is not None
and self.prompt_config is not None
):
self._final_context_sections = prune_and_merge_sections(
sections=self.reranked_sections,
section_relevance_list=None,
prompt_config=self.prompt_config,
llm_config=self.llm.config,
question=self.search_query.query,
contextual_pruning_config=self.contextual_pruning_config,
)
else:
logger.error(
"Contextual pruning or prompt config not set, using default merge"
)
self._final_context_sections = _merge_sections(
sections=self.reranked_sections
)
self._final_context_sections = _merge_sections(sections=self.reranked_sections)
return self._final_context_sections
@property

View File

@@ -217,6 +217,7 @@ def mark_attempt_in_progress(
"index_attempt_id": index_attempt.id,
"status": IndexingStatus.IN_PROGRESS.value,
"cc_pair_id": index_attempt.connector_credential_pair_id,
"search_settings_id": index_attempt.search_settings_id,
},
)
except Exception:
@@ -245,6 +246,9 @@ def mark_attempt_succeeded(
"index_attempt_id": index_attempt_id,
"status": IndexingStatus.SUCCESS.value,
"cc_pair_id": attempt.connector_credential_pair_id,
"search_settings_id": attempt.search_settings_id,
"total_docs_indexed": attempt.total_docs_indexed,
"new_docs_indexed": attempt.new_docs_indexed,
},
)
except Exception:
@@ -273,6 +277,9 @@ def mark_attempt_partially_succeeded(
"index_attempt_id": index_attempt_id,
"status": IndexingStatus.COMPLETED_WITH_ERRORS.value,
"cc_pair_id": attempt.connector_credential_pair_id,
"search_settings_id": attempt.search_settings_id,
"total_docs_indexed": attempt.total_docs_indexed,
"new_docs_indexed": attempt.new_docs_indexed,
},
)
except Exception:
@@ -305,6 +312,10 @@ def mark_attempt_canceled(
"index_attempt_id": index_attempt_id,
"status": IndexingStatus.CANCELED.value,
"cc_pair_id": attempt.connector_credential_pair_id,
"search_settings_id": attempt.search_settings_id,
"reason": reason,
"total_docs_indexed": attempt.total_docs_indexed,
"new_docs_indexed": attempt.new_docs_indexed,
},
)
except Exception:
@@ -339,6 +350,10 @@ def mark_attempt_failed(
"index_attempt_id": index_attempt_id,
"status": IndexingStatus.FAILED.value,
"cc_pair_id": attempt.connector_credential_pair_id,
"search_settings_id": attempt.search_settings_id,
"reason": failure_reason,
"total_docs_indexed": attempt.total_docs_indexed,
"new_docs_indexed": attempt.new_docs_indexed,
},
)
except Exception:

View File

@@ -703,11 +703,7 @@ class Connector(Base):
)
documents_by_connector: Mapped[
list["DocumentByConnectorCredentialPair"]
] = relationship(
"DocumentByConnectorCredentialPair",
back_populates="connector",
passive_deletes=True,
)
] = relationship("DocumentByConnectorCredentialPair", back_populates="connector")
# synchronize this validation logic with RefreshFrequencySchema etc on front end
# until we have a centralized validation schema
@@ -761,11 +757,7 @@ class Credential(Base):
)
documents_by_credential: Mapped[
list["DocumentByConnectorCredentialPair"]
] = relationship(
"DocumentByConnectorCredentialPair",
back_populates="credential",
passive_deletes=True,
)
] = relationship("DocumentByConnectorCredentialPair", back_populates="credential")
user: Mapped[User | None] = relationship("User", back_populates="credentials")
@@ -1118,10 +1110,10 @@ class DocumentByConnectorCredentialPair(Base):
id: Mapped[str] = mapped_column(ForeignKey("document.id"), primary_key=True)
# TODO: transition this to use the ConnectorCredentialPair id directly
connector_id: Mapped[int] = mapped_column(
ForeignKey("connector.id", ondelete="CASCADE"), primary_key=True
ForeignKey("connector.id"), primary_key=True
)
credential_id: Mapped[int] = mapped_column(
ForeignKey("credential.id", ondelete="CASCADE"), primary_key=True
ForeignKey("credential.id"), primary_key=True
)
# used to better keep track of document counts at a connector level
@@ -1131,10 +1123,10 @@ class DocumentByConnectorCredentialPair(Base):
has_been_indexed: Mapped[bool] = mapped_column(Boolean)
connector: Mapped[Connector] = relationship(
"Connector", back_populates="documents_by_connector", passive_deletes=True
"Connector", back_populates="documents_by_connector"
)
credential: Mapped[Credential] = relationship(
"Credential", back_populates="documents_by_credential", passive_deletes=True
"Credential", back_populates="documents_by_credential"
)
__table_args__ = (
@@ -1658,8 +1650,8 @@ class Prompt(Base):
)
name: Mapped[str] = mapped_column(String)
description: Mapped[str] = mapped_column(String)
system_prompt: Mapped[str] = mapped_column(String(length=8000))
task_prompt: Mapped[str] = mapped_column(String(length=8000))
system_prompt: Mapped[str] = mapped_column(Text)
task_prompt: Mapped[str] = mapped_column(Text)
include_citations: Mapped[bool] = mapped_column(Boolean, default=True)
datetime_aware: Mapped[bool] = mapped_column(Boolean, default=True)
# Default prompts are configured via backend during deployment

View File

@@ -5,7 +5,6 @@ from datetime import timezone
from onyx.configs.constants import INDEX_SEPARATOR
from onyx.context.search.models import IndexFilters
from onyx.document_index.interfaces import VespaChunkRequest
from onyx.document_index.vespa_constants import ACCESS_CONTROL_LIST
from onyx.document_index.vespa_constants import CHUNK_ID
from onyx.document_index.vespa_constants import DOC_UPDATED_AT
from onyx.document_index.vespa_constants import DOCUMENT_ID
@@ -75,10 +74,8 @@ def build_vespa_filters(
filter_str += f'({TENANT_ID} contains "{filters.tenant_id}") and '
# ACL filters
if filters.access_control_list is not None:
filter_str += _build_or_filters(
ACCESS_CONTROL_LIST, filters.access_control_list
)
# if filters.access_control_list is not None:
# filter_str += _build_or_filters(ACCESS_CONTROL_LIST, filters.access_control_list)
# Source type filters
source_strs = (

View File

@@ -602,7 +602,7 @@ def get_max_input_tokens(
)
if input_toks <= 0:
return GEN_AI_MODEL_FALLBACK_MAX_TOKENS
raise RuntimeError("No tokens for input for the LLM given settings")
return input_toks

View File

@@ -21,7 +21,7 @@ from onyx.background.celery.tasks.external_group_syncing.tasks import (
from onyx.background.celery.tasks.pruning.tasks import (
try_creating_prune_generator_task,
)
from onyx.background.celery.versioned_apps.client import app as client_app
from onyx.background.celery.versioned_apps.primary import app as primary_app
from onyx.background.indexing.models import IndexAttemptErrorPydantic
from onyx.configs.constants import OnyxCeleryPriority
from onyx.configs.constants import OnyxCeleryTask
@@ -219,7 +219,7 @@ def update_cc_pair_status(
continue
# Revoke the task to prevent it from running
client_app.control.revoke(index_payload.celery_task_id)
primary_app.control.revoke(index_payload.celery_task_id)
# If it is running, then signaling for termination will get the
# watchdog thread to kill the spawned task
@@ -238,7 +238,7 @@ def update_cc_pair_status(
db_session.commit()
# this speeds up the start of indexing by firing the check immediately
client_app.send_task(
primary_app.send_task(
OnyxCeleryTask.CHECK_FOR_INDEXING,
kwargs=dict(tenant_id=tenant_id),
priority=OnyxCeleryPriority.HIGH,
@@ -376,7 +376,7 @@ def prune_cc_pair(
f"{cc_pair.connector.name} connector."
)
payload_id = try_creating_prune_generator_task(
client_app, cc_pair, db_session, r, tenant_id
primary_app, cc_pair, db_session, r, tenant_id
)
if not payload_id:
raise HTTPException(
@@ -450,7 +450,7 @@ def sync_cc_pair(
f"{cc_pair.connector.name} connector."
)
payload_id = try_creating_permissions_sync_task(
client_app, cc_pair_id, r, tenant_id
primary_app, cc_pair_id, r, tenant_id
)
if not payload_id:
raise HTTPException(
@@ -524,7 +524,7 @@ def sync_cc_pair_groups(
f"{cc_pair.connector.name} connector."
)
payload_id = try_creating_external_group_sync_task(
client_app, cc_pair_id, r, tenant_id
primary_app, cc_pair_id, r, tenant_id
)
if not payload_id:
raise HTTPException(
@@ -634,7 +634,7 @@ def associate_credential_to_connector(
)
# trigger indexing immediately
client_app.send_task(
primary_app.send_task(
OnyxCeleryTask.CHECK_FOR_INDEXING,
priority=OnyxCeleryPriority.HIGH,
kwargs={"tenant_id": tenant_id},

View File

@@ -20,7 +20,7 @@ from onyx.auth.users import current_admin_user
from onyx.auth.users import current_chat_accessible_user
from onyx.auth.users import current_curator_or_admin_user
from onyx.auth.users import current_user
from onyx.background.celery.versioned_apps.client import app as client_app
from onyx.background.celery.versioned_apps.primary import app as primary_app
from onyx.configs.app_configs import ENABLED_CONNECTOR_TYPES
from onyx.configs.app_configs import MOCK_CONNECTOR_FILE_PATH
from onyx.configs.constants import DocumentSource
@@ -928,7 +928,7 @@ def create_connector_with_mock_credential(
)
# trigger indexing immediately
client_app.send_task(
primary_app.send_task(
OnyxCeleryTask.CHECK_FOR_INDEXING,
priority=OnyxCeleryPriority.HIGH,
kwargs={"tenant_id": tenant_id},
@@ -1314,7 +1314,7 @@ def trigger_indexing_for_cc_pair(
# run the beat task to pick up the triggers immediately
priority = OnyxCeleryPriority.HIGHEST if is_user_file else OnyxCeleryPriority.HIGH
logger.info(f"Sending indexing check task with priority {priority}")
client_app.send_task(
primary_app.send_task(
OnyxCeleryTask.CHECK_FOR_INDEXING,
priority=priority,
kwargs={"tenant_id": tenant_id},

View File

@@ -6,7 +6,7 @@ from sqlalchemy.orm import Session
from onyx.auth.users import current_curator_or_admin_user
from onyx.auth.users import current_user
from onyx.background.celery.versioned_apps.client import app as client_app
from onyx.background.celery.versioned_apps.primary import app as primary_app
from onyx.configs.constants import OnyxCeleryPriority
from onyx.configs.constants import OnyxCeleryTask
from onyx.db.document_set import check_document_sets_are_public
@@ -52,7 +52,7 @@ def create_document_set(
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
client_app.send_task(
primary_app.send_task(
OnyxCeleryTask.CHECK_FOR_VESPA_SYNC_TASK,
kwargs={"tenant_id": tenant_id},
priority=OnyxCeleryPriority.HIGH,
@@ -85,7 +85,7 @@ def patch_document_set(
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
client_app.send_task(
primary_app.send_task(
OnyxCeleryTask.CHECK_FOR_VESPA_SYNC_TASK,
kwargs={"tenant_id": tenant_id},
priority=OnyxCeleryPriority.HIGH,
@@ -108,7 +108,7 @@ def delete_document_set(
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
client_app.send_task(
primary_app.send_task(
OnyxCeleryTask.CHECK_FOR_VESPA_SYNC_TASK,
kwargs={"tenant_id": tenant_id},
priority=OnyxCeleryPriority.HIGH,

View File

@@ -10,7 +10,7 @@ from sqlalchemy.orm import Session
from onyx.auth.users import current_admin_user
from onyx.auth.users import current_curator_or_admin_user
from onyx.background.celery.versioned_apps.client import app as client_app
from onyx.background.celery.versioned_apps.primary import app as primary_app
from onyx.configs.app_configs import GENERATIVE_MODEL_ACCESS_CHECK_FREQ
from onyx.configs.constants import DocumentSource
from onyx.configs.constants import KV_GEN_AI_KEY_CHECK_TIME
@@ -192,7 +192,7 @@ def create_deletion_attempt_for_connector_id(
db_session.commit()
# run the beat task to pick up this deletion from the db immediately
client_app.send_task(
primary_app.send_task(
OnyxCeleryTask.CHECK_FOR_CONNECTOR_DELETION,
priority=OnyxCeleryPriority.HIGH,
kwargs={"tenant_id": tenant_id},

View File

@@ -376,7 +376,6 @@ class SearchTool(Tool[SearchToolOverrideKwargs]):
db_session=alternate_db_session or self.db_session,
prompt_config=self.prompt_config,
retrieved_sections_callback=retrieved_sections_callback,
contextual_pruning_config=self.contextual_pruning_config,
)
search_query_info = SearchQueryInfo(
@@ -448,7 +447,6 @@ class SearchTool(Tool[SearchToolOverrideKwargs]):
db_session=self.db_session,
bypass_acl=self.bypass_acl,
prompt_config=self.prompt_config,
contextual_pruning_config=self.contextual_pruning_config,
)
# Log what we're doing

View File

@@ -39,7 +39,6 @@ class RecordType(str, Enum):
INDEXING_PROGRESS = "indexing_progress"
INDEXING_COMPLETE = "indexing_complete"
PERMISSION_SYNC_PROGRESS = "permission_sync_progress"
PERMISSION_SYNC_COMPLETE = "permission_sync_complete"
INDEX_ATTEMPT_STATUS = "index_attempt_status"

View File

@@ -887,7 +887,6 @@ def main() -> None:
type=int,
help="Maximum number of documents to delete (for delete-all-documents)",
)
parser.add_argument("--link", help="Document link (for get_acls filter)")
args = parser.parse_args()
vespa_debug = VespaDebugging(args.tenant_id)
@@ -925,11 +924,7 @@ def main() -> None:
elif args.action == "get_acls":
if args.cc_pair_id is None:
parser.error("--cc-pair-id is required for get_acls action")
if args.link is None:
vespa_debug.acls(args.cc_pair_id, args.n)
else:
vespa_debug.acls_by_link(args.cc_pair_id, args.link)
vespa_debug.acls(args.cc_pair_id, args.n)
if __name__ == "__main__":

View File

@@ -165,18 +165,17 @@ class DocumentManager:
doc["fields"]["document_id"]: doc["fields"] for doc in retrieved_docs_dict
}
# NOTE(rkuo): too much log spam
# Left this here for debugging purposes.
# import json
import json
# print("DEBUGGING DOCUMENTS")
# print(retrieved_docs)
# for doc in retrieved_docs.values():
# printable_doc = doc.copy()
# print(printable_doc.keys())
# printable_doc.pop("embeddings")
# printable_doc.pop("title_embedding")
# print(json.dumps(printable_doc, indent=2))
print("DEBUGGING DOCUMENTS")
print(retrieved_docs)
for doc in retrieved_docs.values():
printable_doc = doc.copy()
print(printable_doc.keys())
printable_doc.pop("embeddings")
printable_doc.pop("title_embedding")
print(json.dumps(printable_doc, indent=2))
for document in cc_pair.documents:
retrieved_doc = retrieved_docs.get(document.id)

View File

@@ -1,4 +1,3 @@
import time
from datetime import datetime
from datetime import timedelta
from urllib.parse import urlencode
@@ -192,7 +191,7 @@ class IndexAttemptManager:
user_performing_action: DATestUser | None = None,
) -> None:
"""Wait for an IndexAttempt to complete"""
start = time.monotonic()
start = datetime.now()
while True:
index_attempt = IndexAttemptManager.get_index_attempt_by_id(
index_attempt_id=index_attempt_id,
@@ -204,7 +203,7 @@ class IndexAttemptManager:
print(f"IndexAttempt {index_attempt_id} completed")
return
elapsed = time.monotonic() - start
elapsed = (datetime.now() - start).total_seconds()
if elapsed > timeout:
raise TimeoutError(
f"IndexAttempt {index_attempt_id} did not complete within {timeout} seconds"

View File

@@ -242,7 +242,15 @@ export function AssistantEditor({
enabledToolsMap[tool.id] = personaCurrentToolIds.includes(tool.id);
});
const { selectedFiles, selectedFolders } = useDocumentsContext();
const {
selectedFiles,
selectedFolders,
addSelectedFile,
removeSelectedFile,
addSelectedFolder,
removeSelectedFolder,
clearSelectedItems,
} = useDocumentsContext();
const [showVisibilityWarning, setShowVisibilityWarning] = useState(false);

View File

@@ -302,17 +302,11 @@ export default function AddConnector({
...connector_specific_config
} = values;
// Apply special transforms according to application logic
// Apply transforms from connectors.ts configuration
const transformedConnectorSpecificConfig = Object.entries(
connector_specific_config
).reduce(
(acc, [key, value]) => {
// Filter out empty strings from arrays
if (Array.isArray(value)) {
value = (value as any[]).filter(
(item) => typeof item !== "string" || item.trim() !== ""
);
}
const matchingConfigValue = configuration.values.find(
(configValue) => configValue.name === key
);

View File

@@ -4,62 +4,177 @@ import {
LlmDescriptor,
useLlmManager,
} from "@/lib/hooks";
import { StringOrNumberOption } from "@/components/Dropdown";
import { Persona } from "@/app/admin/assistants/interfaces";
import { destructureValue } from "@/lib/llm/utils";
import { destructureValue, getFinalLLM, structureValue } from "@/lib/llm/utils";
import { useState } from "react";
import { Hoverable } from "@/components/Hoverable";
import { Popover } from "@/components/popover/Popover";
import { IconType } from "react-icons";
import { FiRefreshCw } from "react-icons/fi";
import LLMPopover from "./input/LLMPopover";
import { FiRefreshCw, FiCheck } from "react-icons/fi";
export default function RegenerateOption({
selectedAssistant,
regenerate,
overriddenModel,
export function RegenerateDropdown({
options,
selected,
onSelect,
side,
maxHeight,
alternate,
onDropdownVisibleChange,
}: {
selectedAssistant: Persona;
regenerate: (modelOverRide: LlmDescriptor) => Promise<void>;
overriddenModel?: string;
alternate?: string;
options: StringOrNumberOption[];
selected: string | null;
onSelect: (value: string | number | null) => void;
includeDefault?: boolean;
side?: "top" | "right" | "bottom" | "left";
maxHeight?: string;
onDropdownVisibleChange: (isVisible: boolean) => void;
}) {
const { llmProviders } = useChatContext();
const llmManager = useLlmManager(llmProviders);
const [isOpen, setIsOpen] = useState(false);
const toggleDropdownVisible = (isVisible: boolean) => {
setIsOpen(isVisible);
onDropdownVisibleChange(isVisible);
};
const Dropdown = (
<div className="overflow-y-auto border border-neutral-800 py-2 min-w-fit bg-neutral-50 dark:bg-neutral-900 rounded-md shadow-lg">
<div className="mb-1 flex items-center justify-between px-4 pt-2">
<span className="text-sm text-neutral-600 dark:text-neutral-400">
Regenerate with
</span>
</div>
{options.map((option) => (
<div
key={option.value}
role="menuitem"
className={`flex items-center m-1.5 p-1.5 text-sm cursor-pointer focus-visible:outline-0 group relative hover:bg-neutral-200 dark:hover:bg-neutral-800 rounded-md my-0 px-3 mx-2 gap-2.5 py-3 !pr-3 ${
option.value === selected
? "bg-neutral-200 dark:bg-neutral-800"
: ""
}`}
onClick={() => onSelect(option.value)}
>
<div className="flex grow items-center justify-between gap-2">
<div>
<div className="flex items-center gap-3">
<div>{getDisplayNameForModel(option.name)}</div>
</div>
</div>
</div>
{option.value === selected && (
<FiCheck className="text-neutral-700 dark:text-neutral-300" />
)}
</div>
))}
</div>
);
return (
<LLMPopover
llmManager={llmManager}
llmProviders={llmProviders}
requiresImageGeneration={false}
currentAssistant={selectedAssistant}
currentModelName={overriddenModel}
trigger={
<Popover
open={isOpen}
onOpenChange={toggleDropdownVisible}
content={
<div onClick={() => toggleDropdownVisible(!isOpen)}>
{!overriddenModel ? (
{!alternate ? (
<Hoverable size={16} icon={FiRefreshCw as IconType} />
) : (
<Hoverable
size={16}
icon={FiRefreshCw as IconType}
hoverText={getDisplayNameForModel(overriddenModel)}
hoverText={getDisplayNameForModel(alternate)}
/>
)}
</div>
}
onSelect={(value) => {
const { name, provider, modelName } = destructureValue(value as string);
regenerate({
name: name,
provider: provider,
modelName: modelName,
});
}}
popover={Dropdown}
align="start"
side={side}
sideOffset={5}
triggerMaxWidth
/>
);
}
export default function RegenerateOption({
selectedAssistant,
regenerate,
overriddenModel,
onHoverChange,
onDropdownVisibleChange,
}: {
selectedAssistant: Persona;
regenerate: (modelOverRide: LlmDescriptor) => Promise<void>;
overriddenModel?: string;
onHoverChange: (isHovered: boolean) => void;
onDropdownVisibleChange: (isVisible: boolean) => void;
}) {
const { llmProviders } = useChatContext();
const llmManager = useLlmManager(llmProviders);
const [_, llmName] = getFinalLLM(llmProviders, selectedAssistant, null);
const llmOptionsByProvider: {
[provider: string]: { name: string; value: string }[];
} = {};
const uniqueModelNames = new Set<string>();
llmProviders.forEach((llmProvider) => {
if (!llmOptionsByProvider[llmProvider.provider]) {
llmOptionsByProvider[llmProvider.provider] = [];
}
(llmProvider.display_model_names || llmProvider.model_names).forEach(
(modelName) => {
if (!uniqueModelNames.has(modelName)) {
uniqueModelNames.add(modelName);
llmOptionsByProvider[llmProvider.provider].push({
name: modelName,
value: structureValue(
llmProvider.name,
llmProvider.provider,
modelName
),
});
}
}
);
});
const llmOptions = Object.entries(llmOptionsByProvider).flatMap(
([provider, options]) => [...options]
);
const currentModelName =
llmManager?.currentLlm.modelName ||
(selectedAssistant
? selectedAssistant.llm_model_version_override || llmName
: llmName);
return (
<div
className="group flex items-center relative"
onMouseEnter={() => onHoverChange(true)}
onMouseLeave={() => onHoverChange(false)}
>
<RegenerateDropdown
onDropdownVisibleChange={onDropdownVisibleChange}
alternate={overriddenModel}
options={llmOptions}
selected={currentModelName}
onSelect={(value) => {
const { name, provider, modelName } = destructureValue(
value as string
);
regenerate({
name: name,
provider: provider,
modelName: modelName,
});
}}
/>
</div>
);
}

View File

@@ -6,7 +6,7 @@ import { Persona } from "@/app/admin/assistants/interfaces";
import LLMPopover from "./LLMPopover";
import { InputPrompt } from "@/app/chat/interfaces";
import { FilterManager, getDisplayNameForModel, LlmManager } from "@/lib/hooks";
import { FilterManager, LlmManager } from "@/lib/hooks";
import { useChatContext } from "@/components/context/ChatContext";
import { ChatFileType, FileDescriptor } from "../interfaces";
import {
@@ -38,7 +38,6 @@ import { useUser } from "@/components/user/UserProvider";
import { useDocumentSelection } from "../useDocumentSelection";
import { AgenticToggle } from "./AgenticToggle";
import { SettingsContext } from "@/components/settings/SettingsProvider";
import { getProviderIcon } from "@/app/admin/configuration/llm/interfaces";
import { LoadingIndicator } from "react-select/dist/declarations/src/components/indicators";
import { FidgetSpinner } from "react-loader-spinner";
import { LoadingAnimation } from "@/components/Loading";
@@ -800,27 +799,6 @@ export function ChatInputBar({
llmManager={llmManager}
requiresImageGeneration={false}
currentAssistant={selectedAssistant}
trigger={
<button
className="dark:text-white text-black focus:outline-none"
data-testid="llm-popover-trigger"
>
<ChatInputOption
minimize
toggle
flexPriority="stiff"
name={getDisplayNameForModel(
llmManager?.currentLlm.modelName || "Models"
)}
Icon={getProviderIcon(
llmManager?.currentLlm.provider || "anthropic",
llmManager?.currentLlm.modelName ||
"claude-3-5-sonnet-20240620"
)}
tooltipContent="Switch models"
/>
</button>
}
/>
{retrievalEnabled && (

View File

@@ -1,9 +1,16 @@
import React, { useState, useEffect, useCallback, useMemo } from "react";
import React, {
useState,
useEffect,
useCallback,
useLayoutEffect,
useMemo,
} from "react";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/ui/popover";
import { ChatInputOption } from "./ChatInputOption";
import { getDisplayNameForModel } from "@/lib/hooks";
import {
checkLLMSupportsImageInput,
@@ -28,16 +35,12 @@ import { FiAlertTriangle } from "react-icons/fi";
import { Slider } from "@/components/ui/slider";
import { useUser } from "@/components/user/UserProvider";
import { TruncatedText } from "@/components/ui/truncatedText";
import { ChatInputOption } from "./ChatInputOption";
interface LLMPopoverProps {
llmProviders: LLMProviderDescriptor[];
llmManager: LlmManager;
requiresImageGeneration?: boolean;
currentAssistant?: Persona;
trigger?: React.ReactElement;
onSelect?: (value: string) => void;
currentModelName?: string;
}
export default function LLMPopover({
@@ -45,69 +48,70 @@ export default function LLMPopover({
llmManager,
requiresImageGeneration,
currentAssistant,
trigger,
onSelect,
currentModelName,
}: LLMPopoverProps) {
const [isOpen, setIsOpen] = useState(false);
const { user } = useUser();
// Memoize the options to prevent unnecessary recalculations
const { llmOptions, defaultProvider, defaultModelDisplayName } =
useMemo(() => {
const llmOptionsByProvider: {
[provider: string]: {
name: string;
value: string;
icon: React.FC<{ size?: number; className?: string }>;
}[];
} = {};
const {
llmOptionsByProvider,
llmOptions,
defaultProvider,
defaultModelDisplayName,
} = useMemo(() => {
const llmOptionsByProvider: {
[provider: string]: {
name: string;
value: string;
icon: React.FC<{ size?: number; className?: string }>;
}[];
} = {};
const uniqueModelNames = new Set<string>();
const uniqueModelNames = new Set<string>();
llmProviders.forEach((llmProvider) => {
if (!llmOptionsByProvider[llmProvider.provider]) {
llmOptionsByProvider[llmProvider.provider] = [];
}
llmProviders.forEach((llmProvider) => {
if (!llmOptionsByProvider[llmProvider.provider]) {
llmOptionsByProvider[llmProvider.provider] = [];
}
(llmProvider.display_model_names || llmProvider.model_names).forEach(
(modelName) => {
if (!uniqueModelNames.has(modelName)) {
uniqueModelNames.add(modelName);
llmOptionsByProvider[llmProvider.provider].push({
name: modelName,
value: structureValue(
llmProvider.name,
llmProvider.provider,
modelName
),
icon: getProviderIcon(llmProvider.provider, modelName),
});
}
(llmProvider.display_model_names || llmProvider.model_names).forEach(
(modelName) => {
if (!uniqueModelNames.has(modelName)) {
uniqueModelNames.add(modelName);
llmOptionsByProvider[llmProvider.provider].push({
name: modelName,
value: structureValue(
llmProvider.name,
llmProvider.provider,
modelName
),
icon: getProviderIcon(llmProvider.provider, modelName),
});
}
);
});
const llmOptions = Object.entries(llmOptionsByProvider).flatMap(
([provider, options]) => [...options]
}
);
});
const defaultProvider = llmProviders.find(
(llmProvider) => llmProvider.is_default_provider
);
const llmOptions = Object.entries(llmOptionsByProvider).flatMap(
([provider, options]) => [...options]
);
const defaultModelName = defaultProvider?.default_model_name;
const defaultModelDisplayName = defaultModelName
? getDisplayNameForModel(defaultModelName)
: null;
const defaultProvider = llmProviders.find(
(llmProvider) => llmProvider.is_default_provider
);
return {
llmOptionsByProvider,
llmOptions,
defaultProvider,
defaultModelDisplayName,
};
}, [llmProviders]);
const defaultModelName = defaultProvider?.default_model_name;
const defaultModelDisplayName = defaultModelName
? getDisplayNameForModel(defaultModelName)
: null;
return {
llmOptionsByProvider,
llmOptions,
defaultProvider,
defaultModelDisplayName,
};
}, [llmProviders]);
const [localTemperature, setLocalTemperature] = useState(
llmManager.temperature ?? 0.5
@@ -131,34 +135,32 @@ export default function LLMPopover({
// Memoize trigger content to prevent rerendering
const triggerContent = useMemo(
trigger
? () => trigger
: () => (
<button
className="dark:text-[#fff] text-[#000] focus:outline-none"
data-testid="llm-popover-trigger"
>
<ChatInputOption
minimize
toggle
flexPriority="stiff"
name={getDisplayNameForModel(
llmManager?.currentLlm.modelName ||
defaultModelDisplayName ||
"Models"
)}
Icon={getProviderIcon(
llmManager?.currentLlm.provider ||
defaultProvider?.provider ||
"anthropic",
llmManager?.currentLlm.modelName ||
defaultProvider?.default_model_name ||
"claude-3-5-sonnet-20240620"
)}
tooltipContent="Switch models"
/>
</button>
),
() => (
<button
className="dark:text-[#fff] text-[#000] focus:outline-none"
data-testid="llm-popover-trigger"
>
<ChatInputOption
minimize
toggle
flexPriority="stiff"
name={getDisplayNameForModel(
llmManager?.currentLlm.modelName ||
defaultModelDisplayName ||
"Models"
)}
Icon={getProviderIcon(
llmManager?.currentLlm.provider ||
defaultProvider?.provider ||
"anthropic",
llmManager?.currentLlm.modelName ||
defaultProvider?.default_model_name ||
"claude-3-5-sonnet-20240620"
)}
tooltipContent="Switch models"
/>
</button>
),
[defaultModelDisplayName, defaultProvider, llmManager?.currentLlm]
);
@@ -176,14 +178,12 @@ export default function LLMPopover({
<button
key={index}
className={`w-full flex items-center gap-x-2 px-3 py-2 text-sm text-left hover:bg-background-100 dark:hover:bg-neutral-800 transition-colors duration-150 ${
(currentModelName || llmManager.currentLlm.modelName) ===
name
llmManager.currentLlm.modelName === name
? "bg-background-100 dark:bg-neutral-900 text-text"
: "text-text-darker"
}`}
onClick={() => {
llmManager.updateCurrentLlm(destructureValue(value));
onSelect?.(value);
setIsOpen(false);
}}
>

View File

@@ -178,6 +178,7 @@ export const AgenticMessage = ({
const [isViewingInitialAnswer, setIsViewingInitialAnswer] = useState(true);
const [canShowResponse, setCanShowResponse] = useState(isComplete);
const [isRegenerateHovered, setIsRegenerateHovered] = useState(false);
const [isRegenerateDropdownVisible, setIsRegenerateDropdownVisible] =
useState(false);
@@ -596,6 +597,7 @@ export const AgenticMessage = ({
onDropdownVisibleChange={
setIsRegenerateDropdownVisible
}
onHoverChange={setIsRegenerateHovered}
selectedAssistant={currentPersona!}
regenerate={regenerate}
overriddenModel={overriddenModel}
@@ -611,10 +613,16 @@ export const AgenticMessage = ({
absolute -bottom-5
z-10
invisible ${
(isHovering || settings?.isMobile) && "!visible"
(isHovering ||
isRegenerateHovered ||
settings?.isMobile) &&
"!visible"
}
opacity-0 ${
(isHovering || settings?.isMobile) && "!opacity-100"
(isHovering ||
isRegenerateHovered ||
settings?.isMobile) &&
"!opacity-100"
}
translate-y-2 ${
(isHovering || settings?.isMobile) &&
@@ -689,6 +697,7 @@ export const AgenticMessage = ({
}
regenerate={regenerate}
overriddenModel={overriddenModel}
onHoverChange={setIsRegenerateHovered}
/>
</CustomTooltip>
)}

View File

@@ -301,6 +301,7 @@ export const AIMessage = ({
const finalContent = processContent(content as string);
const [isRegenerateHovered, setIsRegenerateHovered] = useState(false);
const [isRegenerateDropdownVisible, setIsRegenerateDropdownVisible] =
useState(false);
const { isHovering, trackedElementRef, hoverElementRef } = useMouseTracking();
@@ -727,6 +728,7 @@ export const AIMessage = ({
onDropdownVisibleChange={
setIsRegenerateDropdownVisible
}
onHoverChange={setIsRegenerateHovered}
selectedAssistant={currentPersona!}
regenerate={regenerate}
overriddenModel={overriddenModel}
@@ -742,10 +744,16 @@ export const AIMessage = ({
absolute -bottom-5
z-10
invisible ${
(isHovering || settings?.isMobile) && "!visible"
(isHovering ||
isRegenerateHovered ||
settings?.isMobile) &&
"!visible"
}
opacity-0 ${
(isHovering || settings?.isMobile) && "!opacity-100"
(isHovering ||
isRegenerateHovered ||
settings?.isMobile) &&
"!opacity-100"
}
flex md:flex-row gap-x-0.5 bg-background-125/40 -mx-1.5 p-1.5 rounded-lg
`}
@@ -810,6 +818,7 @@ export const AIMessage = ({
}
regenerate={regenerate}
overriddenModel={overriddenModel}
onHoverChange={setIsRegenerateHovered}
/>
</CustomTooltip>
)}

View File

@@ -1333,10 +1333,10 @@ export function createConnectorValidationSchema(
): Yup.ObjectSchema<Record<string, any>> {
const configuration = connectorConfigs[connector];
const object = Yup.object().shape({
return Yup.object().shape({
access_type: Yup.string().required("Access Type is required"),
name: Yup.string().required("Connector Name is required"),
...[...configuration.values, ...configuration.advanced_values].reduce(
...configuration.values.reduce(
(acc, field) => {
let schema: any =
field.type === "select"
@@ -1363,8 +1363,6 @@ export function createConnectorValidationSchema(
pruneFreq: Yup.number().min(0, "Prune frequency must be non-negative"),
refreshFreq: Yup.number().min(0, "Refresh frequency must be non-negative"),
});
return object;
}
export const defaultPruneFreqDays = 30; // 30 days