Mirror of https://github.com/onyx-dot-app/onyx.git (synced 2026-02-16 23:35:46 +00:00)

Compare commits: pro-search...cloud_debu (3 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 09e6bd3c9c | |
| | c1803cdd56 | |
| | a5b9c76012 | |
18
.github/pull_request_template.md
vendored
@@ -6,6 +6,24 @@

[Describe the tests you ran to verify your changes]

## Accepted Risk (provide if relevant)
N/A

## Related Issue(s) (provide if relevant)
N/A

## Mental Checklist:
- All of the automated tests pass
- All PR comments are addressed and marked resolved
- If there are migrations, they have been rebased to latest main
- If there are new dependencies, they are added to the requirements
- If there are new environment variables, they are added to all of the deployment methods
- If there are new APIs that don't require auth, they are added to PUBLIC_ENDPOINT_SPECS
- Docker images build and basic functionalities work
- Author has done a final read through of the PR right before merge

## Backporting (check the box to trigger backport action)
Note: You have to check that the action passes, otherwise resolve the conflicts manually and tag the patches.
- [ ] This PR should be backported (make sure to check that the backport attempt succeeds)
@@ -66,7 +66,6 @@ jobs:
NEXT_PUBLIC_POSTHOG_HOST=${{ secrets.POSTHOG_HOST }}
NEXT_PUBLIC_SENTRY_DSN=${{ secrets.SENTRY_DSN }}
NEXT_PUBLIC_GTM_ENABLED=true
NEXT_PUBLIC_FORGOT_PASSWORD_ENABLED=true
# needed due to weird interactions with the builds for different platforms
no-cache: true
labels: ${{ steps.meta.outputs.labels }}

@@ -118,6 +118,6 @@ jobs:
TRIVY_DB_REPOSITORY: "public.ecr.aws/aquasecurity/trivy-db:2"
TRIVY_JAVA_DB_REPOSITORY: "public.ecr.aws/aquasecurity/trivy-java-db:1"
with:
image-ref: docker.io/${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}
image-ref: docker.io/onyxdotapp/onyx-model-server:${{ github.ref_name }}
severity: "CRITICAL,HIGH"
timeout: "10m"
14
.github/workflows/pr-python-connector-tests.yml
vendored
@@ -26,19 +26,7 @@ env:
GOOGLE_GMAIL_OAUTH_CREDENTIALS_JSON_STR: ${{ secrets.GOOGLE_GMAIL_OAUTH_CREDENTIALS_JSON_STR }}
# Slab
SLAB_BOT_TOKEN: ${{ secrets.SLAB_BOT_TOKEN }}
# Zendesk
ZENDESK_SUBDOMAIN: ${{ secrets.ZENDESK_SUBDOMAIN }}
ZENDESK_EMAIL: ${{ secrets.ZENDESK_EMAIL }}
ZENDESK_TOKEN: ${{ secrets.ZENDESK_TOKEN }}
# Salesforce
SF_USERNAME: ${{ secrets.SF_USERNAME }}
SF_PASSWORD: ${{ secrets.SF_PASSWORD }}
SF_SECURITY_TOKEN: ${{ secrets.SF_SECURITY_TOKEN }}
# Airtable
AIRTABLE_TEST_BASE_ID: ${{ secrets.AIRTABLE_TEST_BASE_ID }}
AIRTABLE_TEST_TABLE_ID: ${{ secrets.AIRTABLE_TEST_TABLE_ID }}
AIRTABLE_TEST_TABLE_NAME: ${{ secrets.AIRTABLE_TEST_TABLE_NAME }}
AIRTABLE_ACCESS_TOKEN: ${{ secrets.AIRTABLE_ACCESS_TOKEN }}

jobs:
connectors-check:
# See https://runs-on.com/runners/linux/
4
.gitignore
vendored
@@ -7,6 +7,4 @@
.vscode/
*.sw?
/backend/tests/regression/answer_quality/search_test_config.yaml
/web/test-results/
backend/onyx/agent_search/main/test_data.json
backend/tests/regression/answer_quality/test_data.json
/web/test-results/
8
.vscode/env_template.txt
vendored
@@ -5,8 +5,6 @@
# For local dev, often user Authentication is not needed
AUTH_TYPE=disabled

# Skip warm up for dev
SKIP_WARM_UP=True

# Always keep these on for Dev
# Logs all model prompts to stdout

@@ -51,9 +49,3 @@ BING_API_KEY=<REPLACE THIS>
# Enable the full set of Danswer Enterprise Edition features
# NOTE: DO NOT ENABLE THIS UNLESS YOU HAVE A PAID ENTERPRISE LICENSE (or if you are using this for local testing/development)
ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=False

# Agent Search configs # TODO: Remove give proper namings
AGENT_RETRIEVAL_STATS=False # Note: This setting will incur substantial re-ranking effort
AGENT_RERANKING_STATS=True
AGENT_MAX_QUERY_RETRIEVAL_RESULTS=20
AGENT_RERANKING_MAX_QUERY_RETRIEVAL_RESULTS=20
15
.vscode/launch.template.jsonc
vendored
@@ -355,20 +355,5 @@
            "PYTHONPATH": "."
        },
    },
    {
        "name": "Install Python Requirements",
        "type": "node",
        "request": "launch",
        "runtimeExecutable": "bash",
        "runtimeArgs": [
            "-c",
            "pip install -r backend/requirements/default.txt && pip install -r backend/requirements/dev.txt && pip install -r backend/requirements/ee.txt && pip install -r backend/requirements/model_server.txt"
        ],
        "cwd": "${workspaceFolder}",
        "console": "integratedTerminal",
        "presentation": {
            "group": "3"
        }
    },
]
}
@@ -12,10 +12,6 @@ As an open source project in a rapidly changing space, we welcome all contributi

The [GitHub Issues](https://github.com/onyx-dot-app/onyx/issues) page is a great place to start for contribution ideas.

To ensure that your contribution is aligned with the project's direction, please reach out to Hagen (or any other maintainer) on the Onyx team
via [Slack](https://join.slack.com/t/onyx-dot-app/shared_invite/zt-2twesxdr6-5iQitKZQpgq~hYIZ~dv3KA) /
[Discord](https://discord.gg/TDJ59cGV2X) or [email](mailto:founders@onyx.app).

Issues that have been explicitly approved by the maintainers (aligned with the direction of the project)
will be marked with the `approved by maintainers` label.
Issues marked `good first issue` are an especially great place to start.

@@ -27,8 +23,8 @@ If you have a new/different contribution in mind, we'd love to hear about it!
Your input is vital to making sure that Onyx moves in the right direction.
Before starting on implementation, please raise a GitHub issue.

Also, always feel free to message the founders (Chris Weaver / Yuhong Sun) on
[Slack](https://join.slack.com/t/onyx-dot-app/shared_invite/zt-2twesxdr6-5iQitKZQpgq~hYIZ~dv3KA) /
And always feel free to message us (Chris Weaver / Yuhong Sun) on
[Slack](https://join.slack.com/t/danswer/shared_invite/zt-1w76msxmd-HJHLe3KNFIAIzk_0dSOKaQ) /
[Discord](https://discord.gg/TDJ59cGV2X) directly about anything at all.

### Contributing Code

@@ -46,7 +42,7 @@ Our goal is to make contributing as easy as possible. If you run into any issues
That way we can help future contributors and users can avoid the same issue.

We also have support channels and generally interesting discussions on our
[Slack](https://join.slack.com/t/onyx-dot-app/shared_invite/zt-2twesxdr6-5iQitKZQpgq~hYIZ~dv3KA)
[Slack](https://join.slack.com/t/danswer/shared_invite/zt-1w76msxmd-HJHLe3KNFIAIzk_0dSOKaQ)
and
[Discord](https://discord.gg/TDJ59cGV2X).
@@ -127,47 +123,7 @@ Once the above is done, navigate to `onyx/web` run:
npm i
```

## Formatting and Linting

### Backend

For the backend, you'll need to setup pre-commit hooks (black / reorder-python-imports).
First, install pre-commit (if you don't have it already) following the instructions
[here](https://pre-commit.com/#installation).

With the virtual environment active, install the pre-commit library with:

```bash
pip install pre-commit
```

Then, from the `onyx/backend` directory, run:

```bash
pre-commit install
```

Additionally, we use `mypy` for static type checking.
Onyx is fully type-annotated, and we want to keep it that way!
To run the mypy checks manually, run `python -m mypy .` from the `onyx/backend` directory.

### Web

We use `prettier` for formatting. The desired version (2.8.8) will be installed via a `npm i` from the `onyx/web` directory.
To run the formatter, use `npx prettier --write .` from the `onyx/web` directory.
Please double check that prettier passes before creating a pull request.

# Running the application for development

## Developing using VSCode Debugger (recommended)

We highly recommend using VSCode debugger for development.
See [CONTRIBUTING_VSCODE.md](./CONTRIBUTING_VSCODE.md) for more details.

Otherwise, you can follow the instructions below to run the application for development.

## Manually running the application for development
### Docker containers for external software
#### Docker containers for external software

You will need Docker installed to run these containers.

@@ -179,7 +135,7 @@ docker compose -f docker-compose.dev.yml -p onyx-stack up -d index relational_db

(index refers to Vespa, relational_db refers to Postgres, and cache refers to Redis)

### Running Onyx locally
#### Running Onyx locally

To start the frontend, navigate to `onyx/web` and run:

@@ -267,6 +223,35 @@ If you want to make changes to Onyx and run those changes in Docker, you can als
docker compose -f docker-compose.dev.yml -p onyx-stack up -d --build
```

### Formatting and Linting

#### Backend

For the backend, you'll need to setup pre-commit hooks (black / reorder-python-imports).
First, install pre-commit (if you don't have it already) following the instructions
[here](https://pre-commit.com/#installation).

With the virtual environment active, install the pre-commit library with:

```bash
pip install pre-commit
```

Then, from the `onyx/backend` directory, run:

```bash
pre-commit install
```

Additionally, we use `mypy` for static type checking.
Onyx is fully type-annotated, and we want to keep it that way!
To run the mypy checks manually, run `python -m mypy .` from the `onyx/backend` directory.

#### Web

We use `prettier` for formatting. The desired version (2.8.8) will be installed via a `npm i` from the `onyx/web` directory.
To run the formatter, use `npx prettier --write .` from the `onyx/web` directory.
Please double check that prettier passes before creating a pull request.

### Release Process
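The CONTRIBUTING changes above note that Onyx is fully type-annotated and checked with `python -m mypy .`. As a rough, hypothetical illustration of the style that check expects (this function is not from the Onyx codebase), every signature carries parameter and return annotations:

```python
# Hypothetical example of the fully type-annotated style that mypy enforces;
# not taken from the Onyx repository.
def chunk_text(text: str, chunk_size: int = 512) -> list[str]:
    """Split text into chunks of at most roughly chunk_size characters."""
    chunks: list[str] = []
    current: list[str] = []
    length = 0
    for word in text.split():
        if current and length + len(word) > chunk_size:
            chunks.append(" ".join(current))
            current, length = [], 0
        current.append(word)
        length += len(word) + 1
    if current:
        chunks.append(" ".join(current))
    return chunks
```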
@@ -1,29 +0,0 @@
# VSCode Debugging Setup

This guide explains how to set up and use VSCode's debugging capabilities with this project.

## Initial Setup

1. **Environment Setup**:
   - Copy `.vscode/.env.template` to `.vscode/.env`
   - Fill in the necessary environment variables in `.vscode/.env`
2. **launch.json**:
   - Copy `.vscode/launch.template.jsonc` to `.vscode/launch.json`

## Using the Debugger

Before starting, make sure the Docker Daemon is running.

1. Open the Debug view in VSCode (Cmd+Shift+D on macOS)
2. From the dropdown at the top, select "Clear and Restart External Volumes and Containers" and press the green play button
3. From the dropdown at the top, select "Run All Onyx Services" and press the green play button
4. Now, you can navigate to onyx in your browser (default is http://localhost:3000) and start using the app
5. You can set breakpoints by clicking to the left of line numbers to help debug while the app is running
6. Use the debug toolbar to step through code, inspect variables, etc.

## Features

- Hot reload is enabled for the web server and API servers
- Python debugging is configured with debugpy
- Environment variables are loaded from `.vscode/.env`
- Console output is organized in the integrated terminal with labeled tabs
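The deleted guide above mentions that Python debugging is configured with debugpy. As a generic sketch of how a debugpy endpoint is usually exposed (the env var name and port here are assumptions for illustration, not Onyx's actual wiring, which lives in the VSCode launch configuration):

```python
# Generic debugpy sketch; the ENABLE_DEBUGPY flag and port 5678 are assumed
# for illustration and are not taken from the Onyx configuration.
import os

import debugpy

if os.environ.get("ENABLE_DEBUGPY") == "true":
    debugpy.listen(("0.0.0.0", 5678))  # expose the debug adapter
    debugpy.wait_for_client()  # block until the IDE attaches
```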
18
README.md
@@ -3,7 +3,7 @@
<a name="readme-top"></a>

<h2 align="center">
<a href="https://www.onyx.app/"> <img width="50%" src="https://github.com/onyx-dot-app/onyx/blob/logo/OnyxLogoCropped.jpg?raw=true)" /></a>
<a href="https://www.onyx.app/"> <img width="50%" src="https://github.com/onyx-dot-app/onyx/blob/logo/LogoOnyx.png?raw=true)" /></a>
</h2>

<p align="center">

@@ -13,7 +13,7 @@
<a href="https://docs.onyx.app/" target="_blank">
<img src="https://img.shields.io/badge/docs-view-blue" alt="Documentation">
</a>
<a href="https://join.slack.com/t/onyx-dot-app/shared_invite/zt-2twesxdr6-5iQitKZQpgq~hYIZ~dv3KA" target="_blank">
<a href="https://join.slack.com/t/danswer/shared_invite/zt-1w76msxmd-HJHLe3KNFIAIzk_0dSOKaQ" target="_blank">
<img src="https://img.shields.io/badge/slack-join-blue.svg?logo=slack" alt="Slack">
</a>
<a href="https://discord.gg/TDJ59cGV2X" target="_blank">

@@ -24,7 +24,7 @@
</a>
</p>

<strong>[Onyx](https://www.onyx.app/)</strong> (formerly Danswer) is the AI Assistant connected to your company's docs, apps, and people.
<strong>[Onyx](https://www.onyx.app/)</strong> (Formerly Danswer) is the AI Assistant connected to your company's docs, apps, and people.
Onyx provides a Chat interface and plugs into any LLM of your choice. Onyx can be deployed anywhere and for any
scale - on a laptop, on-premise, or to cloud. Since you own the deployment, your user data and chats are fully in your
own control. Onyx is dual Licensed with most of it under MIT license and designed to be modular and easily extensible. The system also comes fully ready

@@ -133,3 +133,15 @@ Looking to contribute? Please check out the [Contribution Guide](CONTRIBUTING.md
## ⭐Star History

[](https://star-history.com/#onyx-dot-app/onyx&Date)

## ✨Contributors

<a href="https://github.com/onyx-dot-app/onyx/graphs/contributors">
<img alt="contributors" src="https://contrib.rocks/image?repo=onyx-dot-app/onyx"/>
</a>

<p align="right" style="font-size: 14px; color: #555; margin-top: 20px;">
<a href="#readme-top" style="text-decoration: none; color: #007bff; font-weight: bold;">
↑ Back to Top ↑
</a>
</p>
1
backend/.gitignore
vendored
@@ -9,4 +9,3 @@ api_keys.py
vespa-app.zip
dynamic_config_storage/
celerybeat-schedule*
onyx/connectors/salesforce/data/
@@ -4,7 +4,7 @@ from onyx.configs.app_configs import USE_IAM_AUTH
from onyx.configs.app_configs import POSTGRES_HOST
from onyx.configs.app_configs import POSTGRES_PORT
from onyx.configs.app_configs import POSTGRES_USER
from onyx.configs.app_configs import AWS_REGION_NAME
from onyx.configs.app_configs import AWS_REGION
from onyx.db.engine import build_connection_string
from onyx.db.engine import get_all_tenant_ids
from sqlalchemy import event

@@ -120,7 +120,7 @@ def provide_iam_token_for_alembic(
) -> None:
    if USE_IAM_AUTH:
        # Database connection settings
        region = AWS_REGION_NAME
        region = AWS_REGION
        host = POSTGRES_HOST
        port = POSTGRES_PORT
        user = POSTGRES_USER
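For context on the `provide_iam_token_for_alembic` hunk above: when `USE_IAM_AUTH` is enabled, the migration environment needs a short-lived RDS IAM auth token instead of a static password. A minimal sketch of how such a token is generally obtained with boto3, reusing the region/host/port/user settings from the diff (an illustration, not the repository's exact implementation):

```python
# Illustrative only: generating an RDS IAM auth token with boto3.
# The helper name get_rds_iam_token is hypothetical; Onyx's real code may differ.
import boto3


def get_rds_iam_token(region: str, host: str, port: int, user: str) -> str:
    client = boto3.client("rds", region_name=region)
    # The returned presigned token is used as the database password.
    return client.generate_db_auth_token(
        DBHostname=host, Port=port, DBUsername=user, Region=region
    )
```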
@@ -1,29 +0,0 @@
"""add shortcut option for users

Revision ID: 027381bce97c
Revises: 6fc7886d665d
Create Date: 2025-01-14 12:14:00.814390

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = "027381bce97c"
down_revision = "6fc7886d665d"
branch_labels = None
depends_on = None


def upgrade() -> None:
    op.add_column(
        "user",
        sa.Column(
            "shortcut_enabled", sa.Boolean(), nullable=False, server_default="true"
        ),
    )


def downgrade() -> None:
    op.drop_column("user", "shortcut_enabled")
@@ -1,29 +0,0 @@
"""agent_doc_result_col

Revision ID: 1adf5ea20d2b
Revises: e9cf2bd7baed
Create Date: 2025-01-05 13:14:58.344316

"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision = "1adf5ea20d2b"
down_revision = "e9cf2bd7baed"
branch_labels = None
depends_on = None


def upgrade() -> None:
    # Add the new column with JSONB type
    op.add_column(
        "sub_question",
        sa.Column("sub_question_doc_results", postgresql.JSONB(), nullable=True),
    )


def downgrade() -> None:
    # Drop the column
    op.drop_column("sub_question", "sub_question_doc_results")
@@ -1,24 +0,0 @@
"""add chunk count to document

Revision ID: 2955778aa44c
Revises: c0aab6edb6dd
Create Date: 2025-01-04 11:39:43.268612

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = "2955778aa44c"
down_revision = "c0aab6edb6dd"
branch_labels = None
depends_on = None


def upgrade() -> None:
    op.add_column("document", sa.Column("chunk_count", sa.Integer(), nullable=True))


def downgrade() -> None:
    op.drop_column("document", "chunk_count")
@@ -1,35 +0,0 @@
"""add composite index for index attempt time updated

Revision ID: 369644546676
Revises: 2955778aa44c
Create Date: 2025-01-08 15:38:17.224380

"""
from alembic import op
from sqlalchemy import text

# revision identifiers, used by Alembic.
revision = "369644546676"
down_revision = "2955778aa44c"
branch_labels: None = None
depends_on: None = None


def upgrade() -> None:
    op.create_index(
        "ix_index_attempt_ccpair_search_settings_time_updated",
        "index_attempt",
        [
            "connector_credential_pair_id",
            "search_settings_id",
            text("time_updated DESC"),
        ],
        unique=False,
    )


def downgrade() -> None:
    op.drop_index(
        "ix_index_attempt_ccpair_search_settings_time_updated",
        table_name="index_attempt",
    )
@@ -1,58 +0,0 @@
"""add back input prompts

Revision ID: 3c6531f32351
Revises: aeda5f2df4f6
Create Date: 2025-01-13 12:49:51.705235

"""
from alembic import op
import sqlalchemy as sa
import fastapi_users_db_sqlalchemy

# revision identifiers, used by Alembic.
revision = "3c6531f32351"
down_revision = "aeda5f2df4f6"
branch_labels = None
depends_on = None


def upgrade() -> None:
    op.create_table(
        "inputprompt",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("prompt", sa.String(), nullable=False),
        sa.Column("content", sa.String(), nullable=False),
        sa.Column("active", sa.Boolean(), nullable=False),
        sa.Column("is_public", sa.Boolean(), nullable=False),
        sa.Column(
            "user_id",
            fastapi_users_db_sqlalchemy.generics.GUID(),
            nullable=True,
        ),
        sa.ForeignKeyConstraint(
            ["user_id"],
            ["user.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_table(
        "inputprompt__user",
        sa.Column("input_prompt_id", sa.Integer(), nullable=False),
        sa.Column(
            "user_id", fastapi_users_db_sqlalchemy.generics.GUID(), nullable=False
        ),
        sa.ForeignKeyConstraint(
            ["input_prompt_id"],
            ["inputprompt.id"],
        ),
        sa.ForeignKeyConstraint(
            ["user_id"],
            ["user.id"],
        ),
        sa.PrimaryKeyConstraint("input_prompt_id", "user_id"),
    )


def downgrade() -> None:
    op.drop_table("inputprompt__user")
    op.drop_table("inputprompt")
@@ -1,79 +0,0 @@
"""make categories labels and many to many

Revision ID: 6fc7886d665d
Revises: 3c6531f32351
Create Date: 2025-01-13 18:12:18.029112

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = "6fc7886d665d"
down_revision = "3c6531f32351"
branch_labels = None
depends_on = None


def upgrade() -> None:
    # Rename persona_category table to persona_label
    op.rename_table("persona_category", "persona_label")

    # Create the new association table
    op.create_table(
        "persona__persona_label",
        sa.Column("persona_id", sa.Integer(), nullable=False),
        sa.Column("persona_label_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(
            ["persona_id"],
            ["persona.id"],
        ),
        sa.ForeignKeyConstraint(
            ["persona_label_id"],
            ["persona_label.id"],
        ),
        sa.PrimaryKeyConstraint("persona_id", "persona_label_id"),
    )

    # Copy existing relationships to the new table
    op.execute(
        """
        INSERT INTO persona__persona_label (persona_id, persona_label_id)
        SELECT id, category_id FROM persona WHERE category_id IS NOT NULL
        """
    )

    # Remove the old category_id column from persona table
    op.drop_column("persona", "category_id")


def downgrade() -> None:
    # Rename persona_label table back to persona_category
    op.rename_table("persona_label", "persona_category")

    # Add back the category_id column to persona table
    op.add_column("persona", sa.Column("category_id", sa.Integer(), nullable=True))
    op.create_foreign_key(
        "persona_category_id_fkey",
        "persona",
        "persona_category",
        ["category_id"],
        ["id"],
    )

    # Copy the first label relationship back to the persona table
    op.execute(
        """
        UPDATE persona
        SET category_id = (
            SELECT persona_label_id
            FROM persona__persona_label
            WHERE persona__persona_label.persona_id = persona.id
            LIMIT 1
        )
        """
    )

    # Drop the association table
    op.drop_table("persona__persona_label")
@@ -1,35 +0,0 @@
"""agent_metric_col_rename__s

Revision ID: 925b58bd75b6
Revises: 9787be927e58
Create Date: 2025-01-06 11:20:26.752441

"""
from alembic import op


# revision identifiers, used by Alembic.
revision = "925b58bd75b6"
down_revision = "9787be927e58"
branch_labels = None
depends_on = None


def upgrade() -> None:
    # Rename columns using PostgreSQL syntax
    op.alter_column(
        "agent__search_metrics", "base_duration_s", new_column_name="base_duration__s"
    )
    op.alter_column(
        "agent__search_metrics", "full_duration_s", new_column_name="full_duration__s"
    )


def downgrade() -> None:
    # Revert the column renames
    op.alter_column(
        "agent__search_metrics", "base_duration__s", new_column_name="base_duration_s"
    )
    op.alter_column(
        "agent__search_metrics", "full_duration__s", new_column_name="full_duration_s"
    )
@@ -1,25 +0,0 @@
"""agent_metric_table_renames__agent__

Revision ID: 9787be927e58
Revises: bceb76d618ec
Create Date: 2025-01-06 11:01:44.210160

"""
from alembic import op


# revision identifiers, used by Alembic.
revision = "9787be927e58"
down_revision = "bceb76d618ec"
branch_labels = None
depends_on = None


def upgrade() -> None:
    # Rename table from agent_search_metrics to agent__search_metrics
    op.rename_table("agent_search_metrics", "agent__search_metrics")


def downgrade() -> None:
    # Rename table back from agent__search_metrics to agent_search_metrics
    op.rename_table("agent__search_metrics", "agent_search_metrics")
@@ -1,42 +0,0 @@
"""agent_tracking

Revision ID: 98a5008d8711
Revises: 027381bce97c
Create Date: 2025-01-04 14:41:52.732238

"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision = "98a5008d8711"
down_revision = "027381bce97c"
branch_labels = None
depends_on = None


def upgrade() -> None:
    op.create_table(
        "agent_search_metrics",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("user_id", postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column("persona_id", sa.Integer(), nullable=True),
        sa.Column("agent_type", sa.String(), nullable=False),
        sa.Column("start_time", sa.DateTime(timezone=True), nullable=False),
        sa.Column("base_duration_s", sa.Float(), nullable=False),
        sa.Column("full_duration_s", sa.Float(), nullable=False),
        sa.Column("base_metrics", postgresql.JSONB(), nullable=True),
        sa.Column("refined_metrics", postgresql.JSONB(), nullable=True),
        sa.Column("all_metrics", postgresql.JSONB(), nullable=True),
        sa.ForeignKeyConstraint(
            ["persona_id"],
            ["persona.id"],
        ),
        sa.ForeignKeyConstraint(["user_id"], ["user.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
    )


def downgrade() -> None:
    op.drop_table("agent_search_metrics")
@@ -1,27 +0,0 @@
"""add pinned assistants

Revision ID: aeda5f2df4f6
Revises: 369644546676
Create Date: 2025-01-09 16:04:10.770636

"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision = "aeda5f2df4f6"
down_revision = "369644546676"
branch_labels = None
depends_on = None


def upgrade() -> None:
    op.add_column(
        "user", sa.Column("pinned_assistants", postgresql.JSONB(), nullable=True)
    )
    op.execute('UPDATE "user" SET pinned_assistants = chosen_assistants')


def downgrade() -> None:
    op.drop_column("user", "pinned_assistants")
@@ -1,84 +0,0 @@
"""agent_table_renames__agent__

Revision ID: bceb76d618ec
Revises: c0132518a25b
Create Date: 2025-01-06 10:50:48.109285

"""
from alembic import op


# revision identifiers, used by Alembic.
revision = "bceb76d618ec"
down_revision = "c0132518a25b"
branch_labels = None
depends_on = None


def upgrade() -> None:
    op.drop_constraint(
        "sub_query__search_doc_sub_query_id_fkey",
        "sub_query__search_doc",
        type_="foreignkey",
    )
    op.drop_constraint(
        "sub_query__search_doc_search_doc_id_fkey",
        "sub_query__search_doc",
        type_="foreignkey",
    )
    # Rename tables
    op.rename_table("sub_query", "agent__sub_query")
    op.rename_table("sub_question", "agent__sub_question")
    op.rename_table("sub_query__search_doc", "agent__sub_query__search_doc")

    # Update both foreign key constraints for agent__sub_query__search_doc

    # Create new foreign keys with updated names
    op.create_foreign_key(
        "agent__sub_query__search_doc_sub_query_id_fkey",
        "agent__sub_query__search_doc",
        "agent__sub_query",
        ["sub_query_id"],
        ["id"],
    )
    op.create_foreign_key(
        "agent__sub_query__search_doc_search_doc_id_fkey",
        "agent__sub_query__search_doc",
        "search_doc",  # This table name doesn't change
        ["search_doc_id"],
        ["id"],
    )


def downgrade() -> None:
    # Update foreign key constraints for sub_query__search_doc
    op.drop_constraint(
        "agent__sub_query__search_doc_sub_query_id_fkey",
        "agent__sub_query__search_doc",
        type_="foreignkey",
    )
    op.drop_constraint(
        "agent__sub_query__search_doc_search_doc_id_fkey",
        "agent__sub_query__search_doc",
        type_="foreignkey",
    )

    # Rename tables back
    op.rename_table("agent__sub_query__search_doc", "sub_query__search_doc")
    op.rename_table("agent__sub_question", "sub_question")
    op.rename_table("agent__sub_query", "sub_query")

    op.create_foreign_key(
        "sub_query__search_doc_sub_query_id_fkey",
        "sub_query__search_doc",
        "sub_query",
        ["sub_query_id"],
        ["id"],
    )
    op.create_foreign_key(
        "sub_query__search_doc_search_doc_id_fkey",
        "sub_query__search_doc",
        "search_doc",  # This table name doesn't change
        ["search_doc_id"],
        ["id"],
    )
@@ -1,40 +0,0 @@
"""agent_table_changes_rename_level

Revision ID: c0132518a25b
Revises: 1adf5ea20d2b
Create Date: 2025-01-05 16:38:37.660152

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = "c0132518a25b"
down_revision = "1adf5ea20d2b"
branch_labels = None
depends_on = None


def upgrade() -> None:
    # Add level and level_question_nr columns with NOT NULL constraint
    op.add_column(
        "sub_question",
        sa.Column("level", sa.Integer(), nullable=False, server_default="0"),
    )
    op.add_column(
        "sub_question",
        sa.Column(
            "level_question_nr", sa.Integer(), nullable=False, server_default="0"
        ),
    )

    # Remove the server_default after the columns are created
    op.alter_column("sub_question", "level", server_default=None)
    op.alter_column("sub_question", "level_question_nr", server_default=None)


def downgrade() -> None:
    # Remove the columns
    op.drop_column("sub_question", "level_question_nr")
    op.drop_column("sub_question", "level")
@@ -1,68 +0,0 @@
"""create pro search persistence tables

Revision ID: e9cf2bd7baed
Revises: 98a5008d8711
Create Date: 2025-01-02 17:55:56.544246

"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import UUID


# revision identifiers, used by Alembic.
revision = "e9cf2bd7baed"
down_revision = "98a5008d8711"
branch_labels = None
depends_on = None


def upgrade() -> None:
    # Create sub_question table
    op.create_table(
        "sub_question",
        sa.Column("id", sa.Integer, primary_key=True),
        sa.Column("primary_question_id", sa.Integer, sa.ForeignKey("chat_message.id")),
        sa.Column(
            "chat_session_id", UUID(as_uuid=True), sa.ForeignKey("chat_session.id")
        ),
        sa.Column("sub_question", sa.Text),
        sa.Column(
            "time_created", sa.DateTime(timezone=True), server_default=sa.func.now()
        ),
        sa.Column("sub_answer", sa.Text),
    )

    # Create sub_query table
    op.create_table(
        "sub_query",
        sa.Column("id", sa.Integer, primary_key=True),
        sa.Column("parent_question_id", sa.Integer, sa.ForeignKey("sub_question.id")),
        sa.Column(
            "chat_session_id", UUID(as_uuid=True), sa.ForeignKey("chat_session.id")
        ),
        sa.Column("sub_query", sa.Text),
        sa.Column(
            "time_created", sa.DateTime(timezone=True), server_default=sa.func.now()
        ),
    )

    # Create sub_query__search_doc association table
    op.create_table(
        "sub_query__search_doc",
        sa.Column(
            "sub_query_id", sa.Integer, sa.ForeignKey("sub_query.id"), primary_key=True
        ),
        sa.Column(
            "search_doc_id",
            sa.Integer,
            sa.ForeignKey("search_doc.id"),
            primary_key=True,
        ),
    )


def downgrade() -> None:
    op.drop_table("sub_query__search_doc")
    op.drop_table("sub_query")
    op.drop_table("sub_question")
@@ -1,31 +0,0 @@
"""mapping for anonymous user path

Revision ID: a4f6ee863c47
Revises: 14a83a331951
Create Date: 2025-01-04 14:16:58.697451

"""
import sqlalchemy as sa

from alembic import op


# revision identifiers, used by Alembic.
revision = "a4f6ee863c47"
down_revision = "14a83a331951"
branch_labels = None
depends_on = None


def upgrade() -> None:
    op.create_table(
        "tenant_anonymous_user_path",
        sa.Column("tenant_id", sa.String(), primary_key=True, nullable=False),
        sa.Column("anonymous_user_path", sa.String(), nullable=False),
        sa.PrimaryKeyConstraint("tenant_id"),
        sa.UniqueConstraint("anonymous_user_path"),
    )


def downgrade() -> None:
    op.drop_table("tenant_anonymous_user_path")
@@ -1,536 +0,0 @@
|
||||
"{\"user_message_id\": 475, \"reserved_assistant_message_id\": 476}\n"
|
||||
"{\"sub_question\": \"What\", \"level\": 0, \"level_question_nr\": 1}\n"
|
||||
"{\"sub_query\": \"ony\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_question\": \" is\", \"level\": 0, \"level_question_nr\": 1}\n"
|
||||
"{\"sub_query\": \"x\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_question\": \" On\", \"level\": 0, \"level_question_nr\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_question\": \"yx\", \"level\": 0, \"level_question_nr\": 1}\n"
|
||||
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 1}\n"
|
||||
"{\"sub_query\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_question\": \"1\", \"level\": 0, \"level_question_nr\": 1}\n"
|
||||
"{\"sub_query\": \" features\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_question\": \"?\", \"level\": 0, \"level_question_nr\": 1}\n"
|
||||
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 1}\n"
|
||||
"{\"sub_question\": \"\", \"level\": 0, \"level_question_nr\": 2}\n"
|
||||
"{\"sub_question\": \"What\", \"level\": 0, \"level_question_nr\": 2}\n"
|
||||
"{\"sub_question\": \" is\", \"level\": 0, \"level_question_nr\": 2}\n"
|
||||
"{\"sub_query\": \" specifications\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_question\": \" On\", \"level\": 0, \"level_question_nr\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_question\": \"yx\", \"level\": 0, \"level_question_nr\": 2}\n"
|
||||
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 2}\n"
|
||||
"{\"sub_question\": \"2\", \"level\": 0, \"level_question_nr\": 2}\n"
|
||||
"{\"sub_query\": \"ony\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_question\": \"?\", \"level\": 0, \"level_question_nr\": 2}\n"
|
||||
"{\"sub_query\": \"x\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_question\": \"\", \"level\": 0, \"level_question_nr\": 3}\n"
|
||||
"{\"sub_query\": \"2\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_question\": \"What\", \"level\": 0, \"level_question_nr\": 3}\n"
|
||||
"{\"sub_query\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_question\": \" is\", \"level\": 0, \"level_question_nr\": 3}\n"
|
||||
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_question\": \" On\", \"level\": 0, \"level_question_nr\": 3}\n"
|
||||
"{\"sub_query\": \" use\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_question\": \"yx\", \"level\": 0, \"level_question_nr\": 3}\n"
|
||||
"{\"sub_query\": \" cases\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 3}\n"
|
||||
"{\"sub_question\": \"3\", \"level\": 0, \"level_question_nr\": 3}\n"
|
||||
"{\"sub_question\": \"?\", \"level\": 0, \"level_question_nr\": 3}\n"
|
||||
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 3}\n"
|
||||
"{\"sub_question\": \"\", \"level\": 0, \"level_question_nr\": 4}\n"
|
||||
"{\"sub_question\": \"What\", \"level\": 0, \"level_question_nr\": 4}\n"
|
||||
"{\"sub_question\": \" is\", \"level\": 0, \"level_question_nr\": 4}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_question\": \" On\", \"level\": 0, \"level_question_nr\": 4}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_question\": \"yx\", \"level\": 0, \"level_question_nr\": 4}\n"
|
||||
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 4}\n"
|
||||
"{\"sub_query\": \"ony\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_question\": \"4\", \"level\": 0, \"level_question_nr\": 4}\n"
|
||||
"{\"sub_question\": \"?\", \"level\": 0, \"level_question_nr\": 4}\n"
|
||||
"{\"sub_query\": \"x\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 4}\n"
|
||||
"{\"sub_question\": \"\", \"level\": 0, \"level_question_nr\": 4}\n"
|
||||
"{\"sub_query\": \"3\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"4\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" comparison\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" differences\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"4\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" product\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"3\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" information\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" software\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" features\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" software\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"2\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" features\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" software\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" features\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"4\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" applications\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" features\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" in\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" industry\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" specifications\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"2\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" applications\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"3\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" applications\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" in\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" industry\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"4\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" comparison\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" with\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" previous\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" use\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" versions\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" in\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" industry\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"3\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" comparison\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"2\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" comparison\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" with\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" cases\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" previous\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" with\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" other\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" software\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" versions\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"top_documents\": [], \"rephrased_query\": \"What is Onyx 4?\", \"predicted_flow\": \"question-answer\", \"predicted_search\": \"keyword\", \"applied_source_filters\": null, \"applied_time_cutoff\": null, \"recency_bias_multiplier\": 0.5}\n"
|
||||
"{\"llm_selected_doc_indices\": []}\n"
|
||||
"{\"final_context_docs\": []}\n"
|
||||
"{\"answer_piece\": \"I\", \"level\": 0, \"level_question_nr\": 3, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" don't\", \"level\": 0, \"level_question_nr\": 3, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" know\", \"level\": 0, \"level_question_nr\": 3, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 3, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" formerly\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" known\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" D\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"answer\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" an\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" AI\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" Assistant\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" that\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" connects\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" to\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" company's\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" documents\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" personnel\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" It\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" provides\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" chat\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" interface\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" can\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" integrate\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" with\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" any\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" large\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" language\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" model\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" (\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"LL\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"M\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \")\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"top_documents\": [], \"rephrased_query\": \"What is Onyx 2?\", \"predicted_flow\": \"question-answer\", \"predicted_search\": \"keyword\", \"applied_source_filters\": null, \"applied_time_cutoff\": null, \"recency_bias_multiplier\": 0.5}\n"
|
||||
"{\"llm_selected_doc_indices\": []}\n"
|
||||
"{\"final_context_docs\": []}\n"
|
||||
"{\"answer_piece\": \" of\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" choice\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" designed\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" to\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" be\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" modular\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" easily\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" extens\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"ible\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" allowing\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" for\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" deployment\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" on\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" various\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" platforms\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" including\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" laptops\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" on\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"-prem\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"ise\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" or\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" cloud\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" environments\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" It\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" ensures\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" that\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" user\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" data\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" chats\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" remain\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"I\", \"level\": 0, \"level_question_nr\": 1, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" under\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" don't\", \"level\": 0, \"level_question_nr\": 1, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" the\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" user's\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" know\", \"level\": 0, \"level_question_nr\": 1, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 1, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" control\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" the\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" deployment\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" owned\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" by\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" the\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" user\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" MIT\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" licensed\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" comes\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" ready\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" for\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" production\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" use\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" featuring\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" user\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" authentication\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" role\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" management\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" chat\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" persistence\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" user\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" interface\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" for\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" configuring\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" AI\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" Assist\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"ants\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" their\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" prompts\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" Additionally\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" serves\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" unified\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" search\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" tool\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" across\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" common\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" workplace\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" like\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" Slack\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" Google\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" Drive\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" Con\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"fluence\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" enabling\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" it\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" to\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" act\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" subject\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" matter\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" expert\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" for\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" teams\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" by\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" combining\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" L\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"LM\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"s\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" with\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" team\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"-specific\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" knowledge\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" [[1]]()\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"top_documents\": [], \"rephrased_query\": \"What is Onyx 3?\", \"predicted_flow\": \"question-answer\", \"predicted_search\": \"keyword\", \"applied_source_filters\": null, \"applied_time_cutoff\": null, \"recency_bias_multiplier\": 0.5}\n"
"{\"llm_selected_doc_indices\": []}\n"
"{\"final_context_docs\": []}\n"
"{\"answer_piece\": \"I\", \"level\": 0, \"level_question_nr\": 2, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" don't\", \"level\": 0, \"level_question_nr\": 2, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" know\", \"level\": 0, \"level_question_nr\": 2, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 2, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"top_documents\": [{\"document_id\": \"https://docs.onyx.app/introduction\", \"chunk_ind\": 0, \"semantic_identifier\": \"Introduction - Onyx Documentation\", \"link\": \"https://docs.onyx.app/introduction\", \"blurb\": \"Onyx Documentation home page\\nSearch...\\nNavigation\\nWelcome to Onyx\\nIntroduction\\nWelcome to Onyx\\nIntroduction\\nOnyx Overview\\n\\nWhat is Onyx\\nOnyx (Formerly Danswer) is the AI Assistant connected to your companys docs, apps, and people. Onyx provides a Chat interface and plugs into any LLM of your choice. Onyx can be deployed anywhere and for any scale - on a laptop, on-premise, or to cloud. Since you own the deployment, your user data and chats are fully in your own control. Onyx is MIT licensed and designed to be modular and easily extensible.\", \"source_type\": \"web\", \"boost\": 0, \"hidden\": false, \"metadata\": {}, \"score\": 0.6275177643886491, \"is_relevant\": null, \"relevance_explanation\": null, \"match_highlights\": [\"\", \"such as A customer wants feature X, is this already supported? or Wheres the pull request for feature Y?\\n<hi>Onyx</hi> can also be plugged into existing tools like Slack to get answers and AI chats directly in Slack.\\n\\nDemo\\n\\nMain <hi>Features</hi> \\n- Chat UI with the ability to select documents to chat with.\\n- Create custom AI Assistants\", \"\"], \"updated_at\": null, \"primary_owners\": null, \"secondary_owners\": null, \"is_internet\": false, \"db_doc_id\": 35923}], \"rephrased_query\": \"what is onyx 1, 2, 3, 4\", \"predicted_flow\": \"question-answer\", \"predicted_search\": \"keyword\", \"applied_source_filters\": null, \"applied_time_cutoff\": null, \"recency_bias_multiplier\": 0.5}\n"
"{\"llm_selected_doc_indices\": []}\n"
"{\"final_context_docs\": [{\"document_id\": \"https://docs.onyx.app/introduction\", \"content\": \"Onyx Documentation home page\\nSearch...\\nNavigation\\nWelcome to Onyx\\nIntroduction\\nWelcome to Onyx\\nIntroduction\\nOnyx Overview\\n\\nWhat is Onyx\\nOnyx (Formerly Danswer) is the AI Assistant connected to your companys docs, apps, and people. Onyx provides a Chat interface and plugs into any LLM of your choice. Onyx can be deployed anywhere and for any scale - on a laptop, on-premise, or to cloud. Since you own the deployment, your user data and chats are fully in your own control. Onyx is MIT licensed and designed to be modular and easily extensible. The system also comes fully ready for production usage with user authentication, role management (admin/basic users), chat persistence, and a UI for configuring Personas (AI Assistants) and their Prompts.\\nOnyx also serves as a Unified Search across all common workplace tools such as Slack, Google Drive, Confluence, etc. By combining LLMs and team specific knowledge, Onyx becomes a subject matter expert for the team. Its like ChatGPT if it had access to your teams unique knowledge! It enables questions such as A customer wants feature X, is this already supported? or Wheres the pull request for feature Y?\\nOnyx can also be plugged into existing tools like Slack to get answers and AI chats directly in Slack.\\n\\nDemo\\n\\nMain Features \\n- Chat UI with the ability to select documents to chat with.\\n- Create custom AI Assistants with different prompts and backing knowledge sets.\\n- Connect Onyx with LLM of your choice (self-host for a fully airgapped solution).\\n- Document Search + AI Answers for natural language queries.\\n- Connectors to all common workplace tools like Google Drive, Confluence, Slack, etc.\\n- Slack integration to get answers and search results directly in Slack.\\n\\nUpcoming\\n- Chat/Prompt sharing with specific teammates and user groups.\\n- Multi-modal model support, chat with images, video etc.\\n- Choosing between LLMs and parameters during chat session.\\n- Tool calling and agent configurations options.\\n- Organizational understanding and ability to locate and suggest experts from your team.\\n\\nOther Noteable Benefits of Onyx\\n- User Authentication with document level access management.\\n- Best in class Hybrid Search across all sources (BM-25 + prefix aware embedding models).\\n- Admin Dashboard to configure connectors, document-sets, access, etc.\\n- Custom deep learning models + learn from user feedback.\\n- Easy deployment and ability to host Onyx anywhere of your choosing.\\nQuickstart\", \"blurb\": \"Onyx Documentation home page\\nSearch...\\nNavigation\\nWelcome to Onyx\\nIntroduction\\nWelcome to Onyx\\nIntroduction\\nOnyx Overview\\n\\nWhat is Onyx\\nOnyx (Formerly Danswer) is the AI Assistant connected to your companys docs, apps, and people. Onyx provides a Chat interface and plugs into any LLM of your choice. Onyx can be deployed anywhere and for any scale - on a laptop, on-premise, or to cloud. Since you own the deployment, your user data and chats are fully in your own control. Onyx is MIT licensed and designed to be modular and easily extensible.\", \"semantic_identifier\": \"Introduction - Onyx Documentation\", \"source_type\": \"web\", \"metadata\": {}, \"updated_at\": null, \"link\": \"https://docs.onyx.app/introduction\", \"source_links\": {\"0\": \"https://docs.onyx.app/introduction\"}, \"match_highlights\": [\"\", \"such as A customer wants feature X, is this already supported? 
or Wheres the pull request for feature Y?\\n<hi>Onyx</hi> can also be plugged into existing tools like Slack to get answers and AI chats directly in Slack.\\n\\nDemo\\n\\nMain <hi>Features</hi> \\n- Chat UI with the ability to select documents to chat with.\\n- Create custom AI Assistants\", \"\"]}]}\n"
|
||||
"{\"answer_piece\": \"I\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" cannot\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" reliably\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" answer\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" the\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" question\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" about\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \"2\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \"3\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \"4\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" the\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" provided\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" information\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" only\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" describes\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" which\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" an\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" AI\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" Assistant\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" formerly\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" known\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" D\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \"answer\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" connects\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" to\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" company's\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" documents\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" personnel\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" providing\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" chat\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" interface\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" integration\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" with\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" any\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" large\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" language\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" model\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" (\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \"LL\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \"M\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \")\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" of\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" choice\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" It\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" designed\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" to\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" be\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" modular\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" easily\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" extens\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \"ible\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" can\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" be\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" deployed\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" on\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" various\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" platforms\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" while\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" ensuring\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" user\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" data\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" control\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" It\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" also\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" serves\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" unified\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" search\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" tool\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" across\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" common\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" workplace\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" like\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" Slack\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" Google\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" Drive\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" Con\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \"fluence\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" acting\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" subject\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" matter\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" expert\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" for\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" teams\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" [[1]]()\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \"{{1}}\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \"There\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" no\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" information\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" available\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" regarding\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \"2\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \"3\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" or\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \"4\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" so\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" I\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" cannot\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" provide\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" details\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" about\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \" them\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
|
||||
"{\"citations\": []}\n"
|
||||
"{\"message_id\": 476, \"parent_message\": 475, \"latest_child_message\": null, \"message\": \"I cannot reliably answer the question about Onyx 2, 3, and 4, as the provided information only describes Onyx 1, which is an AI Assistant formerly known as Danswer. Onyx 1 connects to a company's documents, applications, and personnel, providing a chat interface and integration with any large language model (LLM) of choice. It is designed to be modular, easily extensible, and can be deployed on various platforms while ensuring user data control. It also serves as a unified search tool across common workplace applications like Slack, Google Drive, and Confluence, acting as a subject matter expert for teams [[1]](){{1}}There is no information available regarding Onyx 2, 3, or 4, so I cannot provide details about them.\", \"rephrased_query\": \"what is onyx 1, 2, 3, 4\", \"context_docs\": {\"top_documents\": [{\"document_id\": \"https://docs.onyx.app/introduction\", \"chunk_ind\": 0, \"semantic_identifier\": \"Introduction - Onyx Documentation\", \"link\": \"https://docs.onyx.app/introduction\", \"blurb\": \"Onyx Documentation home page\\nSearch...\\nNavigation\\nWelcome to Onyx\\nIntroduction\\nWelcome to Onyx\\nIntroduction\\nOnyx Overview\\n\\nWhat is Onyx\\nOnyx (Formerly Danswer) is the AI Assistant connected to your companys docs, apps, and people. Onyx provides a Chat interface and plugs into any LLM of your choice. Onyx can be deployed anywhere and for any scale - on a laptop, on-premise, or to cloud. Since you own the deployment, your user data and chats are fully in your own control. Onyx is MIT licensed and designed to be modular and easily extensible.\", \"source_type\": \"web\", \"boost\": 0, \"hidden\": false, \"metadata\": {}, \"score\": 0.6275177643886491, \"is_relevant\": null, \"relevance_explanation\": null, \"match_highlights\": [\"\", \"such as A customer wants feature X, is this already supported? or Wheres the pull request for feature Y?\\n<hi>Onyx</hi> can also be plugged into existing tools like Slack to get answers and AI chats directly in Slack.\\n\\nDemo\\n\\nMain <hi>Features</hi> \\n- Chat UI with the ability to select documents to chat with.\\n- Create custom AI Assistants\", \"\"], \"updated_at\": null, \"primary_owners\": null, \"secondary_owners\": null, \"is_internet\": false, \"db_doc_id\": 35923}]}, \"message_type\": \"assistant\", \"time_sent\": \"2025-01-12T05:37:18.318251+00:00\", \"overridden_model\": \"gpt-4o\", \"alternate_assistant_id\": 0, \"chat_session_id\": \"40f91916-7419-48d1-9681-5882b0869d88\", \"citations\": {}, \"sub_questions\": [], \"files\": [], \"tool_call\": null}\n"
@@ -3,10 +3,6 @@ from sqlalchemy.orm import Session
from ee.onyx.db.external_perm import fetch_external_groups_for_user
from ee.onyx.db.user_group import fetch_user_groups_for_documents
from ee.onyx.db.user_group import fetch_user_groups_for_user
from ee.onyx.external_permissions.post_query_censoring import (
    DOC_SOURCE_TO_CHUNK_CENSORING_FUNCTION,
)
from ee.onyx.external_permissions.sync_params import DOC_PERMISSIONS_FUNC_MAP
from onyx.access.access import (
    _get_access_for_documents as get_access_for_documents_without_groups,
)
@@ -14,7 +10,6 @@ from onyx.access.access import _get_acl_for_user as get_acl_for_user_without_gro
from onyx.access.models import DocumentAccess
from onyx.access.utils import prefix_external_group
from onyx.access.utils import prefix_user_group
from onyx.db.document import get_document_sources
from onyx.db.document import get_documents_by_ids
from onyx.db.models import User

@@ -57,20 +52,9 @@ def _get_access_for_documents(
    )
    doc_id_map = {doc.id: doc for doc in documents}

    # Get all sources in one batch
    doc_id_to_source_map = get_document_sources(
        db_session=db_session,
        document_ids=document_ids,
    )

    access_map = {}
    for document_id, non_ee_access in non_ee_access_dict.items():
        document = doc_id_map[document_id]
        source = doc_id_to_source_map.get(document_id)
        is_only_censored = (
            source in DOC_SOURCE_TO_CHUNK_CENSORING_FUNCTION
            and source not in DOC_PERMISSIONS_FUNC_MAP
        )

        ext_u_emails = (
            set(document.external_user_emails)
@@ -86,11 +70,7 @@ def _get_access_for_documents(

        # If the document is determined to be "public" externally (through a SYNC connector)
        # then it's given the same access level as if it were marked public within Onyx
        # If its censored, then it's public anywhere during the search and then permissions are
        # applied after the search
        is_public_anywhere = (
            document.is_public or non_ee_access.is_public or is_only_censored
        )
        is_public_anywhere = document.is_public or non_ee_access.is_public

        # To avoid collisions of group namings between connectors, they need to be prefixed
        access_map[document_id] = DocumentAccess(

@@ -1,7 +1,5 @@
from datetime import datetime
from functools import lru_cache

import jwt
import requests
from fastapi import Depends
from fastapi import HTTPException
@@ -22,7 +20,6 @@ from ee.onyx.server.seeding import get_seed_config
from ee.onyx.utils.secrets import extract_hashed_cookie
from onyx.auth.users import current_admin_user
from onyx.configs.app_configs import AUTH_TYPE
from onyx.configs.app_configs import USER_AUTH_SECRET
from onyx.configs.constants import AuthType
from onyx.db.models import User
from onyx.utils.logger import setup_logger
@@ -121,17 +118,3 @@ async def current_cloud_superuser(
        detail="Access denied. User must be a cloud superuser to perform this action.",
    )
    return user


def generate_anonymous_user_jwt_token(tenant_id: str) -> str:
    payload = {
        "tenant_id": tenant_id,
        # Token does not expire
        "iat": datetime.utcnow(),  # Issued at time
    }

    return jwt.encode(payload, USER_AUTH_SECRET, algorithm="HS256")


def decode_anonymous_user_jwt_token(token: str) -> dict:
    return jwt.decode(token, USER_AUTH_SECRET, algorithms=["HS256"])

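The two helpers in the hunk above wrap PyJWT directly around USER_AUTH_SECRET. A small round-trip sketch, assuming the helpers are importable from this module and that USER_AUTH_SECRET is configured; the tenant id value is made up for illustration.

# Hypothetical round-trip of the anonymous-user token helpers shown above.
token = generate_anonymous_user_jwt_token(tenant_id="tenant_1234")
claims = decode_anonymous_user_jwt_token(token)
assert claims["tenant_id"] == "tenant_1234"
# The payload carries only "tenant_id" and "iat"; no "exp" claim is set,
# so the token never expires unless it is invalidated elsewhere.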
@@ -6,7 +6,6 @@ from sqlalchemy.orm import Session
from ee.onyx.db.user_group import delete_user_group
from ee.onyx.db.user_group import fetch_user_group
from ee.onyx.db.user_group import mark_user_group_as_synced
from ee.onyx.db.user_group import prepare_user_group_for_deletion
from onyx.background.celery.apps.app_base import task_logger
from onyx.redis.redis_usergroup import RedisUserGroup
from onyx.utils.logger import setup_logger
@@ -47,20 +46,11 @@ def monitor_usergroup_taskset(

    user_group = fetch_user_group(db_session=db_session, user_group_id=usergroup_id)
    if user_group:
        usergroup_name = user_group.name
        if user_group.is_up_for_deletion:
            # this prepare should have been run when the deletion was scheduled,
            # but run it again to be sure we're ready to go
            mark_user_group_as_synced(db_session, user_group)
            prepare_user_group_for_deletion(db_session, usergroup_id)
            delete_user_group(db_session=db_session, user_group=user_group)
            task_logger.info(
                f"Deleted usergroup: name={usergroup_name} id={usergroup_id}"
            )
            task_logger.info(f"Deleted usergroup. id='{usergroup_id}'")
        else:
            mark_user_group_as_synced(db_session=db_session, user_group=user_group)
            task_logger.info(
                f"Synced usergroup. name={usergroup_name} id={usergroup_id}"
            )
            task_logger.info(f"Synced usergroup. id='{usergroup_id}'")

    rug.reset()

@@ -15,12 +15,6 @@ SAML_CONF_DIR = os.environ.get("SAML_CONF_DIR") or "/app/ee/onyx/configs/saml_co
CONFLUENCE_PERMISSION_GROUP_SYNC_FREQUENCY = int(
    os.environ.get("CONFLUENCE_PERMISSION_GROUP_SYNC_FREQUENCY") or 5 * 60
)
# This is a boolean that determines if anonymous access is public
# Default behavior is to not make the page public and instead add a group
# that contains all the users that we found in Confluence
CONFLUENCE_ANONYMOUS_ACCESS_IS_PUBLIC = (
    os.environ.get("CONFLUENCE_ANONYMOUS_ACCESS_IS_PUBLIC", "").lower() == "true"
)
# In seconds, default is 5 minutes
CONFLUENCE_PERMISSION_DOC_SYNC_FREQUENCY = int(
    os.environ.get("CONFLUENCE_PERMISSION_DOC_SYNC_FREQUENCY") or 5 * 60
@@ -61,5 +55,3 @@ POSTHOG_API_KEY = os.environ.get("POSTHOG_API_KEY") or "FooBar"
POSTHOG_HOST = os.environ.get("POSTHOG_HOST") or "https://us.i.posthog.com"

HUBSPOT_TRACKING_URL = os.environ.get("HUBSPOT_TRACKING_URL")

ANONYMOUS_USER_COOKIE_NAME = "onyx_anonymous_user"

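The CONFLUENCE_ANONYMOUS_ACCESS_IS_PUBLIC flag above is parsed with a plain string comparison, so only the literal value "true" (case-insensitive) enables it. A quick sketch of that behaviour, assuming the same parsing expression as in the config hunk; the helper name is illustrative only.

import os

# mirrors the expression used for CONFLUENCE_ANONYMOUS_ACCESS_IS_PUBLIC above
def _parse_bool_env(name: str) -> bool:
    return os.environ.get(name, "").lower() == "true"

# "true", "True", "TRUE" -> True
# unset, "", "1", "yes"  -> False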
@@ -2,7 +2,6 @@ import datetime
from collections.abc import Sequence
from uuid import UUID

from sqlalchemy import and_
from sqlalchemy import case
from sqlalchemy import cast
from sqlalchemy import Date
@@ -15,9 +14,6 @@ from onyx.configs.constants import MessageType
from onyx.db.models import ChatMessage
from onyx.db.models import ChatMessageFeedback
from onyx.db.models import ChatSession
from onyx.db.models import Persona
from onyx.db.models import User
from onyx.db.models import UserRole


def fetch_query_analytics(
@@ -238,122 +234,3 @@ def fetch_persona_unique_users(
    )

    return [tuple(row) for row in db_session.execute(query).all()]


def fetch_assistant_message_analytics(
    db_session: Session,
    assistant_id: int,
    start: datetime.datetime,
    end: datetime.datetime,
) -> list[tuple[int, datetime.date]]:
    """
    Gets the daily message counts for a specific assistant in the given time range.
    """
    query = (
        select(
            func.count(ChatMessage.id),
            cast(ChatMessage.time_sent, Date),
        )
        .join(
            ChatSession,
            ChatMessage.chat_session_id == ChatSession.id,
        )
        .where(
            or_(
                ChatMessage.alternate_assistant_id == assistant_id,
                ChatSession.persona_id == assistant_id,
            ),
            ChatMessage.time_sent >= start,
            ChatMessage.time_sent <= end,
            ChatMessage.message_type == MessageType.ASSISTANT,
        )
        .group_by(cast(ChatMessage.time_sent, Date))
        .order_by(cast(ChatMessage.time_sent, Date))
    )

    return [tuple(row) for row in db_session.execute(query).all()]


def fetch_assistant_unique_users(
    db_session: Session,
    assistant_id: int,
    start: datetime.datetime,
    end: datetime.datetime,
) -> list[tuple[int, datetime.date]]:
    """
    Gets the daily unique user counts for a specific assistant in the given time range.
    """
    query = (
        select(
            func.count(func.distinct(ChatSession.user_id)),
            cast(ChatMessage.time_sent, Date),
        )
        .join(
            ChatSession,
            ChatMessage.chat_session_id == ChatSession.id,
        )
        .where(
            or_(
                ChatMessage.alternate_assistant_id == assistant_id,
                ChatSession.persona_id == assistant_id,
            ),
            ChatMessage.time_sent >= start,
            ChatMessage.time_sent <= end,
            ChatMessage.message_type == MessageType.ASSISTANT,
        )
        .group_by(cast(ChatMessage.time_sent, Date))
        .order_by(cast(ChatMessage.time_sent, Date))
    )

    return [tuple(row) for row in db_session.execute(query).all()]


def fetch_assistant_unique_users_total(
    db_session: Session,
    assistant_id: int,
    start: datetime.datetime,
    end: datetime.datetime,
) -> int:
    """
    Gets the total number of distinct users who have sent or received messages from
    the specified assistant in the given time range.
    """
    query = (
        select(func.count(func.distinct(ChatSession.user_id)))
        .select_from(ChatMessage)
        .join(
            ChatSession,
            ChatMessage.chat_session_id == ChatSession.id,
        )
        .where(
            or_(
                ChatMessage.alternate_assistant_id == assistant_id,
                ChatSession.persona_id == assistant_id,
            ),
            ChatMessage.time_sent >= start,
            ChatMessage.time_sent <= end,
            ChatMessage.message_type == MessageType.ASSISTANT,
        )
    )

    result = db_session.execute(query).scalar()
    return result if result else 0


# Users can view assistant stats if they created the persona,
# or if they are an admin
def user_can_view_assistant_stats(
    db_session: Session, user: User | None, assistant_id: int
) -> bool:
    # If user is None and auth is disabled, assume the user is an admin

    if user is None or user.role == UserRole.ADMIN:
        return True

    # Check if the user created the persona
    stmt = select(Persona).where(
        and_(Persona.id == assistant_id, Persona.user_id == user.id)
    )

    persona = db_session.execute(stmt).scalar_one_or_none()
    return persona is not None

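The assistant analytics helpers added above all filter on assistant-type messages and match either the session's persona or a per-message alternate assistant. A hedged caller sketch; the db_session is assumed to be an open SQLAlchemy Session, the helpers are assumed to be importable from this analytics module, and the printing is purely illustrative.

# Hypothetical caller of the analytics helpers added above.
import datetime


def print_assistant_usage(db_session, assistant_id: int) -> None:
    end = datetime.datetime.now(datetime.timezone.utc)
    start = end - datetime.timedelta(days=30)

    daily_messages = fetch_assistant_message_analytics(db_session, assistant_id, start, end)
    daily_users = fetch_assistant_unique_users(db_session, assistant_id, start, end)
    total_users = fetch_assistant_unique_users_total(db_session, assistant_id, start, end)

    for count, day in daily_messages:
        print(f"{day}: {count} assistant messages")
    print(f"unique users over window: {total_users} ({len(daily_users)} daily points)")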
@@ -10,7 +10,6 @@ from onyx.access.utils import prefix_group_w_source
from onyx.configs.constants import DocumentSource
from onyx.db.models import User__ExternalUserGroupId
from onyx.db.users import batch_add_ext_perm_user_if_not_exists
from onyx.db.users import get_user_by_email
from onyx.utils.logger import setup_logger

logger = setup_logger()
@@ -107,21 +106,3 @@ def fetch_external_groups_for_user(
            User__ExternalUserGroupId.user_id == user_id
        )
    ).all()


def fetch_external_groups_for_user_email_and_group_ids(
    db_session: Session,
    user_email: str,
    group_ids: list[str],
) -> list[User__ExternalUserGroupId]:
    user = get_user_by_email(db_session=db_session, email=user_email)
    if user is None:
        return []
    user_id = user.id
    user_ext_groups = db_session.scalars(
        select(User__ExternalUserGroupId).where(
            User__ExternalUserGroupId.user_id == user_id,
            User__ExternalUserGroupId.external_user_group_id.in_(group_ids),
        )
    ).all()
    return list(user_ext_groups)

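The new lookup above resolves the user by email first and returns an empty list when the user is unknown, so callers do not need a separate missing-user branch. A short sketch; the session and the email/group values are placeholders, not values from this change.

# Hypothetical call into the helper added above (db_session: an open SQLAlchemy Session).
groups = fetch_external_groups_for_user_email_and_group_ids(
    db_session=db_session,
    user_email="jane@example.com",
    group_ids=["confluence-space-admins", "drive-shared-folder"],
)
if not groups:
    # either the user is unknown or none of the requested group ids matched
    ...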
@@ -7,7 +7,6 @@ from sqlalchemy import select
from sqlalchemy.orm import aliased
from sqlalchemy.orm import Session

from onyx.configs.app_configs import DISABLE_AUTH
from onyx.configs.constants import TokenRateLimitScope
from onyx.db.models import TokenRateLimit
from onyx.db.models import TokenRateLimit__UserGroup
@@ -21,11 +20,10 @@ from onyx.server.token_rate_limits.models import TokenRateLimitArgs
def _add_user_filters(
    stmt: Select, user: User | None, get_editable: bool = True
) -> Select:
    # If user is None and auth is disabled, assume the user is an admin
    if (user is None and DISABLE_AUTH) or (user and user.role == UserRole.ADMIN):
    # If user is None, assume the user is an admin or auth is disabled
    if user is None or user.role == UserRole.ADMIN:
        return stmt

    stmt = stmt.distinct()
    TRLimit_UG = aliased(TokenRateLimit__UserGroup)
    User__UG = aliased(User__UserGroup)

@@ -48,12 +46,6 @@ def _add_user_filters(
    that the user isn't a curator for
    - if we are not editing, we show all token_rate_limits in the groups the user curates
    """

    # If user is None, this is an anonymous user and we should only show public token_rate_limits
    if user is None:
        where_clause = TokenRateLimit.scope == TokenRateLimitScope.GLOBAL
        return stmt.where(where_clause)

    where_clause = User__UG.user_id == user.id
    if user.role == UserRole.CURATOR and get_editable:
        where_clause &= User__UG.is_curator == True  # noqa: E712

@@ -122,7 +122,7 @@ def _cleanup_document_set__user_group_relationships__no_commit(
    )


def validate_object_creation_for_user(
def validate_user_creation_permissions(
    db_session: Session,
    user: User | None,
    target_group_ids: list[int] | None = None,
@@ -440,108 +440,32 @@ def remove_curator_status__no_commit(db_session: Session, user: User) -> None:
    _validate_curator_status__no_commit(db_session, [user])


def _validate_curator_relationship_update_requester(
    db_session: Session,
    user_group_id: int,
    user_making_change: User | None = None,
) -> None:
    """
    This function validates that the user making the change has the necessary permissions
    to update the curator relationship for the target user in the given user group.
    """

    if user_making_change is None or user_making_change.role == UserRole.ADMIN:
        return

    # check if the user making the change is a curator in the group they are changing the curator relationship for
    user_making_change_curator_groups = fetch_user_groups_for_user(
        db_session=db_session,
        user_id=user_making_change.id,
        # only check if the user making the change is a curator if they are a curator
        # otherwise, they are a global_curator and can update the curator relationship
        # for any group they are a member of
        only_curator_groups=user_making_change.role == UserRole.CURATOR,
    )
    requestor_curator_group_ids = [
        group.id for group in user_making_change_curator_groups
    ]
    if user_group_id not in requestor_curator_group_ids:
        raise ValueError(
            f"user making change {user_making_change.email} is not a curator,"
            f" admin, or global_curator for group '{user_group_id}'"
        )


def _validate_curator_relationship_update_request(
    db_session: Session,
    user_group_id: int,
    target_user: User,
) -> None:
    """
    This function validates that the curator_relationship_update request itself is valid.
    """
    if target_user.role == UserRole.ADMIN:
        raise ValueError(
            f"User '{target_user.email}' is an admin and therefore has all permissions "
            "of a curator. If you'd like this user to only have curator permissions, "
            "you must update their role to BASIC then assign them to be CURATOR in the "
            "appropriate groups."
        )
    elif target_user.role == UserRole.GLOBAL_CURATOR:
        raise ValueError(
            f"User '{target_user.email}' is a global_curator and therefore has all "
            "permissions of a curator for all groups. If you'd like this user to only "
            "have curator permissions for a specific group, you must update their role "
            "to BASIC then assign them to be CURATOR in the appropriate groups."
        )
    elif target_user.role not in [UserRole.CURATOR, UserRole.BASIC]:
        raise ValueError(
            f"This endpoint can only be used to update the curator relationship for "
            "users with the CURATOR or BASIC role. \n"
            f"Target user: {target_user.email} \n"
            f"Target user role: {target_user.role} \n"
        )

    # check if the target user is in the group they are changing the curator relationship for
    requested_user_groups = fetch_user_groups_for_user(
        db_session=db_session,
        user_id=target_user.id,
        only_curator_groups=False,
    )
    group_ids = [group.id for group in requested_user_groups]
    if user_group_id not in group_ids:
        raise ValueError(
            f"target user {target_user.email} is not in group '{user_group_id}'"
        )


def update_user_curator_relationship(
    db_session: Session,
    user_group_id: int,
    set_curator_request: SetCuratorRequest,
    user_making_change: User | None = None,
) -> None:
    target_user = fetch_user_by_id(db_session, set_curator_request.user_id)
    if not target_user:
    user = fetch_user_by_id(db_session, set_curator_request.user_id)
    if not user:
        raise ValueError(f"User with id '{set_curator_request.user_id}' not found")

    _validate_curator_relationship_update_request(
    if user.role == UserRole.ADMIN:
        raise ValueError(
            f"User '{user.email}' is an admin and therefore has all permissions "
            "of a curator. If you'd like this user to only have curator permissions, "
            "you must update their role to BASIC then assign them to be CURATOR in the "
            "appropriate groups."
        )

    requested_user_groups = fetch_user_groups_for_user(
        db_session=db_session,
        user_group_id=user_group_id,
        target_user=target_user,
        user_id=set_curator_request.user_id,
        only_curator_groups=False,
    )

    _validate_curator_relationship_update_requester(
        db_session=db_session,
        user_group_id=user_group_id,
        user_making_change=user_making_change,
    )

    logger.info(
        f"user_making_change={user_making_change.email if user_making_change else 'None'} is "
        f"updating the curator relationship for user={target_user.email} "
        f"in group={user_group_id} to is_curator={set_curator_request.is_curator}"
    )
    group_ids = [group.id for group in requested_user_groups]
    if user_group_id not in group_ids:
        raise ValueError(f"user is not in group '{user_group_id}'")

    relationship_to_update = (
        db_session.query(User__UserGroup)
@@ -562,7 +486,7 @@ def update_user_curator_relationship(
        )
        db_session.add(relationship_to_update)

    _validate_curator_status__no_commit(db_session, [target_user])
    _validate_curator_status__no_commit(db_session, [user])
    db_session.commit()

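Note on the curator hunks above: the new validation boils down to a small rule — only BASIC and CURATOR users may have a per-group curator flag toggled, since ADMIN and GLOBAL_CURATOR already hold curator rights everywhere. A minimal sketch of that rule (the role strings are stand-ins for the UserRole enum, not code from the diff):

ALLOWED_TARGET_ROLES = {"BASIC", "CURATOR"}

def can_toggle_curator(target_role: str) -> bool:
    # ADMIN and GLOBAL_CURATOR are rejected; they must be downgraded to BASIC first
    return target_role in ALLOWED_TARGET_ROLES

print(can_toggle_curator("CURATOR"))         # True
print(can_toggle_curator("GLOBAL_CURATOR"))  # False
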
@@ -1,4 +0,0 @@
# This is a group that we use to store all the users that we found in Confluence
# Instead of setting a page to public, we just add this group so that the page
# is only accessible to users who have confluence accounts.
ALL_CONF_EMAILS_GROUP_NAME = "All_Confluence_Users_Found_By_Onyx"

@@ -4,8 +4,6 @@ https://confluence.atlassian.com/conf85/check-who-can-view-a-page-1283360557.htm
"""
from typing import Any

from ee.onyx.configs.app_configs import CONFLUENCE_ANONYMOUS_ACCESS_IS_PUBLIC
from ee.onyx.external_permissions.confluence.constants import ALL_CONF_EMAILS_GROUP_NAME
from onyx.access.models import DocExternalAccess
from onyx.access.models import ExternalAccess
from onyx.connectors.confluence.connector import ConfluenceConnector
@@ -24,9 +22,7 @@ _REQUEST_PAGINATION_LIMIT = 5000
def _get_server_space_permissions(
    confluence_client: OnyxConfluence, space_key: str
) -> ExternalAccess:
    space_permissions = confluence_client.get_all_space_permissions_server(
        space_key=space_key
    )
    space_permissions = confluence_client.get_space_permissions(space_key=space_key)

    viewspace_permissions = []
    for permission_category in space_permissions:
@@ -35,32 +31,14 @@ def _get_server_space_permissions(
            permission_category.get("spacePermissions", [])
        )

    is_public = False
    user_names = set()
    group_names = set()
    for permission in viewspace_permissions:
        user_name = permission.get("userName")
        if user_name:
        if user_name := permission.get("userName"):
            user_names.add(user_name)
        group_name = permission.get("groupName")
        if group_name:
        if group_name := permission.get("groupName"):
            group_names.add(group_name)

        # It seems that if anonymous access is turned on for the site and space,
        # then the space is publicly accessible.
        # For confluence server, we make a group that contains all users
        # that exist in confluence and then just add that group to the space permissions
        # if anonymous access is turned on for the site and space or we set is_public = True
        # if they set the env variable CONFLUENCE_ANONYMOUS_ACCESS_IS_PUBLIC to True so
        # that we can support confluence server deployments that want anonymous access
        # to be public (we can't test this because it's paywalled)
        if user_name is None and group_name is None:
            # Defaults to False
            if CONFLUENCE_ANONYMOUS_ACCESS_IS_PUBLIC:
                is_public = True
            else:
                group_names.add(ALL_CONF_EMAILS_GROUP_NAME)

    user_emails = set()
    for user_name in user_names:
        user_email = get_user_email_from_username__server(confluence_client, user_name)
@@ -69,17 +47,14 @@ def _get_server_space_permissions(
        else:
            logger.warning(f"Email for user {user_name} not found in Confluence")

    if not user_emails and not group_names:
        logger.warning(
            "No user emails or group names found in Confluence space permissions"
            f"\nSpace key: {space_key}"
            f"\nSpace permissions: {space_permissions}"
        )

    return ExternalAccess(
        external_user_emails=user_emails,
        external_user_group_ids=group_names,
        is_public=is_public,
        # TODO: Check if the space is publicly accessible
        # Currently, we assume the space is not public
        # We need to check if anonymous access is turned on for the site and space
        # This information is paywalled so it remains unimplemented
        is_public=False,
    )


@@ -159,7 +134,7 @@ def _get_space_permissions(

def _extract_read_access_restrictions(
    confluence_client: OnyxConfluence, restrictions: dict[str, Any]
) -> tuple[set[str], set[str]]:
) -> ExternalAccess | None:
    """
    Converts a page's restrictions dict into an ExternalAccess object.
    If there are no restrictions, then return None
@@ -202,57 +177,21 @@ def _extract_read_access_restrictions(
        group["name"] for group in read_access_group_jsons if group.get("name")
    ]

    return set(read_access_user_emails), set(read_access_group_names)


def _get_all_page_restrictions(
    confluence_client: OnyxConfluence,
    perm_sync_data: dict[str, Any],
) -> ExternalAccess | None:
    """
    This function gets the restrictions for a page by taking the intersection
    of the page's restrictions and the restrictions of all the ancestors
    of the page.
    If the page/ancestor has no restrictions, then it is ignored (no intersection).
    If no restrictions are found anywhere, then return None, indicating that the page
    should inherit the space's restrictions.
    """
    found_user_emails: set[str] = set()
    found_group_names: set[str] = set()

    found_user_emails, found_group_names = _extract_read_access_restrictions(
        confluence_client=confluence_client,
        restrictions=perm_sync_data.get("restrictions", {}),
    )

    ancestors: list[dict[str, Any]] = perm_sync_data.get("ancestors", [])
    for ancestor in ancestors:
        ancestor_user_emails, ancestor_group_names = _extract_read_access_restrictions(
            confluence_client=confluence_client,
            restrictions=ancestor.get("restrictions", {}),
        )
        if not ancestor_user_emails and not ancestor_group_names:
            # This ancestor has no restrictions, so it has no effect on
            # the page's restrictions, so we ignore it
            continue

        found_user_emails.intersection_update(ancestor_user_emails)
        found_group_names.intersection_update(ancestor_group_names)

    # If there are no restrictions found, then the page
    # inherits the space's restrictions so return None
    if not found_user_emails and not found_group_names:
    is_space_public = read_access_user_emails == [] and read_access_group_names == []
    if is_space_public:
        return None

    return ExternalAccess(
        external_user_emails=found_user_emails,
        external_user_group_ids=found_group_names,
        external_user_emails=set(read_access_user_emails),
        external_user_group_ids=set(read_access_group_names),
        # there is no way for a page to be individually public if the space isn't public
        is_public=False,
    )


def _fetch_all_page_restrictions(
def _fetch_all_page_restrictions_for_space(
    confluence_client: OnyxConfluence,
    slim_docs: list[SlimDocument],
    space_permissions_by_space_key: dict[str, ExternalAccess],
@@ -269,11 +208,11 @@ def _fetch_all_page_restrictions(
            raise ValueError(
                f"No permission sync data found for document {slim_doc.id}"
            )

        if restrictions := _get_all_page_restrictions(
        restrictions = _extract_read_access_restrictions(
            confluence_client=confluence_client,
            perm_sync_data=slim_doc.perm_sync_data,
        ):
            restrictions=slim_doc.perm_sync_data.get("restrictions", {}),
        )
        if restrictions:
            document_restrictions.append(
                DocExternalAccess(
                    doc_id=slim_doc.id,
@@ -362,7 +301,7 @@ def confluence_doc_sync(
        slim_docs.extend(doc_batch)

    logger.debug("Fetching all page restrictions for space")
    return _fetch_all_page_restrictions(
    return _fetch_all_page_restrictions_for_space(
        confluence_client=confluence_connector.confluence_client,
        slim_docs=slim_docs,
        space_permissions_by_space_key=space_permissions_by_space_key,

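For context on the `_get_all_page_restrictions` hunk above: a page is only as visible as the most restrictive of its restricted ancestors, while ancestors with no restrictions are skipped. A minimal, self-contained sketch of that intersection behaviour (the emails and group names are made up for illustration, not taken from the diff):

page_restrictions = {"users": {"a@x.com", "b@x.com"}, "groups": {"eng"}}
ancestor_restrictions = [
    {"users": set(), "groups": set()},  # unrestricted ancestor -> ignored
    {"users": {"a@x.com"}, "groups": {"eng", "hr"}},
]

allowed_users = set(page_restrictions["users"])
allowed_groups = set(page_restrictions["groups"])
for ancestor in ancestor_restrictions:
    if not ancestor["users"] and not ancestor["groups"]:
        continue  # this ancestor does not narrow access
    allowed_users &= ancestor["users"]
    allowed_groups &= ancestor["groups"]

print(allowed_users, allowed_groups)  # {'a@x.com'} {'eng'}
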
@@ -1,11 +1,11 @@
from ee.onyx.db.external_perm import ExternalUserGroup
from ee.onyx.external_permissions.confluence.constants import ALL_CONF_EMAILS_GROUP_NAME
from onyx.connectors.confluence.onyx_confluence import build_confluence_client
from onyx.connectors.confluence.onyx_confluence import OnyxConfluence
from onyx.connectors.confluence.utils import get_user_email_from_username__server
from onyx.db.models import ConnectorCredentialPair
from onyx.utils.logger import setup_logger


logger = setup_logger()


@@ -30,7 +30,6 @@ def _build_group_member_email_map(
            )
            if not email:
                # If we still don't have an email, skip this user
                logger.warning(f"user result missing email field: {user_result}")
                continue

        for group in confluence_client.paginated_groups_by_user_retrieval(user):
@@ -54,7 +53,6 @@ def confluence_group_sync(
        confluence_client=confluence_client,
    )
    onyx_groups: list[ExternalUserGroup] = []
    all_found_emails = set()
    for group_id, group_member_emails in group_member_email_map.items():
        onyx_groups.append(
            ExternalUserGroup(
@@ -62,15 +60,5 @@ def confluence_group_sync(
                user_emails=list(group_member_emails),
            )
        )
        all_found_emails.update(group_member_emails)

    # This is so that when we find a public Confluence server page, we can
    # give access to all users only if they have an email in Confluence
    if cc_pair.connector.connector_specific_config.get("is_cloud", False):
        all_found_group = ExternalUserGroup(
            id=ALL_CONF_EMAILS_GROUP_NAME,
            user_emails=list(all_found_emails),
        )
        onyx_groups.append(all_found_group)

    return onyx_groups

@@ -1,84 +0,0 @@
from collections.abc import Callable

from ee.onyx.db.connector_credential_pair import get_all_auto_sync_cc_pairs
from ee.onyx.external_permissions.salesforce.postprocessing import (
    censor_salesforce_chunks,
)
from onyx.configs.constants import DocumentSource
from onyx.context.search.pipeline import InferenceChunk
from onyx.db.engine import get_session_context_manager
from onyx.db.models import User
from onyx.utils.logger import setup_logger

logger = setup_logger()

DOC_SOURCE_TO_CHUNK_CENSORING_FUNCTION: dict[
    DocumentSource,
    # list of chunks to be censored and the user email. returns censored chunks
    Callable[[list[InferenceChunk], str], list[InferenceChunk]],
] = {
    DocumentSource.SALESFORCE: censor_salesforce_chunks,
}


def _get_all_censoring_enabled_sources() -> set[DocumentSource]:
    """
    Returns the set of sources that have censoring enabled.
    This is based on if the access_type is set to sync and the connector
    source is included in DOC_SOURCE_TO_CHUNK_CENSORING_FUNCTION.

    NOTE: This means that if a source has a single cc_pair that is sync,
    all chunks for that source will be censored, even if the connector that
    indexed that chunk is not sync. This was done to avoid getting the cc_pair
    for every single chunk.
    """
    with get_session_context_manager() as db_session:
        enabled_sync_connectors = get_all_auto_sync_cc_pairs(db_session)
        return {
            cc_pair.connector.source
            for cc_pair in enabled_sync_connectors
            if cc_pair.connector.source in DOC_SOURCE_TO_CHUNK_CENSORING_FUNCTION
        }


# NOTE: This is only called if ee is enabled.
def _post_query_chunk_censoring(
    chunks: list[InferenceChunk],
    user: User | None,
) -> list[InferenceChunk]:
    """
    This function checks all chunks to see if they need to be sent to a censoring
    function. If they do, it sends them to the censoring function and returns the
    censored chunks. If they don't, it returns the original chunks.
    """
    if user is None:
        # if user is None, permissions are not enforced
        return chunks

    chunks_to_keep = []
    chunks_to_process: dict[DocumentSource, list[InferenceChunk]] = {}

    sources_to_censor = _get_all_censoring_enabled_sources()
    for chunk in chunks:
        # Separate out chunks that require permission post-processing by source
        if chunk.source_type in sources_to_censor:
            chunks_to_process.setdefault(chunk.source_type, []).append(chunk)
        else:
            chunks_to_keep.append(chunk)

    # For each source, filter out the chunks using the permission
    # check function for that source
    # TODO: Use a threadpool/multiprocessing to process the sources in parallel
    for source, chunks_for_source in chunks_to_process.items():
        censor_chunks_for_source = DOC_SOURCE_TO_CHUNK_CENSORING_FUNCTION[source]
        try:
            censored_chunks = censor_chunks_for_source(chunks_for_source, user.email)
        except Exception as e:
            logger.exception(
                f"Failed to censor chunks for source {source} so throwing out all"
                f" chunks for this source and continuing: {e}"
            )
            continue
        chunks_to_keep.extend(censored_chunks)

    return chunks_to_keep

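The file above dispatches chunks to a per-source censoring function and passes everything else through untouched. A minimal sketch of that dispatch pattern with plain strings standing in for DocumentSource values and chunks (all names here are hypothetical, not from the diff):

from collections.abc import Callable

CENSOR_FUNCS: dict[str, Callable[[list[str], str], list[str]]] = {
    "salesforce": lambda chunks, email: [c for c in chunks if "private" not in c],
}

def censor(chunks_by_source: dict[str, list[str]], user_email: str) -> list[str]:
    kept: list[str] = []
    for source, chunks in chunks_by_source.items():
        func = CENSOR_FUNCS.get(source)
        # sources without a registered censor function pass through untouched
        kept.extend(func(chunks, user_email) if func else chunks)
    return kept

print(censor({"salesforce": ["public doc", "private doc"], "web": ["page"]}, "a@x.com"))
# ['public doc', 'page']
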
@@ -1,226 +0,0 @@
import time

from ee.onyx.db.external_perm import fetch_external_groups_for_user_email_and_group_ids
from ee.onyx.external_permissions.salesforce.utils import (
    get_any_salesforce_client_for_doc_id,
)
from ee.onyx.external_permissions.salesforce.utils import get_objects_access_for_user_id
from ee.onyx.external_permissions.salesforce.utils import (
    get_salesforce_user_id_from_email,
)
from onyx.configs.app_configs import BLURB_SIZE
from onyx.context.search.models import InferenceChunk
from onyx.db.engine import get_session_context_manager
from onyx.utils.logger import setup_logger

logger = setup_logger()


# Types
ChunkKey = tuple[str, int]  # (doc_id, chunk_id)
ContentRange = tuple[int, int | None]  # (start_index, end_index) None means to the end


# NOTE: Used for testing timing
def _get_dummy_object_access_map(
    object_ids: set[str], user_email: str, chunks: list[InferenceChunk]
) -> dict[str, bool]:
    time.sleep(0.15)
    # return {object_id: True for object_id in object_ids}
    import random

    return {object_id: random.choice([True, False]) for object_id in object_ids}


def _get_objects_access_for_user_email_from_salesforce(
    object_ids: set[str],
    user_email: str,
    chunks: list[InferenceChunk],
) -> dict[str, bool] | None:
    """
    This function wraps the salesforce call as we may want to change how this
    is done in the future. (E.g. replace it with the above function)
    """
    # This is cached in the function so the first query takes an extra 0.1-0.3 seconds
    # but subsequent queries for this source are essentially instant
    first_doc_id = chunks[0].document_id
    with get_session_context_manager() as db_session:
        salesforce_client = get_any_salesforce_client_for_doc_id(
            db_session, first_doc_id
        )

    # This is cached in the function so the first query takes an extra 0.1-0.3 seconds
    # but subsequent queries by the same user are essentially instant
    start_time = time.time()
    user_id = get_salesforce_user_id_from_email(salesforce_client, user_email)
    end_time = time.time()
    logger.info(
        f"Time taken to get Salesforce user ID: {end_time - start_time} seconds"
    )
    if user_id is None:
        return None

    # This is the only query that is not cached in the function
    # so it takes 0.1-0.2 seconds total
    object_id_to_access = get_objects_access_for_user_id(
        salesforce_client, user_id, list(object_ids)
    )
    return object_id_to_access


def _extract_salesforce_object_id_from_url(url: str) -> str:
    return url.split("/")[-1]


def _get_object_ranges_for_chunk(
    chunk: InferenceChunk,
) -> dict[str, list[ContentRange]]:
    """
    Given a chunk, return a dictionary of salesforce object ids and the content ranges
    for that object id in the current chunk
    """
    if chunk.source_links is None:
        return {}

    object_ranges: dict[str, list[ContentRange]] = {}
    end_index = None
    descending_source_links = sorted(
        chunk.source_links.items(), key=lambda x: x[0], reverse=True
    )
    for start_index, url in descending_source_links:
        object_id = _extract_salesforce_object_id_from_url(url)
        if object_id not in object_ranges:
            object_ranges[object_id] = []
        object_ranges[object_id].append((start_index, end_index))
        end_index = start_index
    return object_ranges


def _create_empty_censored_chunk(uncensored_chunk: InferenceChunk) -> InferenceChunk:
    """
    Create a copy of the unfiltered chunk where potentially sensitive content is removed
    to be added later if the user has access to each of the sub-objects
    """
    empty_censored_chunk = InferenceChunk(
        **uncensored_chunk.model_dump(),
    )
    empty_censored_chunk.content = ""
    empty_censored_chunk.blurb = ""
    empty_censored_chunk.source_links = {}
    return empty_censored_chunk


def _update_censored_chunk(
    censored_chunk: InferenceChunk,
    uncensored_chunk: InferenceChunk,
    content_range: ContentRange,
) -> InferenceChunk:
    """
    Update the filtered chunk with the content and source links from the unfiltered chunk using the content ranges
    """
    start_index, end_index = content_range

    # Update the content of the filtered chunk
    permitted_content = uncensored_chunk.content[start_index:end_index]
    permitted_section_start_index = len(censored_chunk.content)
    censored_chunk.content = permitted_content + censored_chunk.content

    # Update the source links of the filtered chunk
    if uncensored_chunk.source_links is not None:
        if censored_chunk.source_links is None:
            censored_chunk.source_links = {}
        link_content = uncensored_chunk.source_links[start_index]
        censored_chunk.source_links[permitted_section_start_index] = link_content

    # Update the blurb of the filtered chunk
    censored_chunk.blurb = censored_chunk.content[:BLURB_SIZE]

    return censored_chunk


# TODO: Generalize this to other sources
def censor_salesforce_chunks(
    chunks: list[InferenceChunk],
    user_email: str,
    # This is so we can provide a mock access map for testing
    access_map: dict[str, bool] | None = None,
) -> list[InferenceChunk]:
    # object_id -> list[((doc_id, chunk_id), (start_index, end_index))]
    object_to_content_map: dict[str, list[tuple[ChunkKey, ContentRange]]] = {}

    # (doc_id, chunk_id) -> chunk
    uncensored_chunks: dict[ChunkKey, InferenceChunk] = {}

    # keep track of all object ids that we have seen to make it easier to get
    # the access for these object ids
    object_ids: set[str] = set()

    for chunk in chunks:
        chunk_key = (chunk.document_id, chunk.chunk_id)
        # create a dictionary to quickly look up the unfiltered chunk
        uncensored_chunks[chunk_key] = chunk

        # for each chunk, get a dictionary of object ids and the content ranges
        # for that object id in the current chunk
        object_ranges_for_chunk = _get_object_ranges_for_chunk(chunk)
        for object_id, ranges in object_ranges_for_chunk.items():
            object_ids.add(object_id)
            for start_index, end_index in ranges:
                object_to_content_map.setdefault(object_id, []).append(
                    (chunk_key, (start_index, end_index))
                )

    # This is so we can provide a mock access map for testing
    if access_map is None:
        access_map = _get_objects_access_for_user_email_from_salesforce(
            object_ids=object_ids,
            user_email=user_email,
            chunks=chunks,
        )
    if access_map is None:
        # If the user is not found in Salesforce, access_map will be None
        # so we should just return an empty list because no chunks will be
        # censored
        return []

    censored_chunks: dict[ChunkKey, InferenceChunk] = {}
    for object_id, content_list in object_to_content_map.items():
        # if the user does not have access to the object, or the object is not in the
        # access_map, do not include its content in the filtered chunks
        if not access_map.get(object_id, False):
            continue

        # if we got this far, the user has access to the object so we can create or update
        # the filtered chunk(s) for this object
        # NOTE: we only create a censored chunk if the user has access to some
        # part of the chunk
        for chunk_key, content_range in content_list:
            if chunk_key not in censored_chunks:
                censored_chunks[chunk_key] = _create_empty_censored_chunk(
                    uncensored_chunks[chunk_key]
                )

            uncensored_chunk = uncensored_chunks[chunk_key]
            censored_chunk = _update_censored_chunk(
                censored_chunk=censored_chunks[chunk_key],
                uncensored_chunk=uncensored_chunk,
                content_range=content_range,
            )
            censored_chunks[chunk_key] = censored_chunk

    return list(censored_chunks.values())


# NOTE: This is not used anywhere.
def _get_objects_access_for_user_email(
    object_ids: set[str], user_email: str
) -> dict[str, bool]:
    with get_session_context_manager() as db_session:
        external_groups = fetch_external_groups_for_user_email_and_group_ids(
            db_session=db_session,
            user_email=user_email,
            # Maybe make a function that adds a salesforce prefix to the group ids
            group_ids=list(object_ids),
        )
        external_group_ids = {group.external_user_group_id for group in external_groups}
        return {group_id: group_id in external_group_ids for group_id in object_ids}

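The core of the removed postprocessing module is the mapping from a chunk's source_links to per-object content ranges, which is what lets censoring keep only the spans the user may read. A minimal sketch of that range-building step with plain dicts (the URLs and offsets are made up; it mirrors `_get_object_ranges_for_chunk` but does not depend on InferenceChunk):

# offset in the chunk content -> URL of the Salesforce object the following text came from
source_links = {0: "https://example.my.salesforce.com/001A", 40: "https://example.my.salesforce.com/003B"}

object_ranges: dict[str, list[tuple[int, int | None]]] = {}
end_index: int | None = None
for start_index, url in sorted(source_links.items(), reverse=True):
    object_id = url.split("/")[-1]  # same extraction as _extract_salesforce_object_id_from_url
    object_ranges.setdefault(object_id, []).append((start_index, end_index))
    end_index = start_index

print(object_ranges)  # {'003B': [(40, None)], '001A': [(0, 40)]}
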
@@ -1,174 +0,0 @@
from simple_salesforce import Salesforce
from sqlalchemy.orm import Session

from onyx.connectors.salesforce.sqlite_functions import get_user_id_by_email
from onyx.connectors.salesforce.sqlite_functions import init_db
from onyx.connectors.salesforce.sqlite_functions import NULL_ID_STRING
from onyx.connectors.salesforce.sqlite_functions import update_email_to_id_table
from onyx.db.connector_credential_pair import get_connector_credential_pair_from_id
from onyx.db.document import get_cc_pairs_for_document
from onyx.utils.logger import setup_logger

logger = setup_logger()

_ANY_SALESFORCE_CLIENT: Salesforce | None = None


def get_any_salesforce_client_for_doc_id(
    db_session: Session, doc_id: str
) -> Salesforce:
    """
    We create a salesforce client for the first cc_pair for the first doc_id where
    salesforce censoring is enabled. After that we just cache and reuse the same
    client for all queries.

    We do this to reduce the number of postgres queries we make at query time.

    This may be problematic if they are using multiple cc_pairs for salesforce.
    E.g. there are 2 different credential sets for 2 different salesforce cc_pairs
    but only one has the permissions to access the permissions needed for the query.
    """
    global _ANY_SALESFORCE_CLIENT
    if _ANY_SALESFORCE_CLIENT is None:
        cc_pairs = get_cc_pairs_for_document(db_session, doc_id)
        first_cc_pair = cc_pairs[0]
        credential_json = first_cc_pair.credential.credential_json
        _ANY_SALESFORCE_CLIENT = Salesforce(
            username=credential_json["sf_username"],
            password=credential_json["sf_password"],
            security_token=credential_json["sf_security_token"],
        )
    return _ANY_SALESFORCE_CLIENT


def _query_salesforce_user_id(sf_client: Salesforce, user_email: str) -> str | None:
    query = f"SELECT Id FROM User WHERE Email = '{user_email}'"
    result = sf_client.query(query)
    if len(result["records"]) == 0:
        return None
    return result["records"][0]["Id"]


# This contains only the user_ids that we have found in Salesforce.
# If we don't know their user_id, we don't store anything in the cache.
_CACHED_SF_EMAIL_TO_ID_MAP: dict[str, str] = {}


def get_salesforce_user_id_from_email(
    sf_client: Salesforce,
    user_email: str,
) -> str | None:
    """
    We cache this so we don't have to query Salesforce for every query and salesforce
    user IDs never change.
    Memory usage is fine because we just store 2 small strings per user.

    If the email is not in the cache, we check the local salesforce database for the info.
    If the user is not found in the local salesforce database, we query Salesforce.
    Whatever we get back from Salesforce is added to the database.
    If no user_id is found, we add a NULL_ID_STRING to the database for that email so
    we don't query Salesforce again (which is slow) but we still check the local salesforce
    database every query until a user id is found. This is acceptable because the query time
    is quite fast.
    If a user_id is created in Salesforce, it will be added to the local salesforce database
    next time the connector is run. Then that value will be found in this function and cached.

    NOTE: First time this runs, it may be slow if it hasn't already been updated in the local
    salesforce database. (Around 0.1-0.3 seconds)
    If it's cached or stored in the local salesforce database, it's fast (<0.001 seconds).
    """
    global _CACHED_SF_EMAIL_TO_ID_MAP
    if user_email in _CACHED_SF_EMAIL_TO_ID_MAP:
        if _CACHED_SF_EMAIL_TO_ID_MAP[user_email] is not None:
            return _CACHED_SF_EMAIL_TO_ID_MAP[user_email]

    db_exists = True
    try:
        # Check if the user is already in the database
        user_id = get_user_id_by_email(user_email)
    except Exception:
        init_db()
        try:
            user_id = get_user_id_by_email(user_email)
        except Exception as e:
            logger.error(f"Error checking if user is in database: {e}")
            user_id = None
            db_exists = False

    # If no entry is found in the database (indicated by user_id being None)...
    if user_id is None:
        # ...query Salesforce and store the result in the database
        user_id = _query_salesforce_user_id(sf_client, user_email)
        if db_exists:
            update_email_to_id_table(user_email, user_id)
        return user_id
    elif user_id is None:
        return None
    elif user_id == NULL_ID_STRING:
        return None
    # If the found user_id is real, cache it
    _CACHED_SF_EMAIL_TO_ID_MAP[user_email] = user_id
    return user_id


_MAX_RECORD_IDS_PER_QUERY = 200


def get_objects_access_for_user_id(
    salesforce_client: Salesforce,
    user_id: str,
    record_ids: list[str],
) -> dict[str, bool]:
    """
    Salesforce has a limit of 200 record ids per query. So we just truncate
    the list of record ids to 200. We only ever retrieve 50 chunks at a time
    so this should be fine (it is unlikely that all 50 chunks each contain
    4 unique objects).
    If we decide this isn't acceptable we can use multiple queries but they
    should be in parallel so query time doesn't get too long.
    """
    truncated_record_ids = record_ids[:_MAX_RECORD_IDS_PER_QUERY]
    record_ids_str = "'" + "','".join(truncated_record_ids) + "'"
    access_query = f"""
    SELECT RecordId, HasReadAccess
    FROM UserRecordAccess
    WHERE RecordId IN ({record_ids_str})
    AND UserId = '{user_id}'
    """
    result = salesforce_client.query_all(access_query)
    return {record["RecordId"]: record["HasReadAccess"] for record in result["records"]}


_CC_PAIR_ID_SALESFORCE_CLIENT_MAP: dict[int, Salesforce] = {}
_DOC_ID_TO_CC_PAIR_ID_MAP: dict[str, int] = {}


# NOTE: This is not used anywhere.
def _get_salesforce_client_for_doc_id(db_session: Session, doc_id: str) -> Salesforce:
    """
    Uses a document id to get the cc_pair that indexed that document and uses the credentials
    for that cc_pair to create a Salesforce client.
    Problems:
    - There may be multiple cc_pairs for a document, and we don't know which one to use.
      - right now we just use the first one
    - Building a new Salesforce client for each document is slow.
    - Memory usage could be an issue as we build these dictionaries.
    """
    if doc_id not in _DOC_ID_TO_CC_PAIR_ID_MAP:
        cc_pairs = get_cc_pairs_for_document(db_session, doc_id)
        first_cc_pair = cc_pairs[0]
        _DOC_ID_TO_CC_PAIR_ID_MAP[doc_id] = first_cc_pair.id

    cc_pair_id = _DOC_ID_TO_CC_PAIR_ID_MAP[doc_id]
    if cc_pair_id not in _CC_PAIR_ID_SALESFORCE_CLIENT_MAP:
        cc_pair = get_connector_credential_pair_from_id(cc_pair_id, db_session)
        if cc_pair is None:
            raise ValueError(f"CC pair {cc_pair_id} not found")
        credential_json = cc_pair.credential.credential_json
        _CC_PAIR_ID_SALESFORCE_CLIENT_MAP[cc_pair_id] = Salesforce(
            username=credential_json["sf_username"],
            password=credential_json["sf_password"],
            security_token=credential_json["sf_security_token"],
        )

    return _CC_PAIR_ID_SALESFORCE_CLIENT_MAP[cc_pair_id]

@@ -8,9 +8,6 @@ from ee.onyx.external_permissions.confluence.group_sync import confluence_group_
from ee.onyx.external_permissions.gmail.doc_sync import gmail_doc_sync
from ee.onyx.external_permissions.google_drive.doc_sync import gdrive_doc_sync
from ee.onyx.external_permissions.google_drive.group_sync import gdrive_group_sync
from ee.onyx.external_permissions.post_query_censoring import (
    DOC_SOURCE_TO_CHUNK_CENSORING_FUNCTION,
)
from ee.onyx.external_permissions.slack.doc_sync import slack_doc_sync
from onyx.access.models import DocExternalAccess
from onyx.configs.constants import DocumentSource
@@ -74,7 +71,4 @@ EXTERNAL_GROUP_SYNC_PERIODS: dict[DocumentSource, int] = {


def check_if_valid_sync_source(source_type: DocumentSource) -> bool:
    return (
        source_type in DOC_PERMISSIONS_FUNC_MAP
        or source_type in DOC_SOURCE_TO_CHUNK_CENSORING_FUNCTION
    )
    return source_type in DOC_PERMISSIONS_FUNC_MAP

@@ -40,7 +40,6 @@ from onyx.configs.app_configs import USER_AUTH_SECRET
from onyx.configs.app_configs import WEB_DOMAIN
from onyx.configs.constants import AuthType
from onyx.main import get_application as get_application_base
from onyx.main import include_auth_router_with_prefix
from onyx.main import include_router_with_global_prefix_prepended
from onyx.utils.logger import setup_logger
from onyx.utils.variable_functionality import global_version
@@ -63,7 +62,7 @@ def get_application() -> FastAPI:

    if AUTH_TYPE == AuthType.CLOUD:
        oauth_client = GoogleOAuth2(OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET)
        include_auth_router_with_prefix(
        include_router_with_global_prefix_prepended(
            application,
            create_onyx_oauth_router(
                oauth_client,
@@ -75,17 +74,19 @@ def get_application() -> FastAPI:
                redirect_url=f"{WEB_DOMAIN}/auth/oauth/callback",
            ),
            prefix="/auth/oauth",
            tags=["auth"],
        )

        # Need basic auth router for `logout` endpoint
        include_auth_router_with_prefix(
        include_router_with_global_prefix_prepended(
            application,
            fastapi_users.get_logout_router(auth_backend),
            prefix="/auth",
            tags=["auth"],
        )

    if AUTH_TYPE == AuthType.OIDC:
        include_auth_router_with_prefix(
        include_router_with_global_prefix_prepended(
            application,
            create_onyx_oauth_router(
                OpenID(OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET, OPENID_CONFIG_URL),
@@ -96,20 +97,19 @@ def get_application() -> FastAPI:
                redirect_url=f"{WEB_DOMAIN}/auth/oidc/callback",
            ),
            prefix="/auth/oidc",
            tags=["auth"],
        )

        # need basic auth router for `logout` endpoint
        include_auth_router_with_prefix(
        include_router_with_global_prefix_prepended(
            application,
            fastapi_users.get_auth_router(auth_backend),
            prefix="/auth",
            tags=["auth"],
        )

    elif AUTH_TYPE == AuthType.SAML:
        include_auth_router_with_prefix(
            application,
            saml_router,
        )
        include_router_with_global_prefix_prepended(application, saml_router)

    # RBAC / group access control
    include_router_with_global_prefix_prepended(application, user_group_router)

@@ -1,24 +1,17 @@
import datetime
from collections import defaultdict
from typing import List

from fastapi import APIRouter
from fastapi import Depends
from fastapi import HTTPException
from pydantic import BaseModel
from sqlalchemy.orm import Session

from ee.onyx.db.analytics import fetch_assistant_message_analytics
from ee.onyx.db.analytics import fetch_assistant_unique_users
from ee.onyx.db.analytics import fetch_assistant_unique_users_total
from ee.onyx.db.analytics import fetch_onyxbot_analytics
from ee.onyx.db.analytics import fetch_per_user_query_analytics
from ee.onyx.db.analytics import fetch_persona_message_analytics
from ee.onyx.db.analytics import fetch_persona_unique_users
from ee.onyx.db.analytics import fetch_query_analytics
from ee.onyx.db.analytics import user_can_view_assistant_stats
from onyx.auth.users import current_admin_user
from onyx.auth.users import current_user
from onyx.db.engine import get_session
from onyx.db.models import User

@@ -198,76 +191,3 @@ def get_persona_unique_users(
            )
        )
    return unique_user_counts


class AssistantDailyUsageResponse(BaseModel):
    date: datetime.date
    total_messages: int
    total_unique_users: int


class AssistantStatsResponse(BaseModel):
    daily_stats: List[AssistantDailyUsageResponse]
    total_messages: int
    total_unique_users: int


@router.get("/assistant/{assistant_id}/stats")
def get_assistant_stats(
    assistant_id: int,
    start: datetime.datetime | None = None,
    end: datetime.datetime | None = None,
    user: User | None = Depends(current_user),
    db_session: Session = Depends(get_session),
) -> AssistantStatsResponse:
    """
    Returns daily message and unique user counts for a user's assistant,
    along with the overall total messages and total distinct users.
    """
    start = start or (
        datetime.datetime.utcnow() - datetime.timedelta(days=_DEFAULT_LOOKBACK_DAYS)
    )
    end = end or datetime.datetime.utcnow()
    print("current user")
    print(user)

    if not user_can_view_assistant_stats(db_session, user, assistant_id):
        raise HTTPException(
            status_code=403, detail="Not allowed to access this assistant's stats."
        )

    # Pull daily usage from the DB calls
    messages_data = fetch_assistant_message_analytics(
        db_session, assistant_id, start, end
    )
    unique_users_data = fetch_assistant_unique_users(
        db_session, assistant_id, start, end
    )

    # Map each day => (messages, unique_users).
    daily_messages_map = {date: count for count, date in messages_data}
    daily_unique_users_map = {date: count for count, date in unique_users_data}
    all_dates = set(daily_messages_map.keys()) | set(daily_unique_users_map.keys())

    # Merge both sets of metrics by date
    daily_results: list[AssistantDailyUsageResponse] = []
    for date in sorted(all_dates):
        daily_results.append(
            AssistantDailyUsageResponse(
                date=date,
                total_messages=daily_messages_map.get(date, 0),
                total_unique_users=daily_unique_users_map.get(date, 0),
            )
        )

    # Now pull a single total distinct user count across the entire time range
    total_msgs = sum(d.total_messages for d in daily_results)
    total_users = fetch_assistant_unique_users_total(
        db_session, assistant_id, start, end
    )

    return AssistantStatsResponse(
        daily_stats=daily_results,
        total_messages=total_msgs,
        total_unique_users=total_users,
    )

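A hedged example of calling the new stats endpoint added above. The host, route prefix, and session cookie are assumptions for illustration; only the /assistant/{assistant_id}/stats route itself comes from the hunk.

import requests

resp = requests.get(
    "http://localhost:8080/api/analytics/assistant/1/stats",  # prefix is a guess
    params={"start": "2024-01-01T00:00:00", "end": "2024-02-01T00:00:00"},
    cookies={"fastapiusersauth": "<session-token>"},  # hypothetical auth cookie
)
resp.raise_for_status()
stats = resp.json()
print(stats["total_messages"], stats["total_unique_users"])
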
@@ -2,16 +2,15 @@ import logging
from collections.abc import Awaitable
from collections.abc import Callable

import jwt
from fastapi import FastAPI
from fastapi import HTTPException
from fastapi import Request
from fastapi import Response

from ee.onyx.auth.users import decode_anonymous_user_jwt_token
from ee.onyx.configs.app_configs import ANONYMOUS_USER_COOKIE_NAME
from onyx.auth.api_key import extract_tenant_from_api_key_header
from onyx.configs.app_configs import USER_AUTH_SECRET
from onyx.db.engine import is_valid_schema_name
from onyx.redis.redis_pool import retrieve_auth_token_data_from_redis
from shared_configs.configs import MULTI_TENANT
from shared_configs.configs import POSTGRES_DEFAULT_SCHEMA
from shared_configs.contextvars import CURRENT_TENANT_ID_CONTEXTVAR
@@ -23,11 +22,11 @@ def add_tenant_id_middleware(app: FastAPI, logger: logging.LoggerAdapter) -> Non
        request: Request, call_next: Callable[[Request], Awaitable[Response]]
    ) -> Response:
        try:
            if MULTI_TENANT:
                tenant_id = await _get_tenant_id_from_request(request, logger)
            else:
                tenant_id = POSTGRES_DEFAULT_SCHEMA

            tenant_id = (
                _get_tenant_id_from_request(request, logger)
                if MULTI_TENANT
                else POSTGRES_DEFAULT_SCHEMA
            )
            CURRENT_TENANT_ID_CONTEXTVAR.set(tenant_id)
            return await call_next(request)

@@ -36,46 +35,27 @@ def add_tenant_id_middleware(app: FastAPI, logger: logging.LoggerAdapter) -> Non
            raise


async def _get_tenant_id_from_request(
    request: Request, logger: logging.LoggerAdapter
) -> str:
    """
    Attempt to extract tenant_id from:
    1) The API key header
    2) The Redis-based token (stored in Cookie: fastapiusersauth)
    Fallback: POSTGRES_DEFAULT_SCHEMA
    """
    # Check for API key
def _get_tenant_id_from_request(request: Request, logger: logging.LoggerAdapter) -> str:
    # First check for API key
    tenant_id = extract_tenant_from_api_key_header(request)
    if tenant_id:
    if tenant_id is not None:
        return tenant_id

    # Check for anonymous user cookie
    anonymous_user_cookie = request.cookies.get(ANONYMOUS_USER_COOKIE_NAME)
    if anonymous_user_cookie:
        try:
            anonymous_user_data = decode_anonymous_user_jwt_token(anonymous_user_cookie)
            return anonymous_user_data.get("tenant_id", POSTGRES_DEFAULT_SCHEMA)
        except Exception as e:
            logger.error(f"Error decoding anonymous user cookie: {str(e)}")
            # Continue and attempt to authenticate
    # Check for cookie-based auth
    token = request.cookies.get("fastapiusersauth")
    if not token:
        return POSTGRES_DEFAULT_SCHEMA

    try:
        # Look up token data in Redis
        token_data = await retrieve_auth_token_data_from_redis(request)
        payload = jwt.decode(
            token,
            USER_AUTH_SECRET,
            audience=["fastapi-users:auth"],
            algorithms=["HS256"],
        )
        tenant_id_from_payload = payload.get("tenant_id", POSTGRES_DEFAULT_SCHEMA)

        if not token_data:
            logger.debug(
                "Token data not found or expired in Redis, defaulting to POSTGRES_DEFAULT_SCHEMA"
            )
            # Return POSTGRES_DEFAULT_SCHEMA, so non-authenticated requests are sent to the default schema
            # The CURRENT_TENANT_ID_CONTEXTVAR is initialized with POSTGRES_DEFAULT_SCHEMA,
            # so we maintain consistency by returning it here when no valid tenant is found.
            return POSTGRES_DEFAULT_SCHEMA

        tenant_id_from_payload = token_data.get("tenant_id", POSTGRES_DEFAULT_SCHEMA)

        # Since token_data.get() can return None, ensure we have a string
        # Since payload.get() can return None, ensure we have a string
        tenant_id = (
            str(tenant_id_from_payload)
            if tenant_id_from_payload is not None
@@ -87,6 +67,9 @@ async def _get_tenant_id_from_request(

        return tenant_id

    except jwt.InvalidTokenError:
        return POSTGRES_DEFAULT_SCHEMA

    except Exception as e:
        logger.error(f"Unexpected error in _get_tenant_id_from_request: {str(e)}")
        logger.error(f"Unexpected error in set_tenant_id_middleware: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")

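The middleware hunks above establish a precedence order for tenant resolution. A minimal sketch of that order (function name, argument names, and the "public" default are illustrative stand-ins, not the real implementation):

def resolve_tenant(
    api_key_tenant: str | None,
    anon_cookie_tenant: str | None,
    redis_token_tenant: str | None,
    default: str = "public",
) -> str:
    # 1) API key header, 2) anonymous-user cookie, 3) Redis-backed auth token, 4) default schema
    if api_key_tenant is not None:
        return api_key_tenant
    if anon_cookie_tenant is not None:
        return anon_cookie_tenant
    if redis_token_tenant is not None:
        return redis_token_tenant
    return default

print(resolve_tenant(None, None, "tenant_abc"))  # tenant_abc
print(resolve_tenant(None, None, None))          # public
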
@@ -1,7 +1,5 @@
|
||||
import base64
|
||||
import json
|
||||
import uuid
|
||||
from typing import Any
|
||||
from typing import cast
|
||||
|
||||
import requests
|
||||
@@ -12,29 +10,11 @@ from fastapi.responses import JSONResponse
|
||||
from pydantic import BaseModel
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from ee.onyx.configs.app_configs import OAUTH_CONFLUENCE_CLIENT_ID
|
||||
from ee.onyx.configs.app_configs import OAUTH_CONFLUENCE_CLIENT_SECRET
|
||||
from ee.onyx.configs.app_configs import OAUTH_GOOGLE_DRIVE_CLIENT_ID
|
||||
from ee.onyx.configs.app_configs import OAUTH_GOOGLE_DRIVE_CLIENT_SECRET
|
||||
from ee.onyx.configs.app_configs import OAUTH_SLACK_CLIENT_ID
|
||||
from ee.onyx.configs.app_configs import OAUTH_SLACK_CLIENT_SECRET
|
||||
from onyx.auth.users import current_user
|
||||
from onyx.configs.app_configs import WEB_DOMAIN
|
||||
from onyx.configs.constants import DocumentSource
|
||||
from onyx.connectors.google_utils.google_auth import get_google_oauth_creds
|
||||
from onyx.connectors.google_utils.google_auth import sanitize_oauth_credentials
|
||||
from onyx.connectors.google_utils.shared_constants import (
|
||||
DB_CREDENTIALS_AUTHENTICATION_METHOD,
|
||||
)
|
||||
from onyx.connectors.google_utils.shared_constants import (
|
||||
DB_CREDENTIALS_DICT_TOKEN_KEY,
|
||||
)
|
||||
from onyx.connectors.google_utils.shared_constants import (
|
||||
DB_CREDENTIALS_PRIMARY_ADMIN_KEY,
|
||||
)
|
||||
from onyx.connectors.google_utils.shared_constants import (
|
||||
GoogleOAuthAuthenticationMethod,
|
||||
)
|
||||
from onyx.db.credentials import create_credential
|
||||
from onyx.db.engine import get_current_tenant_id
|
||||
from onyx.db.engine import get_session
|
||||
@@ -82,7 +62,14 @@ class SlackOAuth:
|
||||
|
||||
@classmethod
|
||||
def generate_oauth_url(cls, state: str) -> str:
|
||||
return cls._generate_oauth_url_helper(cls.REDIRECT_URI, state)
|
||||
url = (
|
||||
f"https://slack.com/oauth/v2/authorize"
|
||||
f"?client_id={cls.CLIENT_ID}"
|
||||
f"&redirect_uri={cls.REDIRECT_URI}"
|
||||
f"&scope={cls.BOT_SCOPE}"
|
||||
f"&state={state}"
|
||||
)
|
||||
return url
|
||||
|
||||
@classmethod
|
||||
def generate_dev_oauth_url(cls, state: str) -> str:
|
||||
@@ -90,14 +77,10 @@ class SlackOAuth:
|
||||
- https://www.nango.dev/blog/oauth-redirects-on-localhost-with-https
|
||||
"""
|
||||
|
||||
return cls._generate_oauth_url_helper(cls.DEV_REDIRECT_URI, state)
|
||||
|
||||
@classmethod
|
||||
def _generate_oauth_url_helper(cls, redirect_uri: str, state: str) -> str:
|
||||
url = (
|
||||
f"https://slack.com/oauth/v2/authorize"
|
||||
f"?client_id={cls.CLIENT_ID}"
|
||||
f"&redirect_uri={redirect_uri}"
|
||||
f"&redirect_uri={cls.DEV_REDIRECT_URI}"
|
||||
f"&scope={cls.BOT_SCOPE}"
|
||||
f"&state={state}"
|
||||
)
|
||||
@@ -119,151 +102,82 @@ class SlackOAuth:
|
||||
return session
|
||||
|
||||
|
||||
class ConfluenceCloudOAuth:
|
||||
"""work in progress"""
|
||||
# Work in progress
|
||||
# class ConfluenceCloudOAuth:
|
||||
# """work in progress"""
|
||||
|
||||
# https://developer.atlassian.com/cloud/confluence/oauth-2-3lo-apps/
|
||||
# # https://developer.atlassian.com/cloud/confluence/oauth-2-3lo-apps/
|
||||
|
||||
class OAuthSession(BaseModel):
|
||||
"""Stored in redis to be looked up on callback"""
|
||||
# class OAuthSession(BaseModel):
|
||||
# """Stored in redis to be looked up on callback"""
|
||||
|
||||
email: str
|
||||
redirect_on_success: str | None # Where to send the user if OAuth flow succeeds
|
||||
# email: str
|
||||
# redirect_on_success: str | None # Where to send the user if OAuth flow succeeds
|
||||
|
||||
CLIENT_ID = OAUTH_CONFLUENCE_CLIENT_ID
|
||||
CLIENT_SECRET = OAUTH_CONFLUENCE_CLIENT_SECRET
|
||||
TOKEN_URL = "https://auth.atlassian.com/oauth/token"
|
||||
# CLIENT_ID = OAUTH_CONFLUENCE_CLIENT_ID
|
||||
# CLIENT_SECRET = OAUTH_CONFLUENCE_CLIENT_SECRET
|
||||
# TOKEN_URL = "https://auth.atlassian.com/oauth/token"
|
||||
|
||||
# All read scopes per https://developer.atlassian.com/cloud/confluence/scopes-for-oauth-2-3LO-and-forge-apps/
|
||||
CONFLUENCE_OAUTH_SCOPE = (
|
||||
"read:confluence-props%20"
|
||||
"read:confluence-content.all%20"
|
||||
"read:confluence-content.summary%20"
|
||||
"read:confluence-content.permission%20"
|
||||
"read:confluence-user%20"
|
||||
"read:confluence-groups%20"
|
||||
"readonly:content.attachment:confluence"
|
||||
)
|
||||
# # All read scopes per https://developer.atlassian.com/cloud/confluence/scopes-for-oauth-2-3LO-and-forge-apps/
|
||||
# CONFLUENCE_OAUTH_SCOPE = (
|
||||
# "read:confluence-props%20"
|
||||
# "read:confluence-content.all%20"
|
||||
# "read:confluence-content.summary%20"
|
||||
# "read:confluence-content.permission%20"
|
||||
# "read:confluence-user%20"
|
||||
# "read:confluence-groups%20"
|
||||
# "readonly:content.attachment:confluence"
|
||||
# )
|
||||
|
||||
REDIRECT_URI = f"{WEB_DOMAIN}/admin/connectors/confluence/oauth/callback"
|
||||
DEV_REDIRECT_URI = f"https://redirectmeto.com/{REDIRECT_URI}"
|
||||
# REDIRECT_URI = f"{WEB_DOMAIN}/admin/connectors/confluence/oauth/callback"
|
||||
# DEV_REDIRECT_URI = f"https://redirectmeto.com/{REDIRECT_URI}"
|
||||
|
||||
# eventually for Confluence Data Center
|
||||
# oauth_url = (
|
||||
# f"http://localhost:8090/rest/oauth/v2/authorize?client_id={CONFLUENCE_OAUTH_CLIENT_ID}"
|
||||
# f"&scope={CONFLUENCE_OAUTH_SCOPE_2}"
|
||||
# f"&redirect_uri={redirectme_uri}"
|
||||
# )
|
||||
# # eventually for Confluence Data Center
|
||||
# # oauth_url = (
|
||||
# # f"http://localhost:8090/rest/oauth/v2/authorize?client_id={CONFLUENCE_OAUTH_CLIENT_ID}"
|
||||
# # f"&scope={CONFLUENCE_OAUTH_SCOPE_2}"
|
||||
# # f"&redirect_uri={redirectme_uri}"
|
||||
# # )
|
||||
|
||||
@classmethod
|
||||
def generate_oauth_url(cls, state: str) -> str:
|
||||
return cls._generate_oauth_url_helper(cls.REDIRECT_URI, state)
|
||||
# @classmethod
|
||||
# def generate_oauth_url(cls, state: str) -> str:
|
||||
# return cls._generate_oauth_url_helper(cls.REDIRECT_URI, state)
|
||||
|
||||
@classmethod
|
||||
def generate_dev_oauth_url(cls, state: str) -> str:
|
||||
"""dev mode workaround for localhost testing
|
||||
- https://www.nango.dev/blog/oauth-redirects-on-localhost-with-https
|
||||
"""
|
||||
return cls._generate_oauth_url_helper(cls.DEV_REDIRECT_URI, state)
|
||||
# @classmethod
|
||||
# def generate_dev_oauth_url(cls, state: str) -> str:
|
||||
# """dev mode workaround for localhost testing
|
||||
# - https://www.nango.dev/blog/oauth-redirects-on-localhost-with-https
|
||||
# """
|
||||
# return cls._generate_oauth_url_helper(cls.DEV_REDIRECT_URI, state)
|
||||
|
||||
@classmethod
|
||||
def _generate_oauth_url_helper(cls, redirect_uri: str, state: str) -> str:
|
||||
url = (
|
||||
"https://auth.atlassian.com/authorize"
|
||||
f"?audience=api.atlassian.com"
|
||||
f"&client_id={cls.CLIENT_ID}"
|
||||
f"&redirect_uri={redirect_uri}"
|
||||
f"&scope={cls.CONFLUENCE_OAUTH_SCOPE}"
|
||||
f"&state={state}"
|
||||
"&response_type=code"
|
||||
"&prompt=consent"
|
||||
)
|
||||
return url
|
||||
|
||||
@classmethod
|
||||
def session_dump_json(cls, email: str, redirect_on_success: str | None) -> str:
|
||||
"""Temporary state to store in redis. to be looked up on auth response.
|
||||
Returns a json string.
|
||||
"""
|
||||
session = ConfluenceCloudOAuth.OAuthSession(
|
||||
email=email, redirect_on_success=redirect_on_success
|
||||
)
|
||||
return session.model_dump_json()
|
||||
|
||||
@classmethod
|
||||
def parse_session(cls, session_json: str) -> OAuthSession:
    session = ConfluenceCloudOAuth.OAuthSession.model_validate_json(session_json)
    return session
|
||||
|
||||
|
||||
class GoogleDriveOAuth:
|
||||
# https://developers.google.com/identity/protocols/oauth2
|
||||
# https://developers.google.com/identity/protocols/oauth2/web-server
|
||||
|
||||
class OAuthSession(BaseModel):
|
||||
"""Stored in redis to be looked up on callback"""
|
||||
|
||||
email: str
|
||||
redirect_on_success: str | None # Where to send the user if OAuth flow succeeds
|
||||
|
||||
CLIENT_ID = OAUTH_GOOGLE_DRIVE_CLIENT_ID
|
||||
CLIENT_SECRET = OAUTH_GOOGLE_DRIVE_CLIENT_SECRET
|
||||
|
||||
TOKEN_URL = "https://oauth2.googleapis.com/token"
|
||||
|
||||
# SCOPE is per https://docs.onyx.app/connectors/google-drive
|
||||
# TODO: Merge with or use google_utils.GOOGLE_SCOPES
|
||||
SCOPE = (
|
||||
"https://www.googleapis.com/auth/drive.readonly%20"
|
||||
"https://www.googleapis.com/auth/drive.metadata.readonly%20"
|
||||
"https://www.googleapis.com/auth/admin.directory.user.readonly%20"
|
||||
"https://www.googleapis.com/auth/admin.directory.group.readonly"
|
||||
)
|
||||
|
||||
REDIRECT_URI = f"{WEB_DOMAIN}/admin/connectors/google-drive/oauth/callback"
|
||||
DEV_REDIRECT_URI = f"https://redirectmeto.com/{REDIRECT_URI}"
|
||||
|
||||
@classmethod
|
||||
def generate_oauth_url(cls, state: str) -> str:
|
||||
return cls._generate_oauth_url_helper(cls.REDIRECT_URI, state)
|
||||
|
||||
@classmethod
|
||||
def generate_dev_oauth_url(cls, state: str) -> str:
|
||||
"""dev mode workaround for localhost testing
|
||||
- https://www.nango.dev/blog/oauth-redirects-on-localhost-with-https
|
||||
"""
|
||||
|
||||
return cls._generate_oauth_url_helper(cls.DEV_REDIRECT_URI, state)
|
||||
|
||||
@classmethod
|
||||
def _generate_oauth_url_helper(cls, redirect_uri: str, state: str) -> str:
|
||||
# without prompt=consent, a refresh token is only issued the first time the user approves
|
||||
url = (
|
||||
f"https://accounts.google.com/o/oauth2/v2/auth"
|
||||
f"?client_id={cls.CLIENT_ID}"
|
||||
f"&redirect_uri={redirect_uri}"
|
||||
"&response_type=code"
|
||||
f"&scope={cls.SCOPE}"
|
||||
"&access_type=offline"
|
||||
f"&state={state}"
|
||||
"&prompt=consent"
|
||||
)
|
||||
return url
|
||||
|
||||
@classmethod
|
||||
def session_dump_json(cls, email: str, redirect_on_success: str | None) -> str:
|
||||
"""Temporary state to store in redis. to be looked up on auth response.
|
||||
Returns a json string.
|
||||
"""
|
||||
session = GoogleDriveOAuth.OAuthSession(
|
||||
email=email, redirect_on_success=redirect_on_success
|
||||
)
|
||||
return session.model_dump_json()
|
||||
|
||||
@classmethod
|
||||
def parse_session(cls, session_json: str) -> OAuthSession:
|
||||
session = GoogleDriveOAuth.OAuthSession.model_validate_json(session_json)
|
||||
return session
|
||||
|
||||
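# Editor's usage sketch (illustrative, not part of the module): the two URL
# helpers above differ only in which redirect URI they embed. "example-state"
# is a placeholder for the urlsafe-b64 UUID generated in
# prepare_authorization_request further down.
_example_authorize_url = GoogleDriveOAuth.generate_oauth_url(state="example-state")
_example_dev_authorize_url = GoogleDriveOAuth.generate_dev_oauth_url(state="example-state")
# Both URLs point at accounts.google.com/o/oauth2/v2/auth with
# access_type=offline and prompt=consent so that a refresh token is issued.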
|
||||
@router.post("/prepare-authorization-request")
|
||||
@@ -278,11 +192,8 @@ def prepare_authorization_request(
|
||||
Example: https://www.oauth.com/oauth2-servers/authorization/the-authorization-request/
|
||||
"""
|
||||
|
||||
# create random oauth state param for security and to retrieve user data later
|
||||
oauth_uuid = uuid.uuid4()
|
||||
oauth_uuid_str = str(oauth_uuid)
|
||||
|
||||
# urlsafe b64 encode the uuid for the oauth url
|
||||
oauth_state = (
|
||||
base64.urlsafe_b64encode(oauth_uuid.bytes).rstrip(b"=").decode("utf-8")
|
||||
)
|
||||
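# Editor's note (sketch): the state round trip is symmetric. The callback
# handlers below restore the stripped "=" padding and decode the value back
# to the original UUID:
#     padded = oauth_state + "=" * (-len(oauth_state) % 4)
#     uuid.UUID(bytes=base64.urlsafe_b64decode(padded)) == oauth_uuid  # True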
@@ -292,11 +203,6 @@ def prepare_authorization_request(
|
||||
session = SlackOAuth.session_dump_json(
|
||||
email=user.email, redirect_on_success=redirect_on_success
|
||||
)
|
||||
elif connector == DocumentSource.GOOGLE_DRIVE:
|
||||
oauth_url = GoogleDriveOAuth.generate_oauth_url(oauth_state)
|
||||
session = GoogleDriveOAuth.session_dump_json(
|
||||
email=user.email, redirect_on_success=redirect_on_success
|
||||
)
|
||||
# elif connector == DocumentSource.CONFLUENCE:
|
||||
# oauth_url = ConfluenceCloudOAuth.generate_oauth_url(oauth_state)
|
||||
# session = ConfluenceCloudOAuth.session_dump_json(
|
||||
@@ -304,6 +210,8 @@ def prepare_authorization_request(
|
||||
# )
|
||||
# elif connector == DocumentSource.JIRA:
|
||||
# oauth_url = JiraCloudOAuth.generate_dev_oauth_url(oauth_state)
|
||||
# elif connector == DocumentSource.GOOGLE_DRIVE:
|
||||
# oauth_url = GoogleDriveOAuth.generate_dev_oauth_url(oauth_state)
|
||||
else:
|
||||
oauth_url = None
|
||||
|
||||
@@ -315,7 +223,6 @@ def prepare_authorization_request(
|
||||
|
||||
r = get_redis_client(tenant_id=tenant_id)
|
||||
|
||||
# store important session state to retrieve when the user is redirected back
# 10 min is the max we want an oauth flow to be valid
r.set(f"da_oauth:{oauth_uuid_str}", session, ex=600)
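# Editor's note (sketch): the serialized session lives under da_oauth:<uuid>
# for at most 10 minutes; the callback handlers read it back once with
# r.get(f"da_oauth:{oauth_uuid_str}") and delete the key in a finally block,
# so an expired or already-consumed state simply fails the flow.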
|
||||
@@ -514,116 +421,3 @@ def handle_slack_oauth_callback(
|
||||
# "redirect_on_success": session.redirect_on_success,
|
||||
# }
|
||||
# )
|
||||
|
||||
|
||||
@router.post("/connector/google-drive/callback")
|
||||
def handle_google_drive_oauth_callback(
|
||||
code: str,
|
||||
state: str,
|
||||
user: User = Depends(current_user),
|
||||
db_session: Session = Depends(get_session),
|
||||
tenant_id: str | None = Depends(get_current_tenant_id),
|
||||
) -> JSONResponse:
|
||||
if not GoogleDriveOAuth.CLIENT_ID or not GoogleDriveOAuth.CLIENT_SECRET:
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail="Google Drive client ID or client secret is not configured.",
|
||||
)
|
||||
|
||||
r = get_redis_client(tenant_id=tenant_id)
|
||||
|
||||
# recover the state
|
||||
padded_state = state + "=" * (
|
||||
-len(state) % 4
|
||||
) # Add padding back (Base64 decoding requires padding)
|
||||
uuid_bytes = base64.urlsafe_b64decode(
|
||||
padded_state
|
||||
) # Decode the Base64 string back to bytes
|
||||
|
||||
# Convert bytes back to a UUID
|
||||
oauth_uuid = uuid.UUID(bytes=uuid_bytes)
|
||||
oauth_uuid_str = str(oauth_uuid)
|
||||
|
||||
r_key = f"da_oauth:{oauth_uuid_str}"
|
||||
|
||||
session_json_bytes = cast(bytes, r.get(r_key))
|
||||
if not session_json_bytes:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"Google Drive OAuth failed - OAuth state key not found: key={r_key}",
|
||||
)
|
||||
|
||||
session_json = session_json_bytes.decode("utf-8")
|
||||
try:
|
||||
session = GoogleDriveOAuth.parse_session(session_json)
|
||||
|
||||
# Exchange the authorization code for an access token
|
||||
response = requests.post(
|
||||
GoogleDriveOAuth.TOKEN_URL,
|
||||
headers={"Content-Type": "application/x-www-form-urlencoded"},
|
||||
data={
|
||||
"client_id": GoogleDriveOAuth.CLIENT_ID,
|
||||
"client_secret": GoogleDriveOAuth.CLIENT_SECRET,
|
||||
"code": code,
|
||||
"redirect_uri": GoogleDriveOAuth.REDIRECT_URI,
|
||||
"grant_type": "authorization_code",
|
||||
},
|
||||
)
|
||||
|
||||
response.raise_for_status()
|
||||
|
||||
authorization_response: dict[str, Any] = response.json()
|
||||
|
||||
# the connector wants us to store the json in its authorized_user_info format
|
||||
# returned from OAuthCredentials.get_authorized_user_info().
|
||||
# So refresh immediately via get_google_oauth_creds with the params filled in
|
||||
# from fields in authorization_response to get the json we need
|
||||
authorized_user_info = {}
|
||||
authorized_user_info["client_id"] = OAUTH_GOOGLE_DRIVE_CLIENT_ID
|
||||
authorized_user_info["client_secret"] = OAUTH_GOOGLE_DRIVE_CLIENT_SECRET
|
||||
authorized_user_info["refresh_token"] = authorization_response["refresh_token"]
|
||||
|
||||
token_json_str = json.dumps(authorized_user_info)
|
||||
oauth_creds = get_google_oauth_creds(
|
||||
token_json_str=token_json_str, source=DocumentSource.GOOGLE_DRIVE
|
||||
)
|
||||
if not oauth_creds:
|
||||
raise RuntimeError("get_google_oauth_creds returned None.")
|
||||
|
||||
# save off the credentials
|
||||
oauth_creds_sanitized_json_str = sanitize_oauth_credentials(oauth_creds)
|
||||
|
||||
credential_dict: dict[str, str] = {}
|
||||
credential_dict[DB_CREDENTIALS_DICT_TOKEN_KEY] = oauth_creds_sanitized_json_str
|
||||
credential_dict[DB_CREDENTIALS_PRIMARY_ADMIN_KEY] = session.email
|
||||
credential_dict[
|
||||
DB_CREDENTIALS_AUTHENTICATION_METHOD
|
||||
] = GoogleOAuthAuthenticationMethod.OAUTH_INTERACTIVE.value
|
||||
|
||||
credential_info = CredentialBase(
|
||||
credential_json=credential_dict,
|
||||
admin_public=True,
|
||||
source=DocumentSource.GOOGLE_DRIVE,
|
||||
name="OAuth (interactive)",
|
||||
)
|
||||
|
||||
create_credential(credential_info, user, db_session)
|
||||
except Exception as e:
|
||||
return JSONResponse(
|
||||
status_code=500,
|
||||
content={
|
||||
"success": False,
|
||||
"message": f"An error occurred during Google Drive OAuth: {str(e)}",
|
||||
},
|
||||
)
|
||||
finally:
|
||||
r.delete(r_key)
|
||||
|
||||
# return the result
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": True,
|
||||
"message": "Google Drive OAuth completed successfully.",
|
||||
"redirect_on_success": session.redirect_on_success,
|
||||
}
|
||||
)
|
||||
|
||||
@@ -179,7 +179,6 @@ def handle_simplified_chat_message(
|
||||
chunks_below=0,
|
||||
full_doc=chat_message_req.full_doc,
|
||||
structured_response_format=chat_message_req.structured_response_format,
|
||||
use_pro_search=chat_message_req.use_pro_search,
|
||||
)
|
||||
|
||||
packets = stream_chat_message_objects(
|
||||
@@ -302,7 +301,6 @@ def handle_send_message_simple_with_history(
|
||||
chunks_below=0,
|
||||
full_doc=req.full_doc,
|
||||
structured_response_format=req.structured_response_format,
|
||||
use_pro_search=req.use_pro_search,
|
||||
)
|
||||
|
||||
packets = stream_chat_message_objects(
|
||||
|
||||
@@ -57,9 +57,6 @@ class BasicCreateChatMessageRequest(ChunkContext):
|
||||
# https://platform.openai.com/docs/guides/structured-outputs/introduction
|
||||
structured_response_format: dict | None = None
|
||||
|
||||
# If True, uses pro search instead of basic search
|
||||
use_pro_search: bool = False
|
||||
|
||||
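# Editor's sketch of what a caller might put in structured_response_format
# (hypothetical payload; the exact schema shape is whatever the chosen
# OpenAI-compatible model accepts for structured outputs):
_example_structured_response_format = {
    "type": "json_schema",
    "json_schema": {
        "name": "summary",
        "schema": {
            "type": "object",
            "properties": {"summary": {"type": "string"}},
            "required": ["summary"],
        },
    },
}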
|
||||
class BasicCreateChatMessageWithHistoryRequest(ChunkContext):
|
||||
# Last element is the new query. All previous elements are historical context
|
||||
@@ -74,8 +71,6 @@ class BasicCreateChatMessageWithHistoryRequest(ChunkContext):
|
||||
# only works if using an OpenAI model. See the following for more details:
|
||||
# https://platform.openai.com/docs/guides/structured-outputs/introduction
|
||||
structured_response_format: dict | None = None
|
||||
# If True, uses pro search instead of basic search
|
||||
use_pro_search: bool = False
|
||||
|
||||
|
||||
class SimpleDoc(BaseModel):
|
||||
@@ -128,9 +123,6 @@ class OneShotQARequest(ChunkContext):
|
||||
# If True, skips generating an AI response to the search query
|
||||
skip_gen_ai_answer_generation: bool = False
|
||||
|
||||
# If True, uses pro search instead of basic search
|
||||
use_pro_search: bool = False
|
||||
|
||||
@model_validator(mode="after")
|
||||
def check_persona_fields(self) -> "OneShotQARequest":
|
||||
if self.persona_override_config is None and self.persona_id is None:
|
||||
|
||||
@@ -196,7 +196,6 @@ def get_answer_stream(
|
||||
retrieval_details=query_request.retrieval_options,
|
||||
rerank_settings=query_request.rerank_settings,
|
||||
db_session=db_session,
|
||||
use_pro_search=query_request.use_pro_search,
|
||||
)
|
||||
|
||||
packets = stream_chat_message_objects(
|
||||
|
||||
@@ -13,8 +13,9 @@ from ee.onyx.db.usage_export import get_all_empty_chat_message_entries
|
||||
from ee.onyx.db.usage_export import write_usage_report
|
||||
from ee.onyx.server.reporting.usage_export_models import UsageReportMetadata
|
||||
from ee.onyx.server.reporting.usage_export_models import UserSkeleton
|
||||
from onyx.auth.schemas import UserStatus
|
||||
from onyx.configs.constants import FileOrigin
|
||||
from onyx.db.users import get_all_users
|
||||
from onyx.db.users import list_users
|
||||
from onyx.file_store.constants import MAX_IN_MEMORY_SIZE
|
||||
from onyx.file_store.file_store import FileStore
|
||||
from onyx.file_store.file_store import get_default_file_store
|
||||
@@ -83,15 +84,15 @@ def generate_user_report(
|
||||
max_size=MAX_IN_MEMORY_SIZE, mode="w+"
|
||||
) as temp_file:
|
||||
csvwriter = csv.writer(temp_file, delimiter=",")
|
||||
csvwriter.writerow(["user_id", "is_active"])
|
||||
csvwriter.writerow(["user_id", "status"])
|
||||
|
||||
users = get_all_users(db_session)
|
||||
users = list_users(db_session)
|
||||
for user in users:
|
||||
user_skeleton = UserSkeleton(
|
||||
user_id=str(user.id),
|
||||
is_active=user.is_active,
|
||||
status=UserStatus.LIVE if user.is_active else UserStatus.DEACTIVATED,
|
||||
)
|
||||
csvwriter.writerow([user_skeleton.user_id, user_skeleton.is_active])
|
||||
csvwriter.writerow([user_skeleton.user_id, user_skeleton.status])
|
||||
|
||||
temp_file.seek(0)
|
||||
file_store.save_file(
|
||||
|
||||
@@ -4,6 +4,8 @@ from uuid import UUID
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from onyx.auth.schemas import UserStatus
|
||||
|
||||
|
||||
class FlowType(str, Enum):
|
||||
CHAT = "chat"
|
||||
@@ -20,7 +22,7 @@ class ChatMessageSkeleton(BaseModel):
|
||||
|
||||
class UserSkeleton(BaseModel):
|
||||
user_id: str
|
||||
is_active: bool
|
||||
status: UserStatus
|
||||
|
||||
|
||||
class UsageReportMetadata(BaseModel):
|
||||
|
||||
@@ -1,59 +0,0 @@
|
||||
from sqlalchemy import select
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from onyx.db.models import TenantAnonymousUserPath
|
||||
|
||||
|
||||
def get_anonymous_user_path(tenant_id: str, db_session: Session) -> str | None:
|
||||
result = db_session.execute(
|
||||
select(TenantAnonymousUserPath).where(
|
||||
TenantAnonymousUserPath.tenant_id == tenant_id
|
||||
)
|
||||
)
|
||||
result_scalar = result.scalar_one_or_none()
|
||||
if result_scalar:
|
||||
return result_scalar.anonymous_user_path
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def modify_anonymous_user_path(
|
||||
tenant_id: str, anonymous_user_path: str, db_session: Session
|
||||
) -> None:
|
||||
# Enforce lowercase path at DB operation level
|
||||
anonymous_user_path = anonymous_user_path.lower()
|
||||
|
||||
existing_entry = (
|
||||
db_session.query(TenantAnonymousUserPath).filter_by(tenant_id=tenant_id).first()
|
||||
)
|
||||
|
||||
if existing_entry:
|
||||
existing_entry.anonymous_user_path = anonymous_user_path
|
||||
|
||||
else:
|
||||
new_entry = TenantAnonymousUserPath(
|
||||
tenant_id=tenant_id, anonymous_user_path=anonymous_user_path
|
||||
)
|
||||
db_session.add(new_entry)
|
||||
|
||||
db_session.commit()
|
||||
|
||||
|
||||
def get_tenant_id_for_anonymous_user_path(
|
||||
anonymous_user_path: str, db_session: Session
|
||||
) -> str | None:
|
||||
result = db_session.execute(
|
||||
select(TenantAnonymousUserPath).where(
|
||||
TenantAnonymousUserPath.anonymous_user_path == anonymous_user_path
|
||||
)
|
||||
)
|
||||
result_scalar = result.scalar_one_or_none()
|
||||
if result_scalar:
|
||||
return result_scalar.tenant_id
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def validate_anonymous_user_path(path: str) -> None:
    if not path or "/" in path or not path.replace("-", "").isalnum():
        raise ValueError("Invalid path. Use only letters, numbers, and hyphens.")
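# Editor's sketch of the validator's behavior (illustrative inputs):
# hyphenated alphanumeric paths pass; empty paths, slashes, or any other
# punctuation raise ValueError.
validate_anonymous_user_path("acme-support")  # passes silently
try:
    validate_anonymous_user_path("acme/support")
except ValueError as err:
    print(err)  # Invalid path. Use only letters, numbers, and hyphens.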
@@ -3,124 +3,35 @@ from fastapi import APIRouter
|
||||
from fastapi import Depends
|
||||
from fastapi import HTTPException
|
||||
from fastapi import Response
|
||||
from sqlalchemy.exc import IntegrityError
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from ee.onyx.auth.users import current_cloud_superuser
|
||||
from ee.onyx.auth.users import generate_anonymous_user_jwt_token
|
||||
from ee.onyx.configs.app_configs import ANONYMOUS_USER_COOKIE_NAME
|
||||
from ee.onyx.configs.app_configs import STRIPE_SECRET_KEY
|
||||
from ee.onyx.server.tenants.access import control_plane_dep
|
||||
from ee.onyx.server.tenants.anonymous_user_path import get_anonymous_user_path
|
||||
from ee.onyx.server.tenants.anonymous_user_path import (
|
||||
get_tenant_id_for_anonymous_user_path,
|
||||
)
|
||||
from ee.onyx.server.tenants.anonymous_user_path import modify_anonymous_user_path
|
||||
from ee.onyx.server.tenants.anonymous_user_path import validate_anonymous_user_path
|
||||
from ee.onyx.server.tenants.billing import fetch_billing_information
|
||||
from ee.onyx.server.tenants.billing import fetch_tenant_stripe_information
|
||||
from ee.onyx.server.tenants.models import AnonymousUserPath
|
||||
from ee.onyx.server.tenants.models import BillingInformation
|
||||
from ee.onyx.server.tenants.models import ImpersonateRequest
|
||||
from ee.onyx.server.tenants.models import ProductGatingRequest
|
||||
from ee.onyx.server.tenants.provisioning import delete_user_from_control_plane
|
||||
from ee.onyx.server.tenants.user_mapping import get_tenant_id_for_email
|
||||
from ee.onyx.server.tenants.user_mapping import remove_all_users_from_tenant
|
||||
from ee.onyx.server.tenants.user_mapping import remove_users_from_tenant
|
||||
from onyx.auth.users import anonymous_user_enabled
|
||||
from onyx.auth.users import auth_backend
|
||||
from onyx.auth.users import current_admin_user
|
||||
from onyx.auth.users import get_redis_strategy
|
||||
from onyx.auth.users import optional_user
|
||||
from onyx.auth.users import get_jwt_strategy
|
||||
from onyx.auth.users import User
|
||||
from onyx.configs.app_configs import WEB_DOMAIN
|
||||
from onyx.db.auth import get_user_count
|
||||
from onyx.db.engine import get_current_tenant_id
|
||||
from onyx.db.engine import get_session
|
||||
from onyx.db.engine import get_session_with_tenant
|
||||
from onyx.db.notification import create_notification
|
||||
from onyx.db.users import delete_user_from_db
|
||||
from onyx.db.users import get_user_by_email
|
||||
from onyx.server.manage.models import UserByEmail
|
||||
from onyx.server.settings.store import load_settings
|
||||
from onyx.server.settings.store import store_settings
|
||||
from onyx.utils.logger import setup_logger
|
||||
from shared_configs.contextvars import CURRENT_TENANT_ID_CONTEXTVAR
|
||||
|
||||
stripe.api_key = STRIPE_SECRET_KEY
|
||||
|
||||
logger = setup_logger()
|
||||
router = APIRouter(prefix="/tenants")
|
||||
|
||||
|
||||
@router.get("/anonymous-user-path")
|
||||
async def get_anonymous_user_path_api(
|
||||
tenant_id: str | None = Depends(get_current_tenant_id),
|
||||
_: User | None = Depends(current_admin_user),
|
||||
) -> AnonymousUserPath:
|
||||
if tenant_id is None:
|
||||
raise HTTPException(status_code=404, detail="Tenant not found")
|
||||
|
||||
with get_session_with_tenant(tenant_id=None) as db_session:
|
||||
current_path = get_anonymous_user_path(tenant_id, db_session)
|
||||
|
||||
return AnonymousUserPath(anonymous_user_path=current_path)
|
||||
|
||||
|
||||
@router.post("/anonymous-user-path")
|
||||
async def set_anonymous_user_path_api(
|
||||
anonymous_user_path: str,
|
||||
tenant_id: str = Depends(get_current_tenant_id),
|
||||
_: User | None = Depends(current_admin_user),
|
||||
) -> None:
|
||||
try:
|
||||
validate_anonymous_user_path(anonymous_user_path)
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
|
||||
with get_session_with_tenant(tenant_id=None) as db_session:
|
||||
try:
|
||||
modify_anonymous_user_path(tenant_id, anonymous_user_path, db_session)
|
||||
except IntegrityError:
|
||||
raise HTTPException(
|
||||
status_code=409,
|
||||
detail="The anonymous user path is already in use. Please choose a different path.",
|
||||
)
|
||||
except Exception as e:
|
||||
logger.exception(f"Failed to modify anonymous user path: {str(e)}")
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail="An unexpected error occurred while modifying the anonymous user path",
|
||||
)
|
||||
|
||||
|
||||
@router.post("/anonymous-user")
|
||||
async def login_as_anonymous_user(
|
||||
anonymous_user_path: str,
|
||||
_: User | None = Depends(optional_user),
|
||||
) -> Response:
|
||||
with get_session_with_tenant(tenant_id=None) as db_session:
|
||||
tenant_id = get_tenant_id_for_anonymous_user_path(
|
||||
anonymous_user_path, db_session
|
||||
)
|
||||
if not tenant_id:
|
||||
raise HTTPException(status_code=404, detail="Tenant not found")
|
||||
|
||||
if not anonymous_user_enabled(tenant_id=tenant_id):
|
||||
raise HTTPException(status_code=403, detail="Anonymous user is not enabled")
|
||||
|
||||
token = generate_anonymous_user_jwt_token(tenant_id)
|
||||
|
||||
response = Response()
|
||||
response.set_cookie(
|
||||
key=ANONYMOUS_USER_COOKIE_NAME,
|
||||
value=token,
|
||||
httponly=True,
|
||||
secure=True,
|
||||
samesite="strict",
|
||||
)
|
||||
return response
|
||||
|
||||
|
||||
@router.post("/product-gating")
|
||||
def gate_product(
|
||||
product_gating_request: ProductGatingRequest, _: None = Depends(control_plane_dep)
|
||||
@@ -192,7 +103,7 @@ async def impersonate_user(
|
||||
)
|
||||
if user_to_impersonate is None:
|
||||
raise HTTPException(status_code=404, detail="User not found")
|
||||
token = await get_redis_strategy().write_token(user_to_impersonate)
|
||||
token = await get_jwt_strategy().write_token(user_to_impersonate)
|
||||
|
||||
response = await auth_backend.transport.get_login_response(token)
|
||||
response.set_cookie(
|
||||
@@ -203,48 +114,3 @@ async def impersonate_user(
|
||||
samesite="lax",
|
||||
)
|
||||
return response
|
||||
|
||||
|
||||
@router.post("/leave-organization")
|
||||
async def leave_organization(
|
||||
user_email: UserByEmail,
|
||||
current_user: User | None = Depends(current_admin_user),
|
||||
db_session: Session = Depends(get_session),
|
||||
tenant_id: str = Depends(get_current_tenant_id),
|
||||
) -> None:
|
||||
if current_user is None or current_user.email != user_email.user_email:
|
||||
raise HTTPException(
|
||||
status_code=403, detail="You can only leave the organization as yourself"
|
||||
)
|
||||
|
||||
user_to_delete = get_user_by_email(user_email.user_email, db_session)
|
||||
if user_to_delete is None:
|
||||
raise HTTPException(status_code=404, detail="User not found")
|
||||
|
||||
num_admin_users = await get_user_count(only_admin_users=True)
|
||||
|
||||
should_delete_tenant = num_admin_users == 1
|
||||
|
||||
if should_delete_tenant:
|
||||
logger.info(
|
||||
"Last admin user is leaving the organization. Deleting tenant from control plane."
|
||||
)
|
||||
try:
|
||||
await delete_user_from_control_plane(tenant_id, user_to_delete.email)
|
||||
logger.debug("User deleted from control plane")
|
||||
except Exception as e:
|
||||
logger.exception(
|
||||
f"Failed to delete user from control plane for tenant {tenant_id}: {e}"
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to remove user from control plane: {str(e)}",
|
||||
)
|
||||
|
||||
db_session.expunge(user_to_delete)
|
||||
delete_user_from_db(user_to_delete, db_session)
|
||||
|
||||
if should_delete_tenant:
|
||||
remove_all_users_from_tenant(tenant_id)
|
||||
else:
|
||||
remove_users_from_tenant([user_to_delete.email], tenant_id)
|
||||
|
||||
@@ -46,7 +46,6 @@ def register_tenant_users(tenant_id: str, number_of_users: int) -> stripe.Subscr
|
||||
"""
|
||||
Send a request to the control service to register the number of users for a tenant.
|
||||
"""
|
||||
|
||||
if not STRIPE_PRICE_ID:
|
||||
raise Exception("STRIPE_PRICE_ID is not set")
|
||||
|
||||
|
||||
@@ -39,12 +39,3 @@ class TenantCreationPayload(BaseModel):
|
||||
tenant_id: str
|
||||
email: str
|
||||
referral_source: str | None = None
|
||||
|
||||
|
||||
class TenantDeletionPayload(BaseModel):
|
||||
tenant_id: str
|
||||
email: str
|
||||
|
||||
|
||||
class AnonymousUserPath(BaseModel):
|
||||
anonymous_user_path: str | None
|
||||
|
||||
@@ -15,7 +15,6 @@ from ee.onyx.configs.app_configs import HUBSPOT_TRACKING_URL
|
||||
from ee.onyx.configs.app_configs import OPENAI_DEFAULT_API_KEY
|
||||
from ee.onyx.server.tenants.access import generate_data_plane_token
|
||||
from ee.onyx.server.tenants.models import TenantCreationPayload
|
||||
from ee.onyx.server.tenants.models import TenantDeletionPayload
|
||||
from ee.onyx.server.tenants.schema_management import create_schema_if_not_exists
|
||||
from ee.onyx.server.tenants.schema_management import drop_schema
|
||||
from ee.onyx.server.tenants.schema_management import run_alembic_migrations
|
||||
@@ -186,7 +185,6 @@ async def rollback_tenant_provisioning(tenant_id: str) -> None:
|
||||
try:
|
||||
# Drop the tenant's schema to rollback provisioning
|
||||
drop_schema(tenant_id)
|
||||
|
||||
# Remove tenant mapping
|
||||
with Session(get_sqlalchemy_engine()) as db_session:
|
||||
db_session.query(UserTenantMapping).filter(
|
||||
@@ -322,26 +320,3 @@ async def submit_to_hubspot(
|
||||
|
||||
if response.status_code != 200:
|
||||
logger.error(f"Failed to submit to HubSpot: {response.text}")
|
||||
|
||||
|
||||
async def delete_user_from_control_plane(tenant_id: str, email: str) -> None:
|
||||
token = generate_data_plane_token()
|
||||
headers = {
|
||||
"Authorization": f"Bearer {token}",
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
payload = TenantDeletionPayload(tenant_id=tenant_id, email=email)
|
||||
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.delete(
|
||||
f"{CONTROL_PLANE_API_BASE_URL}/tenants/delete",
|
||||
headers=headers,
|
||||
json=payload.model_dump(),
|
||||
) as response:
|
||||
print(response)
|
||||
if response.status != 200:
|
||||
error_text = await response.text()
|
||||
logger.error(f"Control plane tenant creation failed: {error_text}")
|
||||
raise Exception(
|
||||
f"Failed to delete tenant on control plane: {error_text}"
|
||||
)
|
||||
|
||||
@@ -68,11 +68,3 @@ def remove_users_from_tenant(emails: list[str], tenant_id: str) -> None:
|
||||
f"Failed to remove users from tenant {tenant_id}: {str(e)}"
|
||||
)
|
||||
db_session.rollback()
|
||||
|
||||
|
||||
def remove_all_users_from_tenant(tenant_id: str) -> None:
|
||||
with get_session_with_tenant(POSTGRES_DEFAULT_SCHEMA) as db_session:
|
||||
db_session.query(UserTenantMapping).filter(
|
||||
UserTenantMapping.tenant_id == tenant_id
|
||||
).delete()
|
||||
db_session.commit()
|
||||
|
||||
@@ -83,7 +83,7 @@ def patch_user_group(
|
||||
def set_user_curator(
|
||||
user_group_id: int,
|
||||
set_curator_request: SetCuratorRequest,
|
||||
user: User | None = Depends(current_curator_or_admin_user),
|
||||
_: User | None = Depends(current_admin_user),
|
||||
db_session: Session = Depends(get_session),
|
||||
) -> None:
|
||||
try:
|
||||
@@ -91,7 +91,6 @@ def set_user_curator(
|
||||
db_session=db_session,
|
||||
user_group_id=user_group_id,
|
||||
set_curator_request=set_curator_request,
|
||||
user_making_change=user,
|
||||
)
|
||||
except ValueError as e:
|
||||
logger.error(f"Error setting user curator: {e}")
|
||||
|
||||
@@ -10,7 +10,6 @@ logger = setup_logger()
|
||||
|
||||
|
||||
def posthog_on_error(error: Any, items: Any) -> None:
|
||||
"""Log any PostHog delivery errors."""
|
||||
logger.error(f"PostHog error: {error}, items: {items}")
|
||||
|
||||
|
||||
@@ -25,10 +24,15 @@ posthog = Posthog(
|
||||
def event_telemetry(
|
||||
distinct_id: str, event: str, properties: dict | None = None
|
||||
) -> None:
|
||||
"""Capture and send an event to PostHog, flushing immediately."""
|
||||
logger.info(f"Capturing PostHog event: {distinct_id} {event} {properties}")
|
||||
logger.info(f"Capturing Posthog event: {distinct_id} {event} {properties}")
|
||||
print("API KEY", POSTHOG_API_KEY)
|
||||
print("HOST", POSTHOG_HOST)
|
||||
try:
|
||||
posthog.capture(distinct_id, event, properties)
|
||||
print(type(distinct_id))
|
||||
print(type(event))
|
||||
print(type(properties))
|
||||
response = posthog.capture(distinct_id, event, properties)
|
||||
posthog.flush()
|
||||
print(response)
|
||||
except Exception as e:
|
||||
logger.error(f"Error capturing PostHog event: {e}")
|
||||
logger.error(f"Error capturing Posthog event: {e}")
|
||||
|
||||
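# Editor's usage sketch (event name and properties are made up): telemetry is
# fire-and-forget, and any PostHog delivery error is logged rather than raised.
if __name__ == "__main__":
    event_telemetry(
        distinct_id="tenant-1234",
        event="usage_report_generated",
        properties={"source": "cloud_debug"},
    )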
@@ -1,6 +1,5 @@
|
||||
import asyncio
|
||||
import json
|
||||
import time
|
||||
from types import TracebackType
|
||||
from typing import cast
|
||||
from typing import Optional
|
||||
@@ -321,6 +320,8 @@ async def embed_text(
|
||||
api_url: str | None,
|
||||
api_version: str | None,
|
||||
) -> list[Embedding]:
|
||||
logger.info(f"Embedding {len(texts)} texts with provider: {provider_type}")
|
||||
|
||||
if not all(texts):
|
||||
logger.error("Empty strings provided for embedding")
|
||||
raise ValueError("Empty strings are not allowed for embedding.")
|
||||
@@ -329,17 +330,8 @@ async def embed_text(
|
||||
logger.error("No texts provided for embedding")
|
||||
raise ValueError("No texts provided for embedding.")
|
||||
|
||||
start = time.monotonic()
|
||||
|
||||
total_chars = 0
|
||||
for text in texts:
|
||||
total_chars += len(text)
|
||||
|
||||
if provider_type is not None:
|
||||
logger.info(
|
||||
f"Embedding {len(texts)} texts with {total_chars} total characters with provider: {provider_type}"
|
||||
)
|
||||
|
||||
logger.debug(f"Using cloud provider {provider_type} for embedding")
|
||||
if api_key is None:
|
||||
logger.error("API key not provided for cloud model")
|
||||
raise RuntimeError("API key not provided for cloud model")
|
||||
@@ -371,16 +363,8 @@ async def embed_text(
|
||||
logger.error(error_message)
|
||||
raise ValueError(error_message)
|
||||
|
||||
elapsed = time.monotonic() - start
|
||||
logger.info(
|
||||
f"Successfully embedded {len(texts)} texts with {total_chars} total characters "
|
||||
f"with provider {provider_type} in {elapsed:.2f}"
|
||||
)
|
||||
elif model_name is not None:
|
||||
logger.info(
|
||||
f"Embedding {len(texts)} texts with {total_chars} total characters with local model: {model_name}"
|
||||
)
|
||||
|
||||
logger.debug(f"Using local model {model_name} for embedding")
|
||||
prefixed_texts = [f"{prefix}{text}" for text in texts] if prefix else texts
|
||||
|
||||
local_model = get_embedding_model(
|
||||
@@ -398,17 +382,13 @@ async def embed_text(
|
||||
for embedding in embeddings_vectors
|
||||
]
|
||||
|
||||
elapsed = time.monotonic() - start
|
||||
logger.info(
|
||||
f"Successfully embedded {len(texts)} texts with {total_chars} total characters "
|
||||
f"with local model {model_name} in {elapsed:.2f}"
|
||||
)
|
||||
else:
|
||||
logger.error("Neither model name nor provider specified for embedding")
|
||||
raise ValueError(
|
||||
"Either model name or provider must be provided to run embeddings."
|
||||
)
|
||||
|
||||
logger.info(f"Successfully embedded {len(texts)} texts")
|
||||
return embeddings
|
||||
|
||||
|
||||
@@ -460,8 +440,7 @@ async def process_embed_request(
|
||||
) -> EmbedResponse:
|
||||
if not embed_request.texts:
|
||||
raise HTTPException(status_code=400, detail="No texts to be embedded")
|
||||
|
||||
if not all(embed_request.texts):
|
||||
elif not all(embed_request.texts):
|
||||
raise ValueError("Empty strings are not allowed for embedding.")
|
||||
|
||||
try:
|
||||
@@ -492,12 +471,9 @@ async def process_embed_request(
|
||||
detail=str(e),
|
||||
)
|
||||
except Exception as e:
|
||||
logger.exception(
|
||||
f"Error during embedding process: provider={embed_request.provider_type} model={embed_request.model_name}"
|
||||
)
|
||||
raise HTTPException(
|
||||
status_code=500, detail=f"Error during embedding process: {e}"
|
||||
)
|
||||
exception_detail = f"Error during embedding process:\n{str(e)}"
|
||||
logger.exception(exception_detail)
|
||||
raise HTTPException(status_code=500, detail=exception_detail)
|
||||
|
||||
|
||||
@router.post("/cross-encoder-scores")
|
||||
|
||||
@@ -44,7 +44,6 @@ def _move_files_recursively(source: Path, dest: Path, overwrite: bool = False) -
|
||||
the files in the existing huggingface cache that don't exist in the temp
|
||||
huggingface cache.
|
||||
"""
|
||||
|
||||
for item in source.iterdir():
|
||||
target_path = dest / item.relative_to(source)
|
||||
if item.is_dir():
|
||||
|
||||
@@ -1,95 +0,0 @@
|
||||
from langchain_core.callbacks.manager import dispatch_custom_event
|
||||
from langgraph.graph import END
|
||||
from langgraph.graph import START
|
||||
from langgraph.graph import StateGraph
|
||||
|
||||
from onyx.agent_search.basic.states import BasicInput
|
||||
from onyx.agent_search.basic.states import BasicOutput
|
||||
from onyx.agent_search.basic.states import BasicState
|
||||
from onyx.agent_search.basic.states import BasicStateUpdate
|
||||
from onyx.tools.tool_implementations.search.search_tool import SearchTool
|
||||
from onyx.chat.stream_processing.utils import (
|
||||
map_document_id_order,
|
||||
)
|
||||
|
||||
def basic_graph_builder() -> StateGraph:
|
||||
graph = StateGraph(
|
||||
state_schema=BasicState,
|
||||
input=BasicInput,
|
||||
output=BasicOutput,
|
||||
)
|
||||
|
||||
### Add nodes ###
|
||||
|
||||
graph.add_node(
|
||||
node="get_response",
|
||||
action=get_response,
|
||||
)
|
||||
|
||||
### Add edges ###
|
||||
|
||||
graph.add_edge(start_key=START, end_key="get_response")
|
||||
|
||||
graph.add_conditional_edges("get_response", should_continue, ["get_response", END])
|
||||
graph.add_edge(
|
||||
start_key="get_response",
|
||||
end_key=END,
|
||||
)
|
||||
|
||||
return graph
|
||||
|
||||
|
||||
def should_continue(state: BasicState) -> str:
|
||||
return (
|
||||
END if state["last_llm_call"] is None or state["calls"] > 1 else "get_response"
|
||||
)
|
||||
|
||||
|
||||
def get_response(state: BasicState) -> BasicStateUpdate:
|
||||
llm = state["llm"]
|
||||
current_llm_call = state["last_llm_call"]
|
||||
if current_llm_call is None:
|
||||
raise ValueError("last_llm_call is None")
|
||||
answer_style_config = state["answer_style_config"]
|
||||
response_handler_manager = state["response_handler_manager"]
|
||||
# DEBUG: good breakpoint
|
||||
stream = llm.stream(
|
||||
# For tool calling LLMs, we want to insert the task prompt as part of this flow, this is because the LLM
|
||||
# may choose to not call any tools and just generate the answer, in which case the task prompt is needed.
|
||||
prompt=current_llm_call.prompt_builder.build(),
|
||||
tools=[tool.tool_definition() for tool in current_llm_call.tools] or None,
|
||||
tool_choice=(
|
||||
"required"
|
||||
if current_llm_call.tools and current_llm_call.force_use_tool.force_use
|
||||
else None
|
||||
),
|
||||
structured_response_format=answer_style_config.structured_response_format,
|
||||
)
|
||||
|
||||
for response in response_handler_manager.handle_llm_response(stream):
|
||||
dispatch_custom_event(
|
||||
"basic_response",
|
||||
response,
|
||||
)
|
||||
|
||||
|
||||
next_call = response_handler_manager.next_llm_call(current_llm_call)
|
||||
if next_call is not None:
|
||||
final_search_results, displayed_search_results = SearchTool.get_search_result(
|
||||
next_call
|
||||
) or ([], [])
|
||||
else:
|
||||
final_search_results, displayed_search_results = [], []
|
||||
|
||||
response_handler_manager.answer_handler.update((
|
||||
final_search_results,
|
||||
map_document_id_order(final_search_results),
|
||||
map_document_id_order(displayed_search_results)))
|
||||
return BasicStateUpdate(
|
||||
last_llm_call=next_call,
|
||||
calls=state["calls"] + 1,
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
pass
|
||||
@@ -1,42 +0,0 @@
|
||||
from typing import TypedDict
|
||||
|
||||
from onyx.chat.llm_response_handler import LLMResponseHandlerManager
|
||||
from onyx.chat.models import AnswerStyleConfig
|
||||
from onyx.chat.prompt_builder.build import LLMCall
|
||||
from onyx.llm.chat_llm import LLM
|
||||
|
||||
## Update States
|
||||
|
||||
|
||||
## Graph Input State
|
||||
|
||||
|
||||
class BasicInput(TypedDict):
|
||||
base_question: str
|
||||
last_llm_call: LLMCall | None
|
||||
llm: LLM
|
||||
answer_style_config: AnswerStyleConfig
|
||||
response_handler_manager: LLMResponseHandlerManager
|
||||
calls: int
|
||||
|
||||
|
||||
## Graph Output State
|
||||
|
||||
|
||||
class BasicOutput(TypedDict):
|
||||
pass
|
||||
|
||||
|
||||
class BasicStateUpdate(TypedDict):
|
||||
last_llm_call: LLMCall | None
|
||||
calls: int
|
||||
|
||||
|
||||
## Graph State
|
||||
|
||||
|
||||
class BasicState(
|
||||
BasicInput,
|
||||
BasicOutput,
|
||||
):
|
||||
pass
|
||||
@@ -1,18 +0,0 @@
|
||||
from operator import add
|
||||
from typing import Annotated
|
||||
from typing import TypedDict
|
||||
|
||||
|
||||
class CoreState(TypedDict, total=False):
|
||||
"""
|
||||
This is the core state that is shared across all subgraphs.
|
||||
"""
|
||||
|
||||
base_question: str
|
||||
log_messages: Annotated[list[str], add]
|
||||
|
||||
|
||||
class SubgraphCoreState(TypedDict, total=False):
|
||||
"""
|
||||
This is the core state that is shared across all subgraphs.
|
||||
"""
|
||||
@@ -1,66 +0,0 @@
|
||||
from uuid import UUID
|
||||
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from onyx.db.models import AgentSubQuery
|
||||
from onyx.db.models import AgentSubQuestion
|
||||
|
||||
|
||||
def create_sub_question(
|
||||
db_session: Session,
|
||||
chat_session_id: UUID,
|
||||
primary_message_id: int,
|
||||
sub_question: str,
|
||||
sub_answer: str,
|
||||
) -> AgentSubQuestion:
|
||||
"""Create a new sub-question record in the database."""
|
||||
sub_q = AgentSubQuestion(
|
||||
chat_session_id=chat_session_id,
|
||||
primary_question_id=primary_message_id,
|
||||
sub_question=sub_question,
|
||||
sub_answer=sub_answer,
|
||||
)
|
||||
db_session.add(sub_q)
|
||||
db_session.flush()
|
||||
return sub_q
|
||||
|
||||
|
||||
def create_sub_query(
|
||||
db_session: Session,
|
||||
chat_session_id: UUID,
|
||||
parent_question_id: int,
|
||||
sub_query: str,
|
||||
) -> AgentSubQuery:
|
||||
"""Create a new sub-query record in the database."""
|
||||
sub_q = AgentSubQuery(
|
||||
chat_session_id=chat_session_id,
|
||||
parent_question_id=parent_question_id,
|
||||
sub_query=sub_query,
|
||||
)
|
||||
db_session.add(sub_q)
|
||||
db_session.flush()
|
||||
return sub_q
|
||||
|
||||
|
||||
def get_sub_questions_for_message(
|
||||
db_session: Session,
|
||||
primary_message_id: int,
|
||||
) -> list[AgentSubQuestion]:
|
||||
"""Get all sub-questions for a given primary message."""
|
||||
return (
|
||||
db_session.query(AgentSubQuestion)
|
||||
.filter(AgentSubQuestion.primary_question_id == primary_message_id)
|
||||
.all()
|
||||
)
|
||||
|
||||
|
||||
def get_sub_queries_for_question(
    db_session: Session,
    sub_question_id: int,
) -> list[AgentSubQuery]:
    """Get all sub-queries for a given sub-question."""
    return (
        db_session.query(AgentSubQuery)
        .filter(AgentSubQuery.parent_question_id == sub_question_id)
        .all()
    )
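# Editor's usage sketch (hypothetical IDs; get_session_context_manager is the
# same session factory used elsewhere in this codebase). Assumes AgentSubQuestion
# gets an autoincrement id populated on flush.
if __name__ == "__main__":
    from uuid import uuid4

    from onyx.db.engine import get_session_context_manager

    with get_session_context_manager() as db_session:
        chat_id = uuid4()  # placeholder chat session id
        sub_question = create_sub_question(
            db_session,
            chat_session_id=chat_id,
            primary_message_id=42,  # placeholder message id
            sub_question="What connectors does Onyx support?",
            sub_answer="See the connectors documentation.",
        )
        create_sub_query(
            db_session,
            chat_session_id=chat_id,
            parent_question_id=sub_question.id,
            sub_query="onyx supported connectors",
        )
        db_session.commit()
        print(get_sub_questions_for_message(db_session, primary_message_id=42))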
@@ -1,49 +0,0 @@
|
||||
from dataclasses import dataclass
|
||||
from uuid import UUID
|
||||
|
||||
from pydantic import BaseModel
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from onyx.context.search.models import SearchRequest
|
||||
from onyx.llm.interfaces import LLM
|
||||
from onyx.llm.models import PreviousMessage
|
||||
from onyx.tools.tool_implementations.search.search_tool import SearchTool
|
||||
|
||||
|
||||
@dataclass
|
||||
class ProSearchConfig:
|
||||
"""
|
||||
Configuration for the Pro Search feature.
|
||||
"""
|
||||
|
||||
# The search request that was used to generate the Pro Search
|
||||
search_request: SearchRequest
|
||||
|
||||
primary_llm: LLM
|
||||
fast_llm: LLM
|
||||
search_tool: SearchTool
|
||||
use_agentic_search: bool = False
|
||||
|
||||
# For persisting agent search data
|
||||
chat_session_id: UUID | None = None
|
||||
|
||||
# The message ID of the user message that triggered the Pro Search
|
||||
message_id: int | None = None
|
||||
|
||||
# Whether to persist data for the Pro Search (turned off for testing)
|
||||
use_persistence: bool = True
|
||||
|
||||
# The database session for the Pro Search
|
||||
db_session: Session | None = None
|
||||
|
||||
# Whether to allow creation of refinement questions (and entity extraction, etc.)
|
||||
allow_refinement: bool = False
|
||||
|
||||
# Message history for the current chat session
|
||||
message_history: list[PreviousMessage] | None = None
|
||||
|
||||
|
||||
class AgentDocumentCitations(BaseModel):
|
||||
document_id: str
|
||||
document_title: str
|
||||
link: str
|
||||
@@ -1,26 +0,0 @@
|
||||
from collections.abc import Hashable
|
||||
|
||||
from langgraph.types import Send
|
||||
|
||||
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
|
||||
AnswerQuestionInput,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
|
||||
ExpandedRetrievalInput,
|
||||
)
|
||||
from onyx.utils.logger import setup_logger
|
||||
|
||||
logger = setup_logger()
|
||||
|
||||
|
||||
def send_to_expanded_retrieval(state: AnswerQuestionInput) -> Send | Hashable:
|
||||
logger.debug("sending to expanded retrieval via edge")
|
||||
|
||||
return Send(
|
||||
"initial_sub_question_expanded_retrieval",
|
||||
ExpandedRetrievalInput(
|
||||
question=state["question"],
|
||||
base_search=False,
|
||||
sub_question_id=state["question_id"],
|
||||
),
|
||||
)
|
||||
@@ -1,125 +0,0 @@
|
||||
from langgraph.graph import END
|
||||
from langgraph.graph import START
|
||||
from langgraph.graph import StateGraph
|
||||
|
||||
from onyx.agent_search.pro_search_a.answer_initial_sub_question.edges import (
|
||||
send_to_expanded_retrieval,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.answer_check import (
|
||||
answer_check,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.answer_generation import (
|
||||
answer_generation,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.format_answer import (
|
||||
format_answer,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.ingest_retrieval import (
|
||||
ingest_retrieval,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
|
||||
AnswerQuestionInput,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
|
||||
AnswerQuestionOutput,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
|
||||
AnswerQuestionState,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.graph_builder import (
|
||||
expanded_retrieval_graph_builder,
|
||||
)
|
||||
from onyx.agent_search.shared_graph_utils.utils import get_test_config
|
||||
from onyx.utils.logger import setup_logger
|
||||
|
||||
logger = setup_logger()
|
||||
|
||||
|
||||
def answer_query_graph_builder() -> StateGraph:
|
||||
graph = StateGraph(
|
||||
state_schema=AnswerQuestionState,
|
||||
input=AnswerQuestionInput,
|
||||
output=AnswerQuestionOutput,
|
||||
)
|
||||
|
||||
### Add nodes ###
|
||||
|
||||
expanded_retrieval = expanded_retrieval_graph_builder().compile()
|
||||
graph.add_node(
|
||||
node="initial_sub_question_expanded_retrieval",
|
||||
action=expanded_retrieval,
|
||||
)
|
||||
graph.add_node(
|
||||
node="answer_check",
|
||||
action=answer_check,
|
||||
)
|
||||
graph.add_node(
|
||||
node="answer_generation",
|
||||
action=answer_generation,
|
||||
)
|
||||
graph.add_node(
|
||||
node="format_answer",
|
||||
action=format_answer,
|
||||
)
|
||||
graph.add_node(
|
||||
node="ingest_retrieval",
|
||||
action=ingest_retrieval,
|
||||
)
|
||||
|
||||
### Add edges ###
|
||||
|
||||
graph.add_conditional_edges(
|
||||
source=START,
|
||||
path=send_to_expanded_retrieval,
|
||||
path_map=["initial_sub_question_expanded_retrieval"],
|
||||
)
|
||||
graph.add_edge(
|
||||
start_key="initial_sub_question_expanded_retrieval",
|
||||
end_key="ingest_retrieval",
|
||||
)
|
||||
graph.add_edge(
|
||||
start_key="ingest_retrieval",
|
||||
end_key="answer_generation",
|
||||
)
|
||||
graph.add_edge(
|
||||
start_key="answer_generation",
|
||||
end_key="answer_check",
|
||||
)
|
||||
graph.add_edge(
|
||||
start_key="answer_check",
|
||||
end_key="format_answer",
|
||||
)
|
||||
graph.add_edge(
|
||||
start_key="format_answer",
|
||||
end_key=END,
|
||||
)
|
||||
|
||||
return graph
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from onyx.db.engine import get_session_context_manager
|
||||
from onyx.llm.factory import get_default_llms
|
||||
from onyx.context.search.models import SearchRequest
|
||||
|
||||
graph = answer_query_graph_builder()
|
||||
compiled_graph = graph.compile()
|
||||
primary_llm, fast_llm = get_default_llms()
|
||||
search_request = SearchRequest(
|
||||
query="what can you do with onyx or danswer?",
|
||||
)
|
||||
with get_session_context_manager() as db_session:
|
||||
pro_search_config, search_tool = get_test_config(
|
||||
db_session, primary_llm, fast_llm, search_request
|
||||
)
|
||||
inputs = AnswerQuestionInput(
|
||||
question="what can you do with onyx?",
|
||||
question_id="0_0",
|
||||
)
|
||||
for thing in compiled_graph.stream(
|
||||
input=inputs,
|
||||
config={"configurable": {"config": pro_search_config}},
|
||||
# debug=True,
|
||||
# subgraphs=True,
|
||||
):
|
||||
logger.debug(thing)
|
||||
@@ -1,8 +0,0 @@
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
### Models ###
|
||||
|
||||
|
||||
class AnswerRetrievalStats(BaseModel):
|
||||
answer_retrieval_stats: dict[str, float | int]
|
||||
@@ -1,45 +0,0 @@
|
||||
from typing import cast
|
||||
|
||||
from langchain_core.messages import HumanMessage
|
||||
from langchain_core.messages import merge_message_runs
|
||||
from langchain_core.runnables.config import RunnableConfig
|
||||
|
||||
from onyx.agent_search.models import ProSearchConfig
|
||||
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
|
||||
AnswerQuestionState,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
|
||||
QACheckUpdate,
|
||||
)
|
||||
from onyx.agent_search.shared_graph_utils.prompts import SUB_CHECK_NO
|
||||
from onyx.agent_search.shared_graph_utils.prompts import SUB_CHECK_PROMPT
|
||||
from onyx.agent_search.shared_graph_utils.prompts import UNKNOWN_ANSWER
|
||||
|
||||
|
||||
def answer_check(state: AnswerQuestionState, config: RunnableConfig) -> QACheckUpdate:
|
||||
if state["answer"] == UNKNOWN_ANSWER:
|
||||
return QACheckUpdate(
|
||||
answer_quality=SUB_CHECK_NO,
|
||||
)
|
||||
msg = [
|
||||
HumanMessage(
|
||||
content=SUB_CHECK_PROMPT.format(
|
||||
question=state["question"],
|
||||
base_answer=state["answer"],
|
||||
)
|
||||
)
|
||||
]
|
||||
|
||||
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
|
||||
fast_llm = pro_search_config.fast_llm
|
||||
response = list(
|
||||
fast_llm.stream(
|
||||
prompt=msg,
|
||||
)
|
||||
)
|
||||
|
||||
quality_str = merge_message_runs(response, chunk_separator="")[0].content
|
||||
|
||||
return QACheckUpdate(
|
||||
answer_quality=quality_str,
|
||||
)
|
||||
@@ -1,106 +0,0 @@
|
||||
import datetime
|
||||
from typing import Any
|
||||
from typing import cast
|
||||
|
||||
from langchain_core.callbacks.manager import dispatch_custom_event
|
||||
from langchain_core.messages import merge_message_runs
|
||||
from langchain_core.runnables.config import RunnableConfig
|
||||
|
||||
from onyx.agent_search.models import ProSearchConfig
|
||||
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
|
||||
AnswerQuestionState,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
|
||||
QAGenerationUpdate,
|
||||
)
|
||||
from onyx.agent_search.shared_graph_utils.agent_prompt_ops import (
|
||||
build_sub_question_answer_prompt,
|
||||
)
|
||||
from onyx.agent_search.shared_graph_utils.prompts import ASSISTANT_SYSTEM_PROMPT_DEFAULT
|
||||
from onyx.agent_search.shared_graph_utils.prompts import ASSISTANT_SYSTEM_PROMPT_PERSONA
|
||||
from onyx.agent_search.shared_graph_utils.prompts import UNKNOWN_ANSWER
|
||||
from onyx.agent_search.shared_graph_utils.utils import get_persona_prompt
|
||||
from onyx.agent_search.shared_graph_utils.utils import parse_question_id
|
||||
from onyx.chat.models import AgentAnswerPiece
|
||||
from onyx.chat.models import StreamStopInfo
|
||||
from onyx.chat.models import StreamStopReason
|
||||
from onyx.utils.logger import setup_logger
|
||||
|
||||
logger = setup_logger()
|
||||
|
||||
|
||||
def answer_generation(
|
||||
state: AnswerQuestionState, config: RunnableConfig
|
||||
) -> QAGenerationUpdate:
|
||||
now_start = datetime.datetime.now()
|
||||
logger.debug(f"--------{now_start}--------START ANSWER GENERATION---")
|
||||
|
||||
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
|
||||
question = state["question"]
|
||||
docs = state["documents"]
|
||||
level, question_nr = parse_question_id(state["question_id"])
|
||||
persona_prompt = get_persona_prompt(pro_search_config.search_request.persona)
|
||||
|
||||
if len(docs) == 0:
|
||||
answer_str = UNKNOWN_ANSWER
|
||||
dispatch_custom_event(
|
||||
"sub_answers",
|
||||
AgentAnswerPiece(
|
||||
answer_piece=answer_str,
|
||||
level=level,
|
||||
level_question_nr=question_nr,
|
||||
answer_type="agent_sub_answer",
|
||||
),
|
||||
)
|
||||
else:
|
||||
if len(persona_prompt) > 0:
    persona_specification = ASSISTANT_SYSTEM_PROMPT_PERSONA.format(
        persona_prompt=persona_prompt
    )
else:
    persona_specification = ASSISTANT_SYSTEM_PROMPT_DEFAULT
|
||||
logger.debug(f"Number of verified retrieval docs: {len(docs)}")
|
||||
|
||||
fast_llm = pro_search_config.fast_llm
|
||||
msg = build_sub_question_answer_prompt(
|
||||
question=question,
|
||||
original_question=pro_search_config.search_request.query,
|
||||
docs=docs,
|
||||
persona_specification=persona_specification,
|
||||
config=fast_llm.config,
|
||||
)
|
||||
|
||||
response: list[str | list[str | dict[str, Any]]] = []
|
||||
for message in fast_llm.stream(
|
||||
prompt=msg,
|
||||
):
|
||||
# TODO: in principle, the answer here COULD contain images, but we don't support that yet
|
||||
content = message.content
|
||||
if not isinstance(content, str):
|
||||
raise ValueError(
|
||||
f"Expected content to be a string, but got {type(content)}"
|
||||
)
|
||||
dispatch_custom_event(
|
||||
"sub_answers",
|
||||
AgentAnswerPiece(
|
||||
answer_piece=content,
|
||||
level=level,
|
||||
level_question_nr=question_nr,
|
||||
answer_type="agent_sub_answer",
|
||||
),
|
||||
)
|
||||
response.append(content)
|
||||
|
||||
answer_str = merge_message_runs(response, chunk_separator="")[0].content
|
||||
|
||||
stop_event = StreamStopInfo(
|
||||
stop_reason=StreamStopReason.FINISHED,
|
||||
level=level,
|
||||
level_question_nr=question_nr,
|
||||
)
|
||||
dispatch_custom_event("sub_answer_finished", stop_event)
|
||||
|
||||
return QAGenerationUpdate(
|
||||
answer=answer_str,
|
||||
)
|
||||
@@ -1,25 +0,0 @@
|
||||
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
|
||||
AnswerQuestionOutput,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
|
||||
AnswerQuestionState,
|
||||
)
|
||||
from onyx.agent_search.shared_graph_utils.models import (
|
||||
QuestionAnswerResults,
|
||||
)
|
||||
|
||||
|
||||
def format_answer(state: AnswerQuestionState) -> AnswerQuestionOutput:
|
||||
return AnswerQuestionOutput(
|
||||
answer_results=[
|
||||
QuestionAnswerResults(
|
||||
question=state["question"],
|
||||
question_id=state["question_id"],
|
||||
quality=state.get("answer_quality", "No"),
|
||||
answer=state["answer"],
|
||||
expanded_retrieval_results=state["expanded_retrieval_results"],
|
||||
documents=state["documents"],
|
||||
sub_question_retrieval_stats=state["sub_question_retrieval_stats"],
|
||||
)
|
||||
],
|
||||
)
|
||||
@@ -1,23 +0,0 @@
|
||||
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
|
||||
RetrievalIngestionUpdate,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
|
||||
ExpandedRetrievalOutput,
|
||||
)
|
||||
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
|
||||
|
||||
|
||||
def ingest_retrieval(state: ExpandedRetrievalOutput) -> RetrievalIngestionUpdate:
|
||||
sub_question_retrieval_stats = state[
|
||||
"expanded_retrieval_result"
|
||||
].sub_question_retrieval_stats
|
||||
if sub_question_retrieval_stats is None:
|
||||
sub_question_retrieval_stats = [AgentChunkStats()]
|
||||
|
||||
return RetrievalIngestionUpdate(
|
||||
expanded_retrieval_results=state[
|
||||
"expanded_retrieval_result"
|
||||
].expanded_queries_results,
|
||||
documents=state["expanded_retrieval_result"].all_documents,
|
||||
sub_question_retrieval_stats=sub_question_retrieval_stats,
|
||||
)
|
||||
@@ -1,63 +0,0 @@
|
||||
from operator import add
|
||||
from typing import Annotated
|
||||
from typing import TypedDict
|
||||
|
||||
from onyx.agent_search.core_state import SubgraphCoreState
|
||||
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
|
||||
from onyx.agent_search.shared_graph_utils.models import QueryResult
|
||||
from onyx.agent_search.shared_graph_utils.models import (
|
||||
QuestionAnswerResults,
|
||||
)
|
||||
from onyx.agent_search.shared_graph_utils.operators import dedup_inference_sections
|
||||
from onyx.context.search.models import InferenceSection
|
||||
|
||||
|
||||
## Update States
|
||||
class QACheckUpdate(TypedDict):
|
||||
answer_quality: str
|
||||
|
||||
|
||||
class QAGenerationUpdate(TypedDict):
|
||||
answer: str
|
||||
# answer_stat: AnswerStats
|
||||
|
||||
|
||||
class RetrievalIngestionUpdate(TypedDict):
|
||||
expanded_retrieval_results: list[QueryResult]
|
||||
documents: Annotated[list[InferenceSection], dedup_inference_sections]
|
||||
sub_question_retrieval_stats: AgentChunkStats
|
||||
|
||||
|
||||
## Graph Input State
|
||||
|
||||
|
||||
class AnswerQuestionInput(SubgraphCoreState):
|
||||
question: str
|
||||
question_id: str # 0_0 is original question, everything else is <level>_<question_num>.
|
||||
# level 0 is original question and first decomposition, level 1 is follow up, etc
|
||||
# question_num is a unique number per original question per level.
|
||||
|
||||
|
||||
## Graph State
|
||||
|
||||
|
||||
class AnswerQuestionState(
|
||||
AnswerQuestionInput,
|
||||
QAGenerationUpdate,
|
||||
QACheckUpdate,
|
||||
RetrievalIngestionUpdate,
|
||||
):
|
||||
pass
|
||||
|
||||
|
||||
## Graph Output State
|
||||
|
||||
|
||||
class AnswerQuestionOutput(TypedDict):
    """
    This is a list of results even though each call of this subgraph only returns one result.
    This is because if we parallelize the answer query subgraph, there will be multiple
    results in a list so the add operator is used to add them together.
    """

    answer_results: Annotated[list[QuestionAnswerResults], add]
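# Editor's sketch: the Annotated[..., add] reducer concatenates lists, so when
# several parallel runs of this subgraph each return a one-element
# answer_results list, the parent graph ends up with a single combined list.
# (Uses the `add` imported from operator at the top of this file.)
_example_merged = add(["answer for 0_0"], ["answer for 0_1"])
# _example_merged == ["answer for 0_0", "answer for 0_1"]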
@@ -1,26 +0,0 @@
from collections.abc import Hashable

from langgraph.types import Send

from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    AnswerQuestionInput,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
    ExpandedRetrievalInput,
)
from onyx.utils.logger import setup_logger

logger = setup_logger()


def send_to_expanded_refined_retrieval(state: AnswerQuestionInput) -> Send | Hashable:
    logger.debug("sending to expanded retrieval for follow up question via edge")

    return Send(
        "refined_sub_question_expanded_retrieval",
        ExpandedRetrievalInput(
            question=state["question"],
            sub_question_id=state["question_id"],
            base_search=False,
        ),
    )
@@ -1,122 +0,0 @@
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph

from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.answer_check import (
    answer_check,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.answer_generation import (
    answer_generation,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.format_answer import (
    format_answer,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.ingest_retrieval import (
    ingest_retrieval,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    AnswerQuestionInput,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    AnswerQuestionOutput,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    AnswerQuestionState,
)
from onyx.agent_search.pro_search_a.answer_refinement_sub_question.edges import (
    send_to_expanded_refined_retrieval,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.graph_builder import (
    expanded_retrieval_graph_builder,
)
from onyx.utils.logger import setup_logger

logger = setup_logger()


def answer_refined_query_graph_builder() -> StateGraph:
    graph = StateGraph(
        state_schema=AnswerQuestionState,
        input=AnswerQuestionInput,
        output=AnswerQuestionOutput,
    )

    ### Add nodes ###

    expanded_retrieval = expanded_retrieval_graph_builder().compile()
    graph.add_node(
        node="refined_sub_question_expanded_retrieval",
        action=expanded_retrieval,
    )
    graph.add_node(
        node="refined_sub_answer_check",
        action=answer_check,
    )
    graph.add_node(
        node="refined_sub_answer_generation",
        action=answer_generation,
    )
    graph.add_node(
        node="format_refined_sub_answer",
        action=format_answer,
    )
    graph.add_node(
        node="ingest_refined_retrieval",
        action=ingest_retrieval,
    )

    ### Add edges ###

    graph.add_conditional_edges(
        source=START,
        path=send_to_expanded_refined_retrieval,
        path_map=["refined_sub_question_expanded_retrieval"],
    )
    graph.add_edge(
        start_key="refined_sub_question_expanded_retrieval",
        end_key="ingest_refined_retrieval",
    )
    graph.add_edge(
        start_key="ingest_refined_retrieval",
        end_key="refined_sub_answer_generation",
    )
    graph.add_edge(
        start_key="refined_sub_answer_generation",
        end_key="refined_sub_answer_check",
    )
    graph.add_edge(
        start_key="refined_sub_answer_check",
        end_key="format_refined_sub_answer",
    )
    graph.add_edge(
        start_key="format_refined_sub_answer",
        end_key=END,
    )

    return graph


if __name__ == "__main__":
    from onyx.db.engine import get_session_context_manager
    from onyx.llm.factory import get_default_llms
    from onyx.context.search.models import SearchRequest

    graph = answer_refined_query_graph_builder()
    compiled_graph = graph.compile()
    primary_llm, fast_llm = get_default_llms()
    search_request = SearchRequest(
        query="what can you do with onyx or danswer?",
    )
    with get_session_context_manager() as db_session:
        inputs = AnswerQuestionInput(
            question="what can you do with onyx?",
            question_id="0_0",
        )
        for thing in compiled_graph.stream(
            input=inputs,
            # debug=True,
            # subgraphs=True,
        ):
            logger.debug(thing)
        # output = compiled_graph.invoke(inputs)
        # logger.debug(output)
@@ -1,19 +0,0 @@
from pydantic import BaseModel

from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.context.search.models import InferenceSection

### Models ###


class AnswerRetrievalStats(BaseModel):
    answer_retrieval_stats: dict[str, float | int]


class QuestionAnswerResults(BaseModel):
    question: str
    answer: str
    quality: str
    # expanded_retrieval_results: list[QueryResult]
    documents: list[InferenceSection]
    sub_question_retrieval_stats: AgentChunkStats
@@ -1,70 +0,0 @@
|
||||
from langgraph.graph import END
|
||||
from langgraph.graph import START
|
||||
from langgraph.graph import StateGraph
|
||||
|
||||
from onyx.agent_search.pro_search_a.base_raw_search.nodes.format_raw_search_results import (
|
||||
format_raw_search_results,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.base_raw_search.nodes.generate_raw_search_data import (
|
||||
generate_raw_search_data,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.base_raw_search.states import BaseRawSearchInput
|
||||
from onyx.agent_search.pro_search_a.base_raw_search.states import BaseRawSearchOutput
|
||||
from onyx.agent_search.pro_search_a.base_raw_search.states import BaseRawSearchState
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.graph_builder import (
|
||||
expanded_retrieval_graph_builder,
|
||||
)
|
||||
|
||||
|
||||
def base_raw_search_graph_builder() -> StateGraph:
|
||||
graph = StateGraph(
|
||||
state_schema=BaseRawSearchState,
|
||||
input=BaseRawSearchInput,
|
||||
output=BaseRawSearchOutput,
|
||||
)
|
||||
|
||||
### Add nodes ###
|
||||
|
||||
graph.add_node(
|
||||
node="generate_raw_search_data",
|
||||
action=generate_raw_search_data,
|
||||
)
|
||||
|
||||
expanded_retrieval = expanded_retrieval_graph_builder().compile()
|
||||
graph.add_node(
|
||||
node="expanded_retrieval_base_search",
|
||||
action=expanded_retrieval,
|
||||
)
|
||||
graph.add_node(
|
||||
node="format_raw_search_results",
|
||||
action=format_raw_search_results,
|
||||
)
|
||||
|
||||
### Add edges ###
|
||||
|
||||
graph.add_edge(start_key=START, end_key="generate_raw_search_data")
|
||||
|
||||
graph.add_edge(
|
||||
start_key="generate_raw_search_data",
|
||||
end_key="expanded_retrieval_base_search",
|
||||
)
|
||||
graph.add_edge(
|
||||
start_key="expanded_retrieval_base_search",
|
||||
end_key="format_raw_search_results",
|
||||
)
|
||||
|
||||
# graph.add_edge(
|
||||
# start_key="expanded_retrieval_base_search",
|
||||
# end_key=END,
|
||||
# )
|
||||
|
||||
graph.add_edge(
|
||||
start_key="format_raw_search_results",
|
||||
end_key=END,
|
||||
)
|
||||
|
||||
return graph
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
pass
|
||||
@@ -1,20 +0,0 @@
from pydantic import BaseModel

from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.agent_search.shared_graph_utils.models import QueryResult
from onyx.context.search.models import InferenceSection

### Models ###


class AnswerRetrievalStats(BaseModel):
    answer_retrieval_stats: dict[str, float | int]


class QuestionAnswerResults(BaseModel):
    question: str
    answer: str
    quality: str
    expanded_retrieval_results: list[QueryResult]
    documents: list[InferenceSection]
    sub_question_retrieval_stats: list[AgentChunkStats]
@@ -1,16 +0,0 @@
from onyx.agent_search.pro_search_a.base_raw_search.states import BaseRawSearchOutput
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
    ExpandedRetrievalOutput,
)
from onyx.utils.logger import setup_logger

logger = setup_logger()


def format_raw_search_results(state: ExpandedRetrievalOutput) -> BaseRawSearchOutput:
    logger.debug("format_raw_search_results")
    return BaseRawSearchOutput(
        base_expanded_retrieval_result=state["expanded_retrieval_result"],
        # base_retrieval_results=[state["expanded_retrieval_result"]],
        # base_search_documents=[],
    )
@@ -1,24 +0,0 @@
from typing import cast

from langchain_core.runnables.config import RunnableConfig

from onyx.agent_search.core_state import CoreState
from onyx.agent_search.models import ProSearchConfig
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
    ExpandedRetrievalInput,
)
from onyx.utils.logger import setup_logger

logger = setup_logger()


def generate_raw_search_data(
    state: CoreState, config: RunnableConfig
) -> ExpandedRetrievalInput:
    logger.debug("generate_raw_search_data")
    pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
    return ExpandedRetrievalInput(
        question=pro_search_config.search_request.query,
        base_search=True,
        sub_question_id=None,  # This graph is always and only used for the original question
    )
@@ -1,45 +0,0 @@
from typing import TypedDict

from onyx.agent_search.core_state import CoreState
from onyx.agent_search.pro_search_a.expanded_retrieval.models import (
    ExpandedRetrievalResult,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
    ExpandedRetrievalInput,
)


## Update States


## Graph Input State


class BaseRawSearchInput(CoreState):
    pass


## Graph Output State


class BaseRawSearchOutput(TypedDict):
    """
    This is a list of results even though each call of this subgraph only returns one result.
    This is because if we parallelize the answer query subgraph, there will be multiple
    results in a list so the add operator is used to add them together.
    """

    # base_search_documents: Annotated[list[InferenceSection], dedup_inference_sections]
    # base_retrieval_results: Annotated[list[ExpandedRetrievalResult], add]
    base_expanded_retrieval_result: ExpandedRetrievalResult


## Graph State


class BaseRawSearchState(
    BaseRawSearchInput,
    ExpandedRetrievalInput,
    BaseRawSearchOutput,
):
    pass
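The docstring on BaseRawSearchOutput above (and the matching one on AnswerQuestionOutput) explains why parallelized subgraph outputs are collected as lists with an add reducer. A small self-contained sketch of that merge behaviour, with illustrative names only:

from operator import add
from typing import Annotated, TypedDict


class _ParallelOutput(TypedDict):
    # each parallel branch returns a one-element list; the `add` reducer
    # concatenates the branch updates into a single list on the shared state
    results: Annotated[list[str], add]


# what the reducer effectively does when two branches report back
merged = add(["answer from branch 1"], ["answer from branch 2"])
assert merged == ["answer from branch 1", "answer from branch 2"]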
@@ -1,32 +0,0 @@
from collections.abc import Hashable
from typing import cast

from langchain_core.runnables.config import RunnableConfig
from langgraph.types import Send

from onyx.agent_search.models import ProSearchConfig
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import RetrievalInput
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
    ExpandedRetrievalState,
)


def parallel_retrieval_edge(
    state: ExpandedRetrievalState, config: RunnableConfig
) -> list[Send | Hashable]:
    pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
    question = state.get("question", pro_search_config.search_request.query)

    query_expansions = state.get("expanded_queries", []) + [question]
    return [
        Send(
            "doc_retrieval",
            RetrievalInput(
                query_to_retrieve=query,
                question=question,
                base_search=False,
                sub_question_id=state.get("sub_question_id"),
            ),
        )
        for query in query_expansions
    ]
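parallel_retrieval_edge above fans a single state out into several parallel doc_retrieval runs by returning one Send per expanded query. A rough illustration of that payload-per-branch idea, with placeholder queries rather than the exact RetrievalInput wiring:

from langgraph.types import Send

# illustrative only: each Send carries its own input, so three expanded
# queries become three parallel executions of the "doc_retrieval" node
fan_out = [
    Send("doc_retrieval", {"query_to_retrieve": query, "question": "original question"})
    for query in ["rewrite 1", "rewrite 2", "original question"]
]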
@@ -1,122 +0,0 @@
|
||||
from langgraph.graph import END
|
||||
from langgraph.graph import START
|
||||
from langgraph.graph import StateGraph
|
||||
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.edges import (
|
||||
parallel_retrieval_edge,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import doc_reranking
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import doc_retrieval
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import doc_verification
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import expand_queries
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import format_results
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import verification_kickoff
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
|
||||
ExpandedRetrievalInput,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
|
||||
ExpandedRetrievalOutput,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
|
||||
ExpandedRetrievalState,
|
||||
)
|
||||
from onyx.agent_search.shared_graph_utils.utils import get_test_config
|
||||
from onyx.utils.logger import setup_logger
|
||||
|
||||
logger = setup_logger()
|
||||
|
||||
|
||||
def expanded_retrieval_graph_builder() -> StateGraph:
|
||||
graph = StateGraph(
|
||||
state_schema=ExpandedRetrievalState,
|
||||
input=ExpandedRetrievalInput,
|
||||
output=ExpandedRetrievalOutput,
|
||||
)
|
||||
|
||||
### Add nodes ###
|
||||
|
||||
graph.add_node(
|
||||
node="expand_queries",
|
||||
action=expand_queries,
|
||||
)
|
||||
|
||||
graph.add_node(
|
||||
node="doc_retrieval",
|
||||
action=doc_retrieval,
|
||||
)
|
||||
graph.add_node(
|
||||
node="verification_kickoff",
|
||||
action=verification_kickoff,
|
||||
)
|
||||
graph.add_node(
|
||||
node="doc_verification",
|
||||
action=doc_verification,
|
||||
)
|
||||
graph.add_node(
|
||||
node="doc_reranking",
|
||||
action=doc_reranking,
|
||||
)
|
||||
graph.add_node(
|
||||
node="format_results",
|
||||
action=format_results,
|
||||
)
|
||||
|
||||
### Add edges ###
|
||||
graph.add_edge(
|
||||
start_key=START,
|
||||
end_key="expand_queries",
|
||||
)
|
||||
|
||||
graph.add_conditional_edges(
|
||||
source="expand_queries",
|
||||
path=parallel_retrieval_edge,
|
||||
path_map=["doc_retrieval"],
|
||||
)
|
||||
graph.add_edge(
|
||||
start_key="doc_retrieval",
|
||||
end_key="verification_kickoff",
|
||||
)
|
||||
graph.add_edge(
|
||||
start_key="doc_verification",
|
||||
end_key="doc_reranking",
|
||||
)
|
||||
graph.add_edge(
|
||||
start_key="doc_reranking",
|
||||
end_key="format_results",
|
||||
)
|
||||
graph.add_edge(
|
||||
start_key="format_results",
|
||||
end_key=END,
|
||||
)
|
||||
|
||||
return graph
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from onyx.db.engine import get_session_context_manager
|
||||
from onyx.llm.factory import get_default_llms
|
||||
from onyx.context.search.models import SearchRequest
|
||||
|
||||
graph = expanded_retrieval_graph_builder()
|
||||
compiled_graph = graph.compile()
|
||||
primary_llm, fast_llm = get_default_llms()
|
||||
search_request = SearchRequest(
|
||||
query="what can you do with onyx or danswer?",
|
||||
)
|
||||
|
||||
with get_session_context_manager() as db_session:
|
||||
pro_search_config, search_tool = get_test_config(
|
||||
db_session, primary_llm, fast_llm, search_request
|
||||
)
|
||||
inputs = ExpandedRetrievalInput(
|
||||
question="what can you do with onyx?",
|
||||
base_search=False,
|
||||
sub_question_id=None,
|
||||
)
|
||||
for thing in compiled_graph.stream(
|
||||
input=inputs,
|
||||
config={"configurable": {"config": pro_search_config}},
|
||||
# debug=True,
|
||||
subgraphs=True,
|
||||
):
|
||||
logger.debug(thing)
|
||||
@@ -1,11 +0,0 @@
from pydantic import BaseModel

from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.agent_search.shared_graph_utils.models import QueryResult
from onyx.context.search.models import InferenceSection


class ExpandedRetrievalResult(BaseModel):
    expanded_queries_results: list[QueryResult]
    all_documents: list[InferenceSection]
    sub_question_retrieval_stats: AgentChunkStats
@@ -1,431 +0,0 @@
|
||||
from collections import defaultdict
|
||||
from collections.abc import Callable
|
||||
from typing import cast
|
||||
from typing import Literal
|
||||
|
||||
import numpy as np
|
||||
from langchain_core.callbacks.manager import dispatch_custom_event
|
||||
from langchain_core.messages import HumanMessage
|
||||
from langchain_core.messages import merge_message_runs
|
||||
from langchain_core.runnables.config import RunnableConfig
|
||||
from langgraph.types import Command
|
||||
from langgraph.types import Send
|
||||
|
||||
from onyx.agent_search.models import ProSearchConfig
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.models import (
|
||||
ExpandedRetrievalResult,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.states import DocRerankingUpdate
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.states import DocRetrievalUpdate
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
|
||||
DocVerificationInput,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
|
||||
DocVerificationUpdate,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
|
||||
ExpandedRetrievalInput,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
|
||||
ExpandedRetrievalState,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
|
||||
ExpandedRetrievalUpdate,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.states import InferenceSection
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
|
||||
QueryExpansionUpdate,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.states import RetrievalInput
|
||||
from onyx.agent_search.shared_graph_utils.agent_prompt_ops import trim_prompt_piece
|
||||
from onyx.agent_search.shared_graph_utils.calculations import get_fit_scores
|
||||
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
|
||||
from onyx.agent_search.shared_graph_utils.models import QueryResult
|
||||
from onyx.agent_search.shared_graph_utils.models import RetrievalFitStats
|
||||
from onyx.agent_search.shared_graph_utils.prompts import REWRITE_PROMPT_MULTI_ORIGINAL
|
||||
from onyx.agent_search.shared_graph_utils.prompts import VERIFIER_PROMPT
|
||||
from onyx.agent_search.shared_graph_utils.utils import dispatch_separated
|
||||
from onyx.agent_search.shared_graph_utils.utils import parse_question_id
|
||||
from onyx.chat.models import ExtendedToolResponse
|
||||
from onyx.chat.models import SubQueryPiece
|
||||
from onyx.configs.dev_configs import AGENT_MAX_QUERY_RETRIEVAL_RESULTS
|
||||
from onyx.configs.dev_configs import AGENT_RERANKING_MAX_QUERY_RETRIEVAL_RESULTS
|
||||
from onyx.configs.dev_configs import AGENT_RERANKING_STATS
|
||||
from onyx.configs.dev_configs import AGENT_RETRIEVAL_STATS
|
||||
from onyx.context.search.models import SearchRequest
|
||||
from onyx.context.search.pipeline import retrieval_preprocessing
|
||||
from onyx.context.search.postprocessing.postprocessing import rerank_sections
|
||||
from onyx.db.engine import get_session_context_manager
|
||||
from onyx.tools.models import SearchQueryInfo
|
||||
from onyx.tools.tool_implementations.search.search_tool import (
|
||||
SEARCH_RESPONSE_SUMMARY_ID,
|
||||
)
|
||||
from onyx.tools.tool_implementations.search.search_tool import SearchResponseSummary
|
||||
from onyx.tools.tool_implementations.search.search_tool import yield_search_responses
|
||||
from onyx.utils.logger import setup_logger
|
||||
|
||||
logger = setup_logger()
|
||||
|
||||
|
||||
def dispatch_subquery(level: int, question_nr: int) -> Callable[[str, int], None]:
|
||||
def helper(token: str, num: int) -> None:
|
||||
dispatch_custom_event(
|
||||
"subqueries",
|
||||
SubQueryPiece(
|
||||
sub_query=token,
|
||||
level=level,
|
||||
level_question_nr=question_nr,
|
||||
query_id=num,
|
||||
),
|
||||
)
|
||||
|
||||
return helper
|
||||
|
||||
|
||||
def expand_queries(
|
||||
state: ExpandedRetrievalInput, config: RunnableConfig
|
||||
) -> QueryExpansionUpdate:
|
||||
# Sometimes we want to expand the original question, sometimes we want to expand a sub-question.
|
||||
# When we are running this node on the original question, no question is explicitly passed in.
|
||||
# Instead, we use the original question from the search request.
|
||||
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
|
||||
question = state.get("question", pro_search_config.search_request.query)
|
||||
llm = pro_search_config.fast_llm
|
||||
chat_session_id = pro_search_config.chat_session_id
|
||||
sub_question_id = state.get("sub_question_id")
|
||||
if sub_question_id is None:
|
||||
level, question_nr = 0, 0
|
||||
else:
|
||||
level, question_nr = parse_question_id(sub_question_id)
|
||||
|
||||
if chat_session_id is None:
|
||||
raise ValueError("chat_session_id must be provided for agent search")
|
||||
|
||||
msg = [
|
||||
HumanMessage(
|
||||
content=REWRITE_PROMPT_MULTI_ORIGINAL.format(question=question),
|
||||
)
|
||||
]
|
||||
|
||||
llm_response_list = dispatch_separated(
|
||||
llm.stream(prompt=msg), dispatch_subquery(level, question_nr)
|
||||
)
|
||||
|
||||
llm_response = merge_message_runs(llm_response_list, chunk_separator="")[0].content
|
||||
|
||||
rewritten_queries = llm_response.split("\n")
|
||||
|
||||
return QueryExpansionUpdate(
|
||||
expanded_queries=rewritten_queries,
|
||||
)
|
||||
|
||||
|
||||
def doc_retrieval(state: RetrievalInput, config: RunnableConfig) -> DocRetrievalUpdate:
|
||||
"""
|
||||
Retrieve documents
|
||||
|
||||
Args:
|
||||
state (RetrievalInput): Primary state + the query to retrieve
|
||||
config (RunnableConfig): Configuration containing ProSearchConfig
|
||||
|
||||
Updates:
|
||||
expanded_retrieval_results: list[ExpandedRetrievalResult]
|
||||
retrieved_documents: list[InferenceSection]
|
||||
"""
|
||||
query_to_retrieve = state["query_to_retrieve"]
|
||||
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
|
||||
search_tool = pro_search_config.search_tool
|
||||
|
||||
retrieved_docs: list[InferenceSection] = []
|
||||
if not query_to_retrieve.strip():
|
||||
logger.warning("Empty query, skipping retrieval")
|
||||
return DocRetrievalUpdate(
|
||||
expanded_retrieval_results=[],
|
||||
retrieved_documents=[],
|
||||
)
|
||||
|
||||
query_info = None
|
||||
# new db session to avoid concurrency issues
|
||||
with get_session_context_manager() as db_session:
|
||||
for tool_response in search_tool.run(
|
||||
query=query_to_retrieve,
|
||||
force_no_rerank=True,
|
||||
alternate_db_session=db_session,
|
||||
):
|
||||
# get retrieved docs to send to the rest of the graph
|
||||
if tool_response.id == SEARCH_RESPONSE_SUMMARY_ID:
|
||||
response = cast(SearchResponseSummary, tool_response.response)
|
||||
retrieved_docs = response.top_sections
|
||||
query_info = SearchQueryInfo(
|
||||
predicted_search=response.predicted_search,
|
||||
final_filters=response.final_filters,
|
||||
recency_bias_multiplier=response.recency_bias_multiplier,
|
||||
)
|
||||
break
|
||||
|
||||
retrieved_docs = retrieved_docs[:AGENT_MAX_QUERY_RETRIEVAL_RESULTS]
|
||||
pre_rerank_docs = retrieved_docs
|
||||
if search_tool.search_pipeline is not None:
|
||||
pre_rerank_docs = (
|
||||
search_tool.search_pipeline._retrieved_sections or retrieved_docs
|
||||
)
|
||||
|
||||
if AGENT_RETRIEVAL_STATS:
|
||||
fit_scores = get_fit_scores(
|
||||
pre_rerank_docs,
|
||||
retrieved_docs,
|
||||
)
|
||||
else:
|
||||
fit_scores = None
|
||||
|
||||
expanded_retrieval_result = QueryResult(
|
||||
query=query_to_retrieve,
|
||||
search_results=retrieved_docs,
|
||||
stats=fit_scores,
|
||||
query_info=query_info,
|
||||
)
|
||||
return DocRetrievalUpdate(
|
||||
expanded_retrieval_results=[expanded_retrieval_result],
|
||||
retrieved_documents=retrieved_docs,
|
||||
)
|
||||
|
||||
|
||||
def verification_kickoff(
|
||||
state: ExpandedRetrievalState,
|
||||
config: RunnableConfig,
|
||||
) -> Command[Literal["doc_verification"]]:
|
||||
documents = state["retrieved_documents"]
|
||||
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
|
||||
verification_question = state.get(
|
||||
"question", pro_search_config.search_request.query
|
||||
)
|
||||
sub_question_id = state.get("sub_question_id")
|
||||
return Command(
|
||||
update={},
|
||||
goto=[
|
||||
Send(
|
||||
node="doc_verification",
|
||||
arg=DocVerificationInput(
|
||||
doc_to_verify=doc,
|
||||
question=verification_question,
|
||||
base_search=False,
|
||||
sub_question_id=sub_question_id,
|
||||
),
|
||||
)
|
||||
for doc in documents
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
def doc_verification(
|
||||
state: DocVerificationInput, config: RunnableConfig
|
||||
) -> DocVerificationUpdate:
|
||||
"""
|
||||
Check whether the document is relevant for the original user question
|
||||
|
||||
Args:
|
||||
state (DocVerificationInput): The current state
|
||||
config (RunnableConfig): Configuration containing ProSearchConfig
|
||||
|
||||
Updates:
|
||||
verified_documents: list[InferenceSection]
|
||||
"""
|
||||
|
||||
question = state["question"]
|
||||
doc_to_verify = state["doc_to_verify"]
|
||||
document_content = doc_to_verify.combined_content
|
||||
|
||||
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
|
||||
fast_llm = pro_search_config.fast_llm
|
||||
|
||||
document_content = trim_prompt_piece(
|
||||
fast_llm.config, document_content, VERIFIER_PROMPT + question
|
||||
)
|
||||
|
||||
msg = [
|
||||
HumanMessage(
|
||||
content=VERIFIER_PROMPT.format(
|
||||
question=question, document_content=document_content
|
||||
)
|
||||
)
|
||||
]
|
||||
|
||||
response = fast_llm.invoke(msg)
|
||||
|
||||
verified_documents = []
|
||||
if isinstance(response.content, str) and "yes" in response.content.lower():
|
||||
verified_documents.append(doc_to_verify)
|
||||
|
||||
return DocVerificationUpdate(
|
||||
verified_documents=verified_documents,
|
||||
)
|
||||
|
||||
|
||||
def doc_reranking(
|
||||
state: ExpandedRetrievalState, config: RunnableConfig
|
||||
) -> DocRerankingUpdate:
|
||||
verified_documents = state["verified_documents"]
|
||||
|
||||
# Rerank post retrieval and verification. First, create a search query
|
||||
# then create the list of reranked sections
|
||||
|
||||
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
|
||||
question = state.get("question", pro_search_config.search_request.query)
|
||||
with get_session_context_manager() as db_session:
|
||||
_search_query = retrieval_preprocessing(
|
||||
search_request=SearchRequest(query=question),
|
||||
user=pro_search_config.search_tool.user, # bit of a hack
|
||||
llm=pro_search_config.fast_llm,
|
||||
db_session=db_session,
|
||||
)
|
||||
|
||||
# skip section filtering
|
||||
|
||||
if (
|
||||
_search_query.rerank_settings
|
||||
and _search_query.rerank_settings.rerank_model_name
|
||||
and _search_query.rerank_settings.num_rerank > 0
|
||||
):
|
||||
reranked_documents = rerank_sections(
|
||||
_search_query,
|
||||
verified_documents,
|
||||
)
|
||||
else:
|
||||
logger.warning("No reranking settings found, using unranked documents")
|
||||
reranked_documents = verified_documents
|
||||
|
||||
if AGENT_RERANKING_STATS:
|
||||
fit_scores = get_fit_scores(verified_documents, reranked_documents)
|
||||
else:
|
||||
fit_scores = RetrievalFitStats(fit_score_lift=0, rerank_effect=0, fit_scores={})
|
||||
|
||||
# TODO: stream deduped docs here, or decide to use search tool ranking/verification
|
||||
|
||||
return DocRerankingUpdate(
|
||||
reranked_documents=[
|
||||
doc for doc in reranked_documents if type(doc) == InferenceSection
|
||||
][:AGENT_RERANKING_MAX_QUERY_RETRIEVAL_RESULTS],
|
||||
sub_question_retrieval_stats=fit_scores,
|
||||
)
|
||||
|
||||
|
||||
def _calculate_sub_question_retrieval_stats(
|
||||
verified_documents: list[InferenceSection],
|
||||
expanded_retrieval_results: list[QueryResult],
|
||||
) -> AgentChunkStats:
|
||||
chunk_scores: dict[str, dict[str, list[int | float]]] = defaultdict(
|
||||
lambda: defaultdict(list)
|
||||
)
|
||||
|
||||
for expanded_retrieval_result in expanded_retrieval_results:
|
||||
for doc in expanded_retrieval_result.search_results:
|
||||
doc_chunk_id = f"{doc.center_chunk.document_id}_{doc.center_chunk.chunk_id}"
|
||||
if doc.center_chunk.score is not None:
|
||||
chunk_scores[doc_chunk_id]["score"].append(doc.center_chunk.score)
|
||||
|
||||
verified_doc_chunk_ids = [
|
||||
f"{verified_document.center_chunk.document_id}_{verified_document.center_chunk.chunk_id}"
|
||||
for verified_document in verified_documents
|
||||
]
|
||||
dismissed_doc_chunk_ids = []
|
||||
|
||||
raw_chunk_stats_counts: dict[str, int] = defaultdict(int)
|
||||
raw_chunk_stats_scores: dict[str, float] = defaultdict(float)
|
||||
for doc_chunk_id, chunk_data in chunk_scores.items():
|
||||
if doc_chunk_id in verified_doc_chunk_ids:
|
||||
raw_chunk_stats_counts["verified_count"] += 1
|
||||
|
||||
valid_chunk_scores = [
|
||||
score for score in chunk_data["score"] if score is not None
|
||||
]
|
||||
raw_chunk_stats_scores["verified_scores"] += float(
|
||||
np.mean(valid_chunk_scores)
|
||||
)
|
||||
else:
|
||||
raw_chunk_stats_counts["rejected_count"] += 1
|
||||
valid_chunk_scores = [
|
||||
score for score in chunk_data["score"] if score is not None
|
||||
]
|
||||
raw_chunk_stats_scores["rejected_scores"] += float(
|
||||
np.mean(valid_chunk_scores)
|
||||
)
|
||||
dismissed_doc_chunk_ids.append(doc_chunk_id)
|
||||
|
||||
if raw_chunk_stats_counts["verified_count"] == 0:
|
||||
verified_avg_scores = 0.0
|
||||
else:
|
||||
verified_avg_scores = raw_chunk_stats_scores["verified_scores"] / float(
|
||||
raw_chunk_stats_counts["verified_count"]
|
||||
)
|
||||
|
||||
rejected_scores = raw_chunk_stats_scores.get("rejected_scores", None)
|
||||
if rejected_scores is not None:
|
||||
rejected_avg_scores = rejected_scores / float(
|
||||
raw_chunk_stats_counts["rejected_count"]
|
||||
)
|
||||
else:
|
||||
rejected_avg_scores = None
|
||||
|
||||
chunk_stats = AgentChunkStats(
|
||||
verified_count=raw_chunk_stats_counts["verified_count"],
|
||||
verified_avg_scores=verified_avg_scores,
|
||||
rejected_count=raw_chunk_stats_counts["rejected_count"],
|
||||
rejected_avg_scores=rejected_avg_scores,
|
||||
verified_doc_chunk_ids=verified_doc_chunk_ids,
|
||||
dismissed_doc_chunk_ids=dismissed_doc_chunk_ids,
|
||||
)
|
||||
|
||||
return chunk_stats
|
||||
|
||||
|
||||
def format_results(
|
||||
state: ExpandedRetrievalState, config: RunnableConfig
|
||||
) -> ExpandedRetrievalUpdate:
|
||||
level, question_nr = parse_question_id(state.get("sub_question_id") or "0_0")
|
||||
query_infos = [
|
||||
result.query_info
|
||||
for result in state["expanded_retrieval_results"]
|
||||
if result.query_info is not None
|
||||
]
|
||||
if len(query_infos) == 0:
|
||||
raise ValueError("No query info found")
|
||||
|
||||
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
|
||||
# main question docs will be sent later after aggregation and deduping with sub-question docs
|
||||
if not (level == 0 and question_nr == 0):
|
||||
for tool_response in yield_search_responses(
|
||||
query=state["question"],
|
||||
reranked_sections=state[
|
||||
"retrieved_documents"
|
||||
], # TODO: rename params. this one is supposed to be the sections pre-merging
|
||||
final_context_sections=state["reranked_documents"],
|
||||
search_query_info=query_infos[0], # TODO: handle differing query infos?
|
||||
get_section_relevance=lambda: None, # TODO: add relevance
|
||||
search_tool=pro_search_config.search_tool,
|
||||
):
|
||||
dispatch_custom_event(
|
||||
"tool_response",
|
||||
ExtendedToolResponse(
|
||||
id=tool_response.id,
|
||||
response=tool_response.response,
|
||||
level=level,
|
||||
level_question_nr=question_nr,
|
||||
),
|
||||
)
|
||||
sub_question_retrieval_stats = _calculate_sub_question_retrieval_stats(
|
||||
verified_documents=state["verified_documents"],
|
||||
expanded_retrieval_results=state["expanded_retrieval_results"],
|
||||
)
|
||||
|
||||
if sub_question_retrieval_stats is None:
|
||||
sub_question_retrieval_stats = AgentChunkStats()
|
||||
# else:
|
||||
# sub_question_retrieval_stats = [sub_question_retrieval_stats]
|
||||
|
||||
return ExpandedRetrievalUpdate(
|
||||
expanded_retrieval_result=ExpandedRetrievalResult(
|
||||
expanded_queries_results=state["expanded_retrieval_results"],
|
||||
all_documents=state["reranked_documents"],
|
||||
sub_question_retrieval_stats=sub_question_retrieval_stats,
|
||||
),
|
||||
)
|
||||
@@ -1,82 +0,0 @@
|
||||
from operator import add
|
||||
from typing import Annotated
|
||||
from typing import TypedDict
|
||||
|
||||
from onyx.agent_search.core_state import SubgraphCoreState
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.models import (
|
||||
ExpandedRetrievalResult,
|
||||
)
|
||||
from onyx.agent_search.shared_graph_utils.models import QueryResult
|
||||
from onyx.agent_search.shared_graph_utils.models import RetrievalFitStats
|
||||
from onyx.agent_search.shared_graph_utils.operators import dedup_inference_sections
|
||||
from onyx.context.search.models import InferenceSection
|
||||
|
||||
|
||||
### States ###
|
||||
|
||||
## Graph Input State
|
||||
|
||||
|
||||
class ExpandedRetrievalInput(SubgraphCoreState):
|
||||
question: str
|
||||
base_search: bool
|
||||
sub_question_id: str | None
|
||||
|
||||
|
||||
## Update/Return States
|
||||
|
||||
|
||||
class QueryExpansionUpdate(TypedDict):
|
||||
expanded_queries: list[str]
|
||||
|
||||
|
||||
class DocVerificationUpdate(TypedDict):
|
||||
verified_documents: Annotated[list[InferenceSection], dedup_inference_sections]
|
||||
|
||||
|
||||
class DocRetrievalUpdate(TypedDict):
|
||||
expanded_retrieval_results: Annotated[list[QueryResult], add]
|
||||
retrieved_documents: Annotated[list[InferenceSection], dedup_inference_sections]
|
||||
|
||||
|
||||
class DocRerankingUpdate(TypedDict):
|
||||
reranked_documents: Annotated[list[InferenceSection], dedup_inference_sections]
|
||||
sub_question_retrieval_stats: RetrievalFitStats | None
|
||||
|
||||
|
||||
class ExpandedRetrievalUpdate(TypedDict):
|
||||
expanded_retrieval_result: ExpandedRetrievalResult
|
||||
|
||||
|
||||
## Graph Output State
|
||||
|
||||
|
||||
class ExpandedRetrievalOutput(TypedDict):
|
||||
expanded_retrieval_result: ExpandedRetrievalResult
|
||||
base_expanded_retrieval_result: ExpandedRetrievalResult
|
||||
|
||||
|
||||
## Graph State
|
||||
|
||||
|
||||
class ExpandedRetrievalState(
|
||||
# This includes the core state
|
||||
ExpandedRetrievalInput,
|
||||
QueryExpansionUpdate,
|
||||
DocRetrievalUpdate,
|
||||
DocVerificationUpdate,
|
||||
DocRerankingUpdate,
|
||||
ExpandedRetrievalOutput,
|
||||
):
|
||||
pass
|
||||
|
||||
|
||||
## Conditional Input States
|
||||
|
||||
|
||||
class DocVerificationInput(ExpandedRetrievalInput):
|
||||
doc_to_verify: InferenceSection
|
||||
|
||||
|
||||
class RetrievalInput(ExpandedRetrievalInput):
|
||||
query_to_retrieve: str
|
||||
@@ -1,89 +0,0 @@
|
||||
from collections.abc import Hashable
|
||||
from typing import Literal
|
||||
|
||||
from langgraph.types import Send
|
||||
|
||||
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
|
||||
AnswerQuestionInput,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
|
||||
AnswerQuestionOutput,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.main.states import MainState
|
||||
from onyx.agent_search.pro_search_a.main.states import RequireRefinedAnswerUpdate
|
||||
from onyx.agent_search.shared_graph_utils.utils import make_question_id
|
||||
from onyx.utils.logger import setup_logger
|
||||
|
||||
logger = setup_logger()
|
||||
|
||||
|
||||
def parallelize_initial_sub_question_answering(
|
||||
state: MainState,
|
||||
) -> list[Send | Hashable]:
|
||||
if len(state["initial_decomp_questions"]) > 0:
|
||||
# sub_question_record_ids = [subq_record.id for subq_record in state["sub_question_records"]]
|
||||
# if len(state["sub_question_records"]) == 0:
|
||||
# if state["config"].use_persistence:
|
||||
# raise ValueError("No sub-questions found for initial decomposed questions")
|
||||
# else:
|
||||
# # in this case, we are doing retrieval on the original question.
|
||||
# # to make all the logic consistent, we create a new sub-question
|
||||
# # with the same content as the original question
|
||||
# sub_question_record_ids = [1] * len(state["initial_decomp_questions"])
|
||||
|
||||
return [
|
||||
Send(
|
||||
"answer_query_subgraph",
|
||||
AnswerQuestionInput(
|
||||
question=question,
|
||||
question_id=make_question_id(0, question_nr + 1),
|
||||
),
|
||||
)
|
||||
for question_nr, question in enumerate(state["initial_decomp_questions"])
|
||||
]
|
||||
|
||||
else:
|
||||
return [
|
||||
Send(
|
||||
"ingest_answers",
|
||||
AnswerQuestionOutput(
|
||||
answer_results=[],
|
||||
),
|
||||
)
|
||||
]
|
||||
|
||||
|
||||
# Define the function that determines whether to continue or not
|
||||
def continue_to_refined_answer_or_end(
|
||||
state: RequireRefinedAnswerUpdate,
|
||||
) -> Literal["refined_sub_question_creation", "logging_node"]:
|
||||
if state["require_refined_answer"]:
|
||||
return "refined_sub_question_creation"
|
||||
else:
|
||||
return "logging_node"
|
||||
|
||||
|
||||
def parallelize_refined_sub_question_answering(
|
||||
state: MainState,
|
||||
) -> list[Send | Hashable]:
|
||||
if len(state["refined_sub_questions"]) > 0:
|
||||
return [
|
||||
Send(
|
||||
"answer_refined_question",
|
||||
AnswerQuestionInput(
|
||||
question=question_data.sub_question,
|
||||
question_id=make_question_id(1, question_nr),
|
||||
),
|
||||
)
|
||||
for question_nr, question_data in state["refined_sub_questions"].items()
|
||||
]
|
||||
|
||||
else:
|
||||
return [
|
||||
Send(
|
||||
"ingest_refined_sub_answers",
|
||||
AnswerQuestionOutput(
|
||||
answer_results=[],
|
||||
),
|
||||
)
|
||||
]
|
||||
@@ -1,259 +0,0 @@
|
||||
from langgraph.graph import END
|
||||
from langgraph.graph import START
|
||||
from langgraph.graph import StateGraph
|
||||
|
||||
from onyx.agent_search.pro_search_a.answer_initial_sub_question.graph_builder import (
|
||||
answer_query_graph_builder,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.answer_refinement_sub_question.graph_builder import (
|
||||
answer_refined_query_graph_builder,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.base_raw_search.graph_builder import (
|
||||
base_raw_search_graph_builder,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.main.edges import continue_to_refined_answer_or_end
|
||||
from onyx.agent_search.pro_search_a.main.edges import (
|
||||
parallelize_initial_sub_question_answering,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.main.edges import (
|
||||
parallelize_refined_sub_question_answering,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.main.nodes import agent_logging
|
||||
from onyx.agent_search.pro_search_a.main.nodes import entity_term_extraction_llm
|
||||
from onyx.agent_search.pro_search_a.main.nodes import generate_initial_answer
|
||||
from onyx.agent_search.pro_search_a.main.nodes import generate_refined_answer
|
||||
from onyx.agent_search.pro_search_a.main.nodes import ingest_initial_base_retrieval
|
||||
from onyx.agent_search.pro_search_a.main.nodes import (
|
||||
ingest_initial_sub_question_answers,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.main.nodes import ingest_refined_answers
|
||||
from onyx.agent_search.pro_search_a.main.nodes import initial_answer_quality_check
|
||||
from onyx.agent_search.pro_search_a.main.nodes import initial_sub_question_creation
|
||||
from onyx.agent_search.pro_search_a.main.nodes import refined_answer_decision
|
||||
from onyx.agent_search.pro_search_a.main.nodes import refined_sub_question_creation
|
||||
from onyx.agent_search.pro_search_a.main.states import MainInput
|
||||
from onyx.agent_search.pro_search_a.main.states import MainState
|
||||
from onyx.agent_search.shared_graph_utils.utils import get_test_config
|
||||
from onyx.utils.logger import setup_logger
|
||||
|
||||
logger = setup_logger()
|
||||
|
||||
test_mode = False
|
||||
|
||||
|
||||
def main_graph_builder(test_mode: bool = False) -> StateGraph:
|
||||
graph = StateGraph(
|
||||
state_schema=MainState,
|
||||
input=MainInput,
|
||||
)
|
||||
|
||||
graph.add_node(
|
||||
node="initial_sub_question_creation",
|
||||
action=initial_sub_question_creation,
|
||||
)
|
||||
answer_query_subgraph = answer_query_graph_builder().compile()
|
||||
graph.add_node(
|
||||
node="answer_query_subgraph",
|
||||
action=answer_query_subgraph,
|
||||
)
|
||||
|
||||
base_raw_search_subgraph = base_raw_search_graph_builder().compile()
|
||||
graph.add_node(
|
||||
node="base_raw_search_subgraph",
|
||||
action=base_raw_search_subgraph,
|
||||
)
|
||||
|
||||
# refined_answer_subgraph = refined_answers_graph_builder().compile()
|
||||
# graph.add_node(
|
||||
# node="refined_answer_subgraph",
|
||||
# action=refined_answer_subgraph,
|
||||
# )
|
||||
|
||||
graph.add_node(
|
||||
node="refined_sub_question_creation",
|
||||
action=refined_sub_question_creation,
|
||||
)
|
||||
|
||||
answer_refined_question = answer_refined_query_graph_builder().compile()
|
||||
graph.add_node(
|
||||
node="answer_refined_question",
|
||||
action=answer_refined_question,
|
||||
)
|
||||
|
||||
graph.add_node(
|
||||
node="ingest_refined_answers",
|
||||
action=ingest_refined_answers,
|
||||
)
|
||||
|
||||
graph.add_node(
|
||||
node="generate_refined_answer",
|
||||
action=generate_refined_answer,
|
||||
)
|
||||
|
||||
# graph.add_node(
|
||||
# node="check_refined_answer",
|
||||
# action=check_refined_answer,
|
||||
# )
|
||||
|
||||
graph.add_node(
|
||||
node="ingest_initial_retrieval",
|
||||
action=ingest_initial_base_retrieval,
|
||||
)
|
||||
graph.add_node(
|
||||
node="ingest_initial_sub_question_answers",
|
||||
action=ingest_initial_sub_question_answers,
|
||||
)
|
||||
graph.add_node(
|
||||
node="generate_initial_answer",
|
||||
action=generate_initial_answer,
|
||||
)
|
||||
|
||||
graph.add_node(
|
||||
node="initial_answer_quality_check",
|
||||
action=initial_answer_quality_check,
|
||||
)
|
||||
|
||||
graph.add_node(
|
||||
node="entity_term_extraction_llm",
|
||||
action=entity_term_extraction_llm,
|
||||
)
|
||||
graph.add_node(
|
||||
node="refined_answer_decision",
|
||||
action=refined_answer_decision,
|
||||
)
|
||||
|
||||
graph.add_node(
|
||||
node="logging_node",
|
||||
action=agent_logging,
|
||||
)
|
||||
# if test_mode:
|
||||
# graph.add_node(
|
||||
# node="generate_initial_base_answer",
|
||||
# action=generate_initial_base_answer,
|
||||
# )
|
||||
|
||||
### Add edges ###
|
||||
|
||||
graph.add_edge(start_key=START, end_key="base_raw_search_subgraph")
|
||||
|
||||
graph.add_edge(
|
||||
start_key="base_raw_search_subgraph",
|
||||
end_key="ingest_initial_retrieval",
|
||||
)
|
||||
|
||||
graph.add_edge(
|
||||
start_key=START,
|
||||
end_key="initial_sub_question_creation",
|
||||
)
|
||||
graph.add_conditional_edges(
|
||||
source="initial_sub_question_creation",
|
||||
path=parallelize_initial_sub_question_answering,
|
||||
path_map=["answer_query_subgraph"],
|
||||
)
|
||||
graph.add_edge(
|
||||
start_key="answer_query_subgraph",
|
||||
end_key="ingest_initial_sub_question_answers",
|
||||
)
|
||||
|
||||
graph.add_edge(
|
||||
start_key=["ingest_initial_sub_question_answers", "ingest_initial_retrieval"],
|
||||
end_key="generate_initial_answer",
|
||||
)
|
||||
|
||||
graph.add_edge(
|
||||
start_key="generate_initial_answer",
|
||||
end_key="entity_term_extraction_llm",
|
||||
)
|
||||
|
||||
graph.add_edge(
|
||||
start_key="generate_initial_answer",
|
||||
end_key="initial_answer_quality_check",
|
||||
)
|
||||
|
||||
graph.add_edge(
|
||||
start_key=["initial_answer_quality_check", "entity_term_extraction_llm"],
|
||||
end_key="refined_answer_decision",
|
||||
)
|
||||
|
||||
graph.add_conditional_edges(
|
||||
source="refined_answer_decision",
|
||||
path=continue_to_refined_answer_or_end,
|
||||
path_map=["refined_sub_question_creation", "logging_node"],
|
||||
)
|
||||
|
||||
graph.add_conditional_edges(
|
||||
source="refined_sub_question_creation", # DONE
|
||||
path=parallelize_refined_sub_question_answering,
|
||||
path_map=["answer_refined_question"],
|
||||
)
|
||||
graph.add_edge(
|
||||
start_key="answer_refined_question", # HERE
|
||||
end_key="ingest_refined_answers",
|
||||
)
|
||||
|
||||
graph.add_edge(
|
||||
start_key="ingest_refined_answers",
|
||||
end_key="generate_refined_answer",
|
||||
)
|
||||
|
||||
# graph.add_conditional_edges(
|
||||
# source="refined_answer_decision",
|
||||
# path=continue_to_refined_answer_or_end,
|
||||
# path_map=["refined_answer_subgraph", END],
|
||||
# )
|
||||
|
||||
# graph.add_edge(
|
||||
# start_key="refined_answer_subgraph",
|
||||
# end_key="generate_refined_answer",
|
||||
# )
|
||||
|
||||
graph.add_edge(
|
||||
start_key="generate_refined_answer",
|
||||
end_key="logging_node",
|
||||
)
|
||||
|
||||
graph.add_edge(
|
||||
start_key="logging_node",
|
||||
end_key=END,
|
||||
)
|
||||
|
||||
# graph.add_edge(
|
||||
# start_key="generate_refined_answer",
|
||||
# end_key="check_refined_answer",
|
||||
# )
|
||||
|
||||
# graph.add_edge(
|
||||
# start_key="check_refined_answer",
|
||||
# end_key=END,
|
||||
# )
|
||||
|
||||
return graph
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
pass
|
||||
|
||||
from onyx.db.engine import get_session_context_manager
|
||||
from onyx.llm.factory import get_default_llms
|
||||
from onyx.context.search.models import SearchRequest
|
||||
|
||||
graph = main_graph_builder()
|
||||
compiled_graph = graph.compile()
|
||||
primary_llm, fast_llm = get_default_llms()
|
||||
|
||||
with get_session_context_manager() as db_session:
|
||||
search_request = SearchRequest(query="Who created Excel?")
|
||||
pro_search_config, search_tool = get_test_config(
|
||||
db_session, primary_llm, fast_llm, search_request
|
||||
)
|
||||
|
||||
inputs = MainInput()
|
||||
|
||||
for thing in compiled_graph.stream(
|
||||
input=inputs,
|
||||
config={"configurable": {"config": pro_search_config}},
|
||||
# stream_mode="debug",
|
||||
# debug=True,
|
||||
subgraphs=True,
|
||||
):
|
||||
logger.debug(thing)
|
||||
@@ -1,36 +0,0 @@
from pydantic import BaseModel


class FollowUpSubQuestion(BaseModel):
    sub_question: str
    sub_question_id: str
    verified: bool
    answered: bool
    answer: str


class AgentTimings(BaseModel):
    base_duration__s: float | None
    refined_duration__s: float | None
    full_duration__s: float | None


class AgentBaseMetrics(BaseModel):
    num_verified_documents_total: int | None
    num_verified_documents_core: int | None
    verified_avg_score_core: float | None
    num_verified_documents_base: int | float | None
    verified_avg_score_base: float | None
    base_doc_boost_factor: float | None
    support_boost_factor: float | None
    duration__s: float | None


class AgentRefinedMetrics(BaseModel):
    refined_doc_boost_factor: float | None
    refined_question_boost_factor: float | None
    duration__s: float | None


class AgentAdditionalMetrics(BaseModel):
    pass
File diff suppressed because it is too large
@@ -1,151 +0,0 @@
|
||||
from datetime import datetime
|
||||
from operator import add
|
||||
from typing import Annotated
|
||||
from typing import TypedDict
|
||||
|
||||
from onyx.agent_search.core_state import CoreState
|
||||
from onyx.agent_search.pro_search_a.expanded_retrieval.models import (
|
||||
ExpandedRetrievalResult,
|
||||
)
|
||||
from onyx.agent_search.pro_search_a.main.models import AgentBaseMetrics
|
||||
from onyx.agent_search.pro_search_a.main.models import AgentRefinedMetrics
|
||||
from onyx.agent_search.pro_search_a.main.models import FollowUpSubQuestion
|
||||
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
|
||||
from onyx.agent_search.shared_graph_utils.models import EntityRelationshipTermExtraction
|
||||
from onyx.agent_search.shared_graph_utils.models import InitialAgentResultStats
|
||||
from onyx.agent_search.shared_graph_utils.models import QueryResult
|
||||
from onyx.agent_search.shared_graph_utils.models import (
|
||||
QuestionAnswerResults,
|
||||
)
|
||||
from onyx.agent_search.shared_graph_utils.models import RefinedAgentStats
|
||||
from onyx.agent_search.shared_graph_utils.operators import dedup_inference_sections
|
||||
from onyx.agent_search.shared_graph_utils.operators import dedup_question_answer_results
|
||||
from onyx.context.search.models import InferenceSection
|
||||
|
||||
|
||||
### States ###
|
||||
|
||||
## Update States
|
||||
|
||||
|
||||
class RefinedAgentStartStats(TypedDict):
|
||||
agent_refined_start_time: datetime | None
|
||||
|
||||
|
||||
class RefinedAgentEndStats(TypedDict):
|
||||
agent_refined_end_time: datetime | None
|
||||
agent_refined_metrics: AgentRefinedMetrics
|
||||
|
||||
|
||||
class BaseDecompUpdateBase(TypedDict):
|
||||
agent_start_time: datetime
|
||||
initial_decomp_questions: list[str]
|
||||
|
||||
|
||||
class BaseDecompUpdate(
|
||||
RefinedAgentStartStats, RefinedAgentEndStats, BaseDecompUpdateBase
|
||||
):
|
||||
pass
|
||||
|
||||
|
||||
class InitialAnswerBASEUpdate(TypedDict):
|
||||
initial_base_answer: str
|
||||
|
||||
|
||||
class InitialAnswerUpdate(TypedDict):
|
||||
initial_answer: str
|
||||
initial_agent_stats: InitialAgentResultStats | None
|
||||
generated_sub_questions: list[str]
|
||||
agent_base_end_time: datetime
|
||||
agent_base_metrics: AgentBaseMetrics
|
||||
|
||||
|
||||
class RefinedAnswerUpdateBase(TypedDict):
|
||||
refined_answer: str
|
||||
refined_agent_stats: RefinedAgentStats | None
|
||||
refined_answer_quality: bool
|
||||
|
||||
|
||||
class RefinedAnswerUpdate(RefinedAgentEndStats, RefinedAnswerUpdateBase):
|
||||
pass
|
||||
|
||||
|
||||
class InitialAnswerQualityUpdate(TypedDict):
|
||||
initial_answer_quality: bool
|
||||
|
||||
|
||||
class RequireRefinedAnswerUpdate(TypedDict):
|
||||
require_refined_answer: bool
|
||||
|
||||
|
||||
class DecompAnswersUpdate(TypedDict):
|
||||
documents: Annotated[list[InferenceSection], dedup_inference_sections]
|
||||
decomp_answer_results: Annotated[
|
||||
list[QuestionAnswerResults], dedup_question_answer_results
|
||||
]
|
||||
|
||||
|
||||
class FollowUpDecompAnswersUpdate(TypedDict):
|
||||
refined_documents: Annotated[list[InferenceSection], dedup_inference_sections]
|
||||
refined_decomp_answer_results: Annotated[list[QuestionAnswerResults], add]
|
||||
|
||||
|
||||
class ExpandedRetrievalUpdate(TypedDict):
|
||||
all_original_question_documents: Annotated[
|
||||
list[InferenceSection], dedup_inference_sections
|
||||
]
|
||||
original_question_retrieval_results: list[QueryResult]
|
||||
original_question_retrieval_stats: AgentChunkStats
|
||||
|
||||
|
||||
class EntityTermExtractionUpdate(TypedDict):
|
||||
entity_retlation_term_extractions: EntityRelationshipTermExtraction
|
||||
|
||||
|
||||
class FollowUpSubQuestionsUpdateBase(TypedDict):
|
||||
refined_sub_questions: dict[int, FollowUpSubQuestion]
|
||||
|
||||
|
||||
class FollowUpSubQuestionsUpdate(
|
||||
RefinedAgentStartStats, FollowUpSubQuestionsUpdateBase
|
||||
):
|
||||
pass
|
||||
|
||||
|
||||
## Graph Input State
|
||||
## Graph Input State
|
||||
|
||||
|
||||
class MainInput(CoreState):
|
||||
pass
|
||||
|
||||
|
||||
## Graph State
|
||||
|
||||
|
||||
class MainState(
|
||||
# This includes the core state
|
||||
MainInput,
|
||||
BaseDecompUpdateBase,
|
||||
InitialAnswerUpdate,
|
||||
InitialAnswerBASEUpdate,
|
||||
DecompAnswersUpdate,
|
||||
ExpandedRetrievalUpdate,
|
||||
EntityTermExtractionUpdate,
|
||||
InitialAnswerQualityUpdate,
|
||||
RequireRefinedAnswerUpdate,
|
||||
FollowUpSubQuestionsUpdateBase,
|
||||
FollowUpDecompAnswersUpdate,
|
||||
RefinedAnswerUpdateBase,
|
||||
RefinedAgentStartStats,
|
||||
RefinedAgentEndStats,
|
||||
):
|
||||
# expanded_retrieval_result: Annotated[list[ExpandedRetrievalResult], add]
|
||||
base_raw_search_result: Annotated[list[ExpandedRetrievalResult], add]
|
||||
|
||||
|
||||
## Graph Output State - presently not used
|
||||
|
||||
|
||||
class MainOutput(TypedDict):
|
||||
pass
|
||||
@@ -1,26 +0,0 @@
from collections.abc import Hashable

from langgraph.types import Send

from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionInput,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.states import (
    ExpandedRetrievalInput,
)
from onyx.utils.logger import setup_logger

logger = setup_logger()


def send_to_expanded_retrieval(state: AnswerQuestionInput) -> Send | Hashable:
    logger.debug("sending to expanded retrieval via edge")

    return Send(
        "initial_sub_question_expanded_retrieval",
        ExpandedRetrievalInput(
            question=state["question"],
            base_search=False,
            sub_question_id=state["question_id"],
        ),
    )
@@ -1,129 +0,0 @@
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph

from onyx.agent_search.pro_search_b.answer_initial_sub_question.edges import (
    send_to_expanded_retrieval,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.nodes.answer_check import (
    answer_check,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.nodes.answer_generation import (
    answer_generation,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.nodes.format_answer import (
    format_answer,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.nodes.ingest_retrieval import (
    ingest_retrieval,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionInput,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionOutput,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionState,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.graph_builder import (
    expanded_retrieval_graph_builder,
)
from onyx.agent_search.shared_graph_utils.utils import get_test_config
from onyx.utils.logger import setup_logger

logger = setup_logger()


def answer_query_graph_builder() -> StateGraph:
    graph = StateGraph(
        state_schema=AnswerQuestionState,
        input=AnswerQuestionInput,
        output=AnswerQuestionOutput,
    )

    ### Add nodes ###

    expanded_retrieval = expanded_retrieval_graph_builder().compile()
    graph.add_node(
        node="initial_sub_question_expanded_retrieval",
        action=expanded_retrieval,
    )
    graph.add_node(
        node="answer_check",
        action=answer_check,
    )
    graph.add_node(
        node="answer_generation",
        action=answer_generation,
    )
    graph.add_node(
        node="format_answer",
        action=format_answer,
    )
    graph.add_node(
        node="ingest_retrieval",
        action=ingest_retrieval,
    )

    ### Add edges ###

    graph.add_conditional_edges(
        source=START,
        path=send_to_expanded_retrieval,
        path_map=["initial_sub_question_expanded_retrieval"],
    )
    graph.add_edge(
        start_key="initial_sub_question_expanded_retrieval",
        end_key="ingest_retrieval",
    )
    graph.add_edge(
        start_key="ingest_retrieval",
        end_key="answer_generation",
    )
    graph.add_edge(
        start_key="answer_generation",
        end_key="answer_check",
    )
    graph.add_edge(
        start_key="answer_check",
        end_key="format_answer",
    )
    graph.add_edge(
        start_key="format_answer",
        end_key=END,
    )

    return graph


if __name__ == "__main__":
    from onyx.db.engine import get_session_context_manager
    from onyx.llm.factory import get_default_llms
    from onyx.context.search.models import SearchRequest

    graph = answer_query_graph_builder()
    compiled_graph = graph.compile()
    primary_llm, fast_llm = get_default_llms()
    search_request = SearchRequest(
        query="what can you do with onyx or danswer?",
    )
    with get_session_context_manager() as db_session:
        pro_search_config, search_tool = get_test_config(
            db_session, primary_llm, fast_llm, search_request
        )
        inputs = AnswerQuestionInput(
            question="what can you do with onyx?",
            subgraph_fast_llm=fast_llm,
            subgraph_primary_llm=primary_llm,
            subgraph_config=pro_search_config,
            subgraph_search_tool=search_tool,
            subgraph_db_session=db_session,
            question_id="0_0",
        )
        for thing in compiled_graph.stream(
            input=inputs,
            # debug=True,
            # subgraphs=True,
        ):
            logger.debug(thing)
@@ -1,8 +0,0 @@
from pydantic import BaseModel


### Models ###


class AnswerRetrievalStats(BaseModel):
    answer_retrieval_stats: dict[str, float | int]
@@ -1,14 +0,0 @@
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionState,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    QACheckUpdate,
)


def answer_check(state: AnswerQuestionState) -> QACheckUpdate:
    quality_str = "yes"

    return QACheckUpdate(
        answer_quality=quality_str,
    )
@@ -1,41 +0,0 @@
import datetime

from langchain_core.callbacks.manager import dispatch_custom_event

from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionState,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    QAGenerationUpdate,
)
from onyx.agent_search.shared_graph_utils.utils import get_persona_prompt
from onyx.agent_search.shared_graph_utils.utils import parse_question_id
from onyx.chat.models import AgentAnswerPiece
from onyx.utils.logger import setup_logger

logger = setup_logger()


def answer_generation(state: AnswerQuestionState) -> QAGenerationUpdate:
    now_start = datetime.datetime.now()
    logger.debug(f"--------{now_start}--------START ANSWER GENERATION---")

    state["question"]
    state["documents"]
    level, question_nr = parse_question_id(state["question_id"])
    get_persona_prompt(state["subgraph_config"].search_request.persona)

    dispatch_custom_event(
        "sub_answers",
        AgentAnswerPiece(
            answer_piece="",
            level=level,
            level_question_nr=question_nr,
            answer_type="agent_sub_answer",
        ),
    )
    answer_str = ""

    return QAGenerationUpdate(
        answer=answer_str,
    )
@@ -1,25 +0,0 @@
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionOutput,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionState,
)
from onyx.agent_search.shared_graph_utils.models import (
    QuestionAnswerResults,
)


def format_answer(state: AnswerQuestionState) -> AnswerQuestionOutput:
    return AnswerQuestionOutput(
        answer_results=[
            QuestionAnswerResults(
                question=state["question"],
                question_id=state["question_id"],
                quality=state.get("answer_quality", "No"),
                answer=state["answer"],
                expanded_retrieval_results=state["expanded_retrieval_results"],
                documents=state["documents"],
                sub_question_retrieval_stats=state["sub_question_retrieval_stats"],
            )
        ],
    )
@@ -1,23 +0,0 @@
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    RetrievalIngestionUpdate,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.states import (
    ExpandedRetrievalOutput,
)
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats


def ingest_retrieval(state: ExpandedRetrievalOutput) -> RetrievalIngestionUpdate:
    sub_question_retrieval_stats = state[
        "expanded_retrieval_result"
    ].sub_question_retrieval_stats
    if sub_question_retrieval_stats is None:
        sub_question_retrieval_stats = [AgentChunkStats()]

    return RetrievalIngestionUpdate(
        expanded_retrieval_results=state[
            "expanded_retrieval_result"
        ].expanded_queries_results,
        documents=state["expanded_retrieval_result"].all_documents,
        sub_question_retrieval_stats=sub_question_retrieval_stats,
    )
@@ -1,63 +0,0 @@
from operator import add
from typing import Annotated
from typing import TypedDict

from onyx.agent_search.core_state import SubgraphCoreState
from onyx.agent_search.pro_search_b.expanded_retrieval.models import QueryResult
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.agent_search.shared_graph_utils.models import (
    QuestionAnswerResults,
)
from onyx.agent_search.shared_graph_utils.operators import dedup_inference_sections
from onyx.context.search.models import InferenceSection


## Update States
class QACheckUpdate(TypedDict):
    answer_quality: str


class QAGenerationUpdate(TypedDict):
    answer: str
    # answer_stat: AnswerStats


class RetrievalIngestionUpdate(TypedDict):
    expanded_retrieval_results: list[QueryResult]
    documents: Annotated[list[InferenceSection], dedup_inference_sections]
    sub_question_retrieval_stats: AgentChunkStats


## Graph Input State


class AnswerQuestionInput(SubgraphCoreState):
    question: str
    question_id: str  # 0_0 is original question, everything else is <level>_<question_num>.
    # level 0 is original question and first decomposition, level 1 is follow up, etc
    # question_num is a unique number per original question per level.


## Graph State


class AnswerQuestionState(
    AnswerQuestionInput,
    QAGenerationUpdate,
    QACheckUpdate,
    RetrievalIngestionUpdate,
):
    pass


## Graph Output State


class AnswerQuestionOutput(TypedDict):
    """
    This is a list of results even though each call of this subgraph only returns one result.
    This is because if we parallelize the answer query subgraph, there will be multiple
    results in a list so the add operator is used to add them together.
    """

    answer_results: Annotated[list[QuestionAnswerResults], add]
Some files were not shown because too many files have changed in this diff