Mirror of https://github.com/onyx-dot-app/onyx.git (synced 2026-02-17 15:55:45 +00:00)

Compare commits: testing...pro-fe-exp (211 commits)
Commits (short SHA1):

00f7dbd475, fb6ea2ff9a, 599bdc7e15, 7e8380c2d7, 2bc9b8ba4a, b81b7edc82, 6836a07248, a2882b331b,
88362c2a30, 963ab82919, 399ebe68e5, 45f704bb36, 0c91bcde90, 03eca261ce, 488ba902f8, cdf9f86fcf,
b4b1cd07fe, b1e718743f, 6c5ba41228, 541383e850, 8ef642091a, 565ceda6a4, 58a55e17a9, ab78582977,
26f6c6dd44, 434e80c58b, a34f42f1f5, 168f4f7049, f76c0586a9, f901ab27da, 2153eb896c, 541a092c83,
9aa0ec72f6, 14855fcce9, 10abfdac4e, d9884f2c34, 4393e0b14a, 1bb5e7d846, f948f3a6f2, 91b27f0b06,
b3a769ca8a, adc546bcae, 885ae1c031, 5b6fc7eb73, 3824d2cc41, 722485f5bb, 8ab722e15e, cc271ec228,
78add9aa01, 9cc458329e, 239e3b5c16, 07a6a453c0, ff03e4f2c8, 6658355dc4, 7b5bb18b98, 698adffa6b,
74026341f1, b37393d7ba, 3406ebe7f4, a71bd62641, 73f9c097ce, 314ca84c27, 1804b18378, 72a01033d7,
476756c3c6, 3991eac4c0, fabc9f7e4b, 7987a3a75e, ff24f82d52, 563513698b, b2208195ce, ac9ce7ca68,
c9b34ed583, 2bc948fa73, 8ea987e068, 43e488275f, 09d0820882, 2c0ef2ea47, 7cca660a93, ab70b7c303,
0ff212a2b4, 5c31171b37, 1669a5d69f, 60db37fac1, 48a4e2c76b, e1175e15da, 27b044c030, 4a7c6a6561,
78df113c7f, 113767a061, 1851c836dc, 413a4e55f5, dfcf8791e7, 1b9f128851, d38b9bd194, ee304c9c35,
4c75b73fa8, 70509fbe7e, 6bea880695, 249dd96f25, f0e74618e2, 3e27b79040, 91a37ef345, 6c2f4e4775,
87101c8c74, 9887be3dfc, 22e209afc8, 05a4792575, f79d44bd6e, 5081d240ce, a570d39301, cd9279f0e1,
72d1928b8f, 086bba6454, 289be0a423, 862a62483c, 694925d81d, 478eb511fa, a259f92f39, 9592f0a494,
07c2336c08, 7350dd73d1, 8f428cfcec, 3610a51222, e0654c2209, 99d7c09433, 6803702ca3, 4ea0ca5a78,
561e44e443, 931a119498, fddc1882d1, 086d70a085, 5a832628e2, dfe1ed4c66, 5761557c19, 29c479f496,
b60002c791, b49c5afb09, b15e29619a, 119336035e, 25bb5983af, ca1c12f122, 9b55643e55, 233bbfa4e4,
8c67288197, 26c5c57ddb, ac15d0002a, 8cca29eeab, a365ab0c7d, fd89d1e141, 7e32d21236, 71eab9c740,
5c4451c084, 2c894aaf07, bf3da9f9cf, 4506770dd9, 25e2dfa5df, ba2d5fcc7d, f9be71ff24, b92485223b,
5790f37648, b641cfc3e4, a2dfbb5b9c, 570ba9f0b6, 650fee6e2c, 8aa7fb5027, e4bf04fd94, 70de4708d0,
4ed9f0ffc7, 7b67546199, 6c68a53c62, 9e3b1d29aa, ec7a606f4c, 4e5bc7a4ba, 55d3b0f271, 565bfa4e88,
9ad10d1f60, ad19e9aee7, b11641c2bc, b7307813d5, 3e8e544086, 7ba00f8b48, 19204b49a7, 7254cb642d,
da27c8be6d, 9d0272fe62, 167b5bad49, 3acf235c84, 0d9441da88, a915e4dfa7, 7f2610e7d4, 19323472e6,
be84cf95bf, 79d847f660, b06f56102e, 078ae4b9c7, 5d034e08fc, cafa0aac0d, f61864c36e, 5b0a1ccc31,
acb9cca1e8, e22918e31d, e5c430178d, 35f379b093, c85900e4f8, bf6b6342a1, 9adbfc1b81, b5dd5df36f,
6b0a2e11b5, 169f3fd0dc, c15a828576

8  .github/pull_request_template.md  vendored
@@ -1,15 +1,11 @@
## Description

[Provide a brief description of the changes in this PR]

## How Has This Been Tested?

[Describe the tests you ran to verify your changes]

## Backporting (check the box to trigger backport action)

Note: You have to check that the action passes, otherwise resolve the conflicts manually and tag the patches.

- [ ] This PR should be backported (make sure to check that the backport attempt succeeds)
- [ ] I have included a link to a Linear ticket in my description.
- [ ] [Optional] Override Linear Check

29  .github/workflows/pr-linear-check.yml  vendored
@@ -1,29 +0,0 @@
name: Ensure PR references Linear

on:
  pull_request:
    types: [opened, edited, reopened, synchronize]

jobs:
  linear-check:
    runs-on: ubuntu-latest
    steps:
      - name: Check PR body for Linear link or override
        run: |
          PR_BODY="${{ github.event.pull_request.body }}"

          # Looking for "https://linear.app" in the body
          if echo "$PR_BODY" | grep -qE "https://linear\.app"; then
            echo "Found a Linear link. Check passed."
            exit 0
          fi

          # Looking for a checked override: "[x] Override Linear Check"
          if echo "$PR_BODY" | grep -q "\[x\].*Override Linear Check"; then
            echo "Override box is checked. Check passed."
            exit 0
          fi

          # Otherwise, fail the run
          echo "No Linear link or override found in the PR description."
          exit 1
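
The deleted check reduces to two pattern tests over the PR body. A minimal Python sketch of the same logic (a hypothetical standalone helper, not part of the repo):

```python
# Hypothetical re-implementation of the deleted workflow's check:
# pass if the PR body links a Linear ticket or ticks the override box.
import re
import sys


def pr_references_linear(body: str) -> bool:
    # Same pattern the workflow grep'd for: any https://linear.app link.
    if re.search(r"https://linear\.app", body):
        return True
    # Same override: a checked "[x] ... Override Linear Check" box.
    if re.search(r"\[x\].*Override Linear Check", body):
        return True
    return False


if __name__ == "__main__":
    # Exit 0 (pass) or 1 (fail), mirroring the workflow's exit codes.
    sys.exit(0 if pr_references_linear(sys.stdin.read()) else 1)
```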

4  .gitignore  vendored
@@ -7,4 +7,6 @@
 .vscode/
 *.sw?
 /backend/tests/regression/answer_quality/search_test_config.yaml
 /web/test-results/
+backend/onyx/agent_search/main/test_data.json
+backend/tests/regression/answer_quality/test_data.json

7  .vscode/env_template.txt  vendored
@@ -29,7 +29,6 @@ REQUIRE_EMAIL_VERIFICATION=False
 # Set these so if you wipe the DB, you don't end up having to go through the UI every time
-GEN_AI_API_KEY=<REPLACE THIS>
+OPENAI_API_KEY=<REPLACE THIS>
 # If answer quality isn't important for dev, use gpt-4o-mini since it's cheaper
 GEN_AI_MODEL_VERSION=gpt-4o
 FAST_GEN_AI_MODEL_VERSION=gpt-4o

@@ -52,3 +51,9 @@ BING_API_KEY=<REPLACE THIS>
 # Enable the full set of Danswer Enterprise Edition features
 # NOTE: DO NOT ENABLE THIS UNLESS YOU HAVE A PAID ENTERPRISE LICENSE (or if you are using this for local testing/development)
 ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=False
+
+# Agent Search configs # TODO: Remove give proper namings
+AGENT_RETRIEVAL_STATS=False # Note: This setting will incur substantial re-ranking effort
+AGENT_RERANKING_STATS=True
+AGENT_MAX_QUERY_RETRIEVAL_RESULTS=20
+AGENT_RERANKING_MAX_QUERY_RETRIEVAL_RESULTS=20

29  .vscode/launch.template.jsonc  vendored
@@ -28,7 +28,6 @@
   "Celery heavy",
   "Celery indexing",
   "Celery beat",
-  "Celery monitoring",
 ],
 "presentation": {
   "group": "1",

@@ -52,8 +51,7 @@
   "Celery light",
   "Celery heavy",
   "Celery indexing",
-  "Celery beat",
-  "Celery monitoring",
+  "Celery beat"
 ],
 "presentation": {
   "group": "1",

@@ -271,31 +269,6 @@
   },
   "consoleTitle": "Celery indexing Console"
 },
-{
-  "name": "Celery monitoring",
-  "type": "debugpy",
-  "request": "launch",
-  "module": "celery",
-  "cwd": "${workspaceFolder}/backend",
-  "envFile": "${workspaceFolder}/.vscode/.env",
-  "env": {},
-  "args": [
-    "-A",
-    "onyx.background.celery.versioned_apps.monitoring",
-    "worker",
-    "--pool=solo",
-    "--concurrency=1",
-    "--prefetch-multiplier=1",
-    "--loglevel=INFO",
-    "--hostname=monitoring@%n",
-    "-Q",
-    "monitoring",
-  ],
-  "presentation": {
-    "group": "2",
-  },
-  "consoleTitle": "Celery monitoring Console"
-},
 {
   "name": "Celery beat",
   "type": "debugpy",
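
For context, the removed "Celery monitoring" launch entry corresponds to the worker invocation below, reconstructed from the deleted "args" array; the subprocess wrapper itself is only an illustration ("module": "celery" in a debugpy config means python -m celery):

```python
# Reconstruction of what the deleted "Celery monitoring" launch entry ran.
# The celery arguments are copied verbatim from the removed "args" array.
import subprocess
import sys

subprocess.run(
    [
        sys.executable, "-m", "celery",
        "-A", "onyx.background.celery.versioned_apps.monitoring",
        "worker",
        "--pool=solo",
        "--concurrency=1",
        "--prefetch-multiplier=1",
        "--loglevel=INFO",
        "--hostname=monitoring@%n",
        "-Q", "monitoring",
    ],
    cwd="backend",  # the launch config's cwd was ${workspaceFolder}/backend
    check=True,
)
```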
@@ -17,10 +17,9 @@ Before starting, make sure the Docker Daemon is running.
 1. Open the Debug view in VSCode (Cmd+Shift+D on macOS)
 2. From the dropdown at the top, select "Clear and Restart External Volumes and Containers" and press the green play button
 3. From the dropdown at the top, select "Run All Onyx Services" and press the green play button
-4. CD into web, run "npm i" followed by npm run dev.
-5. Now, you can navigate to onyx in your browser (default is http://localhost:3000) and start using the app
-6. You can set breakpoints by clicking to the left of line numbers to help debug while the app is running
-7. Use the debug toolbar to step through code, inspect variables, etc.
+4. Now, you can navigate to onyx in your browser (default is http://localhost:3000) and start using the app
+5. You can set breakpoints by clicking to the left of line numbers to help debug while the app is running
+6. Use the debug toolbar to step through code, inspect variables, etc.

 ## Features
@@ -152,6 +152,8 @@ async def run_async_migrations() -> None:
     if upgrade_all_tenants:
         tenant_schemas = get_all_tenant_ids()
         for schema in tenant_schemas:
+            print("MIGRRATING SCHEMA")
+            print(schema)
             try:
                 logger.info(f"Migrating schema: {schema}")
                 async with engine.connect() as connection:

@@ -198,6 +200,8 @@ def run_migrations_offline() -> None:
     engine.sync_engine.dispose()

     for schema in tenant_schemas:
+        print("MIGRRATING SCHEMA")
+        print(schema)
         logger.info(f"Migrating schema: {schema}")
         context.configure(
             url=url,
@@ -20,7 +20,7 @@ def upgrade() -> None:
     op.add_column(
         "user",
         sa.Column(
-            "shortcut_enabled", sa.Boolean(), nullable=False, server_default="false"
+            "shortcut_enabled", sa.Boolean(), nullable=False, server_default="true"
         ),
     )
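
Worth noting about this change: a server_default only affects rows written after the migration runs; rows that already exist keep their stored value. A small sketch of the effect (table and column names come from the hunk, the INSERT itself is hypothetical):

```python
# With server_default="true", an INSERT that omits shortcut_enabled now
# stores TRUE; under the old default the same statement stored FALSE.
import sqlalchemy as sa

user = sa.table("user", sa.column("email"), sa.column("shortcut_enabled"))

# shortcut_enabled is omitted, so Postgres fills it from the column's
# server default at insert time.
stmt = sa.insert(user).values(email="dev@example.com")
print(stmt)  # INSERT INTO "user" (email) VALUES (:email)
```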
@@ -1,36 +0,0 @@
"""add index to index_attempt.time_created

Revision ID: 0f7ff6d75b57
Revises: 369644546676
Create Date: 2025-01-10 14:01:14.067144

"""
from alembic import op

# revision identifiers, used by Alembic.
revision = "0f7ff6d75b57"
down_revision = "fec3db967bf7"
branch_labels: None = None
depends_on: None = None


def upgrade() -> None:
    op.create_index(
        op.f("ix_index_attempt_status"),
        "index_attempt",
        ["status"],
        unique=False,
    )

    op.create_index(
        op.f("ix_index_attempt_time_created"),
        "index_attempt",
        ["time_created"],
        unique=False,
    )


def downgrade() -> None:
    op.drop_index(op.f("ix_index_attempt_time_created"), table_name="index_attempt")

    op.drop_index(op.f("ix_index_attempt_status"), table_name="index_attempt")
@@ -0,0 +1,29 @@
"""agent_doc_result_col

Revision ID: 1adf5ea20d2b
Revises: e9cf2bd7baed
Create Date: 2025-01-05 13:14:58.344316

"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision = "1adf5ea20d2b"
down_revision = "e9cf2bd7baed"
branch_labels = None
depends_on = None


def upgrade() -> None:
    # Add the new column with JSONB type
    op.add_column(
        "sub_question",
        sa.Column("sub_question_doc_results", postgresql.JSONB(), nullable=True),
    )


def downgrade() -> None:
    # Drop the column
    op.drop_column("sub_question", "sub_question_doc_results")
@@ -41,7 +41,6 @@ def upgrade() -> None:
         sa.Column(
             "user_id", fastapi_users_db_sqlalchemy.generics.GUID(), nullable=False
         ),
-        sa.Column("disabled", sa.Boolean(), nullable=False, default=False),
         sa.ForeignKeyConstraint(
             ["input_prompt_id"],
             ["inputprompt.id"],
@@ -40,6 +40,6 @@ def upgrade() -> None:


 def downgrade() -> None:
-    op.drop_constraint("persona_category_id_fkey", "persona", type_="foreignkey")
+    op.drop_constraint("fk_persona_category", "persona", type_="foreignkey")
     op.drop_column("persona", "category_id")
     op.drop_table("persona_category")
@@ -32,7 +32,6 @@ def upgrade() -> None:
         sa.ForeignKeyConstraint(
             ["persona_label_id"],
             ["persona_label.id"],
-            ondelete="CASCADE",
         ),
         sa.PrimaryKeyConstraint("persona_id", "persona_label_id"),
     )
@@ -0,0 +1,35 @@
"""agent_metric_col_rename__s

Revision ID: 925b58bd75b6
Revises: 9787be927e58
Create Date: 2025-01-06 11:20:26.752441

"""
from alembic import op


# revision identifiers, used by Alembic.
revision = "925b58bd75b6"
down_revision = "9787be927e58"
branch_labels = None
depends_on = None


def upgrade() -> None:
    # Rename columns using PostgreSQL syntax
    op.alter_column(
        "agent__search_metrics", "base_duration_s", new_column_name="base_duration__s"
    )
    op.alter_column(
        "agent__search_metrics", "full_duration_s", new_column_name="full_duration__s"
    )


def downgrade() -> None:
    # Revert the column renames
    op.alter_column(
        "agent__search_metrics", "base_duration__s", new_column_name="base_duration_s"
    )
    op.alter_column(
        "agent__search_metrics", "full_duration__s", new_column_name="full_duration_s"
    )
@@ -0,0 +1,25 @@
"""agent_metric_table_renames__agent__

Revision ID: 9787be927e58
Revises: bceb76d618ec
Create Date: 2025-01-06 11:01:44.210160

"""
from alembic import op


# revision identifiers, used by Alembic.
revision = "9787be927e58"
down_revision = "bceb76d618ec"
branch_labels = None
depends_on = None


def upgrade() -> None:
    # Rename table from agent_search_metrics to agent__search_metrics
    op.rename_table("agent_search_metrics", "agent__search_metrics")


def downgrade() -> None:
    # Rename table back from agent__search_metrics to agent_search_metrics
    op.rename_table("agent__search_metrics", "agent_search_metrics")
@@ -1,72 +0,0 @@
"""Add SyncRecord

Revision ID: 97dbb53fa8c8
Revises: 369644546676
Create Date: 2025-01-11 19:39:50.426302

"""
from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = "97dbb53fa8c8"
down_revision = "be2ab2aa50ee"
branch_labels = None
depends_on = None


def upgrade() -> None:
    op.create_table(
        "sync_record",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("entity_id", sa.Integer(), nullable=False),
        sa.Column(
            "sync_type",
            sa.Enum(
                "DOCUMENT_SET",
                "USER_GROUP",
                "CONNECTOR_DELETION",
                name="synctype",
                native_enum=False,
                length=40,
            ),
            nullable=False,
        ),
        sa.Column(
            "sync_status",
            sa.Enum(
                "IN_PROGRESS",
                "SUCCESS",
                "FAILED",
                "CANCELED",
                name="syncstatus",
                native_enum=False,
                length=40,
            ),
            nullable=False,
        ),
        sa.Column("num_docs_synced", sa.Integer(), nullable=False),
        sa.Column("sync_start_time", sa.DateTime(timezone=True), nullable=False),
        sa.Column("sync_end_time", sa.DateTime(timezone=True), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )

    # Add index for fetch_latest_sync_record query
    op.create_index(
        "ix_sync_record_entity_id_sync_type_sync_start_time",
        "sync_record",
        ["entity_id", "sync_type", "sync_start_time"],
    )

    # Add index for cleanup_sync_records query
    op.create_index(
        "ix_sync_record_entity_id_sync_type_sync_status",
        "sync_record",
        ["entity_id", "sync_type", "sync_status"],
    )


def downgrade() -> None:
    op.drop_index("ix_sync_record_entity_id_sync_type_sync_status")
    op.drop_index("ix_sync_record_entity_id_sync_type_sync_start_time")
    op.drop_table("sync_record")

42  backend/alembic/versions/98a5008d8711_agent_tracking.py  Normal file
@@ -0,0 +1,42 @@
"""agent_tracking

Revision ID: 98a5008d8711
Revises: 027381bce97c
Create Date: 2025-01-04 14:41:52.732238

"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision = "98a5008d8711"
down_revision = "027381bce97c"
branch_labels = None
depends_on = None


def upgrade() -> None:
    op.create_table(
        "agent_search_metrics",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("user_id", postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column("persona_id", sa.Integer(), nullable=True),
        sa.Column("agent_type", sa.String(), nullable=False),
        sa.Column("start_time", sa.DateTime(timezone=True), nullable=False),
        sa.Column("base_duration_s", sa.Float(), nullable=False),
        sa.Column("full_duration_s", sa.Float(), nullable=False),
        sa.Column("base_metrics", postgresql.JSONB(), nullable=True),
        sa.Column("refined_metrics", postgresql.JSONB(), nullable=True),
        sa.Column("all_metrics", postgresql.JSONB(), nullable=True),
        sa.ForeignKeyConstraint(
            ["persona_id"],
            ["persona.id"],
        ),
        sa.ForeignKeyConstraint(["user_id"], ["user.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
    )


def downgrade() -> None:
    op.drop_table("agent_search_metrics")
@@ -1,7 +1,7 @@
 """add pinned assistants

 Revision ID: aeda5f2df4f6
-Revises: c5eae4a75a1b
+Revises: 369644546676
 Create Date: 2025-01-09 16:04:10.770636

 """

@@ -11,7 +11,7 @@ from sqlalchemy.dialects import postgresql

 # revision identifiers, used by Alembic.
 revision = "aeda5f2df4f6"
-down_revision = "c5eae4a75a1b"
+down_revision = "369644546676"
 branch_labels = None
 depends_on = None
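
This pair of edits re-parents the revision: both the docstring and down_revision move from c5eae4a75a1b to 369644546676, which is consistent with the c5eae4a75a1b migration being deleted elsewhere in this compare. Without the retarget, Alembic's revision graph would point at a missing parent.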
@@ -0,0 +1,84 @@
"""agent_table_renames__agent__

Revision ID: bceb76d618ec
Revises: c0132518a25b
Create Date: 2025-01-06 10:50:48.109285

"""
from alembic import op


# revision identifiers, used by Alembic.
revision = "bceb76d618ec"
down_revision = "c0132518a25b"
branch_labels = None
depends_on = None


def upgrade() -> None:
    op.drop_constraint(
        "sub_query__search_doc_sub_query_id_fkey",
        "sub_query__search_doc",
        type_="foreignkey",
    )
    op.drop_constraint(
        "sub_query__search_doc_search_doc_id_fkey",
        "sub_query__search_doc",
        type_="foreignkey",
    )
    # Rename tables
    op.rename_table("sub_query", "agent__sub_query")
    op.rename_table("sub_question", "agent__sub_question")
    op.rename_table("sub_query__search_doc", "agent__sub_query__search_doc")

    # Update both foreign key constraints for agent__sub_query__search_doc

    # Create new foreign keys with updated names
    op.create_foreign_key(
        "agent__sub_query__search_doc_sub_query_id_fkey",
        "agent__sub_query__search_doc",
        "agent__sub_query",
        ["sub_query_id"],
        ["id"],
    )
    op.create_foreign_key(
        "agent__sub_query__search_doc_search_doc_id_fkey",
        "agent__sub_query__search_doc",
        "search_doc",  # This table name doesn't change
        ["search_doc_id"],
        ["id"],
    )


def downgrade() -> None:
    # Update foreign key constraints for sub_query__search_doc
    op.drop_constraint(
        "agent__sub_query__search_doc_sub_query_id_fkey",
        "agent__sub_query__search_doc",
        type_="foreignkey",
    )
    op.drop_constraint(
        "agent__sub_query__search_doc_search_doc_id_fkey",
        "agent__sub_query__search_doc",
        type_="foreignkey",
    )

    # Rename tables back
    op.rename_table("agent__sub_query__search_doc", "sub_query__search_doc")
    op.rename_table("agent__sub_question", "sub_question")
    op.rename_table("agent__sub_query", "sub_query")

    op.create_foreign_key(
        "sub_query__search_doc_sub_query_id_fkey",
        "sub_query__search_doc",
        "sub_query",
        ["sub_query_id"],
        ["id"],
    )
    op.create_foreign_key(
        "sub_query__search_doc_search_doc_id_fkey",
        "sub_query__search_doc",
        "search_doc",  # This table name doesn't change
        ["search_doc_id"],
        ["id"],
    )
@@ -1,38 +0,0 @@
"""fix_capitalization

Revision ID: be2ab2aa50ee
Revises: 369644546676
Create Date: 2025-01-10 13:13:26.228960

"""
from alembic import op

# revision identifiers, used by Alembic.
revision = "be2ab2aa50ee"
down_revision = "369644546676"
branch_labels = None
depends_on = None


def upgrade() -> None:
    op.execute(
        """
        UPDATE document
        SET
            external_user_group_ids = ARRAY(
                SELECT LOWER(unnest(external_user_group_ids))
            ),
            last_modified = NOW()
        WHERE
            external_user_group_ids IS NOT NULL
            AND external_user_group_ids::text[] <> ARRAY(
                SELECT LOWER(unnest(external_user_group_ids))
            )::text[]
        """
    )


def downgrade() -> None:
    # No way to cleanly persist the bad state through an upgrade/downgrade
    # cycle, so we just pass
    pass
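
Two details of this data migration are worth calling out: the WHERE clause compares the array against its lowercased form, so only documents whose group ids actually change are touched, and last_modified = NOW() is bumped on exactly those rows, presumably so downstream syncing picks them up rather than rewriting the whole table.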
@@ -0,0 +1,40 @@
"""agent_table_changes_rename_level

Revision ID: c0132518a25b
Revises: 1adf5ea20d2b
Create Date: 2025-01-05 16:38:37.660152

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = "c0132518a25b"
down_revision = "1adf5ea20d2b"
branch_labels = None
depends_on = None


def upgrade() -> None:
    # Add level and level_question_nr columns with NOT NULL constraint
    op.add_column(
        "sub_question",
        sa.Column("level", sa.Integer(), nullable=False, server_default="0"),
    )
    op.add_column(
        "sub_question",
        sa.Column(
            "level_question_nr", sa.Integer(), nullable=False, server_default="0"
        ),
    )

    # Remove the server_default after the columns are created
    op.alter_column("sub_question", "level", server_default=None)
    op.alter_column("sub_question", "level_question_nr", server_default=None)


def downgrade() -> None:
    # Remove the columns
    op.drop_column("sub_question", "level_question_nr")
    op.drop_column("sub_question", "level")
@@ -1,36 +0,0 @@
"""Add chat_message__standard_answer table

Revision ID: c5eae4a75a1b
Revises: 0f7ff6d75b57
Create Date: 2025-01-15 14:08:49.688998

"""
from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = "c5eae4a75a1b"
down_revision = "0f7ff6d75b57"
branch_labels = None
depends_on = None


def upgrade() -> None:
    op.create_table(
        "chat_message__standard_answer",
        sa.Column("chat_message_id", sa.Integer(), nullable=False),
        sa.Column("standard_answer_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(
            ["chat_message_id"],
            ["chat_message.id"],
        ),
        sa.ForeignKeyConstraint(
            ["standard_answer_id"],
            ["standard_answer.id"],
        ),
        sa.PrimaryKeyConstraint("chat_message_id", "standard_answer_id"),
    )


def downgrade() -> None:
    op.drop_table("chat_message__standard_answer")
@@ -1,48 +0,0 @@
"""Add has_been_indexed to DocumentByConnectorCredentialPair

Revision ID: c7bf5721733e
Revises: fec3db967bf7
Create Date: 2025-01-13 12:39:05.831693

"""
from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = "c7bf5721733e"
down_revision = "027381bce97c"
branch_labels = None
depends_on = None


def upgrade() -> None:
    # assume all existing rows have been indexed, no better approach
    op.add_column(
        "document_by_connector_credential_pair",
        sa.Column("has_been_indexed", sa.Boolean(), nullable=True),
    )
    op.execute(
        "UPDATE document_by_connector_credential_pair SET has_been_indexed = TRUE"
    )
    op.alter_column(
        "document_by_connector_credential_pair",
        "has_been_indexed",
        nullable=False,
    )

    # Add index to optimize get_document_counts_for_cc_pairs query pattern
    op.create_index(
        "idx_document_cc_pair_counts",
        "document_by_connector_credential_pair",
        ["connector_id", "credential_id", "has_been_indexed"],
        unique=False,
    )


def downgrade() -> None:
    # Remove the index first before removing the column
    op.drop_index(
        "idx_document_cc_pair_counts",
        table_name="document_by_connector_credential_pair",
    )
    op.drop_column("document_by_connector_credential_pair", "has_been_indexed")
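
The upgrade follows the usual safe-backfill sequence: add the column as nullable, backfill existing rows with an UPDATE, then tighten it to NOT NULL, so existing rows are never left violating the constraint. The composite index added afterward matches the (connector_id, credential_id, has_been_indexed) shape that, per the in-code comment, get_document_counts_for_cc_pairs queries by.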
@@ -0,0 +1,68 @@
"""create pro search persistence tables

Revision ID: e9cf2bd7baed
Revises: 98a5008d8711
Create Date: 2025-01-02 17:55:56.544246

"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import UUID


# revision identifiers, used by Alembic.
revision = "e9cf2bd7baed"
down_revision = "98a5008d8711"
branch_labels = None
depends_on = None


def upgrade() -> None:
    # Create sub_question table
    op.create_table(
        "sub_question",
        sa.Column("id", sa.Integer, primary_key=True),
        sa.Column("primary_question_id", sa.Integer, sa.ForeignKey("chat_message.id")),
        sa.Column(
            "chat_session_id", UUID(as_uuid=True), sa.ForeignKey("chat_session.id")
        ),
        sa.Column("sub_question", sa.Text),
        sa.Column(
            "time_created", sa.DateTime(timezone=True), server_default=sa.func.now()
        ),
        sa.Column("sub_answer", sa.Text),
    )

    # Create sub_query table
    op.create_table(
        "sub_query",
        sa.Column("id", sa.Integer, primary_key=True),
        sa.Column("parent_question_id", sa.Integer, sa.ForeignKey("sub_question.id")),
        sa.Column(
            "chat_session_id", UUID(as_uuid=True), sa.ForeignKey("chat_session.id")
        ),
        sa.Column("sub_query", sa.Text),
        sa.Column(
            "time_created", sa.DateTime(timezone=True), server_default=sa.func.now()
        ),
    )

    # Create sub_query__search_doc association table
    op.create_table(
        "sub_query__search_doc",
        sa.Column(
            "sub_query_id", sa.Integer, sa.ForeignKey("sub_query.id"), primary_key=True
        ),
        sa.Column(
            "search_doc_id",
            sa.Integer,
            sa.ForeignKey("search_doc.id"),
            primary_key=True,
        ),
    )


def downgrade() -> None:
    op.drop_table("sub_query__search_doc")
    op.drop_table("sub_query")
    op.drop_table("sub_question")
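
A sketch of how the three new tables relate on the read side; the table and column names come from the migration above, but the query itself is illustrative only, not code from the repo:

```python
# Illustrative join across the new tables: for one chat session, fetch
# each sub_question, its sub_queries, and the docs each sub_query hit.
import sqlalchemy as sa

sub_question = sa.table(
    "sub_question",
    sa.column("id"), sa.column("chat_session_id"), sa.column("sub_question"),
)
sub_query = sa.table(
    "sub_query",
    sa.column("id"), sa.column("parent_question_id"), sa.column("sub_query"),
)
link = sa.table(
    "sub_query__search_doc",
    sa.column("sub_query_id"), sa.column("search_doc_id"),
)

stmt = (
    sa.select(sub_question.c.sub_question, sub_query.c.sub_query, link.c.search_doc_id)
    .select_from(
        sub_question.join(
            sub_query, sub_query.c.parent_question_id == sub_question.c.id
        ).join(link, link.c.sub_query_id == sub_query.c.id)
    )
    .where(sub_question.c.chat_session_id == sa.bindparam("chat_session_id"))
)
```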
@@ -1,41 +0,0 @@
"""Add time_updated to UserGroup and DocumentSet

Revision ID: fec3db967bf7
Revises: 97dbb53fa8c8
Create Date: 2025-01-12 15:49:02.289100

"""
from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = "fec3db967bf7"
down_revision = "97dbb53fa8c8"
branch_labels = None
depends_on = None


def upgrade() -> None:
    op.add_column(
        "document_set",
        sa.Column(
            "time_last_modified_by_user",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.func.now(),
        ),
    )
    op.add_column(
        "user_group",
        sa.Column(
            "time_last_modified_by_user",
            sa.DateTime(timezone=True),
            nullable=False,
            server_default=sa.func.now(),
        ),
    )


def downgrade() -> None:
    op.drop_column("user_group", "time_last_modified_by_user")
    op.drop_column("document_set", "time_last_modified_by_user")

370  backend/chat_packets.log  Normal file

File diff suppressed because one or more lines are too long

536  backend/chatt.txt  Normal file

@@ -0,0 +1,536 @@
|
||||
"{\"user_message_id\": 475, \"reserved_assistant_message_id\": 476}\n"
|
||||
"{\"sub_question\": \"What\", \"level\": 0, \"level_question_nr\": 1}\n"
|
||||
"{\"sub_query\": \"ony\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_question\": \" is\", \"level\": 0, \"level_question_nr\": 1}\n"
|
||||
"{\"sub_query\": \"x\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_question\": \" On\", \"level\": 0, \"level_question_nr\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_question\": \"yx\", \"level\": 0, \"level_question_nr\": 1}\n"
|
||||
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 1}\n"
|
||||
"{\"sub_query\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_question\": \"1\", \"level\": 0, \"level_question_nr\": 1}\n"
|
||||
"{\"sub_query\": \" features\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_question\": \"?\", \"level\": 0, \"level_question_nr\": 1}\n"
|
||||
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 1}\n"
|
||||
"{\"sub_question\": \"\", \"level\": 0, \"level_question_nr\": 2}\n"
|
||||
"{\"sub_question\": \"What\", \"level\": 0, \"level_question_nr\": 2}\n"
|
||||
"{\"sub_question\": \" is\", \"level\": 0, \"level_question_nr\": 2}\n"
|
||||
"{\"sub_query\": \" specifications\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_question\": \" On\", \"level\": 0, \"level_question_nr\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_question\": \"yx\", \"level\": 0, \"level_question_nr\": 2}\n"
|
||||
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 2}\n"
|
||||
"{\"sub_question\": \"2\", \"level\": 0, \"level_question_nr\": 2}\n"
|
||||
"{\"sub_query\": \"ony\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_question\": \"?\", \"level\": 0, \"level_question_nr\": 2}\n"
|
||||
"{\"sub_query\": \"x\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_question\": \"\", \"level\": 0, \"level_question_nr\": 3}\n"
|
||||
"{\"sub_query\": \"2\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_question\": \"What\", \"level\": 0, \"level_question_nr\": 3}\n"
|
||||
"{\"sub_query\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_question\": \" is\", \"level\": 0, \"level_question_nr\": 3}\n"
|
||||
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_question\": \" On\", \"level\": 0, \"level_question_nr\": 3}\n"
|
||||
"{\"sub_query\": \" use\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_question\": \"yx\", \"level\": 0, \"level_question_nr\": 3}\n"
|
||||
"{\"sub_query\": \" cases\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 3}\n"
|
||||
"{\"sub_question\": \"3\", \"level\": 0, \"level_question_nr\": 3}\n"
|
||||
"{\"sub_question\": \"?\", \"level\": 0, \"level_question_nr\": 3}\n"
|
||||
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 3}\n"
|
||||
"{\"sub_question\": \"\", \"level\": 0, \"level_question_nr\": 4}\n"
|
||||
"{\"sub_question\": \"What\", \"level\": 0, \"level_question_nr\": 4}\n"
|
||||
"{\"sub_question\": \" is\", \"level\": 0, \"level_question_nr\": 4}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_question\": \" On\", \"level\": 0, \"level_question_nr\": 4}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_question\": \"yx\", \"level\": 0, \"level_question_nr\": 4}\n"
|
||||
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 4}\n"
|
||||
"{\"sub_query\": \"ony\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_question\": \"4\", \"level\": 0, \"level_question_nr\": 4}\n"
|
||||
"{\"sub_question\": \"?\", \"level\": 0, \"level_question_nr\": 4}\n"
|
||||
"{\"sub_query\": \"x\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 4}\n"
|
||||
"{\"sub_question\": \"\", \"level\": 0, \"level_question_nr\": 4}\n"
|
||||
"{\"sub_query\": \"3\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"4\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" comparison\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" differences\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"4\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" product\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"3\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" information\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" software\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" features\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" software\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"2\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" features\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" software\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \" features\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"4\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" applications\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" features\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" in\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" industry\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" specifications\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"2\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" applications\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"3\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" applications\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" in\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" industry\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"4\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" comparison\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" with\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" previous\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" use\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" versions\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" in\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" industry\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"3\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" comparison\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"2\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" comparison\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" with\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" cases\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" previous\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" with\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" other\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" software\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" versions\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
|
||||
"{\"top_documents\": [], \"rephrased_query\": \"What is Onyx 4?\", \"predicted_flow\": \"question-answer\", \"predicted_search\": \"keyword\", \"applied_source_filters\": null, \"applied_time_cutoff\": null, \"recency_bias_multiplier\": 0.5}\n"
|
||||
"{\"llm_selected_doc_indices\": []}\n"
|
||||
"{\"final_context_docs\": []}\n"
|
||||
"{\"answer_piece\": \"I\", \"level\": 0, \"level_question_nr\": 3, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" don't\", \"level\": 0, \"level_question_nr\": 3, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" know\", \"level\": 0, \"level_question_nr\": 3, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 3, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" formerly\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" known\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" D\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"answer\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" an\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" AI\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" Assistant\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" that\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" connects\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" to\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" company's\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" documents\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" personnel\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" It\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" provides\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" chat\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" interface\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" can\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" integrate\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" with\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" any\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" large\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" language\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" model\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" (\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"LL\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \"M\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \")\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"top_documents\": [], \"rephrased_query\": \"What is Onyx 2?\", \"predicted_flow\": \"question-answer\", \"predicted_search\": \"keyword\", \"applied_source_filters\": null, \"applied_time_cutoff\": null, \"recency_bias_multiplier\": 0.5}\n"
|
||||
"{\"llm_selected_doc_indices\": []}\n"
|
||||
"{\"final_context_docs\": []}\n"
|
||||
"{\"answer_piece\": \" of\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" choice\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
|
||||
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" designed\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" to\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" be\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" modular\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" easily\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" extens\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"ible\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" allowing\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" for\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" deployment\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" on\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" various\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" platforms\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" including\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" laptops\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" on\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"-prem\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"ise\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" or\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" cloud\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" environments\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" It\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" ensures\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" that\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" user\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" data\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" chats\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" remain\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"I\", \"level\": 0, \"level_question_nr\": 1, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" under\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" don't\", \"level\": 0, \"level_question_nr\": 1, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" the\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" user's\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" know\", \"level\": 0, \"level_question_nr\": 1, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 1, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" control\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" the\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" deployment\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" owned\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" by\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" the\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" user\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" MIT\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" licensed\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" comes\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" ready\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" for\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" production\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" use\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" featuring\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" user\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" authentication\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" role\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" management\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" chat\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" persistence\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" user\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" interface\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" for\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" configuring\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" AI\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" Assist\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"ants\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" their\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" prompts\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" Additionally\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" serves\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" unified\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" search\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" tool\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" across\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" common\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" workplace\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" like\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" Slack\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" Google\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" Drive\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" Con\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"fluence\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" enabling\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" it\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" to\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" act\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" subject\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" matter\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" expert\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" for\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" teams\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" by\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" combining\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" L\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"LM\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"s\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" with\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" team\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"-specific\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" knowledge\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" [[1]]()\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"top_documents\": [], \"rephrased_query\": \"What is Onyx 3?\", \"predicted_flow\": \"question-answer\", \"predicted_search\": \"keyword\", \"applied_source_filters\": null, \"applied_time_cutoff\": null, \"recency_bias_multiplier\": 0.5}\n"
"{\"llm_selected_doc_indices\": []}\n"
"{\"final_context_docs\": []}\n"
"{\"answer_piece\": \"I\", \"level\": 0, \"level_question_nr\": 2, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" don't\", \"level\": 0, \"level_question_nr\": 2, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" know\", \"level\": 0, \"level_question_nr\": 2, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 2, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"top_documents\": [{\"document_id\": \"https://docs.onyx.app/introduction\", \"chunk_ind\": 0, \"semantic_identifier\": \"Introduction - Onyx Documentation\", \"link\": \"https://docs.onyx.app/introduction\", \"blurb\": \"Onyx Documentation home page\\nSearch...\\nNavigation\\nWelcome to Onyx\\nIntroduction\\nWelcome to Onyx\\nIntroduction\\nOnyx Overview\\n\\nWhat is Onyx\\nOnyx (Formerly Danswer) is the AI Assistant connected to your companys docs, apps, and people. Onyx provides a Chat interface and plugs into any LLM of your choice. Onyx can be deployed anywhere and for any scale - on a laptop, on-premise, or to cloud. Since you own the deployment, your user data and chats are fully in your own control. Onyx is MIT licensed and designed to be modular and easily extensible.\", \"source_type\": \"web\", \"boost\": 0, \"hidden\": false, \"metadata\": {}, \"score\": 0.6275177643886491, \"is_relevant\": null, \"relevance_explanation\": null, \"match_highlights\": [\"\", \"such as A customer wants feature X, is this already supported? or Wheres the pull request for feature Y?\\n<hi>Onyx</hi> can also be plugged into existing tools like Slack to get answers and AI chats directly in Slack.\\n\\nDemo\\n\\nMain <hi>Features</hi> \\n- Chat UI with the ability to select documents to chat with.\\n- Create custom AI Assistants\", \"\"], \"updated_at\": null, \"primary_owners\": null, \"secondary_owners\": null, \"is_internet\": false, \"db_doc_id\": 35923}], \"rephrased_query\": \"what is onyx 1, 2, 3, 4\", \"predicted_flow\": \"question-answer\", \"predicted_search\": \"keyword\", \"applied_source_filters\": null, \"applied_time_cutoff\": null, \"recency_bias_multiplier\": 0.5}\n"
"{\"llm_selected_doc_indices\": []}\n"
"{\"final_context_docs\": [{\"document_id\": \"https://docs.onyx.app/introduction\", \"content\": \"Onyx Documentation home page\\nSearch...\\nNavigation\\nWelcome to Onyx\\nIntroduction\\nWelcome to Onyx\\nIntroduction\\nOnyx Overview\\n\\nWhat is Onyx\\nOnyx (Formerly Danswer) is the AI Assistant connected to your companys docs, apps, and people. Onyx provides a Chat interface and plugs into any LLM of your choice. Onyx can be deployed anywhere and for any scale - on a laptop, on-premise, or to cloud. Since you own the deployment, your user data and chats are fully in your own control. Onyx is MIT licensed and designed to be modular and easily extensible. The system also comes fully ready for production usage with user authentication, role management (admin/basic users), chat persistence, and a UI for configuring Personas (AI Assistants) and their Prompts.\\nOnyx also serves as a Unified Search across all common workplace tools such as Slack, Google Drive, Confluence, etc. By combining LLMs and team specific knowledge, Onyx becomes a subject matter expert for the team. Its like ChatGPT if it had access to your teams unique knowledge! It enables questions such as A customer wants feature X, is this already supported? or Wheres the pull request for feature Y?\\nOnyx can also be plugged into existing tools like Slack to get answers and AI chats directly in Slack.\\n\\nDemo\\n\\nMain Features \\n- Chat UI with the ability to select documents to chat with.\\n- Create custom AI Assistants with different prompts and backing knowledge sets.\\n- Connect Onyx with LLM of your choice (self-host for a fully airgapped solution).\\n- Document Search + AI Answers for natural language queries.\\n- Connectors to all common workplace tools like Google Drive, Confluence, Slack, etc.\\n- Slack integration to get answers and search results directly in Slack.\\n\\nUpcoming\\n- Chat/Prompt sharing with specific teammates and user groups.\\n- Multi-modal model support, chat with images, video etc.\\n- Choosing between LLMs and parameters during chat session.\\n- Tool calling and agent configurations options.\\n- Organizational understanding and ability to locate and suggest experts from your team.\\n\\nOther Noteable Benefits of Onyx\\n- User Authentication with document level access management.\\n- Best in class Hybrid Search across all sources (BM-25 + prefix aware embedding models).\\n- Admin Dashboard to configure connectors, document-sets, access, etc.\\n- Custom deep learning models + learn from user feedback.\\n- Easy deployment and ability to host Onyx anywhere of your choosing.\\nQuickstart\", \"blurb\": \"Onyx Documentation home page\\nSearch...\\nNavigation\\nWelcome to Onyx\\nIntroduction\\nWelcome to Onyx\\nIntroduction\\nOnyx Overview\\n\\nWhat is Onyx\\nOnyx (Formerly Danswer) is the AI Assistant connected to your companys docs, apps, and people. Onyx provides a Chat interface and plugs into any LLM of your choice. Onyx can be deployed anywhere and for any scale - on a laptop, on-premise, or to cloud. Since you own the deployment, your user data and chats are fully in your own control. Onyx is MIT licensed and designed to be modular and easily extensible.\", \"semantic_identifier\": \"Introduction - Onyx Documentation\", \"source_type\": \"web\", \"metadata\": {}, \"updated_at\": null, \"link\": \"https://docs.onyx.app/introduction\", \"source_links\": {\"0\": \"https://docs.onyx.app/introduction\"}, \"match_highlights\": [\"\", \"such as A customer wants feature X, is this already supported? or Wheres the pull request for feature Y?\\n<hi>Onyx</hi> can also be plugged into existing tools like Slack to get answers and AI chats directly in Slack.\\n\\nDemo\\n\\nMain <hi>Features</hi> \\n- Chat UI with the ability to select documents to chat with.\\n- Create custom AI Assistants\", \"\"]}]}\n"
"{\"answer_piece\": \"I\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" cannot\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" reliably\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" answer\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" the\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" question\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" about\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"2\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"3\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"4\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" the\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" provided\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" information\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" only\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" describes\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" which\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" an\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" AI\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" Assistant\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" formerly\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" known\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" D\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"answer\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" connects\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" to\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" company's\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" documents\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" personnel\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" providing\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" chat\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" interface\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" integration\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" with\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" any\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" large\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" language\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" model\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" (\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"LL\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"M\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \")\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" of\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" choice\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" It\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" designed\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" to\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" be\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" modular\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" easily\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" extens\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"ible\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" can\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" be\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" deployed\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" on\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" various\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" platforms\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" while\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" ensuring\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" user\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" data\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" control\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" It\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" also\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" serves\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" unified\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" search\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" tool\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" across\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" common\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" workplace\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" like\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" Slack\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" Google\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" Drive\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" Con\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"fluence\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" acting\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" subject\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" matter\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" expert\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" for\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" teams\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" [[1]]()\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"{{1}}\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"There\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" no\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" information\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" available\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" regarding\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"2\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"3\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" or\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"4\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" so\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" I\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" cannot\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" provide\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" details\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" about\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" them\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"citations\": []}\n"
"{\"message_id\": 476, \"parent_message\": 475, \"latest_child_message\": null, \"message\": \"I cannot reliably answer the question about Onyx 2, 3, and 4, as the provided information only describes Onyx 1, which is an AI Assistant formerly known as Danswer. Onyx 1 connects to a company's documents, applications, and personnel, providing a chat interface and integration with any large language model (LLM) of choice. It is designed to be modular, easily extensible, and can be deployed on various platforms while ensuring user data control. It also serves as a unified search tool across common workplace applications like Slack, Google Drive, and Confluence, acting as a subject matter expert for teams [[1]](){{1}}There is no information available regarding Onyx 2, 3, or 4, so I cannot provide details about them.\", \"rephrased_query\": \"what is onyx 1, 2, 3, 4\", \"context_docs\": {\"top_documents\": [{\"document_id\": \"https://docs.onyx.app/introduction\", \"chunk_ind\": 0, \"semantic_identifier\": \"Introduction - Onyx Documentation\", \"link\": \"https://docs.onyx.app/introduction\", \"blurb\": \"Onyx Documentation home page\\nSearch...\\nNavigation\\nWelcome to Onyx\\nIntroduction\\nWelcome to Onyx\\nIntroduction\\nOnyx Overview\\n\\nWhat is Onyx\\nOnyx (Formerly Danswer) is the AI Assistant connected to your companys docs, apps, and people. Onyx provides a Chat interface and plugs into any LLM of your choice. Onyx can be deployed anywhere and for any scale - on a laptop, on-premise, or to cloud. Since you own the deployment, your user data and chats are fully in your own control. Onyx is MIT licensed and designed to be modular and easily extensible.\", \"source_type\": \"web\", \"boost\": 0, \"hidden\": false, \"metadata\": {}, \"score\": 0.6275177643886491, \"is_relevant\": null, \"relevance_explanation\": null, \"match_highlights\": [\"\", \"such as A customer wants feature X, is this already supported? or Wheres the pull request for feature Y?\\n<hi>Onyx</hi> can also be plugged into existing tools like Slack to get answers and AI chats directly in Slack.\\n\\nDemo\\n\\nMain <hi>Features</hi> \\n- Chat UI with the ability to select documents to chat with.\\n- Create custom AI Assistants\", \"\"], \"updated_at\": null, \"primary_owners\": null, \"secondary_owners\": null, \"is_internet\": false, \"db_doc_id\": 35923}]}, \"message_type\": \"assistant\", \"time_sent\": \"2025-01-12T05:37:18.318251+00:00\", \"overridden_model\": \"gpt-4o\", \"alternate_assistant_id\": 0, \"chat_session_id\": \"40f91916-7419-48d1-9681-5882b0869d88\", \"citations\": {}, \"sub_questions\": [], \"files\": [], \"tool_call\": null}\n"
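
Each line in the capture above is one streamed packet: token-level answer_piece packets tagged with level, level_question_nr, and answer_type (note how pieces for sub-questions 0 and 1 arrive interleaved), then retrieval packets (top_documents, llm_selected_doc_indices, final_context_docs), a citations packet, and a final assembled message. A minimal consumer sketch, assuming each stream line is a standalone JSON object (the capture shows them as escaped string literals, so a real test would unquote them first):

import json
from collections import defaultdict

def assemble_stream(lines: list[str]) -> tuple[dict, str | None]:
    # Group token packets by (level, question_nr, answer_type) and join them;
    # remember the final assembled message if one arrives.
    pieces: dict[tuple[int, int, str], list[str]] = defaultdict(list)
    final_message = None
    for line in lines:
        packet = json.loads(line)
        if "answer_piece" in packet:
            key = (packet["level"], packet["level_question_nr"], packet["answer_type"])
            pieces[key].append(packet["answer_piece"])
        elif "message" in packet:
            final_message = packet["message"]
    return {key: "".join(parts) for key, parts in pieces.items()}, final_message
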
@@ -1,9 +1,6 @@
from datetime import timedelta
from typing import Any

from onyx.background.celery.tasks.beat_schedule import (
    cloud_tasks_to_schedule as base_cloud_tasks_to_schedule,
)
from onyx.background.celery.tasks.beat_schedule import (
    tasks_to_schedule as base_tasks_to_schedule,
)
@@ -11,7 +8,7 @@ from onyx.configs.constants import OnyxCeleryTask

ee_tasks_to_schedule = [
    {
-        "name": "autogenerate-usage-report",
+        "name": "autogenerate_usage_report",
        "task": OnyxCeleryTask.AUTOGENERATE_USAGE_REPORT_TASK,
        "schedule": timedelta(days=30),  # TODO: change this to config flag
    },
@@ -23,9 +20,5 @@ ee_tasks_to_schedule = [
]


def get_cloud_tasks_to_schedule() -> list[dict[str, Any]]:
    return base_cloud_tasks_to_schedule


def get_tasks_to_schedule() -> list[dict[str, Any]]:
    return ee_tasks_to_schedule + base_tasks_to_schedule

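
The two getters above let the enterprise layer stack its periodic tasks on top of the base schedule. A hedged sketch of how a caller might turn the returned dicts into a Celery beat schedule; the Celery("onyx") app object and the wiring are assumptions for illustration, not code from this diff:

from celery import Celery

app = Celery("onyx")
# Each entry carries "name", "task", and "schedule" keys, per the
# ee_tasks_to_schedule literal above.
app.conf.beat_schedule = {
    entry["name"]: {"task": entry["task"], "schedule": entry["schedule"]}
    for entry in get_tasks_to_schedule()
}
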
@@ -8,9 +8,6 @@ from ee.onyx.db.user_group import fetch_user_group
from ee.onyx.db.user_group import mark_user_group_as_synced
from ee.onyx.db.user_group import prepare_user_group_for_deletion
from onyx.background.celery.apps.app_base import task_logger
from onyx.db.enums import SyncStatus
from onyx.db.enums import SyncType
from onyx.db.sync_record import update_sync_record_status
from onyx.redis.redis_usergroup import RedisUserGroup
from onyx.utils.logger import setup_logger

@@ -46,59 +43,24 @@ def monitor_usergroup_taskset(
        f"User group sync progress: usergroup_id={usergroup_id} remaining={count} initial={initial_count}"
    )
    if count > 0:
        update_sync_record_status(
            db_session=db_session,
            entity_id=usergroup_id,
            sync_type=SyncType.USER_GROUP,
            sync_status=SyncStatus.IN_PROGRESS,
            num_docs_synced=count,
        )
        return

    user_group = fetch_user_group(db_session=db_session, user_group_id=usergroup_id)
    if user_group:
        usergroup_name = user_group.name
        try:
            if user_group.is_up_for_deletion:
                # this prepare should have been run when the deletion was scheduled,
                # but run it again to be sure we're ready to go
                mark_user_group_as_synced(db_session, user_group)
                prepare_user_group_for_deletion(db_session, usergroup_id)
                delete_user_group(db_session=db_session, user_group=user_group)

                update_sync_record_status(
                    db_session=db_session,
                    entity_id=usergroup_id,
                    sync_type=SyncType.USER_GROUP,
                    sync_status=SyncStatus.SUCCESS,
                    num_docs_synced=initial_count,
                )

                task_logger.info(
                    f"Deleted usergroup: name={usergroup_name} id={usergroup_id}"
                )
            else:
                mark_user_group_as_synced(db_session=db_session, user_group=user_group)

                update_sync_record_status(
                    db_session=db_session,
                    entity_id=usergroup_id,
                    sync_type=SyncType.USER_GROUP,
                    sync_status=SyncStatus.SUCCESS,
                    num_docs_synced=initial_count,
                )

                task_logger.info(
                    f"Synced usergroup. name={usergroup_name} id={usergroup_id}"
                )
        except Exception as e:
            update_sync_record_status(
                db_session=db_session,
                entity_id=usergroup_id,
                sync_type=SyncType.USER_GROUP,
                sync_status=SyncStatus.FAILED,
                num_docs_synced=initial_count,
            )
        if user_group.is_up_for_deletion:
            # this prepare should have been run when the deletion was scheduled,
            # but run it again to be sure we're ready to go
            mark_user_group_as_synced(db_session, user_group)
            prepare_user_group_for_deletion(db_session, usergroup_id)
            delete_user_group(db_session=db_session, user_group=user_group)
            task_logger.info(
                f"Deleted usergroup: name={usergroup_name} id={usergroup_id}"
            )
        else:
            mark_user_group_as_synced(db_session=db_session, user_group=user_group)
            task_logger.info(
                f"Synced usergroup. name={usergroup_name} id={usergroup_id}"
            )
            raise e

    rug.reset()

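
Every outcome in the monitor above funnels through update_sync_record_status. A rough sketch of that pattern in isolation; the work callable is hypothetical, standing in for the delete/sync body:

def _finalize_sync(db_session, usergroup_id: int, initial_count: int, work) -> None:
    # Record SUCCESS when the body completes and FAILED when it raises,
    # mirroring the try/except structure in monitor_usergroup_taskset.
    try:
        work()
    except Exception:
        update_sync_record_status(
            db_session=db_session,
            entity_id=usergroup_id,
            sync_type=SyncType.USER_GROUP,
            sync_status=SyncStatus.FAILED,
            num_docs_synced=initial_count,
        )
        raise
    update_sync_record_status(
        db_session=db_session,
        entity_id=usergroup_id,
        sync_type=SyncType.USER_GROUP,
        sync_status=SyncStatus.SUCCESS,
        num_docs_synced=initial_count,
    )
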
@@ -5,7 +5,7 @@ from sqlalchemy import select
from sqlalchemy.orm import Session

from onyx.access.models import ExternalAccess
-from onyx.access.utils import build_ext_group_name_for_onyx
+from onyx.access.utils import prefix_group_w_source
from onyx.configs.constants import DocumentSource
from onyx.db.models import Document as DbDocument

@@ -25,7 +25,7 @@ def upsert_document_external_perms__no_commit(
    ).first()

    prefixed_external_groups = [
-        build_ext_group_name_for_onyx(
+        prefix_group_w_source(
            ext_group_name=group_id,
            source=source_type,
        )
@@ -66,7 +66,7 @@ def upsert_document_external_perms(
    ).first()

    prefixed_external_groups: set[str] = {
-        build_ext_group_name_for_onyx(
+        prefix_group_w_source(
            ext_group_name=group_id,
            source=source_type,
        )

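
Both helpers exist to namespace an external group id with its source, so that, say, a Slack group and a Google Drive group with the same raw id cannot collide once stored. The exact format lives in onyx.access.utils and is not shown in this diff; the separator below is an illustrative assumption, not the real implementation:

def prefix_group_w_source(ext_group_name: str, source: DocumentSource) -> str:
    # Hypothetical format: the real helper may use a different separator/casing.
    return f"{source.value}__{ext_group_name}".lower()

# e.g. prefix_group_w_source("eng-team", DocumentSource.SLACK) -> "slack__eng-team"
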
@@ -6,9 +6,8 @@ from sqlalchemy import delete
from sqlalchemy import select
from sqlalchemy.orm import Session

-from onyx.access.utils import build_ext_group_name_for_onyx
+from onyx.access.utils import prefix_group_w_source
from onyx.configs.constants import DocumentSource
from onyx.db.models import User
from onyx.db.models import User__ExternalUserGroupId
from onyx.db.users import batch_add_ext_perm_user_if_not_exists
from onyx.db.users import get_user_by_email
@@ -62,10 +61,8 @@ def replace_user__ext_group_for_cc_pair(
            all_group_member_emails.add(user_email)

    # batch add users if they don't exist and get their ids
-    all_group_members: list[User] = batch_add_ext_perm_user_if_not_exists(
-        db_session=db_session,
-        # NOTE: this function handles case sensitivity for emails
-        emails=list(all_group_member_emails),
+    all_group_members = batch_add_ext_perm_user_if_not_exists(
+        db_session=db_session, emails=list(all_group_member_emails)
    )

    delete_user__ext_group_for_cc_pair__no_commit(
@@ -87,14 +84,12 @@ def replace_user__ext_group_for_cc_pair(
                    f" with email {user_email} not found"
                )
                continue
-            external_group_id = build_ext_group_name_for_onyx(
-                ext_group_name=external_group.id,
-                source=source,
-            )
            new_external_permissions.append(
                User__ExternalUserGroupId(
                    user_id=user_id,
-                    external_user_group_id=external_group_id,
+                    external_user_group_id=prefix_group_w_source(
+                        external_group.id, source
+                    ),
                    cc_pair_id=cc_pair_id,
                )
            )

@@ -1,135 +1,27 @@
from collections.abc import Sequence
-from datetime import datetime
+import datetime
from typing import Literal

from sqlalchemy import asc
from sqlalchemy import BinaryExpression
from sqlalchemy import ColumnElement
from sqlalchemy import desc
from sqlalchemy import distinct
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import Session
from sqlalchemy.sql import case
from sqlalchemy.sql import func
from sqlalchemy.sql import select
from sqlalchemy.sql.expression import literal
from sqlalchemy.sql.expression import UnaryExpression

from onyx.configs.constants import QAFeedbackType
from onyx.db.models import ChatMessage
from onyx.db.models import ChatMessageFeedback
from onyx.db.models import ChatSession


def _build_filter_conditions(
    start_time: datetime | None,
    end_time: datetime | None,
    feedback_filter: QAFeedbackType | None,
) -> list[ColumnElement]:
    """
    Helper function to build all filter conditions for chat sessions.
    Filters by start and end time, feedback type, and any sessions without messages.

    start_time: Date from which to filter
    end_time: Date to which to filter
    feedback_filter: Feedback type to filter by

    Returns: List of filter conditions
    """
    conditions = []

    if start_time is not None:
        conditions.append(ChatSession.time_created >= start_time)
    if end_time is not None:
        conditions.append(ChatSession.time_created <= end_time)

    if feedback_filter is not None:
        feedback_subq = (
            select(ChatMessage.chat_session_id)
            .join(ChatMessageFeedback)
            .group_by(ChatMessage.chat_session_id)
            .having(
                case(
                    (
                        case(
                            {literal(feedback_filter == QAFeedbackType.LIKE): True},
                            else_=False,
                        ),
                        func.bool_and(ChatMessageFeedback.is_positive),
                    ),
                    (
                        case(
                            {literal(feedback_filter == QAFeedbackType.DISLIKE): True},
                            else_=False,
                        ),
                        func.bool_and(func.not_(ChatMessageFeedback.is_positive)),
                    ),
                    else_=func.bool_or(ChatMessageFeedback.is_positive)
                    & func.bool_or(func.not_(ChatMessageFeedback.is_positive)),
                )
            )
        )
        conditions.append(ChatSession.id.in_(feedback_subq))

    return conditions
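
In plain terms, the HAVING clause above keeps a session when: LIKE means every feedback row is positive (bool_and), DISLIKE means every row is negative, and any other filter value falls through to the mixed case, requiring at least one positive and one negative row. The same predicate over one session's feedback flags, restated in plain Python for clarity:

def session_matches(feedback_filter: QAFeedbackType, flags: list[bool]) -> bool:
    # flags holds ChatMessageFeedback.is_positive for one chat session's rows
    if feedback_filter == QAFeedbackType.LIKE:
        return all(flags)
    if feedback_filter == QAFeedbackType.DISLIKE:
        return not any(flags)
    return any(flags) and not all(flags)  # mixed: at least one of each
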


def get_total_filtered_chat_sessions_count(
    db_session: Session,
    start_time: datetime | None,
    end_time: datetime | None,
    feedback_filter: QAFeedbackType | None,
) -> int:
    conditions = _build_filter_conditions(start_time, end_time, feedback_filter)
    stmt = (
        select(func.count(distinct(ChatSession.id)))
        .select_from(ChatSession)
        .filter(*conditions)
    )
    return db_session.scalar(stmt) or 0


def get_page_of_chat_sessions(
    start_time: datetime | None,
    end_time: datetime | None,
    db_session: Session,
    page_num: int,
    page_size: int,
    feedback_filter: QAFeedbackType | None = None,
) -> Sequence[ChatSession]:
    conditions = _build_filter_conditions(start_time, end_time, feedback_filter)

    subquery = (
        select(ChatSession.id, ChatSession.time_created)
        .filter(*conditions)
        .order_by(ChatSession.id, desc(ChatSession.time_created))
        .distinct(ChatSession.id)
        .limit(page_size)
        .offset(page_num * page_size)
        .subquery()
    )

    stmt = (
        select(ChatSession)
        .join(subquery, ChatSession.id == subquery.c.id)
        .outerjoin(ChatMessage, ChatSession.id == ChatMessage.chat_session_id)
        .options(
            joinedload(ChatSession.user),
            joinedload(ChatSession.persona),
            contains_eager(ChatSession.messages).joinedload(
                ChatMessage.chat_message_feedbacks
            ),
        )
        .order_by(desc(ChatSession.time_created), asc(ChatMessage.id))
    )

    return db_session.scalars(stmt).unique().all()
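
Paired with the count helper, page_num is zero-based and the last page index follows from ceiling division over the total. A hypothetical caller (db_session and the filter arguments are assumed to be in scope):

total = get_total_filtered_chat_sessions_count(
    db_session, start_time, end_time, feedback_filter
)
num_pages = -(-total // page_size)  # ceiling division
first_page = get_page_of_chat_sessions(
    start_time,
    end_time,
    db_session,
    page_num=0,
    page_size=page_size,
    feedback_filter=feedback_filter,
)
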
SortByOptions = Literal["time_sent"]


def fetch_chat_sessions_eagerly_by_time(
-    start: datetime,
-    end: datetime,
+    start: datetime.datetime,
+    end: datetime.datetime,
    db_session: Session,
    limit: int | None = 500,
-    initial_time: datetime | None = None,
+    initial_time: datetime.datetime | None = None,
) -> list[ChatSession]:
    time_order: UnaryExpression = desc(ChatSession.time_created)
    message_order: UnaryExpression = asc(ChatMessage.id)

@@ -111,10 +111,10 @@ def insert_user_group_token_rate_limit(
    return token_limit


-def fetch_user_group_token_rate_limits_for_user(
+def fetch_user_group_token_rate_limits(
    db_session: Session,
    group_id: int,
-    user: User | None,
+    user: User | None = None,
    enabled_only: bool = False,
    ordered: bool = True,
    get_editable: bool = True,

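
With the new keyword default, callers that only care about a group's active limits can drop the user argument entirely. A hedged usage sketch; the return value is assumed to be a list of token-rate-limit rows, which this hunk does not show:

enabled_limits = fetch_user_group_token_rate_limits(
    db_session=db_session,
    group_id=group.id,
    enabled_only=True,
)
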
@@ -374,9 +374,7 @@ def _add_user_group__cc_pair_relationships__no_commit(


def insert_user_group(db_session: Session, user_group: UserGroupCreate) -> UserGroup:
-    db_user_group = UserGroup(
-        name=user_group.name, time_last_modified_by_user=func.now()
-    )
+    db_user_group = UserGroup(name=user_group.name)
    db_session.add(db_user_group)
    db_session.flush()  # give the group an ID

@@ -632,10 +630,6 @@ def update_user_group(
        select(User).where(User.id.in_(removed_user_ids))  # type: ignore
    ).unique()
    _validate_curator_status__no_commit(db_session, list(removed_users))

-    # update "time_updated" to now
-    db_user_group.time_last_modified_by_user = func.now()
-
    db_session.commit()
    return db_user_group

@@ -705,10 +699,7 @@ def delete_user_group_cc_pair_relationship__no_commit(
    connector_credential_pair_id matches the given cc_pair_id.

    Should be used very carefully (only for connectors that are being deleted)."""
-    cc_pair = get_connector_credential_pair_from_id(
-        db_session=db_session,
-        cc_pair_id=cc_pair_id,
-    )
+    cc_pair = get_connector_credential_pair_from_id(cc_pair_id, db_session)
    if not cc_pair:
        raise ValueError(f"Connector Credential Pair '{cc_pair_id}' does not exist")

@@ -120,12 +120,9 @@ def _get_permissions_from_slim_doc(
    elif permission_type == "anyone":
        public = True

-    drive_id = permission_info.get("drive_id")
-    group_ids = group_emails | ({drive_id} if drive_id is not None else set())
-
    return ExternalAccess(
        external_user_emails=user_emails,
-        external_user_group_ids=group_ids,
+        external_user_group_ids=group_emails,
        is_public=public,
    )

@@ -1,127 +1,16 @@
from ee.onyx.db.external_perm import ExternalUserGroup
from onyx.connectors.google_drive.connector import GoogleDriveConnector
from onyx.connectors.google_utils.google_utils import execute_paginated_retrieval
from onyx.connectors.google_utils.resources import AdminService
from onyx.connectors.google_utils.resources import get_admin_service
from onyx.connectors.google_utils.resources import get_drive_service
from onyx.db.models import ConnectorCredentialPair
from onyx.utils.logger import setup_logger

logger = setup_logger()


def _get_drive_members(
    google_drive_connector: GoogleDriveConnector,
) -> dict[str, tuple[set[str], set[str]]]:
    """
    This builds a map of drive ids to their members (group and user emails).
    E.g. {
        "drive_id_1": ({"group_email_1"}, {"user_email_1", "user_email_2"}),
        "drive_id_2": ({"group_email_3"}, {"user_email_3"}),
    }
    """
    drive_ids = google_drive_connector.get_all_drive_ids()

    drive_id_to_members_map: dict[str, tuple[set[str], set[str]]] = {}
    drive_service = get_drive_service(
        google_drive_connector.creds,
        google_drive_connector.primary_admin_email,
    )

    for drive_id in drive_ids:
        group_emails: set[str] = set()
        user_emails: set[str] = set()
        for permission in execute_paginated_retrieval(
            drive_service.permissions().list,
            list_key="permissions",
            fileId=drive_id,
            fields="permissions(emailAddress, type)",
            supportsAllDrives=True,
        ):
            if permission["type"] == "group":
                group_emails.add(permission["emailAddress"])
            elif permission["type"] == "user":
                user_emails.add(permission["emailAddress"])
        drive_id_to_members_map[drive_id] = (group_emails, user_emails)
    return drive_id_to_members_map


def _get_all_groups(
    admin_service: AdminService,
    google_domain: str,
) -> set[str]:
    """
    This gets all the group emails.
    """
    group_emails: set[str] = set()
    for group in execute_paginated_retrieval(
        admin_service.groups().list,
        list_key="groups",
        domain=google_domain,
        fields="groups(email)",
    ):
        group_emails.add(group["email"])
    return group_emails


def _map_group_email_to_member_emails(
    admin_service: AdminService,
    group_emails: set[str],
) -> dict[str, set[str]]:
    """
    This maps group emails to their member emails.
    """
    group_to_member_map: dict[str, set[str]] = {}
    for group_email in group_emails:
        group_member_emails: set[str] = set()
        for member in execute_paginated_retrieval(
            admin_service.members().list,
            list_key="members",
            groupKey=group_email,
            fields="members(email)",
        ):
            group_member_emails.add(member["email"])

        group_to_member_map[group_email] = group_member_emails
    return group_to_member_map


def _build_onyx_groups(
    drive_id_to_members_map: dict[str, tuple[set[str], set[str]]],
    group_email_to_member_emails_map: dict[str, set[str]],
) -> list[ExternalUserGroup]:
    onyx_groups: list[ExternalUserGroup] = []

    # Convert all drive member definitions to onyx groups
    # This is because having drive level access means you have
    # irrevocable access to all the files in the drive.
    for drive_id, (group_emails, user_emails) in drive_id_to_members_map.items():
        all_member_emails: set[str] = user_emails
        for group_email in group_emails:
            all_member_emails.update(group_email_to_member_emails_map[group_email])
        onyx_groups.append(
            ExternalUserGroup(
                id=drive_id,
                user_emails=list(all_member_emails),
            )
        )

    # Convert all group member definitions to onyx groups
    for group_email, member_emails in group_email_to_member_emails_map.items():
        onyx_groups.append(
            ExternalUserGroup(
                id=group_email,
                user_emails=list(member_emails),
            )
        )

    return onyx_groups

def gdrive_group_sync(
|
||||
cc_pair: ConnectorCredentialPair,
|
||||
) -> list[ExternalUserGroup]:
|
||||
# Initialize connector and build credential/service objects
|
||||
google_drive_connector = GoogleDriveConnector(
|
||||
**cc_pair.connector.connector_specific_config
|
||||
)
|
||||
@@ -130,23 +19,34 @@ def gdrive_group_sync(
|
||||
google_drive_connector.creds, google_drive_connector.primary_admin_email
|
||||
)
|
||||
|
||||
# Get all drive members
|
||||
drive_id_to_members_map = _get_drive_members(google_drive_connector)
|
||||
onyx_groups: list[ExternalUserGroup] = []
|
||||
for group in execute_paginated_retrieval(
|
||||
admin_service.groups().list,
|
||||
list_key="groups",
|
||||
domain=google_drive_connector.google_domain,
|
||||
fields="groups(email)",
|
||||
):
|
||||
# The id is the group email
|
||||
group_email = group["email"]
|
||||
|
||||
# Get all group emails
|
||||
all_group_emails = _get_all_groups(
|
||||
admin_service, google_drive_connector.google_domain
|
||||
)
|
||||
# Gather group member emails
|
||||
group_member_emails: list[str] = []
|
||||
for member in execute_paginated_retrieval(
|
||||
admin_service.members().list,
|
||||
list_key="members",
|
||||
groupKey=group_email,
|
||||
fields="members(email)",
|
||||
):
|
||||
group_member_emails.append(member["email"])
|
||||
|
||||
# Map group emails to their members
|
||||
group_email_to_member_emails_map = _map_group_email_to_member_emails(
|
||||
admin_service, all_group_emails
|
||||
)
|
||||
if not group_member_emails:
|
||||
continue
|
||||
|
||||
# Convert the maps to onyx groups
|
||||
onyx_groups = _build_onyx_groups(
|
||||
drive_id_to_members_map=drive_id_to_members_map,
|
||||
group_email_to_member_emails_map=group_email_to_member_emails_map,
|
||||
)
|
||||
onyx_groups.append(
|
||||
ExternalUserGroup(
|
||||
id=group_email,
|
||||
user_emails=list(group_member_emails),
|
||||
)
|
||||
)
|
||||
|
||||
return onyx_groups
|
||||
|
||||
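Reviewer note: a minimal, self-contained sketch of the membership-flattening rule that the removed _build_onyx_groups helper implemented — drive-level access expands through group membership into one flat member list per drive. The emails and drive ids below are hypothetical.

    # drive id -> ({group emails}, {direct user emails}); shapes mirror the diff above
    drive_members = {"drive_1": ({"eng@example.com"}, {"alice@example.com"})}
    # group email -> {member emails}
    group_members = {"eng@example.com": {"bob@example.com", "carol@example.com"}}

    for drive_id, (groups, users) in drive_members.items():
        flat = set(users)
        for g in groups:
            flat.update(group_members.get(g, set()))
        # each drive becomes one external group holding every resolved member
        print(drive_id, sorted(flat))
    # drive_1 ['alice@example.com', 'bob@example.com', 'carol@example.com']
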
@@ -161,10 +161,7 @@ def _get_salesforce_client_for_doc_id(db_session: Session, doc_id: str) -> Sales

    cc_pair_id = _DOC_ID_TO_CC_PAIR_ID_MAP[doc_id]
    if cc_pair_id not in _CC_PAIR_ID_SALESFORCE_CLIENT_MAP:
        cc_pair = get_connector_credential_pair_from_id(
            db_session=db_session,
            cc_pair_id=cc_pair_id,
        )
        cc_pair = get_connector_credential_pair_from_id(cc_pair_id, db_session)
        if cc_pair is None:
            raise ValueError(f"CC pair {cc_pair_id} not found")
        credential_json = cc_pair.credential.credential_json

@@ -150,9 +150,9 @@ def _handle_standard_answers(
        db_session=db_session,
        description="",
        user_id=None,
        persona_id=(
            slack_channel_config.persona.id if slack_channel_config.persona else 0
        ),
        persona_id=slack_channel_config.persona.id
        if slack_channel_config.persona
        else 0,
        onyxbot_flow=True,
        slack_thread_id=slack_thread_id,
    )
@@ -182,7 +182,7 @@ def _handle_standard_answers(
            formatted_answers.append(formatted_answer)
        answer_message = "\n\n".join(formatted_answers)

        chat_message = create_new_chat_message(
        _ = create_new_chat_message(
            chat_session_id=chat_session.id,
            parent_message=new_user_message,
            prompt_id=prompt.id if prompt else None,
@@ -191,13 +191,8 @@ def _handle_standard_answers(
            message_type=MessageType.ASSISTANT,
            error=None,
            db_session=db_session,
            commit=False,
            commit=True,
        )
        # attach the standard answers to the chat message
        chat_message.standard_answers = [
            standard_answer for standard_answer, _ in matching_standard_answers
        ]
        db_session.commit()

        update_emote_react(
            emoji=DANSWER_REACT_EMOJI,

@@ -228,6 +228,8 @@ def get_assistant_stats(
        datetime.datetime.utcnow() - datetime.timedelta(days=_DEFAULT_LOOKBACK_DAYS)
    )
    end = end or datetime.datetime.utcnow()
    print("current user")
    print(user)

    if not user_can_view_assistant_stats(db_session, user, assistant_id):
        raise HTTPException(

@@ -179,6 +179,7 @@ def handle_simplified_chat_message(
        chunks_below=0,
        full_doc=chat_message_req.full_doc,
        structured_response_format=chat_message_req.structured_response_format,
        use_pro_search=chat_message_req.use_pro_search,
    )

    packets = stream_chat_message_objects(
@@ -301,6 +302,7 @@ def handle_send_message_simple_with_history(
        chunks_below=0,
        full_doc=req.full_doc,
        structured_response_format=req.structured_response_format,
        use_pro_search=req.use_pro_search,
    )

    packets = stream_chat_message_objects(

@@ -57,6 +57,9 @@ class BasicCreateChatMessageRequest(ChunkContext):
    # https://platform.openai.com/docs/guides/structured-outputs/introduction
    structured_response_format: dict | None = None

    # If True, uses pro search instead of basic search
    use_pro_search: bool = False


class BasicCreateChatMessageWithHistoryRequest(ChunkContext):
    # Last element is the new query. All previous elements are historical context
@@ -71,6 +74,8 @@ class BasicCreateChatMessageWithHistoryRequest(ChunkContext):
    # only works if using an OpenAI model. See the following for more details:
    # https://platform.openai.com/docs/guides/structured-outputs/introduction
    structured_response_format: dict | None = None
    # If True, uses pro search instead of basic search
    use_pro_search: bool = False


class SimpleDoc(BaseModel):
@@ -123,6 +128,9 @@ class OneShotQARequest(ChunkContext):
    # If True, skips generating an AI response to the search query
    skip_gen_ai_answer_generation: bool = False

    # If True, uses pro search instead of basic search
    use_pro_search: bool = False

    @model_validator(mode="after")
    def check_persona_fields(self) -> "OneShotQARequest":
        if self.persona_override_config is None and self.persona_id is None:

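Reviewer note: a hedged sketch of what the new flag looks like on the wire, assuming the simple-chat endpoint takes BasicCreateChatMessageRequest as its JSON body (the "message" field name is assumed, not shown in this hunk):

    payload = {
        "message": "Summarize our onboarding docs",
        "use_pro_search": True,  # new opt-in flag; omitting it keeps basic search
        "structured_response_format": None,
    }
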
@@ -196,6 +196,7 @@ def get_answer_stream(
        retrieval_details=query_request.retrieval_options,
        rerank_settings=query_request.rerank_settings,
        db_session=db_session,
        use_pro_search=query_request.use_pro_search,
    )

    packets = stream_chat_message_objects(

@@ -1,23 +1,19 @@
import csv
import io
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from typing import Literal
from uuid import UUID

from fastapi import APIRouter
from fastapi import Depends
from fastapi import HTTPException
from fastapi import Query
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from sqlalchemy.orm import Session

from ee.onyx.db.query_history import fetch_chat_sessions_eagerly_by_time
from ee.onyx.db.query_history import get_page_of_chat_sessions
from ee.onyx.db.query_history import get_total_filtered_chat_sessions_count
from ee.onyx.server.query_history.models import ChatSessionMinimal
from ee.onyx.server.query_history.models import ChatSessionSnapshot
from ee.onyx.server.query_history.models import MessageSnapshot
from ee.onyx.server.query_history.models import QuestionAnswerPairSnapshot
from onyx.auth.users import current_admin_user
from onyx.auth.users import get_display_email
from onyx.chat.chat_utils import create_chat_chain
@@ -27,15 +23,257 @@ from onyx.configs.constants import SessionType
from onyx.db.chat import get_chat_session_by_id
from onyx.db.chat import get_chat_sessions_by_user
from onyx.db.engine import get_session
from onyx.db.models import ChatMessage
from onyx.db.models import ChatSession
from onyx.db.models import User
from onyx.server.documents.models import PaginatedReturn
from onyx.server.query_and_chat.models import ChatSessionDetails
from onyx.server.query_and_chat.models import ChatSessionsResponse

router = APIRouter()


class AbridgedSearchDoc(BaseModel):
    """A subset of the info present in `SearchDoc`"""

    document_id: str
    semantic_identifier: str
    link: str | None


class MessageSnapshot(BaseModel):
    message: str
    message_type: MessageType
    documents: list[AbridgedSearchDoc]
    feedback_type: QAFeedbackType | None
    feedback_text: str | None
    time_created: datetime

    @classmethod
    def build(cls, message: ChatMessage) -> "MessageSnapshot":
        latest_messages_feedback_obj = (
            message.chat_message_feedbacks[-1]
            if len(message.chat_message_feedbacks) > 0
            else None
        )
        feedback_type = (
            (
                QAFeedbackType.LIKE
                if latest_messages_feedback_obj.is_positive
                else QAFeedbackType.DISLIKE
            )
            if latest_messages_feedback_obj
            else None
        )
        feedback_text = (
            latest_messages_feedback_obj.feedback_text
            if latest_messages_feedback_obj
            else None
        )
        return cls(
            message=message.message,
            message_type=message.message_type,
            documents=[
                AbridgedSearchDoc(
                    document_id=document.document_id,
                    semantic_identifier=document.semantic_id,
                    link=document.link,
                )
                for document in message.search_docs
            ],
            feedback_type=feedback_type,
            feedback_text=feedback_text,
            time_created=message.time_sent,
        )


class ChatSessionMinimal(BaseModel):
    id: UUID
    user_email: str
    name: str | None
    first_user_message: str
    first_ai_message: str
    assistant_id: int | None
    assistant_name: str | None
    time_created: datetime
    feedback_type: QAFeedbackType | Literal["mixed"] | None
    flow_type: SessionType
    conversation_length: int


class ChatSessionSnapshot(BaseModel):
    id: UUID
    user_email: str
    name: str | None
    messages: list[MessageSnapshot]
    assistant_id: int | None
    assistant_name: str | None
    time_created: datetime
    flow_type: SessionType


class QuestionAnswerPairSnapshot(BaseModel):
    chat_session_id: UUID
    # 1-indexed message number in the chat_session
    # e.g. the first message pair in the chat_session is 1, the second is 2, etc.
    message_pair_num: int
    user_message: str
    ai_response: str
    retrieved_documents: list[AbridgedSearchDoc]
    feedback_type: QAFeedbackType | None
    feedback_text: str | None
    persona_name: str | None
    user_email: str
    time_created: datetime
    flow_type: SessionType

    @classmethod
    def from_chat_session_snapshot(
        cls,
        chat_session_snapshot: ChatSessionSnapshot,
    ) -> list["QuestionAnswerPairSnapshot"]:
        message_pairs: list[tuple[MessageSnapshot, MessageSnapshot]] = []
        for ind in range(1, len(chat_session_snapshot.messages), 2):
            message_pairs.append(
                (
                    chat_session_snapshot.messages[ind - 1],
                    chat_session_snapshot.messages[ind],
                )
            )

        return [
            cls(
                chat_session_id=chat_session_snapshot.id,
                message_pair_num=ind + 1,
                user_message=user_message.message,
                ai_response=ai_message.message,
                retrieved_documents=ai_message.documents,
                feedback_type=ai_message.feedback_type,
                feedback_text=ai_message.feedback_text,
                persona_name=chat_session_snapshot.assistant_name,
                user_email=get_display_email(chat_session_snapshot.user_email),
                time_created=user_message.time_created,
                flow_type=chat_session_snapshot.flow_type,
            )
            for ind, (user_message, ai_message) in enumerate(message_pairs)
        ]

    def to_json(self) -> dict[str, str | None]:
        return {
            "chat_session_id": str(self.chat_session_id),
            "message_pair_num": str(self.message_pair_num),
            "user_message": self.user_message,
            "ai_response": self.ai_response,
            "retrieved_documents": "|".join(
                [
                    doc.link or doc.semantic_identifier
                    for doc in self.retrieved_documents
                ]
            ),
            "feedback_type": self.feedback_type.value if self.feedback_type else "",
            "feedback_text": self.feedback_text or "",
            "persona_name": self.persona_name,
            "user_email": self.user_email,
            "time_created": str(self.time_created),
            "flow_type": self.flow_type,
        }


def determine_flow_type(chat_session: ChatSession) -> SessionType:
    return SessionType.SLACK if chat_session.onyxbot_flow else SessionType.CHAT


def fetch_and_process_chat_session_history_minimal(
    db_session: Session,
    start: datetime,
    end: datetime,
    feedback_filter: QAFeedbackType | None = None,
    limit: int | None = 500,
) -> list[ChatSessionMinimal]:
    chat_sessions = fetch_chat_sessions_eagerly_by_time(
        start=start, end=end, db_session=db_session, limit=limit
    )

    minimal_sessions = []
    for chat_session in chat_sessions:
        if not chat_session.messages:
            continue

        first_user_message = next(
            (
                message.message
                for message in chat_session.messages
                if message.message_type == MessageType.USER
            ),
            "",
        )
        first_ai_message = next(
            (
                message.message
                for message in chat_session.messages
                if message.message_type == MessageType.ASSISTANT
            ),
            "",
        )

        has_positive_feedback = any(
            feedback.is_positive
            for message in chat_session.messages
            for feedback in message.chat_message_feedbacks
        )

        has_negative_feedback = any(
            not feedback.is_positive
            for message in chat_session.messages
            for feedback in message.chat_message_feedbacks
        )

        feedback_type: QAFeedbackType | Literal["mixed"] | None = (
            "mixed"
            if has_positive_feedback and has_negative_feedback
            else QAFeedbackType.LIKE
            if has_positive_feedback
            else QAFeedbackType.DISLIKE
            if has_negative_feedback
            else None
        )

        if feedback_filter:
            if feedback_filter == QAFeedbackType.LIKE and not has_positive_feedback:
                continue
            if feedback_filter == QAFeedbackType.DISLIKE and not has_negative_feedback:
                continue

        flow_type = determine_flow_type(chat_session)

        minimal_sessions.append(
            ChatSessionMinimal(
                id=chat_session.id,
                user_email=get_display_email(
                    chat_session.user.email if chat_session.user else None
                ),
                name=chat_session.description,
                first_user_message=first_user_message,
                first_ai_message=first_ai_message,
                assistant_id=chat_session.persona_id,
                assistant_name=(
                    chat_session.persona.name if chat_session.persona else None
                ),
                time_created=chat_session.time_created,
                feedback_type=feedback_type,
                flow_type=flow_type,
                conversation_length=len(
                    [
                        m
                        for m in chat_session.messages
                        if m.message_type != MessageType.SYSTEM
                    ]
                ),
            )
        )

    return minimal_sessions


def fetch_and_process_chat_session_history(
    db_session: Session,
    start: datetime,
@@ -81,7 +319,7 @@ def snapshot_from_chat_session(
    except RuntimeError:
        return None

    flow_type = SessionType.SLACK if chat_session.onyxbot_flow else SessionType.CHAT
    flow_type = determine_flow_type(chat_session)

    return ChatSessionSnapshot(
        id=chat_session.id,
@@ -133,38 +371,22 @@ def get_user_chat_sessions(

@router.get("/admin/chat-session-history")
def get_chat_session_history(
    page_num: int = Query(0, ge=0),
    page_size: int = Query(10, ge=1),
    feedback_type: QAFeedbackType | None = None,
    start_time: datetime | None = None,
    end_time: datetime | None = None,
    start: datetime | None = None,
    end: datetime | None = None,
    _: User | None = Depends(current_admin_user),
    db_session: Session = Depends(get_session),
) -> PaginatedReturn[ChatSessionMinimal]:
    page_of_chat_sessions = get_page_of_chat_sessions(
        page_num=page_num,
        page_size=page_size,
) -> list[ChatSessionMinimal]:
    return fetch_and_process_chat_session_history_minimal(
        db_session=db_session,
        start_time=start_time,
        end_time=end_time,
        start=start
        or (
            datetime.now(tz=timezone.utc) - timedelta(days=30)
        ),  # default is 30d lookback
        end=end or datetime.now(tz=timezone.utc),
        feedback_filter=feedback_type,
    )

    total_filtered_chat_sessions_count = get_total_filtered_chat_sessions_count(
        db_session=db_session,
        start_time=start_time,
        end_time=end_time,
        feedback_filter=feedback_type,
    )

    return PaginatedReturn(
        items=[
            ChatSessionMinimal.from_chat_session(chat_session)
            for chat_session in page_of_chat_sessions
        ],
        total_items=total_filtered_chat_sessions_count,
    )


@router.get("/admin/chat-session-history/{chat_session_id}")
def get_chat_session_admin(

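Reviewer note: from_chat_session_snapshot assumes messages strictly alternate user/assistant; a minimal sketch of the pairing walk it performs, with plain strings standing in for MessageSnapshot objects:

    messages = ["u1", "a1", "u2", "a2", "u3", "a3"]
    pairs = [
        (messages[ind - 1], messages[ind])
        for ind in range(1, len(messages), 2)
    ]
    print(pairs)  # [('u1', 'a1'), ('u2', 'a2'), ('u3', 'a3')]
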
@@ -1,218 +0,0 @@
from datetime import datetime
from uuid import UUID

from pydantic import BaseModel

from onyx.auth.users import get_display_email
from onyx.configs.constants import MessageType
from onyx.configs.constants import QAFeedbackType
from onyx.configs.constants import SessionType
from onyx.db.models import ChatMessage
from onyx.db.models import ChatSession


class AbridgedSearchDoc(BaseModel):
    """A subset of the info present in `SearchDoc`"""

    document_id: str
    semantic_identifier: str
    link: str | None


class MessageSnapshot(BaseModel):
    id: int
    message: str
    message_type: MessageType
    documents: list[AbridgedSearchDoc]
    feedback_type: QAFeedbackType | None
    feedback_text: str | None
    time_created: datetime

    @classmethod
    def build(cls, message: ChatMessage) -> "MessageSnapshot":
        latest_messages_feedback_obj = (
            message.chat_message_feedbacks[-1]
            if len(message.chat_message_feedbacks) > 0
            else None
        )
        feedback_type = (
            (
                QAFeedbackType.LIKE
                if latest_messages_feedback_obj.is_positive
                else QAFeedbackType.DISLIKE
            )
            if latest_messages_feedback_obj
            else None
        )
        feedback_text = (
            latest_messages_feedback_obj.feedback_text
            if latest_messages_feedback_obj
            else None
        )
        return cls(
            id=message.id,
            message=message.message,
            message_type=message.message_type,
            documents=[
                AbridgedSearchDoc(
                    document_id=document.document_id,
                    semantic_identifier=document.semantic_id,
                    link=document.link,
                )
                for document in message.search_docs
            ],
            feedback_type=feedback_type,
            feedback_text=feedback_text,
            time_created=message.time_sent,
        )


class ChatSessionMinimal(BaseModel):
    id: UUID
    user_email: str
    name: str | None
    first_user_message: str
    first_ai_message: str
    assistant_id: int | None
    assistant_name: str | None
    time_created: datetime
    feedback_type: QAFeedbackType | None
    flow_type: SessionType
    conversation_length: int

    @classmethod
    def from_chat_session(cls, chat_session: ChatSession) -> "ChatSessionMinimal":
        first_user_message = next(
            (
                message.message
                for message in chat_session.messages
                if message.message_type == MessageType.USER
            ),
            "",
        )
        first_ai_message = next(
            (
                message.message
                for message in chat_session.messages
                if message.message_type == MessageType.ASSISTANT
            ),
            "",
        )

        list_of_message_feedbacks = [
            feedback.is_positive
            for message in chat_session.messages
            for feedback in message.chat_message_feedbacks
        ]
        session_feedback_type = None
        if list_of_message_feedbacks:
            if all(list_of_message_feedbacks):
                session_feedback_type = QAFeedbackType.LIKE
            elif not any(list_of_message_feedbacks):
                session_feedback_type = QAFeedbackType.DISLIKE
            else:
                session_feedback_type = QAFeedbackType.MIXED

        return cls(
            id=chat_session.id,
            user_email=get_display_email(
                chat_session.user.email if chat_session.user else None
            ),
            name=chat_session.description,
            first_user_message=first_user_message,
            first_ai_message=first_ai_message,
            assistant_id=chat_session.persona_id,
            assistant_name=(
                chat_session.persona.name if chat_session.persona else None
            ),
            time_created=chat_session.time_created,
            feedback_type=session_feedback_type,
            flow_type=SessionType.SLACK
            if chat_session.onyxbot_flow
            else SessionType.CHAT,
            conversation_length=len(
                [
                    message
                    for message in chat_session.messages
                    if message.message_type != MessageType.SYSTEM
                ]
            ),
        )


class ChatSessionSnapshot(BaseModel):
    id: UUID
    user_email: str
    name: str | None
    messages: list[MessageSnapshot]
    assistant_id: int | None
    assistant_name: str | None
    time_created: datetime
    flow_type: SessionType


class QuestionAnswerPairSnapshot(BaseModel):
    chat_session_id: UUID
    # 1-indexed message number in the chat_session
    # e.g. the first message pair in the chat_session is 1, the second is 2, etc.
    message_pair_num: int
    user_message: str
    ai_response: str
    retrieved_documents: list[AbridgedSearchDoc]
    feedback_type: QAFeedbackType | None
    feedback_text: str | None
    persona_name: str | None
    user_email: str
    time_created: datetime
    flow_type: SessionType

    @classmethod
    def from_chat_session_snapshot(
        cls,
        chat_session_snapshot: ChatSessionSnapshot,
    ) -> list["QuestionAnswerPairSnapshot"]:
        message_pairs: list[tuple[MessageSnapshot, MessageSnapshot]] = []
        for ind in range(1, len(chat_session_snapshot.messages), 2):
            message_pairs.append(
                (
                    chat_session_snapshot.messages[ind - 1],
                    chat_session_snapshot.messages[ind],
                )
            )

        return [
            cls(
                chat_session_id=chat_session_snapshot.id,
                message_pair_num=ind + 1,
                user_message=user_message.message,
                ai_response=ai_message.message,
                retrieved_documents=ai_message.documents,
                feedback_type=ai_message.feedback_type,
                feedback_text=ai_message.feedback_text,
                persona_name=chat_session_snapshot.assistant_name,
                user_email=get_display_email(chat_session_snapshot.user_email),
                time_created=user_message.time_created,
                flow_type=chat_session_snapshot.flow_type,
            )
            for ind, (user_message, ai_message) in enumerate(message_pairs)
        ]

    def to_json(self) -> dict[str, str | None]:
        return {
            "chat_session_id": str(self.chat_session_id),
            "message_pair_num": str(self.message_pair_num),
            "user_message": self.user_message,
            "ai_response": self.ai_response,
            "retrieved_documents": "|".join(
                [
                    doc.link or doc.semantic_identifier
                    for doc in self.retrieved_documents
                ]
            ),
            "feedback_type": self.feedback_type.value if self.feedback_type else "",
            "feedback_text": self.feedback_text or "",
            "persona_name": self.persona_name,
            "user_email": self.user_email,
            "time_created": str(self.time_created),
            "flow_type": self.flow_type,
        }

@@ -24,7 +24,7 @@ from onyx.db.llm import update_default_provider
from onyx.db.llm import upsert_llm_provider
from onyx.db.models import Tool
from onyx.db.persona import upsert_persona
from onyx.server.features.persona.models import PersonaUpsertRequest
from onyx.server.features.persona.models import CreatePersonaRequest
from onyx.server.manage.llm.models import LLMProviderUpsertRequest
from onyx.server.settings.models import Settings
from onyx.server.settings.store import store_settings as store_base_settings
@@ -57,7 +57,7 @@ class SeedConfiguration(BaseModel):
    llms: list[LLMProviderUpsertRequest] | None = None
    admin_user_emails: list[str] | None = None
    seeded_logo_path: str | None = None
    personas: list[PersonaUpsertRequest] | None = None
    personas: list[CreatePersonaRequest] | None = None
    settings: Settings | None = None
    enterprise_settings: EnterpriseSettings | None = None

@@ -128,7 +128,7 @@ def _seed_llms(
    )


def _seed_personas(db_session: Session, personas: list[PersonaUpsertRequest]) -> None:
def _seed_personas(db_session: Session, personas: list[CreatePersonaRequest]) -> None:
    if personas:
        logger.notice("Seeding Personas")
        for persona in personas:

@@ -5,7 +5,7 @@ from fastapi import Depends
from sqlalchemy.orm import Session

from ee.onyx.db.token_limit import fetch_all_user_group_token_rate_limits_by_group
from ee.onyx.db.token_limit import fetch_user_group_token_rate_limits_for_user
from ee.onyx.db.token_limit import fetch_user_group_token_rate_limits
from ee.onyx.db.token_limit import insert_user_group_token_rate_limit
from onyx.auth.users import current_admin_user
from onyx.auth.users import current_curator_or_admin_user
@@ -51,10 +51,8 @@ def get_group_token_limit_settings(
) -> list[TokenRateLimitDisplay]:
    return [
        TokenRateLimitDisplay.from_db(token_rate_limit)
        for token_rate_limit in fetch_user_group_token_rate_limits_for_user(
            db_session=db_session,
            group_id=group_id,
            user=user,
        for token_rate_limit in fetch_user_group_token_rate_limits(
            db_session, group_id, user
        )
    ]

@@ -19,9 +19,6 @@ def prefix_external_group(ext_group_name: str) -> str:
    return f"external_group:{ext_group_name}"


def build_ext_group_name_for_onyx(ext_group_name: str, source: DocumentSource) -> str:
    """
    External groups may collide across sources, every source needs its own prefix.
    NOTE: the name is lowercased to handle case sensitivity for group names
    """
    return f"{source.value}_{ext_group_name}".lower()
def prefix_group_w_source(ext_group_name: str, source: DocumentSource) -> str:
    """External groups may collide across sources, every source needs its own prefix."""
    return f"{source.value.upper()}_{ext_group_name}"

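Reviewer note: the two prefixing helpers disagree on casing — build_ext_group_name_for_onyx lowercases the whole name to sidestep case-sensitive group names, while the replaced prefix_group_w_source uppercased only the source. A quick illustration, assuming the source enum value is "google_drive":

    source_value = "google_drive"  # assumed DocumentSource.GOOGLE_DRIVE.value
    ext_group = "Engineering-Team"

    new_style = f"{source_value}_{ext_group}".lower()
    old_style = f"{source_value.upper()}_{ext_group}"
    print(new_style)  # google_drive_engineering-team
    print(old_style)  # GOOGLE_DRIVE_Engineering-Team
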
95 backend/onyx/agent_search/basic/graph_builder.py Normal file
@@ -0,0 +1,95 @@
from langchain_core.callbacks.manager import dispatch_custom_event
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph

from onyx.agent_search.basic.states import BasicInput
from onyx.agent_search.basic.states import BasicOutput
from onyx.agent_search.basic.states import BasicState
from onyx.agent_search.basic.states import BasicStateUpdate
from onyx.tools.tool_implementations.search.search_tool import SearchTool
from onyx.chat.stream_processing.utils import (
    map_document_id_order,
)


def basic_graph_builder() -> StateGraph:
    graph = StateGraph(
        state_schema=BasicState,
        input=BasicInput,
        output=BasicOutput,
    )

    ### Add nodes ###

    graph.add_node(
        node="get_response",
        action=get_response,
    )

    ### Add edges ###

    graph.add_edge(start_key=START, end_key="get_response")

    graph.add_conditional_edges("get_response", should_continue, ["get_response", END])
    graph.add_edge(
        start_key="get_response",
        end_key=END,
    )

    return graph


def should_continue(state: BasicState) -> str:
    return (
        END if state["last_llm_call"] is None or state["calls"] > 1 else "get_response"
    )


def get_response(state: BasicState) -> BasicStateUpdate:
    llm = state["llm"]
    current_llm_call = state["last_llm_call"]
    if current_llm_call is None:
        raise ValueError("last_llm_call is None")
    answer_style_config = state["answer_style_config"]
    response_handler_manager = state["response_handler_manager"]
    # DEBUG: good breakpoint
    stream = llm.stream(
        # For tool calling LLMs, we want to insert the task prompt as part of this flow, this is because the LLM
        # may choose to not call any tools and just generate the answer, in which case the task prompt is needed.
        prompt=current_llm_call.prompt_builder.build(),
        tools=[tool.tool_definition() for tool in current_llm_call.tools] or None,
        tool_choice=(
            "required"
            if current_llm_call.tools and current_llm_call.force_use_tool.force_use
            else None
        ),
        structured_response_format=answer_style_config.structured_response_format,
    )

    for response in response_handler_manager.handle_llm_response(stream):
        dispatch_custom_event(
            "basic_response",
            response,
        )

    next_call = response_handler_manager.next_llm_call(current_llm_call)
    if next_call is not None:
        final_search_results, displayed_search_results = SearchTool.get_search_result(
            next_call
        ) or ([], [])
    else:
        final_search_results, displayed_search_results = [], []

    response_handler_manager.answer_handler.update(
        (
            final_search_results,
            map_document_id_order(final_search_results),
            map_document_id_order(displayed_search_results),
        )
    )
    return BasicStateUpdate(
        last_llm_call=next_call,
        calls=state["calls"] + 1,
    )


if __name__ == "__main__":
    pass
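Reviewer note: the basic graph is effectively a single-node loop — should_continue routes back into get_response until there is no follow-up LLM call or the call budget (2) is spent. A hedged usage sketch; the BasicInput values are placeholders for real pipeline objects, not names from this diff:

    graph = basic_graph_builder()
    compiled = graph.compile()
    # compiled.invoke(
    #     BasicInput(
    #         last_llm_call=first_call,          # built by the chat pipeline
    #         llm=llm,
    #         answer_style_config=style,
    #         response_handler_manager=manager,
    #         calls=0,
    #     )
    # )
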
41 backend/onyx/agent_search/basic/states.py Normal file
@@ -0,0 +1,41 @@
from typing import TypedDict

from onyx.chat.llm_response_handler import LLMResponseHandlerManager
from onyx.chat.models import AnswerStyleConfig
from onyx.chat.prompt_builder.build import LLMCall
from onyx.llm.chat_llm import LLM

## Update States


## Graph Input State


class BasicInput(TypedDict):
    last_llm_call: LLMCall | None
    llm: LLM
    answer_style_config: AnswerStyleConfig
    response_handler_manager: LLMResponseHandlerManager
    calls: int


## Graph Output State


class BasicOutput(TypedDict):
    pass


class BasicStateUpdate(TypedDict):
    last_llm_call: LLMCall | None
    calls: int


## Graph State


class BasicState(
    BasicInput,
    BasicOutput,
):
    pass
66 backend/onyx/agent_search/core_state.py Normal file
@@ -0,0 +1,66 @@
from operator import add
from typing import Annotated
from typing import TypedDict
from typing import TypeVar

from sqlalchemy.orm import Session

from onyx.chat.models import ProSearchConfig
from onyx.db.models import User
from onyx.llm.interfaces import LLM
from onyx.tools.tool_implementations.search.search_tool import SearchTool


class CoreState(TypedDict, total=False):
    """
    This is the core state that is shared across all subgraphs.
    """

    config: ProSearchConfig
    primary_llm: LLM
    fast_llm: LLM
    # a single session for the entire agent search
    # is fine if we are only reading
    db_session: Session
    user: User | None
    log_messages: Annotated[list[str], add]
    search_tool: SearchTool


class SubgraphCoreState(TypedDict, total=False):
    """
    This is the core state that is shared across all subgraphs.
    """

    subgraph_config: ProSearchConfig
    subgraph_primary_llm: LLM
    subgraph_fast_llm: LLM
    # a single session for the entire agent search
    # is fine if we are only reading
    subgraph_db_session: Session

    subgraph_search_tool: SearchTool


# This ensures that the state passed in extends the CoreState
T = TypeVar("T", bound=CoreState)
T_SUBGRAPH = TypeVar("T_SUBGRAPH", bound=SubgraphCoreState)


def extract_core_fields(state: T) -> CoreState:
    filtered_dict = {k: v for k, v in state.items() if k in CoreState.__annotations__}
    return CoreState(**dict(filtered_dict))  # type: ignore


def extract_core_fields_for_subgraph(state: T) -> SubgraphCoreState:
    filtered_dict = {
        "subgraph_" + k: v for k, v in state.items() if k in CoreState.__annotations__
    }
    return SubgraphCoreState(**dict(filtered_dict))  # type: ignore


def in_subgraph_extract_core_fields(state: T_SUBGRAPH) -> SubgraphCoreState:
    filtered_dict = {
        k: v for k, v in state.items() if k in SubgraphCoreState.__annotations__
    }
    return SubgraphCoreState(**dict(filtered_dict))  # type: ignore
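Reviewer note: extract_core_fields_for_subgraph is just a re-keying of the shared state with a "subgraph_" prefix; the same dict transform in isolation, with placeholder values:

    core = {"config": "cfg", "primary_llm": "llm", "other": "dropped"}
    core_keys = {"config", "primary_llm"}  # stands in for CoreState.__annotations__

    subgraph_state = {"subgraph_" + k: v for k, v in core.items() if k in core_keys}
    print(subgraph_state)  # {'subgraph_config': 'cfg', 'subgraph_primary_llm': 'llm'}
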
66 backend/onyx/agent_search/db_operations.py Normal file
@@ -0,0 +1,66 @@
from uuid import UUID

from sqlalchemy.orm import Session

from onyx.db.models import AgentSubQuery
from onyx.db.models import AgentSubQuestion


def create_sub_question(
    db_session: Session,
    chat_session_id: UUID,
    primary_message_id: int,
    sub_question: str,
    sub_answer: str,
) -> AgentSubQuestion:
    """Create a new sub-question record in the database."""
    sub_q = AgentSubQuestion(
        chat_session_id=chat_session_id,
        primary_question_id=primary_message_id,
        sub_question=sub_question,
        sub_answer=sub_answer,
    )
    db_session.add(sub_q)
    db_session.flush()
    return sub_q


def create_sub_query(
    db_session: Session,
    chat_session_id: UUID,
    parent_question_id: int,
    sub_query: str,
) -> AgentSubQuery:
    """Create a new sub-query record in the database."""
    sub_q = AgentSubQuery(
        chat_session_id=chat_session_id,
        parent_question_id=parent_question_id,
        sub_query=sub_query,
    )
    db_session.add(sub_q)
    db_session.flush()
    return sub_q


def get_sub_questions_for_message(
    db_session: Session,
    primary_message_id: int,
) -> list[AgentSubQuestion]:
    """Get all sub-questions for a given primary message."""
    return (
        db_session.query(AgentSubQuestion)
        .filter(AgentSubQuestion.primary_question_id == primary_message_id)
        .all()
    )


def get_sub_queries_for_question(
    db_session: Session,
    sub_question_id: int,
) -> list[AgentSubQuery]:
    """Get all sub-queries for a given sub-question."""
    return (
        db_session.query(AgentSubQuery)
        .filter(AgentSubQuery.parent_question_id == sub_question_id)
        .all()
    )
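Reviewer note: a hedged usage sketch of the new helpers — one sub-question with two sub-queries, flushed so ids exist but committed by the caller. The session, ids, and strings are placeholders, not values from this diff:

    # sub_question = create_sub_question(
    #     db_session, chat_session_id, primary_message_id=42,
    #     sub_question="What connectors exist?", sub_answer="...",
    # )
    # for q in ["connector list", "supported sources"]:
    #     create_sub_query(db_session, chat_session_id, sub_question.id, q)
    # db_session.commit()  # flush() inside the helpers only assigns ids
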
7 backend/onyx/agent_search/models.py Normal file
@@ -0,0 +1,7 @@
from pydantic import BaseModel


class AgentDocumentCitations(BaseModel):
    document_id: str
    document_title: str
    link: str
@@ -0,0 +1,28 @@
from collections.abc import Hashable

from langgraph.types import Send

from onyx.agent_search.core_state import in_subgraph_extract_core_fields
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    AnswerQuestionInput,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
    ExpandedRetrievalInput,
)
from onyx.utils.logger import setup_logger

logger = setup_logger()


def send_to_expanded_retrieval(state: AnswerQuestionInput) -> Send | Hashable:
    logger.debug("sending to expanded retrieval via edge")

    return Send(
        "initial_sub_question_expanded_retrieval",
        ExpandedRetrievalInput(
            **in_subgraph_extract_core_fields(state),
            question=state["question"],
            base_search=False,
            sub_question_id=state["question_id"],
        ),
    )
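Reviewer note: Send is LangGraph's fan-out primitive — returning one Send per target from a conditional edge runs the retrieval subgraph once per sub-question, in parallel. A sketch of that fan-out shape; the questions list is hypothetical:

    # sends = [
    #     Send(
    #         "initial_sub_question_expanded_retrieval",
    #         ExpandedRetrievalInput(
    #             **in_subgraph_extract_core_fields(state),
    #             question=q,
    #             base_search=False,
    #             sub_question_id=f"0_{i}",
    #         ),
    #     )
    #     for i, q in enumerate(questions)
    # ]
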
@@ -0,0 +1,129 @@
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph

from onyx.agent_search.pro_search_a.answer_initial_sub_question.edges import (
    send_to_expanded_retrieval,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.answer_check import (
    answer_check,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.answer_generation import (
    answer_generation,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.format_answer import (
    format_answer,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.ingest_retrieval import (
    ingest_retrieval,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    AnswerQuestionInput,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    AnswerQuestionOutput,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    AnswerQuestionState,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.graph_builder import (
    expanded_retrieval_graph_builder,
)
from onyx.agent_search.shared_graph_utils.utils import get_test_config
from onyx.utils.logger import setup_logger

logger = setup_logger()


def answer_query_graph_builder() -> StateGraph:
    graph = StateGraph(
        state_schema=AnswerQuestionState,
        input=AnswerQuestionInput,
        output=AnswerQuestionOutput,
    )

    ### Add nodes ###

    expanded_retrieval = expanded_retrieval_graph_builder().compile()
    graph.add_node(
        node="initial_sub_question_expanded_retrieval",
        action=expanded_retrieval,
    )
    graph.add_node(
        node="answer_check",
        action=answer_check,
    )
    graph.add_node(
        node="answer_generation",
        action=answer_generation,
    )
    graph.add_node(
        node="format_answer",
        action=format_answer,
    )
    graph.add_node(
        node="ingest_retrieval",
        action=ingest_retrieval,
    )

    ### Add edges ###

    graph.add_conditional_edges(
        source=START,
        path=send_to_expanded_retrieval,
        path_map=["initial_sub_question_expanded_retrieval"],
    )
    graph.add_edge(
        start_key="initial_sub_question_expanded_retrieval",
        end_key="ingest_retrieval",
    )
    graph.add_edge(
        start_key="ingest_retrieval",
        end_key="answer_generation",
    )
    graph.add_edge(
        start_key="answer_generation",
        end_key="answer_check",
    )
    graph.add_edge(
        start_key="answer_check",
        end_key="format_answer",
    )
    graph.add_edge(
        start_key="format_answer",
        end_key=END,
    )

    return graph


if __name__ == "__main__":
    from onyx.db.engine import get_session_context_manager
    from onyx.llm.factory import get_default_llms
    from onyx.context.search.models import SearchRequest

    graph = answer_query_graph_builder()
    compiled_graph = graph.compile()
    primary_llm, fast_llm = get_default_llms()
    search_request = SearchRequest(
        query="what can you do with onyx or danswer?",
    )
    with get_session_context_manager() as db_session:
        pro_search_config, search_tool = get_test_config(
            db_session, primary_llm, fast_llm, search_request
        )
        inputs = AnswerQuestionInput(
            question="what can you do with onyx?",
            subgraph_fast_llm=fast_llm,
            subgraph_primary_llm=primary_llm,
            subgraph_config=pro_search_config,
            subgraph_search_tool=search_tool,
            subgraph_db_session=db_session,
            question_id="0_0",
        )
        for thing in compiled_graph.stream(
            input=inputs,
            # debug=True,
            # subgraphs=True,
        ):
            logger.debug(thing)
@@ -0,0 +1,8 @@
from pydantic import BaseModel


### Models ###


class AnswerRetrievalStats(BaseModel):
    answer_retrieval_stats: dict[str, float | int]
@@ -0,0 +1,34 @@
from langchain_core.messages import HumanMessage
from langchain_core.messages import merge_message_runs

from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    AnswerQuestionState,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    QACheckUpdate,
)
from onyx.agent_search.shared_graph_utils.prompts import SUB_CHECK_PROMPT


def answer_check(state: AnswerQuestionState) -> QACheckUpdate:
    msg = [
        HumanMessage(
            content=SUB_CHECK_PROMPT.format(
                question=state["question"],
                base_answer=state["answer"],
            )
        )
    ]

    fast_llm = state["subgraph_fast_llm"]
    response = list(
        fast_llm.stream(
            prompt=msg,
        )
    )

    quality_str = merge_message_runs(response, chunk_separator="")[0].content

    return QACheckUpdate(
        answer_quality=quality_str,
    )
@@ -0,0 +1,94 @@
import datetime
from typing import Any

from langchain_core.callbacks.manager import dispatch_custom_event
from langchain_core.messages import merge_message_runs

from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    AnswerQuestionState,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    QAGenerationUpdate,
)
from onyx.agent_search.shared_graph_utils.agent_prompt_ops import (
    build_sub_question_answer_prompt,
)
from onyx.agent_search.shared_graph_utils.prompts import ASSISTANT_SYSTEM_PROMPT_DEFAULT
from onyx.agent_search.shared_graph_utils.prompts import ASSISTANT_SYSTEM_PROMPT_PERSONA
from onyx.agent_search.shared_graph_utils.utils import get_persona_prompt
from onyx.agent_search.shared_graph_utils.utils import parse_question_id
from onyx.chat.models import AgentAnswerPiece, StreamStopInfo, StreamStopReason
from onyx.utils.logger import setup_logger

logger = setup_logger()


def answer_generation(state: AnswerQuestionState) -> QAGenerationUpdate:
    now_start = datetime.datetime.now()
    logger.debug(f"--------{now_start}--------START ANSWER GENERATION---")

    question = state["question"]
    docs = state["documents"]
    level, question_nr = parse_question_id(state["question_id"])
    persona_prompt = get_persona_prompt(state["subgraph_config"].search_request.persona)

    if len(docs) == 0:
        dispatch_custom_event(
            "sub_answers",
            AgentAnswerPiece(
                answer_piece="I don't know",
                level=level,
                level_question_nr=question_nr,
                answer_type="agent_sub_answer",
            ),
        )
        answer_str = "I don't know"
    else:
        # use the default system prompt when no persona prompt is configured
        # (the original diff inverted this check)
        if len(persona_prompt) == 0:
            persona_specification = ASSISTANT_SYSTEM_PROMPT_DEFAULT
        else:
            persona_specification = ASSISTANT_SYSTEM_PROMPT_PERSONA.format(
                persona_prompt=persona_prompt
            )

        logger.debug(f"Number of verified retrieval docs: {len(docs)}")

        fast_llm = state["subgraph_fast_llm"]
        msg = build_sub_question_answer_prompt(
            question=question,
            original_question=state["subgraph_config"].search_request.query,
            docs=docs,
            persona_specification=persona_specification,
            config=fast_llm.config,
        )

        response: list[str | list[str | dict[str, Any]]] = []
        for message in fast_llm.stream(
            prompt=msg,
        ):
            # TODO: in principle, the answer here COULD contain images, but we don't support that yet
            content = message.content
            if not isinstance(content, str):
                raise ValueError(
                    f"Expected content to be a string, but got {type(content)}"
                )
            dispatch_custom_event(
                "sub_answers",
                AgentAnswerPiece(
                    answer_piece=content,
                    level=level,
                    level_question_nr=question_nr,
                    answer_type="agent_sub_answer",
                ),
            )
            response.append(content)

        answer_str = merge_message_runs(response, chunk_separator="")[0].content

    stop_event = StreamStopInfo(
        stop_reason=StreamStopReason.FINISHED,
        level=level,
        level_question_nr=question_nr,
    )
    dispatch_custom_event("sub_answer_finished", stop_event)

    return QAGenerationUpdate(
        answer=answer_str,
    )
@@ -0,0 +1,25 @@
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    AnswerQuestionOutput,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    AnswerQuestionState,
)
from onyx.agent_search.shared_graph_utils.models import (
    QuestionAnswerResults,
)


def format_answer(state: AnswerQuestionState) -> AnswerQuestionOutput:
    return AnswerQuestionOutput(
        answer_results=[
            QuestionAnswerResults(
                question=state["question"],
                question_id=state["question_id"],
                quality=state.get("answer_quality", "No"),
                answer=state["answer"],
                expanded_retrieval_results=state["expanded_retrieval_results"],
                documents=state["documents"],
                sub_question_retrieval_stats=state["sub_question_retrieval_stats"],
            )
        ],
    )
@@ -0,0 +1,23 @@
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    RetrievalIngestionUpdate,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
    ExpandedRetrievalOutput,
)
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats


def ingest_retrieval(state: ExpandedRetrievalOutput) -> RetrievalIngestionUpdate:
    sub_question_retrieval_stats = state[
        "expanded_retrieval_result"
    ].sub_question_retrieval_stats
    if sub_question_retrieval_stats is None:
        sub_question_retrieval_stats = [AgentChunkStats()]

    return RetrievalIngestionUpdate(
        expanded_retrieval_results=state[
            "expanded_retrieval_result"
        ].expanded_queries_results,
        documents=state["expanded_retrieval_result"].all_documents,
        sub_question_retrieval_stats=sub_question_retrieval_stats,
    )
@@ -0,0 +1,63 @@
from operator import add
from typing import Annotated
from typing import TypedDict

from onyx.agent_search.core_state import SubgraphCoreState
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.agent_search.shared_graph_utils.models import QueryResult
from onyx.agent_search.shared_graph_utils.models import (
    QuestionAnswerResults,
)
from onyx.agent_search.shared_graph_utils.operators import dedup_inference_sections
from onyx.context.search.models import InferenceSection


## Update States
class QACheckUpdate(TypedDict):
    answer_quality: str


class QAGenerationUpdate(TypedDict):
    answer: str
    # answer_stat: AnswerStats


class RetrievalIngestionUpdate(TypedDict):
    expanded_retrieval_results: list[QueryResult]
    documents: Annotated[list[InferenceSection], dedup_inference_sections]
    sub_question_retrieval_stats: AgentChunkStats


## Graph Input State


class AnswerQuestionInput(SubgraphCoreState):
    question: str
    question_id: str  # 0_0 is original question, everything else is <level>_<question_num>.
    # level 0 is original question and first decomposition, level 1 is follow up, etc
    # question_num is a unique number per original question per level.


## Graph State


class AnswerQuestionState(
    AnswerQuestionInput,
    QAGenerationUpdate,
    QACheckUpdate,
    RetrievalIngestionUpdate,
):
    pass


## Graph Output State


class AnswerQuestionOutput(TypedDict):
    """
    This is a list of results even though each call of this subgraph only returns one result.
    This is because if we parallelize the answer query subgraph, there will be multiple
    results in a list so the add operator is used to add them together.
    """

    answer_results: Annotated[list[QuestionAnswerResults], add]
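Reviewer note: the Annotated[..., add] pattern is what lets parallel subgraph runs merge — LangGraph feeds each branch's answer_results through the reducer instead of overwriting the key. A plain-Python illustration of the reducer's behavior:

    from operator import add

    # each parallel branch returns a one-element list; the reducer concatenates
    merged: list[str] = []
    for branch_result in (["answer-0"], ["answer-1"], ["answer-2"]):
        merged = add(merged, branch_result)
    print(merged)  # ['answer-0', 'answer-1', 'answer-2']
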
@@ -0,0 +1,28 @@
from collections.abc import Hashable

from langgraph.types import Send

from onyx.agent_search.core_state import in_subgraph_extract_core_fields
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    AnswerQuestionInput,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
    ExpandedRetrievalInput,
)
from onyx.utils.logger import setup_logger

logger = setup_logger()


def send_to_expanded_refined_retrieval(state: AnswerQuestionInput) -> Send | Hashable:
    logger.debug("sending to expanded retrieval for follow up question via edge")

    return Send(
        "refined_sub_question_expanded_retrieval",
        ExpandedRetrievalInput(
            **in_subgraph_extract_core_fields(state),
            question=state["question"],
            sub_question_id=state["question_id"],
            base_search=False,
        ),
    )
@@ -0,0 +1,122 @@
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph

from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.answer_check import (
    answer_check,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.answer_generation import (
    answer_generation,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.format_answer import (
    format_answer,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.ingest_retrieval import (
    ingest_retrieval,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    AnswerQuestionInput,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    AnswerQuestionOutput,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    AnswerQuestionState,
)
from onyx.agent_search.pro_search_a.answer_refinement_sub_question.edges import (
    send_to_expanded_refined_retrieval,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.graph_builder import (
    expanded_retrieval_graph_builder,
)
from onyx.utils.logger import setup_logger

logger = setup_logger()


def answer_refined_query_graph_builder() -> StateGraph:
    graph = StateGraph(
        state_schema=AnswerQuestionState,
        input=AnswerQuestionInput,
        output=AnswerQuestionOutput,
    )

    ### Add nodes ###

    expanded_retrieval = expanded_retrieval_graph_builder().compile()
    graph.add_node(
        node="refined_sub_question_expanded_retrieval",
        action=expanded_retrieval,
    )
    graph.add_node(
        node="refined_sub_answer_check",
        action=answer_check,
    )
    graph.add_node(
        node="refined_sub_answer_generation",
        action=answer_generation,
    )
    graph.add_node(
        node="format_refined_sub_answer",
        action=format_answer,
    )
    graph.add_node(
        node="ingest_refined_retrieval",
        action=ingest_retrieval,
    )

    ### Add edges ###

    graph.add_conditional_edges(
        source=START,
        path=send_to_expanded_refined_retrieval,
        path_map=["refined_sub_question_expanded_retrieval"],
    )
    graph.add_edge(
        start_key="refined_sub_question_expanded_retrieval",
        end_key="ingest_refined_retrieval",
    )
    graph.add_edge(
        start_key="ingest_refined_retrieval",
        end_key="refined_sub_answer_generation",
    )
    graph.add_edge(
        start_key="refined_sub_answer_generation",
        end_key="refined_sub_answer_check",
    )
    graph.add_edge(
        start_key="refined_sub_answer_check",
        end_key="format_refined_sub_answer",
    )
    graph.add_edge(
        start_key="format_refined_sub_answer",
        end_key=END,
    )

    return graph


if __name__ == "__main__":
    from onyx.db.engine import get_session_context_manager
    from onyx.llm.factory import get_default_llms
    from onyx.context.search.models import SearchRequest

    graph = answer_refined_query_graph_builder()
    compiled_graph = graph.compile()
    primary_llm, fast_llm = get_default_llms()
    search_request = SearchRequest(
        query="what can you do with onyx or danswer?",
    )
    with get_session_context_manager() as db_session:
        inputs = AnswerQuestionInput(
            question="what can you do with onyx?",
            question_id="0_0",
        )
        for thing in compiled_graph.stream(
            input=inputs,
            # debug=True,
            # subgraphs=True,
        ):
            logger.debug(thing)
        # output = compiled_graph.invoke(inputs)
        # logger.debug(output)
@@ -0,0 +1,19 @@
from pydantic import BaseModel

from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.context.search.models import InferenceSection

### Models ###


class AnswerRetrievalStats(BaseModel):
    answer_retrieval_stats: dict[str, float | int]


class QuestionAnswerResults(BaseModel):
    question: str
    answer: str
    quality: str
    # expanded_retrieval_results: list[QueryResult]
    documents: list[InferenceSection]
    sub_question_retrieval_stats: AgentChunkStats
@@ -0,0 +1,70 @@
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph

from onyx.agent_search.pro_search_a.base_raw_search.nodes.format_raw_search_results import (
    format_raw_search_results,
)
from onyx.agent_search.pro_search_a.base_raw_search.nodes.generate_raw_search_data import (
    generate_raw_search_data,
)
from onyx.agent_search.pro_search_a.base_raw_search.states import BaseRawSearchInput
from onyx.agent_search.pro_search_a.base_raw_search.states import BaseRawSearchOutput
from onyx.agent_search.pro_search_a.base_raw_search.states import BaseRawSearchState
from onyx.agent_search.pro_search_a.expanded_retrieval.graph_builder import (
    expanded_retrieval_graph_builder,
)


def base_raw_search_graph_builder() -> StateGraph:
    graph = StateGraph(
        state_schema=BaseRawSearchState,
        input=BaseRawSearchInput,
        output=BaseRawSearchOutput,
    )

    ### Add nodes ###

    expanded_retrieval = expanded_retrieval_graph_builder().compile()
    graph.add_node(
        node="generate_raw_search_data",
        action=generate_raw_search_data,
    )

    graph.add_node(
        node="expanded_retrieval_base_search",
        action=expanded_retrieval,
    )
    graph.add_node(
        node="format_raw_search_results",
        action=format_raw_search_results,
    )

    ### Add edges ###

    graph.add_edge(start_key=START, end_key="generate_raw_search_data")

    graph.add_edge(
        start_key="generate_raw_search_data",
        end_key="expanded_retrieval_base_search",
    )
    graph.add_edge(
        start_key="expanded_retrieval_base_search",
        end_key="format_raw_search_results",
    )

    # graph.add_edge(
    #     start_key="expanded_retrieval_base_search",
    #     end_key=END,
    # )

    graph.add_edge(
        start_key="format_raw_search_results",
        end_key=END,
    )

    return graph


if __name__ == "__main__":
    pass
@@ -0,0 +1,20 @@
from pydantic import BaseModel

from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.agent_search.shared_graph_utils.models import QueryResult
from onyx.context.search.models import InferenceSection

### Models ###


class AnswerRetrievalStats(BaseModel):
    answer_retrieval_stats: dict[str, float | int]


class QuestionAnswerResults(BaseModel):
    question: str
    answer: str
    quality: str
    expanded_retrieval_results: list[QueryResult]
    documents: list[InferenceSection]
    sub_question_retrieval_stats: list[AgentChunkStats]
@@ -0,0 +1,16 @@
from onyx.agent_search.pro_search_a.base_raw_search.states import BaseRawSearchOutput
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
    ExpandedRetrievalOutput,
)
from onyx.utils.logger import setup_logger

logger = setup_logger()


def format_raw_search_results(state: ExpandedRetrievalOutput) -> BaseRawSearchOutput:
    logger.debug("format_raw_search_results")
    return BaseRawSearchOutput(
        base_expanded_retrieval_result=state["expanded_retrieval_result"],
        # base_retrieval_results=[state["expanded_retrieval_result"]],
        # base_search_documents=[],
    )
@@ -0,0 +1,21 @@
from onyx.agent_search.core_state import CoreState
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
    ExpandedRetrievalInput,
)
from onyx.utils.logger import setup_logger

logger = setup_logger()


def generate_raw_search_data(state: CoreState) -> ExpandedRetrievalInput:
    logger.debug("generate_raw_search_data")
    return ExpandedRetrievalInput(
        subgraph_config=state["config"],
        subgraph_primary_llm=state["primary_llm"],
        subgraph_fast_llm=state["fast_llm"],
        subgraph_db_session=state["db_session"],
        question=state["config"].search_request.query,
        base_search=True,
        subgraph_search_tool=state["search_tool"],
        sub_question_id=None,  # This graph is always and only used for the original question
    )
@@ -0,0 +1,42 @@
from typing import TypedDict

from onyx.agent_search.core_state import CoreState
from onyx.agent_search.core_state import SubgraphCoreState
from onyx.agent_search.pro_search_a.expanded_retrieval.models import (
    ExpandedRetrievalResult,
)


## Update States


## Graph Input State


class BaseRawSearchInput(CoreState, SubgraphCoreState):
    pass


## Graph Output State


class BaseRawSearchOutput(TypedDict):
    """
    This is a list of results even though each call of this subgraph only returns one result.
    This is because if we parallelize the answer query subgraph, there will be multiple
    results in a list, so the add operator is used to combine them.
    """

    # base_search_documents: Annotated[list[InferenceSection], dedup_inference_sections]
    # base_retrieval_results: Annotated[list[ExpandedRetrievalResult], add]
    base_expanded_retrieval_result: ExpandedRetrievalResult


## Graph State


class BaseRawSearchState(
    BaseRawSearchInput,
    BaseRawSearchOutput,
):
    pass
@@ -0,0 +1,28 @@
from collections.abc import Hashable

from langgraph.types import Send

from onyx.agent_search.core_state import in_subgraph_extract_core_fields
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import RetrievalInput
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
    ExpandedRetrievalState,
)


def parallel_retrieval_edge(state: ExpandedRetrievalState) -> list[Send | Hashable]:
    question = state.get("question", state["subgraph_config"].search_request.query)

    query_expansions = state.get("expanded_queries", []) + [question]
    return [
        Send(
            "doc_retrieval",
            RetrievalInput(
                query_to_retrieve=query,
                question=question,
                **in_subgraph_extract_core_fields(state),
                base_search=False,
                sub_question_id=state.get("sub_question_id"),
            ),
        )
        for query in query_expansions
    ]
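For context, here is a minimal, self-contained sketch of the fan-out pattern used above: a conditional edge returns one `Send` per expanded query, so each query gets its own parallel retrieval run whose list updates are merged by the state reducers. All names and the state shape here are illustrative assumptions, not part of this diff:

from operator import add
from typing import Annotated, TypedDict

from langgraph.graph import END, START, StateGraph
from langgraph.types import Send


class FanOutState(TypedDict):
    queries: list[str]
    # Each parallel retrieval appends its hits; `add` concatenates them.
    hits: Annotated[list[str], add]


def retrieve(state: dict) -> dict:
    # Stand-in for the real search-tool call for a single query.
    return {"hits": [f"doc-for-{state['query']}"]}


def fan_out(state: FanOutState) -> list[Send]:
    # One Send per query -> one parallel `retrieve` invocation each.
    return [Send("retrieve", {"query": q}) for q in state["queries"]]


demo = StateGraph(FanOutState)
demo.add_node("retrieve", retrieve)
demo.add_conditional_edges(START, fan_out, ["retrieve"])
demo.add_edge("retrieve", END)

result = demo.compile().invoke({"queries": ["q1", "q2"], "hits": []})
# result["hits"] holds one entry per query (order may vary).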
@@ -0,0 +1,126 @@
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph

from onyx.agent_search.pro_search_a.expanded_retrieval.edges import (
    parallel_retrieval_edge,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import doc_reranking
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import doc_retrieval
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import doc_verification
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import expand_queries
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import format_results
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import verification_kickoff
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
    ExpandedRetrievalInput,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
    ExpandedRetrievalOutput,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
    ExpandedRetrievalState,
)
from onyx.agent_search.shared_graph_utils.utils import get_test_config
from onyx.utils.logger import setup_logger

logger = setup_logger()


def expanded_retrieval_graph_builder() -> StateGraph:
    graph = StateGraph(
        state_schema=ExpandedRetrievalState,
        input=ExpandedRetrievalInput,
        output=ExpandedRetrievalOutput,
    )

    ### Add nodes ###

    graph.add_node(
        node="expand_queries",
        action=expand_queries,
    )

    graph.add_node(
        node="doc_retrieval",
        action=doc_retrieval,
    )
    graph.add_node(
        node="verification_kickoff",
        action=verification_kickoff,
    )
    graph.add_node(
        node="doc_verification",
        action=doc_verification,
    )
    graph.add_node(
        node="doc_reranking",
        action=doc_reranking,
    )
    graph.add_node(
        node="format_results",
        action=format_results,
    )

    ### Add edges ###
    graph.add_edge(
        start_key=START,
        end_key="expand_queries",
    )

    graph.add_conditional_edges(
        source="expand_queries",
        path=parallel_retrieval_edge,
        path_map=["doc_retrieval"],
    )
    graph.add_edge(
        start_key="doc_retrieval",
        end_key="verification_kickoff",
    )
    graph.add_edge(
        start_key="doc_verification",
        end_key="doc_reranking",
    )
    graph.add_edge(
        start_key="doc_reranking",
        end_key="format_results",
    )
    graph.add_edge(
        start_key="format_results",
        end_key=END,
    )

    return graph


if __name__ == "__main__":
    from onyx.db.engine import get_session_context_manager
    from onyx.llm.factory import get_default_llms
    from onyx.context.search.models import SearchRequest

    graph = expanded_retrieval_graph_builder()
    compiled_graph = graph.compile()
    primary_llm, fast_llm = get_default_llms()
    search_request = SearchRequest(
        query="what can you do with onyx or danswer?",
    )

    with get_session_context_manager() as db_session:
        pro_search_config, search_tool = get_test_config(
            db_session, primary_llm, fast_llm, search_request
        )
        inputs = ExpandedRetrievalInput(
            question="what can you do with onyx?",
            base_search=False,
            subgraph_fast_llm=fast_llm,
            subgraph_primary_llm=primary_llm,
            subgraph_db_session=db_session,
            subgraph_config=pro_search_config,
            subgraph_search_tool=search_tool,
            sub_question_id=None,
        )
        for thing in compiled_graph.stream(
            input=inputs,
            # debug=True,
            subgraphs=True,
        ):
            logger.debug(thing)
@@ -0,0 +1,11 @@
from pydantic import BaseModel

from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.agent_search.shared_graph_utils.models import QueryResult
from onyx.context.search.models import InferenceSection


class ExpandedRetrievalResult(BaseModel):
    expanded_queries_results: list[QueryResult]
    all_documents: list[InferenceSection]
    sub_question_retrieval_stats: AgentChunkStats
@@ -0,0 +1,413 @@
from collections import defaultdict
from collections.abc import Callable
from typing import cast
from typing import Literal

import numpy as np
from langchain_core.callbacks.manager import dispatch_custom_event
from langchain_core.messages import HumanMessage
from langchain_core.messages import merge_message_runs
from langgraph.types import Command
from langgraph.types import Send

from onyx.agent_search.core_state import in_subgraph_extract_core_fields
from onyx.agent_search.pro_search_a.expanded_retrieval.models import (
    ExpandedRetrievalResult,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import DocRerankingUpdate
from onyx.agent_search.pro_search_a.expanded_retrieval.states import DocRetrievalUpdate
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
    DocVerificationInput,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
    DocVerificationUpdate,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
    ExpandedRetrievalInput,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
    ExpandedRetrievalState,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
    ExpandedRetrievalUpdate,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import InferenceSection
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
    QueryExpansionUpdate,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import RetrievalInput
from onyx.agent_search.shared_graph_utils.calculations import get_fit_scores
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.agent_search.shared_graph_utils.models import QueryResult
from onyx.agent_search.shared_graph_utils.models import RetrievalFitStats
from onyx.agent_search.shared_graph_utils.prompts import REWRITE_PROMPT_MULTI_ORIGINAL
from onyx.agent_search.shared_graph_utils.prompts import VERIFIER_PROMPT
from onyx.agent_search.shared_graph_utils.utils import dispatch_separated
from onyx.agent_search.shared_graph_utils.utils import parse_question_id
from onyx.chat.models import ExtendedToolResponse
from onyx.chat.models import SubQueryPiece
from onyx.configs.dev_configs import AGENT_MAX_QUERY_RETRIEVAL_RESULTS
from onyx.configs.dev_configs import AGENT_RERANKING_MAX_QUERY_RETRIEVAL_RESULTS
from onyx.configs.dev_configs import AGENT_RERANKING_STATS
from onyx.configs.dev_configs import AGENT_RETRIEVAL_STATS
from onyx.context.search.models import SearchRequest
from onyx.context.search.pipeline import retrieval_preprocessing
from onyx.context.search.postprocessing.postprocessing import rerank_sections
from onyx.db.engine import get_session_context_manager
from onyx.llm.interfaces import LLM
from onyx.tools.models import SearchQueryInfo
from onyx.tools.tool_implementations.search.search_tool import (
    SEARCH_RESPONSE_SUMMARY_ID,
)
from onyx.tools.tool_implementations.search.search_tool import SearchResponseSummary
from onyx.tools.tool_implementations.search.search_tool import yield_search_responses
from onyx.utils.logger import setup_logger

logger = setup_logger()


def dispatch_subquery(level: int, question_nr: int) -> Callable[[str, int], None]:
    def helper(token: str, num: int) -> None:
        dispatch_custom_event(
            "subqueries",
            SubQueryPiece(
                sub_query=token,
                level=level,
                level_question_nr=question_nr,
                query_id=num,
            ),
        )

    return helper


def expand_queries(state: ExpandedRetrievalInput) -> QueryExpansionUpdate:
    # Sometimes we want to expand the original question, sometimes we want to
    # expand a sub-question. When we are running this node on the original
    # question, no question is explicitly passed in. Instead, we use the
    # original question from the search request.
    question = state.get("question", state["subgraph_config"].search_request.query)
    llm: LLM = state["subgraph_fast_llm"]
    chat_session_id = state["subgraph_config"].chat_session_id
    sub_question_id = state.get("sub_question_id")
    if sub_question_id is None:
        level, question_nr = 0, 0
    else:
        level, question_nr = parse_question_id(sub_question_id)

    if chat_session_id is None:
        raise ValueError("chat_session_id must be provided for agent search")

    msg = [
        HumanMessage(
            content=REWRITE_PROMPT_MULTI_ORIGINAL.format(question=question),
        )
    ]

    llm_response_list = dispatch_separated(
        llm.stream(prompt=msg), dispatch_subquery(level, question_nr)
    )

    llm_response = merge_message_runs(llm_response_list, chunk_separator="")[0].content

    rewritten_queries = llm_response.split("\n")

    return QueryExpansionUpdate(
        expanded_queries=rewritten_queries,
    )
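The sub-question ids handled above appear to follow a "{level}_{question_nr}" convention (for example "0_0" for the original question). The real `make_question_id` and `parse_question_id` live in `onyx.agent_search.shared_graph_utils.utils` and are not shown in this diff; a plausible sketch under that assumption:

def make_question_id(level: int, question_nr: int) -> str:
    # Assumed format: "<level>_<question_nr>", e.g. "0_0" or "1_2".
    return f"{level}_{question_nr}"


def parse_question_id(question_id: str) -> tuple[int, int]:
    level_str, question_nr_str = question_id.split("_")
    return int(level_str), int(question_nr_str)


assert parse_question_id(make_question_id(1, 2)) == (1, 2)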


def doc_retrieval(state: RetrievalInput) -> DocRetrievalUpdate:
    """
    Retrieve documents

    Args:
        state (RetrievalInput): Primary state + the query to retrieve

    Updates:
        expanded_retrieval_results: list[QueryResult]
        retrieved_documents: list[InferenceSection]
    """
    query_to_retrieve = state["query_to_retrieve"]
    search_tool = state["subgraph_search_tool"]

    retrieved_docs: list[InferenceSection] = []
    if not query_to_retrieve.strip():
        logger.warning("Empty query, skipping retrieval")
        return DocRetrievalUpdate(
            expanded_retrieval_results=[],
            retrieved_documents=[],
        )

    query_info = None
    # new db session to avoid concurrency issues
    with get_session_context_manager() as db_session:
        for tool_response in search_tool.run(
            query=query_to_retrieve,
            force_no_rerank=True,
            alternate_db_session=db_session,
        ):
            # get retrieved docs to send to the rest of the graph
            if tool_response.id == SEARCH_RESPONSE_SUMMARY_ID:
                response = cast(SearchResponseSummary, tool_response.response)
                retrieved_docs = response.top_sections
                query_info = SearchQueryInfo(
                    predicted_search=response.predicted_search,
                    final_filters=response.final_filters,
                    recency_bias_multiplier=response.recency_bias_multiplier,
                )

    retrieved_docs = retrieved_docs[:AGENT_MAX_QUERY_RETRIEVAL_RESULTS]
    pre_rerank_docs = retrieved_docs
    if search_tool.search_pipeline is not None:
        pre_rerank_docs = (
            search_tool.search_pipeline._retrieved_sections or retrieved_docs
        )

    if AGENT_RETRIEVAL_STATS:
        fit_scores = get_fit_scores(
            pre_rerank_docs,
            retrieved_docs,
        )
    else:
        fit_scores = None

    expanded_retrieval_result = QueryResult(
        query=query_to_retrieve,
        search_results=retrieved_docs,
        stats=fit_scores,
        query_info=query_info,
    )
    return DocRetrievalUpdate(
        expanded_retrieval_results=[expanded_retrieval_result],
        retrieved_documents=retrieved_docs,
    )


def verification_kickoff(
    state: ExpandedRetrievalState,
) -> Command[Literal["doc_verification"]]:
    documents = state["retrieved_documents"]
    verification_question = state.get(
        "question", state["subgraph_config"].search_request.query
    )
    sub_question_id = state.get("sub_question_id")
    return Command(
        update={},
        goto=[
            Send(
                node="doc_verification",
                arg=DocVerificationInput(
                    doc_to_verify=doc,
                    question=verification_question,
                    base_search=False,
                    sub_question_id=sub_question_id,
                    **in_subgraph_extract_core_fields(state),
                ),
            )
            for doc in documents
        ],
    )


def doc_verification(state: DocVerificationInput) -> DocVerificationUpdate:
    """
    Check whether the document is relevant for the original user question

    Args:
        state (DocVerificationInput): The current state

    Updates:
        verified_documents: list[InferenceSection]
    """
    question = state["question"]
    doc_to_verify = state["doc_to_verify"]
    document_content = doc_to_verify.combined_content

    msg = [
        HumanMessage(
            content=VERIFIER_PROMPT.format(
                question=question, document_content=document_content
            )
        )
    ]

    fast_llm = state["subgraph_fast_llm"]

    response = fast_llm.invoke(msg)

    verified_documents = []
    if isinstance(response.content, str) and "yes" in response.content.lower():
        verified_documents.append(doc_to_verify)

    return DocVerificationUpdate(
        verified_documents=verified_documents,
    )


def doc_reranking(state: ExpandedRetrievalState) -> DocRerankingUpdate:
    verified_documents = state["verified_documents"]

    # Rerank post retrieval and verification. First, create a search query,
    # then create the list of reranked sections.

    question = state.get("question", state["subgraph_config"].search_request.query)
    with get_session_context_manager() as db_session:
        _search_query = retrieval_preprocessing(
            search_request=SearchRequest(query=question),
            user=state["subgraph_search_tool"].user,  # bit of a hack
            llm=state["subgraph_fast_llm"],
            db_session=db_session,
        )

    # skip section filtering

    if (
        _search_query.rerank_settings
        and _search_query.rerank_settings.rerank_model_name
        and _search_query.rerank_settings.num_rerank > 0
    ):
        reranked_documents = rerank_sections(
            _search_query,
            verified_documents,
        )
    else:
        logger.warning("No reranking settings found, using unranked documents")
        reranked_documents = verified_documents

    if AGENT_RERANKING_STATS:
        fit_scores = get_fit_scores(verified_documents, reranked_documents)
    else:
        fit_scores = RetrievalFitStats(fit_score_lift=0, rerank_effect=0, fit_scores={})

    # TODO: stream deduped docs here, or decide to use search tool ranking/verification

    return DocRerankingUpdate(
        reranked_documents=[
            doc for doc in reranked_documents if isinstance(doc, InferenceSection)
        ][:AGENT_RERANKING_MAX_QUERY_RETRIEVAL_RESULTS],
        sub_question_retrieval_stats=fit_scores,
    )


def _calculate_sub_question_retrieval_stats(
    verified_documents: list[InferenceSection],
    expanded_retrieval_results: list[QueryResult],
) -> AgentChunkStats:
    chunk_scores: dict[str, dict[str, list[int | float]]] = defaultdict(
        lambda: defaultdict(list)
    )

    for expanded_retrieval_result in expanded_retrieval_results:
        for doc in expanded_retrieval_result.search_results:
            doc_chunk_id = f"{doc.center_chunk.document_id}_{doc.center_chunk.chunk_id}"
            if doc.center_chunk.score is not None:
                chunk_scores[doc_chunk_id]["score"].append(doc.center_chunk.score)

    verified_doc_chunk_ids = [
        f"{verified_document.center_chunk.document_id}_{verified_document.center_chunk.chunk_id}"
        for verified_document in verified_documents
    ]
    dismissed_doc_chunk_ids = []

    raw_chunk_stats_counts: dict[str, int] = defaultdict(int)
    raw_chunk_stats_scores: dict[str, float] = defaultdict(float)
    for doc_chunk_id, chunk_data in chunk_scores.items():
        # mean score of this chunk across all expanded queries
        valid_chunk_scores = [
            score for score in chunk_data["score"] if score is not None
        ]
        if doc_chunk_id in verified_doc_chunk_ids:
            raw_chunk_stats_counts["verified_count"] += 1
            raw_chunk_stats_scores["verified_scores"] += float(
                np.mean(valid_chunk_scores)
            )
        else:
            raw_chunk_stats_counts["rejected_count"] += 1
            raw_chunk_stats_scores["rejected_scores"] += float(
                np.mean(valid_chunk_scores)
            )
            dismissed_doc_chunk_ids.append(doc_chunk_id)

    if raw_chunk_stats_counts["verified_count"] == 0:
        verified_avg_scores = 0.0
    else:
        verified_avg_scores = raw_chunk_stats_scores["verified_scores"] / float(
            raw_chunk_stats_counts["verified_count"]
        )

    rejected_scores = raw_chunk_stats_scores.get("rejected_scores", None)
    if rejected_scores is not None:
        rejected_avg_scores = rejected_scores / float(
            raw_chunk_stats_counts["rejected_count"]
        )
    else:
        rejected_avg_scores = None

    chunk_stats = AgentChunkStats(
        verified_count=raw_chunk_stats_counts["verified_count"],
        verified_avg_scores=verified_avg_scores,
        rejected_count=raw_chunk_stats_counts["rejected_count"],
        rejected_avg_scores=rejected_avg_scores,
        verified_doc_chunk_ids=verified_doc_chunk_ids,
        dismissed_doc_chunk_ids=dismissed_doc_chunk_ids,
    )

    return chunk_stats


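A quick worked check of the bookkeeping above, with hypothetical numbers:

# Suppose two chunks were scored across the expanded queries:
#   chunk "d1_0": scores [0.9, 0.7] -> mean 0.8, and it was verified
#   chunk "d2_3": scores [0.4]      -> mean 0.4, and it was rejected
# Then the function returns:
#   verified_count = 1, verified_avg_scores = 0.8 / 1 = 0.8
#   rejected_count = 1, rejected_avg_scores = 0.4 / 1 = 0.4
#   verified_doc_chunk_ids = ["d1_0"], dismissed_doc_chunk_ids = ["d2_3"]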
def format_results(state: ExpandedRetrievalState) -> ExpandedRetrievalUpdate:
    level, question_nr = parse_question_id(state.get("sub_question_id") or "0_0")
    query_infos = [
        result.query_info
        for result in state["expanded_retrieval_results"]
        if result.query_info is not None
    ]
    if len(query_infos) == 0:
        raise ValueError("No query info found")

    # main question docs will be sent later after aggregation and deduping with sub-question docs
    if not (level == 0 and question_nr == 0):
        logger.debug(
            f"Yielding search responses for sub-question "
            f"(level {level}, question {question_nr})"
        )
        for tool_response in yield_search_responses(
            query=state["question"],
            reranked_sections=state[
                "retrieved_documents"
            ],  # TODO: rename params. this one is supposed to be the sections pre-merging
            final_context_sections=state["reranked_documents"],
            search_query_info=query_infos[0],  # TODO: handle differing query infos?
            get_section_relevance=lambda: None,  # TODO: add relevance
            search_tool=state["subgraph_search_tool"],
        ):
            dispatch_custom_event(
                "tool_response",
                ExtendedToolResponse(
                    id=tool_response.id,
                    response=tool_response.response,
                    level=level,
                    level_question_nr=question_nr,
                ),
            )
    sub_question_retrieval_stats = _calculate_sub_question_retrieval_stats(
        verified_documents=state["verified_documents"],
        expanded_retrieval_results=state["expanded_retrieval_results"],
    )

    if sub_question_retrieval_stats is None:
        sub_question_retrieval_stats = AgentChunkStats()
    # else:
    #     sub_question_retrieval_stats = [sub_question_retrieval_stats]

    return ExpandedRetrievalUpdate(
        expanded_retrieval_result=ExpandedRetrievalResult(
            expanded_queries_results=state["expanded_retrieval_results"],
            all_documents=state["reranked_documents"],
            sub_question_retrieval_stats=sub_question_retrieval_stats,
        ),
    )
@@ -0,0 +1,82 @@
from operator import add
from typing import Annotated
from typing import TypedDict

from onyx.agent_search.core_state import SubgraphCoreState
from onyx.agent_search.pro_search_a.expanded_retrieval.models import (
    ExpandedRetrievalResult,
)
from onyx.agent_search.shared_graph_utils.models import QueryResult
from onyx.agent_search.shared_graph_utils.models import RetrievalFitStats
from onyx.agent_search.shared_graph_utils.operators import dedup_inference_sections
from onyx.context.search.models import InferenceSection


### States ###

## Graph Input State


class ExpandedRetrievalInput(SubgraphCoreState):
    question: str
    base_search: bool
    sub_question_id: str | None


## Update/Return States


class QueryExpansionUpdate(TypedDict):
    expanded_queries: list[str]


class DocVerificationUpdate(TypedDict):
    verified_documents: Annotated[list[InferenceSection], dedup_inference_sections]


class DocRetrievalUpdate(TypedDict):
    expanded_retrieval_results: Annotated[list[QueryResult], add]
    retrieved_documents: Annotated[list[InferenceSection], dedup_inference_sections]


class DocRerankingUpdate(TypedDict):
    reranked_documents: Annotated[list[InferenceSection], dedup_inference_sections]
    sub_question_retrieval_stats: RetrievalFitStats | None


class ExpandedRetrievalUpdate(TypedDict):
    expanded_retrieval_result: ExpandedRetrievalResult


## Graph Output State


class ExpandedRetrievalOutput(TypedDict):
    expanded_retrieval_result: ExpandedRetrievalResult
    base_expanded_retrieval_result: ExpandedRetrievalResult


## Graph State


class ExpandedRetrievalState(
    # This includes the core state
    ExpandedRetrievalInput,
    QueryExpansionUpdate,
    DocRetrievalUpdate,
    DocVerificationUpdate,
    DocRerankingUpdate,
    ExpandedRetrievalOutput,
):
    pass


## Conditional Input States


class DocVerificationInput(ExpandedRetrievalInput):
    doc_to_verify: InferenceSection


class RetrievalInput(ExpandedRetrievalInput):
    query_to_retrieve: str
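Several channels above use `dedup_inference_sections` as their reducer. Its implementation (in `onyx.agent_search.shared_graph_utils.operators`) is not part of this diff; a plausible sketch, assuming sections are identified the same way the nodes key them, by the center chunk's document and chunk ids:

from onyx.context.search.models import InferenceSection


def dedup_inference_sections_sketch(
    existing: list[InferenceSection], new: list[InferenceSection]
) -> list[InferenceSection]:
    # Hypothetical: the real operator may differ in key choice or ordering.
    seen: set[str] = set()
    deduped: list[InferenceSection] = []
    for section in existing + new:
        key = f"{section.center_chunk.document_id}_{section.center_chunk.chunk_id}"
        if key not in seen:
            seen.add(key)
            deduped.append(section)
    return deduped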
backend/onyx/agent_search/pro_search_a/main/edges.py (new file, 92 lines)
@@ -0,0 +1,92 @@
from collections.abc import Hashable
from typing import Literal

from langgraph.types import Send

from onyx.agent_search.core_state import extract_core_fields_for_subgraph
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    AnswerQuestionInput,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
    AnswerQuestionOutput,
)
from onyx.agent_search.pro_search_a.main.states import MainState
from onyx.agent_search.pro_search_a.main.states import RequireRefinedAnswerUpdate
from onyx.agent_search.shared_graph_utils.utils import make_question_id
from onyx.utils.logger import setup_logger

logger = setup_logger()


def parallelize_initial_sub_question_answering(
    state: MainState,
) -> list[Send | Hashable]:
    if len(state["initial_decomp_questions"]) > 0:
        # sub_question_record_ids = [subq_record.id for subq_record in state["sub_question_records"]]
        # if len(state["sub_question_records"]) == 0:
        #     if state["config"].use_persistence:
        #         raise ValueError("No sub-questions found for initial decomposed questions")
        #     else:
        #         # in this case, we are doing retrieval on the original question.
        #         # to make all the logic consistent, we create a new sub-question
        #         # with the same content as the original question
        #         sub_question_record_ids = [1] * len(state["initial_decomp_questions"])

        return [
            Send(
                "answer_query_subgraph",
                AnswerQuestionInput(
                    **extract_core_fields_for_subgraph(state),
                    question=question,
                    question_id=make_question_id(0, question_nr + 1),
                ),
            )
            for question_nr, question in enumerate(state["initial_decomp_questions"])
        ]

    else:
        return [
            Send(
                "ingest_answers",
                AnswerQuestionOutput(
                    answer_results=[],
                ),
            )
        ]


# Define the function that determines whether to continue or not
def continue_to_refined_answer_or_end(
    state: RequireRefinedAnswerUpdate,
) -> Literal["refined_sub_question_creation", "logging_node"]:
    if state["require_refined_answer"]:
        return "refined_sub_question_creation"
    else:
        return "logging_node"


def parallelize_refined_sub_question_answering(
    state: MainState,
) -> list[Send | Hashable]:
    if len(state["refined_sub_questions"]) > 0:
        return [
            Send(
                "answer_refined_question",
                AnswerQuestionInput(
                    **extract_core_fields_for_subgraph(state),
                    question=question_data.sub_question,
                    question_id=make_question_id(1, question_nr),
                ),
            )
            for question_nr, question_data in state["refined_sub_questions"].items()
        ]

    else:
        return [
            Send(
                "ingest_refined_sub_answers",
                AnswerQuestionOutput(
                    answer_results=[],
                ),
            )
        ]
backend/onyx/agent_search/pro_search_a/main/graph_builder.py (new file, 264 lines)
@@ -0,0 +1,264 @@
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph

from onyx.agent_search.pro_search_a.answer_initial_sub_question.graph_builder import (
    answer_query_graph_builder,
)
from onyx.agent_search.pro_search_a.answer_refinement_sub_question.graph_builder import (
    answer_refined_query_graph_builder,
)
from onyx.agent_search.pro_search_a.base_raw_search.graph_builder import (
    base_raw_search_graph_builder,
)
from onyx.agent_search.pro_search_a.main.edges import continue_to_refined_answer_or_end
from onyx.agent_search.pro_search_a.main.edges import (
    parallelize_initial_sub_question_answering,
)
from onyx.agent_search.pro_search_a.main.edges import (
    parallelize_refined_sub_question_answering,
)
from onyx.agent_search.pro_search_a.main.nodes import agent_logging
from onyx.agent_search.pro_search_a.main.nodes import entity_term_extraction_llm
from onyx.agent_search.pro_search_a.main.nodes import generate_initial_answer
from onyx.agent_search.pro_search_a.main.nodes import generate_refined_answer
from onyx.agent_search.pro_search_a.main.nodes import ingest_initial_base_retrieval
from onyx.agent_search.pro_search_a.main.nodes import (
    ingest_initial_sub_question_answers,
)
from onyx.agent_search.pro_search_a.main.nodes import ingest_refined_answers
from onyx.agent_search.pro_search_a.main.nodes import initial_answer_quality_check
from onyx.agent_search.pro_search_a.main.nodes import initial_sub_question_creation
from onyx.agent_search.pro_search_a.main.nodes import refined_answer_decision
from onyx.agent_search.pro_search_a.main.nodes import refined_sub_question_creation
from onyx.agent_search.pro_search_a.main.states import MainInput
from onyx.agent_search.pro_search_a.main.states import MainState
from onyx.agent_search.shared_graph_utils.utils import get_test_config
from onyx.utils.logger import setup_logger

logger = setup_logger()

test_mode = False


def main_graph_builder(test_mode: bool = False) -> StateGraph:
    graph = StateGraph(
        state_schema=MainState,
        input=MainInput,
    )

    graph.add_node(
        node="initial_sub_question_creation",
        action=initial_sub_question_creation,
    )
    answer_query_subgraph = answer_query_graph_builder().compile()
    graph.add_node(
        node="answer_query_subgraph",
        action=answer_query_subgraph,
    )

    base_raw_search_subgraph = base_raw_search_graph_builder().compile()
    graph.add_node(
        node="base_raw_search_subgraph",
        action=base_raw_search_subgraph,
    )

    # refined_answer_subgraph = refined_answers_graph_builder().compile()
    # graph.add_node(
    #     node="refined_answer_subgraph",
    #     action=refined_answer_subgraph,
    # )

    graph.add_node(
        node="refined_sub_question_creation",
        action=refined_sub_question_creation,
    )

    answer_refined_question = answer_refined_query_graph_builder().compile()
    graph.add_node(
        node="answer_refined_question",
        action=answer_refined_question,
    )

    graph.add_node(
        node="ingest_refined_answers",
        action=ingest_refined_answers,
    )

    graph.add_node(
        node="generate_refined_answer",
        action=generate_refined_answer,
    )

    # graph.add_node(
    #     node="check_refined_answer",
    #     action=check_refined_answer,
    # )

    graph.add_node(
        node="ingest_initial_retrieval",
        action=ingest_initial_base_retrieval,
    )
    graph.add_node(
        node="ingest_initial_sub_question_answers",
        action=ingest_initial_sub_question_answers,
    )
    graph.add_node(
        node="generate_initial_answer",
        action=generate_initial_answer,
    )

    graph.add_node(
        node="initial_answer_quality_check",
        action=initial_answer_quality_check,
    )

    graph.add_node(
        node="entity_term_extraction_llm",
        action=entity_term_extraction_llm,
    )
    graph.add_node(
        node="refined_answer_decision",
        action=refined_answer_decision,
    )

    graph.add_node(
        node="logging_node",
        action=agent_logging,
    )
    # if test_mode:
    #     graph.add_node(
    #         node="generate_initial_base_answer",
    #         action=generate_initial_base_answer,
    #     )

    ### Add edges ###

    graph.add_edge(start_key=START, end_key="base_raw_search_subgraph")

    graph.add_edge(
        start_key="base_raw_search_subgraph",
        end_key="ingest_initial_retrieval",
    )

    graph.add_edge(
        start_key=START,
        end_key="initial_sub_question_creation",
    )
    graph.add_conditional_edges(
        source="initial_sub_question_creation",
        path=parallelize_initial_sub_question_answering,
        path_map=["answer_query_subgraph"],
    )
    graph.add_edge(
        start_key="answer_query_subgraph",
        end_key="ingest_initial_sub_question_answers",
    )

    graph.add_edge(
        start_key=["ingest_initial_sub_question_answers", "ingest_initial_retrieval"],
        end_key="generate_initial_answer",
    )

    graph.add_edge(
        start_key=["ingest_initial_sub_question_answers", "ingest_initial_retrieval"],
        end_key="entity_term_extraction_llm",
    )

    graph.add_edge(
        start_key="generate_initial_answer",
        end_key="initial_answer_quality_check",
    )

    graph.add_edge(
        start_key=["initial_answer_quality_check", "entity_term_extraction_llm"],
        end_key="refined_answer_decision",
    )

    graph.add_conditional_edges(
        source="refined_answer_decision",
        path=continue_to_refined_answer_or_end,
        path_map=["refined_sub_question_creation", "logging_node"],
    )

    graph.add_conditional_edges(
        source="refined_sub_question_creation",  # DONE
        path=parallelize_refined_sub_question_answering,
        path_map=["answer_refined_question"],
    )
    graph.add_edge(
        start_key="answer_refined_question",  # HERE
        end_key="ingest_refined_answers",
    )

    graph.add_edge(
        start_key="ingest_refined_answers",
        end_key="generate_refined_answer",
    )

    # graph.add_conditional_edges(
    #     source="refined_answer_decision",
    #     path=continue_to_refined_answer_or_end,
    #     path_map=["refined_answer_subgraph", END],
    # )

    # graph.add_edge(
    #     start_key="refined_answer_subgraph",
    #     end_key="generate_refined_answer",
    # )

    graph.add_edge(
        start_key="generate_refined_answer",
        end_key="logging_node",
    )

    graph.add_edge(
        start_key="logging_node",
        end_key=END,
    )

    # graph.add_edge(
    #     start_key="generate_refined_answer",
    #     end_key="check_refined_answer",
    # )

    # graph.add_edge(
    #     start_key="check_refined_answer",
    #     end_key=END,
    # )

    return graph


if __name__ == "__main__":
    from onyx.db.engine import get_session_context_manager
    from onyx.llm.factory import get_default_llms
    from onyx.context.search.models import SearchRequest

    graph = main_graph_builder()
    compiled_graph = graph.compile()
    primary_llm, fast_llm = get_default_llms()

    with get_session_context_manager() as db_session:
        search_request = SearchRequest(query="Who created Excel?")
        pro_search_config, search_tool = get_test_config(
            db_session, primary_llm, fast_llm, search_request
        )

        inputs = MainInput(
            primary_llm=primary_llm,
            fast_llm=fast_llm,
            db_session=db_session,
            config=pro_search_config,
            search_tool=search_tool,
        )

        for thing in compiled_graph.stream(
            input=inputs,
            # stream_mode="debug",
            # debug=True,
            subgraphs=True,
        ):
            logger.debug(thing)
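A note on the list-valued `start_key` edges used in this builder: in LangGraph, an edge whose start is a list of nodes acts as a barrier, so the destination runs only once all listed nodes have completed. A minimal sketch with illustrative names:

from typing import TypedDict

from langgraph.graph import END, START, StateGraph


class JoinState(TypedDict):
    a_done: bool
    b_done: bool
    joined: bool


demo = StateGraph(JoinState)
demo.add_node("a", lambda state: {"a_done": True})
demo.add_node("b", lambda state: {"b_done": True})
# "join" fires exactly once, after BOTH "a" and "b" finish.
demo.add_node("join", lambda state: {"joined": True})
demo.add_edge(START, "a")
demo.add_edge(START, "b")
demo.add_edge(["a", "b"], "join")
demo.add_edge("join", END)

print(demo.compile().invoke({"a_done": False, "b_done": False, "joined": False}))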
backend/onyx/agent_search/pro_search_a/main/models.py (new file, 36 lines)
@@ -0,0 +1,36 @@
from pydantic import BaseModel


class FollowUpSubQuestion(BaseModel):
    sub_question: str
    sub_question_id: str
    verified: bool
    answered: bool
    answer: str


class AgentTimings(BaseModel):
    base_duration__s: float | None
    refined_duration__s: float | None
    full_duration__s: float | None


class AgentBaseMetrics(BaseModel):
    num_verified_documents_total: int | None
    num_verified_documents_core: int | None
    verified_avg_score_core: float | None
    num_verified_documents_base: int | float | None
    verified_avg_score_base: float | None
    base_doc_boost_factor: float | None
    support_boost_factor: float | None
    duration__s: float | None


class AgentRefinedMetrics(BaseModel):
    refined_doc_boost_factor: float | None
    refined_question_boost_factor: float | None
    duration__s: float | None


class AgentAdditionalMetrics(BaseModel):
    pass
backend/onyx/agent_search/pro_search_a/main/nodes.py (new file, 1144 lines; diff suppressed because it is too large)
backend/onyx/agent_search/pro_search_a/main/states.py (new file, 151 lines)
@@ -0,0 +1,151 @@
from datetime import datetime
from operator import add
from typing import Annotated
from typing import TypedDict

from onyx.agent_search.core_state import CoreState
from onyx.agent_search.pro_search_a.expanded_retrieval.models import (
    ExpandedRetrievalResult,
)
from onyx.agent_search.pro_search_a.main.models import AgentBaseMetrics
from onyx.agent_search.pro_search_a.main.models import AgentRefinedMetrics
from onyx.agent_search.pro_search_a.main.models import FollowUpSubQuestion
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.agent_search.shared_graph_utils.models import EntityRelationshipTermExtraction
from onyx.agent_search.shared_graph_utils.models import InitialAgentResultStats
from onyx.agent_search.shared_graph_utils.models import QueryResult
from onyx.agent_search.shared_graph_utils.models import (
    QuestionAnswerResults,
)
from onyx.agent_search.shared_graph_utils.models import RefinedAgentStats
from onyx.agent_search.shared_graph_utils.operators import dedup_inference_sections
from onyx.agent_search.shared_graph_utils.operators import dedup_question_answer_results
from onyx.context.search.models import InferenceSection


### States ###

## Update States


class RefinedAgentStartStats(TypedDict):
    agent_refined_start_time: datetime | None


class RefinedAgentEndStats(TypedDict):
    agent_refined_end_time: datetime | None
    agent_refined_metrics: AgentRefinedMetrics


class BaseDecompUpdateBase(TypedDict):
    agent_start_time: datetime
    initial_decomp_questions: list[str]


class BaseDecompUpdate(
    RefinedAgentStartStats, RefinedAgentEndStats, BaseDecompUpdateBase
):
    pass


class InitialAnswerBASEUpdate(TypedDict):
    initial_base_answer: str


class InitialAnswerUpdate(TypedDict):
    initial_answer: str
    initial_agent_stats: InitialAgentResultStats | None
    generated_sub_questions: list[str]
    agent_base_end_time: datetime
    agent_base_metrics: AgentBaseMetrics


class RefinedAnswerUpdateBase(TypedDict):
    refined_answer: str
    refined_agent_stats: RefinedAgentStats | None
    refined_answer_quality: bool


class RefinedAnswerUpdate(RefinedAgentEndStats, RefinedAnswerUpdateBase):
    pass


class InitialAnswerQualityUpdate(TypedDict):
    initial_answer_quality: bool


class RequireRefinedAnswerUpdate(TypedDict):
    require_refined_answer: bool


class DecompAnswersUpdate(TypedDict):
    documents: Annotated[list[InferenceSection], dedup_inference_sections]
    decomp_answer_results: Annotated[
        list[QuestionAnswerResults], dedup_question_answer_results
    ]


class FollowUpDecompAnswersUpdate(TypedDict):
    refined_documents: Annotated[list[InferenceSection], dedup_inference_sections]
    refined_decomp_answer_results: Annotated[list[QuestionAnswerResults], add]


class ExpandedRetrievalUpdate(TypedDict):
    all_original_question_documents: Annotated[
        list[InferenceSection], dedup_inference_sections
    ]
    original_question_retrieval_results: list[QueryResult]
    original_question_retrieval_stats: AgentChunkStats


class EntityTermExtractionUpdate(TypedDict):
    entity_retlation_term_extractions: EntityRelationshipTermExtraction


class FollowUpSubQuestionsUpdateBase(TypedDict):
    refined_sub_questions: dict[int, FollowUpSubQuestion]


class FollowUpSubQuestionsUpdate(
    RefinedAgentStartStats, FollowUpSubQuestionsUpdateBase
):
    pass


## Graph Input State


class MainInput(CoreState):
    pass


## Graph State


class MainState(
    # This includes the core state
    MainInput,
    BaseDecompUpdateBase,
    InitialAnswerUpdate,
    InitialAnswerBASEUpdate,
    DecompAnswersUpdate,
    ExpandedRetrievalUpdate,
    EntityTermExtractionUpdate,
    InitialAnswerQualityUpdate,
    RequireRefinedAnswerUpdate,
    FollowUpSubQuestionsUpdateBase,
    FollowUpDecompAnswersUpdate,
    RefinedAnswerUpdateBase,
    RefinedAgentStartStats,
    RefinedAgentEndStats,
):
    # expanded_retrieval_result: Annotated[list[ExpandedRetrievalResult], add]
    base_raw_search_result: Annotated[list[ExpandedRetrievalResult], add]


## Graph Output State - presently not used


class MainOutput(TypedDict):
    pass
@@ -0,0 +1,28 @@
from collections.abc import Hashable

from langgraph.types import Send

from onyx.agent_search.core_state import in_subgraph_extract_core_fields
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionInput,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.states import (
    ExpandedRetrievalInput,
)
from onyx.utils.logger import setup_logger

logger = setup_logger()


def send_to_expanded_retrieval(state: AnswerQuestionInput) -> Send | Hashable:
    logger.debug("sending to expanded retrieval via edge")

    return Send(
        "initial_sub_question_expanded_retrieval",
        ExpandedRetrievalInput(
            **in_subgraph_extract_core_fields(state),
            question=state["question"],
            base_search=False,
            sub_question_id=state["question_id"],
        ),
    )
@@ -0,0 +1,129 @@
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph

from onyx.agent_search.pro_search_b.answer_initial_sub_question.edges import (
    send_to_expanded_retrieval,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.nodes.answer_check import (
    answer_check,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.nodes.answer_generation import (
    answer_generation,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.nodes.format_answer import (
    format_answer,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.nodes.ingest_retrieval import (
    ingest_retrieval,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionInput,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionOutput,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionState,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.graph_builder import (
    expanded_retrieval_graph_builder,
)
from onyx.agent_search.shared_graph_utils.utils import get_test_config
from onyx.utils.logger import setup_logger

logger = setup_logger()


def answer_query_graph_builder() -> StateGraph:
    graph = StateGraph(
        state_schema=AnswerQuestionState,
        input=AnswerQuestionInput,
        output=AnswerQuestionOutput,
    )

    ### Add nodes ###

    expanded_retrieval = expanded_retrieval_graph_builder().compile()
    graph.add_node(
        node="initial_sub_question_expanded_retrieval",
        action=expanded_retrieval,
    )
    graph.add_node(
        node="answer_check",
        action=answer_check,
    )
    graph.add_node(
        node="answer_generation",
        action=answer_generation,
    )
    graph.add_node(
        node="format_answer",
        action=format_answer,
    )
    graph.add_node(
        node="ingest_retrieval",
        action=ingest_retrieval,
    )

    ### Add edges ###

    graph.add_conditional_edges(
        source=START,
        path=send_to_expanded_retrieval,
        path_map=["initial_sub_question_expanded_retrieval"],
    )
    graph.add_edge(
        start_key="initial_sub_question_expanded_retrieval",
        end_key="ingest_retrieval",
    )
    graph.add_edge(
        start_key="ingest_retrieval",
        end_key="answer_generation",
    )
    graph.add_edge(
        start_key="answer_generation",
        end_key="answer_check",
    )
    graph.add_edge(
        start_key="answer_check",
        end_key="format_answer",
    )
    graph.add_edge(
        start_key="format_answer",
        end_key=END,
    )

    return graph


if __name__ == "__main__":
    from onyx.db.engine import get_session_context_manager
    from onyx.llm.factory import get_default_llms
    from onyx.context.search.models import SearchRequest

    graph = answer_query_graph_builder()
    compiled_graph = graph.compile()
    primary_llm, fast_llm = get_default_llms()
    search_request = SearchRequest(
        query="what can you do with onyx or danswer?",
    )
    with get_session_context_manager() as db_session:
        pro_search_config, search_tool = get_test_config(
            db_session, primary_llm, fast_llm, search_request
        )
        inputs = AnswerQuestionInput(
            question="what can you do with onyx?",
            subgraph_fast_llm=fast_llm,
            subgraph_primary_llm=primary_llm,
            subgraph_config=pro_search_config,
            subgraph_search_tool=search_tool,
            subgraph_db_session=db_session,
            question_id="0_0",
        )
        for thing in compiled_graph.stream(
            input=inputs,
            # debug=True,
            # subgraphs=True,
        ):
            logger.debug(thing)
@@ -0,0 +1,8 @@
from pydantic import BaseModel


### Models ###


class AnswerRetrievalStats(BaseModel):
    answer_retrieval_stats: dict[str, float | int]
@@ -0,0 +1,14 @@
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionState,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    QACheckUpdate,
)


def answer_check(state: AnswerQuestionState) -> QACheckUpdate:
    # Stub: always reports acceptable quality for now.
    quality_str = "yes"

    return QACheckUpdate(
        answer_quality=quality_str,
    )
@@ -0,0 +1,41 @@
import datetime

from langchain_core.callbacks.manager import dispatch_custom_event

from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionState,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    QAGenerationUpdate,
)
from onyx.agent_search.shared_graph_utils.utils import get_persona_prompt
from onyx.agent_search.shared_graph_utils.utils import parse_question_id
from onyx.chat.models import AgentAnswerPiece
from onyx.utils.logger import setup_logger

logger = setup_logger()


def answer_generation(state: AnswerQuestionState) -> QAGenerationUpdate:
    now_start = datetime.datetime.now()
    logger.debug(f"--------{now_start}--------START ANSWER GENERATION---")

    # Stub: the question, documents, and persona prompt are looked up but not
    # yet fed into any prompt construction.
    state["question"]
    state["documents"]
    level, question_nr = parse_question_id(state["question_id"])
    get_persona_prompt(state["subgraph_config"].search_request.persona)

    dispatch_custom_event(
        "sub_answers",
        AgentAnswerPiece(
            answer_piece="",
            level=level,
            level_question_nr=question_nr,
            answer_type="agent_sub_answer",
        ),
    )
    answer_str = ""  # stub: no LLM call yet, so the returned answer is empty

    return QAGenerationUpdate(
        answer=answer_str,
    )
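The body above is a stub: it emits one empty AgentAnswerPiece and returns an empty answer. A sketch of a streaming version, following the same llm.stream(prompt=...) pattern that expand_queries uses further down in this diff; the ANSWER_PROMPT template is an assumption, and the sketch assumes the stream yields message chunks exposing .content:

from langchain_core.messages import HumanMessage

# Hypothetical prompt; not part of this diff.
ANSWER_PROMPT = "Answer the question using the retrieved context.\nQuestion: {question}"


def answer_generation_streaming(state: AnswerQuestionState) -> QAGenerationUpdate:
    level, question_nr = parse_question_id(state["question_id"])
    msg = [HumanMessage(content=ANSWER_PROMPT.format(question=state["question"]))]

    answer_tokens: list[str] = []
    for chunk in state["subgraph_fast_llm"].stream(prompt=msg):
        token = str(chunk.content)
        answer_tokens.append(token)
        # Stream each token to the frontend as it arrives.
        dispatch_custom_event(
            "sub_answers",
            AgentAnswerPiece(
                answer_piece=token,
                level=level,
                level_question_nr=question_nr,
                answer_type="agent_sub_answer",
            ),
        )
    return QAGenerationUpdate(answer="".join(answer_tokens))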
@@ -0,0 +1,25 @@
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionOutput,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionState,
)
from onyx.agent_search.shared_graph_utils.models import (
    QuestionAnswerResults,
)


def format_answer(state: AnswerQuestionState) -> AnswerQuestionOutput:
    return AnswerQuestionOutput(
        answer_results=[
            QuestionAnswerResults(
                question=state["question"],
                question_id=state["question_id"],
                quality=state.get("answer_quality", "No"),
                answer=state["answer"],
                expanded_retrieval_results=state["expanded_retrieval_results"],
                documents=state["documents"],
                sub_question_retrieval_stats=state["sub_question_retrieval_stats"],
            )
        ],
    )
@@ -0,0 +1,23 @@
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    RetrievalIngestionUpdate,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.states import (
    ExpandedRetrievalOutput,
)
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats


def ingest_retrieval(state: ExpandedRetrievalOutput) -> RetrievalIngestionUpdate:
    sub_question_retrieval_stats = state[
        "expanded_retrieval_result"
    ].sub_question_retrieval_stats
    if sub_question_retrieval_stats is None:
        # NOTE: RetrievalIngestionUpdate annotates this field as a single
        # AgentChunkStats, so this list fallback looks inconsistent with that type.
        sub_question_retrieval_stats = [AgentChunkStats()]

    return RetrievalIngestionUpdate(
        expanded_retrieval_results=state[
            "expanded_retrieval_result"
        ].expanded_queries_results,
        documents=state["expanded_retrieval_result"].all_documents,
        sub_question_retrieval_stats=sub_question_retrieval_stats,
    )
@@ -0,0 +1,63 @@
from operator import add
from typing import Annotated
from typing import TypedDict

from onyx.agent_search.core_state import SubgraphCoreState
from onyx.agent_search.pro_search_b.expanded_retrieval.models import QueryResult
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.agent_search.shared_graph_utils.models import (
    QuestionAnswerResults,
)
from onyx.agent_search.shared_graph_utils.operators import dedup_inference_sections
from onyx.context.search.models import InferenceSection


## Update States
class QACheckUpdate(TypedDict):
    answer_quality: str


class QAGenerationUpdate(TypedDict):
    answer: str
    # answer_stat: AnswerStats


class RetrievalIngestionUpdate(TypedDict):
    expanded_retrieval_results: list[QueryResult]
    documents: Annotated[list[InferenceSection], dedup_inference_sections]
    sub_question_retrieval_stats: AgentChunkStats


## Graph Input State


class AnswerQuestionInput(SubgraphCoreState):
    question: str
    question_id: str  # "0_0" is the original question; everything else is <level>_<question_num>.
    # Level 0 is the original question and first decomposition, level 1 is the follow-up, etc.
    # question_num is a unique number per original question per level.


## Graph State


class AnswerQuestionState(
    AnswerQuestionInput,
    QAGenerationUpdate,
    QACheckUpdate,
    RetrievalIngestionUpdate,
):
    pass


## Graph Output State


class AnswerQuestionOutput(TypedDict):
    """
    This is a list of results even though each call of this subgraph only returns one result.
    This is because if we parallelize the answer query subgraph, there will be multiple
    results in a list, so the add operator is used to combine them.
    """

    answer_results: Annotated[list[QuestionAnswerResults], add]
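A quick illustration of why answer_results is Annotated with operator.add: when several copies of this subgraph run in parallel via Send, LangGraph reduces their partial outputs with the annotated operator, so the single-element lists concatenate instead of overwriting each other. A minimal, framework-free sketch of that reduction (toy string values stand in for QuestionAnswerResults):

from operator import add

branch_a = {"answer_results": ["result_for_q_0_0"]}
branch_b = {"answer_results": ["result_for_q_0_1"]}

merged = add(branch_a["answer_results"], branch_b["answer_results"])
# merged == ["result_for_q_0_0", "result_for_q_0_1"] -- both parallel
# sub-answers survive, keyed by the <level>_<question_num> id scheme above.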
@@ -0,0 +1,28 @@
from collections.abc import Hashable

from langgraph.types import Send

from onyx.agent_search.core_state import in_subgraph_extract_core_fields
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionInput,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.states import (
    ExpandedRetrievalInput,
)
from onyx.utils.logger import setup_logger

logger = setup_logger()


def send_to_expanded_refined_retrieval(state: AnswerQuestionInput) -> Send | Hashable:
    logger.debug("sending to expanded retrieval for follow-up question via edge")

    return Send(
        "refined_sub_question_expanded_retrieval",
        ExpandedRetrievalInput(
            **in_subgraph_extract_core_fields(state),
            question=state["question"],
            sub_question_id=state["question_id"],
            base_search=False,
        ),
    )
@@ -0,0 +1,122 @@
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph

from onyx.agent_search.pro_search_b.answer_initial_sub_question.nodes.answer_check import (
    answer_check,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.nodes.answer_generation import (
    answer_generation,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.nodes.format_answer import (
    format_answer,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.nodes.ingest_retrieval import (
    ingest_retrieval,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionInput,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionOutput,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionState,
)
from onyx.agent_search.pro_search_b.answer_refinement_sub_question.edges import (
    send_to_expanded_refined_retrieval,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.graph_builder import (
    expanded_retrieval_graph_builder,
)
from onyx.utils.logger import setup_logger

logger = setup_logger()


def answer_refined_query_graph_builder() -> StateGraph:
    graph = StateGraph(
        state_schema=AnswerQuestionState,
        input=AnswerQuestionInput,
        output=AnswerQuestionOutput,
    )

    ### Add nodes ###

    expanded_retrieval = expanded_retrieval_graph_builder().compile()
    graph.add_node(
        node="refined_sub_question_expanded_retrieval",
        action=expanded_retrieval,
    )
    graph.add_node(
        node="refined_sub_answer_check",
        action=answer_check,
    )
    graph.add_node(
        node="refined_sub_answer_generation",
        action=answer_generation,
    )
    graph.add_node(
        node="format_refined_sub_answer",
        action=format_answer,
    )
    graph.add_node(
        node="ingest_refined_retrieval",
        action=ingest_retrieval,
    )

    ### Add edges ###

    graph.add_conditional_edges(
        source=START,
        path=send_to_expanded_refined_retrieval,
        path_map=["refined_sub_question_expanded_retrieval"],
    )
    graph.add_edge(
        start_key="refined_sub_question_expanded_retrieval",
        end_key="ingest_refined_retrieval",
    )
    graph.add_edge(
        start_key="ingest_refined_retrieval",
        end_key="refined_sub_answer_generation",
    )
    graph.add_edge(
        start_key="refined_sub_answer_generation",
        end_key="refined_sub_answer_check",
    )
    graph.add_edge(
        start_key="refined_sub_answer_check",
        end_key="format_refined_sub_answer",
    )
    graph.add_edge(
        start_key="format_refined_sub_answer",
        end_key=END,
    )

    return graph


if __name__ == "__main__":
    from onyx.db.engine import get_session_context_manager
    from onyx.llm.factory import get_default_llms
    from onyx.context.search.models import SearchRequest

    graph = answer_refined_query_graph_builder()
    compiled_graph = graph.compile()
    primary_llm, fast_llm = get_default_llms()
    search_request = SearchRequest(
        query="what can you do with onyx or danswer?",
    )
    with get_session_context_manager() as db_session:
        # NOTE: unlike the initial sub-question smoke test, this input omits the
        # subgraph_* fields (LLMs, config, search tool, db session).
        inputs = AnswerQuestionInput(
            question="what can you do with onyx?",
            question_id="0_0",
        )
        for thing in compiled_graph.stream(
            input=inputs,
            # debug=True,
            # subgraphs=True,
        ):
            logger.debug(thing)
        # output = compiled_graph.invoke(inputs)
        # logger.debug(output)
@@ -0,0 +1,19 @@
from pydantic import BaseModel

from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.context.search.models import InferenceSection

### Models ###


class AnswerRetrievalStats(BaseModel):
    answer_retrieval_stats: dict[str, float | int]


class QuestionAnswerResults(BaseModel):
    question: str
    answer: str
    quality: str
    # expanded_retrieval_results: list[QueryResult]
    documents: list[InferenceSection]
    sub_question_retrieval_stats: AgentChunkStats
@@ -0,0 +1,70 @@
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph

from onyx.agent_search.pro_search_b.base_raw_search.nodes.format_raw_search_results import (
    format_raw_search_results,
)
from onyx.agent_search.pro_search_b.base_raw_search.nodes.generate_raw_search_data import (
    generate_raw_search_data,
)
from onyx.agent_search.pro_search_b.base_raw_search.states import BaseRawSearchInput
from onyx.agent_search.pro_search_b.base_raw_search.states import BaseRawSearchOutput
from onyx.agent_search.pro_search_b.base_raw_search.states import BaseRawSearchState
from onyx.agent_search.pro_search_b.expanded_retrieval.graph_builder import (
    expanded_retrieval_graph_builder,
)


def base_raw_search_graph_builder() -> StateGraph:
    graph = StateGraph(
        state_schema=BaseRawSearchState,
        input=BaseRawSearchInput,
        output=BaseRawSearchOutput,
    )

    ### Add nodes ###

    expanded_retrieval = expanded_retrieval_graph_builder().compile()
    graph.add_node(
        node="generate_raw_search_data",
        action=generate_raw_search_data,
    )

    graph.add_node(
        node="expanded_retrieval_base_search",
        action=expanded_retrieval,
    )
    graph.add_node(
        node="format_raw_search_results",
        action=format_raw_search_results,
    )

    ### Add edges ###

    graph.add_edge(start_key=START, end_key="generate_raw_search_data")

    graph.add_edge(
        start_key="generate_raw_search_data",
        end_key="expanded_retrieval_base_search",
    )
    graph.add_edge(
        start_key="expanded_retrieval_base_search",
        end_key="format_raw_search_results",
    )

    # graph.add_edge(
    #     start_key="expanded_retrieval_base_search",
    #     end_key=END,
    # )

    graph.add_edge(
        start_key="format_raw_search_results",
        end_key=END,
    )

    return graph


if __name__ == "__main__":
    pass
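Note how base_raw_search_graph_builder wires a compiled subgraph in as an ordinary node. That pattern is general in LangGraph: anything callable on the state, including a compiled graph with a compatible schema, can serve as a node action. A minimal self-contained sketch of the same idea (all names here are illustrative, not from this diff):

from typing import TypedDict

from langgraph.graph import END, START, StateGraph


class DemoState(TypedDict):
    value: int


def inner_step(state: DemoState) -> DemoState:
    return {"value": state["value"] + 1}


inner = StateGraph(DemoState)
inner.add_node("inner_step", inner_step)
inner.add_edge(START, "inner_step")
inner.add_edge("inner_step", END)

outer = StateGraph(DemoState)
outer.add_node("inner_subgraph", inner.compile())  # compiled graph as a node
outer.add_edge(START, "inner_subgraph")
outer.add_edge("inner_subgraph", END)

assert outer.compile().invoke({"value": 1}) == {"value": 2}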
@@ -0,0 +1,20 @@
from pydantic import BaseModel

from onyx.agent_search.pro_search_b.expanded_retrieval.models import QueryResult
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.context.search.models import InferenceSection

### Models ###


class AnswerRetrievalStats(BaseModel):
    answer_retrieval_stats: dict[str, float | int]


class QuestionAnswerResults(BaseModel):
    question: str
    answer: str
    quality: str
    expanded_retrieval_results: list[QueryResult]
    documents: list[InferenceSection]
    sub_question_retrieval_stats: list[AgentChunkStats]
@@ -0,0 +1,16 @@
from onyx.agent_search.pro_search_b.base_raw_search.states import BaseRawSearchOutput
from onyx.agent_search.pro_search_b.expanded_retrieval.states import (
    ExpandedRetrievalOutput,
)
from onyx.utils.logger import setup_logger

logger = setup_logger()


def format_raw_search_results(state: ExpandedRetrievalOutput) -> BaseRawSearchOutput:
    logger.debug("format_raw_search_results")
    return BaseRawSearchOutput(
        base_expanded_retrieval_result=state["expanded_retrieval_result"],
        # base_retrieval_results=[state["expanded_retrieval_result"]],
        # base_search_documents=[],
    )
@@ -0,0 +1,21 @@
from onyx.agent_search.core_state import CoreState
from onyx.agent_search.pro_search_b.expanded_retrieval.states import (
    ExpandedRetrievalInput,
)
from onyx.utils.logger import setup_logger

logger = setup_logger()


def generate_raw_search_data(state: CoreState) -> ExpandedRetrievalInput:
    logger.debug("generate_raw_search_data")
    return ExpandedRetrievalInput(
        subgraph_config=state["config"],
        subgraph_primary_llm=state["primary_llm"],
        subgraph_fast_llm=state["fast_llm"],
        subgraph_db_session=state["db_session"],
        question=state["config"].search_request.query,
        base_search=True,
        subgraph_search_tool=state["search_tool"],
        sub_question_id=None,  # this graph is always and only used for the original question
    )
@@ -0,0 +1,42 @@
from typing import TypedDict

from onyx.agent_search.core_state import CoreState
from onyx.agent_search.core_state import SubgraphCoreState
from onyx.agent_search.pro_search_b.expanded_retrieval.models import (
    ExpandedRetrievalResult,
)


## Update States


## Graph Input State


class BaseRawSearchInput(CoreState, SubgraphCoreState):
    pass


## Graph Output State


class BaseRawSearchOutput(TypedDict):
    """
    This is a list of results even though each call of this subgraph only returns one result.
    This is because if we parallelize the answer query subgraph, there will be multiple
    results in a list, so the add operator is used to combine them.
    """

    # base_search_documents: Annotated[list[InferenceSection], dedup_inference_sections]
    # base_retrieval_results: Annotated[list[ExpandedRetrievalResult], add]
    base_expanded_retrieval_result: ExpandedRetrievalResult


## Graph State


class BaseRawSearchState(
    BaseRawSearchInput,
    BaseRawSearchOutput,
):
    pass
@@ -0,0 +1,28 @@
from collections.abc import Hashable

from langgraph.types import Send

from onyx.agent_search.core_state import in_subgraph_extract_core_fields
from onyx.agent_search.pro_search_b.expanded_retrieval.nodes import RetrievalInput
from onyx.agent_search.pro_search_b.expanded_retrieval.states import (
    ExpandedRetrievalState,
)


def parallel_retrieval_edge(state: ExpandedRetrievalState) -> list[Send | Hashable]:
    question = state.get("question", state["subgraph_config"].search_request.query)

    query_expansions = state.get("expanded_queries", []) + [question]
    return [
        Send(
            "doc_retrieval",
            RetrievalInput(
                query_to_retrieve=query,
                question=question,
                **in_subgraph_extract_core_fields(state),
                base_search=False,
                sub_question_id=state.get("sub_question_id"),
            ),
        )
        for query in query_expansions
    ]
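parallel_retrieval_edge fans one state out into one doc_retrieval invocation per expanded query, always appending the original question as its own query. A stripped-down sketch of the same fan-out mechanics (the dicts stand in for RetrievalInput; values are illustrative):

from langgraph.types import Send

expanded_queries = ["onyx features", "danswer capabilities"]
question = "what can you do with onyx?"

sends = [
    Send("doc_retrieval", {"query_to_retrieve": q, "question": question})
    for q in expanded_queries + [question]
]
# Three Send objects -> LangGraph runs doc_retrieval three times in parallel,
# and DocRetrievalUpdate's Annotated reducers merge the three partial updates.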
@@ -0,0 +1,126 @@
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph

from onyx.agent_search.pro_search_b.expanded_retrieval.edges import (
    parallel_retrieval_edge,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.nodes import doc_reranking
from onyx.agent_search.pro_search_b.expanded_retrieval.nodes import doc_retrieval
from onyx.agent_search.pro_search_b.expanded_retrieval.nodes import doc_verification
from onyx.agent_search.pro_search_b.expanded_retrieval.nodes import expand_queries
from onyx.agent_search.pro_search_b.expanded_retrieval.nodes import format_results
from onyx.agent_search.pro_search_b.expanded_retrieval.nodes import verification_kickoff
from onyx.agent_search.pro_search_b.expanded_retrieval.states import (
    ExpandedRetrievalInput,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.states import (
    ExpandedRetrievalOutput,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.states import (
    ExpandedRetrievalState,
)
from onyx.agent_search.shared_graph_utils.utils import get_test_config
from onyx.utils.logger import setup_logger

logger = setup_logger()


def expanded_retrieval_graph_builder() -> StateGraph:
    graph = StateGraph(
        state_schema=ExpandedRetrievalState,
        input=ExpandedRetrievalInput,
        output=ExpandedRetrievalOutput,
    )

    ### Add nodes ###

    graph.add_node(
        node="expand_queries",
        action=expand_queries,
    )

    graph.add_node(
        node="doc_retrieval",
        action=doc_retrieval,
    )
    graph.add_node(
        node="verification_kickoff",
        action=verification_kickoff,
    )
    graph.add_node(
        node="doc_verification",
        action=doc_verification,
    )
    graph.add_node(
        node="doc_reranking",
        action=doc_reranking,
    )
    graph.add_node(
        node="format_results",
        action=format_results,
    )

    ### Add edges ###
    graph.add_edge(
        start_key=START,
        end_key="expand_queries",
    )

    graph.add_conditional_edges(
        source="expand_queries",
        path=parallel_retrieval_edge,
        path_map=["doc_retrieval"],
    )
    graph.add_edge(
        start_key="doc_retrieval",
        end_key="verification_kickoff",
    )
    graph.add_edge(
        start_key="doc_verification",
        end_key="doc_reranking",
    )
    graph.add_edge(
        start_key="doc_reranking",
        end_key="format_results",
    )
    graph.add_edge(
        start_key="format_results",
        end_key=END,
    )

    return graph


if __name__ == "__main__":
    from onyx.db.engine import get_session_context_manager
    from onyx.llm.factory import get_default_llms
    from onyx.context.search.models import SearchRequest

    graph = expanded_retrieval_graph_builder()
    compiled_graph = graph.compile()
    primary_llm, fast_llm = get_default_llms()
    search_request = SearchRequest(
        query="what can you do with onyx or danswer?",
    )

    with get_session_context_manager() as db_session:
        pro_search_config, search_tool = get_test_config(
            db_session, primary_llm, fast_llm, search_request
        )
        inputs = ExpandedRetrievalInput(
            question="what can you do with onyx?",
            base_search=False,
            subgraph_fast_llm=fast_llm,
            subgraph_primary_llm=primary_llm,
            subgraph_db_session=db_session,
            subgraph_config=pro_search_config,
            subgraph_search_tool=search_tool,
            sub_question_id=None,
        )
        for thing in compiled_graph.stream(
            input=inputs,
            # debug=True,
            subgraphs=True,
        ):
            logger.debug(thing)
@@ -0,0 +1,13 @@
from pydantic import BaseModel

from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.agent_search.shared_graph_utils.models import QueryResult
from onyx.context.search.models import InferenceSection

### Models ###


class ExpandedRetrievalResult(BaseModel):
    expanded_queries_results: list[QueryResult]
    all_documents: list[InferenceSection]
    sub_question_retrieval_stats: AgentChunkStats
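ExpandedRetrievalResult is the single bundle that format_results (below) hands back to the parent graph. Assembled roughly like this with toy values, assuming the Optional QueryResult fields default to None as doc_retrieval's usage suggests and that AgentChunkStats is default-constructible as used elsewhere in this diff:

result = ExpandedRetrievalResult(
    expanded_queries_results=[
        QueryResult(
            query="what can you do with onyx?",
            search_results=[],  # list[InferenceSection], empty in this toy case
            stats=None,
            query_info=None,
        )
    ],
    all_documents=[],
    sub_question_retrieval_stats=AgentChunkStats(),
)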
@@ -0,0 +1,410 @@
from collections import defaultdict
from collections.abc import Callable
from typing import cast
from typing import Literal

import numpy as np
from langchain_core.callbacks.manager import dispatch_custom_event
from langchain_core.messages import HumanMessage
from langchain_core.messages import merge_message_runs
from langgraph.types import Command
from langgraph.types import Send

from onyx.agent_search.core_state import in_subgraph_extract_core_fields
from onyx.agent_search.pro_search_b.expanded_retrieval.models import (
    ExpandedRetrievalResult,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.models import QueryResult
from onyx.agent_search.pro_search_b.expanded_retrieval.states import DocRerankingUpdate
from onyx.agent_search.pro_search_b.expanded_retrieval.states import DocRetrievalUpdate
from onyx.agent_search.pro_search_b.expanded_retrieval.states import (
    DocVerificationInput,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.states import (
    DocVerificationUpdate,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.states import (
    ExpandedRetrievalInput,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.states import (
    ExpandedRetrievalState,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.states import (
    ExpandedRetrievalUpdate,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.states import InferenceSection
from onyx.agent_search.pro_search_b.expanded_retrieval.states import (
    QueryExpansionUpdate,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.states import RetrievalInput
from onyx.agent_search.shared_graph_utils.calculations import get_fit_scores
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.agent_search.shared_graph_utils.models import RetrievalFitStats
from onyx.agent_search.shared_graph_utils.prompts import REWRITE_PROMPT_MULTI_ORIGINAL
from onyx.agent_search.shared_graph_utils.prompts import VERIFIER_PROMPT
from onyx.agent_search.shared_graph_utils.utils import dispatch_separated
from onyx.agent_search.shared_graph_utils.utils import parse_question_id
from onyx.chat.models import ExtendedToolResponse
from onyx.chat.models import SubQueryPiece
from onyx.configs.dev_configs import AGENT_MAX_QUERY_RETRIEVAL_RESULTS
from onyx.configs.dev_configs import AGENT_RERANKING_MAX_QUERY_RETRIEVAL_RESULTS
from onyx.configs.dev_configs import AGENT_RERANKING_STATS
from onyx.configs.dev_configs import AGENT_RETRIEVAL_STATS
from onyx.context.search.models import SearchRequest
from onyx.context.search.pipeline import retrieval_preprocessing
from onyx.context.search.postprocessing.postprocessing import rerank_sections
from onyx.db.engine import get_session_context_manager
from onyx.llm.interfaces import LLM
from onyx.tools.models import SearchQueryInfo
from onyx.tools.tool_implementations.search.search_tool import (
    SEARCH_RESPONSE_SUMMARY_ID,
)
from onyx.tools.tool_implementations.search.search_tool import SearchResponseSummary
from onyx.tools.tool_implementations.search.search_tool import yield_search_responses
from onyx.utils.logger import setup_logger

logger = setup_logger()


def dispatch_subquery(level: int, question_nr: int) -> Callable[[str, int], None]:
    def helper(token: str, num: int) -> None:
        dispatch_custom_event(
            "subqueries",
            SubQueryPiece(
                sub_query=token,
                level=level,
                level_question_nr=question_nr,
                query_id=num,
            ),
        )

    return helper


def expand_queries(state: ExpandedRetrievalInput) -> QueryExpansionUpdate:
    # Sometimes we want to expand the original question, sometimes we want to expand a sub-question.
    # When we are running this node on the original question, no question is explicitly passed in.
    # Instead, we use the original question from the search request.
    question = state.get("question", state["subgraph_config"].search_request.query)
    llm: LLM = state["subgraph_fast_llm"]
    state["subgraph_db_session"]  # unused for now
    chat_session_id = state["subgraph_config"].chat_session_id
    sub_question_id = state.get("sub_question_id")
    if sub_question_id is None:
        level, question_nr = 0, 0
    else:
        level, question_nr = parse_question_id(sub_question_id)

    if chat_session_id is None:
        raise ValueError("chat_session_id must be provided for agent search")

    msg = [
        HumanMessage(
            content=REWRITE_PROMPT_MULTI_ORIGINAL.format(question=question),
        )
    ]

    llm_response_list = dispatch_separated(
        llm.stream(prompt=msg), dispatch_subquery(level, question_nr)
    )

    llm_response = merge_message_runs(llm_response_list, chunk_separator="")[0].content

    rewritten_queries = llm_response.split("\n")

    return QueryExpansionUpdate(
        expanded_queries=rewritten_queries,
    )


def doc_retrieval(state: RetrievalInput) -> DocRetrievalUpdate:
    """
    Retrieve documents

    Args:
        state (RetrievalInput): Primary state + the query to retrieve

    Updates:
        expanded_retrieval_results: list[QueryResult]
        retrieved_documents: list[InferenceSection]
    """
    query_to_retrieve = state["query_to_retrieve"]
    search_tool = state["subgraph_search_tool"]

    retrieved_docs: list[InferenceSection] = []
    if not query_to_retrieve.strip():
        logger.warning("Empty query, skipping retrieval")
        return DocRetrievalUpdate(
            expanded_retrieval_results=[],
            retrieved_documents=[],
        )

    query_info = None
    # new db session to avoid concurrency issues
    with get_session_context_manager() as db_session:
        for tool_response in search_tool.run(
            query=query_to_retrieve,
            force_no_rerank=True,
            alternate_db_session=db_session,
        ):
            # get retrieved docs to send to the rest of the graph
            if tool_response.id == SEARCH_RESPONSE_SUMMARY_ID:
                response = cast(SearchResponseSummary, tool_response.response)
                retrieved_docs = response.top_sections
                query_info = SearchQueryInfo(
                    predicted_search=response.predicted_search,
                    final_filters=response.final_filters,
                    recency_bias_multiplier=response.recency_bias_multiplier,
                )

    retrieved_docs = retrieved_docs[:AGENT_MAX_QUERY_RETRIEVAL_RESULTS]
    pre_rerank_docs = retrieved_docs
    if search_tool.search_pipeline is not None:
        pre_rerank_docs = (
            search_tool.search_pipeline._retrieved_sections or retrieved_docs
        )

    if AGENT_RETRIEVAL_STATS:
        fit_scores = get_fit_scores(
            pre_rerank_docs,
            retrieved_docs,
        )
    else:
        fit_scores = None

    expanded_retrieval_result = QueryResult(
        query=query_to_retrieve,
        search_results=retrieved_docs,
        stats=fit_scores,
        query_info=query_info,
    )
    return DocRetrievalUpdate(
        expanded_retrieval_results=[expanded_retrieval_result],
        retrieved_documents=retrieved_docs,
    )


def verification_kickoff(
    state: ExpandedRetrievalState,
) -> Command[Literal["doc_verification"]]:
    documents = state["retrieved_documents"]
    verification_question = state.get(
        "question", state["subgraph_config"].search_request.query
    )
    sub_question_id = state.get("sub_question_id")
    return Command(
        update={},
        goto=[
            Send(
                node="doc_verification",
                arg=DocVerificationInput(
                    doc_to_verify=doc,
                    question=verification_question,
                    base_search=False,
                    sub_question_id=sub_question_id,
                    **in_subgraph_extract_core_fields(state),
                ),
            )
            for doc in documents
        ],
    )


def doc_verification(state: DocVerificationInput) -> DocVerificationUpdate:
    """
    Check whether the document is relevant for the original user question

    Args:
        state (DocVerificationInput): The current state

    Updates:
        verified_documents: list[InferenceSection]
    """

    question = state["question"]
    doc_to_verify = state["doc_to_verify"]
    document_content = doc_to_verify.combined_content

    msg = [
        HumanMessage(
            content=VERIFIER_PROMPT.format(
                question=question, document_content=document_content
            )
        )
    ]

    fast_llm = state["subgraph_fast_llm"]

    response = fast_llm.invoke(msg)

    verified_documents = []
    if isinstance(response.content, str) and "yes" in response.content.lower():
        verified_documents.append(doc_to_verify)

    return DocVerificationUpdate(
        verified_documents=verified_documents,
    )


def doc_reranking(state: ExpandedRetrievalState) -> DocRerankingUpdate:
    verified_documents = state["verified_documents"]

    # Rerank post retrieval and verification. First, create a search query,
    # then create the list of reranked sections.

    question = state.get("question", state["subgraph_config"].search_request.query)
    with get_session_context_manager() as db_session:
        _search_query = retrieval_preprocessing(
            search_request=SearchRequest(query=question),
            user=state["subgraph_search_tool"].user,  # bit of a hack
            llm=state["subgraph_fast_llm"],
            db_session=db_session,
        )

    # skip section filtering

    if (
        _search_query.rerank_settings
        and _search_query.rerank_settings.rerank_model_name
        and _search_query.rerank_settings.num_rerank > 0
    ):
        reranked_documents = rerank_sections(
            _search_query,
            verified_documents,
        )
    else:
        logger.warning("No reranking settings found, using unranked documents")
        reranked_documents = verified_documents

    if AGENT_RERANKING_STATS:
        fit_scores = get_fit_scores(verified_documents, reranked_documents)
    else:
        fit_scores = RetrievalFitStats(fit_score_lift=0, rerank_effect=0, fit_scores={})

    # TODO: stream deduped docs here, or decide to use search tool ranking/verification

    return DocRerankingUpdate(
        reranked_documents=[
            doc for doc in reranked_documents if type(doc) is InferenceSection
        ][:AGENT_RERANKING_MAX_QUERY_RETRIEVAL_RESULTS],
        sub_question_retrieval_stats=fit_scores,
    )


def _calculate_sub_question_retrieval_stats(
    verified_documents: list[InferenceSection],
    expanded_retrieval_results: list[QueryResult],
) -> AgentChunkStats:
    chunk_scores: dict[str, dict[str, list[int | float]]] = defaultdict(
        lambda: defaultdict(list)
    )

    for expanded_retrieval_result in expanded_retrieval_results:
        for doc in expanded_retrieval_result.search_results:
            doc_chunk_id = f"{doc.center_chunk.document_id}_{doc.center_chunk.chunk_id}"
            if doc.center_chunk.score is not None:
                chunk_scores[doc_chunk_id]["score"].append(doc.center_chunk.score)

    verified_doc_chunk_ids = [
        f"{verified_document.center_chunk.document_id}_{verified_document.center_chunk.chunk_id}"
        for verified_document in verified_documents
    ]
    dismissed_doc_chunk_ids = []

    raw_chunk_stats_counts: dict[str, int] = defaultdict(int)
    raw_chunk_stats_scores: dict[str, float] = defaultdict(float)
    for doc_chunk_id, chunk_data in chunk_scores.items():
        if doc_chunk_id in verified_doc_chunk_ids:
            raw_chunk_stats_counts["verified_count"] += 1

            valid_chunk_scores = [
                score for score in chunk_data["score"] if score is not None
            ]
            raw_chunk_stats_scores["verified_scores"] += float(
                np.mean(valid_chunk_scores)
            )
        else:
            raw_chunk_stats_counts["rejected_count"] += 1
            valid_chunk_scores = [
                score for score in chunk_data["score"] if score is not None
            ]
            raw_chunk_stats_scores["rejected_scores"] += float(
                np.mean(valid_chunk_scores)
            )
            dismissed_doc_chunk_ids.append(doc_chunk_id)

    if raw_chunk_stats_counts["verified_count"] == 0:
        verified_avg_scores = 0.0
    else:
        verified_avg_scores = raw_chunk_stats_scores["verified_scores"] / float(
            raw_chunk_stats_counts["verified_count"]
        )

    rejected_scores = raw_chunk_stats_scores.get("rejected_scores", None)
    if rejected_scores is not None:
        rejected_avg_scores = rejected_scores / float(
            raw_chunk_stats_counts["rejected_count"]
        )
    else:
        rejected_avg_scores = None

    chunk_stats = AgentChunkStats(
        verified_count=raw_chunk_stats_counts["verified_count"],
        verified_avg_scores=verified_avg_scores,
        rejected_count=raw_chunk_stats_counts["rejected_count"],
        rejected_avg_scores=rejected_avg_scores,
        verified_doc_chunk_ids=verified_doc_chunk_ids,
        dismissed_doc_chunk_ids=dismissed_doc_chunk_ids,
    )

    return chunk_stats


def format_results(state: ExpandedRetrievalState) -> ExpandedRetrievalUpdate:
    level, question_nr = parse_question_id(state.get("sub_question_id") or "0_0")
    query_infos = [
        result.query_info
        for result in state["expanded_retrieval_results"]
        if result.query_info is not None
    ]
    if len(query_infos) == 0:
        raise ValueError("No query info found")

    # main question docs will be sent later after aggregation and deduping with sub-question docs
    if not (level == 0 and question_nr == 0):
        for tool_response in yield_search_responses(
            query=state["question"],
            reranked_sections=state[
                "retrieved_documents"
            ],  # TODO: rename params. this one is supposed to be the sections pre-merging
            final_context_sections=state["reranked_documents"],
            search_query_info=query_infos[0],  # TODO: handle differing query infos?
            get_section_relevance=lambda: None,  # TODO: add relevance
            search_tool=state["subgraph_search_tool"],
        ):
            dispatch_custom_event(
                "tool_response",
                ExtendedToolResponse(
                    id=tool_response.id,
                    response=tool_response.response,
                    level=level,
                    level_question_nr=question_nr,
                ),
            )
    sub_question_retrieval_stats = _calculate_sub_question_retrieval_stats(
        verified_documents=state["verified_documents"],
        expanded_retrieval_results=state["expanded_retrieval_results"],
    )

    if sub_question_retrieval_stats is None:
        sub_question_retrieval_stats = AgentChunkStats()
    # else:
    #     sub_question_retrieval_stats = [sub_question_retrieval_stats]

    return ExpandedRetrievalUpdate(
        expanded_retrieval_result=ExpandedRetrievalResult(
            expanded_queries_results=state["expanded_retrieval_results"],
            all_documents=state["reranked_documents"],
            sub_question_retrieval_stats=sub_question_retrieval_stats,
        ),
    )
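The averaging in _calculate_sub_question_retrieval_stats is per chunk, then per bucket: each chunk's scores are first collapsed with np.mean, and the per-chunk means are summed and divided by the bucket count. A small worked example with made-up, binary-exact scores so the arithmetic is visible:

# Two verified chunks with mean scores 0.75 and 0.25, one rejected chunk at 0.125.
verified_scores_sum = 0.75 + 0.25  # accumulated into raw_chunk_stats_scores
verified_avg_scores = verified_scores_sum / 2.0  # -> 0.5
rejected_avg_scores = 0.125 / 1.0  # -> 0.125
assert verified_avg_scores == 0.5 and rejected_avg_scores == 0.125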
@@ -0,0 +1,82 @@
from operator import add
from typing import Annotated
from typing import TypedDict

from onyx.agent_search.core_state import SubgraphCoreState
from onyx.agent_search.pro_search_b.expanded_retrieval.models import (
    ExpandedRetrievalResult,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.models import QueryResult
from onyx.agent_search.shared_graph_utils.models import RetrievalFitStats
from onyx.agent_search.shared_graph_utils.operators import dedup_inference_sections
from onyx.context.search.models import InferenceSection


### States ###

## Graph Input State


class ExpandedRetrievalInput(SubgraphCoreState):
    question: str
    base_search: bool
    sub_question_id: str | None


## Update/Return States


class QueryExpansionUpdate(TypedDict):
    expanded_queries: list[str]


class DocVerificationUpdate(TypedDict):
    verified_documents: Annotated[list[InferenceSection], dedup_inference_sections]


class DocRetrievalUpdate(TypedDict):
    expanded_retrieval_results: Annotated[list[QueryResult], add]
    retrieved_documents: Annotated[list[InferenceSection], dedup_inference_sections]


class DocRerankingUpdate(TypedDict):
    reranked_documents: Annotated[list[InferenceSection], dedup_inference_sections]
    sub_question_retrieval_stats: RetrievalFitStats | None


class ExpandedRetrievalUpdate(TypedDict):
    expanded_retrieval_result: ExpandedRetrievalResult


## Graph Output State


class ExpandedRetrievalOutput(TypedDict):
    expanded_retrieval_result: ExpandedRetrievalResult
    base_expanded_retrieval_result: ExpandedRetrievalResult


## Graph State


class ExpandedRetrievalState(
    # This includes the core state
    ExpandedRetrievalInput,
    QueryExpansionUpdate,
    DocRetrievalUpdate,
    DocVerificationUpdate,
    DocRerankingUpdate,
    ExpandedRetrievalOutput,
):
    pass


## Conditional Input States


class DocVerificationInput(ExpandedRetrievalInput):
    doc_to_verify: InferenceSection


class RetrievalInput(ExpandedRetrievalInput):
    query_to_retrieve: str
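ExpandedRetrievalState is just the union of its input and update TypedDicts: each node returns only the keys of its own *Update type, and LangGraph folds those partial returns into the combined state, applying the Annotated reducers (dedup_inference_sections, add) where declared. A compressed, self-contained illustration of that composition pattern with illustrative names:

from typing import TypedDict


class InputState(TypedDict):
    question: str


class StepUpdate(TypedDict):
    expanded_queries: list[str]


class CombinedState(InputState, StepUpdate):
    pass


def expand(state: CombinedState) -> StepUpdate:
    # Returns only its own slice; the framework merges it into CombinedState.
    return {"expanded_queries": [state["question"] + " rephrased"]}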
backend/onyx/agent_search/pro_search_b/main/edges.py
@@ -0,0 +1,92 @@
from collections.abc import Hashable
from typing import Literal

from langgraph.types import Send

from onyx.agent_search.core_state import extract_core_fields_for_subgraph
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionInput,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
    AnswerQuestionOutput,
)
from onyx.agent_search.pro_search_b.main.states import MainState
from onyx.agent_search.pro_search_b.main.states import RequireRefinedAnswerUpdate
from onyx.agent_search.shared_graph_utils.utils import make_question_id
from onyx.utils.logger import setup_logger

logger = setup_logger()


def parallelize_initial_sub_question_answering(
    state: MainState,
) -> list[Send | Hashable]:
    if len(state["initial_decomp_questions"]) > 0:
        # sub_question_record_ids = [subq_record.id for subq_record in state["sub_question_records"]]
        # if len(state["sub_question_records"]) == 0:
        #     if state["config"].use_persistence:
        #         raise ValueError("No sub-questions found for initial decomposed questions")
        #     else:
        #         # in this case, we are doing retrieval on the original question.
        #         # to make all the logic consistent, we create a new sub-question
        #         # with the same content as the original question
        #         sub_question_record_ids = [1] * len(state["initial_decomp_questions"])

        return [
            Send(
                "answer_query_subgraph",
                AnswerQuestionInput(
                    **extract_core_fields_for_subgraph(state),
                    question=question,
                    question_id=make_question_id(0, question_nr),
                ),
            )
            for question_nr, question in enumerate(state["initial_decomp_questions"])
        ]

    else:
        return [
            Send(
                "ingest_answers",
                AnswerQuestionOutput(
                    answer_results=[],
                ),
            )
        ]


# Define the function that determines whether to continue or not
def continue_to_refined_answer_or_end(
    state: RequireRefinedAnswerUpdate,
) -> Literal["refined_decompose", "logging_node"]:
    if state["require_refined_answer"]:
        return "refined_decompose"
    else:
        return "logging_node"


def parallelize_refined_sub_question_answering(
    state: MainState,
) -> list[Send | Hashable]:
    if len(state["refined_sub_questions"]) > 0:
        return [
            Send(
                "answer_refinement_sub_question",
                AnswerQuestionInput(
                    **extract_core_fields_for_subgraph(state),
                    question=question_data.sub_question,
                    question_id=make_question_id(1, question_nr),
                ),
            )
            for question_nr, question_data in state["refined_sub_questions"].items()
        ]

    else:
        return [
            Send(
                "ingest_refined_sub_answers",
                AnswerQuestionOutput(
                    answer_results=[],
                ),
            )
        ]
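continue_to_refined_answer_or_end returns a node name directly, so it can plug into add_conditional_edges without a path_map; the Literal return type documents the two possible targets. The main graph builder that wires it up is not part of this diff, so the following is only an illustrative sketch (the source node name is hypothetical):

graph.add_conditional_edges(
    source="initial_answer_quality_check",  # hypothetical upstream node
    path=continue_to_refined_answer_or_end,
    # No path_map needed: the returned Literal values are used as node names.
)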