Compare commits


305 Commits

Author SHA1 Message Date
Evan Lohn
a9183fddb7 WIP: debugging langgraph inputs 2025-01-17 14:55:15 -08:00
Evan Lohn
9d7c337ee6 skip answer verification for idk answers 2025-01-16 17:04:31 -08:00
Evan Lohn
38fd061ed5 cleanup hardcoded idks 2025-01-16 16:41:00 -08:00
Evan Lohn
7282447186 clean run_graph 2025-01-16 16:15:59 -08:00
joachim-danswer
b6da896240 removal of citations from previous answers 2025-01-16 14:26:41 -08:00
joachim-danswer
4dad841bfc new refined prompt 2025-01-16 14:25:44 -08:00
joachim-danswer
755b7579b5 sub_question number handling 2025-01-16 14:25:38 -08:00
Evan Lohn
42257244d9 finished checks for llm context window before invoking or streaming 2025-01-16 14:10:05 -08:00
joachim-danswer
3991eac4c0 new citation format 2025-01-15 17:17:04 -08:00
Evan Lohn
fabc9f7e4b attempted fix for early exit from langgraph event loop 2025-01-15 17:05:47 -08:00
Evan Lohn
7987a3a75e mypy error on unbound locals 2025-01-15 15:43:55 -08:00
Evan Lohn
ff24f82d52 unbound local error 2025-01-15 15:32:24 -08:00
Evan Lohn
563513698b fixed no good subquestions error 2025-01-15 15:23:59 -08:00
Evan Lohn
b2208195ce fixed streaming and persistence of answer docs 2025-01-15 14:55:52 -08:00
Evan Lohn
ac9ce7ca68 added back saved docs for agentic search 2025-01-15 14:23:24 -08:00
joachim-danswer
c9b34ed583 Updated citation handling on backend + citations for refined answer 2025-01-15 14:16:04 -08:00
Evan Lohn
2bc948fa73 dispatch initial question as dummy subquestion 2025-01-15 11:51:13 -08:00
Evan Lohn
8ea987e068 remove empty expanded subquestions 2025-01-15 11:35:00 -08:00
Evan Lohn
43e488275f context length fix 2025-01-15 11:35:00 -08:00
joachim-danswer
09d0820882 Question numbering fix (logging temp to info) 2025-01-15 11:30:46 -08:00
Evan Lohn
2c0ef2ea47 better separated dispatch 2025-01-15 10:26:38 -08:00
Evan Lohn
7cca660a93 handle prompts that exceed context window 2025-01-14 18:58:12 -08:00
Evan Lohn
ab70b7c303 persistence of refined agent answer and top-level handling of fine grained citations (WIP) 2025-01-14 17:28:40 -08:00
pablodanswer
0ff212a2b4 alembic migration 2025-01-14 14:41:14 -08:00
pablodanswer
5c31171b37 Merge branch 'pro-search' of github.com:onyx-dot-app/onyx into pro-search 2025-01-14 14:38:27 -08:00
pablodanswer
1669a5d69f alembic migration 2025-01-14 14:37:59 -08:00
pablodanswer
60db37fac1 alembic migration 2025-01-14 14:35:43 -08:00
joachim-danswer
48a4e2c76b refined prompt fix 2025-01-14 14:35:43 -08:00
Evan Lohn
e1175e15da stream refined answer 2025-01-14 14:35:43 -08:00
Evan Lohn
27b044c030 fix local variable access issue 2025-01-14 14:35:43 -08:00
Evan Lohn
4a7c6a6561 subquestion numbers start at 1 2025-01-14 14:35:43 -08:00
Evan Lohn
78df113c7f un-break refinement flow and stream answer 2025-01-14 14:35:43 -08:00
Evan Lohn
113767a061 stream stop info consistency 2025-01-14 14:35:43 -08:00
pablodanswer
1851c836dc alembic migration 2025-01-14 14:35:43 -08:00
Evan Lohn
413a4e55f5 use correct doc id 2025-01-14 14:35:43 -08:00
Evan Lohn
dfcf8791e7 tool call kickoffs for starting agentic, starting refined 2025-01-14 14:35:43 -08:00
Evan Lohn
1b9f128851 verified docs for persisted info 2025-01-14 14:35:43 -08:00
Evan Lohn
d38b9bd194 cleanup run_graph and added stream stopping for sub_answers 2025-01-14 14:35:43 -08:00
Evan Lohn
ee304c9c35 improved no-subquestion-gen handling 2025-01-14 14:35:43 -08:00
joachim-danswer
4c75b73fa8 Pro_search B + fix for no docs retrieved 2025-01-14 14:35:43 -08:00
Evan Lohn
70509fbe7e attach persisted agent search info to assistant message 2025-01-14 14:35:43 -08:00
Evan Lohn
6bea880695 added subquestion info to streamed docs, removed extra deduping 2025-01-14 14:35:43 -08:00
Evan Lohn
249dd96f25 tentative fix for not persisting search docs 2025-01-14 14:35:43 -08:00
Evan Lohn
f0e74618e2 fixed issue when tool call unnecessary 2025-01-14 14:35:43 -08:00
Evan Lohn
3e27b79040 better answer persistence 2025-01-14 14:35:43 -08:00
Evan Lohn
91a37ef345 save agent answers 2025-01-14 14:35:43 -08:00
Evan Lohn
6c2f4e4775 fix citation processing in basic search 2025-01-14 14:35:43 -08:00
Evan Lohn
87101c8c74 fix run_graph testing script 2025-01-14 14:35:43 -08:00
Evan Lohn
9887be3dfc fixed issues with standard flow 2025-01-14 14:35:43 -08:00
Evan Lohn
22e209afc8 add back old citation processing temporarily 2025-01-14 14:35:43 -08:00
Evan Lohn
05a4792575 fix heads issue 2025-01-14 14:35:43 -08:00
Evan Lohn
f79d44bd6e Pro search clean commmit. See evan_answer_rework branch for prior history 2025-01-14 14:35:43 -08:00
pablonyx
5081d240ce Proper anonymous user restricting (#3645) 2025-01-14 14:35:42 -08:00
pablodanswer
a570d39301 validated 2025-01-14 14:15:41 -08:00
pablodanswer
cd9279f0e1 additional label filtering 2025-01-14 13:49:12 -08:00
pablodanswer
72d1928b8f label editing / deletion 2025-01-14 13:45:49 -08:00
pablodanswer
086bba6454 fully cleaned assistant editor 2025-01-14 13:40:41 -08:00
pablodanswer
289be0a423 user settings etc. 2025-01-14 13:31:37 -08:00
pablodanswer
862a62483c various improvements 2025-01-14 13:19:25 -08:00
pablodanswer
694925d81d k 2025-01-14 12:33:34 -08:00
pablodanswer
478eb511fa add shortcuts, reorganize various pages,update seeding, starter messages, etc. 2025-01-14 12:30:05 -08:00
pablodanswer
a259f92f39 alembic migration 2025-01-13 20:55:52 -08:00
joachim-danswer
9592f0a494 refined prompt fix 2025-01-13 20:54:39 -08:00
Evan Lohn
07c2336c08 stream refined answer 2025-01-13 20:54:39 -08:00
Evan Lohn
7350dd73d1 fix local variable access issue 2025-01-13 20:54:39 -08:00
Evan Lohn
8f428cfcec subquestion numbers start at 1 2025-01-13 20:54:39 -08:00
Evan Lohn
3610a51222 un-break refinement flow and stream answer 2025-01-13 20:54:39 -08:00
Evan Lohn
e0654c2209 stream stop info consistency 2025-01-13 20:54:39 -08:00
pablodanswer
99d7c09433 alembic migration 2025-01-13 20:54:39 -08:00
Evan Lohn
6803702ca3 use correct doc id 2025-01-13 20:54:39 -08:00
Evan Lohn
4ea0ca5a78 tool call kickoffs for starting agentic, starting refined 2025-01-13 20:54:39 -08:00
Evan Lohn
561e44e443 verified docs for persisted info 2025-01-13 20:54:39 -08:00
Evan Lohn
931a119498 cleanup run_graph and added stream stopping for sub_answers 2025-01-13 20:54:39 -08:00
Evan Lohn
fddc1882d1 improved no-subquestion-gen handling 2025-01-13 20:54:39 -08:00
joachim-danswer
086d70a085 Pro_search B + fix for no docs retrieved 2025-01-13 20:54:39 -08:00
Evan Lohn
5a832628e2 attach persisted agent search info to assistant message 2025-01-13 20:54:39 -08:00
Evan Lohn
dfe1ed4c66 added subquestion info to streamed docs, removed extra deduping 2025-01-13 20:54:39 -08:00
Evan Lohn
5761557c19 tentative fix for not persisting search docs 2025-01-13 20:54:39 -08:00
Evan Lohn
29c479f496 fixed issue when tool call unnecessary 2025-01-13 20:54:39 -08:00
Evan Lohn
b60002c791 better answer persistence 2025-01-13 20:54:39 -08:00
Evan Lohn
b49c5afb09 save agent answers 2025-01-13 20:54:39 -08:00
Evan Lohn
b15e29619a fix citation processing in basic search 2025-01-13 20:54:39 -08:00
Evan Lohn
119336035e fix run_graph testing script 2025-01-13 20:54:39 -08:00
Evan Lohn
25bb5983af fixed issues with standard flow 2025-01-13 20:54:39 -08:00
Evan Lohn
ca1c12f122 add back old citation processing temporarily 2025-01-13 20:54:39 -08:00
Evan Lohn
9b55643e55 fix heads issue 2025-01-13 20:54:39 -08:00
Evan Lohn
233bbfa4e4 Pro search clean commmit. See evan_answer_rework branch for prior history 2025-01-13 20:54:36 -08:00
pablonyx
8c67288197 Proper anonymous user restricting (#3645) 2025-01-13 20:54:05 -08:00
pablodanswer
26c5c57ddb quick v1 labels 2025-01-13 19:45:00 -08:00
pablodanswer
ac15d0002a k 2025-01-13 18:01:19 -08:00
pablodanswer
8cca29eeab fix width + editors 2025-01-13 17:58:25 -08:00
pablodanswer
a365ab0c7d remove some whitespace 2025-01-13 17:55:29 -08:00
pablodanswer
fd89d1e141 quick nit 2025-01-13 17:53:38 -08:00
pablodanswer
7e32d21236 push quick changes from diff 2025-01-13 17:52:08 -08:00
pablodanswer
71eab9c740 push 2025-01-13 17:47:36 -08:00
pablodanswer
5c4451c084 :wMerge branch 'new_ux_branch' of github.com:onyx-dot-app/onyx into new_ux_branch 2025-01-13 17:34:37 -08:00
pablodanswer
2c894aaf07 quick nit 2025-01-13 17:32:04 -08:00
Yuhong Sun
bf3da9f9cf k 2025-01-13 17:25:31 -08:00
Yuhong Sun
4506770dd9 Yuhong 2025-01-13 17:16:15 -08:00
pablodanswer
25e2dfa5df k 2025-01-13 13:45:20 -08:00
pablodanswer
ba2d5fcc7d add input prompts 2025-01-13 13:43:41 -08:00
pablodanswer
f9be71ff24 l 2025-01-13 12:44:21 -08:00
pablodanswer
b92485223b sidebar 2025-01-13 12:44:04 -08:00
pablodanswer
5790f37648 quick nit 2025-01-13 12:41:32 -08:00
pablodanswer
b641cfc3e4 Merge branch 'new_ux_branch' of github.com:onyx-dot-app/onyx into new_ux_branch 2025-01-13 12:39:36 -08:00
pablodanswer
a2dfbb5b9c update assistant editor 2025-01-13 12:39:16 -08:00
Yuhong Sun
570ba9f0b6 Yuhong 2025-01-13 12:17:21 -08:00
pablodanswer
650fee6e2c quick updates 2025-01-13 12:00:47 -08:00
pablodanswer
8aa7fb5027 Merge branch 'new_ux_branch' of github.com:onyx-dot-app/onyx into new_ux_branch 2025-01-13 10:04:39 -08:00
pablodanswer
e4bf04fd94 update chat banner 2025-01-13 10:04:31 -08:00
Yuhong Sun
70de4708d0 Yuhong 2025-01-13 10:03:57 -08:00
pablodanswer
4ed9f0ffc7 editor changes 2025-01-12 21:22:35 -08:00
pablodanswer
7b67546199 k 2025-01-12 19:17:38 -08:00
pablodanswer
6c68a53c62 nit 2025-01-12 19:14:19 -08:00
pablodanswer
9e3b1d29aa nit 2025-01-12 18:25:40 -08:00
pablodanswer
ec7a606f4c popover within a popover > modal within a modal 2025-01-12 18:08:00 -08:00
pablodanswer
4e5bc7a4ba quick nit 2025-01-12 17:46:52 -08:00
pablodanswer
55d3b0f271 Merge branch 'new_ux_branch' of github.com:onyx-dot-app/onyx into new_ux_branch 2025-01-12 17:45:18 -08:00
pablodanswer
565bfa4e88 address all but modal within modal 2025-01-12 17:42:34 -08:00
Yuhong Sun
9ad10d1f60 Yuhong 2025-01-12 17:10:38 -08:00
pablodanswer
ad19e9aee7 k 2025-01-12 16:47:57 -08:00
pablodanswer
b11641c2bc most new fixes 2025-01-12 16:32:52 -08:00
pablodanswer
b7307813d5 quick nits 2025-01-12 16:22:21 -08:00
Yuhong Sun
3e8e544086 Yuhong 2025-01-12 14:03:36 -08:00
pablodanswer
7ba00f8b48 quick nit 2025-01-12 12:17:56 -08:00
pablodanswer
19204b49a7 final updates 2025-01-12 12:13:05 -08:00
pablodanswer
7254cb642d fully updated - groups 2025-01-12 12:01:39 -08:00
pablodanswer
da27c8be6d k 2025-01-11 20:28:12 -08:00
pablodanswer
9d0272fe62 minor nit 2025-01-11 19:13:45 -08:00
pablodanswer
167b5bad49 additioanl nits 2025-01-11 17:14:01 -08:00
pablodanswer
3acf235c84 quick nit 2025-01-11 17:07:53 -08:00
pablodanswer
0d9441da88 draggables 2025-01-11 17:02:59 -08:00
pablodanswer
a915e4dfa7 k 2025-01-11 11:45:50 -08:00
pablodanswer
7f2610e7d4 looking good 2025-01-11 11:24:09 -08:00
pablodanswer
19323472e6 nit 2025-01-10 19:24:33 -08:00
pablodanswer
be84cf95bf fix the hydra 2025-01-10 14:15:17 -08:00
pablodanswer
79d847f660 nit 2025-01-10 13:08:50 -08:00
pablodanswer
b06f56102e quick nit 2025-01-10 12:55:25 -08:00
pablodanswer
078ae4b9c7 add new ux 2025-01-10 12:54:00 -08:00
pablodanswer
5d034e08fc nit 2025-01-10 12:07:27 -08:00
pablodanswer
cafa0aac0d push minor changes 2025-01-10 12:07:27 -08:00
pablodanswer
f61864c36e nit 2025-01-10 12:07:27 -08:00
pablodanswer
5b0a1ccc31 minor nit 2025-01-10 12:07:27 -08:00
pablodanswer
acb9cca1e8 quick nits 2025-01-10 12:07:27 -08:00
pablodanswer
e22918e31d v3 2025-01-10 12:07:27 -08:00
pablodanswer
e5c430178d quick nits 2025-01-10 12:07:27 -08:00
pablodanswer
35f379b093 validate 2025-01-10 12:07:27 -08:00
pablodanswer
c85900e4f8 nit 2025-01-10 12:07:27 -08:00
pablodanswer
bf6b6342a1 quick addition 2025-01-10 12:07:27 -08:00
pablodanswer
9adbfc1b81 organize components 2025-01-10 12:07:27 -08:00
pablodanswer
b5dd5df36f quick nit 2025-01-10 12:07:27 -08:00
pablodanswer
6b0a2e11b5 k 2025-01-10 12:07:27 -08:00
pablodanswer
169f3fd0dc v2 2025-01-10 12:07:27 -08:00
pablodanswer
c15a828576 add chrome extension
minor clean up

additional handling

post rebase fixes

nit

quick bump

finalize

minor cleanup

organizational

Revert changes in backend directory

Revert changes in deployment directory

push misc changes

improve shortcut display + general nrf page layout

minor clean up

quick nit

update chrome

k

build fix

k

update

k
2025-01-10 12:07:26 -08:00
Weves
1470b7e038 Add tests for some LLM provider endpoints + small logic change to ensure that display_model_names is not empty 2025-01-10 08:55:53 -08:00
rkuo-danswer
bf78fb79f8 possible fix for gdrive oauth in the cloud (#3642)
* possible fix for gd oauth in the cloud

* missed code in rename/merge

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2025-01-10 02:10:59 +00:00
rkuo-danswer
d972a78f45 Make connector pause and delete fast (#3646)
* first cut

* refresh on delete

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2025-01-10 01:39:45 +00:00
hagen-danswer
50131ba22c Better logging for confluence space permissions 2025-01-09 15:13:02 -08:00
rkuo-danswer
439217317f Merge pull request #3644 from onyx-dot-app/bugfix/model-server-build-fix
hope this env var works.
2025-01-09 14:34:25 -08:00
hagen-danswer
c55de28423 added distinct when outer joining for user filters (#3641)
* added distinct when outer joining for user filters

* Added distinct when outer joining for user filters for all
2025-01-09 14:15:38 -08:00
Richard Kuo (Danswer)
91e32e801d hope this env var works. 2025-01-09 13:51:58 -08:00
rkuo-danswer
2ae91f0f2b Feature/redis prod tool (#3619)
* prototype tools for handling prod issues

* add some commands

* add batching and dry run options

* custom redis tool

* comment

* default to app config settings for redis

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2025-01-09 21:34:07 +00:00
hagen-danswer
d40fd82803 Conf doc sync improvements (#3643)
* Reduce number of requests to Confluence

* undo

* added a way to dynamically adjust the pagination limit

* undo
2025-01-09 12:56:56 -08:00
rkuo-danswer
97a963b4bf add index to speed up get last attempt (#3636)
* add index to speed up get last attempt

* use descending order

* put back unique param

* how did this not get formatted?

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2025-01-09 00:56:55 +00:00
pablonyx
7f6ef1ff57 Remove unnecessary logspam
Remove unnecessary logs
2025-01-08 17:03:52 -08:00
pablodanswer
d98746b988 remove unnecessary logs 2025-01-08 17:03:15 -08:00
rkuo-danswer
a76f1b4c1b Merge pull request #3628 from onyx-dot-app/bugfix/debug_tenant
add more debug logging for locking issue
2025-01-08 15:14:37 -08:00
hagen-danswer
4c4ff46fe3 Fixing google drive tests (#3634)
* Fixing google drive texts

* Update conftest.py
2025-01-08 22:34:38 +00:00
hagen-danswer
0f9842064f Added env var to skip warm up (#3633) 2025-01-08 14:29:15 -08:00
pablonyx
d7bc32c0ec Fully remove visit API (#3621)
* v1

* update indexing logic

* update updates

* nit

* clean up args

* update for clarity + best practices

* nit + logs

* fix

* minor clean up

* remove logs

* quick nit
2025-01-08 13:49:01 -08:00
Richard Kuo (Danswer)
1f48de9731 more logging 2025-01-08 12:49:24 -08:00
Richard Kuo (Danswer)
a22d02ff70 add another log line 2025-01-08 10:01:24 -08:00
Richard Kuo (Danswer)
dcfc621a66 add more debug logging for locking issue 2025-01-08 09:43:47 -08:00
Chris Weaver
eac73a1bf1 Improve egnyte connector (#3626) 2025-01-08 03:09:46 +00:00
pablonyx
717560872f Merge pull request #3627 from onyx-dot-app/whitelabeling_name
Whitelabelling
2025-01-07 19:16:01 -08:00
pablodanswer
ce2572134c k 2025-01-07 19:06:52 -08:00
rkuo-danswer
02f72a5c86 Multiple cloud/indexing fixes (#3609)
* more debugging

* test reacquire outside of loop

* more logging

* move lock_beat test outside the try catch so that we don't worry about testing locks we never took

* use a larger scan_iter value for performance

* batch stale document sync batches

* add debug logging for a particular timeout issue

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2025-01-08 01:30:29 +00:00
hagen-danswer
eb916df139 added debugger step 2025-01-07 16:18:46 -08:00
hagen-danswer
fafad5e119 Improve contributing guide (#3625)
* Improve contributing guide

* more improvements to contributing guide
2025-01-07 16:16:17 -08:00
pablonyx
a314a08309 Speed up admin pages (#3623)
* ni

* speed up pages

* minor nit

* nit
2025-01-07 15:40:26 -08:00
hagen-danswer
4ce24d68f7 prevent other tests from interfering with existing google drive tests (#3624)
* prevent other tests from interfering with existing google drive tests

* cleanup gdrive tests

* finished

* done
2025-01-07 15:32:36 -08:00
hagen-danswer
a95f4298ad Improved logging for confluence calls (#3622)
* Improved logging for confluence calls

* cleanup

* idk

* combined logging
2025-01-07 21:53:08 +00:00
rkuo-danswer
7cd76ec404 comment out the per doc sync hack (#3620)
* comment out the per doc sync hack

* fix commented code

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2025-01-07 19:44:15 +00:00
pablonyx
5b5c1166ca Async Redis (#3618)
* k

* update configs for clarity

* typing

* update
2025-01-07 19:34:57 +00:00
pablonyx
d9e9c6973d Multitenant anonymous (#3595)
* anonymous users for multi tenant setting

* nit

* k
2025-01-07 02:57:20 +00:00
pablonyx
91903141cd Built in tool cache with tool call id (#3617)
* k

* improved

* k

* nit

* nit

* nit
2025-01-07 01:03:52 +00:00
hagen-danswer
e329b63b89 Added Permission Syncing for Salesforce (#3551)
* Added Permission Syncing for Salesforce

* cleanup

* updated connector doc conversion

* finished salesforce permission syncing

* fixed connector to batch Salesforce queries

* tests!

* k

* Added error handling and check for ee and sync type for postprocessing

* comments

* minor touchups

* tested to work!

* done

* my pie

* lil cleanup

* minor comment
2025-01-07 00:37:03 +00:00
hagen-danswer
71c2559ea9 Discord cleanup (#3615)
* Discord cleanup

* fix case discrepancy
2025-01-06 15:11:03 -08:00
Ishankoradia
ceb34a41d9 discord connector (#3023)
* discord: frontend and backend poll connector

* added requirements for discord installation

* fixed the mypy errors

* process messages not part of any thread

* minor change

* updated the connector; this logic works & am able to docs when i print

* minor change

* ability to enter a start date to pull docs from and refactor

* added the load connector and fixed mypy errors

* local commit test

done!

* minor refactor and properly commented everything

* updated the logic to handle permissions and index active/archived threads

* basic discord test template

* cleanup

* going away with the danswer discord client class ; using an async context manager

* moved to proper folder

* minor fixes

* needs improvement

* fixed discord icon

---------

Co-authored-by: hagen-danswer <hagen@danswer.ai>
2025-01-06 14:54:22 -08:00
pablonyx
82eab9d704 Doc explore fix (#3614)
* k

* k

* add comment
2025-01-06 19:42:07 +00:00
pablonyx
2b8d3a6ef5 fix white labelling empty string (#3603) 2025-01-06 19:26:55 +00:00
pablonyx
4fb129e77b Increase timeout + revert changes for clarity (#3604)
* increase timeout + revert changes for clarity

* quick nit

* k
2025-01-06 18:20:53 +00:00
pablonyx
f16ca1b735 minor auth fix (#3613) 2025-01-06 17:51:02 +00:00
pablonyx
e3b2c9d944 Tracking update (#3605)
* tracking update

* k
2025-01-06 17:17:00 +00:00
pablodanswer
6c9c25642d remove empty files on main 2025-01-06 09:01:33 -08:00
hagen-danswer
2862d8bbd3 Minor opensource cleanup (#3610) 2025-01-06 07:26:07 -08:00
skylares
143be6a524 Add assertions to Zendesk connector tests (#3600)
Co-authored-by: hagen-danswer <hagen@danswer.ai>
2025-01-06 06:43:23 -08:00
SubashMohan
c2444a5cff Slim connector for Zendesk (#3367)
* Add SlimConnector support for Zendesk

* ZenDesk format changes

* code formating

---------

Co-authored-by: hagen-danswer <hagen@danswer.ai>
2025-01-06 06:41:41 -08:00
rkuo-danswer
7f8194798a Merge pull request #3608 from onyx-dot-app/bugfix/locking_4
fix timing calculations and don't spam the queue lengths check from e…
2025-01-05 21:13:47 -08:00
Richard Kuo (Danswer)
e3947e4b64 fix timing calculations and don't spam the queue lengths check from every task 2025-01-05 21:13:08 -08:00
rkuo-danswer
98005510ad Merge pull request #3607 from onyx-dot-app/bugfix/locking_redux
add detailed timings to monitor vespa sync
2025-01-05 20:03:17 -08:00
Richard Kuo
ca54bd0b21 add logging 2025-01-05 20:02:05 -08:00
Richard Kuo
d26f8ce852 add detailed timings to monitor vespa sync 2025-01-05 19:24:58 -08:00
pablonyx
c8090ab75b Auth fix + Registration Clarity (#3590)
* clarify auth flow

* k

* nit

* k

* fix typing
2025-01-06 02:17:45 +00:00
hagen-danswer
e100a5e965 Properly account for anonymous access in Confluence 2025-01-05 18:01:39 -08:00
pablonyx
ddec239fef Improved indexing (#3594)
* nit

* k

* add steps

* main util functions

* functioning fully

* quick nit

* k

* typing fix

* k

* address comments
2025-01-05 23:31:53 +00:00
Chris Weaver
e83542f572 Add support for auto-refreshing available models based on an API call (#3576) 2025-01-05 15:45:49 -08:00
joachim-danswer
8750f14647 alignment & renaming of objects for initial (displayed) ranking and re-ranking/validation citations
- renamed post-reranking/validation citation information consistently to final_... (example: doc_id_to_rank_map -> final_doc_id_to_rank_map)
 - changed and renamed objects containing initial ranking information (now: display_...) consistent with final rankings (final_...). Specifically, {} to [] for displayed_search_results
 - for CitationInfo, changed citation_num from 'x-th citation in response stream' to the initial position of the doc [NOTE: test implications]
-  changed tests:
    onyx/backend/tests/unit/onyx/chat/stream_processing/test_citation_processing.py
    onyx/backend/tests/unit/onyx/chat/stream_processing/test_citation_substitution.py
2025-01-05 15:44:34 -08:00
rkuo-danswer
27699c8216 Merge pull request #3602 from onyx-dot-app/bugfix/lock_not_owned
various lock diagnostics and timing adjustments
2025-01-05 15:12:01 -08:00
Richard Kuo (Danswer)
6fcd712a00 comment for lock and usage 2025-01-05 15:11:39 -08:00
Richard Kuo (Danswer)
b027a08698 various lock diagnostics and timing adjustments 2025-01-05 13:59:36 -08:00
Chris Weaver
1db778baa8 Small airtable refactor + handle files with uppercase extensions (#3598)
* Small airtable refactor + handle files with uppercase extensions

* Fix mypy
2025-01-05 11:27:50 -08:00
Chris Weaver
f895e5f7d0 Speedup orphan doc cleanup script (#3596)
* Speedup orphan doc cleanup script

* Fix mypy
2025-01-05 14:28:25 +00:00
rkuo-danswer
2fc58252f4 Merge pull request #3599 from onyx-dot-app/bugfix/doc-sync
quick hack to prevent resyncing the same doc
2025-01-05 04:14:28 -08:00
Richard Kuo (Danswer)
371d1ccd8f move to just setting keys 2025-01-05 03:26:33 -08:00
Richard Kuo (Danswer)
7fb92d42a0 quick hack to prevent resyncing the same doc 2025-01-05 03:05:32 -08:00
Weves
af2061c4db Add Linear OAuth env variables to dev compose 2025-01-04 16:02:04 -08:00
pablonyx
ffec19645b JWT -> Redis (#3574)
* functional v1

* functional logout

* minor clean up

* quick clean up

* update configuration

* ni

* nit

* finalize

* update login page

* delete unused import

* quick nit

* updates

* clean up

* ni

* k

* k
2025-01-04 19:35:43 +00:00
pablonyx
67d2c86250 Remove Exclamation marks + comments (#3586)
* remove explanation marks + comments

* nit
2025-01-04 18:39:16 +00:00
pablonyx
6c018cb53f add personal assistant usage stats (#3543) 2025-01-04 18:38:41 +00:00
pablonyx
62302e3faf Latex for $10 and $100 (#3585)
* nit

* k
2025-01-04 17:37:04 +00:00
rkuo-danswer
0460531c72 Merge pull request #3593 from onyx-dot-app/bugfix/primary_worker_lock
the primary worker lock doesn't always exist
2025-01-03 22:01:11 -08:00
Richard Kuo
6af07a888b the primary worker lock doesn't always exist 2025-01-03 21:12:11 -08:00
Weves
ea75f5cd5d Move google-cloud-aiplatform to default requirements to support vertex AI llama 2025-01-03 15:20:59 -08:00
rkuo-danswer
b92c183022 re-prep user group deletion on the actual deletion (#3588)
* re-prep user group deletion on the actual deletion

* user group needs to be synced to be prepped

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2025-01-03 22:35:56 +00:00
skylares
c191e23256 Pagination Hook (#3494)
* Backend changes for pagination hook + Paginated users table

* Frontend changes for pagination hook

* Fix invited users endpoint

* Fix layout shift & add enter to submit user invites

* mypy

* Cleanup

* Resolve PR concerns & remove UserStatus

* Fix errors

---------

Co-authored-by: hagen-danswer <hagen@danswer.ai>
2025-01-03 14:32:55 -08:00
rkuo-danswer
66f9124135 Merge pull request #3584 from onyx-dot-app/bugfix/log_spacing
fix formatting
2025-01-03 00:43:36 -08:00
Richard Kuo
8f0fb70bbf fix formatting 2025-01-02 23:21:54 -08:00
rkuo-danswer
ef5e5c80bb Merge pull request #3577 from onyx-dot-app/bugfix/model_server_exception_logging
fix response logging
2025-01-02 23:08:46 -08:00
rkuo-danswer
03acb6587a Feature/model server logging (#3579)
* improve model server logging

* improve exception logging with provider/model names

* get everything into one log line

---------

Co-authored-by: Richard Kuo <rkuo@rkuo.com>
2025-01-03 01:40:29 +00:00
hagen-danswer
d1ec72b5e5 Reworked salesforce connector to use bulk api (#3581) 2025-01-02 18:09:02 -08:00
Weves
3b214133a8 Airtable improvement 2025-01-02 17:56:05 -08:00
rkuo-danswer
2232702e99 retry the individual delete's (#3580)
* retry the individual delete's

* need to raise inside the retry

* just use retry for now

---------

Co-authored-by: Richard Kuo <rkuo@rkuo.com>
2025-01-02 17:39:37 -08:00
hagen-danswer
8108ff0a4b Added logging for permissions upsert queue length 2025-01-02 17:39:01 -08:00
Richard Kuo
f64e78e986 fix response logging 2025-01-02 13:39:19 -08:00
Chris Weaver
08312a4394 Update Slack link in README.md 2025-01-01 10:03:59 -08:00
Weves
92add655e0 Slack fixes 2024-12-31 18:04:12 -08:00
Chris Weaver
d64464ca7c Add support for OAuth connectors that require user input (#3571)
* Add support for OAuth connectors that require user input

* Cleanup

* Fix linear

* Small re-naming

* Remove console.log
2024-12-31 18:03:33 -08:00
Yuhong Sun
ccd3983802 Linear OAuth Connector (#3570) 2024-12-31 16:11:09 -08:00
pablonyx
240f3e4fff Ensure users cannot modify their roles on main
Ensure users cannot modify their roles
2024-12-31 15:59:27 -05:00
pablonyx
1291b3d930 Add anonymous user to main
Anonymous user
2024-12-31 15:58:52 -05:00
rkuo-danswer
d05f1997b5 Merge pull request #3569 from onyx-dot-app/bugfix/alt_index
we didn't want to rename the alt index suffix, reverting
2024-12-31 12:39:00 -08:00
Chris Weaver
aa2e2a62b9 Small Egnyte tweaks (#3568) 2024-12-31 19:28:38 +00:00
Richard Kuo
174e5968f8 we didn't want to rename the alt index suffix, reverting 2024-12-31 11:28:11 -08:00
pablodanswer
1f27606e17 minor clean up 2024-12-31 13:04:02 -05:00
pablodanswer
60355b84c1 quick nits 2024-12-31 13:04:02 -05:00
pablodanswer
680ab9ea30 updated logic 2024-12-31 13:04:02 -05:00
pablodanswer
c2447dbb1c cosmetic updates 2024-12-31 13:04:02 -05:00
pablodanswer
52bad522f8 update for multi-tenant clarity 2024-12-31 13:04:02 -05:00
pablodanswer
63e5e58313 add anonymous user 2024-12-31 13:04:02 -05:00
rkuo-danswer
2643782e30 Merge pull request #3567 from onyx-dot-app/bugfix/revert_vespa
Revert "More efficient Vespa indexing (#3552)"
2024-12-31 09:47:00 -08:00
Richard Kuo
3eb72e5c1d Revert "More efficient Vespa indexing (#3552)"
This reverts commit 2783216781.
2024-12-31 09:40:23 -08:00
rkuo-danswer
9b65c23a7e Merge pull request #3566 from onyx-dot-app/bugfix/primary_task_timings
re-enable celery task execution logging in primary worker
2024-12-31 01:29:05 -08:00
Richard Kuo (Danswer)
b43a8e48c6 add some return types to distinguish when the task is actually performing work 2024-12-31 00:10:33 -08:00
Richard Kuo (Danswer)
1955c1d67b re-enable celery task execution logging in primary worker 2024-12-30 21:53:00 -08:00
Chris Weaver
3f92ed9d29 Airtable connector (#3564)
* Airtable connector

* Improvements

* improve

* Clean up test

* Add secrets

* Fix mypy + add access token

* Mock unstructured call

* Adjust comments

* Fix ID in test
2024-12-31 03:06:28 +00:00
Weves
618369f4a1 Small fix 2024-12-30 19:20:30 -08:00
pablonyx
2783216781 More efficient Vespa indexing (#3552)
---------

Co-authored-by: Chris Weaver <25087905+Weves@users.noreply.github.com>
2024-12-30 18:51:14 -08:00
rkuo-danswer
bec0f9fb23 permission sync in cloud and beat expiry adjustment (#3544)
* try fixing exception in cloud

* raise beat expiry ... 60 seconds might be starving certain tasks completely

* adjust expiry down to 10 min

* raise concurrency overflow for indexing worker.

* parent pid check

* fix comment

* fix parent pid check, also actually raise an exception from the task if the spawned task exit status is bad

* fix pid check

* some cleanup and task wait fixes

* review fixes

* comment some code so we don't change too many things at once

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
Co-authored-by: Richard Kuo <rkuo@rkuo.com>
2024-12-31 01:05:57 +00:00
pablodanswer
97a03e7fc8 nit 2024-12-29 21:07:12 -05:00
pablodanswer
8d6e8269b7 k 2024-12-29 21:07:12 -05:00
pablodanswer
9ce2c6c517 minor change 2024-12-29 21:07:12 -05:00
pablodanswer
2ad8bdbc65 k 2024-12-29 21:07:12 -05:00
rkuo-danswer
a83c9b40d5 Bugfix/oauth fix (#3507)
* old oauth file left behind

* fix function change that was lost in merge

* fix some testing vars

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2024-12-30 01:49:12 +00:00
Chris Weaver
340fab1375 Additional error handling + logging for google drive connector (#3563)
* Additional error handling + logging for google drive connector

* Fix mypy
2024-12-29 17:48:02 -08:00
hagen-danswer
3ec338307f Fixed indexing issues with Salesforce 2024-12-29 16:45:29 -08:00
pablonyx
27acd3387a Auth specific rate limiting (#3463)
* k

* v1

* fully functional

* finalize

* nit

* nit

* nit

* clean up with wrapper + comments

* k

* update

* minor clean
2024-12-29 23:34:23 +00:00
hagen-danswer
d14ef431a7 Improve Salesforce connector 2024-12-29 14:18:40 -08:00
pablonyx
9bffeb65af Eagerly load CCpair connectors (#3531)
* remove left over vim command

* eager loading

* Revert "remove left over vim command"

This reverts commit 184a134ae0.
2024-12-29 15:58:38 +00:00
Yuhong Sun
f4806da653 Fix Null Value in PG (#3559)
* k

* k

* k

* k

* k
2024-12-29 01:53:16 +00:00
pablonyx
e2700b2bbd Remove left over yaml errors (#3527)
* remove left over vim command

* additional misconfigurations

* ensure all regions updated
2024-12-29 01:45:07 +00:00
Yuhong Sun
fc81a3fb12 Zendesk Retries (#3558)
* k

* k

* k

* k
2024-12-28 23:51:49 +00:00
pablonyx
2203cfabea Prevent SSRF risk
Prevent SSRF risk
2024-12-28 15:25:57 -05:00
pablodanswer
f4050306d6 Prevent SSRF risk 2024-12-28 15:25:12 -05:00
Weves
2d960a477f Fix discourse connector 2024-12-24 12:43:10 -08:00
hagen-danswer
8837b8ea79 Curators can now update the curator relationship (#3536)
* Curators can now update the curator relationship

* mypy

* mypy

* whoops haha
2024-12-24 18:49:58 +00:00
hagen-danswer
3dfb214f73 Slackbot polish (#3547) 2024-12-24 16:19:15 +00:00
pablonyx
18d7262608 fix logo rendering (#3542) 2024-12-22 23:00:33 +00:00
pablonyx
09b879ee73 Ensure gmail works for personal accounts (#3541)
* Ensure gmail works for personal accounts

* nit

* minor update
2024-12-22 23:00:14 +00:00
rkuo-danswer
aaa668c963 Merge pull request #3534 from onyx-dot-app/bugfix/validate_ttl
raise activity timeout to one hour
2024-12-22 13:13:57 -08:00
pablonyx
edb877f4bc fix NUL character (#3540) 2024-12-21 23:30:25 +00:00
rkuo-danswer
eb369caefb log attempt id, log elapsed since task execution start, remove log spam (#3539)
* log attempt id, log elapsed since task execution start, remove log spam

* diagnostic lock logs

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2024-12-21 23:03:50 +00:00
Chris Weaver
b9567eabd7 Fix bedrock w/ access keys (#3538)
* Fix bedrock w/ access keys

* cleanup

* Remove extra #
2024-12-21 02:24:11 +00:00
Richard Kuo (Danswer)
13bbf67091 raise activity timeout to one hour 2024-12-20 16:18:35 -08:00
hagen-danswer
457a4c73f0 Made sure confluence connector recursive by page includes top level page (#3532)
* Made sure confluence connector by page includes top level page

* surface level change
2024-12-20 21:53:59 +00:00
rkuo-danswer
ce37688b5b allow limited user to create chat session (#3533)
Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2024-12-20 21:36:41 +00:00
pablonyx
4e2c90f4af Proper user deletion / organization leaving (#3460)
* Proper user deletion / organization leaving

* minor nit

* update

* udpate provisioning

* minor cleanup

* typing

* post rebase
2024-12-20 21:01:03 +00:00
pablonyx
513dd8a319 update toggling states (#3519) 2024-12-20 20:27:22 +00:00
hagen-danswer
71c5043832 Added filter to exclude attachments with unsupported file extensions (#3530)
* Added filter to exclude attachments with unsupported file extensions

* extension
2024-12-20 19:48:15 +00:00
pablonyx
64b6f15e95 AWS extraneous error fix (#3529)
* remove left over vim command

* aws fix

* k

* remove double
2024-12-20 19:31:04 +00:00
hagen-danswer
35022f5f09 Fix group table (#3523) 2024-12-20 17:51:26 +00:00
hagen-danswer
0d44014c16 Cleanup PR template to make it more concise (#3524) 2024-12-20 17:49:31 +00:00
Yuhong Sun
1b9e9f48fa Update README.md 2024-12-20 10:26:57 -08:00
Yuhong Sun
05fb5aa27b Update README.md 2024-12-20 10:25:34 -08:00
Yuhong Sun
3b645b72a3 Crop Logo Closer 2024-12-20 10:23:52 -08:00
Yuhong Sun
fe770b5c3a Fix Logo On DarkMode (#3525) 2024-12-20 10:15:48 -08:00
hagen-danswer
1eaf885f50 associating credentials with connectors is not considered editing (#3522)
* associating credentials with connectors is not considered editing

* formatting

* formatting

* Update credentials.py

---------

Co-authored-by: Yuhong Sun <yuhongsun96@gmail.com>
2024-12-20 17:36:25 +00:00
rkuo-danswer
a187aa508c use redis exclusively with active signal renewal in more places to perform indexing fence validation (#3517)
Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2024-12-20 06:54:00 +00:00
pablonyx
aa4bfa2a78 Forgot password feature (#3437)
* forgot password feature

* improved config

* nit

* nit
2024-12-20 04:53:37 +00:00
pablonyx
9011b8a139 Update citations in shared chat display (#3487)
* update shared chat display

* Change Copy

* fix icon

* remove secret!

---------

Co-authored-by: Yuhong Sun <yuhongsun96@gmail.com>
2024-12-20 01:48:29 +00:00
pablonyx
59c774353a Latex formatting (#3499) 2024-12-19 14:48:06 -08:00
pablonyx
b458d504af Sidebar Default Open (#3488) 2024-12-19 14:04:50 -08:00
Yuhong Sun
f83e7bfcd9 Fix Default CC Pair (#3513) 2024-12-19 09:43:12 -08:00
pablonyx
4d2e26ce4b MT Cloud Tracking Fix (#3514) 2024-12-19 08:47:02 -08:00
pablonyx
817fdc1f36 Ensure metadata overrides file contents (#3512)
* ensure metadata overrides file contents

* update more blocks
2024-12-19 04:44:24 +00:00
557 changed files with 35056 additions and 8530 deletions


@@ -6,24 +6,6 @@
[Describe the tests you ran to verify your changes]
## Accepted Risk (provide if relevant)
N/A
## Related Issue(s) (provide if relevant)
N/A
## Mental Checklist:
- All of the automated tests pass
- All PR comments are addressed and marked resolved
- If there are migrations, they have been rebased to latest main
- If there are new dependencies, they are added to the requirements
- If there are new environment variables, they are added to all of the deployment methods
- If there are new APIs that don't require auth, they are added to PUBLIC_ENDPOINT_SPECS
- Docker images build and basic functionalities work
- Author has done a final read through of the PR right before merge
## Backporting (check the box to trigger backport action)
Note: You have to check that the action passes, otherwise resolve the conflicts manually and tag the patches.
- [ ] This PR should be backported (make sure to check that the backport attempt succeeds)


@@ -66,6 +66,7 @@ jobs:
NEXT_PUBLIC_POSTHOG_HOST=${{ secrets.POSTHOG_HOST }}
NEXT_PUBLIC_SENTRY_DSN=${{ secrets.SENTRY_DSN }}
NEXT_PUBLIC_GTM_ENABLED=true
NEXT_PUBLIC_FORGOT_PASSWORD_ENABLED=true
# needed due to weird interactions with the builds for different platforms
no-cache: true
labels: ${{ steps.meta.outputs.labels }}


@@ -118,6 +118,6 @@ jobs:
TRIVY_DB_REPOSITORY: "public.ecr.aws/aquasecurity/trivy-db:2"
TRIVY_JAVA_DB_REPOSITORY: "public.ecr.aws/aquasecurity/trivy-java-db:1"
with:
image-ref: docker.io/onyxdotapp/onyx-model-server:${{ github.ref_name }}
image-ref: docker.io/${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}
severity: "CRITICAL,HIGH"
timeout: "10m"


@@ -26,7 +26,19 @@ env:
GOOGLE_GMAIL_OAUTH_CREDENTIALS_JSON_STR: ${{ secrets.GOOGLE_GMAIL_OAUTH_CREDENTIALS_JSON_STR }}
# Slab
SLAB_BOT_TOKEN: ${{ secrets.SLAB_BOT_TOKEN }}
# Zendesk
ZENDESK_SUBDOMAIN: ${{ secrets.ZENDESK_SUBDOMAIN }}
ZENDESK_EMAIL: ${{ secrets.ZENDESK_EMAIL }}
ZENDESK_TOKEN: ${{ secrets.ZENDESK_TOKEN }}
# Salesforce
SF_USERNAME: ${{ secrets.SF_USERNAME }}
SF_PASSWORD: ${{ secrets.SF_PASSWORD }}
SF_SECURITY_TOKEN: ${{ secrets.SF_SECURITY_TOKEN }}
# Airtable
AIRTABLE_TEST_BASE_ID: ${{ secrets.AIRTABLE_TEST_BASE_ID }}
AIRTABLE_TEST_TABLE_ID: ${{ secrets.AIRTABLE_TEST_TABLE_ID }}
AIRTABLE_TEST_TABLE_NAME: ${{ secrets.AIRTABLE_TEST_TABLE_NAME }}
AIRTABLE_ACCESS_TOKEN: ${{ secrets.AIRTABLE_ACCESS_TOKEN }}
jobs:
connectors-check:
# See https://runs-on.com/runners/linux/

.gitignore (4 changes)

@@ -7,4 +7,6 @@
.vscode/
*.sw?
/backend/tests/regression/answer_quality/search_test_config.yaml
/web/test-results/
/web/test-results/
backend/onyx/agent_search/main/test_data.json
backend/tests/regression/answer_quality/test_data.json


@@ -5,6 +5,8 @@
# For local dev, often user Authentication is not needed
AUTH_TYPE=disabled
# Skip warm up for dev
SKIP_WARM_UP=True
# Always keep these on for Dev
# Logs all model prompts to stdout
@@ -49,3 +51,9 @@ BING_API_KEY=<REPLACE THIS>
# Enable the full set of Danswer Enterprise Edition features
# NOTE: DO NOT ENABLE THIS UNLESS YOU HAVE A PAID ENTERPRISE LICENSE (or if you are using this for local testing/development)
ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=False
# Agent Search configs # TODO: Remove give proper namings
AGENT_RETRIEVAL_STATS=False # Note: This setting will incur substantial re-ranking effort
AGENT_RERANKING_STATS=True
AGENT_MAX_QUERY_RETRIEVAL_RESULTS=20
AGENT_RERANKING_MAX_QUERY_RETRIEVAL_RESULTS=20
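
Read back in Python, the new flags above could be parsed roughly as follows — a minimal sketch, assuming a simple stand-in helper rather than Onyx's actual config module; only the variable names come from the .env diff:

```python
import os

def env_bool(name: str, default: bool = False) -> bool:
    # Treat "true"/"1"/"yes" (any casing) as enabled; anything else as disabled.
    return os.environ.get(name, str(default)).strip().lower() in ("true", "1", "yes")

AGENT_RETRIEVAL_STATS = env_bool("AGENT_RETRIEVAL_STATS", False)
AGENT_RERANKING_STATS = env_bool("AGENT_RERANKING_STATS", True)
AGENT_MAX_QUERY_RETRIEVAL_RESULTS = int(
    os.environ.get("AGENT_MAX_QUERY_RETRIEVAL_RESULTS", "20")
)
AGENT_RERANKING_MAX_QUERY_RETRIEVAL_RESULTS = int(
    os.environ.get("AGENT_RERANKING_MAX_QUERY_RETRIEVAL_RESULTS", "20")
)
```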


@@ -355,5 +355,20 @@
"PYTHONPATH": "."
},
},
{
"name": "Install Python Requirements",
"type": "node",
"request": "launch",
"runtimeExecutable": "bash",
"runtimeArgs": [
"-c",
"pip install -r backend/requirements/default.txt && pip install -r backend/requirements/dev.txt && pip install -r backend/requirements/ee.txt && pip install -r backend/requirements/model_server.txt"
],
"cwd": "${workspaceFolder}",
"console": "integratedTerminal",
"presentation": {
"group": "3"
}
},
]
}


@@ -12,6 +12,10 @@ As an open source project in a rapidly changing space, we welcome all contributi
The [GitHub Issues](https://github.com/onyx-dot-app/onyx/issues) page is a great place to start for contribution ideas.
To ensure that your contribution is aligned with the project's direction, please reach out to Hagen (or any other maintainer) on the Onyx team
via [Slack](https://join.slack.com/t/onyx-dot-app/shared_invite/zt-2twesxdr6-5iQitKZQpgq~hYIZ~dv3KA) /
[Discord](https://discord.gg/TDJ59cGV2X) or [email](mailto:founders@onyx.app).
Issues that have been explicitly approved by the maintainers (aligned with the direction of the project)
will be marked with the `approved by maintainers` label.
Issues marked `good first issue` are an especially great place to start.
@@ -23,8 +27,8 @@ If you have a new/different contribution in mind, we'd love to hear about it!
Your input is vital to making sure that Onyx moves in the right direction.
Before starting on implementation, please raise a GitHub issue.
And always feel free to message us (Chris Weaver / Yuhong Sun) on
[Slack](https://join.slack.com/t/danswer/shared_invite/zt-1w76msxmd-HJHLe3KNFIAIzk_0dSOKaQ) /
Also, always feel free to message the founders (Chris Weaver / Yuhong Sun) on
[Slack](https://join.slack.com/t/onyx-dot-app/shared_invite/zt-2twesxdr6-5iQitKZQpgq~hYIZ~dv3KA) /
[Discord](https://discord.gg/TDJ59cGV2X) directly about anything at all.
### Contributing Code
@@ -42,7 +46,7 @@ Our goal is to make contributing as easy as possible. If you run into any issues
That way we can help future contributors and users can avoid the same issue.
We also have support channels and generally interesting discussions on our
[Slack](https://join.slack.com/t/danswer/shared_invite/zt-1w76msxmd-HJHLe3KNFIAIzk_0dSOKaQ)
[Slack](https://join.slack.com/t/onyx-dot-app/shared_invite/zt-2twesxdr6-5iQitKZQpgq~hYIZ~dv3KA)
and
[Discord](https://discord.gg/TDJ59cGV2X).
@@ -123,7 +127,47 @@ Once the above is done, navigate to `onyx/web` run:
npm i
```
#### Docker containers for external software
## Formatting and Linting
### Backend
For the backend, you'll need to setup pre-commit hooks (black / reorder-python-imports).
First, install pre-commit (if you don't have it already) following the instructions
[here](https://pre-commit.com/#installation).
With the virtual environment active, install the pre-commit library with:
```bash
pip install pre-commit
```
Then, from the `onyx/backend` directory, run:
```bash
pre-commit install
```
Additionally, we use `mypy` for static type checking.
Onyx is fully type-annotated, and we want to keep it that way!
To run the mypy checks manually, run `python -m mypy .` from the `onyx/backend` directory.
### Web
We use `prettier` for formatting. The desired version (2.8.8) will be installed via a `npm i` from the `onyx/web` directory.
To run the formatter, use `npx prettier --write .` from the `onyx/web` directory.
Please double check that prettier passes before creating a pull request.
# Running the application for development
## Developing using VSCode Debugger (recommended)
We highly recommend using VSCode debugger for development.
See [CONTRIBUTING_VSCODE.md](./CONTRIBUTING_VSCODE.md) for more details.
Otherwise, you can follow the instructions below to run the application for development.
## Manually running the application for development
### Docker containers for external software
You will need Docker installed to run these containers.
@@ -135,7 +179,7 @@ docker compose -f docker-compose.dev.yml -p onyx-stack up -d index relational_db
(index refers to Vespa, relational_db refers to Postgres, and cache refers to Redis)
#### Running Onyx locally
### Running Onyx locally
To start the frontend, navigate to `onyx/web` and run:
@@ -223,35 +267,6 @@ If you want to make changes to Onyx and run those changes in Docker, you can als
docker compose -f docker-compose.dev.yml -p onyx-stack up -d --build
```
### Formatting and Linting
#### Backend
For the backend, you'll need to setup pre-commit hooks (black / reorder-python-imports).
First, install pre-commit (if you don't have it already) following the instructions
[here](https://pre-commit.com/#installation).
With the virtual environment active, install the pre-commit library with:
```bash
pip install pre-commit
```
Then, from the `onyx/backend` directory, run:
```bash
pre-commit install
```
Additionally, we use `mypy` for static type checking.
Onyx is fully type-annotated, and we want to keep it that way!
To run the mypy checks manually, run `python -m mypy .` from the `onyx/backend` directory.
#### Web
We use `prettier` for formatting. The desired version (2.8.8) will be installed via a `npm i` from the `onyx/web` directory.
To run the formatter, use `npx prettier --write .` from the `onyx/web` directory.
Please double check that prettier passes before creating a pull request.
### Release Process

CONTRIBUTING_VSCODE.md (new file, 29 lines)

@@ -0,0 +1,29 @@
# VSCode Debugging Setup
This guide explains how to set up and use VSCode's debugging capabilities with this project.
## Initial Setup
1. **Environment Setup**:
- Copy `.vscode/.env.template` to `.vscode/.env`
- Fill in the necessary environment variables in `.vscode/.env`
2. **launch.json**:
- Copy `.vscode/launch.template.jsonc` to `.vscode/launch.json`
## Using the Debugger
Before starting, make sure the Docker Daemon is running.
1. Open the Debug view in VSCode (Cmd+Shift+D on macOS)
2. From the dropdown at the top, select "Clear and Restart External Volumes and Containers" and press the green play button
3. From the dropdown at the top, select "Run All Onyx Services" and press the green play button
4. Now, you can navigate to onyx in your browser (default is http://localhost:3000) and start using the app
5. You can set breakpoints by clicking to the left of line numbers to help debug while the app is running
6. Use the debug toolbar to step through code, inspect variables, etc.
## Features
- Hot reload is enabled for the web server and API servers
- Python debugging is configured with debugpy
- Environment variables are loaded from `.vscode/.env`
- Console output is organized in the integrated terminal with labeled tabs


@@ -3,7 +3,7 @@
<a name="readme-top"></a>
<h2 align="center">
<a href="https://www.onyx.app/"> <img width="50%" src="https://github.com/onyx-dot-app/onyx/blob/logo/LogoOnyx.png?raw=true)" /></a>
<a href="https://www.onyx.app/"> <img width="50%" src="https://github.com/onyx-dot-app/onyx/blob/logo/OnyxLogoCropped.jpg?raw=true)" /></a>
</h2>
<p align="center">
@@ -13,7 +13,7 @@
<a href="https://docs.onyx.app/" target="_blank">
<img src="https://img.shields.io/badge/docs-view-blue" alt="Documentation">
</a>
<a href="https://join.slack.com/t/danswer/shared_invite/zt-1w76msxmd-HJHLe3KNFIAIzk_0dSOKaQ" target="_blank">
<a href="https://join.slack.com/t/onyx-dot-app/shared_invite/zt-2twesxdr6-5iQitKZQpgq~hYIZ~dv3KA" target="_blank">
<img src="https://img.shields.io/badge/slack-join-blue.svg?logo=slack" alt="Slack">
</a>
<a href="https://discord.gg/TDJ59cGV2X" target="_blank">
@@ -24,7 +24,7 @@
</a>
</p>
<strong>[Onyx](https://www.onyx.app/)</strong> (Formerly Danswer) is the AI Assistant connected to your company's docs, apps, and people.
<strong>[Onyx](https://www.onyx.app/)</strong> (formerly Danswer) is the AI Assistant connected to your company's docs, apps, and people.
Onyx provides a Chat interface and plugs into any LLM of your choice. Onyx can be deployed anywhere and for any
scale - on a laptop, on-premise, or to cloud. Since you own the deployment, your user data and chats are fully in your
own control. Onyx is dual Licensed with most of it under MIT license and designed to be modular and easily extensible. The system also comes fully ready
@@ -133,15 +133,3 @@ Looking to contribute? Please check out the [Contribution Guide](CONTRIBUTING.md
## ⭐Star History
[![Star History Chart](https://api.star-history.com/svg?repos=onyx-dot-app/onyx&type=Date)](https://star-history.com/#onyx-dot-app/onyx&Date)
## ✨Contributors
<a href="https://github.com/onyx-dot-app/onyx/graphs/contributors">
<img alt="contributors" src="https://contrib.rocks/image?repo=onyx-dot-app/onyx"/>
</a>
<p align="right" style="font-size: 14px; color: #555; margin-top: 20px;">
<a href="#readme-top" style="text-decoration: none; color: #007bff; font-weight: bold;">
↑ Back to Top ↑
</a>
</p>

backend/.gitignore (1 change)

@@ -9,3 +9,4 @@ api_keys.py
vespa-app.zip
dynamic_config_storage/
celerybeat-schedule*
onyx/connectors/salesforce/data/


@@ -4,7 +4,7 @@ from onyx.configs.app_configs import USE_IAM_AUTH
from onyx.configs.app_configs import POSTGRES_HOST
from onyx.configs.app_configs import POSTGRES_PORT
from onyx.configs.app_configs import POSTGRES_USER
from onyx.configs.app_configs import AWS_REGION
from onyx.configs.app_configs import AWS_REGION_NAME
from onyx.db.engine import build_connection_string
from onyx.db.engine import get_all_tenant_ids
from sqlalchemy import event
@@ -120,7 +120,7 @@ def provide_iam_token_for_alembic(
) -> None:
if USE_IAM_AUTH:
# Database connection settings
region = AWS_REGION
region = AWS_REGION_NAME
host = POSTGRES_HOST
port = POSTGRES_PORT
user = POSTGRES_USER


@@ -0,0 +1,29 @@
"""add shortcut option for users
Revision ID: 027381bce97c
Revises: 6fc7886d665d
Create Date: 2025-01-14 12:14:00.814390
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "027381bce97c"
down_revision = "6fc7886d665d"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.add_column(
"user",
sa.Column(
"shortcut_enabled", sa.Boolean(), nullable=False, server_default="true"
),
)
def downgrade() -> None:
op.drop_column("user", "shortcut_enabled")
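
To try this revision locally it can be applied or rolled back through Alembic's Python API — a minimal sketch, assuming `alembic.ini` lives under `backend/` as in a typical checkout; the revision IDs are the ones declared in the migration above:

```python
from alembic import command
from alembic.config import Config

cfg = Config("backend/alembic.ini")  # hypothetical path to the project's alembic.ini

# Apply up to (and including) the "add shortcut option for users" revision.
command.upgrade(cfg, "027381bce97c")

# Roll back to the previous revision, dropping the shortcut_enabled column again.
command.downgrade(cfg, "6fc7886d665d")
```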


@@ -0,0 +1,29 @@
"""agent_doc_result_col
Revision ID: 1adf5ea20d2b
Revises: e9cf2bd7baed
Create Date: 2025-01-05 13:14:58.344316
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "1adf5ea20d2b"
down_revision = "e9cf2bd7baed"
branch_labels = None
depends_on = None
def upgrade() -> None:
# Add the new column with JSONB type
op.add_column(
"sub_question",
sa.Column("sub_question_doc_results", postgresql.JSONB(), nullable=True),
)
def downgrade() -> None:
# Drop the column
op.drop_column("sub_question", "sub_question_doc_results")


@@ -0,0 +1,24 @@
"""add chunk count to document
Revision ID: 2955778aa44c
Revises: c0aab6edb6dd
Create Date: 2025-01-04 11:39:43.268612
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "2955778aa44c"
down_revision = "c0aab6edb6dd"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.add_column("document", sa.Column("chunk_count", sa.Integer(), nullable=True))
def downgrade() -> None:
op.drop_column("document", "chunk_count")


@@ -0,0 +1,35 @@
"""add composite index for index attempt time updated
Revision ID: 369644546676
Revises: 2955778aa44c
Create Date: 2025-01-08 15:38:17.224380
"""
from alembic import op
from sqlalchemy import text
# revision identifiers, used by Alembic.
revision = "369644546676"
down_revision = "2955778aa44c"
branch_labels: None = None
depends_on: None = None
def upgrade() -> None:
op.create_index(
"ix_index_attempt_ccpair_search_settings_time_updated",
"index_attempt",
[
"connector_credential_pair_id",
"search_settings_id",
text("time_updated DESC"),
],
unique=False,
)
def downgrade() -> None:
op.drop_index(
"ix_index_attempt_ccpair_search_settings_time_updated",
table_name="index_attempt",
)


@@ -0,0 +1,58 @@
"""add back input prompts
Revision ID: 3c6531f32351
Revises: aeda5f2df4f6
Create Date: 2025-01-13 12:49:51.705235
"""
from alembic import op
import sqlalchemy as sa
import fastapi_users_db_sqlalchemy
# revision identifiers, used by Alembic.
revision = "3c6531f32351"
down_revision = "aeda5f2df4f6"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.create_table(
"inputprompt",
sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
sa.Column("prompt", sa.String(), nullable=False),
sa.Column("content", sa.String(), nullable=False),
sa.Column("active", sa.Boolean(), nullable=False),
sa.Column("is_public", sa.Boolean(), nullable=False),
sa.Column(
"user_id",
fastapi_users_db_sqlalchemy.generics.GUID(),
nullable=True,
),
sa.ForeignKeyConstraint(
["user_id"],
["user.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"inputprompt__user",
sa.Column("input_prompt_id", sa.Integer(), nullable=False),
sa.Column(
"user_id", fastapi_users_db_sqlalchemy.generics.GUID(), nullable=False
),
sa.ForeignKeyConstraint(
["input_prompt_id"],
["inputprompt.id"],
),
sa.ForeignKeyConstraint(
["user_id"],
["user.id"],
),
sa.PrimaryKeyConstraint("input_prompt_id", "user_id"),
)
def downgrade() -> None:
op.drop_table("inputprompt__user")
op.drop_table("inputprompt")


@@ -0,0 +1,79 @@
"""make categories labels and many to many
Revision ID: 6fc7886d665d
Revises: 3c6531f32351
Create Date: 2025-01-13 18:12:18.029112
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "6fc7886d665d"
down_revision = "3c6531f32351"
branch_labels = None
depends_on = None
def upgrade() -> None:
# Rename persona_category table to persona_label
op.rename_table("persona_category", "persona_label")
# Create the new association table
op.create_table(
"persona__persona_label",
sa.Column("persona_id", sa.Integer(), nullable=False),
sa.Column("persona_label_id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["persona_id"],
["persona.id"],
),
sa.ForeignKeyConstraint(
["persona_label_id"],
["persona_label.id"],
),
sa.PrimaryKeyConstraint("persona_id", "persona_label_id"),
)
# Copy existing relationships to the new table
op.execute(
"""
INSERT INTO persona__persona_label (persona_id, persona_label_id)
SELECT id, category_id FROM persona WHERE category_id IS NOT NULL
"""
)
# Remove the old category_id column from persona table
op.drop_column("persona", "category_id")
def downgrade() -> None:
# Rename persona_label table back to persona_category
op.rename_table("persona_label", "persona_category")
# Add back the category_id column to persona table
op.add_column("persona", sa.Column("category_id", sa.Integer(), nullable=True))
op.create_foreign_key(
"persona_category_id_fkey",
"persona",
"persona_category",
["category_id"],
["id"],
)
# Copy the first label relationship back to the persona table
op.execute(
"""
UPDATE persona
SET category_id = (
SELECT persona_label_id
FROM persona__persona_label
WHERE persona__persona_label.persona_id = persona.id
LIMIT 1
)
"""
)
# Drop the association table
op.drop_table("persona__persona_label")


@@ -0,0 +1,35 @@
"""agent_metric_col_rename__s
Revision ID: 925b58bd75b6
Revises: 9787be927e58
Create Date: 2025-01-06 11:20:26.752441
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "925b58bd75b6"
down_revision = "9787be927e58"
branch_labels = None
depends_on = None
def upgrade() -> None:
# Rename columns using PostgreSQL syntax
op.alter_column(
"agent__search_metrics", "base_duration_s", new_column_name="base_duration__s"
)
op.alter_column(
"agent__search_metrics", "full_duration_s", new_column_name="full_duration__s"
)
def downgrade() -> None:
# Revert the column renames
op.alter_column(
"agent__search_metrics", "base_duration__s", new_column_name="base_duration_s"
)
op.alter_column(
"agent__search_metrics", "full_duration__s", new_column_name="full_duration_s"
)


@@ -0,0 +1,25 @@
"""agent_metric_table_renames__agent__
Revision ID: 9787be927e58
Revises: bceb76d618ec
Create Date: 2025-01-06 11:01:44.210160
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "9787be927e58"
down_revision = "bceb76d618ec"
branch_labels = None
depends_on = None
def upgrade() -> None:
# Rename table from agent_search_metrics to agent__search_metrics
op.rename_table("agent_search_metrics", "agent__search_metrics")
def downgrade() -> None:
# Rename table back from agent__search_metrics to agent_search_metrics
op.rename_table("agent__search_metrics", "agent_search_metrics")


@@ -0,0 +1,42 @@
"""agent_tracking
Revision ID: 98a5008d8711
Revises: 027381bce97c
Create Date: 2025-01-04 14:41:52.732238
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "98a5008d8711"
down_revision = "027381bce97c"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.create_table(
"agent_search_metrics",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("user_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("persona_id", sa.Integer(), nullable=True),
sa.Column("agent_type", sa.String(), nullable=False),
sa.Column("start_time", sa.DateTime(timezone=True), nullable=False),
sa.Column("base_duration_s", sa.Float(), nullable=False),
sa.Column("full_duration_s", sa.Float(), nullable=False),
sa.Column("base_metrics", postgresql.JSONB(), nullable=True),
sa.Column("refined_metrics", postgresql.JSONB(), nullable=True),
sa.Column("all_metrics", postgresql.JSONB(), nullable=True),
sa.ForeignKeyConstraint(
["persona_id"],
["persona.id"],
),
sa.ForeignKeyConstraint(["user_id"], ["user.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
)
def downgrade() -> None:
op.drop_table("agent_search_metrics")


@@ -0,0 +1,27 @@
"""add pinned assistants
Revision ID: aeda5f2df4f6
Revises: 369644546676
Create Date: 2025-01-09 16:04:10.770636
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "aeda5f2df4f6"
down_revision = "369644546676"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.add_column(
"user", sa.Column("pinned_assistants", postgresql.JSONB(), nullable=True)
)
op.execute('UPDATE "user" SET pinned_assistants = chosen_assistants')
def downgrade() -> None:
op.drop_column("user", "pinned_assistants")

View File

@@ -0,0 +1,84 @@
"""agent_table_renames__agent__
Revision ID: bceb76d618ec
Revises: c0132518a25b
Create Date: 2025-01-06 10:50:48.109285
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "bceb76d618ec"
down_revision = "c0132518a25b"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.drop_constraint(
"sub_query__search_doc_sub_query_id_fkey",
"sub_query__search_doc",
type_="foreignkey",
)
op.drop_constraint(
"sub_query__search_doc_search_doc_id_fkey",
"sub_query__search_doc",
type_="foreignkey",
)
# Rename tables
op.rename_table("sub_query", "agent__sub_query")
op.rename_table("sub_question", "agent__sub_question")
op.rename_table("sub_query__search_doc", "agent__sub_query__search_doc")
# Update both foreign key constraints for agent__sub_query__search_doc
# Create new foreign keys with updated names
op.create_foreign_key(
"agent__sub_query__search_doc_sub_query_id_fkey",
"agent__sub_query__search_doc",
"agent__sub_query",
["sub_query_id"],
["id"],
)
op.create_foreign_key(
"agent__sub_query__search_doc_search_doc_id_fkey",
"agent__sub_query__search_doc",
"search_doc", # This table name doesn't change
["search_doc_id"],
["id"],
)
def downgrade() -> None:
# Update foreign key constraints for sub_query__search_doc
op.drop_constraint(
"agent__sub_query__search_doc_sub_query_id_fkey",
"agent__sub_query__search_doc",
type_="foreignkey",
)
op.drop_constraint(
"agent__sub_query__search_doc_search_doc_id_fkey",
"agent__sub_query__search_doc",
type_="foreignkey",
)
# Rename tables back
op.rename_table("agent__sub_query__search_doc", "sub_query__search_doc")
op.rename_table("agent__sub_question", "sub_question")
op.rename_table("agent__sub_query", "sub_query")
op.create_foreign_key(
"sub_query__search_doc_sub_query_id_fkey",
"sub_query__search_doc",
"sub_query",
["sub_query_id"],
["id"],
)
op.create_foreign_key(
"sub_query__search_doc_search_doc_id_fkey",
"sub_query__search_doc",
"search_doc", # This table name doesn't change
["search_doc_id"],
["id"],
)
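
PostgreSQL keeps existing constraint names when a table is renamed, so without the drop/create pair the association table would still carry the old sub_query__search_doc_*_fkey names after the rename; recreating the foreign keys keeps the constraint names in line with the new table names. A minimal sketch (assuming a SQLAlchemy engine for the migrated database) that lists the constraint names actually present:

import sqlalchemy as sa


def association_fk_names(engine: sa.engine.Engine) -> list[str]:
    inspector = sa.inspect(engine)
    # Reflect the foreign keys on the renamed association table.
    return [
        fk["name"]
        for fk in inspector.get_foreign_keys("agent__sub_query__search_doc")
    ]

# After the upgrade this should report the agent__sub_query__search_doc_*_fkey names.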

View File

@@ -0,0 +1,40 @@
"""agent_table_changes_rename_level
Revision ID: c0132518a25b
Revises: 1adf5ea20d2b
Create Date: 2025-01-05 16:38:37.660152
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "c0132518a25b"
down_revision = "1adf5ea20d2b"
branch_labels = None
depends_on = None
def upgrade() -> None:
# Add level and level_question_nr columns with NOT NULL constraint
op.add_column(
"sub_question",
sa.Column("level", sa.Integer(), nullable=False, server_default="0"),
)
op.add_column(
"sub_question",
sa.Column(
"level_question_nr", sa.Integer(), nullable=False, server_default="0"
),
)
# Remove the server_default after the columns are created
op.alter_column("sub_question", "level", server_default=None)
op.alter_column("sub_question", "level_question_nr", server_default=None)
def downgrade() -> None:
# Remove the columns
op.drop_column("sub_question", "level_question_nr")
op.drop_column("sub_question", "level")

View File

@@ -0,0 +1,68 @@
"""create pro search persistence tables
Revision ID: e9cf2bd7baed
Revises: 98a5008d8711
Create Date: 2025-01-02 17:55:56.544246
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import UUID
# revision identifiers, used by Alembic.
revision = "e9cf2bd7baed"
down_revision = "98a5008d8711"
branch_labels = None
depends_on = None
def upgrade() -> None:
# Create sub_question table
op.create_table(
"sub_question",
sa.Column("id", sa.Integer, primary_key=True),
sa.Column("primary_question_id", sa.Integer, sa.ForeignKey("chat_message.id")),
sa.Column(
"chat_session_id", UUID(as_uuid=True), sa.ForeignKey("chat_session.id")
),
sa.Column("sub_question", sa.Text),
sa.Column(
"time_created", sa.DateTime(timezone=True), server_default=sa.func.now()
),
sa.Column("sub_answer", sa.Text),
)
# Create sub_query table
op.create_table(
"sub_query",
sa.Column("id", sa.Integer, primary_key=True),
sa.Column("parent_question_id", sa.Integer, sa.ForeignKey("sub_question.id")),
sa.Column(
"chat_session_id", UUID(as_uuid=True), sa.ForeignKey("chat_session.id")
),
sa.Column("sub_query", sa.Text),
sa.Column(
"time_created", sa.DateTime(timezone=True), server_default=sa.func.now()
),
)
# Create sub_query__search_doc association table
op.create_table(
"sub_query__search_doc",
sa.Column(
"sub_query_id", sa.Integer, sa.ForeignKey("sub_query.id"), primary_key=True
),
sa.Column(
"search_doc_id",
sa.Integer,
sa.ForeignKey("search_doc.id"),
primary_key=True,
),
)
def downgrade() -> None:
op.drop_table("sub_query__search_doc")
op.drop_table("sub_query")
op.drop_table("sub_question")

View File

@@ -0,0 +1,31 @@
"""mapping for anonymous user path
Revision ID: a4f6ee863c47
Revises: 14a83a331951
Create Date: 2025-01-04 14:16:58.697451
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "a4f6ee863c47"
down_revision = "14a83a331951"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.create_table(
"tenant_anonymous_user_path",
sa.Column("tenant_id", sa.String(), primary_key=True, nullable=False),
sa.Column("anonymous_user_path", sa.String(), nullable=False),
sa.PrimaryKeyConstraint("tenant_id"),
sa.UniqueConstraint("anonymous_user_path"),
)
def downgrade() -> None:
op.drop_table("tenant_anonymous_user_path")

536
backend/chatt.txt Normal file
View File

@@ -0,0 +1,536 @@
"{\"user_message_id\": 475, \"reserved_assistant_message_id\": 476}\n"
"{\"sub_question\": \"What\", \"level\": 0, \"level_question_nr\": 1}\n"
"{\"sub_query\": \"ony\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_question\": \" is\", \"level\": 0, \"level_question_nr\": 1}\n"
"{\"sub_query\": \"x\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_question\": \" On\", \"level\": 0, \"level_question_nr\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_question\": \"yx\", \"level\": 0, \"level_question_nr\": 1}\n"
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 1}\n"
"{\"sub_query\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_question\": \"1\", \"level\": 0, \"level_question_nr\": 1}\n"
"{\"sub_query\": \" features\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_question\": \"?\", \"level\": 0, \"level_question_nr\": 1}\n"
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 1}\n"
"{\"sub_question\": \"\", \"level\": 0, \"level_question_nr\": 2}\n"
"{\"sub_question\": \"What\", \"level\": 0, \"level_question_nr\": 2}\n"
"{\"sub_question\": \" is\", \"level\": 0, \"level_question_nr\": 2}\n"
"{\"sub_query\": \" specifications\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_question\": \" On\", \"level\": 0, \"level_question_nr\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_question\": \"yx\", \"level\": 0, \"level_question_nr\": 2}\n"
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 2}\n"
"{\"sub_question\": \"2\", \"level\": 0, \"level_question_nr\": 2}\n"
"{\"sub_query\": \"ony\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_question\": \"?\", \"level\": 0, \"level_question_nr\": 2}\n"
"{\"sub_query\": \"x\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_question\": \"\", \"level\": 0, \"level_question_nr\": 3}\n"
"{\"sub_query\": \"2\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_question\": \"What\", \"level\": 0, \"level_question_nr\": 3}\n"
"{\"sub_query\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_question\": \" is\", \"level\": 0, \"level_question_nr\": 3}\n"
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_question\": \" On\", \"level\": 0, \"level_question_nr\": 3}\n"
"{\"sub_query\": \" use\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_question\": \"yx\", \"level\": 0, \"level_question_nr\": 3}\n"
"{\"sub_query\": \" cases\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 3}\n"
"{\"sub_question\": \"3\", \"level\": 0, \"level_question_nr\": 3}\n"
"{\"sub_question\": \"?\", \"level\": 0, \"level_question_nr\": 3}\n"
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 3}\n"
"{\"sub_question\": \"\", \"level\": 0, \"level_question_nr\": 4}\n"
"{\"sub_question\": \"What\", \"level\": 0, \"level_question_nr\": 4}\n"
"{\"sub_question\": \" is\", \"level\": 0, \"level_question_nr\": 4}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_question\": \" On\", \"level\": 0, \"level_question_nr\": 4}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_question\": \"yx\", \"level\": 0, \"level_question_nr\": 4}\n"
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 4}\n"
"{\"sub_query\": \"ony\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_question\": \"4\", \"level\": 0, \"level_question_nr\": 4}\n"
"{\"sub_question\": \"?\", \"level\": 0, \"level_question_nr\": 4}\n"
"{\"sub_query\": \"x\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 4}\n"
"{\"sub_question\": \"\", \"level\": 0, \"level_question_nr\": 4}\n"
"{\"sub_query\": \"3\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \"4\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" comparison\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" differences\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
"{\"sub_query\": \"4\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_query\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
"{\"sub_query\": \" product\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_query\": \"3\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
"{\"sub_query\": \" information\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_query\": \" software\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
"{\"sub_query\": \" features\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
"{\"sub_query\": \" software\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
"{\"sub_query\": \"2\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
"{\"sub_query\": \" features\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_query\": \" software\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
"{\"sub_query\": \" features\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
"{\"sub_query\": \"4\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
"{\"sub_query\": \" applications\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
"{\"sub_query\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
"{\"sub_query\": \" features\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
"{\"sub_query\": \" in\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
"{\"sub_query\": \" industry\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
"{\"sub_query\": \" specifications\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
"{\"sub_query\": \"2\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
"{\"sub_query\": \" applications\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \"3\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" applications\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \" in\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \" industry\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
"{\"sub_query\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
"{\"sub_query\": \"4\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \" comparison\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \" with\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" previous\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \" use\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" versions\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \" in\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
"{\"sub_query\": \" industry\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"sub_query\": \"3\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"sub_query\": \" comparison\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"sub_query\": \"2\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
"{\"sub_query\": \" comparison\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
"{\"sub_query\": \" with\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"sub_query\": \" cases\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" previous\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" with\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
"{\"sub_query\": \" other\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
"{\"sub_query\": \" software\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
"{\"sub_query\": \" versions\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"top_documents\": [], \"rephrased_query\": \"What is Onyx 4?\", \"predicted_flow\": \"question-answer\", \"predicted_search\": \"keyword\", \"applied_source_filters\": null, \"applied_time_cutoff\": null, \"recency_bias_multiplier\": 0.5}\n"
"{\"llm_selected_doc_indices\": []}\n"
"{\"final_context_docs\": []}\n"
"{\"answer_piece\": \"I\", \"level\": 0, \"level_question_nr\": 3, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" don't\", \"level\": 0, \"level_question_nr\": 3, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" know\", \"level\": 0, \"level_question_nr\": 3, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 3, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" formerly\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" known\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" D\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"answer\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" an\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" AI\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" Assistant\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" that\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" connects\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" to\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" company's\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" documents\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" personnel\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" It\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" provides\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" chat\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" interface\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" can\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" integrate\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" with\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" any\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" large\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" language\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" model\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" (\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"LL\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"M\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \")\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"top_documents\": [], \"rephrased_query\": \"What is Onyx 2?\", \"predicted_flow\": \"question-answer\", \"predicted_search\": \"keyword\", \"applied_source_filters\": null, \"applied_time_cutoff\": null, \"recency_bias_multiplier\": 0.5}\n"
"{\"llm_selected_doc_indices\": []}\n"
"{\"final_context_docs\": []}\n"
"{\"answer_piece\": \" of\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" choice\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" designed\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" to\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" be\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" modular\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" easily\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" extens\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"ible\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" allowing\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" for\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" deployment\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" on\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" various\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" platforms\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" including\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" laptops\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" on\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"-prem\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"ise\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" or\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" cloud\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" environments\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" It\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" ensures\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" that\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" user\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" data\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" chats\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" remain\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"I\", \"level\": 0, \"level_question_nr\": 1, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" under\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" don't\", \"level\": 0, \"level_question_nr\": 1, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" the\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" user's\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" know\", \"level\": 0, \"level_question_nr\": 1, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 1, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" control\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" the\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" deployment\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" owned\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" by\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" the\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" user\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" MIT\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" licensed\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" comes\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" ready\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" for\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" production\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" use\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" featuring\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" user\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" authentication\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" role\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" management\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" chat\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" persistence\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" user\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" interface\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" for\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" configuring\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" AI\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" Assist\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"ants\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" their\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" prompts\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" Additionally\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" serves\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" unified\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" search\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" tool\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" across\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" common\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" workplace\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" like\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" Slack\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" Google\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" Drive\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" Con\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"fluence\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" enabling\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" it\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" to\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" act\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" subject\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" matter\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" expert\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" for\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" teams\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" by\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" combining\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" L\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"LM\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"s\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" with\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" team\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"-specific\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" knowledge\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" [[1]]()\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"top_documents\": [], \"rephrased_query\": \"What is Onyx 3?\", \"predicted_flow\": \"question-answer\", \"predicted_search\": \"keyword\", \"applied_source_filters\": null, \"applied_time_cutoff\": null, \"recency_bias_multiplier\": 0.5}\n"
"{\"llm_selected_doc_indices\": []}\n"
"{\"final_context_docs\": []}\n"
"{\"answer_piece\": \"I\", \"level\": 0, \"level_question_nr\": 2, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" don't\", \"level\": 0, \"level_question_nr\": 2, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" know\", \"level\": 0, \"level_question_nr\": 2, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 2, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"top_documents\": [{\"document_id\": \"https://docs.onyx.app/introduction\", \"chunk_ind\": 0, \"semantic_identifier\": \"Introduction - Onyx Documentation\", \"link\": \"https://docs.onyx.app/introduction\", \"blurb\": \"Onyx Documentation home page\\nSearch...\\nNavigation\\nWelcome to Onyx\\nIntroduction\\nWelcome to Onyx\\nIntroduction\\nOnyx Overview\\n\\nWhat is Onyx\\nOnyx (Formerly Danswer) is the AI Assistant connected to your companys docs, apps, and people. Onyx provides a Chat interface and plugs into any LLM of your choice. Onyx can be deployed anywhere and for any scale - on a laptop, on-premise, or to cloud. Since you own the deployment, your user data and chats are fully in your own control. Onyx is MIT licensed and designed to be modular and easily extensible.\", \"source_type\": \"web\", \"boost\": 0, \"hidden\": false, \"metadata\": {}, \"score\": 0.6275177643886491, \"is_relevant\": null, \"relevance_explanation\": null, \"match_highlights\": [\"\", \"such as A customer wants feature X, is this already supported? or Wheres the pull request for feature Y?\\n<hi>Onyx</hi> can also be plugged into existing tools like Slack to get answers and AI chats directly in Slack.\\n\\nDemo\\n\\nMain <hi>Features</hi> \\n- Chat UI with the ability to select documents to chat with.\\n- Create custom AI Assistants\", \"\"], \"updated_at\": null, \"primary_owners\": null, \"secondary_owners\": null, \"is_internet\": false, \"db_doc_id\": 35923}], \"rephrased_query\": \"what is onyx 1, 2, 3, 4\", \"predicted_flow\": \"question-answer\", \"predicted_search\": \"keyword\", \"applied_source_filters\": null, \"applied_time_cutoff\": null, \"recency_bias_multiplier\": 0.5}\n"
"{\"llm_selected_doc_indices\": []}\n"
"{\"final_context_docs\": [{\"document_id\": \"https://docs.onyx.app/introduction\", \"content\": \"Onyx Documentation home page\\nSearch...\\nNavigation\\nWelcome to Onyx\\nIntroduction\\nWelcome to Onyx\\nIntroduction\\nOnyx Overview\\n\\nWhat is Onyx\\nOnyx (Formerly Danswer) is the AI Assistant connected to your companys docs, apps, and people. Onyx provides a Chat interface and plugs into any LLM of your choice. Onyx can be deployed anywhere and for any scale - on a laptop, on-premise, or to cloud. Since you own the deployment, your user data and chats are fully in your own control. Onyx is MIT licensed and designed to be modular and easily extensible. The system also comes fully ready for production usage with user authentication, role management (admin/basic users), chat persistence, and a UI for configuring Personas (AI Assistants) and their Prompts.\\nOnyx also serves as a Unified Search across all common workplace tools such as Slack, Google Drive, Confluence, etc. By combining LLMs and team specific knowledge, Onyx becomes a subject matter expert for the team. Its like ChatGPT if it had access to your teams unique knowledge! It enables questions such as A customer wants feature X, is this already supported? or Wheres the pull request for feature Y?\\nOnyx can also be plugged into existing tools like Slack to get answers and AI chats directly in Slack.\\n\\nDemo\\n\\nMain Features \\n- Chat UI with the ability to select documents to chat with.\\n- Create custom AI Assistants with different prompts and backing knowledge sets.\\n- Connect Onyx with LLM of your choice (self-host for a fully airgapped solution).\\n- Document Search + AI Answers for natural language queries.\\n- Connectors to all common workplace tools like Google Drive, Confluence, Slack, etc.\\n- Slack integration to get answers and search results directly in Slack.\\n\\nUpcoming\\n- Chat/Prompt sharing with specific teammates and user groups.\\n- Multi-modal model support, chat with images, video etc.\\n- Choosing between LLMs and parameters during chat session.\\n- Tool calling and agent configurations options.\\n- Organizational understanding and ability to locate and suggest experts from your team.\\n\\nOther Noteable Benefits of Onyx\\n- User Authentication with document level access management.\\n- Best in class Hybrid Search across all sources (BM-25 + prefix aware embedding models).\\n- Admin Dashboard to configure connectors, document-sets, access, etc.\\n- Custom deep learning models + learn from user feedback.\\n- Easy deployment and ability to host Onyx anywhere of your choosing.\\nQuickstart\", \"blurb\": \"Onyx Documentation home page\\nSearch...\\nNavigation\\nWelcome to Onyx\\nIntroduction\\nWelcome to Onyx\\nIntroduction\\nOnyx Overview\\n\\nWhat is Onyx\\nOnyx (Formerly Danswer) is the AI Assistant connected to your companys docs, apps, and people. Onyx provides a Chat interface and plugs into any LLM of your choice. Onyx can be deployed anywhere and for any scale - on a laptop, on-premise, or to cloud. Since you own the deployment, your user data and chats are fully in your own control. Onyx is MIT licensed and designed to be modular and easily extensible.\", \"semantic_identifier\": \"Introduction - Onyx Documentation\", \"source_type\": \"web\", \"metadata\": {}, \"updated_at\": null, \"link\": \"https://docs.onyx.app/introduction\", \"source_links\": {\"0\": \"https://docs.onyx.app/introduction\"}, \"match_highlights\": [\"\", \"such as A customer wants feature X, is this already supported? 
or Wheres the pull request for feature Y?\\n<hi>Onyx</hi> can also be plugged into existing tools like Slack to get answers and AI chats directly in Slack.\\n\\nDemo\\n\\nMain <hi>Features</hi> \\n- Chat UI with the ability to select documents to chat with.\\n- Create custom AI Assistants\", \"\"]}]}\n"
"{\"answer_piece\": \"I\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" cannot\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" reliably\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" answer\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" the\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" question\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" about\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"2\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"3\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"4\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" the\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" provided\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" information\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" only\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" describes\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" which\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" an\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" AI\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" Assistant\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" formerly\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" known\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" D\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"answer\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" connects\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" to\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" company's\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" documents\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" personnel\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" providing\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" chat\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" interface\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" integration\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" with\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" any\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" large\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" language\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" model\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" (\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"LL\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"M\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \")\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" of\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" choice\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" It\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" designed\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" to\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" be\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" modular\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" easily\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" extens\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"ible\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" can\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" be\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" deployed\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" on\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" various\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" platforms\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" while\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" ensuring\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" user\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" data\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" control\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" It\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" also\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" serves\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" unified\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" search\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" tool\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" across\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" common\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" workplace\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" like\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" Slack\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" Google\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" Drive\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" Con\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"fluence\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" acting\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" subject\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" matter\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" expert\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" for\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" teams\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" [[1]]()\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"{{1}}\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"There\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" no\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" information\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" available\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" regarding\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"2\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"3\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" or\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"4\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" so\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" I\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" cannot\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" provide\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" details\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" about\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" them\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"citations\": []}\n"
"{\"message_id\": 476, \"parent_message\": 475, \"latest_child_message\": null, \"message\": \"I cannot reliably answer the question about Onyx 2, 3, and 4, as the provided information only describes Onyx 1, which is an AI Assistant formerly known as Danswer. Onyx 1 connects to a company's documents, applications, and personnel, providing a chat interface and integration with any large language model (LLM) of choice. It is designed to be modular, easily extensible, and can be deployed on various platforms while ensuring user data control. It also serves as a unified search tool across common workplace applications like Slack, Google Drive, and Confluence, acting as a subject matter expert for teams [[1]](){{1}}There is no information available regarding Onyx 2, 3, or 4, so I cannot provide details about them.\", \"rephrased_query\": \"what is onyx 1, 2, 3, 4\", \"context_docs\": {\"top_documents\": [{\"document_id\": \"https://docs.onyx.app/introduction\", \"chunk_ind\": 0, \"semantic_identifier\": \"Introduction - Onyx Documentation\", \"link\": \"https://docs.onyx.app/introduction\", \"blurb\": \"Onyx Documentation home page\\nSearch...\\nNavigation\\nWelcome to Onyx\\nIntroduction\\nWelcome to Onyx\\nIntroduction\\nOnyx Overview\\n\\nWhat is Onyx\\nOnyx (Formerly Danswer) is the AI Assistant connected to your companys docs, apps, and people. Onyx provides a Chat interface and plugs into any LLM of your choice. Onyx can be deployed anywhere and for any scale - on a laptop, on-premise, or to cloud. Since you own the deployment, your user data and chats are fully in your own control. Onyx is MIT licensed and designed to be modular and easily extensible.\", \"source_type\": \"web\", \"boost\": 0, \"hidden\": false, \"metadata\": {}, \"score\": 0.6275177643886491, \"is_relevant\": null, \"relevance_explanation\": null, \"match_highlights\": [\"\", \"such as A customer wants feature X, is this already supported? or Wheres the pull request for feature Y?\\n<hi>Onyx</hi> can also be plugged into existing tools like Slack to get answers and AI chats directly in Slack.\\n\\nDemo\\n\\nMain <hi>Features</hi> \\n- Chat UI with the ability to select documents to chat with.\\n- Create custom AI Assistants\", \"\"], \"updated_at\": null, \"primary_owners\": null, \"secondary_owners\": null, \"is_internet\": false, \"db_doc_id\": 35923}]}, \"message_type\": \"assistant\", \"time_sent\": \"2025-01-12T05:37:18.318251+00:00\", \"overridden_model\": \"gpt-4o\", \"alternate_assistant_id\": 0, \"chat_session_id\": \"40f91916-7419-48d1-9681-5882b0869d88\", \"citations\": {}, \"sub_questions\": [], \"files\": [], \"tool_call\": null}\n"

View File

@@ -3,6 +3,10 @@ from sqlalchemy.orm import Session
from ee.onyx.db.external_perm import fetch_external_groups_for_user
from ee.onyx.db.user_group import fetch_user_groups_for_documents
from ee.onyx.db.user_group import fetch_user_groups_for_user
from ee.onyx.external_permissions.post_query_censoring import (
DOC_SOURCE_TO_CHUNK_CENSORING_FUNCTION,
)
from ee.onyx.external_permissions.sync_params import DOC_PERMISSIONS_FUNC_MAP
from onyx.access.access import (
_get_access_for_documents as get_access_for_documents_without_groups,
)
@@ -10,6 +14,7 @@ from onyx.access.access import _get_acl_for_user as get_acl_for_user_without_gro
from onyx.access.models import DocumentAccess
from onyx.access.utils import prefix_external_group
from onyx.access.utils import prefix_user_group
from onyx.db.document import get_document_sources
from onyx.db.document import get_documents_by_ids
from onyx.db.models import User
@@ -52,9 +57,20 @@ def _get_access_for_documents(
)
doc_id_map = {doc.id: doc for doc in documents}
# Get all sources in one batch
doc_id_to_source_map = get_document_sources(
db_session=db_session,
document_ids=document_ids,
)
access_map = {}
for document_id, non_ee_access in non_ee_access_dict.items():
document = doc_id_map[document_id]
source = doc_id_to_source_map.get(document_id)
is_only_censored = (
source in DOC_SOURCE_TO_CHUNK_CENSORING_FUNCTION
and source not in DOC_PERMISSIONS_FUNC_MAP
)
ext_u_emails = (
set(document.external_user_emails)
@@ -70,7 +86,11 @@ def _get_access_for_documents(
# If the document is determined to be "public" externally (through a SYNC connector)
# then it's given the same access level as if it were marked public within Onyx
is_public_anywhere = document.is_public or non_ee_access.is_public
# If it's censored, then it's treated as public anywhere during the search and permissions
# are applied after the search
is_public_anywhere = (
document.is_public or non_ee_access.is_public or is_only_censored
)
# To avoid collisions of group namings between connectors, they need to be prefixed
access_map[document_id] = DocumentAccess(

View File

@@ -1,5 +1,7 @@
from datetime import datetime
from functools import lru_cache
import jwt
import requests
from fastapi import Depends
from fastapi import HTTPException
@@ -20,6 +22,7 @@ from ee.onyx.server.seeding import get_seed_config
from ee.onyx.utils.secrets import extract_hashed_cookie
from onyx.auth.users import current_admin_user
from onyx.configs.app_configs import AUTH_TYPE
from onyx.configs.app_configs import USER_AUTH_SECRET
from onyx.configs.constants import AuthType
from onyx.db.models import User
from onyx.utils.logger import setup_logger
@@ -118,3 +121,17 @@ async def current_cloud_superuser(
detail="Access denied. User must be a cloud superuser to perform this action.",
)
return user
def generate_anonymous_user_jwt_token(tenant_id: str) -> str:
payload = {
"tenant_id": tenant_id,
# Token does not expire
"iat": datetime.utcnow(), # Issued at time
}
return jwt.encode(payload, USER_AUTH_SECRET, algorithm="HS256")
def decode_anonymous_user_jwt_token(token: str) -> dict:
return jwt.decode(token, USER_AUTH_SECRET, algorithms=["HS256"])
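A hedged sketch (not part of the diff) of how a request handler could resolve the tenant from the anonymous-user cookie using the decode helper above. The cookie name and import path are taken from later hunks in this compare; tenant_id_from_request itself is a hypothetical helper for illustration.

from fastapi import Request

from ee.onyx.auth.users import decode_anonymous_user_jwt_token

# Cookie name defined in ee/onyx/configs/app_configs.py later in this compare
ANONYMOUS_USER_COOKIE_NAME = "onyx_anonymous_user"

def tenant_id_from_request(request: Request) -> str | None:
    # Anonymous users carry a signed JWT whose only claim of interest is tenant_id
    token = request.cookies.get(ANONYMOUS_USER_COOKIE_NAME)
    if token is None:
        return None
    payload = decode_anonymous_user_jwt_token(token)
    return payload.get("tenant_id")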

View File

@@ -6,6 +6,7 @@ from sqlalchemy.orm import Session
from ee.onyx.db.user_group import delete_user_group
from ee.onyx.db.user_group import fetch_user_group
from ee.onyx.db.user_group import mark_user_group_as_synced
from ee.onyx.db.user_group import prepare_user_group_for_deletion
from onyx.background.celery.apps.app_base import task_logger
from onyx.redis.redis_usergroup import RedisUserGroup
from onyx.utils.logger import setup_logger
@@ -46,11 +47,20 @@ def monitor_usergroup_taskset(
user_group = fetch_user_group(db_session=db_session, user_group_id=usergroup_id)
if user_group:
usergroup_name = user_group.name
if user_group.is_up_for_deletion:
# this prepare should have been run when the deletion was scheduled,
# but run it again to be sure we're ready to go
mark_user_group_as_synced(db_session, user_group)
prepare_user_group_for_deletion(db_session, usergroup_id)
delete_user_group(db_session=db_session, user_group=user_group)
task_logger.info(f"Deleted usergroup. id='{usergroup_id}'")
task_logger.info(
f"Deleted usergroup: name={usergroup_name} id={usergroup_id}"
)
else:
mark_user_group_as_synced(db_session=db_session, user_group=user_group)
task_logger.info(f"Synced usergroup. id='{usergroup_id}'")
task_logger.info(
f"Synced usergroup. name={usergroup_name} id={usergroup_id}"
)
rug.reset()

View File

@@ -15,6 +15,12 @@ SAML_CONF_DIR = os.environ.get("SAML_CONF_DIR") or "/app/ee/onyx/configs/saml_co
CONFLUENCE_PERMISSION_GROUP_SYNC_FREQUENCY = int(
os.environ.get("CONFLUENCE_PERMISSION_GROUP_SYNC_FREQUENCY") or 5 * 60
)
# This is a boolean that determines if anonymous access is public
# Default behavior is to not make the page public and instead add a group
# that contains all the users that we found in Confluence
CONFLUENCE_ANONYMOUS_ACCESS_IS_PUBLIC = (
os.environ.get("CONFLUENCE_ANONYMOUS_ACCESS_IS_PUBLIC", "").lower() == "true"
)
# In seconds, default is 5 minutes
CONFLUENCE_PERMISSION_DOC_SYNC_FREQUENCY = int(
os.environ.get("CONFLUENCE_PERMISSION_DOC_SYNC_FREQUENCY") or 5 * 60
@@ -55,3 +61,5 @@ POSTHOG_API_KEY = os.environ.get("POSTHOG_API_KEY") or "FooBar"
POSTHOG_HOST = os.environ.get("POSTHOG_HOST") or "https://us.i.posthog.com"
HUBSPOT_TRACKING_URL = os.environ.get("HUBSPOT_TRACKING_URL")
ANONYMOUS_USER_COOKIE_NAME = "onyx_anonymous_user"

View File

@@ -2,6 +2,7 @@ import datetime
from collections.abc import Sequence
from uuid import UUID
from sqlalchemy import and_
from sqlalchemy import case
from sqlalchemy import cast
from sqlalchemy import Date
@@ -14,6 +15,9 @@ from onyx.configs.constants import MessageType
from onyx.db.models import ChatMessage
from onyx.db.models import ChatMessageFeedback
from onyx.db.models import ChatSession
from onyx.db.models import Persona
from onyx.db.models import User
from onyx.db.models import UserRole
def fetch_query_analytics(
@@ -234,3 +238,122 @@ def fetch_persona_unique_users(
)
return [tuple(row) for row in db_session.execute(query).all()]
def fetch_assistant_message_analytics(
db_session: Session,
assistant_id: int,
start: datetime.datetime,
end: datetime.datetime,
) -> list[tuple[int, datetime.date]]:
"""
Gets the daily message counts for a specific assistant in the given time range.
"""
query = (
select(
func.count(ChatMessage.id),
cast(ChatMessage.time_sent, Date),
)
.join(
ChatSession,
ChatMessage.chat_session_id == ChatSession.id,
)
.where(
or_(
ChatMessage.alternate_assistant_id == assistant_id,
ChatSession.persona_id == assistant_id,
),
ChatMessage.time_sent >= start,
ChatMessage.time_sent <= end,
ChatMessage.message_type == MessageType.ASSISTANT,
)
.group_by(cast(ChatMessage.time_sent, Date))
.order_by(cast(ChatMessage.time_sent, Date))
)
return [tuple(row) for row in db_session.execute(query).all()]
def fetch_assistant_unique_users(
db_session: Session,
assistant_id: int,
start: datetime.datetime,
end: datetime.datetime,
) -> list[tuple[int, datetime.date]]:
"""
Gets the daily unique user counts for a specific assistant in the given time range.
"""
query = (
select(
func.count(func.distinct(ChatSession.user_id)),
cast(ChatMessage.time_sent, Date),
)
.join(
ChatSession,
ChatMessage.chat_session_id == ChatSession.id,
)
.where(
or_(
ChatMessage.alternate_assistant_id == assistant_id,
ChatSession.persona_id == assistant_id,
),
ChatMessage.time_sent >= start,
ChatMessage.time_sent <= end,
ChatMessage.message_type == MessageType.ASSISTANT,
)
.group_by(cast(ChatMessage.time_sent, Date))
.order_by(cast(ChatMessage.time_sent, Date))
)
return [tuple(row) for row in db_session.execute(query).all()]
def fetch_assistant_unique_users_total(
db_session: Session,
assistant_id: int,
start: datetime.datetime,
end: datetime.datetime,
) -> int:
"""
Gets the total number of distinct users who have sent or received messages from
the specified assistant in the given time range.
"""
query = (
select(func.count(func.distinct(ChatSession.user_id)))
.select_from(ChatMessage)
.join(
ChatSession,
ChatMessage.chat_session_id == ChatSession.id,
)
.where(
or_(
ChatMessage.alternate_assistant_id == assistant_id,
ChatSession.persona_id == assistant_id,
),
ChatMessage.time_sent >= start,
ChatMessage.time_sent <= end,
ChatMessage.message_type == MessageType.ASSISTANT,
)
)
result = db_session.execute(query).scalar()
return result if result else 0
# Users can view assistant stats if they created the persona,
# or if they are an admin
def user_can_view_assistant_stats(
db_session: Session, user: User | None, assistant_id: int
) -> bool:
# If user is None and auth is disabled, assume the user is an admin
if user is None or user.role == UserRole.ADMIN:
return True
# Check if the user created the persona
stmt = select(Persona).where(
and_(Persona.id == assistant_id, Persona.user_id == user.id)
)
persona = db_session.execute(stmt).scalar_one_or_none()
return persona is not None

View File

@@ -10,6 +10,7 @@ from onyx.access.utils import prefix_group_w_source
from onyx.configs.constants import DocumentSource
from onyx.db.models import User__ExternalUserGroupId
from onyx.db.users import batch_add_ext_perm_user_if_not_exists
from onyx.db.users import get_user_by_email
from onyx.utils.logger import setup_logger
logger = setup_logger()
@@ -106,3 +107,21 @@ def fetch_external_groups_for_user(
User__ExternalUserGroupId.user_id == user_id
)
).all()
def fetch_external_groups_for_user_email_and_group_ids(
db_session: Session,
user_email: str,
group_ids: list[str],
) -> list[User__ExternalUserGroupId]:
user = get_user_by_email(db_session=db_session, email=user_email)
if user is None:
return []
user_id = user.id
user_ext_groups = db_session.scalars(
select(User__ExternalUserGroupId).where(
User__ExternalUserGroupId.user_id == user_id,
User__ExternalUserGroupId.external_user_group_id.in_(group_ids),
)
).all()
return list(user_ext_groups)

View File

@@ -7,6 +7,7 @@ from sqlalchemy import select
from sqlalchemy.orm import aliased
from sqlalchemy.orm import Session
from onyx.configs.app_configs import DISABLE_AUTH
from onyx.configs.constants import TokenRateLimitScope
from onyx.db.models import TokenRateLimit
from onyx.db.models import TokenRateLimit__UserGroup
@@ -20,10 +21,11 @@ from onyx.server.token_rate_limits.models import TokenRateLimitArgs
def _add_user_filters(
stmt: Select, user: User | None, get_editable: bool = True
) -> Select:
# If user is None, assume the user is an admin or auth is disabled
if user is None or user.role == UserRole.ADMIN:
# If user is None and auth is disabled, assume the user is an admin
if (user is None and DISABLE_AUTH) or (user and user.role == UserRole.ADMIN):
return stmt
stmt = stmt.distinct()
TRLimit_UG = aliased(TokenRateLimit__UserGroup)
User__UG = aliased(User__UserGroup)
@@ -46,6 +48,12 @@ def _add_user_filters(
that the user isn't a curator for
- if we are not editing, we show all token_rate_limits in the groups the user curates
"""
# If user is None, this is an anonymous user and we should only show public token_rate_limits
if user is None:
where_clause = TokenRateLimit.scope == TokenRateLimitScope.GLOBAL
return stmt.where(where_clause)
where_clause = User__UG.user_id == user.id
if user.role == UserRole.CURATOR and get_editable:
where_clause &= User__UG.is_curator == True # noqa: E712

View File

@@ -122,7 +122,7 @@ def _cleanup_document_set__user_group_relationships__no_commit(
)
def validate_user_creation_permissions(
def validate_object_creation_for_user(
db_session: Session,
user: User | None,
target_group_ids: list[int] | None = None,
@@ -440,32 +440,108 @@ def remove_curator_status__no_commit(db_session: Session, user: User) -> None:
_validate_curator_status__no_commit(db_session, [user])
def update_user_curator_relationship(
def _validate_curator_relationship_update_requester(
db_session: Session,
user_group_id: int,
set_curator_request: SetCuratorRequest,
user_making_change: User | None = None,
) -> None:
user = fetch_user_by_id(db_session, set_curator_request.user_id)
if not user:
raise ValueError(f"User with id '{set_curator_request.user_id}' not found")
"""
This function validates that the user making the change has the necessary permissions
to update the curator relationship for the target user in the given user group.
"""
if user.role == UserRole.ADMIN:
if user_making_change is None or user_making_change.role == UserRole.ADMIN:
return
# check if the user making the change is a curator in the group they are changing the curator relationship for
user_making_change_curator_groups = fetch_user_groups_for_user(
db_session=db_session,
user_id=user_making_change.id,
# only restrict the check to groups where they are a curator if their role is CURATOR;
# otherwise they are a global_curator and can update the curator relationship
# for any group they are a member of
only_curator_groups=user_making_change.role == UserRole.CURATOR,
)
requestor_curator_group_ids = [
group.id for group in user_making_change_curator_groups
]
if user_group_id not in requestor_curator_group_ids:
raise ValueError(
f"User '{user.email}' is an admin and therefore has all permissions "
f"user making change {user_making_change.email} is not a curator,"
f" admin, or global_curator for group '{user_group_id}'"
)
def _validate_curator_relationship_update_request(
db_session: Session,
user_group_id: int,
target_user: User,
) -> None:
"""
This function validates that the curator_relationship_update request itself is valid.
"""
if target_user.role == UserRole.ADMIN:
raise ValueError(
f"User '{target_user.email}' is an admin and therefore has all permissions "
"of a curator. If you'd like this user to only have curator permissions, "
"you must update their role to BASIC then assign them to be CURATOR in the "
"appropriate groups."
)
elif target_user.role == UserRole.GLOBAL_CURATOR:
raise ValueError(
f"User '{target_user.email}' is a global_curator and therefore has all "
"permissions of a curator for all groups. If you'd like this user to only "
"have curator permissions for a specific group, you must update their role "
"to BASIC then assign them to be CURATOR in the appropriate groups."
)
elif target_user.role not in [UserRole.CURATOR, UserRole.BASIC]:
raise ValueError(
f"This endpoint can only be used to update the curator relationship for "
"users with the CURATOR or BASIC role. \n"
f"Target user: {target_user.email} \n"
f"Target user role: {target_user.role} \n"
)
# check if the target user is in the group they are changing the curator relationship for
requested_user_groups = fetch_user_groups_for_user(
db_session=db_session,
user_id=set_curator_request.user_id,
user_id=target_user.id,
only_curator_groups=False,
)
group_ids = [group.id for group in requested_user_groups]
if user_group_id not in group_ids:
raise ValueError(f"user is not in group '{user_group_id}'")
raise ValueError(
f"target user {target_user.email} is not in group '{user_group_id}'"
)
def update_user_curator_relationship(
db_session: Session,
user_group_id: int,
set_curator_request: SetCuratorRequest,
user_making_change: User | None = None,
) -> None:
target_user = fetch_user_by_id(db_session, set_curator_request.user_id)
if not target_user:
raise ValueError(f"User with id '{set_curator_request.user_id}' not found")
_validate_curator_relationship_update_request(
db_session=db_session,
user_group_id=user_group_id,
target_user=target_user,
)
_validate_curator_relationship_update_requester(
db_session=db_session,
user_group_id=user_group_id,
user_making_change=user_making_change,
)
logger.info(
f"user_making_change={user_making_change.email if user_making_change else 'None'} is "
f"updating the curator relationship for user={target_user.email} "
f"in group={user_group_id} to is_curator={set_curator_request.is_curator}"
)
relationship_to_update = (
db_session.query(User__UserGroup)
@@ -486,7 +562,7 @@ def update_user_curator_relationship(
)
db_session.add(relationship_to_update)
_validate_curator_status__no_commit(db_session, [user])
_validate_curator_status__no_commit(db_session, [target_user])
db_session.commit()

View File

@@ -0,0 +1,4 @@
# This is a group that we use to store all the users that we found in Confluence
# Instead of setting a page to public, we just add this group so that the page
# is only accessible to users who have confluence accounts.
ALL_CONF_EMAILS_GROUP_NAME = "All_Confluence_Users_Found_By_Onyx"

View File

@@ -4,6 +4,8 @@ https://confluence.atlassian.com/conf85/check-who-can-view-a-page-1283360557.htm
"""
from typing import Any
from ee.onyx.configs.app_configs import CONFLUENCE_ANONYMOUS_ACCESS_IS_PUBLIC
from ee.onyx.external_permissions.confluence.constants import ALL_CONF_EMAILS_GROUP_NAME
from onyx.access.models import DocExternalAccess
from onyx.access.models import ExternalAccess
from onyx.connectors.confluence.connector import ConfluenceConnector
@@ -22,7 +24,9 @@ _REQUEST_PAGINATION_LIMIT = 5000
def _get_server_space_permissions(
confluence_client: OnyxConfluence, space_key: str
) -> ExternalAccess:
space_permissions = confluence_client.get_space_permissions(space_key=space_key)
space_permissions = confluence_client.get_all_space_permissions_server(
space_key=space_key
)
viewspace_permissions = []
for permission_category in space_permissions:
@@ -31,14 +35,32 @@ def _get_server_space_permissions(
permission_category.get("spacePermissions", [])
)
is_public = False
user_names = set()
group_names = set()
for permission in viewspace_permissions:
if user_name := permission.get("userName"):
user_name = permission.get("userName")
if user_name:
user_names.add(user_name)
if group_name := permission.get("groupName"):
group_name = permission.get("groupName")
if group_name:
group_names.add(group_name)
# It seems that if anonymous access is turned on for the site and space,
# then the space is publicly accessible.
# For Confluence Server, when anonymous access is enabled for the site and space,
# we add a group containing every user we found in Confluence to the space
# permissions. Alternatively, we set is_public = True if the env variable
# CONFLUENCE_ANONYMOUS_ACCESS_IS_PUBLIC is set to True, to support Confluence
# Server deployments that want anonymous access to be public (we can't test
# this because it's paywalled).
if user_name is None and group_name is None:
# Defaults to False
if CONFLUENCE_ANONYMOUS_ACCESS_IS_PUBLIC:
is_public = True
else:
group_names.add(ALL_CONF_EMAILS_GROUP_NAME)
user_emails = set()
for user_name in user_names:
user_email = get_user_email_from_username__server(confluence_client, user_name)
@@ -47,14 +69,17 @@ def _get_server_space_permissions(
else:
logger.warning(f"Email for user {user_name} not found in Confluence")
if not user_emails and not group_names:
logger.warning(
"No user emails or group names found in Confluence space permissions"
f"\nSpace key: {space_key}"
f"\nSpace permissions: {space_permissions}"
)
return ExternalAccess(
external_user_emails=user_emails,
external_user_group_ids=group_names,
# TODO: Check if the space is publicly accessible
# Currently, we assume the space is not public
# We need to check if anonymous access is turned on for the site and space
# This information is paywalled so it remains unimplemented
is_public=False,
is_public=is_public,
)
@@ -134,7 +159,7 @@ def _get_space_permissions(
def _extract_read_access_restrictions(
confluence_client: OnyxConfluence, restrictions: dict[str, Any]
) -> ExternalAccess | None:
) -> tuple[set[str], set[str]]:
"""
Extracts the user emails and group names with read access from a page's
restrictions dict. If there are no restrictions, both sets are empty.
@@ -177,21 +202,57 @@ def _extract_read_access_restrictions(
group["name"] for group in read_access_group_jsons if group.get("name")
]
return set(read_access_user_emails), set(read_access_group_names)
def _get_all_page_restrictions(
confluence_client: OnyxConfluence,
perm_sync_data: dict[str, Any],
) -> ExternalAccess | None:
"""
This function gets the restrictions for a page by taking the intersection
of the page's restrictions and the restrictions of all the ancestors
of the page.
If the page/ancestor has no restrictions, then it is ignored (no intersection).
If no restrictions are found anywhere, then return None, indicating that the page
should inherit the space's restrictions.
"""
found_user_emails: set[str] = set()
found_group_names: set[str] = set()
found_user_emails, found_group_names = _extract_read_access_restrictions(
confluence_client=confluence_client,
restrictions=perm_sync_data.get("restrictions", {}),
)
ancestors: list[dict[str, Any]] = perm_sync_data.get("ancestors", [])
for ancestor in ancestors:
ancestor_user_emails, ancestor_group_names = _extract_read_access_restrictions(
confluence_client=confluence_client,
restrictions=ancestor.get("restrictions", {}),
)
if not ancestor_user_emails and not ancestor_group_names:
# This ancestor has no restrictions, so it has no effect on
# the page's restrictions, so we ignore it
continue
found_user_emails.intersection_update(ancestor_user_emails)
found_group_names.intersection_update(ancestor_group_names)
# If there are no restrictions found, then the page
# inherits the space's restrictions so return None
is_space_public = read_access_user_emails == [] and read_access_group_names == []
if is_space_public:
if not found_user_emails and not found_group_names:
return None
return ExternalAccess(
external_user_emails=set(read_access_user_emails),
external_user_group_ids=set(read_access_group_names),
external_user_emails=found_user_emails,
external_user_group_ids=found_group_names,
# there is no way for a page to be individually public if the space isn't public
is_public=False,
)
def _fetch_all_page_restrictions_for_space(
def _fetch_all_page_restrictions(
confluence_client: OnyxConfluence,
slim_docs: list[SlimDocument],
space_permissions_by_space_key: dict[str, ExternalAccess],
@@ -208,11 +269,11 @@ def _fetch_all_page_restrictions_for_space(
raise ValueError(
f"No permission sync data found for document {slim_doc.id}"
)
restrictions = _extract_read_access_restrictions(
if restrictions := _get_all_page_restrictions(
confluence_client=confluence_client,
restrictions=slim_doc.perm_sync_data.get("restrictions", {}),
)
if restrictions:
perm_sync_data=slim_doc.perm_sync_data,
):
document_restrictions.append(
DocExternalAccess(
doc_id=slim_doc.id,
@@ -301,7 +362,7 @@ def confluence_doc_sync(
slim_docs.extend(doc_batch)
logger.debug("Fetching all page restrictions for space")
return _fetch_all_page_restrictions_for_space(
return _fetch_all_page_restrictions(
confluence_client=confluence_connector.confluence_client,
slim_docs=slim_docs,
space_permissions_by_space_key=space_permissions_by_space_key,
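To make the intersection rule in _get_all_page_restrictions concrete, here is a small self-contained sketch with made-up restriction sets (not part of the diff): unrestricted ancestors are skipped, restricted ancestors narrow the allowed sets, and an empty result means the page falls back to the space's permissions.

# Hypothetical page and ancestor restrictions for illustration only
page_users = {"a@example.com", "b@example.com"}
page_groups: set[str] = set()
ancestor_restrictions = [
    (set(), set()),                               # unrestricted ancestor -> ignored
    ({"a@example.com", "c@example.com"}, set()),  # restricted ancestor -> intersect
]

allowed_users = set(page_users)
allowed_groups = set(page_groups)
for anc_users, anc_groups in ancestor_restrictions:
    if not anc_users and not anc_groups:
        continue  # no restrictions on this ancestor, so it has no effect
    allowed_users &= anc_users
    allowed_groups &= anc_groups

# Only a@example.com may read the page: they are allowed on the page and on
# every restricted ancestor. If both sets end up empty, the space permissions apply.
assert allowed_users == {"a@example.com"} and allowed_groups == set()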

View File

@@ -1,11 +1,11 @@
from ee.onyx.db.external_perm import ExternalUserGroup
from ee.onyx.external_permissions.confluence.constants import ALL_CONF_EMAILS_GROUP_NAME
from onyx.connectors.confluence.onyx_confluence import build_confluence_client
from onyx.connectors.confluence.onyx_confluence import OnyxConfluence
from onyx.connectors.confluence.utils import get_user_email_from_username__server
from onyx.db.models import ConnectorCredentialPair
from onyx.utils.logger import setup_logger
logger = setup_logger()
@@ -30,6 +30,7 @@ def _build_group_member_email_map(
)
if not email:
# If we still don't have an email, skip this user
logger.warning(f"user result missing email field: {user_result}")
continue
for group in confluence_client.paginated_groups_by_user_retrieval(user):
@@ -53,6 +54,7 @@ def confluence_group_sync(
confluence_client=confluence_client,
)
onyx_groups: list[ExternalUserGroup] = []
all_found_emails = set()
for group_id, group_member_emails in group_member_email_map.items():
onyx_groups.append(
ExternalUserGroup(
@@ -60,5 +62,15 @@ def confluence_group_sync(
user_emails=list(group_member_emails),
)
)
all_found_emails.update(group_member_emails)
# This is so that when we find a public Confluence Server page, we can
# give access to all users, but only if they have an email in Confluence
if not cc_pair.connector.connector_specific_config.get("is_cloud", False):
all_found_group = ExternalUserGroup(
id=ALL_CONF_EMAILS_GROUP_NAME,
user_emails=list(all_found_emails),
)
onyx_groups.append(all_found_group)
return onyx_groups

View File

@@ -0,0 +1,84 @@
from collections.abc import Callable
from ee.onyx.db.connector_credential_pair import get_all_auto_sync_cc_pairs
from ee.onyx.external_permissions.salesforce.postprocessing import (
censor_salesforce_chunks,
)
from onyx.configs.constants import DocumentSource
from onyx.context.search.pipeline import InferenceChunk
from onyx.db.engine import get_session_context_manager
from onyx.db.models import User
from onyx.utils.logger import setup_logger
logger = setup_logger()
DOC_SOURCE_TO_CHUNK_CENSORING_FUNCTION: dict[
DocumentSource,
# list of chunks to be censored and the user email. returns censored chunks
Callable[[list[InferenceChunk], str], list[InferenceChunk]],
] = {
DocumentSource.SALESFORCE: censor_salesforce_chunks,
}
def _get_all_censoring_enabled_sources() -> set[DocumentSource]:
"""
Returns the set of sources that have censoring enabled.
This is based on if the access_type is set to sync and the connector
source is included in DOC_SOURCE_TO_CHUNK_CENSORING_FUNCTION.
NOTE: This means that if a source has even a single cc_pair that is sync,
all chunks for that source will be censored, even if the connector that
indexed that chunk is not sync. This was done to avoid getting the cc_pair
for every single chunk.
"""
with get_session_context_manager() as db_session:
enabled_sync_connectors = get_all_auto_sync_cc_pairs(db_session)
return {
cc_pair.connector.source
for cc_pair in enabled_sync_connectors
if cc_pair.connector.source in DOC_SOURCE_TO_CHUNK_CENSORING_FUNCTION
}
# NOTE: This is only called if ee is enabled.
def _post_query_chunk_censoring(
chunks: list[InferenceChunk],
user: User | None,
) -> list[InferenceChunk]:
"""
This function checks all chunks to see if they need to be sent to a censoring
function. If they do, it sends them to the censoring function and returns the
censored chunks. If they don't, it returns the original chunks.
"""
if user is None:
# if user is None, permissions are not enforced
return chunks
chunks_to_keep = []
chunks_to_process: dict[DocumentSource, list[InferenceChunk]] = {}
sources_to_censor = _get_all_censoring_enabled_sources()
for chunk in chunks:
# Separate out chunks that require permission post-processing by source
if chunk.source_type in sources_to_censor:
chunks_to_process.setdefault(chunk.source_type, []).append(chunk)
else:
chunks_to_keep.append(chunk)
# For each source, filter out the chunks using the permission
# check function for that source
# TODO: Use a threadpool/multiprocessing to process the sources in parallel
for source, chunks_for_source in chunks_to_process.items():
censor_chunks_for_source = DOC_SOURCE_TO_CHUNK_CENSORING_FUNCTION[source]
try:
censored_chunks = censor_chunks_for_source(chunks_for_source, user.email)
except Exception as e:
logger.exception(
f"Failed to censor chunks for source {source} so throwing out all"
f" chunks for this source and continuing: {e}"
)
continue
chunks_to_keep.extend(censored_chunks)
return chunks_to_keep
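The mapping-based dispatch above is the extension point for other sources. A hedged sketch of how a hypothetical Jira censoring hook could plug in (censor_jira_chunks is invented for illustration; only Salesforce is wired up in this PR):

from ee.onyx.external_permissions.post_query_censoring import (
    DOC_SOURCE_TO_CHUNK_CENSORING_FUNCTION,
)
from onyx.configs.constants import DocumentSource
from onyx.context.search.pipeline import InferenceChunk

def censor_jira_chunks(
    chunks: list[InferenceChunk], user_email: str
) -> list[InferenceChunk]:
    # A real implementation would drop or trim the chunks the user cannot read;
    # this placeholder keeps everything.
    return chunks

# Registering the hook makes _post_query_chunk_censoring route Jira chunks here
# whenever a Jira cc_pair uses sync access.
DOC_SOURCE_TO_CHUNK_CENSORING_FUNCTION[DocumentSource.JIRA] = censor_jira_chunks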

View File

@@ -0,0 +1,226 @@
import time
from ee.onyx.db.external_perm import fetch_external_groups_for_user_email_and_group_ids
from ee.onyx.external_permissions.salesforce.utils import (
get_any_salesforce_client_for_doc_id,
)
from ee.onyx.external_permissions.salesforce.utils import get_objects_access_for_user_id
from ee.onyx.external_permissions.salesforce.utils import (
get_salesforce_user_id_from_email,
)
from onyx.configs.app_configs import BLURB_SIZE
from onyx.context.search.models import InferenceChunk
from onyx.db.engine import get_session_context_manager
from onyx.utils.logger import setup_logger
logger = setup_logger()
# Types
ChunkKey = tuple[str, int] # (doc_id, chunk_id)
ContentRange = tuple[int, int | None] # (start_index, end_index) None means to the end
# NOTE: Used for testing timing
def _get_dummy_object_access_map(
object_ids: set[str], user_email: str, chunks: list[InferenceChunk]
) -> dict[str, bool]:
time.sleep(0.15)
# return {object_id: True for object_id in object_ids}
import random
return {object_id: random.choice([True, False]) for object_id in object_ids}
def _get_objects_access_for_user_email_from_salesforce(
object_ids: set[str],
user_email: str,
chunks: list[InferenceChunk],
) -> dict[str, bool] | None:
"""
This function wraps the salesforce call as we may want to change how this
is done in the future. (E.g. replace it with the above function)
"""
# This is cached in the function so the first query takes an extra 0.1-0.3 seconds
# but subsequent queries for this source are essentially instant
first_doc_id = chunks[0].document_id
with get_session_context_manager() as db_session:
salesforce_client = get_any_salesforce_client_for_doc_id(
db_session, first_doc_id
)
# This is cached in the function so the first query takes an extra 0.1-0.3 seconds
# but subsequent queries by the same user are essentially instant
start_time = time.time()
user_id = get_salesforce_user_id_from_email(salesforce_client, user_email)
end_time = time.time()
logger.info(
f"Time taken to get Salesforce user ID: {end_time - start_time} seconds"
)
if user_id is None:
return None
# This is the only query that is not cached in the function
# so it takes 0.1-0.2 seconds total
object_id_to_access = get_objects_access_for_user_id(
salesforce_client, user_id, list(object_ids)
)
return object_id_to_access
def _extract_salesforce_object_id_from_url(url: str) -> str:
return url.split("/")[-1]
def _get_object_ranges_for_chunk(
chunk: InferenceChunk,
) -> dict[str, list[ContentRange]]:
"""
Given a chunk, return a dictionary of salesforce object ids and the content ranges
for that object id in the current chunk
"""
if chunk.source_links is None:
return {}
object_ranges: dict[str, list[ContentRange]] = {}
end_index = None
descending_source_links = sorted(
chunk.source_links.items(), key=lambda x: x[0], reverse=True
)
for start_index, url in descending_source_links:
object_id = _extract_salesforce_object_id_from_url(url)
if object_id not in object_ranges:
object_ranges[object_id] = []
object_ranges[object_id].append((start_index, end_index))
end_index = start_index
return object_ranges
def _create_empty_censored_chunk(uncensored_chunk: InferenceChunk) -> InferenceChunk:
"""
Create a copy of the unfiltered chunk where potentially sensitive content is removed
to be added later if the user has access to each of the sub-objects
"""
empty_censored_chunk = InferenceChunk(
**uncensored_chunk.model_dump(),
)
empty_censored_chunk.content = ""
empty_censored_chunk.blurb = ""
empty_censored_chunk.source_links = {}
return empty_censored_chunk
def _update_censored_chunk(
censored_chunk: InferenceChunk,
uncensored_chunk: InferenceChunk,
content_range: ContentRange,
) -> InferenceChunk:
"""
Update the filtered chunk with the content and source links from the unfiltered chunk using the content ranges
"""
start_index, end_index = content_range
# Update the content of the filtered chunk
permitted_content = uncensored_chunk.content[start_index:end_index]
permitted_section_start_index = len(censored_chunk.content)
censored_chunk.content = permitted_content + censored_chunk.content
# Update the source links of the filtered chunk
if uncensored_chunk.source_links is not None:
if censored_chunk.source_links is None:
censored_chunk.source_links = {}
link_content = uncensored_chunk.source_links[start_index]
censored_chunk.source_links[permitted_section_start_index] = link_content
# Update the blurb of the filtered chunk
censored_chunk.blurb = censored_chunk.content[:BLURB_SIZE]
return censored_chunk
# TODO: Generalize this to other sources
def censor_salesforce_chunks(
chunks: list[InferenceChunk],
user_email: str,
# This is so we can provide a mock access map for testing
access_map: dict[str, bool] | None = None,
) -> list[InferenceChunk]:
# object_id -> list[((doc_id, chunk_id), (start_index, end_index))]
object_to_content_map: dict[str, list[tuple[ChunkKey, ContentRange]]] = {}
# (doc_id, chunk_id) -> chunk
uncensored_chunks: dict[ChunkKey, InferenceChunk] = {}
# keep track of all object ids that we have seen to make it easier to get
# the access for these object ids
object_ids: set[str] = set()
for chunk in chunks:
chunk_key = (chunk.document_id, chunk.chunk_id)
# create a dictionary to quickly look up the unfiltered chunk
uncensored_chunks[chunk_key] = chunk
# for each chunk, get a dictionary of object ids and the content ranges
# for that object id in the current chunk
object_ranges_for_chunk = _get_object_ranges_for_chunk(chunk)
for object_id, ranges in object_ranges_for_chunk.items():
object_ids.add(object_id)
for start_index, end_index in ranges:
object_to_content_map.setdefault(object_id, []).append(
(chunk_key, (start_index, end_index))
)
# This is so we can provide a mock access map for testing
if access_map is None:
access_map = _get_objects_access_for_user_email_from_salesforce(
object_ids=object_ids,
user_email=user_email,
chunks=chunks,
)
if access_map is None:
# If the user is not found in Salesforce, access_map will be None
# so we should just return an empty list because no chunks will be
# censored
return []
censored_chunks: dict[ChunkKey, InferenceChunk] = {}
for object_id, content_list in object_to_content_map.items():
# if the user does not have access to the object, or the object is not in the
# access_map, do not include its content in the filtered chunks
if not access_map.get(object_id, False):
continue
# if we got this far, the user has access to the object so we can create or update
# the filtered chunk(s) for this object
# NOTE: we only create a censored chunk if the user has access to some
# part of the chunk
for chunk_key, content_range in content_list:
if chunk_key not in censored_chunks:
censored_chunks[chunk_key] = _create_empty_censored_chunk(
uncensored_chunks[chunk_key]
)
uncensored_chunk = uncensored_chunks[chunk_key]
censored_chunk = _update_censored_chunk(
censored_chunk=censored_chunks[chunk_key],
uncensored_chunk=uncensored_chunk,
content_range=content_range,
)
censored_chunks[chunk_key] = censored_chunk
return list(censored_chunks.values())
# NOTE: This is not used anywhere.
def _get_objects_access_for_user_email(
object_ids: set[str], user_email: str
) -> dict[str, bool]:
with get_session_context_manager() as db_session:
external_groups = fetch_external_groups_for_user_email_and_group_ids(
db_session=db_session,
user_email=user_email,
# Maybe make a function that adds a salesforce prefix to the group ids
group_ids=list(object_ids),
)
external_group_ids = {group.external_user_group_id for group in external_groups}
return {group_id: group_id in external_group_ids for group_id in object_ids}
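The source_links bookkeeping in _get_object_ranges_for_chunk is easiest to see on a tiny example. A hedged sketch with made-up offsets and record URLs (not from the diff), walking the links in descending order so each section's end index is the previous section's start:

# Hypothetical source_links for one chunk: character offset -> Salesforce record URL
source_links = {
    0: "https://example.my.salesforce.com/001AAA",
    40: "https://example.my.salesforce.com/001BBB",
}

object_ranges: dict[str, list[tuple[int, int | None]]] = {}
end_index: int | None = None  # None means "to the end of the chunk content"
for start_index, url in sorted(source_links.items(), key=lambda x: x[0], reverse=True):
    object_id = url.split("/")[-1]  # same extraction as _extract_salesforce_object_id_from_url
    object_ranges.setdefault(object_id, []).append((start_index, end_index))
    end_index = start_index

# The later object owns [40, end), the earlier one owns [0, 40); censoring then
# copies only the ranges whose object the user can read into the censored chunk.
assert object_ranges == {"001BBB": [(40, None)], "001AAA": [(0, 40)]}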

View File

@@ -0,0 +1,174 @@
from simple_salesforce import Salesforce
from sqlalchemy.orm import Session
from onyx.connectors.salesforce.sqlite_functions import get_user_id_by_email
from onyx.connectors.salesforce.sqlite_functions import init_db
from onyx.connectors.salesforce.sqlite_functions import NULL_ID_STRING
from onyx.connectors.salesforce.sqlite_functions import update_email_to_id_table
from onyx.db.connector_credential_pair import get_connector_credential_pair_from_id
from onyx.db.document import get_cc_pairs_for_document
from onyx.utils.logger import setup_logger
logger = setup_logger()
_ANY_SALESFORCE_CLIENT: Salesforce | None = None
def get_any_salesforce_client_for_doc_id(
db_session: Session, doc_id: str
) -> Salesforce:
"""
We create a salesforce client for the first cc_pair for the first doc_id where
salesforce censoring is enabled. After that we just cache and reuse the same
client for all queries.
We do this to reduce the number of postgres queries we make at query time.
This may be problematic if they are using multiple cc_pairs for Salesforce.
E.g. there are 2 different credential sets for 2 different Salesforce cc_pairs
but only one has access to the permission data needed for the query.
"""
global _ANY_SALESFORCE_CLIENT
if _ANY_SALESFORCE_CLIENT is None:
cc_pairs = get_cc_pairs_for_document(db_session, doc_id)
first_cc_pair = cc_pairs[0]
credential_json = first_cc_pair.credential.credential_json
_ANY_SALESFORCE_CLIENT = Salesforce(
username=credential_json["sf_username"],
password=credential_json["sf_password"],
security_token=credential_json["sf_security_token"],
)
return _ANY_SALESFORCE_CLIENT
def _query_salesforce_user_id(sf_client: Salesforce, user_email: str) -> str | None:
query = f"SELECT Id FROM User WHERE Email = '{user_email}'"
result = sf_client.query(query)
if len(result["records"]) == 0:
return None
return result["records"][0]["Id"]
# This contains only the user_ids that we have found in Salesforce.
# If we don't know their user_id, we don't store anything in the cache.
_CACHED_SF_EMAIL_TO_ID_MAP: dict[str, str] = {}
def get_salesforce_user_id_from_email(
sf_client: Salesforce,
user_email: str,
) -> str | None:
"""
We cache this so we don't have to query Salesforce on every query; Salesforce
user IDs never change.
Memory usage is fine because we just store 2 small strings per user.
If the email is not in the cache, we check the local salesforce database for the info.
If the user is not found in the local salesforce database, we query Salesforce.
Whatever we get back from Salesforce is added to the database.
If no user_id is found, we add a NULL_ID_STRING to the database for that email so
we don't query Salesforce again (which is slow) but we still check the local salesforce
database every query until a user id is found. This is acceptable because the query time
is quite fast.
If a user_id is created in Salesforce, it will be added to the local salesforce database
next time the connector is run. Then that value will be found in this function and cached.
NOTE: First time this runs, it may be slow if it hasn't already been updated in the local
salesforce database. (Around 0.1-0.3 seconds)
If it's cached or stored in the local salesforce database, it's fast (<0.001 seconds).
"""
global _CACHED_SF_EMAIL_TO_ID_MAP
if user_email in _CACHED_SF_EMAIL_TO_ID_MAP:
if _CACHED_SF_EMAIL_TO_ID_MAP[user_email] is not None:
return _CACHED_SF_EMAIL_TO_ID_MAP[user_email]
db_exists = True
try:
# Check if the user is already in the database
user_id = get_user_id_by_email(user_email)
except Exception:
init_db()
try:
user_id = get_user_id_by_email(user_email)
except Exception as e:
logger.error(f"Error checking if user is in database: {e}")
user_id = None
db_exists = False
# If no entry is found in the database (indicated by user_id being None)...
if user_id is None:
# ...query Salesforce and store the result in the database
user_id = _query_salesforce_user_id(sf_client, user_email)
if db_exists:
update_email_to_id_table(user_email, user_id)
return user_id
elif user_id is None:
return None
elif user_id == NULL_ID_STRING:
return None
# If the found user_id is real, cache it
_CACHED_SF_EMAIL_TO_ID_MAP[user_email] = user_id
return user_id
_MAX_RECORD_IDS_PER_QUERY = 200
def get_objects_access_for_user_id(
salesforce_client: Salesforce,
user_id: str,
record_ids: list[str],
) -> dict[str, bool]:
"""
Salesforce has a limit of 200 record ids per query. So we just truncate
the list of record ids to 200. We only ever retrieve 50 chunks at a time
so this should be fine (it's unlikely that each of the 50 retrieved chunks
contains 4 unique objects).
If we decide this isn't acceptable we can use multiple queries but they
should be in parallel so query time doesn't get too long.
"""
truncated_record_ids = record_ids[:_MAX_RECORD_IDS_PER_QUERY]
record_ids_str = "'" + "','".join(truncated_record_ids) + "'"
access_query = f"""
SELECT RecordId, HasReadAccess
FROM UserRecordAccess
WHERE RecordId IN ({record_ids_str})
AND UserId = '{user_id}'
"""
result = salesforce_client.query_all(access_query)
return {record["RecordId"]: record["HasReadAccess"] for record in result["records"]}
_CC_PAIR_ID_SALESFORCE_CLIENT_MAP: dict[int, Salesforce] = {}
_DOC_ID_TO_CC_PAIR_ID_MAP: dict[str, int] = {}
# NOTE: This is not used anywhere.
def _get_salesforce_client_for_doc_id(db_session: Session, doc_id: str) -> Salesforce:
"""
Uses a document id to get the cc_pair that indexed that document and uses the credentials
for that cc_pair to create a Salesforce client.
Problems:
- There may be multiple cc_pairs for a document, and we don't know which one to use.
- right now we just use the first one
- Building a new Salesforce client for each document is slow.
- Memory usage could be an issue as we build these dictionaries.
"""
if doc_id not in _DOC_ID_TO_CC_PAIR_ID_MAP:
cc_pairs = get_cc_pairs_for_document(db_session, doc_id)
first_cc_pair = cc_pairs[0]
_DOC_ID_TO_CC_PAIR_ID_MAP[doc_id] = first_cc_pair.id
cc_pair_id = _DOC_ID_TO_CC_PAIR_ID_MAP[doc_id]
if cc_pair_id not in _CC_PAIR_ID_SALESFORCE_CLIENT_MAP:
cc_pair = get_connector_credential_pair_from_id(cc_pair_id, db_session)
if cc_pair is None:
raise ValueError(f"CC pair {cc_pair_id} not found")
credential_json = cc_pair.credential.credential_json
_CC_PAIR_ID_SALESFORCE_CLIENT_MAP[cc_pair_id] = Salesforce(
username=credential_json["sf_username"],
password=credential_json["sf_password"],
security_token=credential_json["sf_security_token"],
)
return _CC_PAIR_ID_SALESFORCE_CLIENT_MAP[cc_pair_id]
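The docstring of get_objects_access_for_user_id notes that parallel batched queries would be the fallback if truncating to 200 record ids ever becomes a problem. A hedged sketch of that alternative (not part of the diff; it reuses get_objects_access_for_user_id as defined above, and the batch size and worker count are assumptions):

from concurrent.futures import ThreadPoolExecutor

def get_objects_access_for_user_id_batched(
    salesforce_client: Salesforce,
    user_id: str,
    record_ids: list[str],
    batch_size: int = 200,
) -> dict[str, bool]:
    # Split into Salesforce-sized batches and query them in parallel threads
    batches = [
        record_ids[i : i + batch_size]
        for i in range(0, len(record_ids), batch_size)
    ]
    access: dict[str, bool] = {}
    with ThreadPoolExecutor(max_workers=4) as pool:
        for partial in pool.map(
            lambda batch: get_objects_access_for_user_id(
                salesforce_client, user_id, batch
            ),
            batches,
        ):
            access.update(partial)
    return access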

View File

@@ -8,6 +8,9 @@ from ee.onyx.external_permissions.confluence.group_sync import confluence_group_
from ee.onyx.external_permissions.gmail.doc_sync import gmail_doc_sync
from ee.onyx.external_permissions.google_drive.doc_sync import gdrive_doc_sync
from ee.onyx.external_permissions.google_drive.group_sync import gdrive_group_sync
from ee.onyx.external_permissions.post_query_censoring import (
DOC_SOURCE_TO_CHUNK_CENSORING_FUNCTION,
)
from ee.onyx.external_permissions.slack.doc_sync import slack_doc_sync
from onyx.access.models import DocExternalAccess
from onyx.configs.constants import DocumentSource
@@ -71,4 +74,7 @@ EXTERNAL_GROUP_SYNC_PERIODS: dict[DocumentSource, int] = {
def check_if_valid_sync_source(source_type: DocumentSource) -> bool:
return source_type in DOC_PERMISSIONS_FUNC_MAP
return (
source_type in DOC_PERMISSIONS_FUNC_MAP
or source_type in DOC_SOURCE_TO_CHUNK_CENSORING_FUNCTION
)

View File

@@ -40,6 +40,7 @@ from onyx.configs.app_configs import USER_AUTH_SECRET
from onyx.configs.app_configs import WEB_DOMAIN
from onyx.configs.constants import AuthType
from onyx.main import get_application as get_application_base
from onyx.main import include_auth_router_with_prefix
from onyx.main import include_router_with_global_prefix_prepended
from onyx.utils.logger import setup_logger
from onyx.utils.variable_functionality import global_version
@@ -62,7 +63,7 @@ def get_application() -> FastAPI:
if AUTH_TYPE == AuthType.CLOUD:
oauth_client = GoogleOAuth2(OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET)
include_router_with_global_prefix_prepended(
include_auth_router_with_prefix(
application,
create_onyx_oauth_router(
oauth_client,
@@ -74,19 +75,17 @@ def get_application() -> FastAPI:
redirect_url=f"{WEB_DOMAIN}/auth/oauth/callback",
),
prefix="/auth/oauth",
tags=["auth"],
)
# Need basic auth router for `logout` endpoint
include_router_with_global_prefix_prepended(
include_auth_router_with_prefix(
application,
fastapi_users.get_logout_router(auth_backend),
prefix="/auth",
tags=["auth"],
)
if AUTH_TYPE == AuthType.OIDC:
include_router_with_global_prefix_prepended(
include_auth_router_with_prefix(
application,
create_onyx_oauth_router(
OpenID(OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET, OPENID_CONFIG_URL),
@@ -97,19 +96,20 @@ def get_application() -> FastAPI:
redirect_url=f"{WEB_DOMAIN}/auth/oidc/callback",
),
prefix="/auth/oidc",
tags=["auth"],
)
# need basic auth router for `logout` endpoint
include_router_with_global_prefix_prepended(
include_auth_router_with_prefix(
application,
fastapi_users.get_auth_router(auth_backend),
prefix="/auth",
tags=["auth"],
)
elif AUTH_TYPE == AuthType.SAML:
include_router_with_global_prefix_prepended(application, saml_router)
include_auth_router_with_prefix(
application,
saml_router,
)
# RBAC / group access control
include_router_with_global_prefix_prepended(application, user_group_router)

View File

@@ -1,17 +1,24 @@
import datetime
from collections import defaultdict
from typing import List
from fastapi import APIRouter
from fastapi import Depends
from fastapi import HTTPException
from pydantic import BaseModel
from sqlalchemy.orm import Session
from ee.onyx.db.analytics import fetch_assistant_message_analytics
from ee.onyx.db.analytics import fetch_assistant_unique_users
from ee.onyx.db.analytics import fetch_assistant_unique_users_total
from ee.onyx.db.analytics import fetch_onyxbot_analytics
from ee.onyx.db.analytics import fetch_per_user_query_analytics
from ee.onyx.db.analytics import fetch_persona_message_analytics
from ee.onyx.db.analytics import fetch_persona_unique_users
from ee.onyx.db.analytics import fetch_query_analytics
from ee.onyx.db.analytics import user_can_view_assistant_stats
from onyx.auth.users import current_admin_user
from onyx.auth.users import current_user
from onyx.db.engine import get_session
from onyx.db.models import User
@@ -191,3 +198,76 @@ def get_persona_unique_users(
)
)
return unique_user_counts
class AssistantDailyUsageResponse(BaseModel):
date: datetime.date
total_messages: int
total_unique_users: int
class AssistantStatsResponse(BaseModel):
daily_stats: List[AssistantDailyUsageResponse]
total_messages: int
total_unique_users: int
@router.get("/assistant/{assistant_id}/stats")
def get_assistant_stats(
assistant_id: int,
start: datetime.datetime | None = None,
end: datetime.datetime | None = None,
user: User | None = Depends(current_user),
db_session: Session = Depends(get_session),
) -> AssistantStatsResponse:
"""
Returns daily message and unique user counts for a user's assistant,
along with the overall total messages and total distinct users.
"""
start = start or (
datetime.datetime.utcnow() - datetime.timedelta(days=_DEFAULT_LOOKBACK_DAYS)
)
end = end or datetime.datetime.utcnow()
print("current user")
print(user)
if not user_can_view_assistant_stats(db_session, user, assistant_id):
raise HTTPException(
status_code=403, detail="Not allowed to access this assistant's stats."
)
# Pull daily usage from the DB calls
messages_data = fetch_assistant_message_analytics(
db_session, assistant_id, start, end
)
unique_users_data = fetch_assistant_unique_users(
db_session, assistant_id, start, end
)
# Map each day => (messages, unique_users).
daily_messages_map = {date: count for count, date in messages_data}
daily_unique_users_map = {date: count for count, date in unique_users_data}
all_dates = set(daily_messages_map.keys()) | set(daily_unique_users_map.keys())
# Merge both sets of metrics by date
daily_results: list[AssistantDailyUsageResponse] = []
for date in sorted(all_dates):
daily_results.append(
AssistantDailyUsageResponse(
date=date,
total_messages=daily_messages_map.get(date, 0),
total_unique_users=daily_unique_users_map.get(date, 0),
)
)
# Sum messages across the daily rows, then pull a single distinct-user count
# across the entire time range
total_msgs = sum(d.total_messages for d in daily_results)
total_users = fetch_assistant_unique_users_total(
db_session, assistant_id, start, end
)
return AssistantStatsResponse(
daily_stats=daily_results,
total_messages=total_msgs,
total_unique_users=total_users,
)
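A worked example of the merge-by-date step in get_assistant_stats, assuming the analytics helpers return (count, date) tuples as the dict comprehensions above imply; the numbers are made up.

    from datetime import date

    messages_data = [(5, date(2025, 1, 10)), (2, date(2025, 1, 11))]
    unique_users_data = [(3, date(2025, 1, 10))]
    daily_messages_map = {d: c for c, d in messages_data}
    daily_unique_users_map = {d: c for c, d in unique_users_data}
    all_dates = sorted(set(daily_messages_map) | set(daily_unique_users_map))
    merged = [
        (d, daily_messages_map.get(d, 0), daily_unique_users_map.get(d, 0))
        for d in all_dates
    ]
    # [(Jan 10, 5, 3), (Jan 11, 2, 0)] -> a day missing a metric defaults to 0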

View File

@@ -2,15 +2,16 @@ import logging
from collections.abc import Awaitable
from collections.abc import Callable
import jwt
from fastapi import FastAPI
from fastapi import HTTPException
from fastapi import Request
from fastapi import Response
from ee.onyx.auth.users import decode_anonymous_user_jwt_token
from ee.onyx.configs.app_configs import ANONYMOUS_USER_COOKIE_NAME
from onyx.auth.api_key import extract_tenant_from_api_key_header
from onyx.configs.app_configs import USER_AUTH_SECRET
from onyx.db.engine import is_valid_schema_name
from onyx.redis.redis_pool import retrieve_auth_token_data_from_redis
from shared_configs.configs import MULTI_TENANT
from shared_configs.configs import POSTGRES_DEFAULT_SCHEMA
from shared_configs.contextvars import CURRENT_TENANT_ID_CONTEXTVAR
@@ -22,11 +23,11 @@ def add_tenant_id_middleware(app: FastAPI, logger: logging.LoggerAdapter) -> Non
request: Request, call_next: Callable[[Request], Awaitable[Response]]
) -> Response:
try:
tenant_id = (
_get_tenant_id_from_request(request, logger)
if MULTI_TENANT
else POSTGRES_DEFAULT_SCHEMA
)
if MULTI_TENANT:
tenant_id = await _get_tenant_id_from_request(request, logger)
else:
tenant_id = POSTGRES_DEFAULT_SCHEMA
CURRENT_TENANT_ID_CONTEXTVAR.set(tenant_id)
return await call_next(request)
@@ -35,27 +36,46 @@ def add_tenant_id_middleware(app: FastAPI, logger: logging.LoggerAdapter) -> Non
raise
def _get_tenant_id_from_request(request: Request, logger: logging.LoggerAdapter) -> str:
# First check for API key
async def _get_tenant_id_from_request(
request: Request, logger: logging.LoggerAdapter
) -> str:
"""
Attempt to extract tenant_id from:
1) The API key header
2) The Redis-based token (stored in Cookie: fastapiusersauth)
Fallback: POSTGRES_DEFAULT_SCHEMA
"""
# Check for API key
tenant_id = extract_tenant_from_api_key_header(request)
if tenant_id is not None:
if tenant_id:
return tenant_id
# Check for cookie-based auth
token = request.cookies.get("fastapiusersauth")
if not token:
return POSTGRES_DEFAULT_SCHEMA
# Check for anonymous user cookie
anonymous_user_cookie = request.cookies.get(ANONYMOUS_USER_COOKIE_NAME)
if anonymous_user_cookie:
try:
anonymous_user_data = decode_anonymous_user_jwt_token(anonymous_user_cookie)
return anonymous_user_data.get("tenant_id", POSTGRES_DEFAULT_SCHEMA)
except Exception as e:
logger.error(f"Error decoding anonymous user cookie: {str(e)}")
# Continue and attempt to authenticate
try:
payload = jwt.decode(
token,
USER_AUTH_SECRET,
audience=["fastapi-users:auth"],
algorithms=["HS256"],
)
tenant_id_from_payload = payload.get("tenant_id", POSTGRES_DEFAULT_SCHEMA)
# Look up token data in Redis
token_data = await retrieve_auth_token_data_from_redis(request)
# Since payload.get() can return None, ensure we have a string
if not token_data:
logger.debug(
"Token data not found or expired in Redis, defaulting to POSTGRES_DEFAULT_SCHEMA"
)
# Return POSTGRES_DEFAULT_SCHEMA, so non-authenticated requests are sent to the default schema
# The CURRENT_TENANT_ID_CONTEXTVAR is initialized with POSTGRES_DEFAULT_SCHEMA,
# so we maintain consistency by returning it here when no valid tenant is found.
return POSTGRES_DEFAULT_SCHEMA
tenant_id_from_payload = token_data.get("tenant_id", POSTGRES_DEFAULT_SCHEMA)
# Since token_data.get() can return None, ensure we have a string
tenant_id = (
str(tenant_id_from_payload)
if tenant_id_from_payload is not None
@@ -67,9 +87,6 @@ def _get_tenant_id_from_request(request: Request, logger: logging.LoggerAdapter)
return tenant_id
except jwt.InvalidTokenError:
return POSTGRES_DEFAULT_SCHEMA
except Exception as e:
logger.error(f"Unexpected error in set_tenant_id_middleware: {str(e)}")
logger.error(f"Unexpected error in _get_tenant_id_from_request: {str(e)}")
raise HTTPException(status_code=500, detail="Internal server error")
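A condensed sketch of the resolution order the new docstring describes: API key header first, then the anonymous-user cookie, then the Redis-backed fastapiusersauth token, with POSTGRES_DEFAULT_SCHEMA as the fallback. Imports mirror the hunk above; this illustrates the control flow only and is not the actual middleware.

    async def resolve_tenant_id(request: Request) -> str:
        # 1) API key header wins if it carries a tenant
        tenant_id = extract_tenant_from_api_key_header(request)
        if tenant_id:
            return tenant_id
        # 2) anonymous-user cookie, if present and decodable
        anon_cookie = request.cookies.get(ANONYMOUS_USER_COOKIE_NAME)
        if anon_cookie:
            try:
                anon_data = decode_anonymous_user_jwt_token(anon_cookie)
                return anon_data.get("tenant_id", POSTGRES_DEFAULT_SCHEMA)
            except Exception:
                pass  # fall through to the authenticated path
        # 3) Redis-backed session token; missing or expired data means default schema
        token_data = await retrieve_auth_token_data_from_redis(request)
        if not token_data:
            return POSTGRES_DEFAULT_SCHEMA
        return str(token_data.get("tenant_id") or POSTGRES_DEFAULT_SCHEMA)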

View File

@@ -1,5 +1,7 @@
import base64
import json
import uuid
from typing import Any
from typing import cast
import requests
@@ -10,11 +12,29 @@ from fastapi.responses import JSONResponse
from pydantic import BaseModel
from sqlalchemy.orm import Session
from ee.onyx.configs.app_configs import OAUTH_CONFLUENCE_CLIENT_ID
from ee.onyx.configs.app_configs import OAUTH_CONFLUENCE_CLIENT_SECRET
from ee.onyx.configs.app_configs import OAUTH_GOOGLE_DRIVE_CLIENT_ID
from ee.onyx.configs.app_configs import OAUTH_GOOGLE_DRIVE_CLIENT_SECRET
from ee.onyx.configs.app_configs import OAUTH_SLACK_CLIENT_ID
from ee.onyx.configs.app_configs import OAUTH_SLACK_CLIENT_SECRET
from onyx.auth.users import current_user
from onyx.configs.app_configs import WEB_DOMAIN
from onyx.configs.constants import DocumentSource
from onyx.connectors.google_utils.google_auth import get_google_oauth_creds
from onyx.connectors.google_utils.google_auth import sanitize_oauth_credentials
from onyx.connectors.google_utils.shared_constants import (
DB_CREDENTIALS_AUTHENTICATION_METHOD,
)
from onyx.connectors.google_utils.shared_constants import (
DB_CREDENTIALS_DICT_TOKEN_KEY,
)
from onyx.connectors.google_utils.shared_constants import (
DB_CREDENTIALS_PRIMARY_ADMIN_KEY,
)
from onyx.connectors.google_utils.shared_constants import (
GoogleOAuthAuthenticationMethod,
)
from onyx.db.credentials import create_credential
from onyx.db.engine import get_current_tenant_id
from onyx.db.engine import get_session
@@ -62,14 +82,7 @@ class SlackOAuth:
@classmethod
def generate_oauth_url(cls, state: str) -> str:
url = (
f"https://slack.com/oauth/v2/authorize"
f"?client_id={cls.CLIENT_ID}"
f"&redirect_uri={cls.REDIRECT_URI}"
f"&scope={cls.BOT_SCOPE}"
f"&state={state}"
)
return url
return cls._generate_oauth_url_helper(cls.REDIRECT_URI, state)
@classmethod
def generate_dev_oauth_url(cls, state: str) -> str:
@@ -77,10 +90,14 @@ class SlackOAuth:
- https://www.nango.dev/blog/oauth-redirects-on-localhost-with-https
"""
return cls._generate_oauth_url_helper(cls.DEV_REDIRECT_URI, state)
@classmethod
def _generate_oauth_url_helper(cls, redirect_uri: str, state: str) -> str:
url = (
f"https://slack.com/oauth/v2/authorize"
f"?client_id={cls.CLIENT_ID}"
f"&redirect_uri={cls.DEV_REDIRECT_URI}"
f"&redirect_uri={redirect_uri}"
f"&scope={cls.BOT_SCOPE}"
f"&state={state}"
)
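With the shared helper above, the production and dev flows now differ only in which redirect_uri they embed; a small usage sketch (the state value is a placeholder).

    state = "placeholder-state"
    prod_url = SlackOAuth.generate_oauth_url(state)      # embeds cls.REDIRECT_URI
    dev_url = SlackOAuth.generate_dev_oauth_url(state)   # embeds cls.DEV_REDIRECT_URI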
@@ -102,82 +119,151 @@ class SlackOAuth:
return session
# Work in progress
# class ConfluenceCloudOAuth:
# """work in progress"""
class ConfluenceCloudOAuth:
"""work in progress"""
# # https://developer.atlassian.com/cloud/confluence/oauth-2-3lo-apps/
# https://developer.atlassian.com/cloud/confluence/oauth-2-3lo-apps/
# class OAuthSession(BaseModel):
# """Stored in redis to be looked up on callback"""
class OAuthSession(BaseModel):
"""Stored in redis to be looked up on callback"""
# email: str
# redirect_on_success: str | None # Where to send the user if OAuth flow succeeds
email: str
redirect_on_success: str | None # Where to send the user if OAuth flow succeeds
# CLIENT_ID = OAUTH_CONFLUENCE_CLIENT_ID
# CLIENT_SECRET = OAUTH_CONFLUENCE_CLIENT_SECRET
# TOKEN_URL = "https://auth.atlassian.com/oauth/token"
CLIENT_ID = OAUTH_CONFLUENCE_CLIENT_ID
CLIENT_SECRET = OAUTH_CONFLUENCE_CLIENT_SECRET
TOKEN_URL = "https://auth.atlassian.com/oauth/token"
# # All read scopes per https://developer.atlassian.com/cloud/confluence/scopes-for-oauth-2-3LO-and-forge-apps/
# CONFLUENCE_OAUTH_SCOPE = (
# "read:confluence-props%20"
# "read:confluence-content.all%20"
# "read:confluence-content.summary%20"
# "read:confluence-content.permission%20"
# "read:confluence-user%20"
# "read:confluence-groups%20"
# "readonly:content.attachment:confluence"
# )
# All read scopes per https://developer.atlassian.com/cloud/confluence/scopes-for-oauth-2-3LO-and-forge-apps/
CONFLUENCE_OAUTH_SCOPE = (
"read:confluence-props%20"
"read:confluence-content.all%20"
"read:confluence-content.summary%20"
"read:confluence-content.permission%20"
"read:confluence-user%20"
"read:confluence-groups%20"
"readonly:content.attachment:confluence"
)
# REDIRECT_URI = f"{WEB_DOMAIN}/admin/connectors/confluence/oauth/callback"
# DEV_REDIRECT_URI = f"https://redirectmeto.com/{REDIRECT_URI}"
REDIRECT_URI = f"{WEB_DOMAIN}/admin/connectors/confluence/oauth/callback"
DEV_REDIRECT_URI = f"https://redirectmeto.com/{REDIRECT_URI}"
# # eventually for Confluence Data Center
# # oauth_url = (
# # f"http://localhost:8090/rest/oauth/v2/authorize?client_id={CONFLUENCE_OAUTH_CLIENT_ID}"
# # f"&scope={CONFLUENCE_OAUTH_SCOPE_2}"
# # f"&redirect_uri={redirectme_uri}"
# # )
# eventually for Confluence Data Center
# oauth_url = (
# f"http://localhost:8090/rest/oauth/v2/authorize?client_id={CONFLUENCE_OAUTH_CLIENT_ID}"
# f"&scope={CONFLUENCE_OAUTH_SCOPE_2}"
# f"&redirect_uri={redirectme_uri}"
# )
# @classmethod
# def generate_oauth_url(cls, state: str) -> str:
# return cls._generate_oauth_url_helper(cls.REDIRECT_URI, state)
@classmethod
def generate_oauth_url(cls, state: str) -> str:
return cls._generate_oauth_url_helper(cls.REDIRECT_URI, state)
# @classmethod
# def generate_dev_oauth_url(cls, state: str) -> str:
# """dev mode workaround for localhost testing
# - https://www.nango.dev/blog/oauth-redirects-on-localhost-with-https
# """
# return cls._generate_oauth_url_helper(cls.DEV_REDIRECT_URI, state)
@classmethod
def generate_dev_oauth_url(cls, state: str) -> str:
"""dev mode workaround for localhost testing
- https://www.nango.dev/blog/oauth-redirects-on-localhost-with-https
"""
return cls._generate_oauth_url_helper(cls.DEV_REDIRECT_URI, state)
# @classmethod
# def _generate_oauth_url_helper(cls, redirect_uri: str, state: str) -> str:
# url = (
# "https://auth.atlassian.com/authorize"
# f"?audience=api.atlassian.com"
# f"&client_id={cls.CLIENT_ID}"
# f"&redirect_uri={redirect_uri}"
# f"&scope={cls.CONFLUENCE_OAUTH_SCOPE}"
# f"&state={state}"
# "&response_type=code"
# "&prompt=consent"
# )
# return url
@classmethod
def _generate_oauth_url_helper(cls, redirect_uri: str, state: str) -> str:
url = (
"https://auth.atlassian.com/authorize"
f"?audience=api.atlassian.com"
f"&client_id={cls.CLIENT_ID}"
f"&redirect_uri={redirect_uri}"
f"&scope={cls.CONFLUENCE_OAUTH_SCOPE}"
f"&state={state}"
"&response_type=code"
"&prompt=consent"
)
return url
# @classmethod
# def session_dump_json(cls, email: str, redirect_on_success: str | None) -> str:
# """Temporary state to store in redis. to be looked up on auth response.
# Returns a json string.
# """
# session = ConfluenceCloudOAuth.OAuthSession(
# email=email, redirect_on_success=redirect_on_success
# )
# return session.model_dump_json()
@classmethod
def session_dump_json(cls, email: str, redirect_on_success: str | None) -> str:
"""Temporary state to store in redis. to be looked up on auth response.
Returns a json string.
"""
session = ConfluenceCloudOAuth.OAuthSession(
email=email, redirect_on_success=redirect_on_success
)
return session.model_dump_json()
# @classmethod
# def parse_session(cls, session_json: str) -> SlackOAuth.OAuthSession:
# session = SlackOAuth.OAuthSession.model_validate_json(session_json)
# return session
@classmethod
def parse_session(cls, session_json: str) -> SlackOAuth.OAuthSession:
session = SlackOAuth.OAuthSession.model_validate_json(session_json)
return session
class GoogleDriveOAuth:
# https://developers.google.com/identity/protocols/oauth2
# https://developers.google.com/identity/protocols/oauth2/web-server
class OAuthSession(BaseModel):
"""Stored in redis to be looked up on callback"""
email: str
redirect_on_success: str | None # Where to send the user if OAuth flow succeeds
CLIENT_ID = OAUTH_GOOGLE_DRIVE_CLIENT_ID
CLIENT_SECRET = OAUTH_GOOGLE_DRIVE_CLIENT_SECRET
TOKEN_URL = "https://oauth2.googleapis.com/token"
# SCOPE is per https://docs.onyx.app/connectors/google-drive
# TODO: Merge with or use google_utils.GOOGLE_SCOPES
SCOPE = (
"https://www.googleapis.com/auth/drive.readonly%20"
"https://www.googleapis.com/auth/drive.metadata.readonly%20"
"https://www.googleapis.com/auth/admin.directory.user.readonly%20"
"https://www.googleapis.com/auth/admin.directory.group.readonly"
)
REDIRECT_URI = f"{WEB_DOMAIN}/admin/connectors/google-drive/oauth/callback"
DEV_REDIRECT_URI = f"https://redirectmeto.com/{REDIRECT_URI}"
@classmethod
def generate_oauth_url(cls, state: str) -> str:
return cls._generate_oauth_url_helper(cls.REDIRECT_URI, state)
@classmethod
def generate_dev_oauth_url(cls, state: str) -> str:
"""dev mode workaround for localhost testing
- https://www.nango.dev/blog/oauth-redirects-on-localhost-with-https
"""
return cls._generate_oauth_url_helper(cls.DEV_REDIRECT_URI, state)
@classmethod
def _generate_oauth_url_helper(cls, redirect_uri: str, state: str) -> str:
# without prompt=consent, a refresh token is only issued the first time the user approves
url = (
f"https://accounts.google.com/o/oauth2/v2/auth"
f"?client_id={cls.CLIENT_ID}"
f"&redirect_uri={redirect_uri}"
"&response_type=code"
f"&scope={cls.SCOPE}"
"&access_type=offline"
f"&state={state}"
"&prompt=consent"
)
return url
@classmethod
def session_dump_json(cls, email: str, redirect_on_success: str | None) -> str:
"""Temporary state to store in redis. to be looked up on auth response.
Returns a json string.
"""
session = GoogleDriveOAuth.OAuthSession(
email=email, redirect_on_success=redirect_on_success
)
return session.model_dump_json()
@classmethod
def parse_session(cls, session_json: str) -> OAuthSession:
session = GoogleDriveOAuth.OAuthSession.model_validate_json(session_json)
return session
@router.post("/prepare-authorization-request")
@@ -192,8 +278,11 @@ def prepare_authorization_request(
Example: https://www.oauth.com/oauth2-servers/authorization/the-authorization-request/
"""
# create random oauth state param for security and to retrieve user data later
oauth_uuid = uuid.uuid4()
oauth_uuid_str = str(oauth_uuid)
# urlsafe b64 encode the uuid for the oauth url
oauth_state = (
base64.urlsafe_b64encode(oauth_uuid.bytes).rstrip(b"=").decode("utf-8")
)
@@ -203,6 +292,11 @@ def prepare_authorization_request(
session = SlackOAuth.session_dump_json(
email=user.email, redirect_on_success=redirect_on_success
)
elif connector == DocumentSource.GOOGLE_DRIVE:
oauth_url = GoogleDriveOAuth.generate_oauth_url(oauth_state)
session = GoogleDriveOAuth.session_dump_json(
email=user.email, redirect_on_success=redirect_on_success
)
# elif connector == DocumentSource.CONFLUENCE:
# oauth_url = ConfluenceCloudOAuth.generate_oauth_url(oauth_state)
# session = ConfluenceCloudOAuth.session_dump_json(
@@ -210,8 +304,6 @@ def prepare_authorization_request(
# )
# elif connector == DocumentSource.JIRA:
# oauth_url = JiraCloudOAuth.generate_dev_oauth_url(oauth_state)
# elif connector == DocumentSource.GOOGLE_DRIVE:
# oauth_url = GoogleDriveOAuth.generate_dev_oauth_url(oauth_state)
else:
oauth_url = None
@@ -223,6 +315,7 @@ def prepare_authorization_request(
r = get_redis_client(tenant_id=tenant_id)
# store important session state to retrieve when the user is redirected back
# 10 min is the max we want an oauth flow to be valid
r.set(f"da_oauth:{oauth_uuid_str}", session, ex=600)
@@ -421,3 +514,116 @@ def handle_slack_oauth_callback(
# "redirect_on_success": session.redirect_on_success,
# }
# )
@router.post("/connector/google-drive/callback")
def handle_google_drive_oauth_callback(
code: str,
state: str,
user: User = Depends(current_user),
db_session: Session = Depends(get_session),
tenant_id: str | None = Depends(get_current_tenant_id),
) -> JSONResponse:
if not GoogleDriveOAuth.CLIENT_ID or not GoogleDriveOAuth.CLIENT_SECRET:
raise HTTPException(
status_code=500,
detail="Google Drive client ID or client secret is not configured.",
)
r = get_redis_client(tenant_id=tenant_id)
# recover the state
padded_state = state + "=" * (
-len(state) % 4
) # Add padding back (Base64 decoding requires padding)
uuid_bytes = base64.urlsafe_b64decode(
padded_state
) # Decode the Base64 string back to bytes
# Convert bytes back to a UUID
oauth_uuid = uuid.UUID(bytes=uuid_bytes)
oauth_uuid_str = str(oauth_uuid)
r_key = f"da_oauth:{oauth_uuid_str}"
session_json_bytes = cast(bytes, r.get(r_key))
if not session_json_bytes:
raise HTTPException(
status_code=400,
detail=f"Google Drive OAuth failed - OAuth state key not found: key={r_key}",
)
session_json = session_json_bytes.decode("utf-8")
try:
session = GoogleDriveOAuth.parse_session(session_json)
# Exchange the authorization code for an access token
response = requests.post(
GoogleDriveOAuth.TOKEN_URL,
headers={"Content-Type": "application/x-www-form-urlencoded"},
data={
"client_id": GoogleDriveOAuth.CLIENT_ID,
"client_secret": GoogleDriveOAuth.CLIENT_SECRET,
"code": code,
"redirect_uri": GoogleDriveOAuth.REDIRECT_URI,
"grant_type": "authorization_code",
},
)
response.raise_for_status()
authorization_response: dict[str, Any] = response.json()
# the connector wants us to store the json in its authorized_user_info format
# returned from OAuthCredentials.get_authorized_user_info().
# So refresh immediately via get_google_oauth_creds with the params filled in
# from fields in authorization_response to get the json we need
authorized_user_info = {}
authorized_user_info["client_id"] = OAUTH_GOOGLE_DRIVE_CLIENT_ID
authorized_user_info["client_secret"] = OAUTH_GOOGLE_DRIVE_CLIENT_SECRET
authorized_user_info["refresh_token"] = authorization_response["refresh_token"]
token_json_str = json.dumps(authorized_user_info)
oauth_creds = get_google_oauth_creds(
token_json_str=token_json_str, source=DocumentSource.GOOGLE_DRIVE
)
if not oauth_creds:
raise RuntimeError("get_google_oauth_creds returned None.")
# save off the credentials
oauth_creds_sanitized_json_str = sanitize_oauth_credentials(oauth_creds)
credential_dict: dict[str, str] = {}
credential_dict[DB_CREDENTIALS_DICT_TOKEN_KEY] = oauth_creds_sanitized_json_str
credential_dict[DB_CREDENTIALS_PRIMARY_ADMIN_KEY] = session.email
credential_dict[
DB_CREDENTIALS_AUTHENTICATION_METHOD
] = GoogleOAuthAuthenticationMethod.OAUTH_INTERACTIVE.value
credential_info = CredentialBase(
credential_json=credential_dict,
admin_public=True,
source=DocumentSource.GOOGLE_DRIVE,
name="OAuth (interactive)",
)
create_credential(credential_info, user, db_session)
except Exception as e:
return JSONResponse(
status_code=500,
content={
"success": False,
"message": f"An error occurred during Google Drive OAuth: {str(e)}",
},
)
finally:
r.delete(r_key)
# return the result
return JSONResponse(
content={
"success": True,
"message": "Google Drive OAuth completed successfully.",
"redirect_on_success": session.redirect_on_success,
}
)
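The state handling in this file round-trips a UUID through unpadded URL-safe base64: prepare_authorization_request strips the '=' padding, and the callback restores it before decoding. A standalone sketch of that round trip:

    import base64
    import uuid

    oauth_uuid = uuid.uuid4()
    state = base64.urlsafe_b64encode(oauth_uuid.bytes).rstrip(b"=").decode("utf-8")
    # ... state travels through the OAuth redirect ...
    padded = state + "=" * (-len(state) % 4)  # base64 decoding requires padding
    assert uuid.UUID(bytes=base64.urlsafe_b64decode(padded)) == oauth_uuid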

View File

@@ -179,6 +179,7 @@ def handle_simplified_chat_message(
chunks_below=0,
full_doc=chat_message_req.full_doc,
structured_response_format=chat_message_req.structured_response_format,
use_pro_search=chat_message_req.use_pro_search,
)
packets = stream_chat_message_objects(
@@ -301,6 +302,7 @@ def handle_send_message_simple_with_history(
chunks_below=0,
full_doc=req.full_doc,
structured_response_format=req.structured_response_format,
use_pro_search=req.use_pro_search,
)
packets = stream_chat_message_objects(

View File

@@ -57,6 +57,9 @@ class BasicCreateChatMessageRequest(ChunkContext):
# https://platform.openai.com/docs/guides/structured-outputs/introduction
structured_response_format: dict | None = None
# If True, uses pro search instead of basic search
use_pro_search: bool = False
class BasicCreateChatMessageWithHistoryRequest(ChunkContext):
# Last element is the new query. All previous elements are historical context
@@ -71,6 +74,8 @@ class BasicCreateChatMessageWithHistoryRequest(ChunkContext):
# only works if using an OpenAI model. See the following for more details:
# https://platform.openai.com/docs/guides/structured-outputs/introduction
structured_response_format: dict | None = None
# If True, uses pro search instead of basic search
use_pro_search: bool = False
class SimpleDoc(BaseModel):
@@ -123,6 +128,9 @@ class OneShotQARequest(ChunkContext):
# If True, skips generative an AI response to the search query
skip_gen_ai_answer_generation: bool = False
# If True, uses pro search instead of basic search
use_pro_search: bool = False
@model_validator(mode="after")
def check_persona_fields(self) -> "OneShotQARequest":
if self.persona_override_config is None and self.persona_id is None:
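A hedged example body for BasicCreateChatMessageRequest with the new flag; use_pro_search and structured_response_format come from the hunks above, while the message field name is an assumption here.

    payload = {
        "message": "Summarize our Q4 planning docs",  # assumed field name
        "use_pro_search": True,  # opt this request into pro (agentic) search
        "structured_response_format": None,
    }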

View File

@@ -196,6 +196,7 @@ def get_answer_stream(
retrieval_details=query_request.retrieval_options,
rerank_settings=query_request.rerank_settings,
db_session=db_session,
use_pro_search=query_request.use_pro_search,
)
packets = stream_chat_message_objects(

View File

@@ -13,9 +13,8 @@ from ee.onyx.db.usage_export import get_all_empty_chat_message_entries
from ee.onyx.db.usage_export import write_usage_report
from ee.onyx.server.reporting.usage_export_models import UsageReportMetadata
from ee.onyx.server.reporting.usage_export_models import UserSkeleton
from onyx.auth.schemas import UserStatus
from onyx.configs.constants import FileOrigin
from onyx.db.users import list_users
from onyx.db.users import get_all_users
from onyx.file_store.constants import MAX_IN_MEMORY_SIZE
from onyx.file_store.file_store import FileStore
from onyx.file_store.file_store import get_default_file_store
@@ -84,15 +83,15 @@ def generate_user_report(
max_size=MAX_IN_MEMORY_SIZE, mode="w+"
) as temp_file:
csvwriter = csv.writer(temp_file, delimiter=",")
csvwriter.writerow(["user_id", "status"])
csvwriter.writerow(["user_id", "is_active"])
users = list_users(db_session)
users = get_all_users(db_session)
for user in users:
user_skeleton = UserSkeleton(
user_id=str(user.id),
status=UserStatus.LIVE if user.is_active else UserStatus.DEACTIVATED,
is_active=user.is_active,
)
csvwriter.writerow([user_skeleton.user_id, user_skeleton.status])
csvwriter.writerow([user_skeleton.user_id, user_skeleton.is_active])
temp_file.seek(0)
file_store.save_file(
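With the column swap above, the generated report rows now look roughly like this (illustrative IDs):

    user_id,is_active
    0b6c6f0e-9c38-4f0e-9a2d-6a1f4c2d9b11,True
    7f3a2d41-1c9e-4b6a-8d2f-0e5a9c3b7f22,False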

View File

@@ -4,8 +4,6 @@ from uuid import UUID
from pydantic import BaseModel
from onyx.auth.schemas import UserStatus
class FlowType(str, Enum):
CHAT = "chat"
@@ -22,7 +20,7 @@ class ChatMessageSkeleton(BaseModel):
class UserSkeleton(BaseModel):
user_id: str
status: UserStatus
is_active: bool
class UsageReportMetadata(BaseModel):

View File

@@ -0,0 +1,59 @@
from sqlalchemy import select
from sqlalchemy.orm import Session
from onyx.db.models import TenantAnonymousUserPath
def get_anonymous_user_path(tenant_id: str, db_session: Session) -> str | None:
result = db_session.execute(
select(TenantAnonymousUserPath).where(
TenantAnonymousUserPath.tenant_id == tenant_id
)
)
result_scalar = result.scalar_one_or_none()
if result_scalar:
return result_scalar.anonymous_user_path
else:
return None
def modify_anonymous_user_path(
tenant_id: str, anonymous_user_path: str, db_session: Session
) -> None:
# Enforce lowercase path at DB operation level
anonymous_user_path = anonymous_user_path.lower()
existing_entry = (
db_session.query(TenantAnonymousUserPath).filter_by(tenant_id=tenant_id).first()
)
if existing_entry:
existing_entry.anonymous_user_path = anonymous_user_path
else:
new_entry = TenantAnonymousUserPath(
tenant_id=tenant_id, anonymous_user_path=anonymous_user_path
)
db_session.add(new_entry)
db_session.commit()
def get_tenant_id_for_anonymous_user_path(
anonymous_user_path: str, db_session: Session
) -> str | None:
result = db_session.execute(
select(TenantAnonymousUserPath).where(
TenantAnonymousUserPath.anonymous_user_path == anonymous_user_path
)
)
result_scalar = result.scalar_one_or_none()
if result_scalar:
return result_scalar.tenant_id
else:
return None
def validate_anonymous_user_path(path: str) -> None:
if not path or "/" in path or not path.replace("-", "").isalnum():
raise ValueError("Invalid path. Use only letters, numbers, and hyphens.")

View File

@@ -3,35 +3,124 @@ from fastapi import APIRouter
from fastapi import Depends
from fastapi import HTTPException
from fastapi import Response
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session
from ee.onyx.auth.users import current_cloud_superuser
from ee.onyx.auth.users import generate_anonymous_user_jwt_token
from ee.onyx.configs.app_configs import ANONYMOUS_USER_COOKIE_NAME
from ee.onyx.configs.app_configs import STRIPE_SECRET_KEY
from ee.onyx.server.tenants.access import control_plane_dep
from ee.onyx.server.tenants.anonymous_user_path import get_anonymous_user_path
from ee.onyx.server.tenants.anonymous_user_path import (
get_tenant_id_for_anonymous_user_path,
)
from ee.onyx.server.tenants.anonymous_user_path import modify_anonymous_user_path
from ee.onyx.server.tenants.anonymous_user_path import validate_anonymous_user_path
from ee.onyx.server.tenants.billing import fetch_billing_information
from ee.onyx.server.tenants.billing import fetch_tenant_stripe_information
from ee.onyx.server.tenants.models import AnonymousUserPath
from ee.onyx.server.tenants.models import BillingInformation
from ee.onyx.server.tenants.models import ImpersonateRequest
from ee.onyx.server.tenants.models import ProductGatingRequest
from ee.onyx.server.tenants.provisioning import delete_user_from_control_plane
from ee.onyx.server.tenants.user_mapping import get_tenant_id_for_email
from ee.onyx.server.tenants.user_mapping import remove_all_users_from_tenant
from ee.onyx.server.tenants.user_mapping import remove_users_from_tenant
from onyx.auth.users import anonymous_user_enabled
from onyx.auth.users import auth_backend
from onyx.auth.users import current_admin_user
from onyx.auth.users import get_jwt_strategy
from onyx.auth.users import get_redis_strategy
from onyx.auth.users import optional_user
from onyx.auth.users import User
from onyx.configs.app_configs import WEB_DOMAIN
from onyx.db.auth import get_user_count
from onyx.db.engine import get_current_tenant_id
from onyx.db.engine import get_session
from onyx.db.engine import get_session_with_tenant
from onyx.db.notification import create_notification
from onyx.db.users import delete_user_from_db
from onyx.db.users import get_user_by_email
from onyx.server.manage.models import UserByEmail
from onyx.server.settings.store import load_settings
from onyx.server.settings.store import store_settings
from onyx.utils.logger import setup_logger
from shared_configs.contextvars import CURRENT_TENANT_ID_CONTEXTVAR
stripe.api_key = STRIPE_SECRET_KEY
logger = setup_logger()
router = APIRouter(prefix="/tenants")
@router.get("/anonymous-user-path")
async def get_anonymous_user_path_api(
tenant_id: str | None = Depends(get_current_tenant_id),
_: User | None = Depends(current_admin_user),
) -> AnonymousUserPath:
if tenant_id is None:
raise HTTPException(status_code=404, detail="Tenant not found")
with get_session_with_tenant(tenant_id=None) as db_session:
current_path = get_anonymous_user_path(tenant_id, db_session)
return AnonymousUserPath(anonymous_user_path=current_path)
@router.post("/anonymous-user-path")
async def set_anonymous_user_path_api(
anonymous_user_path: str,
tenant_id: str = Depends(get_current_tenant_id),
_: User | None = Depends(current_admin_user),
) -> None:
try:
validate_anonymous_user_path(anonymous_user_path)
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
with get_session_with_tenant(tenant_id=None) as db_session:
try:
modify_anonymous_user_path(tenant_id, anonymous_user_path, db_session)
except IntegrityError:
raise HTTPException(
status_code=409,
detail="The anonymous user path is already in use. Please choose a different path.",
)
except Exception as e:
logger.exception(f"Failed to modify anonymous user path: {str(e)}")
raise HTTPException(
status_code=500,
detail="An unexpected error occurred while modifying the anonymous user path",
)
@router.post("/anonymous-user")
async def login_as_anonymous_user(
anonymous_user_path: str,
_: User | None = Depends(optional_user),
) -> Response:
with get_session_with_tenant(tenant_id=None) as db_session:
tenant_id = get_tenant_id_for_anonymous_user_path(
anonymous_user_path, db_session
)
if not tenant_id:
raise HTTPException(status_code=404, detail="Tenant not found")
if not anonymous_user_enabled(tenant_id=tenant_id):
raise HTTPException(status_code=403, detail="Anonymous user is not enabled")
token = generate_anonymous_user_jwt_token(tenant_id)
response = Response()
response.set_cookie(
key=ANONYMOUS_USER_COOKIE_NAME,
value=token,
httponly=True,
secure=True,
samesite="strict",
)
return response
@router.post("/product-gating")
def gate_product(
product_gating_request: ProductGatingRequest, _: None = Depends(control_plane_dep)
@@ -103,7 +192,7 @@ async def impersonate_user(
)
if user_to_impersonate is None:
raise HTTPException(status_code=404, detail="User not found")
token = await get_jwt_strategy().write_token(user_to_impersonate)
token = await get_redis_strategy().write_token(user_to_impersonate)
response = await auth_backend.transport.get_login_response(token)
response.set_cookie(
@@ -114,3 +203,48 @@ async def impersonate_user(
samesite="lax",
)
return response
@router.post("/leave-organization")
async def leave_organization(
user_email: UserByEmail,
current_user: User | None = Depends(current_admin_user),
db_session: Session = Depends(get_session),
tenant_id: str = Depends(get_current_tenant_id),
) -> None:
if current_user is None or current_user.email != user_email.user_email:
raise HTTPException(
status_code=403, detail="You can only leave the organization as yourself"
)
user_to_delete = get_user_by_email(user_email.user_email, db_session)
if user_to_delete is None:
raise HTTPException(status_code=404, detail="User not found")
num_admin_users = await get_user_count(only_admin_users=True)
should_delete_tenant = num_admin_users == 1
if should_delete_tenant:
logger.info(
"Last admin user is leaving the organization. Deleting tenant from control plane."
)
try:
await delete_user_from_control_plane(tenant_id, user_to_delete.email)
logger.debug("User deleted from control plane")
except Exception as e:
logger.exception(
f"Failed to delete user from control plane for tenant {tenant_id}: {e}"
)
raise HTTPException(
status_code=500,
detail=f"Failed to remove user from control plane: {str(e)}",
)
db_session.expunge(user_to_delete)
delete_user_from_db(user_to_delete, db_session)
if should_delete_tenant:
remove_all_users_from_tenant(tenant_id)
else:
remove_users_from_tenant([user_to_delete.email], tenant_id)
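The anonymous login flow above resolves a public path to a tenant, mints a JWT, and sets it as a cookie; the tenant middleware shown earlier then reads tenant_id back out of that token. A minimal sketch of the token shape this implies; the claim set and secret handling are assumptions, since generate_anonymous_user_jwt_token itself is not shown.

    import jwt  # PyJWT

    ANON_SECRET = "replace-me"  # placeholder secret

    def sketch_generate_anonymous_user_jwt_token(tenant_id: str) -> str:
        # tenant_id is the only claim the middleware demonstrably relies on
        return jwt.encode({"tenant_id": tenant_id}, ANON_SECRET, algorithm="HS256")

    token = sketch_generate_anonymous_user_jwt_token("tenant_123")
    decoded = jwt.decode(token, ANON_SECRET, algorithms=["HS256"])
    assert decoded["tenant_id"] == "tenant_123"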

View File

@@ -46,6 +46,7 @@ def register_tenant_users(tenant_id: str, number_of_users: int) -> stripe.Subscr
"""
Send a request to the control service to register the number of users for a tenant.
"""
if not STRIPE_PRICE_ID:
raise Exception("STRIPE_PRICE_ID is not set")

View File

@@ -39,3 +39,12 @@ class TenantCreationPayload(BaseModel):
tenant_id: str
email: str
referral_source: str | None = None
class TenantDeletionPayload(BaseModel):
tenant_id: str
email: str
class AnonymousUserPath(BaseModel):
anonymous_user_path: str | None

View File

@@ -15,6 +15,7 @@ from ee.onyx.configs.app_configs import HUBSPOT_TRACKING_URL
from ee.onyx.configs.app_configs import OPENAI_DEFAULT_API_KEY
from ee.onyx.server.tenants.access import generate_data_plane_token
from ee.onyx.server.tenants.models import TenantCreationPayload
from ee.onyx.server.tenants.models import TenantDeletionPayload
from ee.onyx.server.tenants.schema_management import create_schema_if_not_exists
from ee.onyx.server.tenants.schema_management import drop_schema
from ee.onyx.server.tenants.schema_management import run_alembic_migrations
@@ -185,6 +186,7 @@ async def rollback_tenant_provisioning(tenant_id: str) -> None:
try:
# Drop the tenant's schema to rollback provisioning
drop_schema(tenant_id)
# Remove tenant mapping
with Session(get_sqlalchemy_engine()) as db_session:
db_session.query(UserTenantMapping).filter(
@@ -320,3 +322,26 @@ async def submit_to_hubspot(
if response.status_code != 200:
logger.error(f"Failed to submit to HubSpot: {response.text}")
async def delete_user_from_control_plane(tenant_id: str, email: str) -> None:
token = generate_data_plane_token()
headers = {
"Authorization": f"Bearer {token}",
"Content-Type": "application/json",
}
payload = TenantDeletionPayload(tenant_id=tenant_id, email=email)
async with aiohttp.ClientSession() as session:
async with session.delete(
f"{CONTROL_PLANE_API_BASE_URL}/tenants/delete",
headers=headers,
json=payload.model_dump(),
) as response:
if response.status != 200:
error_text = await response.text()
logger.error(f"Control plane tenant deletion failed: {error_text}")
raise Exception(
f"Failed to delete tenant on control plane: {error_text}"
)

View File

@@ -68,3 +68,11 @@ def remove_users_from_tenant(emails: list[str], tenant_id: str) -> None:
f"Failed to remove users from tenant {tenant_id}: {str(e)}"
)
db_session.rollback()
def remove_all_users_from_tenant(tenant_id: str) -> None:
with get_session_with_tenant(POSTGRES_DEFAULT_SCHEMA) as db_session:
db_session.query(UserTenantMapping).filter(
UserTenantMapping.tenant_id == tenant_id
).delete()
db_session.commit()

View File

@@ -83,7 +83,7 @@ def patch_user_group(
def set_user_curator(
user_group_id: int,
set_curator_request: SetCuratorRequest,
_: User | None = Depends(current_admin_user),
user: User | None = Depends(current_curator_or_admin_user),
db_session: Session = Depends(get_session),
) -> None:
try:
@@ -91,6 +91,7 @@ def set_user_curator(
db_session=db_session,
user_group_id=user_group_id,
set_curator_request=set_curator_request,
user_making_change=user,
)
except ValueError as e:
logger.error(f"Error setting user curator: {e}")

View File

@@ -10,6 +10,7 @@ logger = setup_logger()
def posthog_on_error(error: Any, items: Any) -> None:
"""Log any PostHog delivery errors."""
logger.error(f"PostHog error: {error}, items: {items}")
@@ -24,15 +25,10 @@ posthog = Posthog(
def event_telemetry(
distinct_id: str, event: str, properties: dict | None = None
) -> None:
logger.info(f"Capturing Posthog event: {distinct_id} {event} {properties}")
print("API KEY", POSTHOG_API_KEY)
print("HOST", POSTHOG_HOST)
"""Capture and send an event to PostHog, flushing immediately."""
logger.info(f"Capturing PostHog event: {distinct_id} {event} {properties}")
try:
print(type(distinct_id))
print(type(event))
print(type(properties))
response = posthog.capture(distinct_id, event, properties)
posthog.capture(distinct_id, event, properties)
posthog.flush()
print(response)
except Exception as e:
logger.error(f"Error capturing Posthog event: {e}")
logger.error(f"Error capturing PostHog event: {e}")

View File

@@ -1,5 +1,6 @@
import asyncio
import json
import time
from types import TracebackType
from typing import cast
from typing import Optional
@@ -320,8 +321,6 @@ async def embed_text(
api_url: str | None,
api_version: str | None,
) -> list[Embedding]:
logger.info(f"Embedding {len(texts)} texts with provider: {provider_type}")
if not all(texts):
logger.error("Empty strings provided for embedding")
raise ValueError("Empty strings are not allowed for embedding.")
@@ -330,8 +329,17 @@ async def embed_text(
logger.error("No texts provided for embedding")
raise ValueError("No texts provided for embedding.")
start = time.monotonic()
total_chars = 0
for text in texts:
total_chars += len(text)
if provider_type is not None:
logger.debug(f"Using cloud provider {provider_type} for embedding")
logger.info(
f"Embedding {len(texts)} texts with {total_chars} total characters with provider: {provider_type}"
)
if api_key is None:
logger.error("API key not provided for cloud model")
raise RuntimeError("API key not provided for cloud model")
@@ -363,8 +371,16 @@ async def embed_text(
logger.error(error_message)
raise ValueError(error_message)
elapsed = time.monotonic() - start
logger.info(
f"Successfully embedded {len(texts)} texts with {total_chars} total characters "
f"with provider {provider_type} in {elapsed:.2f}"
)
elif model_name is not None:
logger.debug(f"Using local model {model_name} for embedding")
logger.info(
f"Embedding {len(texts)} texts with {total_chars} total characters with local model: {model_name}"
)
prefixed_texts = [f"{prefix}{text}" for text in texts] if prefix else texts
local_model = get_embedding_model(
@@ -382,13 +398,17 @@ async def embed_text(
for embedding in embeddings_vectors
]
elapsed = time.monotonic() - start
logger.info(
f"Successfully embedded {len(texts)} texts with {total_chars} total characters "
f"with local model {model_name} in {elapsed:.2f}"
)
else:
logger.error("Neither model name nor provider specified for embedding")
raise ValueError(
"Either model name or provider must be provided to run embeddings."
)
logger.info(f"Successfully embedded {len(texts)} texts")
return embeddings
@@ -440,7 +460,8 @@ async def process_embed_request(
) -> EmbedResponse:
if not embed_request.texts:
raise HTTPException(status_code=400, detail="No texts to be embedded")
elif not all(embed_request.texts):
if not all(embed_request.texts):
raise ValueError("Empty strings are not allowed for embedding.")
try:
@@ -471,9 +492,12 @@ async def process_embed_request(
detail=str(e),
)
except Exception as e:
exception_detail = f"Error during embedding process:\n{str(e)}"
logger.exception(exception_detail)
raise HTTPException(status_code=500, detail=exception_detail)
logger.exception(
f"Error during embedding process: provider={embed_request.provider_type} model={embed_request.model_name}"
)
raise HTTPException(
status_code=500, detail=f"Error during embedding process: {e}"
)
@router.post("/cross-encoder-scores")

View File

@@ -44,6 +44,7 @@ def _move_files_recursively(source: Path, dest: Path, overwrite: bool = False) -
the files in the existing huggingface cache that don't exist in the temp
huggingface cache.
"""
for item in source.iterdir():
target_path = dest / item.relative_to(source)
if item.is_dir():

View File

@@ -0,0 +1,95 @@
from langchain_core.callbacks.manager import dispatch_custom_event
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph
from onyx.agent_search.basic.states import BasicInput
from onyx.agent_search.basic.states import BasicOutput
from onyx.agent_search.basic.states import BasicState
from onyx.agent_search.basic.states import BasicStateUpdate
from onyx.tools.tool_implementations.search.search_tool import SearchTool
from onyx.chat.stream_processing.utils import (
map_document_id_order,
)
def basic_graph_builder() -> StateGraph:
graph = StateGraph(
state_schema=BasicState,
input=BasicInput,
output=BasicOutput,
)
### Add nodes ###
graph.add_node(
node="get_response",
action=get_response,
)
### Add edges ###
graph.add_edge(start_key=START, end_key="get_response")
graph.add_conditional_edges("get_response", should_continue, ["get_response", END])
graph.add_edge(
start_key="get_response",
end_key=END,
)
return graph
def should_continue(state: BasicState) -> str:
return (
END if state["last_llm_call"] is None or state["calls"] > 1 else "get_response"
)
def get_response(state: BasicState) -> BasicStateUpdate:
llm = state["llm"]
current_llm_call = state["last_llm_call"]
if current_llm_call is None:
raise ValueError("last_llm_call is None")
answer_style_config = state["answer_style_config"]
response_handler_manager = state["response_handler_manager"]
# DEBUG: good breakpoint
stream = llm.stream(
# For tool-calling LLMs, we want to insert the task prompt as part of this flow; the LLM
# may choose not to call any tools and just generate the answer, in which case the task prompt is still needed.
prompt=current_llm_call.prompt_builder.build(),
tools=[tool.tool_definition() for tool in current_llm_call.tools] or None,
tool_choice=(
"required"
if current_llm_call.tools and current_llm_call.force_use_tool.force_use
else None
),
structured_response_format=answer_style_config.structured_response_format,
)
for response in response_handler_manager.handle_llm_response(stream):
dispatch_custom_event(
"basic_response",
response,
)
next_call = response_handler_manager.next_llm_call(current_llm_call)
if next_call is not None:
final_search_results, displayed_search_results = SearchTool.get_search_result(
next_call
) or ([], [])
else:
final_search_results, displayed_search_results = [], []
response_handler_manager.answer_handler.update(
(
final_search_results,
map_document_id_order(final_search_results),
map_document_id_order(displayed_search_results),
)
)
return BasicStateUpdate(
last_llm_call=next_call,
calls=state["calls"] + 1,
)
if __name__ == "__main__":
pass
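A note on the loop above: should_continue allows at most one extra pass through get_response, since it routes back only while an llm call is pending and calls has not exceeded 1. A tiny illustration, with plain dicts standing in for BasicState:

    pending_call = object()  # stands in for a real LLMCall
    assert should_continue({"last_llm_call": pending_call, "calls": 1}) == "get_response"
    assert should_continue({"last_llm_call": None, "calls": 1}) == END
    assert should_continue({"last_llm_call": pending_call, "calls": 2}) == END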

View File

@@ -0,0 +1,42 @@
from typing import TypedDict
from onyx.chat.llm_response_handler import LLMResponseHandlerManager
from onyx.chat.models import AnswerStyleConfig
from onyx.chat.prompt_builder.build import LLMCall
from onyx.llm.chat_llm import LLM
## Update States
## Graph Input State
class BasicInput(TypedDict):
base_question: str
last_llm_call: LLMCall | None
llm: LLM
answer_style_config: AnswerStyleConfig
response_handler_manager: LLMResponseHandlerManager
calls: int
## Graph Output State
class BasicOutput(TypedDict):
pass
class BasicStateUpdate(TypedDict):
last_llm_call: LLMCall | None
calls: int
## Graph State
class BasicState(
BasicInput,
BasicOutput,
):
pass

View File

@@ -0,0 +1,18 @@
from operator import add
from typing import Annotated
from typing import TypedDict
class CoreState(TypedDict, total=False):
"""
This is the core state that is shared across all subgraphs.
"""
base_question: str
log_messages: Annotated[list[str], add]
class SubgraphCoreState(TypedDict, total=False):
"""
This is the core state that is shared across all subgraphs.
"""

View File

@@ -0,0 +1,66 @@
from uuid import UUID
from sqlalchemy.orm import Session
from onyx.db.models import AgentSubQuery
from onyx.db.models import AgentSubQuestion
def create_sub_question(
db_session: Session,
chat_session_id: UUID,
primary_message_id: int,
sub_question: str,
sub_answer: str,
) -> AgentSubQuestion:
"""Create a new sub-question record in the database."""
sub_q = AgentSubQuestion(
chat_session_id=chat_session_id,
primary_question_id=primary_message_id,
sub_question=sub_question,
sub_answer=sub_answer,
)
db_session.add(sub_q)
db_session.flush()
return sub_q
def create_sub_query(
db_session: Session,
chat_session_id: UUID,
parent_question_id: int,
sub_query: str,
) -> AgentSubQuery:
"""Create a new sub-query record in the database."""
sub_q = AgentSubQuery(
chat_session_id=chat_session_id,
parent_question_id=parent_question_id,
sub_query=sub_query,
)
db_session.add(sub_q)
db_session.flush()
return sub_q
def get_sub_questions_for_message(
db_session: Session,
primary_message_id: int,
) -> list[AgentSubQuestion]:
"""Get all sub-questions for a given primary message."""
return (
db_session.query(AgentSubQuestion)
.filter(AgentSubQuestion.primary_question_id == primary_message_id)
.all()
)
def get_sub_queries_for_question(
db_session: Session,
sub_question_id: int,
) -> list[AgentSubQuery]:
"""Get all sub-queries for a given sub-question."""
return (
db_session.query(AgentSubQuery)
.filter(AgentSubQuery.parent_question_id == sub_question_id)
.all()
)
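A hedged usage sketch for the helpers above within one session; the flush() inside create_sub_question is what makes sub_q.id available before the final commit. chat_session_id and assistant_message_id are placeholders.

    with get_session_context_manager() as db_session:
        sub_q = create_sub_question(
            db_session,
            chat_session_id=chat_session_id,          # placeholder UUID
            primary_message_id=assistant_message_id,  # placeholder int
            sub_question="What connectors does Onyx support?",
            sub_answer="Onyx supports Slack, Google Drive, Confluence, ...",
        )
        create_sub_query(
            db_session,
            chat_session_id=chat_session_id,
            parent_question_id=sub_q.id,  # populated thanks to flush()
            sub_query="onyx supported connectors list",
        )
        db_session.commit()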

View File

@@ -0,0 +1,49 @@
from dataclasses import dataclass
from uuid import UUID
from pydantic import BaseModel
from sqlalchemy.orm import Session
from onyx.context.search.models import SearchRequest
from onyx.llm.interfaces import LLM
from onyx.llm.models import PreviousMessage
from onyx.tools.tool_implementations.search.search_tool import SearchTool
@dataclass
class ProSearchConfig:
"""
Configuration for the Pro Search feature.
"""
# The search request that was used to generate the Pro Search
search_request: SearchRequest
primary_llm: LLM
fast_llm: LLM
search_tool: SearchTool
use_agentic_search: bool = False
# For persisting agent search data
chat_session_id: UUID | None = None
# The message ID of the user message that triggered the Pro Search
message_id: int | None = None
# Whether to persist data for the Pro Search (turned off for testing)
use_persistence: bool = True
# The database session for the Pro Search
db_session: Session | None = None
# Whether to allow creation of refinement questions (and entity extraction, etc.)
allow_refinement: bool = False
# Message history for the current chat session
message_history: list[PreviousMessage] | None = None
class AgentDocumentCitations(BaseModel):
document_id: str
document_title: str
link: str
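A minimal construction sketch for ProSearchConfig using only the fields shown above; get_default_llms and SearchRequest mirror the __main__ example later in this diff, and the search_tool wiring is left as a placeholder.

    primary_llm, fast_llm = get_default_llms()
    config = ProSearchConfig(
        search_request=SearchRequest(query="what can you do with onyx?"),
        primary_llm=primary_llm,
        fast_llm=fast_llm,
        search_tool=search_tool,   # placeholder: built by the chat pipeline
        use_agentic_search=True,
        allow_refinement=False,
        use_persistence=False,     # handy for tests, per the comment above
    )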

View File

@@ -0,0 +1,26 @@
from collections.abc import Hashable
from langgraph.types import Send
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
AnswerQuestionInput,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
ExpandedRetrievalInput,
)
from onyx.utils.logger import setup_logger
logger = setup_logger()
def send_to_expanded_retrieval(state: AnswerQuestionInput) -> Send | Hashable:
logger.debug("sending to expanded retrieval via edge")
return Send(
"initial_sub_question_expanded_retrieval",
ExpandedRetrievalInput(
question=state["question"],
base_search=False,
sub_question_id=state["question_id"],
),
)

View File

@@ -0,0 +1,125 @@
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph
from onyx.agent_search.pro_search_a.answer_initial_sub_question.edges import (
send_to_expanded_retrieval,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.answer_check import (
answer_check,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.answer_generation import (
answer_generation,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.format_answer import (
format_answer,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.ingest_retrieval import (
ingest_retrieval,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
AnswerQuestionInput,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
AnswerQuestionOutput,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
AnswerQuestionState,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.graph_builder import (
expanded_retrieval_graph_builder,
)
from onyx.agent_search.shared_graph_utils.utils import get_test_config
from onyx.utils.logger import setup_logger
logger = setup_logger()
def answer_query_graph_builder() -> StateGraph:
graph = StateGraph(
state_schema=AnswerQuestionState,
input=AnswerQuestionInput,
output=AnswerQuestionOutput,
)
### Add nodes ###
expanded_retrieval = expanded_retrieval_graph_builder().compile()
graph.add_node(
node="initial_sub_question_expanded_retrieval",
action=expanded_retrieval,
)
graph.add_node(
node="answer_check",
action=answer_check,
)
graph.add_node(
node="answer_generation",
action=answer_generation,
)
graph.add_node(
node="format_answer",
action=format_answer,
)
graph.add_node(
node="ingest_retrieval",
action=ingest_retrieval,
)
### Add edges ###
graph.add_conditional_edges(
source=START,
path=send_to_expanded_retrieval,
path_map=["initial_sub_question_expanded_retrieval"],
)
graph.add_edge(
start_key="initial_sub_question_expanded_retrieval",
end_key="ingest_retrieval",
)
graph.add_edge(
start_key="ingest_retrieval",
end_key="answer_generation",
)
graph.add_edge(
start_key="answer_generation",
end_key="answer_check",
)
graph.add_edge(
start_key="answer_check",
end_key="format_answer",
)
graph.add_edge(
start_key="format_answer",
end_key=END,
)
return graph
if __name__ == "__main__":
from onyx.db.engine import get_session_context_manager
from onyx.llm.factory import get_default_llms
from onyx.context.search.models import SearchRequest
graph = answer_query_graph_builder()
compiled_graph = graph.compile()
primary_llm, fast_llm = get_default_llms()
search_request = SearchRequest(
query="what can you do with onyx or danswer?",
)
with get_session_context_manager() as db_session:
pro_search_config, search_tool = get_test_config(
db_session, primary_llm, fast_llm, search_request
)
inputs = AnswerQuestionInput(
question="what can you do with onyx?",
question_id="0_0",
)
for thing in compiled_graph.stream(
input=inputs,
config={"configurable": {"config": pro_search_config}},
# debug=True,
# subgraphs=True,
):
logger.debug(thing)

View File

@@ -0,0 +1,8 @@
from pydantic import BaseModel
### Models ###
class AnswerRetrievalStats(BaseModel):
answer_retrieval_stats: dict[str, float | int]

View File

@@ -0,0 +1,45 @@
from typing import cast
from langchain_core.messages import HumanMessage
from langchain_core.messages import merge_message_runs
from langchain_core.runnables.config import RunnableConfig
from onyx.agent_search.models import ProSearchConfig
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
AnswerQuestionState,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
QACheckUpdate,
)
from onyx.agent_search.shared_graph_utils.prompts import SUB_CHECK_NO
from onyx.agent_search.shared_graph_utils.prompts import SUB_CHECK_PROMPT
from onyx.agent_search.shared_graph_utils.prompts import UNKNOWN_ANSWER
def answer_check(state: AnswerQuestionState, config: RunnableConfig) -> QACheckUpdate:
if state["answer"] == UNKNOWN_ANSWER:
return QACheckUpdate(
answer_quality=SUB_CHECK_NO,
)
msg = [
HumanMessage(
content=SUB_CHECK_PROMPT.format(
question=state["question"],
base_answer=state["answer"],
)
)
]
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
fast_llm = pro_search_config.fast_llm
response = list(
fast_llm.stream(
prompt=msg,
)
)
quality_str = merge_message_runs(response, chunk_separator="")[0].content
return QACheckUpdate(
answer_quality=quality_str,
)

View File

@@ -0,0 +1,106 @@
import datetime
from typing import Any
from typing import cast
from langchain_core.callbacks.manager import dispatch_custom_event
from langchain_core.messages import merge_message_runs
from langchain_core.runnables.config import RunnableConfig
from onyx.agent_search.models import ProSearchConfig
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
AnswerQuestionState,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
QAGenerationUpdate,
)
from onyx.agent_search.shared_graph_utils.agent_prompt_ops import (
build_sub_question_answer_prompt,
)
from onyx.agent_search.shared_graph_utils.prompts import ASSISTANT_SYSTEM_PROMPT_DEFAULT
from onyx.agent_search.shared_graph_utils.prompts import ASSISTANT_SYSTEM_PROMPT_PERSONA
from onyx.agent_search.shared_graph_utils.prompts import UNKNOWN_ANSWER
from onyx.agent_search.shared_graph_utils.utils import get_persona_prompt
from onyx.agent_search.shared_graph_utils.utils import parse_question_id
from onyx.chat.models import AgentAnswerPiece
from onyx.chat.models import StreamStopInfo
from onyx.chat.models import StreamStopReason
from onyx.utils.logger import setup_logger
logger = setup_logger()
def answer_generation(
state: AnswerQuestionState, config: RunnableConfig
) -> QAGenerationUpdate:
now_start = datetime.datetime.now()
logger.debug(f"--------{now_start}--------START ANSWER GENERATION---")
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
question = state["question"]
docs = state["documents"]
level, question_nr = parse_question_id(state["question_id"])
persona_prompt = get_persona_prompt(pro_search_config.search_request.persona)
if len(docs) == 0:
answer_str = UNKNOWN_ANSWER
dispatch_custom_event(
"sub_answers",
AgentAnswerPiece(
answer_piece=answer_str,
level=level,
level_question_nr=question_nr,
answer_type="agent_sub_answer",
),
)
else:
if len(persona_prompt) == 0:
persona_specification = ASSISTANT_SYSTEM_PROMPT_DEFAULT
else:
persona_specification = ASSISTANT_SYSTEM_PROMPT_PERSONA.format(
persona_prompt=persona_prompt
)
logger.debug(f"Number of verified retrieval docs: {len(docs)}")
fast_llm = pro_search_config.fast_llm
msg = build_sub_question_answer_prompt(
question=question,
original_question=pro_search_config.search_request.query,
docs=docs,
persona_specification=persona_specification,
config=fast_llm.config,
)
response: list[str | list[str | dict[str, Any]]] = []
for message in fast_llm.stream(
prompt=msg,
):
# TODO: in principle, the answer here COULD contain images, but we don't support that yet
content = message.content
if not isinstance(content, str):
raise ValueError(
f"Expected content to be a string, but got {type(content)}"
)
dispatch_custom_event(
"sub_answers",
AgentAnswerPiece(
answer_piece=content,
level=level,
level_question_nr=question_nr,
answer_type="agent_sub_answer",
),
)
response.append(content)
answer_str = merge_message_runs(response, chunk_separator="")[0].content
stop_event = StreamStopInfo(
stop_reason=StreamStopReason.FINISHED,
level=level,
level_question_nr=question_nr,
)
dispatch_custom_event("sub_answer_finished", stop_event)
return QAGenerationUpdate(
answer=answer_str,
)

View File

@@ -0,0 +1,29 @@
from langchain_core.runnables.config import RunnableConfig
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
AnswerQuestionOutput,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
AnswerQuestionState,
)
from onyx.agent_search.shared_graph_utils.models import (
QuestionAnswerResults,
)
def format_answer(
state: AnswerQuestionState, config: RunnableConfig
) -> AnswerQuestionOutput:
return AnswerQuestionOutput(
answer_results=[
QuestionAnswerResults(
question=state["question"],
question_id=state["question_id"],
quality=state.get("answer_quality", "No"),
answer=state["answer"],
expanded_retrieval_results=state["expanded_retrieval_results"],
documents=state["documents"],
sub_question_retrieval_stats=state["sub_question_retrieval_stats"],
)
],
)

View File

@@ -0,0 +1,27 @@
from langchain_core.runnables.config import RunnableConfig
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
RetrievalIngestionUpdate,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
ExpandedRetrievalOutput,
)
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
def ingest_retrieval(
state: ExpandedRetrievalOutput, config: RunnableConfig
) -> RetrievalIngestionUpdate:
sub_question_retrieval_stats = state[
"expanded_retrieval_result"
].sub_question_retrieval_stats
if sub_question_retrieval_stats is None:
sub_question_retrieval_stats = AgentChunkStats()
return RetrievalIngestionUpdate(
expanded_retrieval_results=state[
"expanded_retrieval_result"
].expanded_queries_results,
documents=state["expanded_retrieval_result"].all_documents,
sub_question_retrieval_stats=sub_question_retrieval_stats,
)

View File

@@ -0,0 +1,63 @@
from operator import add
from typing import Annotated
from typing import TypedDict
from onyx.agent_search.core_state import SubgraphCoreState
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.agent_search.shared_graph_utils.models import QueryResult
from onyx.agent_search.shared_graph_utils.models import (
QuestionAnswerResults,
)
from onyx.agent_search.shared_graph_utils.operators import dedup_inference_sections
from onyx.context.search.models import InferenceSection
## Update States
class QACheckUpdate(TypedDict):
answer_quality: str
class QAGenerationUpdate(TypedDict):
answer: str
# answer_stat: AnswerStats
class RetrievalIngestionUpdate(TypedDict):
expanded_retrieval_results: list[QueryResult]
documents: Annotated[list[InferenceSection], dedup_inference_sections]
sub_question_retrieval_stats: AgentChunkStats
## Graph Input State
class AnswerQuestionInput(SubgraphCoreState):
question: str
question_id: str # 0_0 is original question, everything else is <level>_<question_num>.
# level 0 is original question and first decomposition, level 1 is follow up, etc
# question_num is a unique number per original question per level.
## Graph State
class AnswerQuestionState(
AnswerQuestionInput,
QAGenerationUpdate,
QACheckUpdate,
RetrievalIngestionUpdate,
):
pass
## Graph Output State
class AnswerQuestionOutput(TypedDict):
"""
This is a list of results even though each call of this subgraph only returns one result.
This is because if we parallelize the answer query subgraph, there will be multiple
results in a list so the add operator is used to add them together.
"""
answer_results: Annotated[list[QuestionAnswerResults], add]

View File
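
The question_id convention documented above ("0_0" for the original question, "<level>_<question_num>" otherwise) is produced and consumed by make_question_id and parse_question_id from onyx.agent_search.shared_graph_utils.utils, which are not shown in this diff. A plausible sketch of that contract, for illustration only:

def make_question_id(level: int, question_num: int) -> str:
    # level 0: original question and first decomposition; level 1: refined follow-ups
    return f"{level}_{question_num}"

def parse_question_id(question_id: str) -> tuple[int, int]:
    level, question_num = question_id.split("_")
    return int(level), int(question_num)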

@@ -0,0 +1,26 @@
from collections.abc import Hashable
from langgraph.types import Send
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
AnswerQuestionInput,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
ExpandedRetrievalInput,
)
from onyx.utils.logger import setup_logger
logger = setup_logger()
def send_to_expanded_refined_retrieval(state: AnswerQuestionInput) -> Send | Hashable:
logger.debug("sending to expanded retrieval for follow up question via edge")
return Send(
"refined_sub_question_expanded_retrieval",
ExpandedRetrievalInput(
question=state["question"],
sub_question_id=state["question_id"],
base_search=False,
),
)

View File

@@ -0,0 +1,122 @@
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.answer_check import (
answer_check,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.answer_generation import (
answer_generation,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.format_answer import (
format_answer,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.nodes.ingest_retrieval import (
ingest_retrieval,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
AnswerQuestionInput,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
AnswerQuestionOutput,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
AnswerQuestionState,
)
from onyx.agent_search.pro_search_a.answer_refinement_sub_question.edges import (
send_to_expanded_refined_retrieval,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.graph_builder import (
expanded_retrieval_graph_builder,
)
from onyx.utils.logger import setup_logger
logger = setup_logger()
def answer_refined_query_graph_builder() -> StateGraph:
graph = StateGraph(
state_schema=AnswerQuestionState,
input=AnswerQuestionInput,
output=AnswerQuestionOutput,
)
### Add nodes ###
expanded_retrieval = expanded_retrieval_graph_builder().compile()
graph.add_node(
node="refined_sub_question_expanded_retrieval",
action=expanded_retrieval,
)
graph.add_node(
node="refined_sub_answer_check",
action=answer_check,
)
graph.add_node(
node="refined_sub_answer_generation",
action=answer_generation,
)
graph.add_node(
node="format_refined_sub_answer",
action=format_answer,
)
graph.add_node(
node="ingest_refined_retrieval",
action=ingest_retrieval,
)
### Add edges ###
graph.add_conditional_edges(
source=START,
path=send_to_expanded_refined_retrieval,
path_map=["refined_sub_question_expanded_retrieval"],
)
graph.add_edge(
start_key="refined_sub_question_expanded_retrieval",
end_key="ingest_refined_retrieval",
)
graph.add_edge(
start_key="ingest_refined_retrieval",
end_key="refined_sub_answer_generation",
)
graph.add_edge(
start_key="refined_sub_answer_generation",
end_key="refined_sub_answer_check",
)
graph.add_edge(
start_key="refined_sub_answer_check",
end_key="format_refined_sub_answer",
)
graph.add_edge(
start_key="format_refined_sub_answer",
end_key=END,
)
return graph
if __name__ == "__main__":
from onyx.db.engine import get_session_context_manager
from onyx.llm.factory import get_default_llms
from onyx.context.search.models import SearchRequest
from onyx.agent_search.shared_graph_utils.utils import get_test_config
graph = answer_refined_query_graph_builder()
compiled_graph = graph.compile()
primary_llm, fast_llm = get_default_llms()
search_request = SearchRequest(
query="what can you do with onyx or danswer?",
)
with get_session_context_manager() as db_session:
pro_search_config, search_tool = get_test_config(
db_session, primary_llm, fast_llm, search_request
)
inputs = AnswerQuestionInput(
question="what can you do with onyx?",
question_id="0_0",
)
for thing in compiled_graph.stream(
input=inputs,
config={"configurable": {"config": pro_search_config}},
# debug=True,
# subgraphs=True,
):
logger.debug(thing)
# output = compiled_graph.invoke(inputs)
# logger.debug(output)

View File

@@ -0,0 +1,19 @@
from pydantic import BaseModel
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.context.search.models import InferenceSection
### Models ###
class AnswerRetrievalStats(BaseModel):
answer_retrieval_stats: dict[str, float | int]
class QuestionAnswerResults(BaseModel):
question: str
answer: str
quality: str
# expanded_retrieval_results: list[QueryResult]
documents: list[InferenceSection]
sub_question_retrieval_stats: AgentChunkStats

View File

@@ -0,0 +1,70 @@
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph
from onyx.agent_search.pro_search_a.base_raw_search.nodes.format_raw_search_results import (
format_raw_search_results,
)
from onyx.agent_search.pro_search_a.base_raw_search.nodes.generate_raw_search_data import (
generate_raw_search_data,
)
from onyx.agent_search.pro_search_a.base_raw_search.states import BaseRawSearchInput
from onyx.agent_search.pro_search_a.base_raw_search.states import BaseRawSearchOutput
from onyx.agent_search.pro_search_a.base_raw_search.states import BaseRawSearchState
from onyx.agent_search.pro_search_a.expanded_retrieval.graph_builder import (
expanded_retrieval_graph_builder,
)
def base_raw_search_graph_builder() -> StateGraph:
graph = StateGraph(
state_schema=BaseRawSearchState,
input=BaseRawSearchInput,
output=BaseRawSearchOutput,
)
### Add nodes ###
graph.add_node(
node="generate_raw_search_data",
action=generate_raw_search_data,
)
expanded_retrieval = expanded_retrieval_graph_builder().compile()
graph.add_node(
node="expanded_retrieval_base_search",
action=expanded_retrieval,
)
graph.add_node(
node="format_raw_search_results",
action=format_raw_search_results,
)
### Add edges ###
graph.add_edge(start_key=START, end_key="generate_raw_search_data")
graph.add_edge(
start_key="generate_raw_search_data",
end_key="expanded_retrieval_base_search",
)
graph.add_edge(
start_key="expanded_retrieval_base_search",
end_key="format_raw_search_results",
)
# graph.add_edge(
# start_key="expanded_retrieval_base_search",
# end_key=END,
# )
graph.add_edge(
start_key="format_raw_search_results",
end_key=END,
)
return graph
if __name__ == "__main__":
pass

View File

@@ -0,0 +1,20 @@
from pydantic import BaseModel
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.agent_search.shared_graph_utils.models import QueryResult
from onyx.context.search.models import InferenceSection
### Models ###
class AnswerRetrievalStats(BaseModel):
answer_retrieval_stats: dict[str, float | int]
class QuestionAnswerResults(BaseModel):
question: str
answer: str
quality: str
expanded_retrieval_results: list[QueryResult]
documents: list[InferenceSection]
sub_question_retrieval_stats: list[AgentChunkStats]

View File

@@ -0,0 +1,16 @@
from onyx.agent_search.pro_search_a.base_raw_search.states import BaseRawSearchOutput
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
ExpandedRetrievalOutput,
)
from onyx.utils.logger import setup_logger
logger = setup_logger()
def format_raw_search_results(state: ExpandedRetrievalOutput) -> BaseRawSearchOutput:
logger.debug("format_raw_search_results")
return BaseRawSearchOutput(
base_expanded_retrieval_result=state["expanded_retrieval_result"],
# base_retrieval_results=[state["expanded_retrieval_result"]],
# base_search_documents=[],
)

View File

@@ -0,0 +1,24 @@
from typing import cast
from langchain_core.runnables.config import RunnableConfig
from onyx.agent_search.core_state import CoreState
from onyx.agent_search.models import ProSearchConfig
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
ExpandedRetrievalInput,
)
from onyx.utils.logger import setup_logger
logger = setup_logger()
def generate_raw_search_data(
state: CoreState, config: RunnableConfig
) -> ExpandedRetrievalInput:
logger.debug("generate_raw_search_data")
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
return ExpandedRetrievalInput(
question=pro_search_config.search_request.query,
base_search=True,
sub_question_id="oogabooga", # This graph is always and only used for the original question
)

View File

@@ -0,0 +1,42 @@
from typing import TypedDict
from onyx.agent_search.core_state import CoreState
from onyx.agent_search.core_state import SubgraphCoreState
from onyx.agent_search.pro_search_a.expanded_retrieval.models import (
ExpandedRetrievalResult,
)
## Update States
## Graph Input State
class BaseRawSearchInput(CoreState, SubgraphCoreState):
pass
## Graph Output State
class BaseRawSearchOutput(TypedDict):
"""
This is a list of results even though each call of this subgraph only returns one result.
This is because if we parallelize the answer query subgraph, there will be multiple
results in a list so the add operator is used to add them together.
"""
# base_search_documents: Annotated[list[InferenceSection], dedup_inference_sections]
# base_retrieval_results: Annotated[list[ExpandedRetrievalResult], add]
base_expanded_retrieval_result: ExpandedRetrievalResult
## Graph State
class BaseRawSearchState(
BaseRawSearchInput,
BaseRawSearchOutput,
):
pass

View File

@@ -0,0 +1,32 @@
from collections.abc import Hashable
from typing import cast
from langchain_core.runnables.config import RunnableConfig
from langgraph.types import Send
from onyx.agent_search.models import ProSearchConfig
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import RetrievalInput
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
ExpandedRetrievalState,
)
def parallel_retrieval_edge(
state: ExpandedRetrievalState, config: RunnableConfig
) -> list[Send | Hashable]:
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
question = state.get("question", pro_search_config.search_request.query)
query_expansions = state.get("expanded_queries", []) + [question]
return [
Send(
"doc_retrieval",
RetrievalInput(
query_to_retrieve=query,
question=question,
base_search=False,
sub_question_id=state.get("sub_question_id"),
),
)
for query in query_expansions
]

View File
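
parallel_retrieval_edge above returns one Send per expanded query, and LangGraph turns each Send into a parallel doc_retrieval run whose update is merged back into the state by the Annotated reducers. A self-contained toy version of that fan-out pattern (generic LangGraph usage, not Onyx code; assumes langgraph is installed and the node and field names are invented for illustration):

from operator import add
from typing import Annotated, TypedDict

from langgraph.graph import END, START, StateGraph
from langgraph.types import Send

class OverallState(TypedDict):
    queries: list[str]
    results: Annotated[list[str], add]  # parallel branch results are concatenated

class RetrieveInput(TypedDict):
    query: str

def retrieve(state: RetrieveInput) -> dict:
    # each parallel branch contributes a one-element list to "results"
    return {"results": [f"docs for: {state['query']}"]}

def fan_out(state: OverallState) -> list[Send]:
    # one Send per query -> one parallel "retrieve" invocation per query
    return [Send("retrieve", {"query": q}) for q in state["queries"]]

builder = StateGraph(OverallState)
builder.add_node("retrieve", retrieve)
builder.add_conditional_edges(START, fan_out, ["retrieve"])
builder.add_edge("retrieve", END)
print(builder.compile().invoke({"queries": ["a", "b"], "results": []}))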

@@ -0,0 +1,122 @@
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph
from onyx.agent_search.pro_search_a.expanded_retrieval.edges import (
parallel_retrieval_edge,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import doc_reranking
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import doc_retrieval
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import doc_verification
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import expand_queries
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import format_results
from onyx.agent_search.pro_search_a.expanded_retrieval.nodes import verification_kickoff
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
ExpandedRetrievalInput,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
ExpandedRetrievalOutput,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
ExpandedRetrievalState,
)
from onyx.agent_search.shared_graph_utils.utils import get_test_config
from onyx.utils.logger import setup_logger
logger = setup_logger()
def expanded_retrieval_graph_builder() -> StateGraph:
graph = StateGraph(
state_schema=ExpandedRetrievalState,
input=ExpandedRetrievalInput,
output=ExpandedRetrievalOutput,
)
### Add nodes ###
graph.add_node(
node="expand_queries",
action=expand_queries,
)
graph.add_node(
node="doc_retrieval",
action=doc_retrieval,
)
graph.add_node(
node="verification_kickoff",
action=verification_kickoff,
)
graph.add_node(
node="doc_verification",
action=doc_verification,
)
graph.add_node(
node="doc_reranking",
action=doc_reranking,
)
graph.add_node(
node="format_results",
action=format_results,
)
### Add edges ###
graph.add_edge(
start_key=START,
end_key="expand_queries",
)
graph.add_conditional_edges(
source="expand_queries",
path=parallel_retrieval_edge,
path_map=["doc_retrieval"],
)
graph.add_edge(
start_key="doc_retrieval",
end_key="verification_kickoff",
)
graph.add_edge(
start_key="doc_verification",
end_key="doc_reranking",
)
graph.add_edge(
start_key="doc_reranking",
end_key="format_results",
)
graph.add_edge(
start_key="format_results",
end_key=END,
)
return graph
if __name__ == "__main__":
from onyx.db.engine import get_session_context_manager
from onyx.llm.factory import get_default_llms
from onyx.context.search.models import SearchRequest
graph = expanded_retrieval_graph_builder()
compiled_graph = graph.compile()
primary_llm, fast_llm = get_default_llms()
search_request = SearchRequest(
query="what can you do with onyx or danswer?",
)
with get_session_context_manager() as db_session:
pro_search_config, search_tool = get_test_config(
db_session, primary_llm, fast_llm, search_request
)
inputs = ExpandedRetrievalInput(
question="what can you do with onyx?",
base_search=False,
sub_question_id=None,
)
for thing in compiled_graph.stream(
input=inputs,
config={"configurable": {"config": pro_search_config}},
# debug=True,
subgraphs=True,
):
logger.debug(thing)

View File

@@ -0,0 +1,11 @@
from pydantic import BaseModel
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.agent_search.shared_graph_utils.models import QueryResult
from onyx.context.search.models import InferenceSection
class ExpandedRetrievalResult(BaseModel):
expanded_queries_results: list[QueryResult]
all_documents: list[InferenceSection]
sub_question_retrieval_stats: AgentChunkStats

View File

@@ -0,0 +1,431 @@
from collections import defaultdict
from collections.abc import Callable
from typing import cast
from typing import Literal
import numpy as np
from langchain_core.callbacks.manager import dispatch_custom_event
from langchain_core.messages import HumanMessage
from langchain_core.messages import merge_message_runs
from langchain_core.runnables.config import RunnableConfig
from langgraph.types import Command
from langgraph.types import Send
from onyx.agent_search.models import ProSearchConfig
from onyx.agent_search.pro_search_a.expanded_retrieval.models import (
ExpandedRetrievalResult,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import DocRerankingUpdate
from onyx.agent_search.pro_search_a.expanded_retrieval.states import DocRetrievalUpdate
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
DocVerificationInput,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
DocVerificationUpdate,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
ExpandedRetrievalInput,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
ExpandedRetrievalState,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
ExpandedRetrievalUpdate,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import InferenceSection
from onyx.agent_search.pro_search_a.expanded_retrieval.states import (
QueryExpansionUpdate,
)
from onyx.agent_search.pro_search_a.expanded_retrieval.states import RetrievalInput
from onyx.agent_search.shared_graph_utils.agent_prompt_ops import trim_prompt_piece
from onyx.agent_search.shared_graph_utils.calculations import get_fit_scores
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.agent_search.shared_graph_utils.models import QueryResult
from onyx.agent_search.shared_graph_utils.models import RetrievalFitStats
from onyx.agent_search.shared_graph_utils.prompts import REWRITE_PROMPT_MULTI_ORIGINAL
from onyx.agent_search.shared_graph_utils.prompts import VERIFIER_PROMPT
from onyx.agent_search.shared_graph_utils.utils import dispatch_separated
from onyx.agent_search.shared_graph_utils.utils import parse_question_id
from onyx.chat.models import ExtendedToolResponse
from onyx.chat.models import SubQueryPiece
from onyx.configs.dev_configs import AGENT_MAX_QUERY_RETRIEVAL_RESULTS
from onyx.configs.dev_configs import AGENT_RERANKING_MAX_QUERY_RETRIEVAL_RESULTS
from onyx.configs.dev_configs import AGENT_RERANKING_STATS
from onyx.configs.dev_configs import AGENT_RETRIEVAL_STATS
from onyx.context.search.models import SearchRequest
from onyx.context.search.pipeline import retrieval_preprocessing
from onyx.context.search.postprocessing.postprocessing import rerank_sections
from onyx.db.engine import get_session_context_manager
from onyx.tools.models import SearchQueryInfo
from onyx.tools.tool_implementations.search.search_tool import (
SEARCH_RESPONSE_SUMMARY_ID,
)
from onyx.tools.tool_implementations.search.search_tool import SearchResponseSummary
from onyx.tools.tool_implementations.search.search_tool import yield_search_responses
from onyx.utils.logger import setup_logger
logger = setup_logger()
def dispatch_subquery(level: int, question_nr: int) -> Callable[[str, int], None]:
def helper(token: str, num: int) -> None:
dispatch_custom_event(
"subqueries",
SubQueryPiece(
sub_query=token,
level=level,
level_question_nr=question_nr,
query_id=num,
),
)
return helper
def expand_queries(
state: ExpandedRetrievalInput, config: RunnableConfig
) -> QueryExpansionUpdate:
# Sometimes we want to expand the original question, sometimes we want to expand a sub-question.
# When we are running this node on the original question, no question is explicitly passed in.
# Instead, we use the original question from the search request.
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
question = state.get("question", pro_search_config.search_request.query)
llm = pro_search_config.fast_llm
chat_session_id = pro_search_config.chat_session_id
sub_question_id = state.get("sub_question_id")
if sub_question_id is None:
level, question_nr = 0, 0
else:
level, question_nr = parse_question_id(sub_question_id)
if chat_session_id is None:
raise ValueError("chat_session_id must be provided for agent search")
msg = [
HumanMessage(
content=REWRITE_PROMPT_MULTI_ORIGINAL.format(question=question),
)
]
llm_response_list = dispatch_separated(
llm.stream(prompt=msg), dispatch_subquery(level, question_nr)
)
llm_response = merge_message_runs(llm_response_list, chunk_separator="")[0].content
rewritten_queries = llm_response.split("\n")
return QueryExpansionUpdate(
expanded_queries=rewritten_queries,
)
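# Example of the parsing above (illustrative): an LLM response such as
# "onyx features\nonyx integrations\ndanswer capabilities"
# yields expanded_queries == ["onyx features", "onyx integrations", "danswer capabilities"];
# parallel_retrieval_edge later appends the original question to that list.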
def doc_retrieval(state: RetrievalInput, config: RunnableConfig) -> DocRetrievalUpdate:
"""
Retrieve documents
Args:
state (RetrievalInput): Primary state + the query to retrieve
config (RunnableConfig): Configuration containing ProSearchConfig
Updates:
expanded_retrieval_results: list[ExpandedRetrievalResult]
retrieved_documents: list[InferenceSection]
"""
query_to_retrieve = state["query_to_retrieve"]
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
search_tool = pro_search_config.search_tool
retrieved_docs: list[InferenceSection] = []
if not query_to_retrieve.strip():
logger.warning("Empty query, skipping retrieval")
return DocRetrievalUpdate(
expanded_retrieval_results=[],
retrieved_documents=[],
)
query_info = None
# new db session to avoid concurrency issues
with get_session_context_manager() as db_session:
for tool_response in search_tool.run(
query=query_to_retrieve,
force_no_rerank=True,
alternate_db_session=db_session,
):
# get retrieved docs to send to the rest of the graph
if tool_response.id == SEARCH_RESPONSE_SUMMARY_ID:
response = cast(SearchResponseSummary, tool_response.response)
retrieved_docs = response.top_sections
query_info = SearchQueryInfo(
predicted_search=response.predicted_search,
final_filters=response.final_filters,
recency_bias_multiplier=response.recency_bias_multiplier,
)
break
retrieved_docs = retrieved_docs[:AGENT_MAX_QUERY_RETRIEVAL_RESULTS]
pre_rerank_docs = retrieved_docs
if search_tool.search_pipeline is not None:
pre_rerank_docs = (
search_tool.search_pipeline._retrieved_sections or retrieved_docs
)
if AGENT_RETRIEVAL_STATS:
fit_scores = get_fit_scores(
pre_rerank_docs,
retrieved_docs,
)
else:
fit_scores = None
expanded_retrieval_result = QueryResult(
query=query_to_retrieve,
search_results=retrieved_docs,
stats=fit_scores,
query_info=query_info,
)
return DocRetrievalUpdate(
expanded_retrieval_results=[expanded_retrieval_result],
retrieved_documents=retrieved_docs,
)
def verification_kickoff(
state: ExpandedRetrievalState,
config: RunnableConfig,
) -> Command[Literal["doc_verification"]]:
documents = state["retrieved_documents"]
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
verification_question = state.get(
"question", pro_search_config.search_request.query
)
sub_question_id = state.get("sub_question_id")
return Command(
update={},
goto=[
Send(
node="doc_verification",
arg=DocVerificationInput(
doc_to_verify=doc,
question=verification_question,
base_search=False,
sub_question_id=sub_question_id,
),
)
for doc in documents
],
)
def doc_verification(
state: DocVerificationInput, config: RunnableConfig
) -> DocVerificationUpdate:
"""
Check whether the document is relevant for the original user question
Args:
state (DocVerificationInput): The current state
config (RunnableConfig): Configuration containing ProSearchConfig
Updates:
verified_documents: list[InferenceSection]
"""
question = state["question"]
doc_to_verify = state["doc_to_verify"]
document_content = doc_to_verify.combined_content
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
fast_llm = pro_search_config.fast_llm
document_content = trim_prompt_piece(
fast_llm.config, document_content, VERIFIER_PROMPT + question
)
msg = [
HumanMessage(
content=VERIFIER_PROMPT.format(
question=question, document_content=document_content
)
)
]
response = fast_llm.invoke(msg)
verified_documents = []
if isinstance(response.content, str) and "yes" in response.content.lower():
verified_documents.append(doc_to_verify)
return DocVerificationUpdate(
verified_documents=verified_documents,
)
def doc_reranking(
state: ExpandedRetrievalState, config: RunnableConfig
) -> DocRerankingUpdate:
verified_documents = state["verified_documents"]
# Rerank post retrieval and verification. First, create a search query
# then create the list of reranked sections
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
question = state.get("question", pro_search_config.search_request.query)
with get_session_context_manager() as db_session:
_search_query = retrieval_preprocessing(
search_request=SearchRequest(query=question),
user=pro_search_config.search_tool.user, # bit of a hack
llm=pro_search_config.fast_llm,
db_session=db_session,
)
# skip section filtering
if (
_search_query.rerank_settings
and _search_query.rerank_settings.rerank_model_name
and _search_query.rerank_settings.num_rerank > 0
):
reranked_documents = rerank_sections(
_search_query,
verified_documents,
)
else:
logger.warning("No reranking settings found, using unranked documents")
reranked_documents = verified_documents
if AGENT_RERANKING_STATS:
fit_scores = get_fit_scores(verified_documents, reranked_documents)
else:
fit_scores = RetrievalFitStats(fit_score_lift=0, rerank_effect=0, fit_scores={})
# TODO: stream deduped docs here, or decide to use search tool ranking/verification
return DocRerankingUpdate(
reranked_documents=[
doc for doc in reranked_documents if type(doc) == InferenceSection
][:AGENT_RERANKING_MAX_QUERY_RETRIEVAL_RESULTS],
sub_question_retrieval_stats=fit_scores,
)
def _calculate_sub_question_retrieval_stats(
verified_documents: list[InferenceSection],
expanded_retrieval_results: list[QueryResult],
) -> AgentChunkStats:
chunk_scores: dict[str, dict[str, list[int | float]]] = defaultdict(
lambda: defaultdict(list)
)
for expanded_retrieval_result in expanded_retrieval_results:
for doc in expanded_retrieval_result.search_results:
doc_chunk_id = f"{doc.center_chunk.document_id}_{doc.center_chunk.chunk_id}"
if doc.center_chunk.score is not None:
chunk_scores[doc_chunk_id]["score"].append(doc.center_chunk.score)
verified_doc_chunk_ids = [
f"{verified_document.center_chunk.document_id}_{verified_document.center_chunk.chunk_id}"
for verified_document in verified_documents
]
dismissed_doc_chunk_ids = []
raw_chunk_stats_counts: dict[str, int] = defaultdict(int)
raw_chunk_stats_scores: dict[str, float] = defaultdict(float)
for doc_chunk_id, chunk_data in chunk_scores.items():
if doc_chunk_id in verified_doc_chunk_ids:
raw_chunk_stats_counts["verified_count"] += 1
valid_chunk_scores = [
score for score in chunk_data["score"] if score is not None
]
raw_chunk_stats_scores["verified_scores"] += float(
np.mean(valid_chunk_scores)
)
else:
raw_chunk_stats_counts["rejected_count"] += 1
valid_chunk_scores = [
score for score in chunk_data["score"] if score is not None
]
raw_chunk_stats_scores["rejected_scores"] += float(
np.mean(valid_chunk_scores)
)
dismissed_doc_chunk_ids.append(doc_chunk_id)
if raw_chunk_stats_counts["verified_count"] == 0:
verified_avg_scores = 0.0
else:
verified_avg_scores = raw_chunk_stats_scores["verified_scores"] / float(
raw_chunk_stats_counts["verified_count"]
)
rejected_scores = raw_chunk_stats_scores.get("rejected_scores", None)
if rejected_scores is not None:
rejected_avg_scores = rejected_scores / float(
raw_chunk_stats_counts["rejected_count"]
)
else:
rejected_avg_scores = None
chunk_stats = AgentChunkStats(
verified_count=raw_chunk_stats_counts["verified_count"],
verified_avg_scores=verified_avg_scores,
rejected_count=raw_chunk_stats_counts["rejected_count"],
rejected_avg_scores=rejected_avg_scores,
verified_doc_chunk_ids=verified_doc_chunk_ids,
dismissed_doc_chunk_ids=dismissed_doc_chunk_ids,
)
return chunk_stats
def format_results(
state: ExpandedRetrievalState, config: RunnableConfig
) -> ExpandedRetrievalUpdate:
level, question_nr = parse_question_id(state.get("sub_question_id") or "0_0")
query_infos = [
result.query_info
for result in state["expanded_retrieval_results"]
if result.query_info is not None
]
if len(query_infos) == 0:
raise ValueError("No query info found")
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
# main question docs will be sent later after aggregation and deduping with sub-question docs
if not (level == 0 and question_nr == 0):
for tool_response in yield_search_responses(
query=state["question"],
reranked_sections=state[
"retrieved_documents"
], # TODO: rename params. this one is supposed to be the sections pre-merging
final_context_sections=state["reranked_documents"],
search_query_info=query_infos[0], # TODO: handle differing query infos?
get_section_relevance=lambda: None, # TODO: add relevance
search_tool=pro_search_config.search_tool,
):
dispatch_custom_event(
"tool_response",
ExtendedToolResponse(
id=tool_response.id,
response=tool_response.response,
level=level,
level_question_nr=question_nr,
),
)
sub_question_retrieval_stats = _calculate_sub_question_retrieval_stats(
verified_documents=state["verified_documents"],
expanded_retrieval_results=state["expanded_retrieval_results"],
)
if sub_question_retrieval_stats is None:
sub_question_retrieval_stats = AgentChunkStats()
# else:
# sub_question_retrieval_stats = [sub_question_retrieval_stats]
return ExpandedRetrievalUpdate(
expanded_retrieval_result=ExpandedRetrievalResult(
expanded_queries_results=state["expanded_retrieval_results"],
all_documents=state["reranked_documents"],
sub_question_retrieval_stats=sub_question_retrieval_stats,
),
)

View File

@@ -0,0 +1,82 @@
from operator import add
from typing import Annotated
from typing import TypedDict
from onyx.agent_search.core_state import SubgraphCoreState
from onyx.agent_search.pro_search_a.expanded_retrieval.models import (
ExpandedRetrievalResult,
)
from onyx.agent_search.shared_graph_utils.models import QueryResult
from onyx.agent_search.shared_graph_utils.models import RetrievalFitStats
from onyx.agent_search.shared_graph_utils.operators import dedup_inference_sections
from onyx.context.search.models import InferenceSection
### States ###
## Graph Input State
class ExpandedRetrievalInput(SubgraphCoreState):
question: str
base_search: bool
sub_question_id: str | None
## Update/Return States
class QueryExpansionUpdate(TypedDict):
expanded_queries: list[str]
class DocVerificationUpdate(TypedDict):
verified_documents: Annotated[list[InferenceSection], dedup_inference_sections]
class DocRetrievalUpdate(TypedDict):
expanded_retrieval_results: Annotated[list[QueryResult], add]
retrieved_documents: Annotated[list[InferenceSection], dedup_inference_sections]
class DocRerankingUpdate(TypedDict):
reranked_documents: Annotated[list[InferenceSection], dedup_inference_sections]
sub_question_retrieval_stats: RetrievalFitStats | None
class ExpandedRetrievalUpdate(TypedDict):
expanded_retrieval_result: ExpandedRetrievalResult
## Graph Output State
class ExpandedRetrievalOutput(TypedDict):
expanded_retrieval_result: ExpandedRetrievalResult
base_expanded_retrieval_result: ExpandedRetrievalResult
## Graph State
class ExpandedRetrievalState(
# This includes the core state
ExpandedRetrievalInput,
QueryExpansionUpdate,
DocRetrievalUpdate,
DocVerificationUpdate,
DocRerankingUpdate,
ExpandedRetrievalOutput,
):
pass
## Conditional Input States
class DocVerificationInput(ExpandedRetrievalInput):
doc_to_verify: InferenceSection
class RetrievalInput(ExpandedRetrievalInput):
query_to_retrieve: str

View File

@@ -0,0 +1,89 @@
from collections.abc import Hashable
from typing import Literal
from langgraph.types import Send
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
AnswerQuestionInput,
)
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
AnswerQuestionOutput,
)
from onyx.agent_search.pro_search_a.main.states import MainState
from onyx.agent_search.pro_search_a.main.states import RequireRefinedAnswerUpdate
from onyx.agent_search.shared_graph_utils.utils import make_question_id
from onyx.utils.logger import setup_logger
logger = setup_logger()
def parallelize_initial_sub_question_answering(
state: MainState,
) -> list[Send | Hashable]:
if len(state["initial_decomp_questions"]) > 0:
# sub_question_record_ids = [subq_record.id for subq_record in state["sub_question_records"]]
# if len(state["sub_question_records"]) == 0:
# if state["config"].use_persistence:
# raise ValueError("No sub-questions found for initial decomposed questions")
# else:
# # in this case, we are doing retrieval on the original question.
# # to make all the logic consistent, we create a new sub-question
# # with the same content as the original question
# sub_question_record_ids = [1] * len(state["initial_decomp_questions"])
return [
Send(
"answer_query_subgraph",
AnswerQuestionInput(
question=question,
question_id=make_question_id(0, question_nr + 1),
),
)
for question_nr, question in enumerate(state["initial_decomp_questions"])
]
else:
return [
Send(
"ingest_answers",
AnswerQuestionOutput(
answer_results=[],
),
)
]
# Define the function that determines whether to continue or not
def continue_to_refined_answer_or_end(
state: RequireRefinedAnswerUpdate,
) -> Literal["refined_sub_question_creation", "logging_node"]:
if state["require_refined_answer"]:
return "refined_sub_question_creation"
else:
return "logging_node"
def parallelize_refined_sub_question_answering(
state: MainState,
) -> list[Send | Hashable]:
if len(state["refined_sub_questions"]) > 0:
return [
Send(
"answer_refined_question",
AnswerQuestionInput(
question=question_data.sub_question,
question_id=make_question_id(1, question_nr),
),
)
for question_nr, question_data in state["refined_sub_questions"].items()
]
else:
return [
Send(
"ingest_refined_sub_answers",
AnswerQuestionOutput(
answer_results=[],
),
)
]

View File

@@ -0,0 +1,259 @@
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph
from onyx.agent_search.pro_search_a.answer_initial_sub_question.graph_builder import (
answer_query_graph_builder,
)
from onyx.agent_search.pro_search_a.answer_refinement_sub_question.graph_builder import (
answer_refined_query_graph_builder,
)
from onyx.agent_search.pro_search_a.base_raw_search.graph_builder import (
base_raw_search_graph_builder,
)
from onyx.agent_search.pro_search_a.main.edges import continue_to_refined_answer_or_end
from onyx.agent_search.pro_search_a.main.edges import (
parallelize_initial_sub_question_answering,
)
from onyx.agent_search.pro_search_a.main.edges import (
parallelize_refined_sub_question_answering,
)
from onyx.agent_search.pro_search_a.main.nodes import agent_logging
from onyx.agent_search.pro_search_a.main.nodes import entity_term_extraction_llm
from onyx.agent_search.pro_search_a.main.nodes import generate_initial_answer
from onyx.agent_search.pro_search_a.main.nodes import generate_refined_answer
from onyx.agent_search.pro_search_a.main.nodes import ingest_initial_base_retrieval
from onyx.agent_search.pro_search_a.main.nodes import (
ingest_initial_sub_question_answers,
)
from onyx.agent_search.pro_search_a.main.nodes import ingest_refined_answers
from onyx.agent_search.pro_search_a.main.nodes import initial_answer_quality_check
from onyx.agent_search.pro_search_a.main.nodes import initial_sub_question_creation
from onyx.agent_search.pro_search_a.main.nodes import refined_answer_decision
from onyx.agent_search.pro_search_a.main.nodes import refined_sub_question_creation
from onyx.agent_search.pro_search_a.main.states import MainInput
from onyx.agent_search.pro_search_a.main.states import MainState
from onyx.agent_search.shared_graph_utils.utils import get_test_config
from onyx.utils.logger import setup_logger
logger = setup_logger()
test_mode = False
def main_graph_builder(test_mode: bool = False) -> StateGraph:
graph = StateGraph(
state_schema=MainState,
input=MainInput,
)
graph.add_node(
node="initial_sub_question_creation",
action=initial_sub_question_creation,
)
answer_query_subgraph = answer_query_graph_builder().compile()
graph.add_node(
node="answer_query_subgraph",
action=answer_query_subgraph,
)
base_raw_search_subgraph = base_raw_search_graph_builder().compile()
graph.add_node(
node="base_raw_search_subgraph",
action=base_raw_search_subgraph,
)
# refined_answer_subgraph = refined_answers_graph_builder().compile()
# graph.add_node(
# node="refined_answer_subgraph",
# action=refined_answer_subgraph,
# )
graph.add_node(
node="refined_sub_question_creation",
action=refined_sub_question_creation,
)
answer_refined_question = answer_refined_query_graph_builder().compile()
graph.add_node(
node="answer_refined_question",
action=answer_refined_question,
)
graph.add_node(
node="ingest_refined_answers",
action=ingest_refined_answers,
)
graph.add_node(
node="generate_refined_answer",
action=generate_refined_answer,
)
# graph.add_node(
# node="check_refined_answer",
# action=check_refined_answer,
# )
graph.add_node(
node="ingest_initial_retrieval",
action=ingest_initial_base_retrieval,
)
graph.add_node(
node="ingest_initial_sub_question_answers",
action=ingest_initial_sub_question_answers,
)
graph.add_node(
node="generate_initial_answer",
action=generate_initial_answer,
)
graph.add_node(
node="initial_answer_quality_check",
action=initial_answer_quality_check,
)
graph.add_node(
node="entity_term_extraction_llm",
action=entity_term_extraction_llm,
)
graph.add_node(
node="refined_answer_decision",
action=refined_answer_decision,
)
graph.add_node(
node="logging_node",
action=agent_logging,
)
# if test_mode:
# graph.add_node(
# node="generate_initial_base_answer",
# action=generate_initial_base_answer,
# )
### Add edges ###
graph.add_edge(start_key=START, end_key="base_raw_search_subgraph")
graph.add_edge(
start_key="base_raw_search_subgraph",
end_key="ingest_initial_retrieval",
)
graph.add_edge(
start_key=START,
end_key="initial_sub_question_creation",
)
graph.add_conditional_edges(
source="initial_sub_question_creation",
path=parallelize_initial_sub_question_answering,
path_map=["answer_query_subgraph"],
)
graph.add_edge(
start_key="answer_query_subgraph",
end_key="ingest_initial_sub_question_answers",
)
graph.add_edge(
start_key=["ingest_initial_sub_question_answers", "ingest_initial_retrieval"],
end_key="generate_initial_answer",
)
graph.add_edge(
start_key=["ingest_initial_sub_question_answers", "ingest_initial_retrieval"],
end_key="entity_term_extraction_llm",
)
graph.add_edge(
start_key="generate_initial_answer",
end_key="initial_answer_quality_check",
)
graph.add_edge(
start_key=["initial_answer_quality_check", "entity_term_extraction_llm"],
end_key="refined_answer_decision",
)
graph.add_conditional_edges(
source="refined_answer_decision",
path=continue_to_refined_answer_or_end,
path_map=["refined_sub_question_creation", "logging_node"],
)
graph.add_conditional_edges(
source="refined_sub_question_creation", # DONE
path=parallelize_refined_sub_question_answering,
path_map=["answer_refined_question"],
)
graph.add_edge(
start_key="answer_refined_question", # HERE
end_key="ingest_refined_answers",
)
graph.add_edge(
start_key="ingest_refined_answers",
end_key="generate_refined_answer",
)
# graph.add_conditional_edges(
# source="refined_answer_decision",
# path=continue_to_refined_answer_or_end,
# path_map=["refined_answer_subgraph", END],
# )
# graph.add_edge(
# start_key="refined_answer_subgraph",
# end_key="generate_refined_answer",
# )
graph.add_edge(
start_key="generate_refined_answer",
end_key="logging_node",
)
graph.add_edge(
start_key="logging_node",
end_key=END,
)
# graph.add_edge(
# start_key="generate_refined_answer",
# end_key="check_refined_answer",
# )
# graph.add_edge(
# start_key="check_refined_answer",
# end_key=END,
# )
return graph
if __name__ == "__main__":
from onyx.db.engine import get_session_context_manager
from onyx.llm.factory import get_default_llms
from onyx.context.search.models import SearchRequest
graph = main_graph_builder()
compiled_graph = graph.compile()
primary_llm, fast_llm = get_default_llms()
with get_session_context_manager() as db_session:
search_request = SearchRequest(query="Who created Excel?")
pro_search_config, search_tool = get_test_config(
db_session, primary_llm, fast_llm, search_request
)
inputs = MainInput()
for thing in compiled_graph.stream(
input=inputs,
config={"configurable": {"config": pro_search_config}},
# stream_mode="debug",
# debug=True,
subgraphs=True,
):
logger.debug(thing)

View File

@@ -0,0 +1,36 @@
from pydantic import BaseModel
class FollowUpSubQuestion(BaseModel):
sub_question: str
sub_question_id: str
verified: bool
answered: bool
answer: str
class AgentTimings(BaseModel):
base_duration__s: float | None
refined_duration__s: float | None
full_duration__s: float | None
class AgentBaseMetrics(BaseModel):
num_verified_documents_total: int | None
num_verified_documents_core: int | None
verified_avg_score_core: float | None
num_verified_documents_base: int | float | None
verified_avg_score_base: float | None
base_doc_boost_factor: float | None
support_boost_factor: float | None
duration__s: float | None
class AgentRefinedMetrics(BaseModel):
refined_doc_boost_factor: float | None
refined_question_boost_factor: float | None
duration__s: float | None
class AgentAdditionalMetrics(BaseModel):
pass

View File

@@ -0,0 +1,951 @@
import json
import re
from collections.abc import Callable
from datetime import datetime
from typing import Any
from typing import cast
from langchain_core.callbacks.manager import dispatch_custom_event
from langchain_core.messages import HumanMessage
from langchain_core.messages import merge_content
from langchain_core.runnables.config import RunnableConfig
from onyx.agent_search.models import ProSearchConfig
from onyx.agent_search.pro_search_a.answer_initial_sub_question.states import (
AnswerQuestionOutput,
)
from onyx.agent_search.pro_search_a.base_raw_search.states import BaseRawSearchOutput
from onyx.agent_search.pro_search_a.main.models import AgentAdditionalMetrics
from onyx.agent_search.pro_search_a.main.models import AgentBaseMetrics
from onyx.agent_search.pro_search_a.main.models import AgentRefinedMetrics
from onyx.agent_search.pro_search_a.main.models import AgentTimings
from onyx.agent_search.pro_search_a.main.models import FollowUpSubQuestion
from onyx.agent_search.pro_search_a.main.states import BaseDecompUpdate
from onyx.agent_search.pro_search_a.main.states import DecompAnswersUpdate
from onyx.agent_search.pro_search_a.main.states import EntityTermExtractionUpdate
from onyx.agent_search.pro_search_a.main.states import ExpandedRetrievalUpdate
from onyx.agent_search.pro_search_a.main.states import FollowUpSubQuestionsUpdate
from onyx.agent_search.pro_search_a.main.states import InitialAnswerBASEUpdate
from onyx.agent_search.pro_search_a.main.states import InitialAnswerQualityUpdate
from onyx.agent_search.pro_search_a.main.states import InitialAnswerUpdate
from onyx.agent_search.pro_search_a.main.states import MainOutput
from onyx.agent_search.pro_search_a.main.states import MainState
from onyx.agent_search.pro_search_a.main.states import RefinedAnswerUpdate
from onyx.agent_search.pro_search_a.main.states import RequireRefinedAnswerUpdate
from onyx.agent_search.shared_graph_utils.agent_prompt_ops import trim_prompt_piece
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.agent_search.shared_graph_utils.models import CombinedAgentMetrics
from onyx.agent_search.shared_graph_utils.models import Entity
from onyx.agent_search.shared_graph_utils.models import EntityRelationshipTermExtraction
from onyx.agent_search.shared_graph_utils.models import InitialAgentResultStats
from onyx.agent_search.shared_graph_utils.models import QueryResult
from onyx.agent_search.shared_graph_utils.models import (
QuestionAnswerResults,
)
from onyx.agent_search.shared_graph_utils.models import RefinedAgentStats
from onyx.agent_search.shared_graph_utils.models import Relationship
from onyx.agent_search.shared_graph_utils.models import Term
from onyx.agent_search.shared_graph_utils.operators import dedup_inference_sections
from onyx.agent_search.shared_graph_utils.prompts import ASSISTANT_SYSTEM_PROMPT_DEFAULT
from onyx.agent_search.shared_graph_utils.prompts import ASSISTANT_SYSTEM_PROMPT_PERSONA
from onyx.agent_search.shared_graph_utils.prompts import DEEP_DECOMPOSE_PROMPT
from onyx.agent_search.shared_graph_utils.prompts import ENTITY_TERM_PROMPT
from onyx.agent_search.shared_graph_utils.prompts import (
INITIAL_DECOMPOSITION_PROMPT_QUESTIONS,
)
from onyx.agent_search.shared_graph_utils.prompts import INITIAL_RAG_BASE_PROMPT
from onyx.agent_search.shared_graph_utils.prompts import INITIAL_RAG_PROMPT
from onyx.agent_search.shared_graph_utils.prompts import (
INITIAL_RAG_PROMPT_NO_SUB_QUESTIONS,
)
from onyx.agent_search.shared_graph_utils.prompts import REVISED_RAG_PROMPT
from onyx.agent_search.shared_graph_utils.prompts import (
REVISED_RAG_PROMPT_NO_SUB_QUESTIONS,
)
from onyx.agent_search.shared_graph_utils.prompts import SUB_QUESTION_ANSWER_TEMPLATE
from onyx.agent_search.shared_graph_utils.prompts import UNKNOWN_ANSWER
from onyx.agent_search.shared_graph_utils.utils import dispatch_separated
from onyx.agent_search.shared_graph_utils.utils import format_docs
from onyx.agent_search.shared_graph_utils.utils import format_entity_term_extraction
from onyx.agent_search.shared_graph_utils.utils import get_persona_prompt
from onyx.agent_search.shared_graph_utils.utils import make_question_id
from onyx.agent_search.shared_graph_utils.utils import parse_question_id
from onyx.chat.models import AgentAnswerPiece
from onyx.chat.models import ExtendedToolResponse
from onyx.chat.models import SubQuestionPiece
from onyx.db.chat import log_agent_metrics
from onyx.db.chat import log_agent_sub_question_results
from onyx.tools.models import SearchQueryInfo
from onyx.tools.models import ToolCallKickoff
from onyx.tools.tool_implementations.search.search_tool import yield_search_responses
from onyx.utils.logger import setup_logger
logger = setup_logger()
def _remove_document_citations(text: str) -> str:
"""
Removes citation expressions of the form '[[D1]]()' or '[[Q1]]()' from text.
The number after D or Q can vary.
Args:
text: Input text containing citations
Returns:
Text with citations removed
"""
# Pattern explanation:
# \[\[(?:D|Q)\d+\]\]\(\) matches:
# \[\[ - literal [[ characters
# (?:D|Q) - a literal D or Q character
# \d+ - one or more digits
# \]\] - literal ]] characters
# \(\) - literal () characters
return re.sub(r"\[\[(?:D|Q)\d+\]\]\(\)", "", text)
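# Example (illustrative):
# _remove_document_citations("See [[D3]]() and [[Q1]]() for details.")
# -> "See  and  for details."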
def _dispatch_subquestion(level: int) -> Callable[[str, int], None]:
def _helper(sub_question_part: str, num: int) -> None:
dispatch_custom_event(
"decomp_qs",
SubQuestionPiece(
sub_question=sub_question_part,
level=level,
level_question_nr=num,
),
)
return _helper
def initial_sub_question_creation(
state: MainState, config: RunnableConfig
) -> BaseDecompUpdate:
now_start = datetime.now()
logger.debug(f"--------{now_start}--------BASE DECOMP START---")
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
question = pro_search_config.search_request.query
chat_session_id = pro_search_config.chat_session_id
primary_message_id = pro_search_config.message_id
if not chat_session_id or not primary_message_id:
raise ValueError(
"chat_session_id and message_id must be provided for agent search"
)
agent_start_time = datetime.now()
msg = [
HumanMessage(
content=INITIAL_DECOMPOSITION_PROMPT_QUESTIONS.format(question=question),
)
]
# Get the decomposed sub-questions in a defined format
model = pro_search_config.fast_llm
# Send the initial question as a subquestion with number 0
dispatch_custom_event(
"decomp_qs",
SubQuestionPiece(
sub_question=question,
level=0,
level_question_nr=0,
),
)
# dispatches custom events for subquestion tokens, adding in subquestion ids.
streamed_tokens = dispatch_separated(model.stream(msg), _dispatch_subquestion(0))
response = merge_content(*streamed_tokens)
# this call should only return strings. Commenting out for efficiency
# assert [type(tok) == str for tok in streamed_tokens]
# use no-op cast() instead of str() which runs code
# list_of_subquestions = clean_and_parse_list_string(cast(str, response))
list_of_subqs = cast(str, response).split("\n")
decomp_list: list[str] = [sq.strip() for sq in list_of_subqs if sq.strip() != ""]
now_end = datetime.now()
logger.debug(f"--------{now_end}--{now_end - now_start}--------BASE DECOMP END---")
return BaseDecompUpdate(
initial_decomp_questions=decomp_list,
agent_start_time=agent_start_time,
agent_refined_start_time=None,
agent_refined_end_time=None,
agent_refined_metrics=AgentRefinedMetrics(
refined_doc_boost_factor=None,
refined_question_boost_factor=None,
duration__s=None,
),
)
def _calculate_initial_agent_stats(
decomp_answer_results: list[QuestionAnswerResults],
original_question_stats: AgentChunkStats,
) -> InitialAgentResultStats:
initial_agent_result_stats: InitialAgentResultStats = InitialAgentResultStats(
sub_questions={},
original_question={},
agent_effectiveness={},
)
orig_verified = original_question_stats.verified_count
orig_support_score = original_question_stats.verified_avg_scores
verified_document_chunk_ids = []
support_scores = 0.0
for decomp_answer_result in decomp_answer_results:
verified_document_chunk_ids += (
decomp_answer_result.sub_question_retrieval_stats.verified_doc_chunk_ids
)
if (
decomp_answer_result.sub_question_retrieval_stats.verified_avg_scores
is not None
):
support_scores += (
decomp_answer_result.sub_question_retrieval_stats.verified_avg_scores
)
verified_document_chunk_ids = list(set(verified_document_chunk_ids))
# Calculate sub-question stats
if (
verified_document_chunk_ids
and len(verified_document_chunk_ids) > 0
and support_scores is not None
):
sub_question_stats: dict[str, float | int | None] = {
"num_verified_documents": len(verified_document_chunk_ids),
"verified_avg_score": float(support_scores / len(decomp_answer_results)),
}
else:
sub_question_stats = {"num_verified_documents": 0, "verified_avg_score": None}
initial_agent_result_stats.sub_questions.update(sub_question_stats)
# Get original question stats
initial_agent_result_stats.original_question.update(
{
"num_verified_documents": original_question_stats.verified_count,
"verified_avg_score": original_question_stats.verified_avg_scores,
}
)
# Calculate chunk utilization ratio
sub_verified = initial_agent_result_stats.sub_questions["num_verified_documents"]
chunk_ratio: float | None = None
if sub_verified is not None and orig_verified is not None and orig_verified > 0:
chunk_ratio = (float(sub_verified) / orig_verified) if sub_verified > 0 else 0.0
elif sub_verified is not None and sub_verified > 0:
chunk_ratio = 10.0
initial_agent_result_stats.agent_effectiveness["utilized_chunk_ratio"] = chunk_ratio
if (
orig_support_score is None or orig_support_score == 0.0
) and initial_agent_result_stats.sub_questions["verified_avg_score"] is None:
initial_agent_result_stats.agent_effectiveness["support_ratio"] = None
elif orig_support_score is None or orig_support_score == 0.0:
initial_agent_result_stats.agent_effectiveness["support_ratio"] = 10
elif initial_agent_result_stats.sub_questions["verified_avg_score"] is None:
initial_agent_result_stats.agent_effectiveness["support_ratio"] = 0
else:
initial_agent_result_stats.agent_effectiveness["support_ratio"] = (
initial_agent_result_stats.sub_questions["verified_avg_score"]
/ orig_support_score
)
return initial_agent_result_stats
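# Worked example (illustrative): two sub-questions whose retrieval stats report
# verified_avg_scores of 0.6 and 0.8 give support_scores == 1.4, so
# sub_questions["verified_avg_score"] == 1.4 / 2 == 0.7; if the original question
# verified 4 chunks and the sub-questions verified 6 unique chunks,
# utilized_chunk_ratio == 6 / 4 == 1.5.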
def _get_query_info(results: list[QueryResult]) -> SearchQueryInfo:
# Use the query info from the base document retrieval
# TODO: see if this is the right way to do this
query_infos = [
result.query_info for result in results if result.query_info is not None
]
if len(query_infos) == 0:
raise ValueError("No query info found")
return query_infos[0]
def generate_initial_answer(
state: MainState, config: RunnableConfig
) -> InitialAnswerUpdate:
now_start = datetime.now()
logger.debug(f"--------{now_start}--------GENERATE INITIAL---")
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
question = pro_search_config.search_request.query
persona_prompt = get_persona_prompt(pro_search_config.search_request.persona)
sub_question_docs = state["documents"]
all_original_question_documents = state["all_original_question_documents"]
relevant_docs = dedup_inference_sections(
sub_question_docs, all_original_question_documents
)
decomp_questions = []
if len(relevant_docs) == 0:
dispatch_custom_event(
"initial_agent_answer",
AgentAnswerPiece(
answer_piece=UNKNOWN_ANSWER,
level=0,
level_question_nr=0,
answer_type="agent_level_answer",
),
)
answer = UNKNOWN_ANSWER
initial_agent_stats = InitialAgentResultStats(
sub_questions={},
original_question={},
agent_effectiveness={},
)
else:
# Use the query info from the base document retrieval
query_info = _get_query_info(state["original_question_retrieval_results"])
for tool_response in yield_search_responses(
query=question,
reranked_sections=relevant_docs,
final_context_sections=relevant_docs,
search_query_info=query_info,
get_section_relevance=lambda: None, # TODO: add relevance
search_tool=pro_search_config.search_tool,
):
dispatch_custom_event(
"tool_response",
ExtendedToolResponse(
id=tool_response.id,
response=tool_response.response,
level=0,
level_question_nr=0, # 0, 0 is the base question
),
)
net_new_original_question_docs = []
for all_original_question_doc in all_original_question_documents:
if all_original_question_doc not in sub_question_docs:
net_new_original_question_docs.append(all_original_question_doc)
decomp_answer_results = state["decomp_answer_results"]
good_qa_list: list[str] = []
sub_question_nr = 1
for decomp_answer_result in decomp_answer_results:
decomp_questions.append(decomp_answer_result.question)
_, question_nr = parse_question_id(decomp_answer_result.question_id)
if (
decomp_answer_result.quality.lower().startswith("yes")
and len(decomp_answer_result.answer) > 0
and decomp_answer_result.answer != UNKNOWN_ANSWER
):
good_qa_list.append(
SUB_QUESTION_ANSWER_TEMPLATE.format(
sub_question=decomp_answer_result.question,
sub_answer=decomp_answer_result.answer,
sub_question_nr=sub_question_nr,
)
)
sub_question_nr += 1
if len(good_qa_list) > 0:
sub_question_answer_str = "\n\n------\n\n".join(good_qa_list)
else:
sub_question_answer_str = ""
# Determine which persona-specification prompt to use
if len(persona_prompt) == 0:
persona_specification = ASSISTANT_SYSTEM_PROMPT_DEFAULT
else:
persona_specification = ASSISTANT_SYSTEM_PROMPT_PERSONA.format(
persona_prompt=persona_prompt
)
# Determine which base prompt to use given the sub-question information
if len(good_qa_list) > 0:
base_prompt = INITIAL_RAG_PROMPT
else:
base_prompt = INITIAL_RAG_PROMPT_NO_SUB_QUESTIONS
model = pro_search_config.fast_llm
doc_context = format_docs(relevant_docs)
doc_context = trim_prompt_piece(
model.config,
doc_context,
base_prompt + sub_question_answer_str + persona_specification,
)
msg = [
HumanMessage(
content=base_prompt.format(
question=question,
answered_sub_questions=_remove_document_citations(
sub_question_answer_str
),
relevant_docs=doc_context,
persona_specification=persona_specification,
)
)
]
streamed_tokens: list[str | list[str | dict[str, Any]]] = [""]
for message in model.stream(msg):
# TODO: in principle, the answer here COULD contain images, but we don't support that yet
content = message.content
if not isinstance(content, str):
raise ValueError(
f"Expected content to be a string, but got {type(content)}"
)
dispatch_custom_event(
"initial_agent_answer",
AgentAnswerPiece(
answer_piece=content,
level=0,
level_question_nr=0,
answer_type="agent_level_answer",
),
)
streamed_tokens.append(content)
response = merge_content(*streamed_tokens)
answer = cast(str, response)
initial_agent_stats = _calculate_initial_agent_stats(
state["decomp_answer_results"], state["original_question_retrieval_stats"]
)
logger.debug(
f"\n\nYYYYY--Sub-Questions:\n\n{sub_question_answer_str}\n\nStats:\n\n"
)
if initial_agent_stats:
logger.debug(initial_agent_stats.original_question)
logger.debug(initial_agent_stats.sub_questions)
logger.debug(initial_agent_stats.agent_effectiveness)
now_end = datetime.now()
logger.debug(
f"--------{now_end}--{now_end - now_start}--------INITIAL AGENT ANSWER END---\n\n"
)
agent_base_end_time = datetime.now()
agent_base_metrics = AgentBaseMetrics(
num_verified_documents_total=len(relevant_docs),
num_verified_documents_core=state[
"original_question_retrieval_stats"
].verified_count,
verified_avg_score_core=state[
"original_question_retrieval_stats"
].verified_avg_scores,
num_verified_documents_base=initial_agent_stats.sub_questions.get(
"num_verified_documents", None
),
verified_avg_score_base=initial_agent_stats.sub_questions.get(
"verified_avg_score", None
),
base_doc_boost_factor=initial_agent_stats.agent_effectiveness.get(
"utilized_chunk_ratio", None
),
support_boost_factor=initial_agent_stats.agent_effectiveness.get(
"support_ratio", None
),
duration__s=(agent_base_end_time - state["agent_start_time"]).total_seconds(),
)
return InitialAnswerUpdate(
initial_answer=answer,
initial_agent_stats=initial_agent_stats,
generated_sub_questions=decomp_questions,
agent_base_end_time=agent_base_end_time,
agent_base_metrics=agent_base_metrics,
)
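The loop above pushes each generated token out as an AgentAnswerPiece via dispatch_custom_event("initial_agent_answer", ...). A minimal consumer sketch, assuming the compiled graph is read through LangChain's astream_events API; compiled_graph, inputs and config here are stand-ins, not this diff's actual run loop:

# Hypothetical consumer sketch, not the project's run loop: surface the
# "initial_agent_answer" pieces dispatched above via LangChain's event stream.
async def stream_initial_answer(compiled_graph, inputs, config) -> str:
    answer = ""
    async for event in compiled_graph.astream_events(inputs, config, version="v2"):
        # dispatch_custom_event(name, data) shows up as an "on_custom_event"
        # carrying that same name and payload
        if event["event"] == "on_custom_event" and event["name"] == "initial_agent_answer":
            piece = event["data"]  # the AgentAnswerPiece dispatched by the node
            answer += piece.answer_piece
    return answer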
def initial_answer_quality_check(
state: MainState, config: RunnableConfig
) -> InitialAnswerQualityUpdate:
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
if not pro_search_config.allow_refinement:
return InitialAnswerQualityUpdate(initial_answer_quality=True)
# TODO: implement actual quality check
return InitialAnswerQualityUpdate(initial_answer_quality=True)
def entity_term_extraction_llm(
state: MainState, config: RunnableConfig
) -> EntityTermExtractionUpdate:
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
question = pro_search_config.search_request.query
base_answer = state["initial_answer"]
msg = [
HumanMessage(
content=ENTITY_TERM_PROMPT.format(
question=question,
base_answer=base_answer,
),
)
]
# Grader
model = pro_search_config.fast_llm
response = model.invoke(msg)
answer = response.pretty_repr()
try:
entity_term_extraction = json.loads(answer)
entity_term_extraction = EntityRelationshipTermExtraction(
entities=[
Entity(**entity) for entity in entity_term_extraction["entities"]
],
relationships=[
Relationship(**relationship)
for relationship in entity_term_extraction["relationships"]
],
terms=[Term(**term) for term in entity_term_extraction["terms"]],
)
except Exception as e:
logger.error(f"Error parsing entity term extraction: {e}")
entity_term_extraction = EntityRelationshipTermExtraction(
entities=[], relationships=[], terms=[]
)
    return EntityTermExtractionUpdate(
        entity_relation_term_extractions=entity_term_extraction,
    )
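Note that entity_term_extraction_llm hands response.pretty_repr() straight to json.loads; chat-model replies are often wrapped in markdown ```json fences, so in practice the except branch can end up discarding otherwise valid extractions. A small hypothetical cleanup helper, sketched here under that assumption (the name and regex are not part of this diff):

import json
import re


def _parse_entity_extraction(raw_llm_output: str) -> dict:
    # Drop markdown code fences and anything outside the outermost JSON object
    # before handing the text to json.loads.
    cleaned = re.sub(r"```(?:json)?", "", raw_llm_output)
    start, end = cleaned.find("{"), cleaned.rfind("}")
    if start == -1 or end == -1:
        raise ValueError("no JSON object found in LLM output")
    return json.loads(cleaned[start : end + 1])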
def generate_initial_base_search_only_answer(
state: MainState,
config: RunnableConfig,
) -> InitialAnswerBASEUpdate:
now_start = datetime.now()
logger.debug(f"--------{now_start}--------GENERATE INITIAL BASE ANSWER---")
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
question = pro_search_config.search_request.query
original_question_docs = state["all_original_question_documents"]
msg = [
HumanMessage(
content=INITIAL_RAG_BASE_PROMPT.format(
question=question,
context=format_docs(original_question_docs),
)
)
]
# Grader
model = pro_search_config.fast_llm
response = model.invoke(msg)
answer = response.pretty_repr()
now_end = datetime.now()
logger.debug(
f"--------{now_end}--{now_end - now_start}--------INITIAL BASE ANSWER END---\n\n"
)
return InitialAnswerBASEUpdate(initial_base_answer=answer)
def ingest_initial_sub_question_answers(
state: AnswerQuestionOutput,
) -> DecompAnswersUpdate:
now_start = datetime.now()
logger.debug(f"--------{now_start}--------INGEST ANSWERS---")
documents = []
answer_results = state.get("answer_results", [])
for answer_result in answer_results:
documents.extend(answer_result.documents)
now_end = datetime.now()
logger.debug(
f"--------{now_end}--{now_end - now_start}--------INGEST ANSWERS END---"
)
return DecompAnswersUpdate(
# Deduping is done by the documents operator for the main graph
# so we might not need to dedup here
documents=dedup_inference_sections(documents, []),
decomp_answer_results=answer_results,
)
def ingest_initial_base_retrieval(
state: BaseRawSearchOutput,
) -> ExpandedRetrievalUpdate:
now_start = datetime.now()
logger.debug(f"--------{now_start}--------INGEST INITIAL RETRIEVAL---")
sub_question_retrieval_stats = state[
"base_expanded_retrieval_result"
].sub_question_retrieval_stats
    if sub_question_retrieval_stats is None:
        sub_question_retrieval_stats = AgentChunkStats()
now_end = datetime.now()
logger.debug(
f"--------{now_end}--{now_end - now_start}--------INGEST INITIAL RETRIEVAL END---"
)
return ExpandedRetrievalUpdate(
original_question_retrieval_results=state[
"base_expanded_retrieval_result"
].expanded_queries_results,
all_original_question_documents=state[
"base_expanded_retrieval_result"
].all_documents,
original_question_retrieval_stats=sub_question_retrieval_stats,
)
def refined_answer_decision(
state: MainState, config: RunnableConfig
) -> RequireRefinedAnswerUpdate:
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
if not pro_search_config.allow_refinement:
return RequireRefinedAnswerUpdate(require_refined_answer=False)
# TODO: implement actual decision logic
return RequireRefinedAnswerUpdate(require_refined_answer=True)
def generate_refined_answer(
state: MainState, config: RunnableConfig
) -> RefinedAnswerUpdate:
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
question = pro_search_config.search_request.query
persona_prompt = get_persona_prompt(pro_search_config.search_request.persona)
base_answer = state["initial_answer"]
sub_question_docs = state["documents"]
all_original_question_documents = state["all_original_question_documents"]
relevant_docs = dedup_inference_sections(
sub_question_docs, all_original_question_documents
)
    # Determine which persona-specification prompt to use
    if len(persona_prompt) == 0:
        persona_specification = ASSISTANT_SYSTEM_PROMPT_DEFAULT
    else:
        persona_specification = ASSISTANT_SYSTEM_PROMPT_PERSONA.format(
            persona_prompt=persona_prompt
        )
# Get the refined sub-questions and their answers
refined_sub_questions = state["refined_sub_questions"]
good_qa_list: list[str] = []
new_revised_good_sub_questions: list[str] = []
sub_question_nr = 1
for refined_sub_question in refined_sub_questions.values():
if refined_sub_question.answered:
good_qa_list.append(
SUB_QUESTION_ANSWER_TEMPLATE.format(
sub_question=refined_sub_question.sub_question,
sub_answer=refined_sub_question.answer,
sub_question_nr=sub_question_nr,
)
)
new_revised_good_sub_questions.append(refined_sub_question.sub_question)
sub_question_nr += 1
if len(good_qa_list) > 0:
sub_question_answer_str = "\n\n------\n\n".join(good_qa_list)
else:
sub_question_answer_str = ""
# Determine which base prompt to use given the sub-question information
if len(good_qa_list) > 0:
base_prompt = REVISED_RAG_PROMPT
else:
base_prompt = REVISED_RAG_PROMPT_NO_SUB_QUESTIONS
msg = [
HumanMessage(
content=base_prompt.format(
question=question,
initial_answer=base_answer,
answered_sub_questions=_remove_document_citations(
sub_question_answer_str
),
relevant_docs=format_docs(relevant_docs),
persona_specification=persona_specification,
)
)
]
model = pro_search_config.fast_llm
streamed_tokens: list[str | list[str | dict[str, Any]]] = [""]
for message in model.stream(msg):
# TODO: in principle, the answer here COULD contain images, but we don't support that yet
content = message.content
if not isinstance(content, str):
raise ValueError(
f"Expected content to be a string, but got {type(content)}"
)
dispatch_custom_event(
"refined_agent_answer",
AgentAnswerPiece(
answer_piece=content,
level=1,
level_question_nr=0,
answer_type="agent_level_answer",
),
)
streamed_tokens.append(content)
response = merge_content(*streamed_tokens)
answer = cast(str, response)
# new_revised_good_sub_questions_str = "\n".join(
# list(set(new_revised_good_sub_questions))
# )
refined_agent_stats = RefinedAgentStats(
revision_doc_efficiency=1.0, # TODO: calculate this
revision_question_efficiency=1.0, # TODO: calculate this
)
agent_refined_end_time = datetime.now()
agent_refined_metrics = AgentRefinedMetrics(
refined_doc_boost_factor=refined_agent_stats.revision_doc_efficiency,
refined_question_boost_factor=refined_agent_stats.revision_question_efficiency,
duration__s=(
agent_refined_end_time - state["agent_refined_start_time"]
).total_seconds()
if state["agent_refined_start_time"]
else None,
)
return RefinedAnswerUpdate(
refined_answer=answer,
refined_agent_stats=refined_agent_stats,
refined_answer_quality=True, # TODO: replace this with actual check
agent_refined_end_time=agent_refined_end_time,
agent_refined_metrics=agent_refined_metrics,
)
def refined_sub_question_creation(
state: MainState, config: RunnableConfig
) -> FollowUpSubQuestionsUpdate:
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
dispatch_custom_event(
"tool_response",
ToolCallKickoff(
tool_name="agent_search_1",
tool_args={
"query": pro_search_config.search_request.query,
"answer": state["initial_answer"],
},
),
)
now_start = datetime.now()
logger.debug(f"--------{now_start}--------FOLLOW UP DECOMPOSE---")
agent_refined_start_time = datetime.now()
question = pro_search_config.search_request.query
base_answer = state["initial_answer"]
# get the entity term extraction dict and properly format it
    entity_relation_term_extractions = state["entity_relation_term_extractions"]
    entity_term_extraction_str = format_entity_term_extraction(
        entity_relation_term_extractions
    )
initial_question_answers = state["decomp_answer_results"]
addressed_question_list = [
x.question for x in initial_question_answers if "yes" in x.quality.lower()
]
failed_question_list = [
x.question for x in initial_question_answers if "no" in x.quality.lower()
]
msg = [
HumanMessage(
content=DEEP_DECOMPOSE_PROMPT.format(
question=question,
entity_term_extraction_str=entity_term_extraction_str,
base_answer=base_answer,
answered_sub_questions="\n - ".join(addressed_question_list),
failed_sub_questions="\n - ".join(failed_question_list),
),
)
]
# Grader
model = pro_search_config.fast_llm
streamed_tokens = dispatch_separated(model.stream(msg), _dispatch_subquestion(1))
response = merge_content(*streamed_tokens)
if isinstance(response, str):
parsed_response = [q for q in response.split("\n") if q.strip() != ""]
else:
raise ValueError("LLM response is not a string")
refined_sub_question_dict = {}
for sub_question_nr, sub_question in enumerate(parsed_response):
refined_sub_question = FollowUpSubQuestion(
sub_question=sub_question,
sub_question_id=make_question_id(1, sub_question_nr + 1),
verified=False,
answered=False,
answer="",
)
refined_sub_question_dict[sub_question_nr + 1] = refined_sub_question
now_end = datetime.now()
logger.debug(
f"--------{now_end}--{now_end - now_start}--------FOLLOW UP DECOMPOSE END---"
)
return FollowUpSubQuestionsUpdate(
refined_sub_questions=refined_sub_question_dict,
agent_refined_start_time=agent_refined_start_time,
)
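The follow-up questions above get their ids from make_question_id(1, sub_question_nr + 1); per the state comments later in this diff, ids follow a "<level>_<question_num>" scheme in which level 0 is the initial decomposition and level 1 the refinement pass. A tiny sketch of what the round trip presumably looks like (the real helpers live in shared_graph_utils; these bodies are assumptions):

def make_question_id(level: int, question_nr: int) -> str:
    # e.g. level 1, question 3 -> "1_3"
    return f"{level}_{question_nr}"


def parse_question_id(question_id: str) -> tuple[int, int]:
    level, question_nr = question_id.split("_")
    return int(level), int(question_nr)


assert parse_question_id(make_question_id(1, 3)) == (1, 3)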
def ingest_refined_answers(
state: AnswerQuestionOutput,
) -> DecompAnswersUpdate:
now_start = datetime.now()
logger.debug(f"--------{now_start}--------INGEST FOLLOW UP ANSWERS---")
documents = []
answer_results = state.get("answer_results", [])
for answer_result in answer_results:
documents.extend(answer_result.documents)
now_end = datetime.now()
logger.debug(
f"--------{now_end}--{now_end - now_start}--------INGEST FOLLOW UP ANSWERS END---"
)
return DecompAnswersUpdate(
# Deduping is done by the documents operator for the main graph
# so we might not need to dedup here
documents=dedup_inference_sections(documents, []),
decomp_answer_results=answer_results,
)
def agent_logging(state: MainState, config: RunnableConfig) -> MainOutput:
now_start = datetime.now()
logger.debug(f"--------{now_start}--------LOGGING NODE START---")
agent_start_time = state["agent_start_time"]
agent_base_end_time = state["agent_base_end_time"]
agent_refined_start_time = state["agent_refined_start_time"]
    agent_refined_end_time = state["agent_refined_end_time"]
agent_end_time = agent_refined_end_time or agent_base_end_time
agent_base_duration = None
if agent_base_end_time:
agent_base_duration = (agent_base_end_time - agent_start_time).total_seconds()
agent_refined_duration = None
if agent_refined_start_time and agent_refined_end_time:
agent_refined_duration = (
agent_refined_end_time - agent_refined_start_time
).total_seconds()
agent_full_duration = None
if agent_end_time:
agent_full_duration = (agent_end_time - agent_start_time).total_seconds()
agent_type = "refined" if agent_refined_duration else "base"
agent_base_metrics = state["agent_base_metrics"]
agent_refined_metrics = state["agent_refined_metrics"]
combined_agent_metrics = CombinedAgentMetrics(
timings=AgentTimings(
base_duration__s=agent_base_duration,
refined_duration__s=agent_refined_duration,
full_duration__s=agent_full_duration,
),
base_metrics=agent_base_metrics,
refined_metrics=agent_refined_metrics,
additional_metrics=AgentAdditionalMetrics(),
)
pro_search_config = cast(ProSearchConfig, config["metadata"]["config"])
persona_id = None
if pro_search_config.search_request.persona:
persona_id = pro_search_config.search_request.persona.id
# log the agent metrics
if pro_search_config.db_session:
log_agent_metrics(
db_session=pro_search_config.db_session,
user_id=None, # TODO: get user ID from somewhere if needed
persona_id=persona_id,
agent_type=agent_type,
start_time=agent_start_time,
agent_metrics=combined_agent_metrics,
)
if pro_search_config.use_persistence and pro_search_config.db_session:
# Persist the sub-answer in the database
chat_session_id = pro_search_config.chat_session_id
primary_message_id = pro_search_config.message_id
sub_question_answer_results = state["decomp_answer_results"]
log_agent_sub_question_results(
db_session=pro_search_config.db_session,
chat_session_id=chat_session_id,
primary_message_id=primary_message_id,
sub_question_answer_results=sub_question_answer_results,
)
main_output = MainOutput()
now_end = datetime.now()
logger.debug(f"--------{now_end}--{now_end - now_start}--------LOGGING NODE END---")
return main_output

View File

@@ -0,0 +1,151 @@
from datetime import datetime
from operator import add
from typing import Annotated
from typing import TypedDict
from onyx.agent_search.core_state import CoreState
from onyx.agent_search.pro_search_a.expanded_retrieval.models import (
ExpandedRetrievalResult,
)
from onyx.agent_search.pro_search_a.main.models import AgentBaseMetrics
from onyx.agent_search.pro_search_a.main.models import AgentRefinedMetrics
from onyx.agent_search.pro_search_a.main.models import FollowUpSubQuestion
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.agent_search.shared_graph_utils.models import EntityRelationshipTermExtraction
from onyx.agent_search.shared_graph_utils.models import InitialAgentResultStats
from onyx.agent_search.shared_graph_utils.models import QueryResult
from onyx.agent_search.shared_graph_utils.models import (
QuestionAnswerResults,
)
from onyx.agent_search.shared_graph_utils.models import RefinedAgentStats
from onyx.agent_search.shared_graph_utils.operators import dedup_inference_sections
from onyx.agent_search.shared_graph_utils.operators import dedup_question_answer_results
from onyx.context.search.models import InferenceSection
### States ###
## Update States
class RefinedAgentStartStats(TypedDict):
agent_refined_start_time: datetime | None
class RefinedAgentEndStats(TypedDict):
agent_refined_end_time: datetime | None
agent_refined_metrics: AgentRefinedMetrics
class BaseDecompUpdateBase(TypedDict):
agent_start_time: datetime
initial_decomp_questions: list[str]
class BaseDecompUpdate(
RefinedAgentStartStats, RefinedAgentEndStats, BaseDecompUpdateBase
):
pass
class InitialAnswerBASEUpdate(TypedDict):
initial_base_answer: str
class InitialAnswerUpdate(TypedDict):
initial_answer: str
initial_agent_stats: InitialAgentResultStats | None
generated_sub_questions: list[str]
agent_base_end_time: datetime
agent_base_metrics: AgentBaseMetrics
class RefinedAnswerUpdateBase(TypedDict):
refined_answer: str
refined_agent_stats: RefinedAgentStats | None
refined_answer_quality: bool
class RefinedAnswerUpdate(RefinedAgentEndStats, RefinedAnswerUpdateBase):
pass
class InitialAnswerQualityUpdate(TypedDict):
initial_answer_quality: bool
class RequireRefinedAnswerUpdate(TypedDict):
require_refined_answer: bool
class DecompAnswersUpdate(TypedDict):
documents: Annotated[list[InferenceSection], dedup_inference_sections]
decomp_answer_results: Annotated[
list[QuestionAnswerResults], dedup_question_answer_results
]
class FollowUpDecompAnswersUpdate(TypedDict):
refined_documents: Annotated[list[InferenceSection], dedup_inference_sections]
refined_decomp_answer_results: Annotated[list[QuestionAnswerResults], add]
class ExpandedRetrievalUpdate(TypedDict):
all_original_question_documents: Annotated[
list[InferenceSection], dedup_inference_sections
]
original_question_retrieval_results: list[QueryResult]
original_question_retrieval_stats: AgentChunkStats
class EntityTermExtractionUpdate(TypedDict):
    entity_relation_term_extractions: EntityRelationshipTermExtraction
class FollowUpSubQuestionsUpdateBase(TypedDict):
refined_sub_questions: dict[int, FollowUpSubQuestion]
class FollowUpSubQuestionsUpdate(
RefinedAgentStartStats, FollowUpSubQuestionsUpdateBase
):
pass
## Graph Input State
class MainInput(CoreState):
pass
## Graph State
class MainState(
# This includes the core state
MainInput,
BaseDecompUpdateBase,
InitialAnswerUpdate,
InitialAnswerBASEUpdate,
DecompAnswersUpdate,
ExpandedRetrievalUpdate,
EntityTermExtractionUpdate,
InitialAnswerQualityUpdate,
RequireRefinedAnswerUpdate,
FollowUpSubQuestionsUpdateBase,
FollowUpDecompAnswersUpdate,
RefinedAnswerUpdateBase,
RefinedAgentStartStats,
RefinedAgentEndStats,
):
# expanded_retrieval_result: Annotated[list[ExpandedRetrievalResult], add]
base_raw_search_result: Annotated[list[ExpandedRetrievalResult], add]
## Graph Output State - presently not used
class MainOutput(TypedDict):
pass

View File

@@ -0,0 +1,26 @@
from collections.abc import Hashable
from langgraph.types import Send
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
AnswerQuestionInput,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.states import (
ExpandedRetrievalInput,
)
from onyx.utils.logger import setup_logger
logger = setup_logger()
def send_to_expanded_retrieval(state: AnswerQuestionInput) -> Send | Hashable:
logger.debug("sending to expanded retrieval via edge")
return Send(
"initial_sub_question_expanded_retrieval",
ExpandedRetrievalInput(
question=state["question"],
base_search=False,
sub_question_id=state["question_id"],
),
)
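send_to_expanded_retrieval returns a single Send, which is how LangGraph hands this subgraph's input to the expanded-retrieval node. The same primitive is what lets a parent graph fan a list of sub-questions out into parallel runs; a self-contained sketch of that map-style pattern, with all names illustrative rather than taken from this diff:

from operator import add
from typing import Annotated, TypedDict

from langgraph.graph import END, START, StateGraph
from langgraph.types import Send


class FanOutState(TypedDict):
    questions: list[str]
    answers: Annotated[list[str], add]  # reducer merges writes from parallel branches


def fan_out(state: FanOutState) -> list[Send]:
    # One Send per sub-question -> one parallel invocation of "answer_one" each
    return [Send("answer_one", {"question": q}) for q in state["questions"]]


def answer_one(state: dict) -> dict:
    return {"answers": [f"answer to: {state['question']}"]}


builder = StateGraph(FanOutState)
builder.add_node("answer_one", answer_one)
builder.add_conditional_edges(START, fan_out, ["answer_one"])
builder.add_edge("answer_one", END)
graph = builder.compile()

print(graph.invoke({"questions": ["q1", "q2"], "answers": []}))
# both branch outputs end up concatenated under "answers" (order not guaranteed)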

View File

@@ -0,0 +1,129 @@
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph
from onyx.agent_search.pro_search_b.answer_initial_sub_question.edges import (
send_to_expanded_retrieval,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.nodes.answer_check import (
answer_check,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.nodes.answer_generation import (
answer_generation,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.nodes.format_answer import (
format_answer,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.nodes.ingest_retrieval import (
ingest_retrieval,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
AnswerQuestionInput,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
AnswerQuestionOutput,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
AnswerQuestionState,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.graph_builder import (
expanded_retrieval_graph_builder,
)
from onyx.agent_search.shared_graph_utils.utils import get_test_config
from onyx.utils.logger import setup_logger
logger = setup_logger()
def answer_query_graph_builder() -> StateGraph:
graph = StateGraph(
state_schema=AnswerQuestionState,
input=AnswerQuestionInput,
output=AnswerQuestionOutput,
)
### Add nodes ###
expanded_retrieval = expanded_retrieval_graph_builder().compile()
graph.add_node(
node="initial_sub_question_expanded_retrieval",
action=expanded_retrieval,
)
graph.add_node(
node="answer_check",
action=answer_check,
)
graph.add_node(
node="answer_generation",
action=answer_generation,
)
graph.add_node(
node="format_answer",
action=format_answer,
)
graph.add_node(
node="ingest_retrieval",
action=ingest_retrieval,
)
### Add edges ###
graph.add_conditional_edges(
source=START,
path=send_to_expanded_retrieval,
path_map=["initial_sub_question_expanded_retrieval"],
)
graph.add_edge(
start_key="initial_sub_question_expanded_retrieval",
end_key="ingest_retrieval",
)
graph.add_edge(
start_key="ingest_retrieval",
end_key="answer_generation",
)
graph.add_edge(
start_key="answer_generation",
end_key="answer_check",
)
graph.add_edge(
start_key="answer_check",
end_key="format_answer",
)
graph.add_edge(
start_key="format_answer",
end_key=END,
)
return graph
if __name__ == "__main__":
from onyx.db.engine import get_session_context_manager
from onyx.llm.factory import get_default_llms
from onyx.context.search.models import SearchRequest
graph = answer_query_graph_builder()
compiled_graph = graph.compile()
primary_llm, fast_llm = get_default_llms()
search_request = SearchRequest(
query="what can you do with onyx or danswer?",
)
with get_session_context_manager() as db_session:
pro_search_config, search_tool = get_test_config(
db_session, primary_llm, fast_llm, search_request
)
inputs = AnswerQuestionInput(
question="what can you do with onyx?",
subgraph_fast_llm=fast_llm,
subgraph_primary_llm=primary_llm,
subgraph_config=pro_search_config,
subgraph_search_tool=search_tool,
subgraph_db_session=db_session,
question_id="0_0",
)
for thing in compiled_graph.stream(
input=inputs,
# debug=True,
# subgraphs=True,
):
logger.debug(thing)

View File

@@ -0,0 +1,8 @@
from pydantic import BaseModel
### Models ###
class AnswerRetrievalStats(BaseModel):
answer_retrieval_stats: dict[str, float | int]

View File

@@ -0,0 +1,14 @@
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
AnswerQuestionState,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
QACheckUpdate,
)
def answer_check(state: AnswerQuestionState) -> QACheckUpdate:
    # Placeholder: every sub-answer is currently marked as passing; no LLM-based
    # verification happens in this variant yet.
    quality_str = "yes"
return QACheckUpdate(
answer_quality=quality_str,
)
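Since answer_check currently accepts every sub-answer, here is a hypothetical sketch of an LLM-backed check in the style of the other grader nodes in this diff; the prompt constant, the helper name and the exact state keys used are assumptions, not part of the project:

from langchain_core.messages import HumanMessage

# Hypothetical prompt; not a constant defined anywhere in this diff.
SUB_ANSWER_CHECK_PROMPT = (
    "Determine whether the answer below actually addresses the question.\n"
    "Question: {question}\nAnswer: {answer}\nRespond with 'yes' or 'no'."
)


def answer_check_llm(state: AnswerQuestionState) -> QACheckUpdate:
    msg = [
        HumanMessage(
            content=SUB_ANSWER_CHECK_PROMPT.format(
                question=state["question"],
                answer=state["answer"],
            )
        )
    ]
    # "subgraph_fast_llm" mirrors the input key used in the __main__ example
    # elsewhere in this diff; treat it as an assumption here.
    response = state["subgraph_fast_llm"].invoke(msg)
    quality_str = "yes" if "yes" in str(response.content).lower() else "no"
    return QACheckUpdate(answer_quality=quality_str)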

View File

@@ -0,0 +1,41 @@
import datetime
from langchain_core.callbacks.manager import dispatch_custom_event
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
AnswerQuestionState,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
QAGenerationUpdate,
)
from onyx.agent_search.shared_graph_utils.utils import get_persona_prompt
from onyx.agent_search.shared_graph_utils.utils import parse_question_id
from onyx.chat.models import AgentAnswerPiece
from onyx.utils.logger import setup_logger
logger = setup_logger()
def answer_generation(state: AnswerQuestionState) -> QAGenerationUpdate:
now_start = datetime.datetime.now()
logger.debug(f"--------{now_start}--------START ANSWER GENERATION---")
state["question"]
state["documents"]
level, question_nr = parse_question_id(state["question_id"])
get_persona_prompt(state["subgraph_config"].search_request.persona)
dispatch_custom_event(
"sub_answers",
AgentAnswerPiece(
answer_piece="",
level=level,
level_question_nr=question_nr,
answer_type="agent_sub_answer",
),
)
answer_str = ""
return QAGenerationUpdate(
answer=answer_str,
)
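For comparison, the pro_search_a answer node earlier in this diff streams the sub-answer token by token and dispatches each piece; a sketch of how this stub might be completed along the same lines. BASE_RAG_PROMPT, format_docs and the "subgraph_fast_llm" key are assumptions here, not imports or guarantees made by this file:

from langchain_core.messages import HumanMessage


def answer_generation_streaming(state: AnswerQuestionState) -> QAGenerationUpdate:
    # Hypothetical fleshed-out version of the stub above, mirroring the
    # streaming pattern used by the pro_search_a answer nodes.
    level, question_nr = parse_question_id(state["question_id"])
    msg = [
        HumanMessage(
            content=BASE_RAG_PROMPT.format(
                question=state["question"],
                context=format_docs(state["documents"]),
            )
        )
    ]
    streamed_tokens: list[str] = []
    for message in state["subgraph_fast_llm"].stream(msg):
        content = message.content
        if isinstance(content, str):
            dispatch_custom_event(
                "sub_answers",
                AgentAnswerPiece(
                    answer_piece=content,
                    level=level,
                    level_question_nr=question_nr,
                    answer_type="agent_sub_answer",
                ),
            )
            streamed_tokens.append(content)
    return QAGenerationUpdate(answer="".join(streamed_tokens))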

View File

@@ -0,0 +1,25 @@
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
AnswerQuestionOutput,
)
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
AnswerQuestionState,
)
from onyx.agent_search.shared_graph_utils.models import (
QuestionAnswerResults,
)
def format_answer(state: AnswerQuestionState) -> AnswerQuestionOutput:
return AnswerQuestionOutput(
answer_results=[
QuestionAnswerResults(
question=state["question"],
question_id=state["question_id"],
quality=state.get("answer_quality", "No"),
answer=state["answer"],
expanded_retrieval_results=state["expanded_retrieval_results"],
documents=state["documents"],
sub_question_retrieval_stats=state["sub_question_retrieval_stats"],
)
],
)

View File

@@ -0,0 +1,23 @@
from onyx.agent_search.pro_search_b.answer_initial_sub_question.states import (
RetrievalIngestionUpdate,
)
from onyx.agent_search.pro_search_b.expanded_retrieval.states import (
ExpandedRetrievalOutput,
)
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
def ingest_retrieval(state: ExpandedRetrievalOutput) -> RetrievalIngestionUpdate:
sub_question_retrieval_stats = state[
"expanded_retrieval_result"
].sub_question_retrieval_stats
    if sub_question_retrieval_stats is None:
        sub_question_retrieval_stats = AgentChunkStats()
return RetrievalIngestionUpdate(
expanded_retrieval_results=state[
"expanded_retrieval_result"
].expanded_queries_results,
documents=state["expanded_retrieval_result"].all_documents,
sub_question_retrieval_stats=sub_question_retrieval_stats,
)

View File

@@ -0,0 +1,63 @@
from operator import add
from typing import Annotated
from typing import TypedDict
from onyx.agent_search.core_state import SubgraphCoreState
from onyx.agent_search.pro_search_b.expanded_retrieval.models import QueryResult
from onyx.agent_search.shared_graph_utils.models import AgentChunkStats
from onyx.agent_search.shared_graph_utils.models import (
QuestionAnswerResults,
)
from onyx.agent_search.shared_graph_utils.operators import dedup_inference_sections
from onyx.context.search.models import InferenceSection
## Update States
class QACheckUpdate(TypedDict):
answer_quality: str
class QAGenerationUpdate(TypedDict):
answer: str
# answer_stat: AnswerStats
class RetrievalIngestionUpdate(TypedDict):
expanded_retrieval_results: list[QueryResult]
documents: Annotated[list[InferenceSection], dedup_inference_sections]
sub_question_retrieval_stats: AgentChunkStats
## Graph Input State
class AnswerQuestionInput(SubgraphCoreState):
question: str
question_id: str # 0_0 is original question, everything else is <level>_<question_num>.
# level 0 is original question and first decomposition, level 1 is follow up, etc
# question_num is a unique number per original question per level.
## Graph State
class AnswerQuestionState(
AnswerQuestionInput,
QAGenerationUpdate,
QACheckUpdate,
RetrievalIngestionUpdate,
):
pass
## Graph Output State
class AnswerQuestionOutput(TypedDict):
"""
This is a list of results even though each call of this subgraph only returns one result.
This is because if we parallelize the answer query subgraph, there will be multiple
results in a list so the add operator is used to add them together.
"""
answer_results: Annotated[list[QuestionAnswerResults], add]
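Concretely, the add annotation means LangGraph merges what parallel branches write to answer_results instead of letting the last writer win; a two-line illustration of the reducer semantics, independent of any graph:

from operator import add

branch_a = [{"question_id": "0_1", "answer": "..."}]
branch_b = [{"question_id": "0_2", "answer": "..."}]
merged = add(branch_a, branch_b)  # what the reducer does with each branch's write
assert merged == branch_a + branch_b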

Some files were not shown because too many files have changed in this diff.