Compare commits


507 Commits

Author SHA1 Message Date
joachim-danswer
a4b780e27f streaming + saving of search docs if no verified ones available
- sub-questions only
2025-01-19 21:25:27 -08:00
joachim-danswer
dffda7d0c0 added logging to log_messages 2025-01-19 19:28:20 -08:00
joachim-danswer
79f7ca29d1 merge with Evan changes & ProSearchConfig -> AgentSearchConfig 2025-01-19 15:19:55 -08:00
joachim-danswer
2a8898527b new directory structure
- also nodes got moved to individual files
2025-01-19 14:51:26 -08:00
Evan Lohn
0de3a6fe6b stream finished events 2025-01-19 13:18:30 -08:00
joachim-danswer
a35ef19f71 Stream pre-verify results if no verified docs 2025-01-19 09:07:53 -08:00
Evan Lohn
e955279186 fixed pro search config creation 2025-01-18 17:42:07 -08:00
joachim-danswer
a48611ef57 Updated initial search flags for UI flows
- may need tweaking
2025-01-18 16:49:44 -08:00
Evan Lohn
6fe4ce1dd6 fix non updated key issue 2025-01-18 16:15:19 -08:00
Evan Lohn
640f64d5de fixed pro search flag and unnecessary/incorrect streaming 2025-01-18 15:59:29 -08:00
Evan Lohn
a0517a8da7 finish pro->agentic rename 2025-01-18 15:10:55 -08:00
joachim-danswer
afeb7f2dd8 prompt update to suggest doc citations vs question citations 2025-01-18 15:10:13 -08:00
joachim-danswer
91ee90b00d added 'Agent Search' decision 2025-01-18 14:39:00 -08:00
joachim-danswer
5d5a8b2218 Changed "I don't know" to a more appropriate response
And some other small prompt modifications.
2025-01-18 10:38:58 -08:00
joachim-danswer
7ef928794d optional search to inform decomposition
New Agent Search parameter:
  - perform_initial_search (default: False)
2025-01-18 10:16:00 -08:00
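For illustration, a minimal sketch of how such a flag might sit on the config object — the AgentSearchConfig name appears in the rename commit above and perform_initial_search with its default comes from this message, but the surrounding layout is an assumption:

```python
from dataclasses import dataclass


@dataclass
class AgentSearchConfig:
    # Sketch only: when True, run one retrieval pass over the original
    # question first, so the retrieved docs can inform how sub-questions
    # are decomposed. Default False, per the commit message.
    perform_initial_search: bool = False
```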
Evan Lohn
0df0b7147a name cleanup and WIP unifying input types 2025-01-17 19:54:15 -08:00
Evan Lohn
fb3bc2f259 working version with cleanup 2025-01-17 17:16:41 -08:00
Evan Lohn
e8fd57a1ac WIP 2025-01-17 17:06:43 -08:00
Evan Lohn
9d7c337ee6 skip answer verification for idk answers 2025-01-16 17:04:31 -08:00
Evan Lohn
38fd061ed5 cleanup hardcoded idks 2025-01-16 16:41:00 -08:00
Evan Lohn
7282447186 clean run_graph 2025-01-16 16:15:59 -08:00
joachim-danswer
b6da896240 removal of citations from previous answers 2025-01-16 14:26:41 -08:00
joachim-danswer
4dad841bfc new refined prompt 2025-01-16 14:25:44 -08:00
joachim-danswer
755b7579b5 sub_question number handling 2025-01-16 14:25:38 -08:00
Evan Lohn
42257244d9 finished checks for llm context window before invoking or streaming 2025-01-16 14:10:05 -08:00
joachim-danswer
3991eac4c0 new citation format 2025-01-15 17:17:04 -08:00
Evan Lohn
fabc9f7e4b attempted fix for early exit from langgraph event loop 2025-01-15 17:05:47 -08:00
Evan Lohn
7987a3a75e mypy error on unbound locals 2025-01-15 15:43:55 -08:00
Evan Lohn
ff24f82d52 unbound local error 2025-01-15 15:32:24 -08:00
Evan Lohn
563513698b fixed no good subquestions error 2025-01-15 15:23:59 -08:00
Evan Lohn
b2208195ce fixed streaming and persistence of answer docs 2025-01-15 14:55:52 -08:00
Evan Lohn
ac9ce7ca68 added back saved docs for agentic search 2025-01-15 14:23:24 -08:00
joachim-danswer
c9b34ed583 Updated citation handling on backend + citations for refined answer 2025-01-15 14:16:04 -08:00
Evan Lohn
2bc948fa73 dispatch initial question as dummy subquestion 2025-01-15 11:51:13 -08:00
Evan Lohn
8ea987e068 remove empty expanded subquestions 2025-01-15 11:35:00 -08:00
Evan Lohn
43e488275f context length fix 2025-01-15 11:35:00 -08:00
joachim-danswer
09d0820882 Question numbering fix (logging temp to info) 2025-01-15 11:30:46 -08:00
Evan Lohn
2c0ef2ea47 better separated dispatch 2025-01-15 10:26:38 -08:00
Evan Lohn
7cca660a93 handle prompts that exceed context window 2025-01-14 18:58:12 -08:00
Evan Lohn
ab70b7c303 persistence of refined agent answer and top-level handling of fine grained citations (WIP) 2025-01-14 17:28:40 -08:00
pablodanswer
0ff212a2b4 alembic migration 2025-01-14 14:41:14 -08:00
pablodanswer
5c31171b37 Merge branch 'pro-search' of github.com:onyx-dot-app/onyx into pro-search 2025-01-14 14:38:27 -08:00
pablodanswer
1669a5d69f alembic migration 2025-01-14 14:37:59 -08:00
pablodanswer
60db37fac1 alembic migration 2025-01-14 14:35:43 -08:00
joachim-danswer
48a4e2c76b refined prompt fix 2025-01-14 14:35:43 -08:00
Evan Lohn
e1175e15da stream refined answer 2025-01-14 14:35:43 -08:00
Evan Lohn
27b044c030 fix local variable access issue 2025-01-14 14:35:43 -08:00
Evan Lohn
4a7c6a6561 subquestion numbers start at 1 2025-01-14 14:35:43 -08:00
Evan Lohn
78df113c7f un-break refinement flow and stream answer 2025-01-14 14:35:43 -08:00
Evan Lohn
113767a061 stream stop info consistency 2025-01-14 14:35:43 -08:00
pablodanswer
1851c836dc alembic migration 2025-01-14 14:35:43 -08:00
Evan Lohn
413a4e55f5 use correct doc id 2025-01-14 14:35:43 -08:00
Evan Lohn
dfcf8791e7 tool call kickoffs for starting agentic, starting refined 2025-01-14 14:35:43 -08:00
Evan Lohn
1b9f128851 verified docs for persisted info 2025-01-14 14:35:43 -08:00
Evan Lohn
d38b9bd194 cleanup run_graph and added stream stopping for sub_answers 2025-01-14 14:35:43 -08:00
Evan Lohn
ee304c9c35 improved no-subquestion-gen handling 2025-01-14 14:35:43 -08:00
joachim-danswer
4c75b73fa8 Pro_search B + fix for no docs retrieved 2025-01-14 14:35:43 -08:00
Evan Lohn
70509fbe7e attach persisted agent search info to assistant message 2025-01-14 14:35:43 -08:00
Evan Lohn
6bea880695 added subquestion info to streamed docs, removed extra deduping 2025-01-14 14:35:43 -08:00
Evan Lohn
249dd96f25 tentative fix for not persisting search docs 2025-01-14 14:35:43 -08:00
Evan Lohn
f0e74618e2 fixed issue when tool call unnecessary 2025-01-14 14:35:43 -08:00
Evan Lohn
3e27b79040 better answer persistence 2025-01-14 14:35:43 -08:00
Evan Lohn
91a37ef345 save agent answers 2025-01-14 14:35:43 -08:00
Evan Lohn
6c2f4e4775 fix citation processing in basic search 2025-01-14 14:35:43 -08:00
Evan Lohn
87101c8c74 fix run_graph testing script 2025-01-14 14:35:43 -08:00
Evan Lohn
9887be3dfc fixed issues with standard flow 2025-01-14 14:35:43 -08:00
Evan Lohn
22e209afc8 add back old citation processing temporarily 2025-01-14 14:35:43 -08:00
Evan Lohn
05a4792575 fix heads issue 2025-01-14 14:35:43 -08:00
Evan Lohn
f79d44bd6e Pro search clean commit. See evan_answer_rework branch for prior history 2025-01-14 14:35:43 -08:00
pablonyx
5081d240ce Proper anonymous user restricting (#3645) 2025-01-14 14:35:42 -08:00
pablodanswer
a570d39301 validated 2025-01-14 14:15:41 -08:00
pablodanswer
cd9279f0e1 additional label filtering 2025-01-14 13:49:12 -08:00
pablodanswer
72d1928b8f label editing / deletion 2025-01-14 13:45:49 -08:00
pablodanswer
086bba6454 fully cleaned assistant editor 2025-01-14 13:40:41 -08:00
pablodanswer
289be0a423 user settings etc. 2025-01-14 13:31:37 -08:00
pablodanswer
862a62483c various improvements 2025-01-14 13:19:25 -08:00
pablodanswer
694925d81d k 2025-01-14 12:33:34 -08:00
pablodanswer
478eb511fa add shortcuts, reorganize various pages, update seeding, starter messages, etc. 2025-01-14 12:30:05 -08:00
pablodanswer
a259f92f39 alembic migration 2025-01-13 20:55:52 -08:00
joachim-danswer
9592f0a494 refined prompt fix 2025-01-13 20:54:39 -08:00
Evan Lohn
07c2336c08 stream refined answer 2025-01-13 20:54:39 -08:00
Evan Lohn
7350dd73d1 fix local variable access issue 2025-01-13 20:54:39 -08:00
Evan Lohn
8f428cfcec subquestion numbers start at 1 2025-01-13 20:54:39 -08:00
Evan Lohn
3610a51222 un-break refinement flow and stream answer 2025-01-13 20:54:39 -08:00
Evan Lohn
e0654c2209 stream stop info consistency 2025-01-13 20:54:39 -08:00
pablodanswer
99d7c09433 alembic migration 2025-01-13 20:54:39 -08:00
Evan Lohn
6803702ca3 use correct doc id 2025-01-13 20:54:39 -08:00
Evan Lohn
4ea0ca5a78 tool call kickoffs for starting agentic, starting refined 2025-01-13 20:54:39 -08:00
Evan Lohn
561e44e443 verified docs for persisted info 2025-01-13 20:54:39 -08:00
Evan Lohn
931a119498 cleanup run_graph and added stream stopping for sub_answers 2025-01-13 20:54:39 -08:00
Evan Lohn
fddc1882d1 improved no-subquestion-gen handling 2025-01-13 20:54:39 -08:00
joachim-danswer
086d70a085 Pro_search B + fix for no docs retrieved 2025-01-13 20:54:39 -08:00
Evan Lohn
5a832628e2 attach persisted agent search info to assistant message 2025-01-13 20:54:39 -08:00
Evan Lohn
dfe1ed4c66 added subquestion info to streamed docs, removed extra deduping 2025-01-13 20:54:39 -08:00
Evan Lohn
5761557c19 tentative fix for not persisting search docs 2025-01-13 20:54:39 -08:00
Evan Lohn
29c479f496 fixed issue when tool call unnecessary 2025-01-13 20:54:39 -08:00
Evan Lohn
b60002c791 better answer persistence 2025-01-13 20:54:39 -08:00
Evan Lohn
b49c5afb09 save agent answers 2025-01-13 20:54:39 -08:00
Evan Lohn
b15e29619a fix citation processing in basic search 2025-01-13 20:54:39 -08:00
Evan Lohn
119336035e fix run_graph testing script 2025-01-13 20:54:39 -08:00
Evan Lohn
25bb5983af fixed issues with standard flow 2025-01-13 20:54:39 -08:00
Evan Lohn
ca1c12f122 add back old citation processing temporarily 2025-01-13 20:54:39 -08:00
Evan Lohn
9b55643e55 fix heads issue 2025-01-13 20:54:39 -08:00
Evan Lohn
233bbfa4e4 Pro search clean commit. See evan_answer_rework branch for prior history 2025-01-13 20:54:36 -08:00
pablonyx
8c67288197 Proper anonymous user restricting (#3645) 2025-01-13 20:54:05 -08:00
pablodanswer
26c5c57ddb quick v1 labels 2025-01-13 19:45:00 -08:00
pablodanswer
ac15d0002a k 2025-01-13 18:01:19 -08:00
pablodanswer
8cca29eeab fix width + editors 2025-01-13 17:58:25 -08:00
pablodanswer
a365ab0c7d remove some whitespace 2025-01-13 17:55:29 -08:00
pablodanswer
fd89d1e141 quick nit 2025-01-13 17:53:38 -08:00
pablodanswer
7e32d21236 push quick changes from diff 2025-01-13 17:52:08 -08:00
pablodanswer
71eab9c740 push 2025-01-13 17:47:36 -08:00
pablodanswer
5c4451c084 Merge branch 'new_ux_branch' of github.com:onyx-dot-app/onyx into new_ux_branch 2025-01-13 17:34:37 -08:00
pablodanswer
2c894aaf07 quick nit 2025-01-13 17:32:04 -08:00
Yuhong Sun
bf3da9f9cf k 2025-01-13 17:25:31 -08:00
Yuhong Sun
4506770dd9 Yuhong 2025-01-13 17:16:15 -08:00
pablodanswer
25e2dfa5df k 2025-01-13 13:45:20 -08:00
pablodanswer
ba2d5fcc7d add input prompts 2025-01-13 13:43:41 -08:00
pablodanswer
f9be71ff24 l 2025-01-13 12:44:21 -08:00
pablodanswer
b92485223b sidebar 2025-01-13 12:44:04 -08:00
pablodanswer
5790f37648 quick nit 2025-01-13 12:41:32 -08:00
pablodanswer
b641cfc3e4 Merge branch 'new_ux_branch' of github.com:onyx-dot-app/onyx into new_ux_branch 2025-01-13 12:39:36 -08:00
pablodanswer
a2dfbb5b9c update assistant editor 2025-01-13 12:39:16 -08:00
Yuhong Sun
570ba9f0b6 Yuhong 2025-01-13 12:17:21 -08:00
pablodanswer
650fee6e2c quick updates 2025-01-13 12:00:47 -08:00
pablodanswer
8aa7fb5027 Merge branch 'new_ux_branch' of github.com:onyx-dot-app/onyx into new_ux_branch 2025-01-13 10:04:39 -08:00
pablodanswer
e4bf04fd94 update chat banner 2025-01-13 10:04:31 -08:00
Yuhong Sun
70de4708d0 Yuhong 2025-01-13 10:03:57 -08:00
pablodanswer
4ed9f0ffc7 editor changes 2025-01-12 21:22:35 -08:00
pablodanswer
7b67546199 k 2025-01-12 19:17:38 -08:00
pablodanswer
6c68a53c62 nit 2025-01-12 19:14:19 -08:00
pablodanswer
9e3b1d29aa nit 2025-01-12 18:25:40 -08:00
pablodanswer
ec7a606f4c popover within a popover > modal within a modal 2025-01-12 18:08:00 -08:00
pablodanswer
4e5bc7a4ba quick nit 2025-01-12 17:46:52 -08:00
pablodanswer
55d3b0f271 Merge branch 'new_ux_branch' of github.com:onyx-dot-app/onyx into new_ux_branch 2025-01-12 17:45:18 -08:00
pablodanswer
565bfa4e88 address all but modal within modal 2025-01-12 17:42:34 -08:00
Yuhong Sun
9ad10d1f60 Yuhong 2025-01-12 17:10:38 -08:00
pablodanswer
ad19e9aee7 k 2025-01-12 16:47:57 -08:00
pablodanswer
b11641c2bc most new fixes 2025-01-12 16:32:52 -08:00
pablodanswer
b7307813d5 quick nits 2025-01-12 16:22:21 -08:00
Yuhong Sun
3e8e544086 Yuhong 2025-01-12 14:03:36 -08:00
pablodanswer
7ba00f8b48 quick nit 2025-01-12 12:17:56 -08:00
pablodanswer
19204b49a7 final updates 2025-01-12 12:13:05 -08:00
pablodanswer
7254cb642d fully updated - groups 2025-01-12 12:01:39 -08:00
pablodanswer
da27c8be6d k 2025-01-11 20:28:12 -08:00
pablodanswer
9d0272fe62 minor nit 2025-01-11 19:13:45 -08:00
pablodanswer
167b5bad49 additional nits 2025-01-11 17:14:01 -08:00
pablodanswer
3acf235c84 quick nit 2025-01-11 17:07:53 -08:00
pablodanswer
0d9441da88 draggables 2025-01-11 17:02:59 -08:00
pablodanswer
a915e4dfa7 k 2025-01-11 11:45:50 -08:00
pablodanswer
7f2610e7d4 looking good 2025-01-11 11:24:09 -08:00
pablodanswer
19323472e6 nit 2025-01-10 19:24:33 -08:00
pablodanswer
be84cf95bf fix the hydra 2025-01-10 14:15:17 -08:00
pablodanswer
79d847f660 nit 2025-01-10 13:08:50 -08:00
pablodanswer
b06f56102e quick nit 2025-01-10 12:55:25 -08:00
pablodanswer
078ae4b9c7 add new ux 2025-01-10 12:54:00 -08:00
pablodanswer
5d034e08fc nit 2025-01-10 12:07:27 -08:00
pablodanswer
cafa0aac0d push minor changes 2025-01-10 12:07:27 -08:00
pablodanswer
f61864c36e nit 2025-01-10 12:07:27 -08:00
pablodanswer
5b0a1ccc31 minor nit 2025-01-10 12:07:27 -08:00
pablodanswer
acb9cca1e8 quick nits 2025-01-10 12:07:27 -08:00
pablodanswer
e22918e31d v3 2025-01-10 12:07:27 -08:00
pablodanswer
e5c430178d quick nits 2025-01-10 12:07:27 -08:00
pablodanswer
35f379b093 validate 2025-01-10 12:07:27 -08:00
pablodanswer
c85900e4f8 nit 2025-01-10 12:07:27 -08:00
pablodanswer
bf6b6342a1 quick addition 2025-01-10 12:07:27 -08:00
pablodanswer
9adbfc1b81 organize components 2025-01-10 12:07:27 -08:00
pablodanswer
b5dd5df36f quick nit 2025-01-10 12:07:27 -08:00
pablodanswer
6b0a2e11b5 k 2025-01-10 12:07:27 -08:00
pablodanswer
169f3fd0dc v2 2025-01-10 12:07:27 -08:00
pablodanswer
c15a828576 add chrome extension
minor clean up

additional handling

post rebase fixes

nit

quick bump

finalize

minor cleanup

organizational

Revert changes in backend directory

Revert changes in deployment directory

push misc changes

improve shortcut display + general nrf page layout

minor clean up

quick nit

update chrome

k

build fix

k

update

k
2025-01-10 12:07:26 -08:00
Weves
1470b7e038 Add tests for some LLM provider endpoints + small logic change to ensure that display_model_names is not empty 2025-01-10 08:55:53 -08:00
rkuo-danswer
bf78fb79f8 possible fix for gdrive oauth in the cloud (#3642)
* possible fix for gd oauth in the cloud

* missed code in rename/merge

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2025-01-10 02:10:59 +00:00
rkuo-danswer
d972a78f45 Make connector pause and delete fast (#3646)
* first cut

* refresh on delete

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2025-01-10 01:39:45 +00:00
hagen-danswer
50131ba22c Better logging for confluence space permissions 2025-01-09 15:13:02 -08:00
rkuo-danswer
439217317f Merge pull request #3644 from onyx-dot-app/bugfix/model-server-build-fix
hope this env var works.
2025-01-09 14:34:25 -08:00
hagen-danswer
c55de28423 added distinct when outer joining for user filters (#3641)
* added distinct when outer joining for user filters

* Added distinct when outer joining for user filters for all
2025-01-09 14:15:38 -08:00
Richard Kuo (Danswer)
91e32e801d hope this env var works. 2025-01-09 13:51:58 -08:00
rkuo-danswer
2ae91f0f2b Feature/redis prod tool (#3619)
* prototype tools for handling prod issues

* add some commands

* add batching and dry run options

* custom redis tool

* comment

* default to app config settings for redis

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2025-01-09 21:34:07 +00:00
hagen-danswer
d40fd82803 Conf doc sync improvements (#3643)
* Reduce number of requests to Confluence

* undo

* added a way to dynamically adjust the pagination limit

* undo
2025-01-09 12:56:56 -08:00
rkuo-danswer
97a963b4bf add index to speed up get last attempt (#3636)
* add index to speed up get last attempt

* use descending order

* put back unique param

* how did this not get formatted?

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2025-01-09 00:56:55 +00:00
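A hedged sketch of what such a migration could look like with Alembic — the descending order is from the bullets above, while the table, column, and index names here are assumptions, not the actual migration:

```python
from alembic import op
import sqlalchemy as sa


def upgrade() -> None:
    # Descending order matches a "fetch the most recent attempt" query,
    # letting Postgres read the newest row straight off the index.
    op.create_index(
        "ix_index_attempt_latest",      # assumed name
        "index_attempt",                # assumed table
        [sa.text("time_created DESC")],  # assumed column
        unique=False,
    )


def downgrade() -> None:
    op.drop_index("ix_index_attempt_latest", table_name="index_attempt")
```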
pablonyx
7f6ef1ff57 Remove unnecessary logspam
Remove unnecessary logs
2025-01-08 17:03:52 -08:00
pablodanswer
d98746b988 remove unnecessary logs 2025-01-08 17:03:15 -08:00
rkuo-danswer
a76f1b4c1b Merge pull request #3628 from onyx-dot-app/bugfix/debug_tenant
add more debug logging for locking issue
2025-01-08 15:14:37 -08:00
hagen-danswer
4c4ff46fe3 Fixing google drive tests (#3634)
* Fixing google drive tests

* Update conftest.py
2025-01-08 22:34:38 +00:00
hagen-danswer
0f9842064f Added env var to skip warm up (#3633) 2025-01-08 14:29:15 -08:00
pablonyx
d7bc32c0ec Fully remove visit API (#3621)
* v1

* update indexing logic

* update updates

* nit

* clean up args

* update for clarity + best practices

* nit + logs

* fix

* minor clean up

* remove logs

* quick nit
2025-01-08 13:49:01 -08:00
Richard Kuo (Danswer)
1f48de9731 more logging 2025-01-08 12:49:24 -08:00
Richard Kuo (Danswer)
a22d02ff70 add another log line 2025-01-08 10:01:24 -08:00
Richard Kuo (Danswer)
dcfc621a66 add more debug logging for locking issue 2025-01-08 09:43:47 -08:00
Chris Weaver
eac73a1bf1 Improve egnyte connector (#3626) 2025-01-08 03:09:46 +00:00
pablonyx
717560872f Merge pull request #3627 from onyx-dot-app/whitelabeling_name
Whitelabelling
2025-01-07 19:16:01 -08:00
pablodanswer
ce2572134c k 2025-01-07 19:06:52 -08:00
rkuo-danswer
02f72a5c86 Multiple cloud/indexing fixes (#3609)
* more debugging

* test reacquire outside of loop

* more logging

* move lock_beat test outside the try catch so that we don't worry about testing locks we never took

* use a larger scan_iter value for performance

* batch stale document sync batches

* add debug logging for a particular timeout issue

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2025-01-08 01:30:29 +00:00
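As a rough sketch of the larger-scan_iter-value and batching bullets above (redis-py shown; the key pattern, batch size, and handler are illustrative assumptions):

```python
import redis


def handle_stale_doc_batch(keys: list[bytes]) -> None:
    # Hypothetical per-batch handler; stands in for queueing a sync task.
    print(f"queueing {len(keys)} stale docs")


r = redis.Redis()
BATCH_SIZE = 1000
batch: list[bytes] = []

# A larger COUNT hint makes each SCAN round trip return more keys,
# reducing the number of calls on a large keyspace.
for key in r.scan_iter(match="documentsync:*", count=4096):
    batch.append(key)
    if len(batch) >= BATCH_SIZE:
        handle_stale_doc_batch(batch)
        batch.clear()
if batch:
    handle_stale_doc_batch(batch)
```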
hagen-danswer
eb916df139 added debugger step 2025-01-07 16:18:46 -08:00
hagen-danswer
fafad5e119 Improve contributing guide (#3625)
* Improve contributing guide

* more improvements to contributing guide
2025-01-07 16:16:17 -08:00
pablonyx
a314a08309 Speed up admin pages (#3623)
* ni

* speed up pages

* minor nit

* nit
2025-01-07 15:40:26 -08:00
hagen-danswer
4ce24d68f7 prevent other tests from interfering with existing google drive tests (#3624)
* prevent other tests from interfering with existing google drive tests

* cleanup gdrive tests

* finished

* done
2025-01-07 15:32:36 -08:00
hagen-danswer
a95f4298ad Improved logging for confluence calls (#3622)
* Improved logging for confluence calls

* cleanup

* idk

* combined logging
2025-01-07 21:53:08 +00:00
rkuo-danswer
7cd76ec404 comment out the per doc sync hack (#3620)
* comment out the per doc sync hack

* fix commented code

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2025-01-07 19:44:15 +00:00
pablonyx
5b5c1166ca Async Redis (#3618)
* k

* update configs for clarity

* typing

* update
2025-01-07 19:34:57 +00:00
pablonyx
d9e9c6973d Multitenant anonymous (#3595)
* anonymous users for multi tenant setting

* nit

* k
2025-01-07 02:57:20 +00:00
pablonyx
91903141cd Built in tool cache with tool call id (#3617)
* k

* improved

* k

* nit

* nit

* nit
2025-01-07 01:03:52 +00:00
hagen-danswer
e329b63b89 Added Permission Syncing for Salesforce (#3551)
* Added Permission Syncing for Salesforce

* cleanup

* updated connector doc conversion

* finished salesforce permission syncing

* fixed connector to batch Salesforce queries

* tests!

* k

* Added error handling and check for ee and sync type for postprocessing

* comments

* minor touchups

* tested to work!

* done

* mypy

* lil cleanup

* minor comment
2025-01-07 00:37:03 +00:00
hagen-danswer
71c2559ea9 Discord cleanup (#3615)
* Discord cleanup

* fix case discrepancy
2025-01-06 15:11:03 -08:00
Ishankoradia
ceb34a41d9 discord connector (#3023)
* discord: frontend and backend poll connector

* added requirements for discord installation

* fixed the mypy errors

* process messages not part of any thread

* minor change

* updated the connector; this logic works & am able to docs when i print

* minor change

* ability to enter a start date to pull docs from and refactor

* added the load connector and fixed mypy errors

* local commit test

done!

* minor refactor and properly commented everything

* updated the logic to handle permissions and index active/archived threads

* basic discord test template

* cleanup

* going away with the danswer discord client class ; using an async context manager

* moved to proper folder

* minor fixes

* needs improvement

* fixed discord icon

---------

Co-authored-by: hagen-danswer <hagen@danswer.ai>
2025-01-06 14:54:22 -08:00
pablonyx
82eab9d704 Doc explore fix (#3614)
* k

* k

* add comment
2025-01-06 19:42:07 +00:00
pablonyx
2b8d3a6ef5 fix white labelling empty string (#3603) 2025-01-06 19:26:55 +00:00
pablonyx
4fb129e77b Increase timeout + revert changes for clarity (#3604)
* increase timeout + revert changes for clarity

* quick nit

* k
2025-01-06 18:20:53 +00:00
pablonyx
f16ca1b735 minor auth fix (#3613) 2025-01-06 17:51:02 +00:00
pablonyx
e3b2c9d944 Tracking update (#3605)
* tracking update

* k
2025-01-06 17:17:00 +00:00
pablodanswer
6c9c25642d remove empty files on main 2025-01-06 09:01:33 -08:00
hagen-danswer
2862d8bbd3 Minor opensource cleanup (#3610) 2025-01-06 07:26:07 -08:00
skylares
143be6a524 Add assertions to Zendesk connector tests (#3600)
Co-authored-by: hagen-danswer <hagen@danswer.ai>
2025-01-06 06:43:23 -08:00
SubashMohan
c2444a5cff Slim connector for Zendesk (#3367)
* Add SlimConnector support for Zendesk

* ZenDesk format changes

* code formating

---------

Co-authored-by: hagen-danswer <hagen@danswer.ai>
2025-01-06 06:41:41 -08:00
rkuo-danswer
7f8194798a Merge pull request #3608 from onyx-dot-app/bugfix/locking_4
fix timing calculations and don't spam the queue lengths check from e…
2025-01-05 21:13:47 -08:00
Richard Kuo (Danswer)
e3947e4b64 fix timing calculations and don't spam the queue lengths check from every task 2025-01-05 21:13:08 -08:00
rkuo-danswer
98005510ad Merge pull request #3607 from onyx-dot-app/bugfix/locking_redux
add detailed timings to monitor vespa sync
2025-01-05 20:03:17 -08:00
Richard Kuo
ca54bd0b21 add logging 2025-01-05 20:02:05 -08:00
Richard Kuo
d26f8ce852 add detailed timings to monitor vespa sync 2025-01-05 19:24:58 -08:00
pablonyx
c8090ab75b Auth fix + Registration Clarity (#3590)
* clarify auth flow

* k

* nit

* k

* fix typing
2025-01-06 02:17:45 +00:00
hagen-danswer
e100a5e965 Properly account for anonymous access in Confluence 2025-01-05 18:01:39 -08:00
pablonyx
ddec239fef Improved indexing (#3594)
* nit

* k

* add steps

* main util functions

* functioning fully

* quick nit

* k

* typing fix

* k

* address comments
2025-01-05 23:31:53 +00:00
Chris Weaver
e83542f572 Add support for auto-refreshing available models based on an API call (#3576) 2025-01-05 15:45:49 -08:00
joachim-danswer
8750f14647 alignment & renaming of objects for initial (displayed) ranking and re-ranking/validation citations
- renamed post-reranking/validation citation information consistently to final_... (example: doc_id_to_rank_map -> final_doc_id_to_rank_map)
- changed and renamed objects containing initial ranking information (now: display_...) consistent with final rankings (final_...). Specifically, {} to [] for displayed_search_results
- for CitationInfo, changed citation_num from 'x-th citation in response stream' to the initial position of the doc [NOTE: test implications]
- changed tests:
    onyx/backend/tests/unit/onyx/chat/stream_processing/test_citation_processing.py
    onyx/backend/tests/unit/onyx/chat/stream_processing/test_citation_substitution.py
2025-01-05 15:44:34 -08:00
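To make the rename concrete, a minimal sketch of the described semantics — only the names taken from the message above (CitationInfo, citation_num, displayed_search_results, final_doc_id_to_rank_map) are real; the model shape and types are assumed:

```python
from pydantic import BaseModel


class CitationInfo(BaseModel):
    # After this change: the doc's position in the initially displayed
    # ranking, not "x-th citation emitted in the response stream".
    citation_num: int
    document_id: str


# displayed_search_results moved from a dict ({}) to an ordered list ([]),
# mirroring the final_... naming on the post-reranking side.
displayed_search_results: list[str] = []
final_doc_id_to_rank_map: dict[str, int] = {}
```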
rkuo-danswer
27699c8216 Merge pull request #3602 from onyx-dot-app/bugfix/lock_not_owned
various lock diagnostics and timing adjustments
2025-01-05 15:12:01 -08:00
Richard Kuo (Danswer)
6fcd712a00 comment for lock and usage 2025-01-05 15:11:39 -08:00
Richard Kuo (Danswer)
b027a08698 various lock diagnostics and timing adjustments 2025-01-05 13:59:36 -08:00
Chris Weaver
1db778baa8 Small airtable refactor + handle files with uppercase extensions (#3598)
* Small airtable refactor + handle files with uppercase extensions

* Fix mypy
2025-01-05 11:27:50 -08:00
Chris Weaver
f895e5f7d0 Speedup orphan doc cleanup script (#3596)
* Speedup orphan doc cleanup script

* Fix mypy
2025-01-05 14:28:25 +00:00
rkuo-danswer
2fc58252f4 Merge pull request #3599 from onyx-dot-app/bugfix/doc-sync
quick hack to prevent resyncing the same doc
2025-01-05 04:14:28 -08:00
Richard Kuo (Danswer)
371d1ccd8f move to just setting keys 2025-01-05 03:26:33 -08:00
Richard Kuo (Danswer)
7fb92d42a0 quick hack to prevent resyncing the same doc 2025-01-05 03:05:32 -08:00
Weves
af2061c4db Add Linear OAuth env variables to dev compose 2025-01-04 16:02:04 -08:00
pablonyx
ffec19645b JWT -> Redis (#3574)
* functional v1

* functional logout

* minor clean up

* quick clean up

* update configuration

* ni

* nit

* finalize

* update login page

* delete unused import

* quick nit

* updates

* clean up

* ni

* k

* k
2025-01-04 19:35:43 +00:00
pablonyx
67d2c86250 Remove Exclamation marks + comments (#3586)
* remove explanation marks + comments

* nit
2025-01-04 18:39:16 +00:00
pablonyx
6c018cb53f add personal assistant usage stats (#3543) 2025-01-04 18:38:41 +00:00
pablonyx
62302e3faf Latex for $10 and $100 (#3585)
* nit

* k
2025-01-04 17:37:04 +00:00
rkuo-danswer
0460531c72 Merge pull request #3593 from onyx-dot-app/bugfix/primary_worker_lock
the primary worker lock doesn't always exist
2025-01-03 22:01:11 -08:00
Richard Kuo
6af07a888b the primary worker lock doesn't always exist 2025-01-03 21:12:11 -08:00
Weves
ea75f5cd5d Move google-cloud-aiplatform to default requirements to support vertex AI llama 2025-01-03 15:20:59 -08:00
rkuo-danswer
b92c183022 re-prep user group deletion on the actual deletion (#3588)
* re-prep user group deletion on the actual deletion

* user group needs to be synced to be prepped

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2025-01-03 22:35:56 +00:00
skylares
c191e23256 Pagination Hook (#3494)
* Backend changes for pagination hook + Paginated users table

* Frontend changes for pagination hook

* Fix invited users endpoint

* Fix layout shift & add enter to submit user invites

* mypy

* Cleanup

* Resolve PR concerns & remove UserStatus

* Fix errors

---------

Co-authored-by: hagen-danswer <hagen@danswer.ai>
2025-01-03 14:32:55 -08:00
rkuo-danswer
66f9124135 Merge pull request #3584 from onyx-dot-app/bugfix/log_spacing
fix formatting
2025-01-03 00:43:36 -08:00
Richard Kuo
8f0fb70bbf fix formatting 2025-01-02 23:21:54 -08:00
rkuo-danswer
ef5e5c80bb Merge pull request #3577 from onyx-dot-app/bugfix/model_server_exception_logging
fix response logging
2025-01-02 23:08:46 -08:00
rkuo-danswer
03acb6587a Feature/model server logging (#3579)
* improve model server logging

* improve exception logging with provider/model names

* get everything into one log line

---------

Co-authored-by: Richard Kuo <rkuo@rkuo.com>
2025-01-03 01:40:29 +00:00
hagen-danswer
d1ec72b5e5 Reworked salesforce connector to use bulk api (#3581) 2025-01-02 18:09:02 -08:00
Weves
3b214133a8 Airtable improvement 2025-01-02 17:56:05 -08:00
rkuo-danswer
2232702e99 retry the individual delete's (#3580)
* retry the individual delete's

* need to raise inside the retry

* just use retry for now

---------

Co-authored-by: Richard Kuo <rkuo@rkuo.com>
2025-01-02 17:39:37 -08:00
hagen-danswer
8108ff0a4b Added logging for permissions upsert queue length 2025-01-02 17:39:01 -08:00
Richard Kuo
f64e78e986 fix response logging 2025-01-02 13:39:19 -08:00
Chris Weaver
08312a4394 Update Slack link in README.md 2025-01-01 10:03:59 -08:00
Weves
92add655e0 Slack fixes 2024-12-31 18:04:12 -08:00
Chris Weaver
d64464ca7c Add support for OAuth connectors that require user input (#3571)
* Add support for OAuth connectors that require user input

* Cleanup

* Fix linear

* Small re-naming

* Remove console.log
2024-12-31 18:03:33 -08:00
Yuhong Sun
ccd3983802 Linear OAuth Connector (#3570) 2024-12-31 16:11:09 -08:00
pablonyx
240f3e4fff Ensure users cannot modify their roles on main
Ensure users cannot modify their roles
2024-12-31 15:59:27 -05:00
pablonyx
1291b3d930 Add anonymous user to main
Anonymous user
2024-12-31 15:58:52 -05:00
rkuo-danswer
d05f1997b5 Merge pull request #3569 from onyx-dot-app/bugfix/alt_index
we didn't want to rename the alt index suffix, reverting
2024-12-31 12:39:00 -08:00
Chris Weaver
aa2e2a62b9 Small Egnyte tweaks (#3568) 2024-12-31 19:28:38 +00:00
Richard Kuo
174e5968f8 we didn't want to rename the alt index suffix, reverting 2024-12-31 11:28:11 -08:00
pablodanswer
1f27606e17 minor clean up 2024-12-31 13:04:02 -05:00
pablodanswer
60355b84c1 quick nits 2024-12-31 13:04:02 -05:00
pablodanswer
680ab9ea30 updated logic 2024-12-31 13:04:02 -05:00
pablodanswer
c2447dbb1c cosmetic updates 2024-12-31 13:04:02 -05:00
pablodanswer
52bad522f8 update for multi-tenant clarity 2024-12-31 13:04:02 -05:00
pablodanswer
63e5e58313 add anonymous user 2024-12-31 13:04:02 -05:00
rkuo-danswer
2643782e30 Merge pull request #3567 from onyx-dot-app/bugfix/revert_vespa
Revert "More efficient Vespa indexing (#3552)"
2024-12-31 09:47:00 -08:00
Richard Kuo
3eb72e5c1d Revert "More efficient Vespa indexing (#3552)"
This reverts commit 2783216781.
2024-12-31 09:40:23 -08:00
rkuo-danswer
9b65c23a7e Merge pull request #3566 from onyx-dot-app/bugfix/primary_task_timings
re-enable celery task execution logging in primary worker
2024-12-31 01:29:05 -08:00
Richard Kuo (Danswer)
b43a8e48c6 add some return types to distinguish when the task is actually performing work 2024-12-31 00:10:33 -08:00
Richard Kuo (Danswer)
1955c1d67b re-enable celery task execution logging in primary worker 2024-12-30 21:53:00 -08:00
Chris Weaver
3f92ed9d29 Airtable connector (#3564)
* Airtable connector

* Improvements

* improve

* Clean up test

* Add secrets

* Fix mypy + add access token

* Mock unstructured call

* Adjust comments

* Fix ID in test
2024-12-31 03:06:28 +00:00
Weves
618369f4a1 Small fix 2024-12-30 19:20:30 -08:00
pablonyx
2783216781 More efficient Vespa indexing (#3552)
---------

Co-authored-by: Chris Weaver <25087905+Weves@users.noreply.github.com>
2024-12-30 18:51:14 -08:00
rkuo-danswer
bec0f9fb23 permission sync in cloud and beat expiry adjustment (#3544)
* try fixing exception in cloud

* raise beat expiry ... 60 seconds might be starving certain tasks completely

* adjust expiry down to 10 min

* raise concurrency overflow for indexing worker.

* parent pid check

* fix comment

* fix parent pid check, also actually raise an exception from the task if the spawned task exit status is bad

* fix pid check

* some cleanup and task wait fixes

* review fixes

* comment some code so we don't change too many things at once

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
Co-authored-by: Richard Kuo <rkuo@rkuo.com>
2024-12-31 01:05:57 +00:00
pablodanswer
97a03e7fc8 nit 2024-12-29 21:07:12 -05:00
pablodanswer
8d6e8269b7 k 2024-12-29 21:07:12 -05:00
pablodanswer
9ce2c6c517 minor change 2024-12-29 21:07:12 -05:00
pablodanswer
2ad8bdbc65 k 2024-12-29 21:07:12 -05:00
rkuo-danswer
a83c9b40d5 Bugfix/oauth fix (#3507)
* old oauth file left behind

* fix function change that was lost in merge

* fix some testing vars

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2024-12-30 01:49:12 +00:00
Chris Weaver
340fab1375 Additional error handling + logging for google drive connector (#3563)
* Additional error handling + logging for google drive connector

* Fix mypy
2024-12-29 17:48:02 -08:00
hagen-danswer
3ec338307f Fixed indexing issues with Salesforce 2024-12-29 16:45:29 -08:00
pablonyx
27acd3387a Auth specific rate limiting (#3463)
* k

* v1

* fully functional

* finalize

* nit

* nit

* nit

* clean up with wrapper + comments

* k

* update

* minor clean
2024-12-29 23:34:23 +00:00
hagen-danswer
d14ef431a7 Improve Salesforce connector 2024-12-29 14:18:40 -08:00
pablonyx
9bffeb65af Eagerly load CCpair connectors (#3531)
* remove left over vim command

* eager loading

* Revert "remove left over vim command"

This reverts commit 184a134ae0.
2024-12-29 15:58:38 +00:00
Yuhong Sun
f4806da653 Fix Null Value in PG (#3559)
* k

* k

* k

* k

* k
2024-12-29 01:53:16 +00:00
pablonyx
e2700b2bbd Remove left over yaml errors (#3527)
* remove left over vim command

* additional misconfigurations

* ensure all regions updated
2024-12-29 01:45:07 +00:00
Yuhong Sun
fc81a3fb12 Zendesk Retries (#3558)
* k

* k

* k

* k
2024-12-28 23:51:49 +00:00
pablonyx
2203cfabea Prevent SSRF risk
Prevent SSRF risk
2024-12-28 15:25:57 -05:00
pablodanswer
f4050306d6 Prevent SSRF risk 2024-12-28 15:25:12 -05:00
Weves
2d960a477f Fix discourse connector 2024-12-24 12:43:10 -08:00
hagen-danswer
8837b8ea79 Curators can now update the curator relationship (#3536)
* Curators can now update the curator relationship

* mypy

* mypy

* whoops haha
2024-12-24 18:49:58 +00:00
hagen-danswer
3dfb214f73 Slackbot polish (#3547) 2024-12-24 16:19:15 +00:00
pablonyx
18d7262608 fix logo rendering (#3542) 2024-12-22 23:00:33 +00:00
pablonyx
09b879ee73 Ensure gmail works for personal accounts (#3541)
* Ensure gmail works for personal accounts

* nit

* minor update
2024-12-22 23:00:14 +00:00
rkuo-danswer
aaa668c963 Merge pull request #3534 from onyx-dot-app/bugfix/validate_ttl
raise activity timeout to one hour
2024-12-22 13:13:57 -08:00
pablonyx
edb877f4bc fix NUL character (#3540) 2024-12-21 23:30:25 +00:00
rkuo-danswer
eb369caefb log attempt id, log elapsed since task execution start, remove log spam (#3539)
* log attempt id, log elapsed since task execution start, remove log spam

* diagnostic lock logs

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2024-12-21 23:03:50 +00:00
Chris Weaver
b9567eabd7 Fix bedrock w/ access keys (#3538)
* Fix bedrock w/ access keys

* cleanup

* Remove extra #
2024-12-21 02:24:11 +00:00
Richard Kuo (Danswer)
13bbf67091 raise activity timeout to one hour 2024-12-20 16:18:35 -08:00
hagen-danswer
457a4c73f0 Made sure confluence connector recursive by page includes top level page (#3532)
* Made sure confluence connector by page includes top level page

* surface level change
2024-12-20 21:53:59 +00:00
rkuo-danswer
ce37688b5b allow limited user to create chat session (#3533)
Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2024-12-20 21:36:41 +00:00
pablonyx
4e2c90f4af Proper user deletion / organization leaving (#3460)
* Proper user deletion / organization leaving

* minor nit

* update

* update provisioning

* minor cleanup

* typing

* post rebase
2024-12-20 21:01:03 +00:00
pablonyx
513dd8a319 update toggling states (#3519) 2024-12-20 20:27:22 +00:00
hagen-danswer
71c5043832 Added filter to exclude attachments with unsupported file extensions (#3530)
* Added filter to exclude attachments with unsupported file extensions

* extension
2024-12-20 19:48:15 +00:00
pablonyx
64b6f15e95 AWS extraneous error fix (#3529)
* remove left over vim command

* aws fix

* k

* remove double
2024-12-20 19:31:04 +00:00
hagen-danswer
35022f5f09 Fix group table (#3523) 2024-12-20 17:51:26 +00:00
hagen-danswer
0d44014c16 Cleanup PR template to make it more concise (#3524) 2024-12-20 17:49:31 +00:00
Yuhong Sun
1b9e9f48fa Update README.md 2024-12-20 10:26:57 -08:00
Yuhong Sun
05fb5aa27b Update README.md 2024-12-20 10:25:34 -08:00
Yuhong Sun
3b645b72a3 Crop Logo Closer 2024-12-20 10:23:52 -08:00
Yuhong Sun
fe770b5c3a Fix Logo On DarkMode (#3525) 2024-12-20 10:15:48 -08:00
hagen-danswer
1eaf885f50 associating credentials with connectors is not considered editing (#3522)
* associating credentials with connectors is not considered editing

* formatting

* formatting

* Update credentials.py

---------

Co-authored-by: Yuhong Sun <yuhongsun96@gmail.com>
2024-12-20 17:36:25 +00:00
rkuo-danswer
a187aa508c use redis exclusively with active signal renewal in more places to perform indexing fence validation (#3517)
Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2024-12-20 06:54:00 +00:00
pablonyx
aa4bfa2a78 Forgot password feature (#3437)
* forgot password feature

* improved config

* nit

* nit
2024-12-20 04:53:37 +00:00
pablonyx
9011b8a139 Update citations in shared chat display (#3487)
* update shared chat display

* Change Copy

* fix icon

* remove secret!

---------

Co-authored-by: Yuhong Sun <yuhongsun96@gmail.com>
2024-12-20 01:48:29 +00:00
pablonyx
59c774353a Latex formatting (#3499) 2024-12-19 14:48:06 -08:00
pablonyx
b458d504af Sidebar Default Open (#3488) 2024-12-19 14:04:50 -08:00
Yuhong Sun
f83e7bfcd9 Fix Default CC Pair (#3513) 2024-12-19 09:43:12 -08:00
pablonyx
4d2e26ce4b MT Cloud Tracking Fix (#3514) 2024-12-19 08:47:02 -08:00
pablonyx
817fdc1f36 Ensure metadata overrides file contents (#3512)
* ensure metadata overrides file contents

* update more blocks
2024-12-19 04:44:24 +00:00
rkuo-danswer
e9b10e8b41 temporarily disabling validate indexing fences (#3502)
* temporarily disabling validate indexing fences

* add back a few startup checks in the cloud

* use common vespa client to perform health check

* log vespa url and try using http1 on light worker index methods

---------

Co-authored-by: Richard Kuo <rkuo@rkuo.com>
Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2024-12-19 01:32:09 +00:00
pablonyx
a0fa4adb60 Ensure password validation errors propagate (#3509)
* ensure password validation errors propagate

* copy update

* support o1

* improve typing

* Revert "support o1"

This reverts commit 9b7aa6008c.
2024-12-19 00:05:57 +00:00
pablonyx
ca9ba925bd Support o1 (#3510)
* support o1

* nit
2024-12-19 00:05:00 +00:00
rkuo-danswer
833cc5c97c Merge pull request #3497 from emerzon/new_icons
New model icons for LLM Picker
2024-12-18 16:38:31 -08:00
Chris Weaver
23ecf654ed Add support for custom LLM error messages (#3501)
* Add support for custom LLM error messages

* Fix mypy
2024-12-17 22:58:17 -08:00
pablonyx
ddc6a6d2b3 Wrap nits (#3496) 2024-12-17 18:03:38 -08:00
pablonyx
571c8ece32 Slack Workspace Alembic Updates
Old alembic migration + restore workspace
2024-12-17 16:28:59 -08:00
pablodanswer
884bdb4b01 old alembic migration + restore workspace 2024-12-17 16:28:05 -08:00
pablonyx
b3ecf0d59f Migrate user milestone logic (#3493) 2024-12-17 15:59:56 -08:00
Emerson Gomes
f56fda27c9 Add also Microsoft models 2024-12-17 16:37:52 -06:00
Emerson Gomes
b1e4d4ea8d Adds icons for Amazon, Meta and Mistral models (when proxied via LiteLLM) 2024-12-17 16:20:46 -06:00
pablonyx
8db6d49fe5 IAM Auth for RDS (#3479)
* k

* functional iam auth

* k

* k

* improve typing

* add deployment options

* cleanup

* quick clean up

* minor cleanup

* additional clarity for db session operations

* nit

* k

* k

* update configs

* docker compose spacing
2024-12-17 22:02:37 +00:00
pablonyx
28598694b1 Add delete all chats option (#2515)
* Add delete all chats option

* post rebase fixes

* final validation

* minor cleanup

* move up
2024-12-17 02:55:35 +00:00
Emerson Gomes
b5d0df90b9 Remove hardcoded root path for HF models 2024-12-16 19:03:15 -08:00
pablonyx
48be6338ec Update Hubspot tracking form submission (#3261)
* Update Hubspot tracking form submission

* minor cleanup

* validated

* validate

* nit

* k
2024-12-17 02:31:09 +00:00
pablonyx
ed9014f03d Use logotypes where feasible (#3478)
* Use logotypes where feasible

* quick nit

* minor cleanup
2024-12-17 02:13:45 +00:00
rkuo-danswer
2dd51230ed clear indexing fences with no celery tasks queued (#3482)
* allow beat tasks to expire. it isn't important that they all run

* validate fences are in a good state and cancel/fail them if not

* add function timings for important beat tasks

* optimize lookups, add lots of comments

* review changes

---------

Co-authored-by: Richard Kuo <rkuo@rkuo.com>
Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2024-12-17 00:55:58 +00:00
pablonyx
8b249cbe63 Proper display priority seeding (#3468)
* proper seeding

* k

* clean up
2024-12-17 00:19:45 +00:00
pablonyx
6b50f86cd2 Improved theming (#3204) 2024-12-16 22:24:32 +00:00
pablonyx
bd2805b6df Update llm override defaults (#3230)
* update llm override defaults

* post rebase fix
2024-12-16 22:18:21 +00:00
pablonyx
2847ab003e Prompting (#3372)
* auto generate start prompts

* post rebase clean up

* update for clarity
2024-12-16 21:34:43 +00:00
pablodanswer
1df6a506ec Revert "update pre-commit black version (#3250)"
This reverts commit d954914a0a.
2024-12-16 13:57:56 -08:00
pablonyx
f1541d1fbe Update default assistant to search for new users (#3317)
* update default assistant to search for new users

* update!
2024-12-16 21:15:33 +00:00
rkuo-danswer
dd0c4b64df errors in the summary row should be counting last_finished_status as reflected in the per connector rows (#3484)
Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2024-12-16 20:53:19 +00:00
pablonyx
788b3015bc fix single quote block in llm answer (#3139) 2024-12-16 20:37:47 +00:00
pablonyx
cbbf10f450 remove tenant id logs (#3063) 2024-12-16 20:24:09 +00:00
pablonyx
d954914a0a update pre-commit black version (#3250) 2024-12-16 20:04:42 +00:00
pablodanswer
bee74ac360 mark slack perm sync as flaky 2024-12-16 11:50:03 -08:00
pablonyx
29ef64272a Update chat provider values
Update chat provider values
2024-12-16 11:46:53 -08:00
pablodanswer
01bf6ee4b7 quick clean up 2024-12-16 11:43:34 -08:00
pablodanswer
0502417cbe update chat provider values 2024-12-16 11:39:25 -08:00
pablodanswer
d0483dd269 temporary vespa bump for tests 2024-12-15 21:41:21 -08:00
pablodanswer
eefa872d60 fix no space left on device for chromatic model server 2024-12-15 18:40:25 -08:00
pablonyx
3f3d4da611 do not include slackbot sessions when fetching chat sessions
do not include slackbot sessions when fetching `chat sessions`
2024-12-15 16:35:19 -08:00
pablodanswer
469068052e don't include slackbot sessions 2024-12-15 16:34:39 -08:00
pablonyx
9032b05606 Increase password requirements
Increase password requirements
2024-12-15 16:29:11 -08:00
pablodanswer
334bc6be8c Increase password requirements 2024-12-15 16:28:45 -08:00
Yuhong Sun
814f97c2c7 MT Cloud Monitoring (#3465) 2024-12-15 16:05:03 -08:00
pablodanswer
4f5a2b47c4 ensure integration tests build 2024-12-15 10:43:55 -08:00
pablodanswer
f545508268 Updated model server run-on config 2024-12-15 10:35:57 -08:00
pablonyx
590986ec65 Merge pull request #3476 from onyx-dot-app/fix_model_server_building
Update model server
2024-12-14 20:52:13 -08:00
pablodanswer
531bab5409 update model server 2024-12-14 20:51:03 -08:00
pablodanswer
29c44007c4 update model server 2024-12-14 20:49:05 -08:00
pablonyx
d388643a04 Cloud settings -> billing (#3469) 2024-12-14 18:10:50 -08:00
pablonyx
8a422683e3 Update folder logic (#3472) 2024-12-14 17:59:30 -08:00
pablonyx
ddc0230d68 align user dropdown in top right (#3473) 2024-12-14 17:25:11 -08:00
Yuhong Sun
6711e91dbf Seed Spacing (#3474) 2024-12-14 17:23:00 -08:00
pablodanswer
cff2346db5 Scale up model server 2024-12-14 17:19:28 -08:00
Yuhong Sun
8d3fad1f12 Change Default Assistant Description (#3470) 2024-12-14 17:00:08 -08:00
pablonyx
0c3dab8e8d Make doc count query more efficient (#3461) 2024-12-14 16:26:36 -08:00
Yuhong Sun
47735e2044 Rebrand Seeding Docs (#3467) 2024-12-14 16:08:13 -08:00
pablonyx
1eeab8c773 Update gmail test configuration
Update gmail test configuration
2024-12-14 14:53:45 -08:00
pablodanswer
e9b41bddc9 gmail configuration update 2024-12-14 14:53:02 -08:00
Yuhong Sun
73a86b9019 Reenable Seeding (#3464) 2024-12-14 12:26:08 -08:00
rkuo-danswer
12c426c87b Merge pull request #3458 from onyx-dot-app/bugfix/connector_tests
test changing back emails
2024-12-13 20:30:55 -08:00
Richard Kuo
06aeab6d59 fix scope typo 2024-12-13 20:21:10 -08:00
Richard Kuo
9b7e67004c Revert "test changing back emails"
This reverts commit 626ce74aa3.
2024-12-13 20:20:54 -08:00
Richard Kuo
626ce74aa3 test changing back emails 2024-12-13 18:18:01 -08:00
pablonyx
cec63465eb Improved invited users
Improved invited users
2024-12-13 17:22:32 -08:00
pablodanswer
5f4b31d322 k 2024-12-13 17:21:54 -08:00
pablonyx
ab5e515a5a Organize frontend tests
Organize frontend tests
2024-12-13 14:58:43 -08:00
pablodanswer
699a02902a nit 2024-12-13 12:50:02 -08:00
pablodanswer
c85157f734 k 2024-12-13 12:48:50 -08:00
pablodanswer
824844bf84 post rebase fix 2024-12-13 12:08:03 -08:00
pablodanswer
a6ab8a8da4 organize fe tests 2024-12-13 12:06:26 -08:00
pablodanswer
40719eb542 github workflow reference updates 2024-12-13 11:50:46 -08:00
pablonyx
e8c72f9e82 Minor Docker Reference Updates
Minor Docker Reference Updates
2024-12-13 11:50:21 -08:00
pablodanswer
0ba77963c4 update nit references 2024-12-13 11:49:27 -08:00
pablonyx
86f2892349 Merge pull request #3439 from onyx-dot-app/goodbye_danswer
Introducing Onyx!
2024-12-13 11:43:00 -08:00
pablodanswer
64f0ad8b26 fix drive tests (nit) 2024-12-13 11:36:39 -08:00
pablodanswer
616e997dad more fixes for connector tests 2024-12-13 11:25:24 -08:00
pablodanswer
614bd378bb fix connector tests 2024-12-13 10:54:00 -08:00
pablodanswer
7064c3d06f update legal references 2024-12-13 10:39:01 -08:00
pablodanswer
3bb9e4bff6 post rebase fix 2024-12-13 10:06:07 -08:00
pablodanswer
3fec7a6a30 post rebase fixes 2024-12-13 10:05:06 -08:00
pablonyx
a01a9b9a99 nit (#3441) 2024-12-13 18:04:46 +00:00
pablodanswer
21ec5ed795 welcome to onyx 2024-12-13 09:56:10 -08:00
hagen-danswer
54dcbfa288 made description optional for document sets (#3407)
* made description optional for document sets

* update document set optional

* update alembic migration head

---------

Co-authored-by: pablodanswer <pablo@danswer.ai>
2024-12-13 01:41:11 +00:00
pablonyx
c69b7fc941 Prevent SSRF risk (#3453)
* update con

* k
2024-12-12 23:41:35 +00:00
pablonyx
6722e88a7b Security (#3452)
* security policies

* k

* update config
2024-12-12 15:01:40 -08:00
pablonyx
5b5e1eb7c7 ensure reload (#3447) 2024-12-12 20:23:17 +00:00
Weves
87d97d13d5 Fixes issue on cloud with redirect URI during token fetching 2024-12-12 12:28:08 -08:00
rkuo-danswer
4ae3b48938 use redis completion signal to double check exit code (#3435)
Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2024-12-12 18:47:45 +00:00
rkuo-danswer
dee1a0ecd7 Feature/google drive oauth (#3365)
* first cut at slack oauth flow

* fix usage of hooks

* fix button spacing

* add additional error logging

* no dev redirect

* early cut at google drive oauth

* second pass

* switch to production uri's

* try handling oauth_interactive differently

* pass through client id and secret if uploaded

* fix call

* fix test

* temporarily disable check for testing

* Revert "temporarily disable check for testing"

This reverts commit 4b5a022a5f.

* support visibility in test

* missed file

---------

Co-authored-by: Richard Kuo <rkuo@rkuo.com>
Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2024-12-12 18:01:59 +00:00
pablonyx
ca172f3306 Merge pull request #3442 from onyx-dot-app/vespa_seeding_fix
Update initial seeding for latency requirements
2024-12-12 09:59:50 -08:00
pablodanswer
e5d0587efa pre-commit 2024-12-12 09:12:08 -08:00
pablonyx
a9516202fe update conditional (#3446) 2024-12-12 17:07:30 +00:00
Richard Kuo
d23fca96c4 reverse commit (fix later) 2024-12-11 22:19:10 -08:00
pablodanswer
a45724c899 run black 2024-12-11 19:18:06 -08:00
pablodanswer
34e250407a k 2024-12-11 19:14:10 -08:00
pablodanswer
046c0fbe3e update indexing 2024-12-11 19:08:05 -08:00
pablonyx
76595facef Merge pull request #3432 from onyx-dot-app/vercel_preview
Enable Vercel Preview
2024-12-11 18:55:14 -08:00
pablodanswer
af2d548766 k 2024-12-11 18:52:47 -08:00
Weves
7c29b1e028 add more egnyte failure logging 2024-12-11 18:19:55 -08:00
pablonyx
a52c821e78 Merge pull request #3436 from onyx-dot-app/cloud_improvements
cloud improvements
2024-12-11 17:06:06 -08:00
pablonyx
0770a587f1 remove slack workspace (#3394)
* remove slack workspace

* update client tokens

* fix up

* clean up docs

* fix up tests
2024-12-12 01:01:43 +00:00
hagen-danswer
748b79b0ef Added text for empty table and cascade delete for slack bot deletion (#3390)
* fixed fk issue for slack bot deletion

* Added text for empty table and cascade delete for slack bot deletion
2024-12-12 01:00:32 +00:00
pablonyx
9cacb373ef let users specify resourcing caps (#3403)
* let users specify resourcing caps

* functional resource limits

* improve defaults

* k

* update

* update comment + refer to proper resource

* self nit

* update var names
2024-12-12 00:59:41 +00:00
pablodanswer
21967d4b6f cloud improvements 2024-12-11 16:48:00 -08:00
pablodanswer
f5d638161b k 2024-12-11 15:35:44 -08:00
pablodanswer
0b5013b47d k 2024-12-11 15:34:26 -08:00
pablodanswer
1b846fbf06 update config 2024-12-11 15:17:11 -08:00
hagen-danswer
cae8a131a2 Made frontend conditional check for source (#3434) 2024-12-11 22:46:32 +00:00
pablonyx
72b4e8e9fe Clean citation cards (#3396)
* seed

* initial steps

* clean up

* fully clickable
2024-12-11 21:37:11 +00:00
pablonyx
c04e2f14d9 remove double x (#3387) 2024-12-11 21:36:58 +00:00
pablonyx
b40a12d5d7 clean up cursor pointers (#3385)
* update

* nit
2024-12-11 21:36:43 +00:00
pablonyx
5e7d454ebe Merge pull request #3433 from onyx-dot-app/silence_integration
Silence Slack Permission Sync test flakiness
2024-12-11 13:49:52 -08:00
pablodanswer
238509c536 silence 2024-12-11 13:48:37 -08:00
pablodanswer
d7f8cf8f18 testing 2024-12-11 13:36:10 -08:00
pablodanswer
5d810d373e k 2024-12-11 13:32:09 -08:00
joachim-danswer
9455576078 Mismatch issue of Documents shown and Citation number in text fix (#3421)
* Mismatch issue of Documents shown and Citation number in text fix

When document order presented to LLM differs from order shown to user, wrong doc numbers are cited.

Fix:
 - SearchTool.get_search_result now returns both the final and the initial ranking
 - initial ranking is passed through a few objects and used for replacement in citation processing

Notes:
 - the citation_num in the CitationInfo() object has not been changed.

* PR fixes

 - linting
 - removed erroneous tab
 - added a substitution test case
 - adjusted original citation extraction use case

* Included a key test and

* Fixed extra spaces

* Updated test documentation

Updated:
 - test_citation_substitution (changed description)
 - test_citation_processing (removed data only relevant for the substitution)
2024-12-11 19:58:24 +00:00
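The substitution step described above can be pictured as a small mapping from the LLM's citation number (based on the order the docs were handed to the LLM) to the number of the same doc in the user-facing order; a hedged sketch, with the data shapes assumed:

```python
def translate_citation(
    llm_rank: int,                   # 1-based number the LLM cited
    llm_order: list[str],            # doc ids in the order given to the LLM
    displayed_rank: dict[str, int],  # doc id -> 1-based displayed position
) -> int:
    doc_id = llm_order[llm_rank - 1]
    return displayed_rank[doc_id]


# e.g. the LLM saw [docB, docA] and cited [1]; the user sees docA first,
# so the rendered citation becomes [2].
assert translate_citation(1, ["docB", "docA"], {"docA": 1, "docB": 2}) == 2
```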
rkuo-danswer
71421bb782 better handling around index attempts that don't exist and remove unn… (#3417)
* better handling around index attempts that don't exist and remove unnecessary index attempt deletions

* don't delete index attempts, just update them

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2024-12-11 19:32:04 +00:00
pablonyx
b88cb388b7 Faster api hashing (#3423)
* migrate hashing to run faster v1

* k
2024-12-11 19:30:05 +00:00
Wendi
639986001f Fix bug (title overflow) (#3431) 2024-12-11 12:09:44 -08:00
pablonyx
e7a7e78969 clean up csv prompt + frontend (#3393)
* clean up csv prompt + frontend

* nit

* nit

* detect uploading

* upload
2024-12-11 19:10:34 +00:00
rkuo-danswer
e255ff7d23 editable refresh and prune for connectors (#3406)
* editable refresh and prune for connectors

* add extra validations on pruning/refresh frequency

* fix validation

* fix icon usage

* fix TextFormField error formatting

* nit

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
Co-authored-by: pablodanswer <pablo@danswer.ai>
2024-12-11 19:04:09 +00:00
pablonyx
1be2502112 finalize (#3398)
Co-authored-by: hagen-danswer <hagen@danswer.ai>
2024-12-11 18:52:20 +00:00
pablonyx
f2bedb8fdd Borders (#3388)
* remove double x

* incorporate base default padding for modals
2024-12-11 18:47:26 +00:00
pablonyx
637404f482 Connector page lists (pending feedback) (#3415)
* v1 (pending feedback)

* nits

* nit
2024-12-11 18:45:27 +00:00
pablonyx
daae146920 recognize updates (#3397) 2024-12-11 18:19:00 +00:00
pablonyx
d95959fb41 base role setting fix (#3381)
* base role setting fix

* update user tables

* finalize

* minor cleanup

* fix chromatic
2024-12-11 18:09:47 +00:00
rkuo-danswer
c667d28e7a update helm charts for onyx-dot-app rebrand (#3412)
* update helm charts for onyx-dot-app rebrand

* fix helm chart testing config

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2024-12-11 18:08:39 +00:00
pablonyx
9e0b482f47 k (#3399) 2024-12-11 18:05:39 +00:00
pablonyx
fa84eb657f cleaner citations (#3389) 2024-12-11 17:36:15 +00:00
pablonyx
264df3441b Various clean ups (#3413)
* tbd

* minor

* prettify

* update sidebar values
2024-12-11 17:19:14 +00:00
pablonyx
b9bad8b7a0 fix wikipedia icon (#3395) 2024-12-11 09:03:29 -08:00
pablonyx
600ebb6432 remove doc sets (#3400) 2024-12-11 16:31:14 +00:00
pablonyx
09fe8ea868 improved display - no odd cutoffs (#3401) 2024-12-11 16:09:19 +00:00
evan-danswer
ad6be03b4d centered score in feedback panel (#3426) 2024-12-11 08:19:53 -08:00
rkuo-danswer
65d2511216 change text and formatting to guide users away from thinking "Back to… (#3382)
* change text and formatting to guide users away from thinking "Back to Danswer" is a back button

* regular text color and different icon

---------

Co-authored-by: Richard Kuo (Danswer) <rkuo@onyx.app>
2024-12-11 03:31:27 +00:00
Weves
113bf19c65 Remove dev-only check 2024-12-10 19:04:21 -08:00
Yuhong Sun
6026536110 Model Server Async (#3386)
* need-verify

* fix some lib calls

* k

* tests

* k

* k

* k

* Address the comments

* fix comment
2024-12-11 01:33:44 +00:00
Weves
056b671cd4 Small tweaks to get Egnyte to work on our cloud 2024-12-10 17:43:46 -08:00
pablonyx
8d83ae2ee8 fix linear (#3402) 2024-12-11 00:45:06 +00:00
Yuhong Sun
ca988f5c5f Max File Size (#3422)
* k

* k

* k
2024-12-11 00:06:47 +00:00
Chris Weaver
4e4214b82c Egnyte connector (#3420) 2024-12-10 16:07:33 -08:00
Yuhong Sun
fe83f676df k (#3404) 2024-12-10 23:27:48 +00:00
hagen-danswer
6d6e12119b made external group emails lowercase (#3410) 2024-12-10 22:08:00 +00:00
pablonyx
1f2b7cb9c8 strip text for slackbot (#3416)
* strip text for slackbot

* k
2024-12-10 21:42:35 +00:00
pablonyx
878a189011 delete input prompts (#3380)
* delete input prompts

* nit

* remove vestigial test

* nit
2024-12-10 21:36:40 +00:00
hagen-danswer
48c10271c2 fixed ephemeral slackbot messages (#3409) 2024-12-10 18:00:34 +00:00
evan-danswer
c6a79d847e fix typo (#3408)
expliticly -> explicitly
2024-12-10 16:44:42 +00:00
hagen-danswer
1bc3f8b96f Revert "Fixed ephemeral slackbot messages"
This reverts commit 7f6a6944d6.
2024-12-10 08:18:31 -08:00
hagen-danswer
7f6a6944d6 Fixed ephemeral slackbot messages 2024-12-10 07:57:28 -08:00
Weves
06f4146597 Bump litellm to support Nova models from AWS 2024-12-09 21:19:11 -08:00
hagen-danswer
7ea73d5a5a Temp slackbot url error Fix (#3392) 2024-12-09 18:34:38 -08:00
Weves
30dfe6dcb4 Add better vertex support + LLM form cleanup 2024-12-09 13:44:44 -08:00
Yuhong Sun
dc5d5dfe05 README Update (#3383) 2024-12-09 13:17:53 -08:00
pablonyx
0746e0be5b unify toggling (#3378) 2024-12-09 19:48:06 +00:00
Chris Weaver
970320bd49 Persona / prompt hardening (#3375)
* Persona / prompt hardening

* fix it
2024-12-09 03:39:59 +00:00
Chris Weaver
4a7bd5578e Fix Confluence perm sync for cloud users (#3374) 2024-12-09 01:41:30 +00:00
Chris Weaver
874b098a4b Add more logging + retries to teams connector (#3369) 2024-12-08 00:56:34 +00:00
pablodanswer
ce18b63eea hide oauth sources (#3368) 2024-12-07 23:57:37 +00:00
Yuhong Sun
7a919c3589 Dev Version Niceness 2024-12-07 15:10:13 -08:00
rkuo-danswer
631bac4432 Bugfix/log exit code (#3362)
* log the exit code of the spawned task

* exitcode can be negative

* mypy fixes
2024-12-06 22:32:59 +00:00
hagen-danswer
53428f6e9c More logging/fixes (#3364)
* More logging for external group syncing

* Fixed edge case where some spaces were not being fetched

* made refresh frequency for confluence syncs configurable

* clarity
2024-12-06 21:56:29 +00:00
pablodanswer
53b3dcbace fix slackbot channel config nullable (#3363)
* fix slackbot

* nit
2024-12-06 21:24:36 +00:00
rkuo-danswer
7a3c06c2d2 first cut at slack oauth flow (#3323)
* first cut at slack oauth flow

* fix usage of hooks

* fix button spacing

* add additional error logging

* no dev redirect

* cleanup

* comment work in progress

* move some stuff to ee, add some playwright tests for the oauth callback edge cases

* fix ee, fix test name

* fix tests

* code review fixes
2024-12-06 19:55:21 +00:00
pablodanswer
7a0d823c89 Improved file handling (#3353)
* update props

* update documents

* nit

* update chat processing

* k

* k

* nit

* minor nit

* minor nits

* k

* nits
2024-12-06 19:16:54 +00:00
Yuhong Sun
db69e445d6 k (#3358) 2024-12-06 18:08:44 +00:00
Weves
18e63889b7 Change default log level back to info 2024-12-06 10:07:14 -08:00
Weves
738e60c8ed Increase vespa attempts on startup 2024-12-06 09:46:33 -08:00
hagen-danswer
8aec873e66 Merge pull request #3359 from danswer-ai/conf-logging-filter
Added filter to slim connector and logging for space permissions
2024-12-06 09:03:07 -08:00
hagen-danswer
7c57dde8ab fixed test 2024-12-06 08:33:12 -08:00
hagen-danswer
f30adab853 Merge remote-tracking branch 'origin/main' into conf-logging-filter 2024-12-06 08:30:07 -08:00
hagen-danswer
601687a522 Add test for Confluence permissions 2024-12-06 08:28:42 -08:00
hagen-danswer
350cf407c9 explicitly set page and attachment restrictions and space keys 2024-12-06 08:12:07 -08:00
hagen-danswer
32ec4efc7a tygod for tests 2024-12-06 08:03:34 -08:00
hagen-danswer
7c6981e052 Added filter to slim connector and logging for space permissions 2024-12-06 07:55:54 -08:00
Yuhong Sun
c50cd20156 Fix SlackBot Page Bugs (#3354) 2024-12-05 13:17:04 -08:00
hagen-danswer
14772dee71 Add persona stats (#3282)
* Added a chart to display persona message stats

* polish

* k

* hope this works

* cleanup
2024-12-05 17:15:56 +00:00
pablodanswer
c81e704c95 various niceties (#3348) 2024-12-05 17:12:52 +00:00
Chris Weaver
3266ef6321 Improve chat page performance (#3347)
* Simplify /manage/indexing-status

* Rename endpoint
2024-12-04 20:28:30 -08:00
pablodanswer
c89b98b4f2 update email invites (#3349) 2024-12-05 03:29:07 +00:00
rkuo-danswer
e70e0ab859 Merge pull request #3346 from danswer-ai/bugfix/chromatic-tests-2
Bugfix/chromatic tests 2
2024-12-04 19:44:05 -08:00
Richard Kuo (Danswer)
69b6e9321e Merge branch 'main' of https://github.com/danswer-ai/danswer into bugfix/chromatic-tests-2
# Conflicts:
#	web/tests/e2e/home.spec.ts
2024-12-04 19:10:25 -08:00
Chris Weaver
7e53af18b6 Add b64 image support for image generation (#3342)
* Add b64 image support

* Fix

* enhance

* Fix mypy

* Fix imports
2024-12-05 02:24:54 +00:00
Richard Kuo (Danswer)
b9eb1ca2ba wait for whole placeholder string 2024-12-04 18:23:06 -08:00
rkuo-danswer
91d44c83d2 fixing chromatic tests (#3344)
* wait for the page to load

* fix up tests

* make sure "Initializing Danswer" is gone
2024-12-05 02:19:43 +00:00
Richard Kuo (Danswer)
4dbc6bb4d1 make sure "Initializing Danswer" is gone 2024-12-04 17:49:59 -08:00
Richard Kuo (Danswer)
4b6a4c6bbf fix up tests 2024-12-04 17:19:16 -08:00
pablodanswer
fd1999454a ensure we can order by doc id (#3343) 2024-12-05 01:10:37 +00:00
Richard Kuo (Danswer)
0a35422d1d wait for the page to load 2024-12-04 16:47:42 -08:00
pablodanswer
69b99056b2 Redirect to chat (#3341)
* k

* nit
2024-12-05 00:08:52 +00:00
Yuhong Sun
2a55696545 Move Answer (#3339) 2024-12-04 16:30:47 -08:00
1295 changed files with 59856 additions and 24286 deletions

View File

@@ -6,24 +6,6 @@
[Describe the tests you ran to verify your changes]
## Accepted Risk (provide if relevant)
N/A
## Related Issue(s) (provide if relevant)
N/A
## Mental Checklist:
- All of the automated tests pass
- All PR comments are addressed and marked resolved
- If there are migrations, they have been rebased to latest main
- If there are new dependencies, they are added to the requirements
- If there are new environment variables, they are added to all of the deployment methods
- If there are new APIs that don't require auth, they are added to PUBLIC_ENDPOINT_SPECS
- Docker images build and basic functionalities work
- Author has done a final read through of the PR right before merge
## Backporting (check the box to trigger backport action)
Note: You have to check that the action passes, otherwise resolve the conflicts manually and tag the patches.
- [ ] This PR should be backported (make sure to check that the backport attempt succeeds)

View File

@@ -6,7 +6,7 @@ on:
- "*"
env:
REGISTRY_IMAGE: ${{ contains(github.ref_name, 'cloud') && 'danswer/danswer-backend-cloud' || 'danswer/danswer-backend' }}
REGISTRY_IMAGE: ${{ contains(github.ref_name, 'cloud') && 'onyxdotapp/onyx-backend-cloud' || 'onyxdotapp/onyx-backend' }}
LATEST_TAG: ${{ contains(github.ref_name, 'latest') }}
jobs:
@@ -44,7 +44,7 @@ jobs:
${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}
${{ env.LATEST_TAG == 'true' && format('{0}:latest', env.REGISTRY_IMAGE) || '' }}
build-args: |
DANSWER_VERSION=${{ github.ref_name }}
ONYX_VERSION=${{ github.ref_name }}
# trivy has their own rate limiting issues causing this action to flake
# we worked around it by hardcoding to different db repos in env
@@ -57,7 +57,7 @@ jobs:
TRIVY_DB_REPOSITORY: "public.ecr.aws/aquasecurity/trivy-db:2"
TRIVY_JAVA_DB_REPOSITORY: "public.ecr.aws/aquasecurity/trivy-java-db:1"
with:
# To run locally: trivy image --severity HIGH,CRITICAL danswer/danswer-backend
# To run locally: trivy image --severity HIGH,CRITICAL onyxdotapp/onyx-backend
image-ref: docker.io/${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}
severity: "CRITICAL,HIGH"
trivyignores: ./backend/.trivyignore
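
The scan step's inline comment doubles as the local repro command. A minimal sketch of running it by hand, assuming the image tag from the comment; `--ignorefile` is trivy's CLI counterpart to the action's `trivyignores` input:

```bash
# Local equivalent of the CI scan step (hedged: flags per trivy's CLI)
trivy image --severity HIGH,CRITICAL \
  --ignorefile ./backend/.trivyignore \
  onyxdotapp/onyx-backend
```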

View File

@@ -7,7 +7,7 @@ on:
- "*"
env:
REGISTRY_IMAGE: danswer/danswer-web-server-cloud
REGISTRY_IMAGE: onyxdotapp/onyx-web-server-cloud
LATEST_TAG: ${{ contains(github.ref_name, 'latest') }}
jobs:
@@ -60,12 +60,13 @@ jobs:
platforms: ${{ matrix.platform }}
push: true
build-args: |
DANSWER_VERSION=${{ github.ref_name }}
ONYX_VERSION=${{ github.ref_name }}
NEXT_PUBLIC_CLOUD_ENABLED=true
NEXT_PUBLIC_POSTHOG_KEY=${{ secrets.POSTHOG_KEY }}
NEXT_PUBLIC_POSTHOG_HOST=${{ secrets.POSTHOG_HOST }}
NEXT_PUBLIC_SENTRY_DSN=${{ secrets.SENTRY_DSN }}
NEXT_PUBLIC_GTM_ENABLED=true
NEXT_PUBLIC_FORGOT_PASSWORD_ENABLED=true
# needed due to weird interactions with the builds for different platforms
no-cache: true
labels: ${{ steps.meta.outputs.labels }}

View File

@@ -6,20 +6,31 @@ on:
- "*"
env:
REGISTRY_IMAGE: ${{ contains(github.ref_name, 'cloud') && 'danswer/danswer-model-server-cloud' || 'danswer/danswer-model-server' }}
REGISTRY_IMAGE: ${{ contains(github.ref_name, 'cloud') && 'onyxdotapp/onyx-model-server-cloud' || 'onyxdotapp/onyx-model-server' }}
LATEST_TAG: ${{ contains(github.ref_name, 'latest') }}
DOCKER_BUILDKIT: 1
BUILDKIT_PROGRESS: plain
jobs:
build-and-push:
# See https://runs-on.com/runners/linux/
runs-on: [runs-on, runner=8cpu-linux-x64, "run-id=${{ github.run_id }}"]
build-amd64:
runs-on:
[runs-on, runner=8cpu-linux-x64, "run-id=${{ github.run_id }}-amd64"]
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: System Info
run: |
df -h
free -h
docker system prune -af --volumes
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver-opts: |
image=moby/buildkit:latest
network=host
- name: Login to Docker Hub
uses: docker/login-action@v3
@@ -27,29 +38,86 @@ jobs:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Model Server Image Docker Build and Push
- name: Build and Push AMD64
uses: docker/build-push-action@v5
with:
context: ./backend
file: ./backend/Dockerfile.model_server
platforms: linux/amd64,linux/arm64
platforms: linux/amd64
push: true
tags: |
${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}
${{ env.LATEST_TAG == 'true' && format('{0}:latest', env.REGISTRY_IMAGE) || '' }}
tags: ${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}-amd64
build-args: |
DANSWER_VERSION=${{ github.ref_name }}
outputs: type=registry
provenance: false
build-arm64:
runs-on:
[runs-on, runner=8cpu-linux-x64, "run-id=${{ github.run_id }}-arm64"]
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: System Info
run: |
df -h
free -h
docker system prune -af --volumes
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver-opts: |
image=moby/buildkit:latest
network=host
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Build and Push ARM64
uses: docker/build-push-action@v5
with:
context: ./backend
file: ./backend/Dockerfile.model_server
platforms: linux/arm64
push: true
tags: ${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}-arm64
build-args: |
DANSWER_VERSION=${{ github.ref_name }}
outputs: type=registry
provenance: false
merge-and-scan:
needs: [build-amd64, build-arm64]
runs-on: ubuntu-latest
steps:
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Create and Push Multi-arch Manifest
run: |
docker buildx create --use
docker buildx imagetools create -t ${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }} \
${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}-amd64 \
${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}-arm64
if [[ "${{ env.LATEST_TAG }}" == "true" ]]; then
docker buildx imagetools create -t ${{ env.REGISTRY_IMAGE }}:latest \
${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}-amd64 \
${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}-arm64
fi
# trivy has their own rate limiting issues causing this action to flake
# we worked around it by hardcoding to different db repos in env
# can re-enable when they figure it out
# https://github.com/aquasecurity/trivy/discussions/7538
# https://github.com/aquasecurity/trivy-action/issues/389
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@master
env:
TRIVY_DB_REPOSITORY: "public.ecr.aws/aquasecurity/trivy-db:2"
TRIVY_JAVA_DB_REPOSITORY: "public.ecr.aws/aquasecurity/trivy-java-db:1"
with:
image-ref: docker.io/danswer/danswer-model-server:${{ github.ref_name }}
image-ref: docker.io/${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}
severity: "CRITICAL,HIGH"
timeout: "10m"
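
The merge-and-scan job above stitches the two single-arch pushes back into one multi-arch tag. A minimal local sketch of that merge, assuming a hypothetical tag `v1.2.3` and that both per-arch tags already exist on the registry:

```bash
# Combine per-arch images into a single multi-arch manifest (hypothetical tag)
REGISTRY_IMAGE=onyxdotapp/onyx-model-server
TAG=v1.2.3
docker buildx imagetools create -t "$REGISTRY_IMAGE:$TAG" \
  "$REGISTRY_IMAGE:$TAG-amd64" \
  "$REGISTRY_IMAGE:$TAG-arm64"
# Verify that both platforms appear in the resulting manifest list
docker buildx imagetools inspect "$REGISTRY_IMAGE:$TAG"
```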

View File

@@ -3,12 +3,12 @@ name: Build and Push Web Image on Tag
on:
push:
tags:
- '*'
- "*"
env:
REGISTRY_IMAGE: danswer/danswer-web-server
REGISTRY_IMAGE: onyxdotapp/onyx-web-server
LATEST_TAG: ${{ contains(github.ref_name, 'latest') }}
jobs:
build:
runs-on:
@@ -27,11 +27,11 @@ jobs:
- name: Prepare
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Checkout
uses: actions/checkout@v4
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
@@ -40,16 +40,16 @@ jobs:
tags: |
type=raw,value=${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}
type=raw,value=${{ env.LATEST_TAG == 'true' && format('{0}:latest', env.REGISTRY_IMAGE) || '' }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Build and push by digest
id: build
uses: docker/build-push-action@v5
@@ -59,18 +59,18 @@ jobs:
platforms: ${{ matrix.platform }}
push: true
build-args: |
DANSWER_VERSION=${{ github.ref_name }}
# needed due to weird interactions with the builds for different platforms
ONYX_VERSION=${{ github.ref_name }}
# needed due to weird interactions with the builds for different platforms
no-cache: true
labels: ${{ steps.meta.outputs.labels }}
outputs: type=image,name=${{ env.REGISTRY_IMAGE }},push-by-digest=true,name-canonical=true,push=true
- name: Export digest
run: |
mkdir -p /tmp/digests
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"
touch "/tmp/digests/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@v4
with:
@@ -90,42 +90,42 @@ jobs:
path: /tmp/digests
pattern: digests-*
merge-multiple: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY_IMAGE }}
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Create manifest list and push
working-directory: /tmp/digests
run: |
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *)
$(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *)
- name: Inspect image
run: |
docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }}
# trivy has their own rate limiting issues causing this action to flake
# we worked around it by hardcoding to different db repos in env
# can re-enable when they figure it out
# https://github.com/aquasecurity/trivy/discussions/7538
# https://github.com/aquasecurity/trivy-action/issues/389
# trivy has their own rate limiting issues causing this action to flake
# we worked around it by hardcoding to different db repos in env
# can re-enable when they figure it out
# https://github.com/aquasecurity/trivy/discussions/7538
# https://github.com/aquasecurity/trivy-action/issues/389
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@master
env:
TRIVY_DB_REPOSITORY: 'public.ecr.aws/aquasecurity/trivy-db:2'
TRIVY_JAVA_DB_REPOSITORY: 'public.ecr.aws/aquasecurity/trivy-java-db:1'
TRIVY_DB_REPOSITORY: "public.ecr.aws/aquasecurity/trivy-db:2"
TRIVY_JAVA_DB_REPOSITORY: "public.ecr.aws/aquasecurity/trivy-java-db:1"
with:
image-ref: docker.io/${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}
severity: 'CRITICAL,HIGH'
severity: "CRITICAL,HIGH"
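
Two Bash parameter expansions carry this workflow: `${platform//\//-}` makes the platform string safe for artifact names, and `${digest#sha256:}` strips the algorithm prefix before the digest is written to `/tmp/digests`. A quick sketch with hypothetical values:

```bash
platform="linux/amd64"
echo "PLATFORM_PAIR=${platform//\//-}"   # -> PLATFORM_PAIR=linux-amd64

digest="sha256:3f2a9c0d"                 # hypothetical digest value
mkdir -p /tmp/digests
touch "/tmp/digests/${digest#sha256:}"   # creates /tmp/digests/3f2a9c0d
```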

View File

@@ -7,31 +7,31 @@ on:
workflow_dispatch:
inputs:
version:
description: 'The version (ie v0.0.1) to tag as latest'
description: "The version (ie v0.0.1) to tag as latest"
required: true
jobs:
tag:
# See https://runs-on.com/runners/linux/
# use a lower powered instance since this just does i/o to docker hub
runs-on: [runs-on,runner=2cpu-linux-x64,"run-id=${{ github.run_id }}"]
runs-on: [runs-on, runner=2cpu-linux-x64, "run-id=${{ github.run_id }}"]
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Enable Docker CLI experimental features
run: echo "DOCKER_CLI_EXPERIMENTAL=enabled" >> $GITHUB_ENV
- name: Enable Docker CLI experimental features
run: echo "DOCKER_CLI_EXPERIMENTAL=enabled" >> $GITHUB_ENV
- name: Pull, Tag and Push Web Server Image
run: |
docker buildx imagetools create -t danswer/danswer-web-server:latest danswer/danswer-web-server:${{ github.event.inputs.version }}
- name: Pull, Tag and Push Web Server Image
run: |
docker buildx imagetools create -t onyxdotapp/onyx-web-server:latest onyxdotapp/onyx-web-server:${{ github.event.inputs.version }}
- name: Pull, Tag and Push API Server Image
run: |
docker buildx imagetools create -t danswer/danswer-backend:latest danswer/danswer-backend:${{ github.event.inputs.version }}
- name: Pull, Tag and Push API Server Image
run: |
docker buildx imagetools create -t onyxdotapp/onyx-backend:latest onyxdotapp/onyx-backend:${{ github.event.inputs.version }}

View File

@@ -8,43 +8,42 @@ on:
workflow_dispatch:
inputs:
hotfix_commit:
description: 'Hotfix commit hash'
description: "Hotfix commit hash"
required: true
hotfix_suffix:
description: 'Hotfix branch suffix (e.g. hotfix/v0.8-{suffix})'
description: "Hotfix branch suffix (e.g. hotfix/v0.8-{suffix})"
required: true
release_branch_pattern:
description: 'Release branch pattern (regex)'
description: "Release branch pattern (regex)"
required: true
default: 'release/.*'
default: "release/.*"
auto_merge:
description: 'Automatically merge the hotfix PRs'
description: "Automatically merge the hotfix PRs"
required: true
type: choice
default: 'true'
default: "true"
options:
- true
- false
jobs:
hotfix_release_branches:
permissions: write-all
# See https://runs-on.com/runners/linux/
# use a lower powered instance since this just does i/o to docker hub
runs-on: [runs-on,runner=2cpu-linux-x64,"run-id=${{ github.run_id }}"]
runs-on: [runs-on, runner=2cpu-linux-x64, "run-id=${{ github.run_id }}"]
steps:
# needs RKUO_DEPLOY_KEY for write access to merge PR's
- name: Checkout Repository
uses: actions/checkout@v4
with:
ssh-key: "${{ secrets.RKUO_DEPLOY_KEY }}"
fetch-depth: 0
- name: Set up Git user
run: |
git config user.name "Richard Kuo [bot]"
git config user.email "rkuo[bot]@danswer.ai"
git config user.email "rkuo[bot]@onyx.app"
- name: Fetch All Branches
run: |
@@ -62,10 +61,10 @@ jobs:
echo "No release branches found matching pattern '${{ github.event.inputs.release_branch_pattern }}'."
exit 1
fi
echo "Found release branches:"
echo "$BRANCHES"
# Join the branches into a single line separated by commas
BRANCHES_JOINED=$(echo "$BRANCHES" | tr '\n' ',' | sed 's/,$//')
@@ -169,4 +168,4 @@ jobs:
echo "Failed to merge pull request #$PR_NUMBER."
fi
fi
done
done
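
The branch-collection step flattens `git branch` output into a single comma-separated line via `tr` and `sed`. A small sketch with hypothetical branch names:

```bash
BRANCHES=$'release/v0.8\nrelease/v0.9'   # hypothetical matching branches
BRANCHES_JOINED=$(echo "$BRANCHES" | tr '\n' ',' | sed 's/,$//')
echo "$BRANCHES_JOINED"                  # -> release/v0.8,release/v0.9
```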

View File

@@ -4,7 +4,7 @@ name: Backport on Merge
on:
pull_request:
types: [closed] # Later we check for merge so only PRs that go in can get backported
types: [closed] # Later we check for merge so only PRs that go in can get backported
permissions:
contents: write
@@ -26,9 +26,9 @@ jobs:
- name: Set up Git user
run: |
git config user.name "Richard Kuo [bot]"
git config user.email "rkuo[bot]@danswer.ai"
git config user.email "rkuo[bot]@onyx.app"
git fetch --prune
- name: Check for Backport Checkbox
id: checkbox-check
run: |
@@ -51,14 +51,14 @@ jobs:
# Fetch latest tags for beta and stable
LATEST_BETA_TAG=$(git tag -l "v[0-9]*.[0-9]*.[0-9]*-beta.[0-9]*" | grep -E "^v[0-9]+\.[0-9]+\.[0-9]+-beta\.[0-9]+$" | grep -v -- "-cloud" | sort -Vr | head -n 1)
LATEST_STABLE_TAG=$(git tag -l "v[0-9]*.[0-9]*.[0-9]*" | grep -E "^v[0-9]+\.[0-9]+\.[0-9]+$" | sort -Vr | head -n 1)
# Handle case where no beta tags exist
if [[ -z "$LATEST_BETA_TAG" ]]; then
NEW_BETA_TAG="v1.0.0-beta.1"
else
NEW_BETA_TAG=$(echo $LATEST_BETA_TAG | awk -F '[.-]' '{print $1 "." $2 "." $3 "-beta." ($NF+1)}')
fi
# Increment latest stable tag
NEW_STABLE_TAG=$(echo $LATEST_STABLE_TAG | awk -F '.' '{print $1 "." $2 "." ($3+1)}')
echo "latest_beta_tag=$LATEST_BETA_TAG" >> $GITHUB_OUTPUT
@@ -80,10 +80,10 @@ jobs:
run: |
set -e
echo "Backporting to beta ${{ steps.list-branches.outputs.beta }} and stable ${{ steps.list-branches.outputs.stable }}"
# Echo the merge commit SHA
echo "Merge commit SHA: ${{ github.event.pull_request.merge_commit_sha }}"
# Fetch all history for all branches and tags
git fetch --prune
@@ -98,7 +98,7 @@ jobs:
echo "Cherry-pick to beta failed due to conflicts."
exit 1
}
# Create new beta branch/tag
git tag ${{ steps.list-branches.outputs.new_beta_tag }}
# Push the changes and tag to the beta branch using PAT
@@ -110,13 +110,13 @@ jobs:
echo "Last 5 commits on stable branch:"
git log -n 5 --pretty=format:"%H"
echo "" # Newline for formatting
# Cherry-pick the merge commit from the merged PR
git cherry-pick -m 1 ${{ github.event.pull_request.merge_commit_sha }} || {
echo "Cherry-pick to stable failed due to conflicts."
exit 1
}
# Create new stable branch/tag
git tag ${{ steps.list-branches.outputs.new_stable_tag }}
# Push the changes and tag to the stable branch using PAT
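
The tag bumps are two awk one-liners: the beta bump splits on `.` and `-` and increments the trailing beta counter, while the stable bump increments the patch field. A worked example with hypothetical current tags:

```bash
LATEST_BETA_TAG="v0.8.3-beta.7"     # hypothetical current tags
LATEST_STABLE_TAG="v0.8.3"
echo "$LATEST_BETA_TAG"   | awk -F '[.-]' '{print $1 "." $2 "." $3 "-beta." ($NF+1)}'
# -> v0.8.3-beta.8
echo "$LATEST_STABLE_TAG" | awk -F '.' '{print $1 "." $2 "." ($3+1)}'
# -> v0.8.4
```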

View File

@@ -14,18 +14,24 @@ jobs:
name: Playwright Tests
# See https://runs-on.com/runners/linux/
runs-on: [runs-on,runner=8cpu-linux-x64,ram=16,"run-id=${{ github.run_id }}"]
runs-on:
[
runs-on,
runner=32cpu-linux-x64,
disk=large,
"run-id=${{ github.run_id }}",
]
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
cache: 'pip'
python-version: "3.11"
cache: "pip"
cache-dependency-path: |
backend/requirements/default.txt
backend/requirements/dev.txt
@@ -35,7 +41,7 @@ jobs:
pip install --retries 5 --timeout 30 -r backend/requirements/default.txt
pip install --retries 5 --timeout 30 -r backend/requirements/dev.txt
pip install --retries 5 --timeout 30 -r backend/requirements/model_server.txt
- name: Setup node
uses: actions/setup-node@v4
with:
@@ -48,7 +54,7 @@ jobs:
- name: Install playwright browsers
working-directory: ./web
run: npx playwright install --with-deps
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
@@ -60,13 +66,13 @@ jobs:
# tag every docker image with "test" so that we can spin up the correct set
# of images during testing
# we use the runs-on cache for docker builds
# in conjunction with runs-on runners, it has better speed and unlimited caching
# https://runs-on.com/caching/s3-cache-for-github-actions/
# https://runs-on.com/caching/docker/
# https://github.com/moby/buildkit#s3-cache-experimental
# images are built and run locally for testing purposes. Not pushed.
- name: Build Web Docker image
@@ -75,7 +81,7 @@ jobs:
context: ./web
file: ./web/Dockerfile
platforms: linux/amd64
tags: danswer/danswer-web-server:test
tags: onyxdotapp/onyx-web-server:test
push: false
load: true
cache-from: type=s3,prefix=cache/${{ github.repository }}/integration-tests/web-server/,region=${{ env.RUNS_ON_AWS_REGION }},bucket=${{ env.RUNS_ON_S3_BUCKET_CACHE }}
@@ -87,7 +93,7 @@ jobs:
context: ./backend
file: ./backend/Dockerfile
platforms: linux/amd64
tags: danswer/danswer-backend:test
tags: onyxdotapp/onyx-backend:test
push: false
load: true
cache-from: type=s3,prefix=cache/${{ github.repository }}/integration-tests/backend/,region=${{ env.RUNS_ON_AWS_REGION }},bucket=${{ env.RUNS_ON_S3_BUCKET_CACHE }}
@@ -99,7 +105,7 @@ jobs:
context: ./backend
file: ./backend/Dockerfile.model_server
platforms: linux/amd64
tags: danswer/danswer-model-server:test
tags: onyxdotapp/onyx-model-server:test
push: false
load: true
cache-from: type=s3,prefix=cache/${{ github.repository }}/integration-tests/model-server/,region=${{ env.RUNS_ON_AWS_REGION }},bucket=${{ env.RUNS_ON_S3_BUCKET_CACHE }}
@@ -110,6 +116,7 @@ jobs:
cd deployment/docker_compose
ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=true \
AUTH_TYPE=basic \
GEN_AI_API_KEY=${{ secrets.OPENAI_API_KEY }} \
REQUIRE_EMAIL_VERIFICATION=false \
DISABLE_TELEMETRY=true \
IMAGE_TAG=test \
@@ -119,12 +126,12 @@ jobs:
- name: Wait for service to be ready
run: |
echo "Starting wait-for-service script..."
docker logs -f danswer-stack-api_server-1 &
start_time=$(date +%s)
timeout=300 # 5 minutes in seconds
while true; do
current_time=$(date +%s)
elapsed_time=$((current_time - start_time))
@@ -152,7 +159,7 @@ jobs:
- name: Run pytest playwright test init
working-directory: ./backend
env:
env:
PYTEST_IGNORE_SKIP: true
run: pytest -s tests/integration/tests/playwright/test_playwright.py
@@ -168,7 +175,7 @@ jobs:
name: test-results
path: ./web/test-results
retention-days: 30
# save before stopping the containers so the logs can be captured
- name: Save Docker logs
if: success() || failure()
@@ -176,7 +183,7 @@ jobs:
cd deployment/docker_compose
docker compose -f docker-compose.dev.yml -p danswer-stack logs > docker-compose.log
mv docker-compose.log ${{ github.workspace }}/docker-compose.log
- name: Upload logs
if: success() || failure()
uses: actions/upload-artifact@v4
@@ -191,35 +198,41 @@ jobs:
chromatic-tests:
name: Chromatic Tests
needs: playwright-tests
runs-on: [runs-on,runner=8cpu-linux-x64,ram=16,"run-id=${{ github.run_id }}"]
runs-on:
[
runs-on,
runner=32cpu-linux-x64,
disk=large,
"run-id=${{ github.run_id }}",
]
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup node
uses: actions/setup-node@v4
with:
node-version: 22
- name: Install node dependencies
working-directory: ./web
run: npm ci
- name: Download Playwright test results
uses: actions/download-artifact@v4
with:
name: test-results
path: ./web/test-results
- name: Run Chromatic
uses: chromaui/action@latest
with:
playwright: true
projectToken: ${{ secrets.CHROMATIC_PROJECT_TOKEN }}
workingDir: ./web
env:
env:
CHROMATIC_ARCHIVE_LOCATION: ./test-results
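
The wait-for-service hunks above are truncated mid-loop. A hedged reconstruction of such a polling loop follows; the health endpoint URL is an assumption for illustration, not taken from this diff:

```bash
# Sketch of the elided readiness loop; the endpoint is hypothetical
start_time=$(date +%s)
timeout=300  # 5 minutes in seconds
while true; do
  current_time=$(date +%s)
  elapsed_time=$((current_time - start_time))
  if [ "$elapsed_time" -ge "$timeout" ]; then
    echo "Timed out waiting for the API server."
    exit 1
  fi
  if curl -fsS http://localhost:8080/health >/dev/null 2>&1; then
    echo "Service is ready."
    break
  fi
  sleep 5
done
```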

View File

@@ -8,7 +8,7 @@ on:
pull_request:
branches:
- main
- 'release/**'
- "release/**"
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
@@ -16,11 +16,11 @@ env:
CONFLUENCE_TEST_SPACE_URL: ${{ secrets.CONFLUENCE_TEST_SPACE_URL }}
CONFLUENCE_USER_NAME: ${{ secrets.CONFLUENCE_USER_NAME }}
CONFLUENCE_ACCESS_TOKEN: ${{ secrets.CONFLUENCE_ACCESS_TOKEN }}
jobs:
integration-tests:
# See https://runs-on.com/runners/linux/
runs-on: [runs-on,runner=8cpu-linux-x64,ram=16,"run-id=${{ github.run_id }}"]
runs-on: [runs-on, runner=32cpu-linux-x64, "run-id=${{ github.run_id }}"]
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -36,21 +36,21 @@ jobs:
# tag every docker image with "test" so that we can spin up the correct set
# of images during testing
# We don't need to build the Web Docker image since it's not yet used
# in the integration tests. We have a separate action to verify that it builds
# in the integration tests. We have a separate action to verify that it builds
# successfully.
- name: Pull Web Docker image
run: |
docker pull danswer/danswer-web-server:latest
docker tag danswer/danswer-web-server:latest danswer/danswer-web-server:test
docker pull onyxdotapp/onyx-web-server:latest
docker tag onyxdotapp/onyx-web-server:latest onyxdotapp/onyx-web-server:test
# we use the runs-on cache for docker builds
# in conjunction with runs-on runners, it has better speed and unlimited caching
# https://runs-on.com/caching/s3-cache-for-github-actions/
# https://runs-on.com/caching/docker/
# https://github.com/moby/buildkit#s3-cache-experimental
# images are built and run locally for testing purposes. Not pushed.
- name: Build Backend Docker image
uses: ./.github/actions/custom-build-and-push
@@ -58,7 +58,7 @@ jobs:
context: ./backend
file: ./backend/Dockerfile
platforms: linux/amd64
tags: danswer/danswer-backend:test
tags: onyxdotapp/onyx-backend:test
push: false
load: true
cache-from: type=s3,prefix=cache/${{ github.repository }}/integration-tests/backend/,region=${{ env.RUNS_ON_AWS_REGION }},bucket=${{ env.RUNS_ON_S3_BUCKET_CACHE }}
@@ -70,19 +70,19 @@ jobs:
context: ./backend
file: ./backend/Dockerfile.model_server
platforms: linux/amd64
tags: danswer/danswer-model-server:test
tags: onyxdotapp/onyx-model-server:test
push: false
load: true
cache-from: type=s3,prefix=cache/${{ github.repository }}/integration-tests/model-server/,region=${{ env.RUNS_ON_AWS_REGION }},bucket=${{ env.RUNS_ON_S3_BUCKET_CACHE }}
cache-to: type=s3,prefix=cache/${{ github.repository }}/integration-tests/model-server/,region=${{ env.RUNS_ON_AWS_REGION }},bucket=${{ env.RUNS_ON_S3_BUCKET_CACHE }},mode=max
- name: Build integration test Docker image
uses: ./.github/actions/custom-build-and-push
with:
context: ./backend
file: ./backend/tests/integration/Dockerfile
platforms: linux/amd64
tags: danswer/danswer-integration:test
tags: onyxdotapp/onyx-integration:test
push: false
load: true
cache-from: type=s3,prefix=cache/${{ github.repository }}/integration-tests/integration/,region=${{ env.RUNS_ON_AWS_REGION }},bucket=${{ env.RUNS_ON_S3_BUCKET_CACHE }}
@@ -119,7 +119,7 @@ jobs:
-e TEST_WEB_HOSTNAME=test-runner \
-e AUTH_TYPE=cloud \
-e MULTI_TENANT=true \
danswer/danswer-integration:test \
onyxdotapp/onyx-integration:test \
/app/tests/integration/multitenant_tests
continue-on-error: true
id: run_multitenant_tests
@@ -131,15 +131,14 @@ jobs:
exit 1
else
echo "All integration tests passed successfully."
fi
fi
- name: Stop multi-tenant Docker containers
run: |
cd deployment/docker_compose
docker compose -f docker-compose.dev.yml -p danswer-stack down -v
- name: Start Docker containers
- name: Start Docker containers
run: |
cd deployment/docker_compose
ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=true \
@@ -153,12 +152,12 @@ jobs:
- name: Wait for service to be ready
run: |
echo "Starting wait-for-service script..."
docker logs -f danswer-stack-api_server-1 &
start_time=$(date +%s)
timeout=300 # 5 minutes in seconds
while true; do
current_time=$(date +%s)
elapsed_time=$((current_time - start_time))
@@ -202,7 +201,7 @@ jobs:
-e CONFLUENCE_USER_NAME=${CONFLUENCE_USER_NAME} \
-e CONFLUENCE_ACCESS_TOKEN=${CONFLUENCE_ACCESS_TOKEN} \
-e TEST_WEB_HOSTNAME=test-runner \
danswer/danswer-integration:test \
onyxdotapp/onyx-integration:test \
/app/tests/integration/tests \
/app/tests/integration/connector_job_tests
continue-on-error: true
@@ -229,7 +228,7 @@ jobs:
run: |
cd deployment/docker_compose
docker compose -f docker-compose.dev.yml -p danswer-stack down -v
- name: Upload logs
if: success() || failure()
uses: actions/upload-artifact@v4
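
The multi-tenant and standard suites both run inside the renamed integration image, with configuration injected via `-e` flags. A stripped-down sketch of the invocation (the real CI step passes many more secrets):

```bash
# Abridged local run of the multi-tenant suite; most -e secrets omitted
docker run --rm \
  -e AUTH_TYPE=cloud \
  -e MULTI_TENANT=true \
  -e TEST_WEB_HOSTNAME=test-runner \
  onyxdotapp/onyx-integration:test \
  /app/tests/integration/multitenant_tests
```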

View File

@@ -26,7 +26,19 @@ env:
GOOGLE_GMAIL_OAUTH_CREDENTIALS_JSON_STR: ${{ secrets.GOOGLE_GMAIL_OAUTH_CREDENTIALS_JSON_STR }}
# Slab
SLAB_BOT_TOKEN: ${{ secrets.SLAB_BOT_TOKEN }}
# Zendesk
ZENDESK_SUBDOMAIN: ${{ secrets.ZENDESK_SUBDOMAIN }}
ZENDESK_EMAIL: ${{ secrets.ZENDESK_EMAIL }}
ZENDESK_TOKEN: ${{ secrets.ZENDESK_TOKEN }}
# Salesforce
SF_USERNAME: ${{ secrets.SF_USERNAME }}
SF_PASSWORD: ${{ secrets.SF_PASSWORD }}
SF_SECURITY_TOKEN: ${{ secrets.SF_SECURITY_TOKEN }}
# Airtable
AIRTABLE_TEST_BASE_ID: ${{ secrets.AIRTABLE_TEST_BASE_ID }}
AIRTABLE_TEST_TABLE_ID: ${{ secrets.AIRTABLE_TEST_TABLE_ID }}
AIRTABLE_TEST_TABLE_NAME: ${{ secrets.AIRTABLE_TEST_TABLE_NAME }}
AIRTABLE_ACCESS_TOKEN: ${{ secrets.AIRTABLE_ACCESS_TOKEN }}
jobs:
connectors-check:
# See https://runs-on.com/runners/linux/

View File

@@ -2,53 +2,52 @@ name: Nightly Tag Push
on:
schedule:
- cron: '0 10 * * *' # Runs every day at 2 AM PST / 3 AM PDT / 10 AM UTC
- cron: "0 10 * * *" # Runs every day at 2 AM PST / 3 AM PDT / 10 AM UTC
permissions:
contents: write # Allows pushing tags to the repository
contents: write # Allows pushing tags to the repository
jobs:
create-and-push-tag:
runs-on: [runs-on,runner=2cpu-linux-x64,"run-id=${{ github.run_id }}"]
runs-on: [runs-on, runner=2cpu-linux-x64, "run-id=${{ github.run_id }}"]
steps:
# actions using GITHUB_TOKEN cannot trigger another workflow, but we do want this to trigger docker pushes
# see https://github.com/orgs/community/discussions/27028#discussioncomment-3254367 for the workaround we
# implement here which needs an actual user's deploy key
- name: Checkout code
uses: actions/checkout@v4
with:
ssh-key: "${{ secrets.RKUO_DEPLOY_KEY }}"
# actions using GITHUB_TOKEN cannot trigger another workflow, but we do want this to trigger docker pushes
# see https://github.com/orgs/community/discussions/27028#discussioncomment-3254367 for the workaround we
# implement here which needs an actual user's deploy key
- name: Checkout code
uses: actions/checkout@v4
with:
ssh-key: "${{ secrets.RKUO_DEPLOY_KEY }}"
- name: Set up Git user
run: |
git config user.name "Richard Kuo [bot]"
git config user.email "rkuo[bot]@danswer.ai"
- name: Set up Git user
run: |
git config user.name "Richard Kuo [bot]"
git config user.email "rkuo[bot]@onyx.app"
- name: Check for existing nightly tag
id: check_tag
run: |
if git tag --points-at HEAD --list "nightly-latest*" | grep -q .; then
echo "A tag starting with 'nightly-latest' already exists on HEAD."
echo "tag_exists=true" >> $GITHUB_OUTPUT
else
echo "No tag starting with 'nightly-latest' exists on HEAD."
echo "tag_exists=false" >> $GITHUB_OUTPUT
fi
# don't tag again if HEAD already has a nightly-latest tag on it
- name: Create Nightly Tag
if: steps.check_tag.outputs.tag_exists == 'false'
env:
DATE: ${{ github.run_id }}
run: |
TAG_NAME="nightly-latest-$(date +'%Y%m%d')"
echo "Creating tag: $TAG_NAME"
git tag $TAG_NAME
- name: Check for existing nightly tag
id: check_tag
run: |
if git tag --points-at HEAD --list "nightly-latest*" | grep -q .; then
echo "A tag starting with 'nightly-latest' already exists on HEAD."
echo "tag_exists=true" >> $GITHUB_OUTPUT
else
echo "No tag starting with 'nightly-latest' exists on HEAD."
echo "tag_exists=false" >> $GITHUB_OUTPUT
fi
- name: Push Tag
if: steps.check_tag.outputs.tag_exists == 'false'
run: |
TAG_NAME="nightly-latest-$(date +'%Y%m%d')"
git push origin $TAG_NAME
# don't tag again if HEAD already has a nightly-latest tag on it
- name: Create Nightly Tag
if: steps.check_tag.outputs.tag_exists == 'false'
env:
DATE: ${{ github.run_id }}
run: |
TAG_NAME="nightly-latest-$(date +'%Y%m%d')"
echo "Creating tag: $TAG_NAME"
git tag $TAG_NAME
- name: Push Tag
if: steps.check_tag.outputs.tag_exists == 'false'
run: |
TAG_NAME="nightly-latest-$(date +'%Y%m%d')"
git push origin $TAG_NAME
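
The dedupe guard can be exercised locally the same way the workflow does it: tag only if HEAD has no `nightly-latest*` tag yet. A minimal sketch:

```bash
# Skip tagging if HEAD already carries a nightly tag
if git tag --points-at HEAD --list "nightly-latest*" | grep -q .; then
  echo "HEAD is already tagged; nothing to do."
else
  TAG_NAME="nightly-latest-$(date +'%Y%m%d')"
  git tag "$TAG_NAME"
  git push origin "$TAG_NAME"
fi
```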

4
.gitignore vendored
View File

@@ -7,4 +7,6 @@
.vscode/
*.sw?
/backend/tests/regression/answer_quality/search_test_config.yaml
/web/test-results/
/web/test-results/
backend/onyx/agent_search/main/test_data.json
backend/tests/regression/answer_quality/test_data.json

View File

@@ -5,6 +5,8 @@
# For local dev, often user Authentication is not needed
AUTH_TYPE=disabled
# Skip warm up for dev
SKIP_WARM_UP=True
# Always keep these on for Dev
# Logs all model prompts to stdout
@@ -49,3 +51,9 @@ BING_API_KEY=<REPLACE THIS>
# Enable the full set of Danswer Enterprise Edition features
# NOTE: DO NOT ENABLE THIS UNLESS YOU HAVE A PAID ENTERPRISE LICENSE (or if you are using this for local testing/development)
ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=False
# Agent Search configs # TODO: Remove give proper namings
AGENT_RETRIEVAL_STATS=False # Note: This setting will incur substantial re-ranking effort
AGENT_RERANKING_STATS=True
AGENT_MAX_QUERY_RETRIEVAL_RESULTS=20
AGENT_RERANKING_MAX_QUERY_RETRIEVAL_RESULTS=20

View File

@@ -17,7 +17,7 @@
}
},
{
"name": "Run All Danswer Services",
"name": "Run All Onyx Services",
"configurations": [
"Web Server",
"Model Server",
@@ -122,7 +122,7 @@
"PYTHONUNBUFFERED": "1"
},
"args": [
"danswer.main:app",
"onyx.main:app",
"--reload",
"--port",
"8080"
@@ -139,7 +139,7 @@
"consoleName": "Slack Bot",
"type": "debugpy",
"request": "launch",
"program": "danswer/danswerbot/slack/listener.py",
"program": "onyx/onyxbot/slack/listener.py",
"cwd": "${workspaceFolder}/backend",
"envFile": "${workspaceFolder}/.vscode/.env",
"env": {
@@ -166,7 +166,7 @@
},
"args": [
"-A",
"danswer.background.celery.versioned_apps.primary",
"onyx.background.celery.versioned_apps.primary",
"worker",
"--pool=threads",
"--concurrency=4",
@@ -195,7 +195,7 @@
},
"args": [
"-A",
"danswer.background.celery.versioned_apps.light",
"onyx.background.celery.versioned_apps.light",
"worker",
"--pool=threads",
"--concurrency=64",
@@ -224,7 +224,7 @@
},
"args": [
"-A",
"danswer.background.celery.versioned_apps.heavy",
"onyx.background.celery.versioned_apps.heavy",
"worker",
"--pool=threads",
"--concurrency=4",
@@ -254,7 +254,7 @@
},
"args": [
"-A",
"danswer.background.celery.versioned_apps.indexing",
"onyx.background.celery.versioned_apps.indexing",
"worker",
"--pool=threads",
"--concurrency=1",
@@ -283,7 +283,7 @@
},
"args": [
"-A",
"danswer.background.celery.versioned_apps.beat",
"onyx.background.celery.versioned_apps.beat",
"beat",
"--loglevel=INFO",
],
@@ -308,7 +308,7 @@
"args": [
"-v"
// Specify a specific module/test to run or provide nothing to run all tests
//"tests/unit/danswer/llm/answering/test_prune_and_merge.py"
//"tests/unit/onyx/llm/answering/test_prune_and_merge.py"
],
"presentation": {
"group": "2",
@@ -355,5 +355,20 @@
"PYTHONPATH": "."
},
},
{
"name": "Install Python Requirements",
"type": "node",
"request": "launch",
"runtimeExecutable": "bash",
"runtimeArgs": [
"-c",
"pip install -r backend/requirements/default.txt && pip install -r backend/requirements/dev.txt && pip install -r backend/requirements/ee.txt && pip install -r backend/requirements/model_server.txt"
],
"cwd": "${workspaceFolder}",
"console": "integratedTerminal",
"presentation": {
"group": "3"
}
},
]
}
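
Each Celery launch configuration above wraps a plain CLI invocation. Roughly the shell equivalent of the primary worker config, run from `onyx/backend` with the venv active (an assumption based on the surrounding configs; the launch entries may also set extra env vars):

```bash
# Approximate CLI equivalent of the "Celery primary" launch config
celery -A onyx.background.celery.versioned_apps.primary worker \
  --pool=threads \
  --concurrency=4
```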

View File

@@ -1,32 +1,38 @@
<!-- DANSWER_METADATA={"link": "https://github.com/danswer-ai/danswer/blob/main/CONTRIBUTING.md"} -->
<!-- DANSWER_METADATA={"link": "https://github.com/onyx-dot-app/onyx/blob/main/CONTRIBUTING.md"} -->
# Contributing to Danswer
Hey there! We are so excited that you're interested in Danswer.
# Contributing to Onyx
Hey there! We are so excited that you're interested in Onyx.
As an open source project in a rapidly changing space, we welcome all contributions.
## 💃 Guidelines
### Contribution Opportunities
The [GitHub Issues](https://github.com/danswer-ai/danswer/issues) page is a great place to start for contribution ideas.
The [GitHub Issues](https://github.com/onyx-dot-app/onyx/issues) page is a great place to start for contribution ideas.
To ensure that your contribution is aligned with the project's direction, please reach out to Hagen (or any other maintainer) on the Onyx team
via [Slack](https://join.slack.com/t/onyx-dot-app/shared_invite/zt-2twesxdr6-5iQitKZQpgq~hYIZ~dv3KA) /
[Discord](https://discord.gg/TDJ59cGV2X) or [email](mailto:founders@onyx.app).
Issues that have been explicitly approved by the maintainers (aligned with the direction of the project)
will be marked with the `approved by maintainers` label.
Issues marked `good first issue` are an especially great place to start.
**Connectors** to other tools are another great place to contribute. For details on how, refer to this
[README.md](https://github.com/danswer-ai/danswer/blob/main/backend/danswer/connectors/README.md).
[README.md](https://github.com/onyx-dot-app/onyx/blob/main/backend/onyx/connectors/README.md).
If you have a new/different contribution in mind, we'd love to hear about it!
Your input is vital to making sure that Danswer moves in the right direction.
Your input is vital to making sure that Onyx moves in the right direction.
Before starting on implementation, please raise a GitHub issue.
And always feel free to message us (Chris Weaver / Yuhong Sun) on
[Slack](https://join.slack.com/t/danswer/shared_invite/zt-2lcmqw703-071hBuZBfNEOGUsLa5PXvQ) /
[Discord](https://discord.gg/TDJ59cGV2X) directly about anything at all.
Also, always feel free to message the founders (Chris Weaver / Yuhong Sun) on
[Slack](https://join.slack.com/t/onyx-dot-app/shared_invite/zt-2twesxdr6-5iQitKZQpgq~hYIZ~dv3KA) /
[Discord](https://discord.gg/TDJ59cGV2X) directly about anything at all.
### Contributing Code
To contribute to this project, please follow the
["fork and pull request"](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow.
When opening a pull request, mention related issues and feel free to tag relevant maintainers.
@@ -34,72 +40,78 @@ When opening a pull request, mention related issues and feel free to tag relevan
Before creating a pull request please make sure that the new changes conform to the formatting and linting requirements.
See the [Formatting and Linting](#formatting-and-linting) section for how to run these checks locally.
### Getting Help 🙋
Our goal is to make contributing as easy as possible. If you run into any issues please don't hesitate to reach out.
That way we can help future contributors and users can avoid the same issue.
We also have support channels and generally interesting discussions on our
[Slack](https://join.slack.com/t/danswer/shared_invite/zt-2afut44lv-Rw3kSWu6_OmdAXRpCv80DQ)
and
[Slack](https://join.slack.com/t/onyx-dot-app/shared_invite/zt-2twesxdr6-5iQitKZQpgq~hYIZ~dv3KA)
and
[Discord](https://discord.gg/TDJ59cGV2X).
We would love to see you there!
## Get Started 🚀
Danswer being a fully functional app, relies on some external software, specifically:
Onyx, being a fully functional app, relies on some external software, specifically:
- [Postgres](https://www.postgresql.org/) (Relational DB)
- [Vespa](https://vespa.ai/) (Vector DB/Search Engine)
- [Redis](https://redis.io/) (Cache)
- [Nginx](https://nginx.org/) (Not needed for development flows generally)
> **Note:**
> This guide provides instructions to build and run Danswer locally from source with Docker containers providing the above external software. We believe this combination is easier for
> development purposes. If you prefer to use pre-built container images, we provide instructions on running the full Danswer stack within Docker below.
> This guide provides instructions to build and run Onyx locally from source with Docker containers providing the above external software. We believe this combination is easier for
> development purposes. If you prefer to use pre-built container images, we provide instructions on running the full Onyx stack within Docker below.
### Local Set Up
Be sure to use Python version 3.11. For instructions on installing Python 3.11 on macOS, refer to the [CONTRIBUTING_MACOS.md](./CONTRIBUTING_MACOS.md) readme.
If using a lower version, modifications will have to be made to the code.
If using a higher version, sometimes some libraries will not be available (e.g. we had problems with Tensorflow in the past with higher versions of Python).
#### Backend: Python requirements
Currently, we use pip and recommend creating a virtual environment.
For convenience here's a command for it:
```bash
python -m venv .venv
source .venv/bin/activate
```
> **Note:**
> This virtual environment MUST NOT be set up WITHIN the danswer directory if you plan on using mypy within certain IDEs.
> For simplicity, we recommend setting up the virtual environment outside of the danswer directory.
> This virtual environment MUST NOT be set up WITHIN the onyx directory if you plan on using mypy within certain IDEs.
> For simplicity, we recommend setting up the virtual environment outside of the onyx directory.
_For Windows, activate the virtual environment using Command Prompt:_
```bash
.venv\Scripts\activate
```
If using PowerShell, the command slightly differs:
```powershell
.venv\Scripts\Activate.ps1
```
Install the required python dependencies:
```bash
pip install -r danswer/backend/requirements/default.txt
pip install -r danswer/backend/requirements/dev.txt
pip install -r danswer/backend/requirements/ee.txt
pip install -r danswer/backend/requirements/model_server.txt
pip install -r onyx/backend/requirements/default.txt
pip install -r onyx/backend/requirements/dev.txt
pip install -r onyx/backend/requirements/ee.txt
pip install -r onyx/backend/requirements/model_server.txt
```
Install Playwright for Python (headless browser required by the Web Connector)
In the activated Python virtualenv, install Playwright for Python by running:
```bash
playwright install
```
@@ -109,42 +121,90 @@ You may have to deactivate and reactivate your virtualenv for `playwright` to ap
#### Frontend: Node dependencies
Install [Node.js and npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) for the frontend.
Once the above is done, navigate to `danswer/web` run:
Once the above is done, navigate to `onyx/web` run:
```bash
npm i
```
#### Docker containers for external software
## Formatting and Linting
### Backend
For the backend, you'll need to setup pre-commit hooks (black / reorder-python-imports).
First, install pre-commit (if you don't have it already) following the instructions
[here](https://pre-commit.com/#installation).
With the virtual environment active, install the pre-commit library with:
```bash
pip install pre-commit
```
Then, from the `onyx/backend` directory, run:
```bash
pre-commit install
```
Additionally, we use `mypy` for static type checking.
Onyx is fully type-annotated, and we want to keep it that way!
To run the mypy checks manually, run `python -m mypy .` from the `onyx/backend` directory.
### Web
We use `prettier` for formatting. The desired version (2.8.8) will be installed via a `npm i` from the `onyx/web` directory.
To run the formatter, use `npx prettier --write .` from the `onyx/web` directory.
Please double check that prettier passes before creating a pull request.
# Running the application for development
## Developing using VSCode Debugger (recommended)
We highly recommend using VSCode debugger for development.
See [CONTRIBUTING_VSCODE.md](./CONTRIBUTING_VSCODE.md) for more details.
Otherwise, you can follow the instructions below to run the application for development.
## Manually running the application for development
### Docker containers for external software
You will need Docker installed to run these containers.
First navigate to `danswer/deployment/docker_compose`, then start up Postgres/Vespa/Redis with:
First navigate to `onyx/deployment/docker_compose`, then start up Postgres/Vespa/Redis with:
```bash
docker compose -f docker-compose.dev.yml -p danswer-stack up -d index relational_db cache
docker compose -f docker-compose.dev.yml -p onyx-stack up -d index relational_db cache
```
(index refers to Vespa, relational_db refers to Postgres, and cache refers to Redis)
### Running Onyx locally
To start the frontend, navigate to `onyx/web` and run:
#### Running Danswer locally
To start the frontend, navigate to `danswer/web` and run:
```bash
npm run dev
```
Next, start the model server which runs the local NLP models.
Navigate to `danswer/backend` and run:
Navigate to `onyx/backend` and run:
```bash
uvicorn model_server.main:app --reload --port 9000
```
_For Windows (for compatibility with both PowerShell and Command Prompt):_
```bash
powershell -Command "uvicorn model_server.main:app --reload --port 9000"
```
The first time running Danswer, you will need to run the DB migrations for Postgres.
The first time running Onyx, you will need to run the DB migrations for Postgres.
After the first time, this is no longer required unless the DB models change.
Navigate to `danswer/backend` and with the venv active, run:
Navigate to `onyx/backend` and with the venv active, run:
```bash
alembic upgrade head
```
@@ -152,21 +212,24 @@ alembic upgrade head
Next, start the task queue which orchestrates the background jobs.
Jobs that take more time are run async from the API server.
Still in `danswer/backend`, run:
Still in `onyx/backend`, run:
```bash
python ./scripts/dev_run_background_jobs.py
```
To run the backend API server, navigate back to `danswer/backend` and run:
To run the backend API server, navigate back to `onyx/backend` and run:
```bash
AUTH_TYPE=disabled uvicorn danswer.main:app --reload --port 8080
AUTH_TYPE=disabled uvicorn onyx.main:app --reload --port 8080
```
_For Windows (for compatibility with both PowerShell and Command Prompt):_
```bash
powershell -Command "
$env:AUTH_TYPE='disabled'
uvicorn danswer.main:app --reload --port 8080
uvicorn onyx.main:app --reload --port 8080
"
```
@@ -182,57 +245,32 @@ You should now have 4 servers running:
- Model server
- Background jobs
Now, visit `http://localhost:3000` in your browser. You should see the Danswer onboarding wizard where you can connect your external LLM provider to Danswer.
Now, visit `http://localhost:3000` in your browser. You should see the Onyx onboarding wizard where you can connect your external LLM provider to Onyx.
You've successfully set up a local Danswer instance! 🏁
You've successfully set up a local Onyx instance! 🏁
#### Running the Danswer application in a container
#### Running the Onyx application in a container
You can run the full Danswer application stack from pre-built images including all external software dependencies.
You can run the full Onyx application stack from pre-built images including all external software dependencies.
Navigate to `danswer/deployment/docker_compose` and run:
Navigate to `onyx/deployment/docker_compose` and run:
```bash
docker compose -f docker-compose.dev.yml -p danswer-stack up -d
docker compose -f docker-compose.dev.yml -p onyx-stack up -d
```
After Docker pulls and starts these containers, navigate to `http://localhost:3000` to use Danswer.
After Docker pulls and starts these containers, navigate to `http://localhost:3000` to use Onyx.
If you want to make changes to Danswer and run those changes in Docker, you can also build a local version of the Danswer container images that incorporates your changes like so:
If you want to make changes to Onyx and run those changes in Docker, you can also build a local version of the Onyx container images that incorporates your changes like so:
```bash
docker compose -f docker-compose.dev.yml -p danswer-stack up -d --build
docker compose -f docker-compose.dev.yml -p onyx-stack up -d --build
```
### Formatting and Linting
#### Backend
For the backend, you'll need to setup pre-commit hooks (black / reorder-python-imports).
First, install pre-commit (if you don't have it already) following the instructions
[here](https://pre-commit.com/#installation).
With the virtual environment active, install the pre-commit library with:
```bash
pip install pre-commit
```
Then, from the `danswer/backend` directory, run:
```bash
pre-commit install
```
Additionally, we use `mypy` for static type checking.
Danswer is fully type-annotated, and we want to keep it that way!
To run the mypy checks manually, run `python -m mypy .` from the `danswer/backend` directory.
#### Web
We use `prettier` for formatting. The desired version (2.8.8) will be installed via a `npm i` from the `danswer/web` directory.
To run the formatter, use `npx prettier --write .` from the `danswer/web` directory.
Please double check that prettier passes before creating a pull request.
### Release Process
Danswer loosely follows the SemVer versioning standard.
Onyx loosely follows the SemVer versioning standard.
Major changes are released with a "minor" version bump. Currently we use patch release versions to indicate small feature changes.
A set of Docker containers will be pushed automatically to DockerHub with every tag.
You can see the containers [here](https://hub.docker.com/search?q=danswer%2F).
You can see the containers [here](https://hub.docker.com/search?q=onyx%2F).

View File

@@ -1,15 +1,19 @@
## Some additional notes for Mac Users
The base instructions to set up the development environment are located in [CONTRIBUTING.md](https://github.com/danswer-ai/danswer/blob/main/CONTRIBUTING.md).
The base instructions to set up the development environment are located in [CONTRIBUTING.md](https://github.com/onyx-dot-app/onyx/blob/main/CONTRIBUTING.md).
### Setting up Python
Ensure [Homebrew](https://brew.sh/) is already set up.
Then install python 3.11.
```bash
brew install python@3.11
```
Add python 3.11 to your path: add the following line to ~/.zshrc
```
export PATH="$(brew --prefix)/opt/python@3.11/libexec/bin:$PATH"
```
@@ -17,15 +21,16 @@ export PATH="$(brew --prefix)/opt/python@3.11/libexec/bin:$PATH"
> **Note:**
> You will need to open a new terminal for the path change above to take effect.
### Setting up Docker
On macOS, you will need to install [Docker Desktop](https://www.docker.com/products/docker-desktop/) and
On macOS, you will need to install [Docker Desktop](https://www.docker.com/products/docker-desktop/) and
ensure it is running before continuing with the docker commands.
### Formatting and Linting
MacOS will likely require you to remove some quarantine attributes on some of the hooks for them to execute properly.
After installing pre-commit, run the following command:
```bash
sudo xattr -r -d com.apple.quarantine ~/.cache/pre-commit
```
```

29
CONTRIBUTING_VSCODE.md Normal file
View File

@@ -0,0 +1,29 @@
# VSCode Debugging Setup
This guide explains how to set up and use VSCode's debugging capabilities with this project.
## Initial Setup
1. **Environment Setup**:
- Copy `.vscode/.env.template` to `.vscode/.env`
- Fill in the necessary environment variables in `.vscode/.env`
2. **launch.json**:
- Copy `.vscode/launch.template.jsonc` to `.vscode/launch.json`
## Using the Debugger
Before starting, make sure the Docker Daemon is running.
1. Open the Debug view in VSCode (Cmd+Shift+D on macOS)
2. From the dropdown at the top, select "Clear and Restart External Volumes and Containers" and press the green play button
3. From the dropdown at the top, select "Run All Onyx Services" and press the green play button
4. Now, you can navigate to onyx in your browser (default is http://localhost:3000) and start using the app
5. You can set breakpoints by clicking to the left of line numbers to help debug while the app is running
6. Use the debug toolbar to step through code, inspect variables, etc.
## Features
- Hot reload is enabled for the web server and API servers
- Python debugging is configured with debugpy
- Environment variables are loaded from `.vscode/.env`
- Console output is organized in the integrated terminal with labeled tabs

View File

@@ -2,9 +2,9 @@ Copyright (c) 2023-present DanswerAI, Inc.
Portions of this software are licensed as follows:
* All content that resides under "ee" directories of this repository, if that directory exists, is licensed under the license defined in "backend/ee/LICENSE". Specifically all content under "backend/ee" and "web/src/app/ee" is licensed under the license defined in "backend/ee/LICENSE".
* All third party components incorporated into the Danswer Software are licensed under the original license provided by the owner of the applicable component.
* Content outside of the above mentioned directories or restrictions above is available under the "MIT Expat" license as defined below.
- All content that resides under "ee" directories of this repository, if that directory exists, is licensed under the license defined in "backend/ee/LICENSE". Specifically all content under "backend/ee" and "web/src/app/ee" is licensed under the license defined in "backend/ee/LICENSE".
- All third party components incorporated into the Onyx Software are licensed under the original license provided by the owner of the applicable component.
- Content outside of the above mentioned directories or restrictions above is available under the "MIT Expat" license as defined below.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

169
README.md
View File

@@ -1,146 +1,135 @@
<!-- DANSWER_METADATA={"link": "https://github.com/danswer-ai/danswer/blob/main/README.md"} -->
<!-- DANSWER_METADATA={"link": "https://github.com/onyx-dot-app/onyx/blob/main/README.md"} -->
<a name="readme-top"></a>
<h2 align="center">
<a href="https://www.danswer.ai/"> <img width="50%" src="https://github.com/danswer-owners/danswer/blob/1fabd9372d66cd54238847197c33f091a724803b/DanswerWithName.png?raw=true)" /></a>
<a href="https://www.onyx.app/"> <img width="50%" src="https://github.com/onyx-dot-app/onyx/blob/logo/OnyxLogoCropped.jpg?raw=true)" /></a>
</h2>
<p align="center">
<p align="center">Open Source Gen-AI Chat + Unified Search.</p>
<p align="center">Open Source Gen-AI + Enterprise Search.</p>
<p align="center">
<a href="https://docs.danswer.dev/" target="_blank">
<a href="https://docs.onyx.app/" target="_blank">
<img src="https://img.shields.io/badge/docs-view-blue" alt="Documentation">
</a>
<a href="https://join.slack.com/t/danswer/shared_invite/zt-2twesxdr6-5iQitKZQpgq~hYIZ~dv3KA" target="_blank">
<a href="https://join.slack.com/t/onyx-dot-app/shared_invite/zt-2twesxdr6-5iQitKZQpgq~hYIZ~dv3KA" target="_blank">
<img src="https://img.shields.io/badge/slack-join-blue.svg?logo=slack" alt="Slack">
</a>
<a href="https://discord.gg/TDJ59cGV2X" target="_blank">
<img src="https://img.shields.io/badge/discord-join-blue.svg?logo=discord&logoColor=white" alt="Discord">
</a>
<a href="https://github.com/danswer-ai/danswer/blob/main/README.md" target="_blank">
<a href="https://github.com/onyx-dot-app/onyx/blob/main/README.md" target="_blank">
<img src="https://img.shields.io/static/v1?label=license&message=MIT&color=blue" alt="License">
</a>
</p>
<strong>[Danswer](https://www.danswer.ai/)</strong> is the AI Assistant connected to your company's docs, apps, and people.
Danswer provides a Chat interface and plugs into any LLM of your choice. Danswer can be deployed anywhere and for any
scale - on a laptop, on-premise, or to cloud. Since you own the deployment, your user data and chats are fully in your
own control. Danswer is MIT licensed and designed to be modular and easily extensible. The system also comes fully ready
for production usage with user authentication, role management (admin/basic users), chat persistence, and a UI for
configuring Personas (AI Assistants) and their Prompts.
<strong>[Onyx](https://www.onyx.app/)</strong> (formerly Danswer) is the AI Assistant connected to your company's docs, apps, and people.
Onyx provides a Chat interface and plugs into any LLM of your choice. Onyx can be deployed anywhere and for any
scale - on a laptop, on-premise, or to cloud. Since you own the deployment, your user data and chats are fully in your
own control. Onyx is dual licensed, with most of it under the MIT license, and designed to be modular and easily extensible. The system also comes fully ready
for production usage with user authentication, role management (admin/basic users), chat persistence, and a UI for
configuring AI Assistants.
Danswer also serves as a Unified Search across all common workplace tools such as Slack, Google Drive, Confluence, etc.
By combining LLMs and team specific knowledge, Danswer becomes a subject matter expert for the team. Imagine ChatGPT if
Onyx also serves as an Enterprise Search across all common workplace tools such as Slack, Google Drive, Confluence, etc.
By combining LLMs and team specific knowledge, Onyx becomes a subject matter expert for the team. Imagine ChatGPT if
it had access to your team's unique knowledge! It enables questions such as "A customer wants feature X, is this already
supported?" or "Where's the pull request for feature Y?"
<h3>Usage</h3>
Onyx Web App:
https://github.com/onyx-dot-app/onyx/assets/32520769/563be14c-9304-47b5-bf0a-9049c2b6f410
Or, plug Onyx into your existing Slack workflows (more integrations to come 😁):
https://github.com/onyx-dot-app/onyx/assets/25087905/3e19739b-d178-4371-9a38-011430bdec1b
For more details on the Admin UI to manage connectors and users, check out our
<strong><a href="https://www.youtube.com/watch?v=geNzY1nbCnU">Full Video Demo</a></strong>!
## Deployment
Onyx can easily be run locally (even on a laptop) or deployed on a virtual machine with a single
`docker compose` command. Check out our [docs](https://docs.onyx.app/quickstart) to learn more.
We also have built-in support for deployment on Kubernetes. Files for that can be found [here](https://github.com/onyx-dot-app/onyx/tree/main/deployment/kubernetes).
## 💃 Main Features
- Chat UI with the ability to select documents to chat with.
- Create custom AI Assistants with different prompts and backing knowledge sets.
- Connect Onyx with LLM of your choice (self-host for a fully airgapped solution).
- Document Search + AI Answers for natural language queries.
- Connectors to all common workplace tools like Google Drive, Confluence, Slack, etc.
- Slack integration to get answers and search results directly in Slack.
## 🚧 Roadmap
- Chat/Prompt sharing with specific teammates and user groups.
- Multimodal model support, chat with images, video etc.
- Choosing between LLMs and parameters during chat session.
- Tool calling and agent configuration options.
- Organizational understanding and ability to locate and suggest experts from your team.
## Other Notable Benefits of Onyx
- User Authentication with document level access management.
- Best in class Hybrid Search across all sources (BM-25 + prefix aware embedding models).
- Admin Dashboard to configure connectors, document-sets, access, etc.
- Custom deep learning models + learn from user feedback.
- Easy deployment and ability to host Onyx anywhere of your choosing.
## 🔌 Connectors
Efficiently pulls the latest changes from:
- Slack
- GitHub
- Google Drive
- Confluence
- Jira
- Zendesk
- Gmail
- Notion
- Gong
- Slab
- Linear
- Productboard
- Guru
- Bookstack
- Document360
- Sharepoint
- Hubspot
- Local Files
- Websites
- And more ...
## 📚 Editions
There are two editions of Onyx:
- Onyx Community Edition (CE) is available freely under the MIT Expat license. This version has ALL the core features discussed above. This is the version of Onyx you will get if you follow the Deployment guide above.
- Onyx Enterprise Edition (EE) includes extra features that are primarily useful for larger organizations. Specifically, this includes:
  - Single Sign-On (SSO), with support for both SAML and OIDC
  - Role-based access control
  - Document permission inheritance from connected sources
  - Usage analytics and query history accessible to admins
  - Whitelabeling
  - API key authentication
  - Encryption of secrets
  - And many more! Check out [our website](https://www.onyx.app/) for the latest.
To try the Onyx Enterprise Edition:
1. Check out our [Cloud product](https://cloud.onyx.app/signup).
2. For self-hosting, contact us at [founders@onyx.app](mailto:founders@onyx.app) or book a call with us on our [Cal](https://cal.com/team/danswer/founders).
## 💡 Contributing
Looking to contribute? Please check out the [Contribution Guide](CONTRIBUTING.md) for more details.
## ⭐Star History
[![Star History Chart](https://api.star-history.com/svg?repos=onyx-dot-app/onyx&type=Date)](https://star-history.com/#onyx-dot-app/onyx&Date)
## ✨Contributors
<a href="https://github.com/danswer-ai/danswer/graphs/contributors">
<img alt="contributors" src="https://contrib.rocks/image?repo=danswer-ai/danswer"/>
</a>
<p align="right" style="font-size: 14px; color: #555; margin-top: 20px;">
<a href="#readme-top" style="text-decoration: none; color: #007bff; font-weight: bold;">
↑ Back to Top ↑
</a>
</p>

backend/.gitignore
@@ -9,3 +9,4 @@ api_keys.py
vespa-app.zip
dynamic_config_storage/
celerybeat-schedule*
onyx/connectors/salesforce/data/


@@ -1,19 +1,19 @@
FROM python:3.11.7-slim-bookworm
LABEL com.danswer.maintainer="founders@danswer.ai"
LABEL com.danswer.description="This image is the web/frontend container of Danswer which \
contains code for both the Community and Enterprise editions of Danswer. If you do not \
LABEL com.danswer.maintainer="founders@onyx.app"
LABEL com.danswer.description="This image is the web/frontend container of Onyx which \
contains code for both the Community and Enterprise editions of Onyx. If you do not \
have a contract or agreement with DanswerAI, you are not permitted to use the Enterprise \
Edition features outside of personal development or testing purposes. Please reach out to \
founders@onyx.app for more information. Please visit https://github.com/onyx-dot-app/onyx"
# Default ONYX_VERSION, typically overridden during builds by GitHub Actions.
ARG ONYX_VERSION=0.8-dev
ENV ONYX_VERSION=${ONYX_VERSION} \
DANSWER_RUNNING_IN_DOCKER="true"
RUN echo "DANSWER_VERSION: ${DANSWER_VERSION}"
RUN echo "ONYX_VERSION: ${ONYX_VERSION}"
# Install system dependencies
# cmake needed for psycopg (postgres)
# libpq-dev needed for psycopg (postgres)
@@ -56,7 +56,7 @@ RUN pip install --no-cache-dir --upgrade \
# Cleanup for CVEs and size reduction
# https://github.com/tornadoweb/tornado/issues/3107
# xserver-common and xvfb included by playwright installation but not needed after
# perl-base is part of the base Python Debian image but not needed for Onyx functionality
# perl-base could only be removed with --allow-remove-essential
RUN apt-get update && \
apt-get remove -y --allow-remove-essential \
@@ -92,7 +92,7 @@ COPY ./ee /app/ee
COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
# Set up application files
COPY ./onyx /app/onyx
COPY ./shared_configs /app/shared_configs
COPY ./alembic /app/alembic
COPY ./alembic_tenants /app/alembic_tenants


@@ -1,18 +1,18 @@
FROM python:3.11.7-slim-bookworm
LABEL com.danswer.maintainer="founders@danswer.ai"
LABEL com.danswer.description="This image is for the Danswer model server which runs all of the \
AI models for Danswer. This container and all the code is MIT Licensed and free for all to use. \
You can find it at https://hub.docker.com/r/danswer/danswer-model-server. For more details, \
visit https://github.com/danswer-ai/danswer."
LABEL com.danswer.maintainer="founders@onyx.app"
LABEL com.danswer.description="This image is for the Onyx model server which runs all of the \
AI models for Onyx. This container and all the code is MIT Licensed and free for all to use. \
You can find it at https://hub.docker.com/r/onyx/onyx-model-server. For more details, \
visit https://github.com/onyx-dot-app/onyx."
# Default ONYX_VERSION, typically overridden during builds by GitHub Actions.
ARG ONYX_VERSION=0.8-dev
ENV ONYX_VERSION=${ONYX_VERSION} \
DANSWER_RUNNING_IN_DOCKER="true"
RUN echo "DANSWER_VERSION: ${DANSWER_VERSION}"
RUN echo "ONYX_VERSION: ${ONYX_VERSION}"
COPY ./requirements/model_server.txt /tmp/requirements.txt
RUN pip install --no-cache-dir --upgrade \
@@ -20,11 +20,11 @@ RUN pip install --no-cache-dir --upgrade \
--timeout 30 \
-r /tmp/requirements.txt
RUN apt-get remove -y --allow-remove-essential perl-base && \
apt-get autoremove -y
# Pre-downloading models for setups with limited egress
# Download tokenizers, distilbert for the Onyx model
# Download model weights
# Run Nomic to pull in the custom architecture and have it cached locally
RUN python -c "from transformers import AutoTokenizer; \
@@ -38,18 +38,18 @@ from sentence_transformers import SentenceTransformer; \
SentenceTransformer(model_name_or_path='nomic-ai/nomic-embed-text-v1', trust_remote_code=True);"
# In case the user has volumes mounted to /root/.cache/huggingface that they've downloaded while
# running Onyx, don't overwrite it with the built in cache folder
RUN mv /root/.cache/huggingface /root/.cache/temp_huggingface
WORKDIR /app
# Utils used by model server
COPY ./onyx/utils/logger.py /app/onyx/utils/logger.py
# Place to fetch version information
COPY ./onyx/__init__.py /app/onyx/__init__.py
# Shared between Onyx Backend and Model Server
COPY ./shared_configs /app/shared_configs
# Model Server main code


@@ -1,19 +1,22 @@
<!-- DANSWER_METADATA={"link": "https://github.com/danswer-ai/danswer/blob/main/backend/alembic/README.md"} -->
<!-- DANSWER_METADATA={"link": "https://github.com/onyx-dot-app/onyx/blob/main/backend/alembic/README.md"} -->
# Alembic DB Migrations
These files are for creating/updating the tables in the Relational DB (Postgres).
Onyx migrations use a generic single-database configuration with an async dbapi.
## To generate new migrations:
run from onyx/backend:
`alembic revision --autogenerate -m <DESCRIPTION_OF_MIGRATION>`
More info can be found here: https://alembic.sqlalchemy.org/en/latest/autogenerate.html
## Running migrations
To run all un-applied migrations:
`alembic upgrade head`
To undo migrations:
`alembic downgrade -X`
where X is the number of migrations you want to undo from the current state
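For scripted setups, the same operations are available through Alembic's Python API; a minimal sketch (assuming it is run from the backend directory so that `alembic.ini` is in the current working directory):

```python
from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")  # assumes the backend's alembic.ini

# Equivalent of `alembic revision --autogenerate -m <DESCRIPTION_OF_MIGRATION>`
command.revision(cfg, message="describe the migration here", autogenerate=True)

# Equivalent of `alembic upgrade head` and `alembic downgrade -1`
command.upgrade(cfg, "head")
command.downgrade(cfg, "-1")
```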


@@ -1,39 +1,49 @@
from typing import Any, Literal
import asyncio
import logging
import os
import ssl
from logging.config import fileConfig

from alembic import context
from celery.backends.database.session import ResultModelBase  # type: ignore
from sqlalchemy import event
from sqlalchemy import pool
from sqlalchemy import text
from sqlalchemy.engine.base import Connection
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy.sql.schema import SchemaItem

from onyx.configs.app_configs import AWS_REGION_NAME
from onyx.configs.app_configs import POSTGRES_HOST
from onyx.configs.app_configs import POSTGRES_PORT
from onyx.configs.app_configs import POSTGRES_USER
from onyx.configs.app_configs import USE_IAM_AUTH
from onyx.configs.constants import SSL_CERT_FILE
from onyx.db.engine import build_connection_string
from onyx.db.engine import get_all_tenant_ids
from onyx.db.engine import get_iam_auth_token
from onyx.db.models import Base
from shared_configs.configs import MULTI_TENANT, POSTGRES_DEFAULT_SCHEMA
# Alembic Config object
config = context.config
# Interpret the config file for Python logging.
if config.config_file_name is not None and config.attributes.get(
"configure_logger", True
):
fileConfig(config.config_file_name)
# Add your model's MetaData object here for 'autogenerate' support
target_metadata = [Base.metadata, ResultModelBase.metadata]
EXCLUDE_TABLES = {"kombu_queue", "kombu_message"}
# Set up logging
logger = logging.getLogger(__name__)
ssl_context: ssl.SSLContext | None = None
if USE_IAM_AUTH:
if not os.path.exists(SSL_CERT_FILE):
raise FileNotFoundError(f"Expected {SSL_CERT_FILE} when USE_IAM_AUTH is true.")
ssl_context = ssl.create_default_context(cafile=SSL_CERT_FILE)
def include_object(
object: SchemaItem,
@@ -49,20 +59,12 @@ def include_object(
reflected: bool,
compare_to: SchemaItem | None,
) -> bool:
"""
Determines whether a database object should be included in migrations.
Excludes specified tables from migrations.
"""
if type_ == "table" and name in EXCLUDE_TABLES:
return False
return True
def get_schema_options() -> tuple[str, bool, bool]:
"""
Parses command-line options passed via '-x' in Alembic commands.
Recognizes 'schema', 'create_schema', and 'upgrade_all_tenants' options.
"""
x_args_raw = context.get_x_argument()
x_args = {}
for arg in x_args_raw:
@@ -90,16 +92,12 @@ def get_schema_options() -> tuple[str, bool, bool]:
def do_run_migrations(
connection: Connection, schema_name: str, create_schema: bool
) -> None:
"""
Executes migrations in the specified schema.
"""
logger.info(f"About to migrate schema: {schema_name}")
if create_schema:
connection.execute(text(f'CREATE SCHEMA IF NOT EXISTS "{schema_name}"'))
connection.execute(text("COMMIT"))
# Set search_path to the target schema
connection.execute(text(f'SET search_path TO "{schema_name}"'))
context.configure(
@@ -117,11 +115,25 @@ def do_run_migrations(
context.run_migrations()
def provide_iam_token_for_alembic(
dialect: Any, conn_rec: Any, cargs: Any, cparams: Any
) -> None:
if USE_IAM_AUTH:
# Database connection settings
region = AWS_REGION_NAME
host = POSTGRES_HOST
port = POSTGRES_PORT
user = POSTGRES_USER
# Get IAM authentication token
token = get_iam_auth_token(host, port, user, region)
# For Alembic / SQLAlchemy in this context, set SSL and password
cparams["password"] = token
cparams["ssl"] = ssl_context
async def run_async_migrations() -> None:
"""
Determines whether to run migrations for a single schema or all schemas,
and executes migrations accordingly.
"""
schema_name, create_schema, upgrade_all_tenants = get_schema_options()
engine = create_async_engine(
@@ -129,10 +141,16 @@ async def run_async_migrations() -> None:
poolclass=pool.NullPool,
)
if USE_IAM_AUTH:
@event.listens_for(engine.sync_engine, "do_connect")
def event_provide_iam_token_for_alembic(
dialect: Any, conn_rec: Any, cargs: Any, cparams: Any
) -> None:
provide_iam_token_for_alembic(dialect, conn_rec, cargs, cparams)
if upgrade_all_tenants:
tenant_schemas = get_all_tenant_ids()
for schema in tenant_schemas:
try:
logger.info(f"Migrating schema: {schema}")
@@ -162,15 +180,20 @@ async def run_async_migrations() -> None:
def run_migrations_offline() -> None:
"""
Run migrations in 'offline' mode.
"""
schema_name, _, upgrade_all_tenants = get_schema_options()
url = build_connection_string()
if upgrade_all_tenants:
# Run offline migrations for all tenant schemas
engine = create_async_engine(url)
if USE_IAM_AUTH:
@event.listens_for(engine.sync_engine, "do_connect")
def event_provide_iam_token_for_alembic_offline(
dialect: Any, conn_rec: Any, cargs: Any, cparams: Any
) -> None:
provide_iam_token_for_alembic(dialect, conn_rec, cargs, cparams)
tenant_schemas = get_all_tenant_ids()
engine.sync_engine.dispose()
@@ -207,9 +230,6 @@ def run_migrations_offline() -> None:
def run_migrations_online() -> None:
"""
Runs migrations in 'online' mode using an asynchronous engine.
"""
asyncio.run(run_async_migrations())
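The `do_connect` listener used above is the standard SQLAlchemy hook for injecting short-lived credentials: IAM database tokens expire after a few minutes, so the password must be minted per connection rather than baked into the URL. A minimal sketch of the same pattern outside Alembic (the engine URL and connection details are illustrative; `get_iam_auth_token` is the helper imported at the top of this file):

```python
from sqlalchemy import create_engine, event

from onyx.db.engine import get_iam_auth_token

engine = create_engine("postgresql+psycopg2://db_user@db-host:5432/postgres")

@event.listens_for(engine, "do_connect")
def _inject_iam_token(dialect, conn_rec, cargs, cparams):
    # Mint a fresh IAM token for every new connection; SSL setup omitted here.
    cparams["password"] = get_iam_auth_token("db-host", 5432, "db_user", "us-east-1")
```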


@@ -0,0 +1,29 @@
"""add shortcut option for users
Revision ID: 027381bce97c
Revises: 6fc7886d665d
Create Date: 2025-01-14 12:14:00.814390
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "027381bce97c"
down_revision = "6fc7886d665d"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.add_column(
"user",
sa.Column(
"shortcut_enabled", sa.Boolean(), nullable=False, server_default="true"
),
)
def downgrade() -> None:
op.drop_column("user", "shortcut_enabled")


@@ -11,7 +11,7 @@ from sqlalchemy.sql import table
from sqlalchemy.dialects import postgresql
import json
from onyx.utils.encryption import encrypt_string_to_bytes
# revision identifiers, used by Alembic.
revision = "0a98909f2757"


@@ -1,4 +1,4 @@
"""Introduce Danswer APIs
"""Introduce Onyx APIs
Revision ID: 15326fcec57e
Revises: 77d07dffae64
@@ -8,7 +8,7 @@ Create Date: 2023-11-11 20:51:24.228999
from alembic import op
import sqlalchemy as sa
from onyx.configs.constants import DocumentSource
# revision identifiers, used by Alembic.
revision = "15326fcec57e"


@@ -0,0 +1,29 @@
"""agent_doc_result_col
Revision ID: 1adf5ea20d2b
Revises: e9cf2bd7baed
Create Date: 2025-01-05 13:14:58.344316
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "1adf5ea20d2b"
down_revision = "e9cf2bd7baed"
branch_labels = None
depends_on = None
def upgrade() -> None:
# Add the new column with JSONB type
op.add_column(
"sub_question",
sa.Column("sub_question_doc_results", postgresql.JSONB(), nullable=True),
)
def downgrade() -> None:
# Drop the column
op.drop_column("sub_question", "sub_question_doc_results")


@@ -10,7 +10,7 @@ from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from onyx.configs.chat_configs import NUM_POSTPROCESSED_RESULTS
# revision identifiers, used by Alembic.
revision = "1f60f60c3401"


@@ -0,0 +1,24 @@
"""add chunk count to document
Revision ID: 2955778aa44c
Revises: c0aab6edb6dd
Create Date: 2025-01-04 11:39:43.268612
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "2955778aa44c"
down_revision = "c0aab6edb6dd"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.add_column("document", sa.Column("chunk_count", sa.Integer(), nullable=True))
def downgrade() -> None:
op.drop_column("document", "chunk_count")


@@ -0,0 +1,121 @@
"""properly_cascade
Revision ID: 35e518e0ddf4
Revises: 91a0a4d62b14
Create Date: 2024-09-20 21:24:04.891018
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "35e518e0ddf4"
down_revision = "91a0a4d62b14"
branch_labels = None
depends_on = None
def upgrade() -> None:
# Update chat_message foreign key constraint
op.drop_constraint(
"chat_message_chat_session_id_fkey", "chat_message", type_="foreignkey"
)
op.create_foreign_key(
"chat_message_chat_session_id_fkey",
"chat_message",
"chat_session",
["chat_session_id"],
["id"],
ondelete="CASCADE",
)
# Update chat_message__search_doc foreign key constraints
op.drop_constraint(
"chat_message__search_doc_chat_message_id_fkey",
"chat_message__search_doc",
type_="foreignkey",
)
op.drop_constraint(
"chat_message__search_doc_search_doc_id_fkey",
"chat_message__search_doc",
type_="foreignkey",
)
op.create_foreign_key(
"chat_message__search_doc_chat_message_id_fkey",
"chat_message__search_doc",
"chat_message",
["chat_message_id"],
["id"],
ondelete="CASCADE",
)
op.create_foreign_key(
"chat_message__search_doc_search_doc_id_fkey",
"chat_message__search_doc",
"search_doc",
["search_doc_id"],
["id"],
ondelete="CASCADE",
)
# Add CASCADE delete for tool_call foreign key
op.drop_constraint("tool_call_message_id_fkey", "tool_call", type_="foreignkey")
op.create_foreign_key(
"tool_call_message_id_fkey",
"tool_call",
"chat_message",
["message_id"],
["id"],
ondelete="CASCADE",
)
def downgrade() -> None:
# Revert chat_message foreign key constraint
op.drop_constraint(
"chat_message_chat_session_id_fkey", "chat_message", type_="foreignkey"
)
op.create_foreign_key(
"chat_message_chat_session_id_fkey",
"chat_message",
"chat_session",
["chat_session_id"],
["id"],
)
# Revert chat_message__search_doc foreign key constraints
op.drop_constraint(
"chat_message__search_doc_chat_message_id_fkey",
"chat_message__search_doc",
type_="foreignkey",
)
op.drop_constraint(
"chat_message__search_doc_search_doc_id_fkey",
"chat_message__search_doc",
type_="foreignkey",
)
op.create_foreign_key(
"chat_message__search_doc_chat_message_id_fkey",
"chat_message__search_doc",
"chat_message",
["chat_message_id"],
["id"],
)
op.create_foreign_key(
"chat_message__search_doc_search_doc_id_fkey",
"chat_message__search_doc",
"search_doc",
["search_doc_id"],
["id"],
)
# Revert tool_call foreign key constraint
op.drop_constraint("tool_call_message_id_fkey", "tool_call", type_="foreignkey")
op.create_foreign_key(
"tool_call_message_id_fkey",
"tool_call",
"chat_message",
["message_id"],
["id"],
)


@@ -0,0 +1,35 @@
"""add composite index for index attempt time updated
Revision ID: 369644546676
Revises: 2955778aa44c
Create Date: 2025-01-08 15:38:17.224380
"""
from alembic import op
from sqlalchemy import text
# revision identifiers, used by Alembic.
revision = "369644546676"
down_revision = "2955778aa44c"
branch_labels: None = None
depends_on: None = None
def upgrade() -> None:
op.create_index(
"ix_index_attempt_ccpair_search_settings_time_updated",
"index_attempt",
[
"connector_credential_pair_id",
"search_settings_id",
text("time_updated DESC"),
],
unique=False,
)
def downgrade() -> None:
op.drop_index(
"ix_index_attempt_ccpair_search_settings_time_updated",
table_name="index_attempt",
)


@@ -0,0 +1,58 @@
"""add back input prompts
Revision ID: 3c6531f32351
Revises: aeda5f2df4f6
Create Date: 2025-01-13 12:49:51.705235
"""
from alembic import op
import sqlalchemy as sa
import fastapi_users_db_sqlalchemy
# revision identifiers, used by Alembic.
revision = "3c6531f32351"
down_revision = "aeda5f2df4f6"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.create_table(
"inputprompt",
sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
sa.Column("prompt", sa.String(), nullable=False),
sa.Column("content", sa.String(), nullable=False),
sa.Column("active", sa.Boolean(), nullable=False),
sa.Column("is_public", sa.Boolean(), nullable=False),
sa.Column(
"user_id",
fastapi_users_db_sqlalchemy.generics.GUID(),
nullable=True,
),
sa.ForeignKeyConstraint(
["user_id"],
["user.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"inputprompt__user",
sa.Column("input_prompt_id", sa.Integer(), nullable=False),
sa.Column(
"user_id", fastapi_users_db_sqlalchemy.generics.GUID(), nullable=False
),
sa.ForeignKeyConstraint(
["input_prompt_id"],
["inputprompt.id"],
),
sa.ForeignKeyConstraint(
["user_id"],
["user.id"],
),
sa.PrimaryKeyConstraint("input_prompt_id", "user_id"),
)
def downgrade() -> None:
op.drop_table("inputprompt__user")
op.drop_table("inputprompt")


@@ -17,7 +17,7 @@ depends_on: None = None
def upgrade() -> None:
# At this point, we directly changed some previous migrations,
# https://github.com/onyx-dot-app/onyx/pull/637
# Due to using Postgres native Enums, it caused some complications for first time users.
# To remove those complications, all Enums are only handled application side moving forward.
# This migration exists to ensure that existing users don't run into upgrade issues.


@@ -10,8 +10,8 @@ from typing import cast
from alembic import op
import sqlalchemy as sa
from sqlalchemy.orm import Session
from onyx.key_value_store.factory import get_kv_store
from onyx.db.models import SlackBot
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.


@@ -0,0 +1,23 @@
"""danswerbot -> onyxbot
Revision ID: 54a74a0417fc
Revises: 94dc3d0236f8
Create Date: 2024-12-11 18:05:05.490737
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "54a74a0417fc"
down_revision = "94dc3d0236f8"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.alter_column("chat_session", "danswerbot_flow", new_column_name="onyxbot_flow")
def downgrade() -> None:
op.alter_column("chat_session", "onyxbot_flow", new_column_name="danswerbot_flow")


@@ -1,4 +1,4 @@
"""Track Danswerbot Explicitly
"""Track Onyxbot Explicitly
Revision ID: 570282d33c49
Revises: 7547d982db8f


@@ -0,0 +1,79 @@
"""make categories labels and many to many
Revision ID: 6fc7886d665d
Revises: 3c6531f32351
Create Date: 2025-01-13 18:12:18.029112
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "6fc7886d665d"
down_revision = "3c6531f32351"
branch_labels = None
depends_on = None
def upgrade() -> None:
# Rename persona_category table to persona_label
op.rename_table("persona_category", "persona_label")
# Create the new association table
op.create_table(
"persona__persona_label",
sa.Column("persona_id", sa.Integer(), nullable=False),
sa.Column("persona_label_id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["persona_id"],
["persona.id"],
),
sa.ForeignKeyConstraint(
["persona_label_id"],
["persona_label.id"],
),
sa.PrimaryKeyConstraint("persona_id", "persona_label_id"),
)
# Copy existing relationships to the new table
op.execute(
"""
INSERT INTO persona__persona_label (persona_id, persona_label_id)
SELECT id, category_id FROM persona WHERE category_id IS NOT NULL
"""
)
# Remove the old category_id column from persona table
op.drop_column("persona", "category_id")
def downgrade() -> None:
# Rename persona_label table back to persona_category
op.rename_table("persona_label", "persona_category")
# Add back the category_id column to persona table
op.add_column("persona", sa.Column("category_id", sa.Integer(), nullable=True))
op.create_foreign_key(
"persona_category_id_fkey",
"persona",
"persona_category",
["category_id"],
["id"],
)
# Copy the first label relationship back to the persona table
op.execute(
"""
UPDATE persona
SET category_id = (
SELECT persona_label_id
FROM persona__persona_label
WHERE persona__persona_label.persona_id = persona.id
LIMIT 1
)
"""
)
# Drop the association table
op.drop_table("persona__persona_label")


@@ -9,7 +9,7 @@ import json
from typing import cast
from alembic import op
import sqlalchemy as sa
from onyx.key_value_store.factory import get_kv_store
# revision identifiers, used by Alembic.
revision = "703313b75876"


@@ -8,9 +8,9 @@ Create Date: 2024-03-22 21:34:27.629444
from alembic import op
import sqlalchemy as sa
from onyx.db.models import IndexModelStatus
from onyx.context.search.enums import RecencyBiasSetting
from onyx.context.search.enums import SearchType
# revision identifiers, used by Alembic.
revision = "776b3bbe9092"


@@ -18,7 +18,7 @@ depends_on: None = None
def upgrade() -> None:
# In a PR:
# https://github.com/onyx-dot-app/onyx/pull/397/files#diff-f05fb341f6373790b91852579631b64ca7645797a190837156a282b67e5b19c2
# we directly changed some previous migrations. This caused some users to have native enums
# while others wouldn't. This has caused some issues when adding new fields to these enums.
# This migration manually changes the enum types to ensure that nobody uses native enums.


@@ -0,0 +1,45 @@
"""Milestone
Revision ID: 91a0a4d62b14
Revises: dab04867cd88
Create Date: 2024-12-13 19:03:30.947551
"""
from alembic import op
import sqlalchemy as sa
import fastapi_users_db_sqlalchemy
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "91a0a4d62b14"
down_revision = "dab04867cd88"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.create_table(
"milestone",
sa.Column("id", sa.UUID(), nullable=False),
sa.Column("tenant_id", sa.String(), nullable=True),
sa.Column(
"user_id",
fastapi_users_db_sqlalchemy.generics.GUID(),
nullable=True,
),
sa.Column("event_type", sa.String(), nullable=False),
sa.Column(
"time_created",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column("event_tracker", postgresql.JSONB(), nullable=True),
sa.ForeignKeyConstraint(["user_id"], ["user.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("event_type", name="uq_milestone_event_type"),
)
def downgrade() -> None:
op.drop_table("milestone")


@@ -7,7 +7,7 @@ Create Date: 2024-03-21 12:05:23.956734
"""
from alembic import op
import sqlalchemy as sa
from onyx.configs.constants import DocumentSource
# revision identifiers, used by Alembic.
revision = "91fd3b470d1a"


@@ -0,0 +1,35 @@
"""agent_metric_col_rename__s
Revision ID: 925b58bd75b6
Revises: 9787be927e58
Create Date: 2025-01-06 11:20:26.752441
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "925b58bd75b6"
down_revision = "9787be927e58"
branch_labels = None
depends_on = None
def upgrade() -> None:
# Rename columns using PostgreSQL syntax
op.alter_column(
"agent__search_metrics", "base_duration_s", new_column_name="base_duration__s"
)
op.alter_column(
"agent__search_metrics", "full_duration_s", new_column_name="full_duration__s"
)
def downgrade() -> None:
# Revert the column renames
op.alter_column(
"agent__search_metrics", "base_duration__s", new_column_name="base_duration_s"
)
op.alter_column(
"agent__search_metrics", "full_duration__s", new_column_name="full_duration_s"
)


@@ -10,7 +10,7 @@ from sqlalchemy.orm import Session
from sqlalchemy import text
# Import your models and constants
from onyx.db.models import (
Connector,
ConnectorCredentialPair,
Credential,


@@ -0,0 +1,30 @@
"""make document set description optional
Revision ID: 94dc3d0236f8
Revises: bf7a81109301
Create Date: 2024-12-11 11:26:10.616722
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "94dc3d0236f8"
down_revision = "bf7a81109301"
branch_labels = None
depends_on = None
def upgrade() -> None:
# Make document_set.description column nullable
op.alter_column(
"document_set", "description", existing_type=sa.String(), nullable=True
)
def downgrade() -> None:
# Revert document_set.description column to non-nullable
op.alter_column(
"document_set", "description", existing_type=sa.String(), nullable=False
)


@@ -0,0 +1,25 @@
"""agent_metric_table_renames__agent__
Revision ID: 9787be927e58
Revises: bceb76d618ec
Create Date: 2025-01-06 11:01:44.210160
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "9787be927e58"
down_revision = "bceb76d618ec"
branch_labels = None
depends_on = None
def upgrade() -> None:
# Rename table from agent_search_metrics to agent__search_metrics
op.rename_table("agent_search_metrics", "agent__search_metrics")
def downgrade() -> None:
# Rename table back from agent__search_metrics to agent_search_metrics
op.rename_table("agent__search_metrics", "agent_search_metrics")


@@ -0,0 +1,42 @@
"""agent_tracking
Revision ID: 98a5008d8711
Revises: 027381bce97c
Create Date: 2025-01-04 14:41:52.732238
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "98a5008d8711"
down_revision = "027381bce97c"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.create_table(
"agent_search_metrics",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("user_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("persona_id", sa.Integer(), nullable=True),
sa.Column("agent_type", sa.String(), nullable=False),
sa.Column("start_time", sa.DateTime(timezone=True), nullable=False),
sa.Column("base_duration_s", sa.Float(), nullable=False),
sa.Column("full_duration_s", sa.Float(), nullable=False),
sa.Column("base_metrics", postgresql.JSONB(), nullable=True),
sa.Column("refined_metrics", postgresql.JSONB(), nullable=True),
sa.Column("all_metrics", postgresql.JSONB(), nullable=True),
sa.ForeignKeyConstraint(
["persona_id"],
["persona.id"],
),
sa.ForeignKeyConstraint(["user_id"], ["user.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
)
def downgrade() -> None:
op.drop_table("agent_search_metrics")


@@ -0,0 +1,27 @@
"""add pinned assistants
Revision ID: aeda5f2df4f6
Revises: 369644546676
Create Date: 2025-01-09 16:04:10.770636
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "aeda5f2df4f6"
down_revision = "369644546676"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.add_column(
"user", sa.Column("pinned_assistants", postgresql.JSONB(), nullable=True)
)
op.execute('UPDATE "user" SET pinned_assistants = chosen_assistants')
def downgrade() -> None:
op.drop_column("user", "pinned_assistants")


@@ -10,7 +10,7 @@ from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import ENUM
from onyx.configs.constants import DocumentSource
# revision identifiers, used by Alembic.
revision = "b156fa702355"


@@ -0,0 +1,84 @@
"""agent_table_renames__agent__
Revision ID: bceb76d618ec
Revises: c0132518a25b
Create Date: 2025-01-06 10:50:48.109285
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "bceb76d618ec"
down_revision = "c0132518a25b"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.drop_constraint(
"sub_query__search_doc_sub_query_id_fkey",
"sub_query__search_doc",
type_="foreignkey",
)
op.drop_constraint(
"sub_query__search_doc_search_doc_id_fkey",
"sub_query__search_doc",
type_="foreignkey",
)
# Rename tables
op.rename_table("sub_query", "agent__sub_query")
op.rename_table("sub_question", "agent__sub_question")
op.rename_table("sub_query__search_doc", "agent__sub_query__search_doc")
# Update both foreign key constraints for agent__sub_query__search_doc
# Create new foreign keys with updated names
op.create_foreign_key(
"agent__sub_query__search_doc_sub_query_id_fkey",
"agent__sub_query__search_doc",
"agent__sub_query",
["sub_query_id"],
["id"],
)
op.create_foreign_key(
"agent__sub_query__search_doc_search_doc_id_fkey",
"agent__sub_query__search_doc",
"search_doc", # This table name doesn't change
["search_doc_id"],
["id"],
)
def downgrade() -> None:
# Update foreign key constraints for sub_query__search_doc
op.drop_constraint(
"agent__sub_query__search_doc_sub_query_id_fkey",
"agent__sub_query__search_doc",
type_="foreignkey",
)
op.drop_constraint(
"agent__sub_query__search_doc_search_doc_id_fkey",
"agent__sub_query__search_doc",
type_="foreignkey",
)
# Rename tables back
op.rename_table("agent__sub_query__search_doc", "sub_query__search_doc")
op.rename_table("agent__sub_question", "sub_question")
op.rename_table("agent__sub_query", "sub_query")
op.create_foreign_key(
"sub_query__search_doc_sub_query_id_fkey",
"sub_query__search_doc",
"sub_query",
["sub_query_id"],
["id"],
)
op.create_foreign_key(
"sub_query__search_doc_search_doc_id_fkey",
"sub_query__search_doc",
"search_doc", # This table name doesn't change
["search_doc_id"],
["id"],
)


@@ -0,0 +1,57 @@
"""delete_input_prompts
Revision ID: bf7a81109301
Revises: f7a894b06d02
Create Date: 2024-12-09 12:00:49.884228
"""
from alembic import op
import sqlalchemy as sa
import fastapi_users_db_sqlalchemy
# revision identifiers, used by Alembic.
revision = "bf7a81109301"
down_revision = "f7a894b06d02"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.drop_table("inputprompt__user")
op.drop_table("inputprompt")
def downgrade() -> None:
op.create_table(
"inputprompt",
sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
sa.Column("prompt", sa.String(), nullable=False),
sa.Column("content", sa.String(), nullable=False),
sa.Column("active", sa.Boolean(), nullable=False),
sa.Column("is_public", sa.Boolean(), nullable=False),
sa.Column(
"user_id",
fastapi_users_db_sqlalchemy.generics.GUID(),
nullable=True,
),
sa.ForeignKeyConstraint(
["user_id"],
["user.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"inputprompt__user",
sa.Column("input_prompt_id", sa.Integer(), nullable=False),
sa.Column("user_id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["input_prompt_id"],
["inputprompt.id"],
),
sa.ForeignKeyConstraint(
["user_id"],
["inputprompt.id"],
),
sa.PrimaryKeyConstraint("input_prompt_id", "user_id"),
)


@@ -0,0 +1,40 @@
"""agent_table_changes_rename_level
Revision ID: c0132518a25b
Revises: 1adf5ea20d2b
Create Date: 2025-01-05 16:38:37.660152
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "c0132518a25b"
down_revision = "1adf5ea20d2b"
branch_labels = None
depends_on = None
def upgrade() -> None:
# Add level and level_question_nr columns with NOT NULL constraint
op.add_column(
"sub_question",
sa.Column("level", sa.Integer(), nullable=False, server_default="0"),
)
op.add_column(
"sub_question",
sa.Column(
"level_question_nr", sa.Integer(), nullable=False, server_default="0"
),
)
# Remove the server_default after the columns are created
op.alter_column("sub_question", "level", server_default=None)
op.alter_column("sub_question", "level_question_nr", server_default=None)
def downgrade() -> None:
# Remove the columns
op.drop_column("sub_question", "level_question_nr")
op.drop_column("sub_question", "level")


@@ -0,0 +1,87 @@
"""delete workspace
Revision ID: c0aab6edb6dd
Revises: 35e518e0ddf4
Create Date: 2024-12-17 14:37:07.660631
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "c0aab6edb6dd"
down_revision = "35e518e0ddf4"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.execute(
"""
UPDATE connector
SET connector_specific_config = connector_specific_config - 'workspace'
WHERE source = 'SLACK'
"""
)
def downgrade() -> None:
import json
from sqlalchemy import text
from slack_sdk import WebClient
conn = op.get_bind()
# Fetch all Slack credentials
creds_result = conn.execute(
text("SELECT id, credential_json FROM credential WHERE source = 'SLACK'")
)
all_slack_creds = creds_result.fetchall()
if not all_slack_creds:
return
for cred_row in all_slack_creds:
credential_id, credential_json = cred_row
credential_json = (
credential_json.tobytes().decode("utf-8")
if isinstance(credential_json, memoryview)
else credential_json.decode("utf-8")
)
credential_data = json.loads(credential_json)
slack_bot_token = credential_data.get("slack_bot_token")
if not slack_bot_token:
print(
f"No slack_bot_token found for credential {credential_id}. "
"Your Slack connector will not function until you upgrade and provide a valid token."
)
continue
client = WebClient(token=slack_bot_token)
try:
auth_response = client.auth_test()
workspace = auth_response["url"].split("//")[1].split(".")[0]
# Update only the connectors linked to this credential
# (and which are Slack connectors).
op.execute(
f"""
UPDATE connector AS c
SET connector_specific_config = jsonb_set(
connector_specific_config,
'{{workspace}}',
to_jsonb('{workspace}'::text)
)
FROM connector_credential_pair AS ccp
WHERE ccp.connector_id = c.id
AND c.source = 'SLACK'
AND ccp.credential_id = {credential_id}
"""
)
except Exception:
print(
f"We were unable to get the workspace url for your Slack Connector with id {credential_id}."
)
print("This connector will no longer work until you upgrade.")
continue


@@ -0,0 +1,32 @@
"""Add composite index to document_by_connector_credential_pair
Revision ID: dab04867cd88
Revises: 54a74a0417fc
Create Date: 2024-12-13 22:43:20.119990
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "dab04867cd88"
down_revision = "54a74a0417fc"
branch_labels = None
depends_on = None
def upgrade() -> None:
# Composite index on (connector_id, credential_id)
op.create_index(
"idx_document_cc_pair_connector_credential",
"document_by_connector_credential_pair",
["connector_id", "credential_id"],
unique=False,
)
def downgrade() -> None:
op.drop_index(
"idx_document_cc_pair_connector_credential",
table_name="document_by_connector_credential_pair",
)


@@ -1,4 +1,4 @@
"""Danswer Custom Tool Flow
"""Onyx Custom Tool Flow
Revision ID: dba7f71618f5
Revises: d5645c915d0e


@@ -9,12 +9,12 @@ from alembic import op
import sqlalchemy as sa
from sqlalchemy import table, column, String, Integer, Boolean
from onyx.db.search_settings import (
get_new_default_embedding_model,
get_old_default_embedding_model,
user_has_overridden_embedding_model,
)
from onyx.db.models import IndexModelStatus
# revision identifiers, used by Alembic.
revision = "dbaa756c2ccf"


@@ -8,7 +8,7 @@ Create Date: 2024-03-14 18:06:08.523106
from alembic import op
import sqlalchemy as sa
from onyx.configs.constants import DocumentSource
# revision identifiers, used by Alembic.
revision = "e50154680a5c"


@@ -0,0 +1,68 @@
"""create pro search persistence tables
Revision ID: e9cf2bd7baed
Revises: 98a5008d8711
Create Date: 2025-01-02 17:55:56.544246
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import UUID
# revision identifiers, used by Alembic.
revision = "e9cf2bd7baed"
down_revision = "98a5008d8711"
branch_labels = None
depends_on = None
def upgrade() -> None:
# Create sub_question table
op.create_table(
"sub_question",
sa.Column("id", sa.Integer, primary_key=True),
sa.Column("primary_question_id", sa.Integer, sa.ForeignKey("chat_message.id")),
sa.Column(
"chat_session_id", UUID(as_uuid=True), sa.ForeignKey("chat_session.id")
),
sa.Column("sub_question", sa.Text),
sa.Column(
"time_created", sa.DateTime(timezone=True), server_default=sa.func.now()
),
sa.Column("sub_answer", sa.Text),
)
# Create sub_query table
op.create_table(
"sub_query",
sa.Column("id", sa.Integer, primary_key=True),
sa.Column("parent_question_id", sa.Integer, sa.ForeignKey("sub_question.id")),
sa.Column(
"chat_session_id", UUID(as_uuid=True), sa.ForeignKey("chat_session.id")
),
sa.Column("sub_query", sa.Text),
sa.Column(
"time_created", sa.DateTime(timezone=True), server_default=sa.func.now()
),
)
# Create sub_query__search_doc association table
op.create_table(
"sub_query__search_doc",
sa.Column(
"sub_query_id", sa.Integer, sa.ForeignKey("sub_query.id"), primary_key=True
),
sa.Column(
"search_doc_id",
sa.Integer,
sa.ForeignKey("search_doc.id"),
primary_key=True,
),
)
def downgrade() -> None:
op.drop_table("sub_query__search_doc")
op.drop_table("sub_query")
op.drop_table("sub_question")


@@ -0,0 +1,40 @@
"""non-nullbale slack bot id in channel config
Revision ID: f7a894b06d02
Revises: 9f696734098f
Create Date: 2024-12-06 12:55:42.845723
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "f7a894b06d02"
down_revision = "9f696734098f"
branch_labels = None
depends_on = None
def upgrade() -> None:
# Delete all rows with null slack_bot_id
op.execute("DELETE FROM slack_channel_config WHERE slack_bot_id IS NULL")
# Make slack_bot_id non-nullable
op.alter_column(
"slack_channel_config",
"slack_bot_id",
existing_type=sa.Integer(),
nullable=False,
)
def downgrade() -> None:
# Make slack_bot_id nullable again
op.alter_column(
"slack_channel_config",
"slack_bot_id",
existing_type=sa.Integer(),
nullable=True,
)


@@ -1,3 +1,3 @@
These files are for public table migrations when operating with multi tenancy.
If you are not an Onyx developer, you can ignore this directory entirely.


@@ -8,8 +8,8 @@ from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy.schema import SchemaItem
from alembic import context
from onyx.db.engine import build_connection_string
from onyx.db.models import PublicBase
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.


@@ -0,0 +1,31 @@
"""mapping for anonymous user path
Revision ID: a4f6ee863c47
Revises: 14a83a331951
Create Date: 2025-01-04 14:16:58.697451
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "a4f6ee863c47"
down_revision = "14a83a331951"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.create_table(
"tenant_anonymous_user_path",
sa.Column("tenant_id", sa.String(), primary_key=True, nullable=False),
sa.Column("anonymous_user_path", sa.String(), nullable=False),
sa.PrimaryKeyConstraint("tenant_id"),
sa.UniqueConstraint("anonymous_user_path"),
)
def downgrade() -> None:
op.drop_table("tenant_anonymous_user_path")

backend/chatt.txt

@@ -0,0 +1,536 @@
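This file is a raw capture of the agent-search streaming protocol: each line is a JSON-encoded string wrapping one token-level packet, where `sub_question` and `sub_query` carry a text fragment and `level`, `level_question_nr`, and `query_id` identify which stream the fragment belongs to. A minimal sketch of how such a capture could be reassembled into readable text (the path and function name are illustrative, not part of the codebase):

```python
import json
from collections import defaultdict

def reassemble(path: str) -> None:
    """Stitch token-level stream packets back into full sub-questions/sub-queries."""
    questions: dict[tuple[int, int], list[str]] = defaultdict(list)
    queries: dict[tuple[int, int, int], list[str]] = defaultdict(list)
    with open(path) as f:
        for raw in f:
            # Each line is a JSON string literal wrapping a JSON object,
            # so it has to be decoded twice.
            packet = json.loads(json.loads(raw))
            if "sub_question" in packet:
                key = (packet["level"], packet["level_question_nr"])
                questions[key].append(packet["sub_question"])
            elif "sub_query" in packet:
                qkey = (packet["level"], packet["level_question_nr"], packet["query_id"])
                queries[qkey].append(packet["sub_query"])
    for key, parts in sorted(questions.items()):
        print("question", key, "".join(parts))
    for qkey, parts in sorted(queries.items()):
        print("query", qkey, "".join(parts))

reassemble("backend/chatt.txt")
```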
"{\"user_message_id\": 475, \"reserved_assistant_message_id\": 476}\n"
"{\"sub_question\": \"What\", \"level\": 0, \"level_question_nr\": 1}\n"
"{\"sub_query\": \"ony\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_question\": \" is\", \"level\": 0, \"level_question_nr\": 1}\n"
"{\"sub_query\": \"x\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_question\": \" On\", \"level\": 0, \"level_question_nr\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_question\": \"yx\", \"level\": 0, \"level_question_nr\": 1}\n"
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 1}\n"
"{\"sub_query\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_question\": \"1\", \"level\": 0, \"level_question_nr\": 1}\n"
"{\"sub_query\": \" features\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_question\": \"?\", \"level\": 0, \"level_question_nr\": 1}\n"
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 1}\n"
"{\"sub_question\": \"\", \"level\": 0, \"level_question_nr\": 2}\n"
"{\"sub_question\": \"What\", \"level\": 0, \"level_question_nr\": 2}\n"
"{\"sub_question\": \" is\", \"level\": 0, \"level_question_nr\": 2}\n"
"{\"sub_query\": \" specifications\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_question\": \" On\", \"level\": 0, \"level_question_nr\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_question\": \"yx\", \"level\": 0, \"level_question_nr\": 2}\n"
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 2}\n"
"{\"sub_question\": \"2\", \"level\": 0, \"level_question_nr\": 2}\n"
"{\"sub_query\": \"ony\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_question\": \"?\", \"level\": 0, \"level_question_nr\": 2}\n"
"{\"sub_query\": \"x\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_question\": \"\", \"level\": 0, \"level_question_nr\": 3}\n"
"{\"sub_query\": \"2\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_question\": \"What\", \"level\": 0, \"level_question_nr\": 3}\n"
"{\"sub_query\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_question\": \" is\", \"level\": 0, \"level_question_nr\": 3}\n"
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_question\": \" On\", \"level\": 0, \"level_question_nr\": 3}\n"
"{\"sub_query\": \" use\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_question\": \"yx\", \"level\": 0, \"level_question_nr\": 3}\n"
"{\"sub_query\": \" cases\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 3}\n"
"{\"sub_question\": \"3\", \"level\": 0, \"level_question_nr\": 3}\n"
"{\"sub_question\": \"?\", \"level\": 0, \"level_question_nr\": 3}\n"
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 3}\n"
"{\"sub_question\": \"\", \"level\": 0, \"level_question_nr\": 4}\n"
"{\"sub_question\": \"What\", \"level\": 0, \"level_question_nr\": 4}\n"
"{\"sub_question\": \" is\", \"level\": 0, \"level_question_nr\": 4}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_question\": \" On\", \"level\": 0, \"level_question_nr\": 4}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_question\": \"yx\", \"level\": 0, \"level_question_nr\": 4}\n"
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 4}\n"
"{\"sub_query\": \"ony\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_question\": \"4\", \"level\": 0, \"level_question_nr\": 4}\n"
"{\"sub_question\": \"?\", \"level\": 0, \"level_question_nr\": 4}\n"
"{\"sub_query\": \"x\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_question\": \" \", \"level\": 0, \"level_question_nr\": 4}\n"
"{\"sub_question\": \"\", \"level\": 0, \"level_question_nr\": 4}\n"
"{\"sub_query\": \"3\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \"4\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" comparison\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" differences\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
"{\"sub_query\": \"4\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_query\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
"{\"sub_query\": \" product\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_query\": \"3\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
"{\"sub_query\": \" information\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_query\": \" software\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
"{\"sub_query\": \" features\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 0}\n"
"{\"sub_query\": \" software\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
"{\"sub_query\": \"2\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
"{\"sub_query\": \" features\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 0}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_query\": \" software\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
"{\"sub_query\": \" features\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
"{\"sub_query\": \"4\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
"{\"sub_query\": \" applications\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 0}\n"
"{\"sub_query\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
"{\"sub_query\": \" features\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
"{\"sub_query\": \" in\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
"{\"sub_query\": \" industry\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 0}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
"{\"sub_query\": \" specifications\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
"{\"sub_query\": \"2\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
"{\"sub_query\": \" applications\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 1}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \"3\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" applications\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \" in\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \" industry\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
"{\"sub_query\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 1}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
"{\"sub_query\": \"4\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \" comparison\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \" with\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" previous\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \" use\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" versions\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \" in\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
"{\"sub_query\": \" industry\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 3, \"query_id\": 2}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 1}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
"{\"sub_query\": \"On\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"sub_query\": \"yx\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"sub_query\": \"3\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"sub_query\": \" comparison\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"sub_query\": \"2\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
"{\"sub_query\": \" comparison\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
"{\"sub_query\": \" with\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"sub_query\": \" cases\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" previous\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 0, \"query_id\": 2}\n"
"{\"sub_query\": \" with\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
"{\"sub_query\": \" other\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
"{\"sub_query\": \" software\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 1, \"query_id\": 2}\n"
"{\"sub_query\": \" versions\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"sub_query\": \" \", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"sub_query\": \"\", \"level\": 0, \"level_question_nr\": 2, \"query_id\": 2}\n"
"{\"top_documents\": [], \"rephrased_query\": \"What is Onyx 4?\", \"predicted_flow\": \"question-answer\", \"predicted_search\": \"keyword\", \"applied_source_filters\": null, \"applied_time_cutoff\": null, \"recency_bias_multiplier\": 0.5}\n"
"{\"llm_selected_doc_indices\": []}\n"
"{\"final_context_docs\": []}\n"
"{\"answer_piece\": \"I\", \"level\": 0, \"level_question_nr\": 3, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" don't\", \"level\": 0, \"level_question_nr\": 3, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" know\", \"level\": 0, \"level_question_nr\": 3, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 3, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" formerly\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" known\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" D\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"answer\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" an\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" AI\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" Assistant\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" that\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" connects\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" to\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" company's\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" documents\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" personnel\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" It\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" provides\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" chat\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" interface\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" can\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" integrate\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" with\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" any\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" large\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" language\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" model\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" (\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"LL\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"M\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \")\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"top_documents\": [], \"rephrased_query\": \"What is Onyx 2?\", \"predicted_flow\": \"question-answer\", \"predicted_search\": \"keyword\", \"applied_source_filters\": null, \"applied_time_cutoff\": null, \"recency_bias_multiplier\": 0.5}\n"
"{\"llm_selected_doc_indices\": []}\n"
"{\"final_context_docs\": []}\n"
"{\"answer_piece\": \" of\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" choice\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" designed\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" to\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" be\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" modular\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" easily\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" extens\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"ible\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" allowing\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" for\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" deployment\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" on\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" various\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" platforms\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" including\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" laptops\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" on\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"-prem\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"ise\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" or\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" cloud\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" environments\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" It\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" ensures\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" that\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" user\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" data\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" chats\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" remain\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"I\", \"level\": 0, \"level_question_nr\": 1, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" under\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" don't\", \"level\": 0, \"level_question_nr\": 1, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" the\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" user's\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" know\", \"level\": 0, \"level_question_nr\": 1, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 1, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" control\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" the\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" deployment\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" owned\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" by\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" the\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" user\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" MIT\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" licensed\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" comes\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" ready\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" for\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" production\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" use\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" featuring\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" user\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" authentication\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" role\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" management\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" chat\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" persistence\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" user\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" interface\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" for\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" configuring\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" AI\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" Assist\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"ants\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" their\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" prompts\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" Additionally\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" serves\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" unified\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" search\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" tool\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" across\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" common\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" workplace\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" like\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" Slack\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" Google\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" Drive\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" Con\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"fluence\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" enabling\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" it\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" to\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" act\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" subject\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" matter\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" expert\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" for\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" teams\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" by\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" combining\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" L\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"LM\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"s\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" with\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" team\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \"-specific\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" knowledge\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" [[1]]()\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"top_documents\": [], \"rephrased_query\": \"What is Onyx 3?\", \"predicted_flow\": \"question-answer\", \"predicted_search\": \"keyword\", \"applied_source_filters\": null, \"applied_time_cutoff\": null, \"recency_bias_multiplier\": 0.5}\n"
"{\"llm_selected_doc_indices\": []}\n"
"{\"final_context_docs\": []}\n"
"{\"answer_piece\": \"I\", \"level\": 0, \"level_question_nr\": 2, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" don't\", \"level\": 0, \"level_question_nr\": 2, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \" know\", \"level\": 0, \"level_question_nr\": 2, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 2, \"answer_type\": \"agent_sub_answer\"}\n"
"{\"top_documents\": [{\"document_id\": \"https://docs.onyx.app/introduction\", \"chunk_ind\": 0, \"semantic_identifier\": \"Introduction - Onyx Documentation\", \"link\": \"https://docs.onyx.app/introduction\", \"blurb\": \"Onyx Documentation home page\\nSearch...\\nNavigation\\nWelcome to Onyx\\nIntroduction\\nWelcome to Onyx\\nIntroduction\\nOnyx Overview\\n\\nWhat is Onyx\\nOnyx (Formerly Danswer) is the AI Assistant connected to your companys docs, apps, and people. Onyx provides a Chat interface and plugs into any LLM of your choice. Onyx can be deployed anywhere and for any scale - on a laptop, on-premise, or to cloud. Since you own the deployment, your user data and chats are fully in your own control. Onyx is MIT licensed and designed to be modular and easily extensible.\", \"source_type\": \"web\", \"boost\": 0, \"hidden\": false, \"metadata\": {}, \"score\": 0.6275177643886491, \"is_relevant\": null, \"relevance_explanation\": null, \"match_highlights\": [\"\", \"such as A customer wants feature X, is this already supported? or Wheres the pull request for feature Y?\\n<hi>Onyx</hi> can also be plugged into existing tools like Slack to get answers and AI chats directly in Slack.\\n\\nDemo\\n\\nMain <hi>Features</hi> \\n- Chat UI with the ability to select documents to chat with.\\n- Create custom AI Assistants\", \"\"], \"updated_at\": null, \"primary_owners\": null, \"secondary_owners\": null, \"is_internet\": false, \"db_doc_id\": 35923}], \"rephrased_query\": \"what is onyx 1, 2, 3, 4\", \"predicted_flow\": \"question-answer\", \"predicted_search\": \"keyword\", \"applied_source_filters\": null, \"applied_time_cutoff\": null, \"recency_bias_multiplier\": 0.5}\n"
"{\"llm_selected_doc_indices\": []}\n"
"{\"final_context_docs\": [{\"document_id\": \"https://docs.onyx.app/introduction\", \"content\": \"Onyx Documentation home page\\nSearch...\\nNavigation\\nWelcome to Onyx\\nIntroduction\\nWelcome to Onyx\\nIntroduction\\nOnyx Overview\\n\\nWhat is Onyx\\nOnyx (Formerly Danswer) is the AI Assistant connected to your companys docs, apps, and people. Onyx provides a Chat interface and plugs into any LLM of your choice. Onyx can be deployed anywhere and for any scale - on a laptop, on-premise, or to cloud. Since you own the deployment, your user data and chats are fully in your own control. Onyx is MIT licensed and designed to be modular and easily extensible. The system also comes fully ready for production usage with user authentication, role management (admin/basic users), chat persistence, and a UI for configuring Personas (AI Assistants) and their Prompts.\\nOnyx also serves as a Unified Search across all common workplace tools such as Slack, Google Drive, Confluence, etc. By combining LLMs and team specific knowledge, Onyx becomes a subject matter expert for the team. Its like ChatGPT if it had access to your teams unique knowledge! It enables questions such as A customer wants feature X, is this already supported? or Wheres the pull request for feature Y?\\nOnyx can also be plugged into existing tools like Slack to get answers and AI chats directly in Slack.\\n\\nDemo\\n\\nMain Features \\n- Chat UI with the ability to select documents to chat with.\\n- Create custom AI Assistants with different prompts and backing knowledge sets.\\n- Connect Onyx with LLM of your choice (self-host for a fully airgapped solution).\\n- Document Search + AI Answers for natural language queries.\\n- Connectors to all common workplace tools like Google Drive, Confluence, Slack, etc.\\n- Slack integration to get answers and search results directly in Slack.\\n\\nUpcoming\\n- Chat/Prompt sharing with specific teammates and user groups.\\n- Multi-modal model support, chat with images, video etc.\\n- Choosing between LLMs and parameters during chat session.\\n- Tool calling and agent configurations options.\\n- Organizational understanding and ability to locate and suggest experts from your team.\\n\\nOther Noteable Benefits of Onyx\\n- User Authentication with document level access management.\\n- Best in class Hybrid Search across all sources (BM-25 + prefix aware embedding models).\\n- Admin Dashboard to configure connectors, document-sets, access, etc.\\n- Custom deep learning models + learn from user feedback.\\n- Easy deployment and ability to host Onyx anywhere of your choosing.\\nQuickstart\", \"blurb\": \"Onyx Documentation home page\\nSearch...\\nNavigation\\nWelcome to Onyx\\nIntroduction\\nWelcome to Onyx\\nIntroduction\\nOnyx Overview\\n\\nWhat is Onyx\\nOnyx (Formerly Danswer) is the AI Assistant connected to your companys docs, apps, and people. Onyx provides a Chat interface and plugs into any LLM of your choice. Onyx can be deployed anywhere and for any scale - on a laptop, on-premise, or to cloud. Since you own the deployment, your user data and chats are fully in your own control. Onyx is MIT licensed and designed to be modular and easily extensible.\", \"semantic_identifier\": \"Introduction - Onyx Documentation\", \"source_type\": \"web\", \"metadata\": {}, \"updated_at\": null, \"link\": \"https://docs.onyx.app/introduction\", \"source_links\": {\"0\": \"https://docs.onyx.app/introduction\"}, \"match_highlights\": [\"\", \"such as A customer wants feature X, is this already supported? 
or Wheres the pull request for feature Y?\\n<hi>Onyx</hi> can also be plugged into existing tools like Slack to get answers and AI chats directly in Slack.\\n\\nDemo\\n\\nMain <hi>Features</hi> \\n- Chat UI with the ability to select documents to chat with.\\n- Create custom AI Assistants\", \"\"]}]}\n"
"{\"answer_piece\": \"I\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" cannot\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" reliably\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" answer\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" the\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" question\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" about\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"2\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"3\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"4\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" the\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" provided\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" information\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" only\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" describes\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" which\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" an\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" AI\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" Assistant\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" formerly\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" known\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" D\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"answer\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"1\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" connects\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" to\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" company's\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" documents\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" personnel\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" providing\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" chat\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" interface\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" integration\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" with\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" any\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" large\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" language\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" model\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" (\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"LL\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"M\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \")\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" of\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" choice\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" It\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" designed\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" to\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" be\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" modular\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" easily\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" extens\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"ible\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" can\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" be\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" deployed\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" on\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" various\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" platforms\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" while\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" ensuring\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" user\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" data\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" control\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" It\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" also\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" serves\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" unified\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" search\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" tool\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" across\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" common\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" workplace\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" applications\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" like\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" Slack\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" Google\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" Drive\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" and\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" Con\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"fluence\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" acting\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" as\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" a\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" subject\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" matter\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" expert\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" for\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" teams\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" [[1]]()\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"{{1}}\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"There\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" is\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" no\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" information\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" available\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" regarding\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" On\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"yx\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"2\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"3\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" or\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" \", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \"4\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \",\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" so\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" I\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" cannot\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" provide\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" details\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" about\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \" them\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"answer_piece\": \".\", \"level\": 0, \"level_question_nr\": 0, \"answer_type\": \"agent_level_answer\"}\n"
"{\"citations\": []}\n"
"{\"message_id\": 476, \"parent_message\": 475, \"latest_child_message\": null, \"message\": \"I cannot reliably answer the question about Onyx 2, 3, and 4, as the provided information only describes Onyx 1, which is an AI Assistant formerly known as Danswer. Onyx 1 connects to a company's documents, applications, and personnel, providing a chat interface and integration with any large language model (LLM) of choice. It is designed to be modular, easily extensible, and can be deployed on various platforms while ensuring user data control. It also serves as a unified search tool across common workplace applications like Slack, Google Drive, and Confluence, acting as a subject matter expert for teams [[1]](){{1}}There is no information available regarding Onyx 2, 3, or 4, so I cannot provide details about them.\", \"rephrased_query\": \"what is onyx 1, 2, 3, 4\", \"context_docs\": {\"top_documents\": [{\"document_id\": \"https://docs.onyx.app/introduction\", \"chunk_ind\": 0, \"semantic_identifier\": \"Introduction - Onyx Documentation\", \"link\": \"https://docs.onyx.app/introduction\", \"blurb\": \"Onyx Documentation home page\\nSearch...\\nNavigation\\nWelcome to Onyx\\nIntroduction\\nWelcome to Onyx\\nIntroduction\\nOnyx Overview\\n\\nWhat is Onyx\\nOnyx (Formerly Danswer) is the AI Assistant connected to your companys docs, apps, and people. Onyx provides a Chat interface and plugs into any LLM of your choice. Onyx can be deployed anywhere and for any scale - on a laptop, on-premise, or to cloud. Since you own the deployment, your user data and chats are fully in your own control. Onyx is MIT licensed and designed to be modular and easily extensible.\", \"source_type\": \"web\", \"boost\": 0, \"hidden\": false, \"metadata\": {}, \"score\": 0.6275177643886491, \"is_relevant\": null, \"relevance_explanation\": null, \"match_highlights\": [\"\", \"such as A customer wants feature X, is this already supported? or Wheres the pull request for feature Y?\\n<hi>Onyx</hi> can also be plugged into existing tools like Slack to get answers and AI chats directly in Slack.\\n\\nDemo\\n\\nMain <hi>Features</hi> \\n- Chat UI with the ability to select documents to chat with.\\n- Create custom AI Assistants\", \"\"], \"updated_at\": null, \"primary_owners\": null, \"secondary_owners\": null, \"is_internet\": false, \"db_doc_id\": 35923}]}, \"message_type\": \"assistant\", \"time_sent\": \"2025-01-12T05:37:18.318251+00:00\", \"overridden_model\": \"gpt-4o\", \"alternate_assistant_id\": 0, \"chat_session_id\": \"40f91916-7419-48d1-9681-5882b0869d88\", \"citations\": {}, \"sub_questions\": [], \"files\": [], \"tool_call\": null}\n"

@@ -1,3 +0,0 @@
import os
__version__ = os.environ.get("DANSWER_VERSION", "") or "Development"

@@ -1,100 +0,0 @@
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph
from danswer.agent_search.answer_query.nodes.answer_check import answer_check
from danswer.agent_search.answer_query.nodes.answer_generation import answer_generation
from danswer.agent_search.answer_query.nodes.format_answer import format_answer
from danswer.agent_search.answer_query.states import AnswerQueryInput
from danswer.agent_search.answer_query.states import AnswerQueryOutput
from danswer.agent_search.answer_query.states import AnswerQueryState
from danswer.agent_search.expanded_retrieval.graph_builder import (
    expanded_retrieval_graph_builder,
)


def answer_query_graph_builder() -> StateGraph:
    graph = StateGraph(
        state_schema=AnswerQueryState,
        input=AnswerQueryInput,
        output=AnswerQueryOutput,
    )

    ### Add nodes ###
    expanded_retrieval = expanded_retrieval_graph_builder().compile()
    graph.add_node(
        node="expanded_retrieval_for_initial_decomp",
        action=expanded_retrieval,
    )
    graph.add_node(
        node="answer_check",
        action=answer_check,
    )
    graph.add_node(
        node="answer_generation",
        action=answer_generation,
    )
    graph.add_node(
        node="format_answer",
        action=format_answer,
    )

    ### Add edges ###
    graph.add_edge(
        start_key=START,
        end_key="expanded_retrieval_for_initial_decomp",
    )
    graph.add_edge(
        start_key="expanded_retrieval_for_initial_decomp",
        end_key="answer_generation",
    )
    graph.add_edge(
        start_key="answer_generation",
        end_key="answer_check",
    )
    graph.add_edge(
        start_key="answer_check",
        end_key="format_answer",
    )
    graph.add_edge(
        start_key="format_answer",
        end_key=END,
    )

    return graph


if __name__ == "__main__":
    from danswer.db.engine import get_session_context_manager
    from danswer.llm.factory import get_default_llms
    from danswer.context.search.models import SearchRequest

    graph = answer_query_graph_builder()
    compiled_graph = graph.compile()
    primary_llm, fast_llm = get_default_llms()
    search_request = SearchRequest(
        query="Who made Excel and what other products did they make?",
    )
    with get_session_context_manager() as db_session:
        inputs = AnswerQueryInput(
            search_request=search_request,
            primary_llm=primary_llm,
            fast_llm=fast_llm,
            db_session=db_session,
            query_to_answer="Who made Excel?",
        )
        output = compiled_graph.invoke(
            input=inputs,
            # debug=True,
            # subgraphs=True,
        )
        print(output)
        # for namespace, chunk in compiled_graph.stream(
        #     input=inputs,
        #     # debug=True,
        #     subgraphs=True,
        # ):
        #     print(namespace)
        #     print(chunk)

View File

@@ -1,30 +0,0 @@
from langchain_core.messages import HumanMessage
from langchain_core.messages import merge_message_runs
from danswer.agent_search.answer_query.states import AnswerQueryState
from danswer.agent_search.answer_query.states import QACheckOutput
from danswer.agent_search.shared_graph_utils.prompts import BASE_CHECK_PROMPT
def answer_check(state: AnswerQueryState) -> QACheckOutput:
msg = [
HumanMessage(
content=BASE_CHECK_PROMPT.format(
question=state["search_request"].query,
base_answer=state["answer"],
)
)
]
fast_llm = state["fast_llm"]
response = list(
fast_llm.stream(
prompt=msg,
)
)
response_str = merge_message_runs(response, chunk_separator="")[0].content
return QACheckOutput(
answer_quality=response_str,
)

View File

@@ -1,32 +0,0 @@
from langchain_core.messages import HumanMessage
from langchain_core.messages import merge_message_runs
from danswer.agent_search.answer_query.states import AnswerQueryState
from danswer.agent_search.answer_query.states import QAGenerationOutput
from danswer.agent_search.shared_graph_utils.prompts import BASE_RAG_PROMPT
from danswer.agent_search.shared_graph_utils.utils import format_docs
def answer_generation(state: AnswerQueryState) -> QAGenerationOutput:
query = state["query_to_answer"]
docs = state["reordered_documents"]
print(f"Number of verified retrieval docs: {len(docs)}")
msg = [
HumanMessage(
content=BASE_RAG_PROMPT.format(question=query, context=format_docs(docs))
)
]
fast_llm = state["fast_llm"]
response = list(
fast_llm.stream(
prompt=msg,
)
)
answer_str = merge_message_runs(response, chunk_separator="")[0].content
return QAGenerationOutput(
answer=answer_str,
)

View File

@@ -1,16 +0,0 @@
from danswer.agent_search.answer_query.states import AnswerQueryOutput
from danswer.agent_search.answer_query.states import AnswerQueryState
from danswer.agent_search.answer_query.states import SearchAnswerResults
def format_answer(state: AnswerQueryState) -> AnswerQueryOutput:
return AnswerQueryOutput(
decomp_answer_results=[
SearchAnswerResults(
query=state["query_to_answer"],
quality=state["answer_quality"],
answer=state["answer"],
documents=state["reordered_documents"],
)
],
)

View File

@@ -1,45 +0,0 @@
from typing import Annotated
from typing import TypedDict
from pydantic import BaseModel
from danswer.agent_search.core_state import PrimaryState
from danswer.agent_search.shared_graph_utils.operators import dedup_inference_sections
from danswer.context.search.models import InferenceSection
class SearchAnswerResults(BaseModel):
query: str
answer: str
quality: str
documents: Annotated[list[InferenceSection], dedup_inference_sections]
class QACheckOutput(TypedDict, total=False):
answer_quality: str
class QAGenerationOutput(TypedDict, total=False):
answer: str
class ExpandedRetrievalOutput(TypedDict):
reordered_documents: Annotated[list[InferenceSection], dedup_inference_sections]
class AnswerQueryState(
PrimaryState,
QACheckOutput,
QAGenerationOutput,
ExpandedRetrievalOutput,
total=True,
):
query_to_answer: str
class AnswerQueryInput(PrimaryState, total=True):
query_to_answer: str
class AnswerQueryOutput(TypedDict):
decomp_answer_results: list[SearchAnswerResults]

View File

@@ -1,15 +0,0 @@
from typing import TypedDict
from sqlalchemy.orm import Session
from danswer.context.search.models import SearchRequest
from danswer.llm.interfaces import LLM
class PrimaryState(TypedDict, total=False):
search_request: SearchRequest
primary_llm: LLM
fast_llm: LLM
# a single session for the entire agent search
# is fine if we are only reading
db_session: Session

View File

@@ -1,114 +0,0 @@
from typing import Any
from langchain_core.messages import HumanMessage
from danswer.agent_search.main.states import MainState
from danswer.agent_search.shared_graph_utils.prompts import COMBINED_CONTEXT
from danswer.agent_search.shared_graph_utils.prompts import MODIFIED_RAG_PROMPT
from danswer.agent_search.shared_graph_utils.utils import format_docs
from danswer.agent_search.shared_graph_utils.utils import normalize_whitespace
# aggregate sub questions and answers
def deep_answer_generation(state: MainState) -> dict[str, Any]:
"""
Generate answer
Args:
state (messages): The current state
Returns:
dict: The updated state with re-phrased question
"""
print("---DEEP GENERATE---")
question = state["original_question"]
docs = state["deduped_retrieval_docs"]
deep_answer_context = state["core_answer_dynamic_context"]
print(f"Number of verified retrieval docs - deep: {len(docs)}")
combined_context = normalize_whitespace(
COMBINED_CONTEXT.format(
deep_answer_context=deep_answer_context, formated_docs=format_docs(docs)
)
)
msg = [
HumanMessage(
content=MODIFIED_RAG_PROMPT.format(
question=question, combined_context=combined_context
)
)
]
# Generate the deep answer with the fast LLM
model = state["fast_llm"]
response = model.invoke(msg)
return {
"deep_answer": response.content,
}
def final_stuff(state: MainState) -> dict[str, Any]:
"""
Invokes the agent model to generate a response based on the current state. Given
the question, it will decide to retrieve using the retriever tool, or simply end.
Args:
state (messages): The current state
Returns:
dict: The updated state with the agent response appended to messages
"""
print("---FINAL---")
messages = state["log_messages"]
time_ordered_messages = [x.pretty_repr() for x in messages]
time_ordered_messages.sort()
print("Message Log:")
print("\n".join(time_ordered_messages))
initial_sub_qas = state["initial_sub_qas"]
initial_sub_qa_list = []
for initial_sub_qa in initial_sub_qas:
if initial_sub_qa["sub_answer_check"] == "yes":
initial_sub_qa_list.append(
f' Question:\n {initial_sub_qa["sub_question"]}\n --\n Answer:\n {initial_sub_qa["sub_answer"]}\n -----'
)
initial_sub_qa_context = "\n".join(initial_sub_qa_list)
base_answer = state["base_answer"]
print(f"Final Base Answer:\n{base_answer}")
print("--------------------------------")
print(f"Initial Answered Sub Questions:\n{initial_sub_qa_context}")
print("--------------------------------")
if not state.get("deep_answer"):
print("No Deep Answer was required")
return {}
deep_answer = state["deep_answer"]
sub_qas = state["sub_qas"]
sub_qa_list = []
for sub_qa in sub_qas:
if sub_qa["sub_answer_check"] == "yes":
sub_qa_list.append(
f' Question:\n {sub_qa["sub_question"]}\n --\n Answer:\n {sub_qa["sub_answer"]}\n -----'
)
sub_qa_context = "\n".join(sub_qa_list)
print(f"Final Base Answer:\n{base_answer}")
print("--------------------------------")
print(f"Final Deep Answer:\n{deep_answer}")
print("--------------------------------")
print("Sub Questions and Answers:")
print(sub_qa_context)
return {}

View File

@@ -1,78 +0,0 @@
import json
import re
from datetime import datetime
from typing import Any
from langchain_core.messages import HumanMessage
from danswer.agent_search.main.states import MainState
from danswer.agent_search.shared_graph_utils.prompts import DEEP_DECOMPOSE_PROMPT
from danswer.agent_search.shared_graph_utils.utils import format_entity_term_extraction
from danswer.agent_search.shared_graph_utils.utils import generate_log_message
def decompose(state: MainState) -> dict[str, Any]:
""" """
node_start_time = datetime.now()
question = state["original_question"]
base_answer = state["base_answer"]
# get the entity term extraction dict and properly format it
entity_term_extraction_dict = state["retrieved_entities_relationships"][
"retrieved_entities_relationships"
]
entity_term_extraction_str = format_entity_term_extraction(
entity_term_extraction_dict
)
initial_question_answers = state["initial_sub_qas"]
addressed_question_list = [
x["sub_question"]
for x in initial_question_answers
if x["sub_answer_check"] == "yes"
]
failed_question_list = [
x["sub_question"]
for x in initial_question_answers
if x["sub_answer_check"] == "no"
]
msg = [
HumanMessage(
content=DEEP_DECOMPOSE_PROMPT.format(
question=question,
entity_term_extraction_str=entity_term_extraction_str,
base_answer=base_answer,
answered_sub_questions="\n - ".join(addressed_question_list),
failed_sub_questions="\n - ".join(failed_question_list),
),
)
]
# Generate the refined sub-questions with the fast LLM
model = state["fast_llm"]
response = model.invoke(msg)
cleaned_response = re.sub(r"```json\n|\n```", "", response.pretty_repr())
parsed_response = json.loads(cleaned_response)
sub_questions_dict = {}
for sub_question_nr, sub_question_dict in enumerate(
parsed_response["sub_questions"]
):
sub_question_dict["answered"] = False
sub_question_dict["verified"] = False
sub_questions_dict[sub_question_nr] = sub_question_dict
return {
"decomposed_sub_questions_dict": sub_questions_dict,
"log_messages": generate_log_message(
message="deep - decompose",
node_start_time=node_start_time,
graph_start_time=state["graph_start_time"],
),
}

View File

@@ -1,40 +0,0 @@
import json
import re
from typing import Any
from langchain_core.messages import HumanMessage
from langchain_core.messages import merge_message_runs
from danswer.agent_search.main.states import MainState
from danswer.agent_search.shared_graph_utils.prompts import ENTITY_TERM_PROMPT
from danswer.agent_search.shared_graph_utils.utils import format_docs
def entity_term_extraction(state: MainState) -> dict[str, Any]:
"""Extract entities and terms from the question and context"""
question = state["original_question"]
docs = state["deduped_retrieval_docs"]
doc_context = format_docs(docs)
msg = [
HumanMessage(
content=ENTITY_TERM_PROMPT.format(question=question, context=doc_context),
)
]
fast_llm = state["fast_llm"]
# Extract entities, relationships and terms with the fast LLM
llm_response_list = list(
fast_llm.stream(
prompt=msg,
)
)
llm_response = merge_message_runs(llm_response_list, chunk_separator="")[0].content
cleaned_response = re.sub(r"```json\n|\n```", "", llm_response)
parsed_response = json.loads(cleaned_response)
return {
"retrieved_entities_relationships": parsed_response,
}

View File

@@ -1,30 +0,0 @@
from typing import Any
from danswer.agent_search.main.states import MainState
# aggregate sub questions and answers
def sub_qa_level_aggregator(state: MainState) -> dict[str, Any]:
sub_qas = state["sub_qas"]
dynamic_context_list = [
"Below you will find useful information to answer the original question:"
]
checked_sub_qas = []
for core_answer_sub_qa in sub_qas:
question = core_answer_sub_qa["sub_question"]
answer = core_answer_sub_qa["sub_answer"]
verified = core_answer_sub_qa["sub_answer_check"]
if verified == "yes":
dynamic_context_list.append(
f"Question:\n{question}\n\nAnswer:\n{answer}\n\n---\n\n"
)
checked_sub_qas.append({"sub_question": question, "sub_answer": answer})
dynamic_context = "\n".join(dynamic_context_list)
return {
"core_answer_dynamic_context": dynamic_context,
"checked_sub_qas": checked_sub_qas,
}

View File

@@ -1,19 +0,0 @@
from typing import Any
from danswer.agent_search.main.states import MainState
def sub_qa_manager(state: MainState) -> dict[str, Any]:
""" """
sub_questions_dict = state["decomposed_sub_questions_dict"]
sub_questions = {}
for sub_question_nr, sub_question_dict in sub_questions_dict.items():
sub_questions[sub_question_nr] = sub_question_dict["sub_question"]
return {
"sub_questions": sub_questions,
"num_new_question_iterations": 0,
}

View File

@@ -1,44 +0,0 @@
from collections.abc import Hashable
from langchain_core.messages import HumanMessage
from langchain_core.messages import merge_message_runs
from langgraph.types import Send
from danswer.agent_search.expanded_retrieval.nodes.doc_retrieval import RetrieveInput
from danswer.agent_search.expanded_retrieval.states import ExpandedRetrievalInput
from danswer.agent_search.shared_graph_utils.prompts import REWRITE_PROMPT_MULTI
from danswer.llm.interfaces import LLM
def parallel_retrieval_edge(state: ExpandedRetrievalInput) -> list[Send | Hashable]:
print(f"parallel_retrieval_edge state: {state.keys()}")
# This should be better...
question = state.get("query_to_answer") or state["search_request"].query
llm: LLM = state["fast_llm"]
msg = [
HumanMessage(
content=REWRITE_PROMPT_MULTI.format(question=question),
)
]
llm_response_list = list(
llm.stream(
prompt=msg,
)
)
llm_response = merge_message_runs(llm_response_list, chunk_separator="")[0].content
print(f"llm_response: {llm_response}")
rewritten_queries = llm_response.split("\n")
print(f"rewritten_queries: {rewritten_queries}")
return [
Send(
"doc_retrieval",
RetrieveInput(query_to_retrieve=query, **state),
)
for query in rewritten_queries
]
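
Note on the fan-out pattern above: parallel_retrieval_edge returns one Send per rewritten query, and LangGraph runs the target node once per Send, in parallel. A minimal, self-contained sketch of that pattern follows; the node and state names are illustrative, not from this repo.

import operator
from typing import Annotated, TypedDict

from langgraph.graph import END, START, StateGraph
from langgraph.types import Send


class FanOutState(TypedDict):
    queries: list[str]
    # parallel branches write to this field; the reducer concatenates their updates
    results: Annotated[list[str], operator.add]


def fan_out(state: FanOutState) -> list[Send]:
    # one Send per query; "worker" is invoked once per Send, in parallel
    return [Send("worker", {"queries": [q], "results": []}) for q in state["queries"]]


def worker(state: FanOutState) -> dict:
    return {"results": [f"handled: {state['queries'][0]}"]}


graph = StateGraph(FanOutState)
graph.add_node("worker", worker)
graph.add_conditional_edges(START, fan_out, ["worker"])
graph.add_edge("worker", END)
print(graph.compile().invoke({"queries": ["a", "b"], "results": []}))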

View File

@@ -1,88 +0,0 @@
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph
from danswer.agent_search.expanded_retrieval.edges import parallel_retrieval_edge
from danswer.agent_search.expanded_retrieval.nodes.doc_reranking import doc_reranking
from danswer.agent_search.expanded_retrieval.nodes.doc_retrieval import doc_retrieval
from danswer.agent_search.expanded_retrieval.nodes.doc_verification import (
doc_verification,
)
from danswer.agent_search.expanded_retrieval.nodes.verification_kickoff import (
verification_kickoff,
)
from danswer.agent_search.expanded_retrieval.states import ExpandedRetrievalInput
from danswer.agent_search.expanded_retrieval.states import ExpandedRetrievalOutput
from danswer.agent_search.expanded_retrieval.states import ExpandedRetrievalState
def expanded_retrieval_graph_builder() -> StateGraph:
graph = StateGraph(
state_schema=ExpandedRetrievalState,
input=ExpandedRetrievalInput,
output=ExpandedRetrievalOutput,
)
### Add nodes ###
graph.add_node(
node="doc_retrieval",
action=doc_retrieval,
)
graph.add_node(
node="verification_kickoff",
action=verification_kickoff,
)
graph.add_node(
node="doc_verification",
action=doc_verification,
)
graph.add_node(
node="doc_reranking",
action=doc_reranking,
)
### Add edges ###
graph.add_conditional_edges(
source=START,
path=parallel_retrieval_edge,
path_map=["doc_retrieval"],
)
graph.add_edge(
start_key="doc_retrieval",
end_key="verification_kickoff",
)
graph.add_edge(
start_key="doc_verification",
end_key="doc_reranking",
)
graph.add_edge(
start_key="doc_reranking",
end_key=END,
)
return graph
if __name__ == "__main__":
from danswer.db.engine import get_session_context_manager
from danswer.llm.factory import get_default_llms
from danswer.context.search.models import SearchRequest
graph = expanded_retrieval_graph_builder()
compiled_graph = graph.compile()
primary_llm, fast_llm = get_default_llms()
search_request = SearchRequest(
query="Who made Excel and what other products did they make?",
)
with get_session_context_manager() as db_session:
inputs = ExpandedRetrievalInput(
search_request=search_request,
primary_llm=primary_llm,
fast_llm=fast_llm,
db_session=db_session,
query_to_answer="Who made Excel?",
)
for thing in compiled_graph.stream(inputs, debug=True):
print(thing)

View File

@@ -1,11 +0,0 @@
from danswer.agent_search.expanded_retrieval.states import DocRerankingOutput
from danswer.agent_search.expanded_retrieval.states import ExpandedRetrievalState
def doc_reranking(state: ExpandedRetrievalState) -> DocRerankingOutput:
print(f"doc_reranking state: {state.keys()}")
verified_documents = state["verified_documents"]
reranked_documents = verified_documents
return DocRerankingOutput(reranked_documents=reranked_documents)

View File

@@ -1,47 +0,0 @@
from danswer.agent_search.expanded_retrieval.states import DocRetrievalOutput
from danswer.agent_search.expanded_retrieval.states import ExpandedRetrievalState
from danswer.context.search.models import InferenceSection
from danswer.context.search.models import SearchRequest
from danswer.context.search.pipeline import SearchPipeline
from danswer.db.engine import get_session_context_manager
class RetrieveInput(ExpandedRetrievalState):
query_to_retrieve: str
def doc_retrieval(state: RetrieveInput) -> DocRetrievalOutput:
# def doc_retrieval(state: RetrieveInput) -> Command[Literal["doc_verification"]]:
"""
Retrieve documents
Args:
state (dict): The current graph state
Returns:
state (dict): New key added to state, documents, that contains retrieved documents
"""
print(f"doc_retrieval state: {state.keys()}")
state["query_to_retrieve"]
documents: list[InferenceSection] = []
llm = state["primary_llm"]
fast_llm = state["fast_llm"]
# db_session = state["db_session"]
query_to_retrieve = state["search_request"].query
with get_session_context_manager() as db_session1:
documents = SearchPipeline(
search_request=SearchRequest(
query=query_to_retrieve,
),
user=None,
llm=llm,
fast_llm=fast_llm,
db_session=db_session1,
).reranked_sections
print(f"retrieved documents: {len(documents)}")
return DocRetrievalOutput(
retrieved_documents=documents,
)

View File

@@ -1,60 +0,0 @@
from langchain_core.messages import HumanMessage
from langchain_core.messages import merge_message_runs
from danswer.agent_search.expanded_retrieval.states import DocVerificationOutput
from danswer.agent_search.expanded_retrieval.states import ExpandedRetrievalState
from danswer.agent_search.shared_graph_utils.models import BinaryDecision
from danswer.agent_search.shared_graph_utils.prompts import VERIFIER_PROMPT
from danswer.context.search.models import InferenceSection
class DocVerificationInput(ExpandedRetrievalState, total=True):
doc_to_verify: InferenceSection
def doc_verification(state: DocVerificationInput) -> DocVerificationOutput:
"""
Check whether the document is relevant for the original user question
Args:
state (VerifierState): The current state
Returns:
dict: The updated state with the final decision
"""
print(f"doc_verification state: {state.keys()}")
original_query = state["search_request"].query
doc_to_verify = state["doc_to_verify"]
document_content = doc_to_verify.combined_content
msg = [
HumanMessage(
content=VERIFIER_PROMPT.format(
question=original_query, document_content=document_content
)
)
]
fast_llm = state["fast_llm"]
response = list(
fast_llm.stream(
prompt=msg,
)
)
response_string = merge_message_runs(response, chunk_separator="")[0].content
# Convert string response to proper dictionary format
decision_dict = {"decision": response_string.lower()}
formatted_response = BinaryDecision.model_validate(decision_dict)
print(f"Verdict: {formatted_response.decision}")
verified_documents = []
if formatted_response.decision == "yes":
verified_documents.append(doc_to_verify)
return DocVerificationOutput(
verified_documents=verified_documents,
)

View File

@@ -1,27 +0,0 @@
from typing import Literal
from langgraph.types import Command
from langgraph.types import Send
from danswer.agent_search.expanded_retrieval.nodes.doc_verification import (
DocVerificationInput,
)
from danswer.agent_search.expanded_retrieval.states import ExpandedRetrievalState
def verification_kickoff(
state: ExpandedRetrievalState,
) -> Command[Literal["doc_verification"]]:
print(f"verification_kickoff state: {state.keys()}")
documents = state["retrieved_documents"]
return Command(
update={},
goto=[
Send(
node="doc_verification",
arg=DocVerificationInput(doc_to_verify=doc, **state),
)
for doc in documents
],
)

View File

@@ -1,36 +0,0 @@
from typing import Annotated
from typing import TypedDict
from danswer.agent_search.core_state import PrimaryState
from danswer.agent_search.shared_graph_utils.operators import dedup_inference_sections
from danswer.context.search.models import InferenceSection
class DocRetrievalOutput(TypedDict, total=False):
retrieved_documents: Annotated[list[InferenceSection], dedup_inference_sections]
class DocVerificationOutput(TypedDict, total=False):
verified_documents: Annotated[list[InferenceSection], dedup_inference_sections]
class DocRerankingOutput(TypedDict, total=False):
reranked_documents: Annotated[list[InferenceSection], dedup_inference_sections]
class ExpandedRetrievalState(
PrimaryState,
DocRetrievalOutput,
DocVerificationOutput,
DocRerankingOutput,
total=True,
):
query_to_answer: str
class ExpandedRetrievalInput(PrimaryState, total=True):
query_to_answer: str
class ExpandedRetrievalOutput(TypedDict):
reordered_documents: Annotated[list[InferenceSection], dedup_inference_sections]

View File

@@ -1,61 +0,0 @@
from collections.abc import Hashable
from langgraph.types import Send
from danswer.agent_search.answer_query.states import AnswerQueryInput
from danswer.agent_search.main.states import MainState
def parallelize_decompozed_answer_queries(state: MainState) -> list[Send | Hashable]:
return [
Send(
"answer_query",
AnswerQueryInput(
**state,
query_to_answer=query,
),
)
for query in state["initial_decomp_queries"]
]
# def continue_to_answer_sub_questions(state: QAState) -> Union[Hashable, list[Hashable]]:
# # Routes re-written queries to the (parallel) retrieval steps
# # Notice the 'Send()' API that takes care of the parallelization
# return [
# Send(
# "sub_answers_graph",
# ResearchQAState(
# sub_question=sub_question["sub_question_str"],
# sub_question_nr=sub_question["sub_question_nr"],
# graph_start_time=state["graph_start_time"],
# primary_llm=state["primary_llm"],
# fast_llm=state["fast_llm"],
# ),
# )
# for sub_question in state["sub_questions"]
# ]
# def continue_to_deep_answer(state: QAState) -> Union[Hashable, list[Hashable]]:
# print("---GO TO DEEP ANSWER OR END---")
# base_answer = state["base_answer"]
# question = state["original_question"]
# BASE_CHECK_MESSAGE = [
# HumanMessage(
# content=BASE_CHECK_PROMPT.format(question=question, base_answer=base_answer)
# )
# ]
# model = state["fast_llm"]
# response = model.invoke(BASE_CHECK_MESSAGE)
# print(f"CAN WE CONTINUE W/O GENERATING A DEEP ANSWER? - {response.pretty_repr()}")
# if response.pretty_repr() == "no":
# return "decompose"
# else:
# return "end"

View File

@@ -1,98 +0,0 @@
from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph
from danswer.agent_search.answer_query.graph_builder import answer_query_graph_builder
from danswer.agent_search.expanded_retrieval.graph_builder import (
expanded_retrieval_graph_builder,
)
from danswer.agent_search.main.edges import parallelize_decompozed_answer_queries
from danswer.agent_search.main.nodes.base_decomp import main_decomp_base
from danswer.agent_search.main.nodes.generate_initial_answer import (
generate_initial_answer,
)
from danswer.agent_search.main.states import MainInput
from danswer.agent_search.main.states import MainState
def main_graph_builder() -> StateGraph:
graph = StateGraph(
state_schema=MainState,
input=MainInput,
)
### Add nodes ###
graph.add_node(
node="base_decomp",
action=main_decomp_base,
)
answer_query_subgraph = answer_query_graph_builder().compile()
graph.add_node(
node="answer_query",
action=answer_query_subgraph,
)
expanded_retrieval_subgraph = expanded_retrieval_graph_builder().compile()
graph.add_node(
node="expanded_retrieval",
action=expanded_retrieval_subgraph,
)
graph.add_node(
node="generate_initial_answer",
action=generate_initial_answer,
)
### Add edges ###
graph.add_edge(
start_key=START,
end_key="expanded_retrieval",
)
graph.add_edge(
start_key=START,
end_key="base_decomp",
)
graph.add_conditional_edges(
source="base_decomp",
path=parallelize_decompozed_answer_queries,
path_map=["answer_query"],
)
graph.add_edge(
start_key=["answer_query", "expanded_retrieval"],
end_key="generate_initial_answer",
)
graph.add_edge(
start_key="generate_initial_answer",
end_key=END,
)
return graph
if __name__ == "__main__":
from danswer.db.engine import get_session_context_manager
from danswer.llm.factory import get_default_llms
from danswer.context.search.models import SearchRequest
graph = main_graph_builder()
compiled_graph = graph.compile()
primary_llm, fast_llm = get_default_llms()
search_request = SearchRequest(
query="If i am familiar with the function that I need, how can I type it into a cell?",
)
with get_session_context_manager() as db_session:
inputs = MainInput(
search_request=search_request,
primary_llm=primary_llm,
fast_llm=fast_llm,
db_session=db_session,
)
for thing in compiled_graph.stream(
input=inputs,
# stream_mode="debug",
# debug=True,
subgraphs=True,
):
# print(thing)
print()
print()

View File

@@ -1,31 +0,0 @@
from langchain_core.messages import HumanMessage
from danswer.agent_search.main.states import BaseDecompOutput
from danswer.agent_search.main.states import MainState
from danswer.agent_search.shared_graph_utils.prompts import INITIAL_DECOMPOSITION_PROMPT
from danswer.agent_search.shared_graph_utils.utils import clean_and_parse_list_string
def main_decomp_base(state: MainState) -> BaseDecompOutput:
question = state["search_request"].query
msg = [
HumanMessage(
content=INITIAL_DECOMPOSITION_PROMPT.format(question=question),
)
]
# Get the rewritten queries in a defined format
model = state["fast_llm"]
response = model.invoke(msg)
content = response.pretty_repr()
list_of_subquestions = clean_and_parse_list_string(content)
decomp_list: list[str] = [
sub_question["sub_question"].strip() for sub_question in list_of_subquestions
]
return BaseDecompOutput(
initial_decomp_queries=decomp_list,
)

View File

@@ -1,53 +0,0 @@
from langchain_core.messages import HumanMessage
from danswer.agent_search.main.states import InitialAnswerOutput
from danswer.agent_search.main.states import MainState
from danswer.agent_search.shared_graph_utils.prompts import INITIAL_RAG_PROMPT
from danswer.agent_search.shared_graph_utils.utils import format_docs
def generate_initial_answer(state: MainState) -> InitialAnswerOutput:
print("---GENERATE INITIAL---")
question = state["search_request"].query
docs = state["documents"]
decomp_answer_results = state["decomp_answer_results"]
good_qa_list: list[str] = []
_SUB_QUESTION_ANSWER_TEMPLATE = """
Sub-Question:\n - {sub_question}\n --\nAnswer:\n - {sub_answer}\n\n
"""
for decomp_answer_result in decomp_answer_results:
if (
decomp_answer_result.quality.lower() == "yes"
and len(decomp_answer_result.answer) > 0
and decomp_answer_result.answer != "I don't know"
):
good_qa_list.append(
_SUB_QUESTION_ANSWER_TEMPLATE.format(
sub_question=decomp_answer_result.query,
sub_answer=decomp_answer_result.answer,
)
)
sub_question_answer_str = "\n\n------\n\n".join(good_qa_list)
msg = [
HumanMessage(
content=INITIAL_RAG_PROMPT.format(
question=question,
context=format_docs(docs),
answered_sub_questions=sub_question_answer_str,
)
)
]
# Generate the initial answer with the fast LLM
model = state["fast_llm"]
response = model.invoke(msg)
answer = response.pretty_repr()
print(answer)
return InitialAnswerOutput(initial_answer=answer)

View File

@@ -1,37 +0,0 @@
from operator import add
from typing import Annotated
from typing import TypedDict
from danswer.agent_search.answer_query.states import SearchAnswerResults
from danswer.agent_search.core_state import PrimaryState
from danswer.agent_search.shared_graph_utils.operators import dedup_inference_sections
from danswer.context.search.models import InferenceSection
class BaseDecompOutput(TypedDict, total=False):
initial_decomp_queries: list[str]
class InitialAnswerOutput(TypedDict, total=False):
initial_answer: str
class MainState(
PrimaryState,
BaseDecompOutput,
InitialAnswerOutput,
total=True,
):
documents: Annotated[list[InferenceSection], dedup_inference_sections]
decomp_answer_results: Annotated[list[SearchAnswerResults], add]
class MainInput(PrimaryState, total=True):
pass
class MainOutput(TypedDict):
"""
This is not used: defining the output only matters for filtering the result of
a .invoke() call, but since we stream, the entire state is yielded anyway.
"""

View File

@@ -1,27 +0,0 @@
from danswer.agent_search.primary_graph.graph_builder import build_core_graph
from danswer.llm.answering.answer import AnswerStream
from danswer.llm.interfaces import LLM
from danswer.tools.tool import Tool
def run_graph(
query: str,
llm: LLM,
tools: list[Tool],
) -> AnswerStream:
graph = build_core_graph()
inputs = {
"original_query": query,
"messages": [],
"tools": tools,
"llm": llm,
}
compiled_graph = graph.compile()
output = compiled_graph.invoke(input=inputs)
yield from output
if __name__ == "__main__":
pass
# run_graph("What is the capital of France?", llm, [])

View File

@@ -1,12 +0,0 @@
from typing import Literal
from pydantic import BaseModel
# Pydantic models for structured outputs
class RewrittenQueries(BaseModel):
rewritten_queries: list[str]
class BinaryDecision(BaseModel):
decision: Literal["yes", "no"]

View File

@@ -1,9 +0,0 @@
from danswer.context.search.models import InferenceSection
from danswer.llm.answering.prune_and_merge import _merge_sections
def dedup_inference_sections(
list1: list[InferenceSection], list2: list[InferenceSection]
) -> list[InferenceSection]:
deduped = _merge_sections(list1 + list2)
return deduped
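
A note on how this operator is used: fields annotated with it (e.g. Annotated[list[InferenceSection], dedup_inference_sections]) are reducers in the LangGraph sense. When several parallel branches write to the same field, LangGraph merges the existing and incoming values by calling the reducer. A conceptual sketch, with empty placeholder lists:

current: list[InferenceSection] = []   # value already in the graph state
incoming: list[InferenceSection] = []  # update returned by one parallel branch
merged = dedup_inference_sections(current, incoming)  # deduped union via _merge_sections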

View File

@@ -1,427 +0,0 @@
REWRITE_PROMPT_MULTI_ORIGINAL = """ \n
Please convert an initial user question into 2-3 more appropriate, short and pointed search queries for retrieval from a
document store. In particular, try to resolve ambiguities and make the search queries more specific,
enabling the system to search more broadly.
Also, try to make the search queries not redundant, i.e. not too similar! \n\n
Here is the initial question:
\n ------- \n
{question}
\n ------- \n
Formulate the queries separated by '--' (Do not say 'Query 1: ...', just write the query text): """
REWRITE_PROMPT_MULTI = """ \n
Please create a list of 2-3 sample documents that could answer an original question. Each document
should be about as long as the original question. \n
Here is the initial question:
\n ------- \n
{question}
\n ------- \n
Formulate the sample documents separated by '--' (Do not say 'Document 1: ...', just write the text): """
BASE_RAG_PROMPT = """ \n
You are an assistant for question-answering tasks. Use the context provided below - and only the
provided context - to answer the question. If you don't know the answer or if the provided context is
empty, just say "I don't know". Do not use your internal knowledge!
Again, only use the provided context and do not use your internal knowledge! If you cannot answer the
question based on the context, say "I don't know". It is a matter of life and death that you do NOT
use your internal knowledge, just the provided information!
Use three sentences maximum and keep the answer concise.
\nQuestion:\n {question} \nContext:\n {context} \n\n
\n\n
Answer:"""
BASE_CHECK_PROMPT = """ \n
Please check whether 1) the suggested answer seems to fully address the original question AND 2) the
original question requests a simple, factual answer, and there are no ambiguities, judgements,
aggregations, or any other complications that may require extra context. (I.e., if the question is
somewhat addressed, but the answer would benefit from more context, then answer with 'no'.)
Please only answer with 'yes' or 'no' \n
Here is the initial question:
\n ------- \n
{question}
\n ------- \n
Here is the proposed answer:
\n ------- \n
{base_answer}
\n ------- \n
Please answer with yes or no:"""
VERIFIER_PROMPT = """ \n
Please check whether the document seems to be relevant for answering the question. Please
only answer with 'yes' or 'no' \n
Here is the initial question:
\n ------- \n
{question}
\n ------- \n
Here is the document text:
\n ------- \n
{document_content}
\n ------- \n
Please answer with yes or no:"""
INITIAL_DECOMPOSITION_PROMPT_BASIC = """ \n
Please decompose an initial user question into not more than 4 appropriate sub-questions that help to
answer the original question. The purpose for this decomposition is to isolate individual entities
(i.e., 'compare sales of company A and company B' -> 'what are sales for company A' + 'what are sales
for company B'), split ambiguous terms (i.e., 'what is our success with company A' -> 'what are our
sales with company A' + 'what is our market share with company A' + 'is company A a reference customer
for us'), etc. Each sub-question should realistically be answerable by a good RAG system. \n
Here is the initial question:
\n ------- \n
{question}
\n ------- \n
Please formulate your answer as a list of sub-questions:
Answer:
"""
REWRITE_PROMPT_SINGLE = """ \n
Please convert an initial user question into a more appropriate search query for retrieval from a
document store. \n
Here is the initial question:
\n ------- \n
{question}
\n ------- \n
Formulate the query: """
MODIFIED_RAG_PROMPT = """You are an assistant for question-answering tasks. Use the context provided below
- and only this context - to answer the question. If you don't know the answer, just say "I don't know".
Use three sentences maximum and keep the answer concise.
Also pay particular attention to the sub-questions and their answers, as they may enrich the answer.
Again, only use the provided context and do not use your internal knowledge! If you cannot answer the
question based on the context, say "I don't know". It is a matter of life and death that you do NOT
use your internal knowledge, just the provided information!
\nQuestion: {question}
\nContext: {combined_context} \n
Answer:"""
ORIG_DEEP_DECOMPOSE_PROMPT = """ \n
An initial user question needs to be answered. An initial answer has been provided but it wasn't quite
good enough. Also, some sub-questions had been answered and this information has been used to provide
the initial answer. Some other sub-questions may have been suggested based on little knowledge, but they
were not directly answerable. Also, some entities, relationships and terms are given to you so that
you have an idea of what the available data looks like.
Your role is to generate 3-5 new sub-questions that would help to answer the initial question,
considering:
1) The initial question
2) The initial answer that was found to be unsatisfactory
3) The sub-questions that were answered
4) The sub-questions that were suggested but not answered
5) The entities, relationships and terms that were extracted from the context
The individual questions should be answerable by a good RAG system.
So a good idea would be to use the sub-questions to resolve ambiguities and/or to separate the
question for different entities that may be involved in the original question, but in a way that does
not duplicate questions that were already tried.
Additional Guidelines:
- The sub-questions should be specific to the question and provide richer context for the question,
resolve ambiguities, or address shortcomings of the initial answer
- Each sub-question - when answered - should be relevant for the answer to the original question
- The sub-questions should be free from comparisons, ambiguities, judgements, aggregations, or any
other complications that may require extra context.
- The sub-questions MUST have the full context of the original question so that they can be executed by
a RAG system independently without the original question available
(Example:
- initial question: "What is the capital of France?"
- bad sub-question: "What is the name of the river there?"
- good sub-question: "What is the name of the river that flows through Paris?")
- For each sub-question, please provide a short explanation for why it is a good sub-question. So
generate a list of dictionaries with the following format:
[{{"sub_question": <sub-question>, "explanation": <explanation>, "search_term": <rewrite the
sub-question using as a search phrase for the document store>}}, ...]
\n\n
Here is the initial question:
\n ------- \n
{question}
\n ------- \n
Here is the initial sub-optimal answer:
\n ------- \n
{base_answer}
\n ------- \n
Here are the sub-questions that were answered:
\n ------- \n
{answered_sub_questions}
\n ------- \n
Here are the sub-questions that were suggested but not answered:
\n ------- \n
{failed_sub_questions}
\n ------- \n
And here are the entities, relationships and terms extracted from the context:
\n ------- \n
{entity_term_extraction_str}
\n ------- \n
Please generate the list of good, fully contextualized sub-questions that would help to address the
main question. Again, please find questions that are NOT overlapping too much with the already answered
sub-questions or those that already were suggested and failed.
In other words - what can we try in addition to what has been tried so far?
Please think through it step by step and then generate the list of json dictionaries with the following
format:
{{"sub_questions": [{{"sub_question": <sub-question>,
"explanation": <explanation>,
"search_term": <rewrite the sub-question using as a search phrase for the document store>}},
...]}} """
DEEP_DECOMPOSE_PROMPT = """ \n
An initial user question needs to be answered. An initial answer has been provided but it wasn't quite
good enough. Also, some sub-questions had been answered and this information has been used to provide
the initial answer. Some other sub-questions may have been suggested based on little knowledge, but they
were not directly answerable. Also, some entities, relationships and terms are given to you so that
you have an idea of what the available data looks like.
Your role is to generate 4-6 new sub-questions that would help to answer the initial question,
considering:
1) The initial question
2) The initial answer that was found to be unsatisfactory
3) The sub-questions that were answered
4) The sub-questions that were suggested but not answered
5) The entities, relationships and terms that were extracted from the context
The individual questions should be answerable by a good RAG system.
So a good idea would be to use the sub-questions to resolve ambiguities and/or to separate the
question for different entities that may be involved in the original question, but in a way that does
not duplicate questions that were already tried.
Additional Guidelines:
- The sub-questions should be specific to the question and provide richer context for the question,
resolve ambiguities, or address shortcomings of the initial answer
- Each sub-question - when answered - should be relevant for the answer to the original question
- The sub-questions should be free from comparisons, ambiguities, judgements, aggregations, or any
other complications that may require extra context.
- The sub-questions MUST have the full context of the original question so that they can be executed by
a RAG system independently without the original question available
(Example:
- initial question: "What is the capital of France?"
- bad sub-question: "What is the name of the river there?"
- good sub-question: "What is the name of the river that flows through Paris?")
- For each sub-question, please also provide a search term that can be used to retrieve relevant
documents from a document store.
\n\n
Here is the initial question:
\n ------- \n
{question}
\n ------- \n
Here is the initial sub-optimal answer:
\n ------- \n
{base_answer}
\n ------- \n
Here are the sub-questions that were answered:
\n ------- \n
{answered_sub_questions}
\n ------- \n
Here are the sub-questions that were suggested but not answered:
\n ------- \n
{failed_sub_questions}
\n ------- \n
And here are the entities, relationships and terms extracted from the context:
\n ------- \n
{entity_term_extraction_str}
\n ------- \n
Please generate the list of good, fully contextualized sub-questions that would help to address the
main question. Again, please find questions that are NOT overlapping too much with the already answered
sub-questions or those that already were suggested and failed.
In other words - what can we try in addition to what has been tried so far?
Generate the list of json dictionaries with the following format:
{{"sub_questions": [{{"sub_question": <sub-question>,
"search_term": <rewrite the sub-question using as a search phrase for the document store>}},
...]}} """
DECOMPOSE_PROMPT = """ \n
For an initial user question, please generate 5-10 individual sub-questions whose answers would help
\n to answer the initial question. The individual questions should be answerable by a good RAG system.
So a good idea would be to \n use the sub-questions to resolve ambiguities and/or to separate the
question for different entities that may be involved in the original question.
In order to arrive at meaningful sub-questions, please also consider the context retrieved from the
document store, expressed as entities, relationships and terms. You can also think about the types
mentioned in brackets.
Guidelines:
- The sub-questions should be specific to the question and provide richer context for the question,
and/or resolve ambiguities
- Each sub-question - when answered - should be relevant for the answer to the original question
- The sub-questions should be free from comparisons, ambiguities, judgements, aggregations, or any
other complications that may require extra context.
- The sub-questions MUST have the full context of the original question so that they can be executed by
a RAG system independently without the original question available
(Example:
- initial question: "What is the capital of France?"
- bad sub-question: "What is the name of the river there?"
- good sub-question: "What is the name of the river that flows through Paris?")
- For each sub-question, please provide a short explanation for why it is a good sub-question. So
generate a list of dictionaries with the following format:
[{{"sub_question": <sub-question>, "explanation": <explanation>, "search_term": <rewrite the
sub-question using as a search phrase for the document store>}}, ...]
\n\n
Here is the initial question:
\n ------- \n
{question}
\n ------- \n
And here are the entities, relationships and terms extracted from the context:
\n ------- \n
{entity_term_extraction_str}
\n ------- \n
Please generate the list of good, fully contextualized sub-questions that would help to address the
main question. Don't be too specific unless the original question is specific.
Please think through it step by step and then generate the list of json dictionaries with the following
format:
{{"sub_questions": [{{"sub_question": <sub-question>,
"explanation": <explanation>,
"search_term": <rewrite the sub-question using as a search phrase for the document store>}},
...]}} """
#### Consolidations
COMBINED_CONTEXT = """-------
Below you will find useful information to answer the original question. First, you see a number of
sub-questions with their answers. This information should be considered to be more focused and
somewhat more specific to the original question, as it tries to contextualize facts.
After that you will see the documents that were considered to be relevant for answering the original question.
Here are the sub-questions and their answers:
\n\n {deep_answer_context} \n\n
\n\n Here are the documents that were considered to be relevant to answer the original question:
\n\n {formated_docs} \n\n
----------------
"""
SUB_QUESTION_EXPLANATION_RANKER_PROMPT = """-------
Below you will find a question that we ultimately want to answer (the original question) and a list of
motivations in arbitrary order for generated sub-questions that are supposed to help us answer the
original question. The motivations are formatted as <motivation number>: <motivation explanation>.
(Again, the numbering is arbitrary and does not necessarily mean that 1 is the most relevant
motivation and 2 is less relevant.)
Please rank the motivations in order of relevance for answering the original question. Also, try to
ensure that the top questions do not duplicate too much, i.e. that they are not too similar.
Ultimately, create a list of the motivation numbers in which the most relevant
motivation comes first.
Here is the original question:
\n\n {original_question} \n\n
\n\n Here is the list of sub-question motivations:
\n\n {sub_question_explanations} \n\n
----------------
Please think step by step and then generate the ranked list of motivations.
Please format your answer as a json object in the following format:
{{"reasonning": <explain your reasoning for the ranking>,
"ranked_motivations": <ranked list of motivation numbers>}}
"""
INITIAL_DECOMPOSITION_PROMPT = """ \n
Please decompose an initial user question into 2 or 3 appropriate sub-questions that help to
answer the original question. The purpose for this decomposition is to isolate individual entities
(i.e., 'compare sales of company A and company B' -> 'what are sales for company A' + 'what are sales
for company B'), split ambiguous terms (i.e., 'what is our success with company A' -> 'what are our
sales with company A' + 'what is our market share with company A' + 'is company A a reference customer
for us'), etc. Each sub-question should realistically be answerable by a good RAG system. \n
For each sub-question, please also create one search term that can be used to retrieve relevant
documents from a document store.
Here is the initial question:
\n ------- \n
{question}
\n ------- \n
Please formulate your answer as a list of json objects with the following format:
[{{"sub_question": <sub-question>, "search_term": <search term>}}, ...]
Answer:
"""
INITIAL_RAG_PROMPT = """ \n
You are an assistant for question-answering tasks. Use the information provided below - and only the
provided information - to answer the provided question.
The information provided below consists of:
1) a number of answered sub-questions - these are very important(!) and definitely should be
considered to answer the question.
2) a number of documents that were also deemed relevant for the question.
If you don't know the answer or if the provided information is empty or insufficient, just say
"I don't know". Do not use your internal knowledge!
Again, only use the provided information and do not use your internal knowledge! It is a matter of life
and death that you do NOT use your internal knowledge, just the provided information!
Try to keep your answer concise.
And here is the question and the provided information:
\n
\nQuestion:\n {question}
\nAnswered Sub-questions:\n {answered_sub_questions}
\nContext:\n {context} \n\n
\n\n
Answer:"""
ENTITY_TERM_PROMPT = """ \n
Based on the original question and the context retrieved from a dataset, please generate a list of
entities (e.g. companies, organizations, industries, products, locations, etc.), terms and concepts
(e.g. sales, revenue, etc.) that are relevant for the question, plus their relations to each other.
\n\n
Here is the original question:
\n ------- \n
{question}
\n ------- \n
And here is the context retrieved:
\n ------- \n
{context}
\n ------- \n
Please format your answer as a json object in the following format:
{{"retrieved_entities_relationships": {{
"entities": [{{
"entity_name": <assign a name for the entity>,
"entity_type": <specify a short type name for the entity, such as 'company', 'location',...>
}}],
"relationships": [{{
"name": <assign a name for the relationship>,
"type": <specify a short type name for the relationship, such as 'sales_to', 'is_location_of',...>,
"entities": [<related entity name 1>, <related entity name 2>]
}}],
"terms": [{{
"term_name": <assign a name for the term>,
"term_type": <specify a short type name for the term, such as 'revenue', 'market_share',...>,
"similar_to": <list terms that are similar to this term>
}}]
}}
}}
"""

View File

@@ -1,101 +0,0 @@
import ast
import json
import re
from collections.abc import Sequence
from datetime import datetime
from datetime import timedelta
from typing import Any
from danswer.context.search.models import InferenceSection
def normalize_whitespace(text: str) -> str:
"""Normalize whitespace in text to single spaces and strip leading/trailing whitespace."""
return re.sub(r"\s+", " ", text.strip())
# Post-processing
def format_docs(docs: Sequence[InferenceSection]) -> str:
return "\n\n".join(doc.combined_content for doc in docs)
def clean_and_parse_list_string(json_string: str) -> list[dict]:
# Remove any prefixes/labels before the actual JSON content
json_string = re.sub(r"^.*?(?=\[)", "", json_string, flags=re.DOTALL)
# Remove markdown code block markers and any newline prefixes
cleaned_string = re.sub(r"```json\n|\n```", "", json_string)
cleaned_string = cleaned_string.replace("\\n", " ").replace("\n", " ")
cleaned_string = " ".join(cleaned_string.split())
# Try parsing with json.loads first, fall back to ast.literal_eval
try:
return json.loads(cleaned_string)
except json.JSONDecodeError:
try:
return ast.literal_eval(cleaned_string)
except (ValueError, SyntaxError) as e:
raise ValueError(f"Failed to parse JSON string: {cleaned_string}") from e
def clean_and_parse_json_string(json_string: str) -> dict[str, Any]:
# Remove markdown code block markers and any newline prefixes
cleaned_string = re.sub(r"```json\n|\n```", "", json_string)
cleaned_string = cleaned_string.replace("\\n", " ").replace("\n", " ")
cleaned_string = " ".join(cleaned_string.split())
# Parse the cleaned string into a Python dictionary
return json.loads(cleaned_string)
def format_entity_term_extraction(entity_term_extraction_dict: dict[str, Any]) -> str:
entities = entity_term_extraction_dict["entities"]
terms = entity_term_extraction_dict["terms"]
relationships = entity_term_extraction_dict["relationships"]
entity_strs = ["\nEntities:\n"]
for entity in entities:
entity_str = f"{entity['entity_name']} ({entity['entity_type']})"
entity_strs.append(entity_str)
entity_str = "\n - ".join(entity_strs)
relationship_strs = ["\n\nRelationships:\n"]
for relationship in relationships:
relationship_str = f"{relationship['name']} ({relationship['type']}): {relationship['entities']}"
relationship_strs.append(relationship_str)
relationship_str = "\n - ".join(relationship_strs)
term_strs = ["\n\nTerms:\n"]
for term in terms:
term_str = f"{term['term_name']} ({term['term_type']}): similar to {term['similar_to']}"
term_strs.append(term_str)
term_str = "\n - ".join(term_strs)
return "\n".join(entity_strs + relationship_strs + term_strs)
def _format_time_delta(time: timedelta) -> str:
seconds_from_start = f"{time.seconds:03d}"
microseconds_from_start = f"{time.microseconds:06d}"
return f"{seconds_from_start}.{microseconds_from_start}"
def generate_log_message(
message: str,
node_start_time: datetime,
graph_start_time: datetime | None = None,
) -> str:
current_time = datetime.now()
if graph_start_time is not None:
graph_time_str = _format_time_delta(current_time - graph_start_time)
else:
graph_time_str = "N/A"
node_time_str = _format_time_delta(current_time - node_start_time)
return f"{graph_time_str} ({node_time_str} s): {message}"
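
As a usage illustration for the parsing helpers above, here is the kind of input clean_and_parse_list_string handles: an LLM reply that wraps the JSON list in a markdown code block and adds a label in front of it (the reply text is hypothetical):

raw_llm_reply = 'Answer:\n```json\n[{"sub_question": "What are sales for company A?", "search_term": "company A sales"}]\n```'
sub_questions = clean_and_parse_list_string(raw_llm_reply)
print(sub_questions[0]["search_term"])  # -> "company A sales"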

View File

@@ -1,25 +0,0 @@
# Helpers for inspecting celery queues backed by redis
from typing import cast
from redis import Redis
from danswer.background.celery.configs.base import CELERY_SEPARATOR
from danswer.configs.constants import DanswerCeleryPriority
def celery_get_queue_length(queue: str, r: Redis) -> int:
"""This is a redis specific way to get the length of a celery queue.
It is priority aware and knows how to count across the multiple redis lists
used to implement task prioritization.
This operation is not atomic."""
total_length = 0
for i in range(len(DanswerCeleryPriority)):
queue_name = queue
if i > 0:
queue_name += CELERY_SEPARATOR
queue_name += str(i)
length = r.llen(queue_name)
total_length += cast(int, length)
return total_length
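
A hedged usage sketch (the connection settings are illustrative, not from the repo):

from redis import Redis

r = Redis(host="localhost", port=6379, db=0)  # illustrative connection settings
# sums the base "celery" queue plus its priority-suffixed sub-queues
print(celery_get_queue_length("celery", r))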

View File

@@ -1,61 +0,0 @@
from datetime import timedelta
from typing import Any
from danswer.configs.constants import DanswerCeleryPriority
from danswer.configs.constants import DanswerCeleryTask
tasks_to_schedule = [
{
"name": "check-for-vespa-sync",
"task": DanswerCeleryTask.CHECK_FOR_VESPA_SYNC_TASK,
"schedule": timedelta(seconds=20),
"options": {"priority": DanswerCeleryPriority.HIGH},
},
{
"name": "check-for-connector-deletion",
"task": DanswerCeleryTask.CHECK_FOR_CONNECTOR_DELETION,
"schedule": timedelta(seconds=20),
"options": {"priority": DanswerCeleryPriority.HIGH},
},
{
"name": "check-for-indexing",
"task": DanswerCeleryTask.CHECK_FOR_INDEXING,
"schedule": timedelta(seconds=15),
"options": {"priority": DanswerCeleryPriority.HIGH},
},
{
"name": "check-for-prune",
"task": DanswerCeleryTask.CHECK_FOR_PRUNING,
"schedule": timedelta(seconds=15),
"options": {"priority": DanswerCeleryPriority.HIGH},
},
{
"name": "kombu-message-cleanup",
"task": DanswerCeleryTask.KOMBU_MESSAGE_CLEANUP_TASK,
"schedule": timedelta(seconds=3600),
"options": {"priority": DanswerCeleryPriority.LOWEST},
},
{
"name": "monitor-vespa-sync",
"task": DanswerCeleryTask.MONITOR_VESPA_SYNC,
"schedule": timedelta(seconds=5),
"options": {"priority": DanswerCeleryPriority.HIGH},
},
{
"name": "check-for-doc-permissions-sync",
"task": DanswerCeleryTask.CHECK_FOR_DOC_PERMISSIONS_SYNC,
"schedule": timedelta(seconds=30),
"options": {"priority": DanswerCeleryPriority.HIGH},
},
{
"name": "check-for-external-group-sync",
"task": DanswerCeleryTask.CHECK_FOR_EXTERNAL_GROUP_SYNC,
"schedule": timedelta(seconds=20),
"options": {"priority": DanswerCeleryPriority.HIGH},
},
]
def get_tasks_to_schedule() -> list[dict[str, Any]]:
return tasks_to_schedule
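
For context, these entries appear to be shaped for Celery's beat_schedule format; a sketch of the assumed wiring follows (the app name is illustrative, and this assumes the task values resolve to registered task names):

from celery import Celery

app = Celery("danswer")  # illustrative app name
app.conf.beat_schedule = {
    entry["name"]: {
        "task": entry["task"],
        "schedule": entry["schedule"],
        "options": entry["options"],
    }
    for entry in get_tasks_to_schedule()
}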

View File

@@ -1,10 +0,0 @@
"""Factory stub for running celery worker / celery beat."""
from celery import Celery
from danswer.utils.variable_functionality import fetch_versioned_implementation
from danswer.utils.variable_functionality import set_is_ee_based_on_env_variable
set_is_ee_based_on_env_variable()
app: Celery = fetch_versioned_implementation(
"danswer.background.celery.apps.primary", "celery_app"
)
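
Usage note: this stub is the module that the worker and beat processes would point at via celery's -A flag; the exact module path is not shown in this diff, so it is omitted here.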

View File

@@ -1,212 +0,0 @@
from collections.abc import Iterator
from datetime import datetime
from enum import Enum
from typing import Any
from pydantic import BaseModel
from pydantic import Field
from danswer.configs.constants import DocumentSource
from danswer.configs.constants import MessageType
from danswer.context.search.enums import QueryFlow
from danswer.context.search.enums import RecencyBiasSetting
from danswer.context.search.enums import SearchType
from danswer.context.search.models import RetrievalDocs
from danswer.tools.tool_implementations.custom.base_tool_types import ToolResultType
class LlmDoc(BaseModel):
"""This contains the minimal set information for the LLM portion including citations"""
document_id: str
content: str
blurb: str
semantic_identifier: str
source_type: DocumentSource
metadata: dict[str, str | list[str]]
updated_at: datetime | None
link: str | None
source_links: dict[int, str] | None
match_highlights: list[str] | None
# First chunk of info for streaming QA
class QADocsResponse(RetrievalDocs):
rephrased_query: str | None = None
predicted_flow: QueryFlow | None
predicted_search: SearchType | None
applied_source_filters: list[DocumentSource] | None
applied_time_cutoff: datetime | None
recency_bias_multiplier: float
def model_dump(self, *args: list, **kwargs: dict[str, Any]) -> dict[str, Any]: # type: ignore
initial_dict = super().model_dump(mode="json", *args, **kwargs) # type: ignore
initial_dict["applied_time_cutoff"] = (
self.applied_time_cutoff.isoformat() if self.applied_time_cutoff else None
)
return initial_dict
class StreamStopReason(Enum):
CONTEXT_LENGTH = "context_length"
CANCELLED = "cancelled"
class StreamStopInfo(BaseModel):
stop_reason: StreamStopReason
def model_dump(self, *args: list, **kwargs: dict[str, Any]) -> dict[str, Any]: # type: ignore
data = super().model_dump(mode="json", *args, **kwargs) # type: ignore
data["stop_reason"] = self.stop_reason.name
return data
class LLMRelevanceFilterResponse(BaseModel):
llm_selected_doc_indices: list[int]
class FinalUsedContextDocsResponse(BaseModel):
final_context_docs: list[LlmDoc]
class RelevanceAnalysis(BaseModel):
relevant: bool
content: str | None = None
class SectionRelevancePiece(RelevanceAnalysis):
"""LLM analysis mapped to an Inference Section"""
document_id: str
chunk_id: int # ID of the center chunk for a given inference section
class DocumentRelevance(BaseModel):
"""Contains all relevance information for a given search"""
relevance_summaries: dict[str, RelevanceAnalysis]
class DanswerAnswerPiece(BaseModel):
# A small piece of a complete answer. Used for streaming back answers.
answer_piece: str | None # if None, specifies the end of an Answer
# An intermediate representation of citations, later translated into
# a mapping of the citation [n] number to SearchDoc
class CitationInfo(BaseModel):
citation_num: int
document_id: str
class AllCitations(BaseModel):
citations: list[CitationInfo]
# This is a mapping of the citation number to the document index within
# the result search doc set
class MessageSpecificCitations(BaseModel):
citation_map: dict[int, int]
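
To make the relationship between the two citation models concrete: CitationInfo carries the running citation number and the cited document_id, while MessageSpecificCitations re-expresses those numbers as indices into the message's ordered search-doc list. A hypothetical translation helper (not part of this module) might look like:

def to_message_citations(
    citations: list[CitationInfo], doc_ids_in_order: list[str]
) -> MessageSpecificCitations:
    # map each citation number [n] to the index of its document within
    # the ordered result search doc set
    index_by_doc_id = {doc_id: i for i, doc_id in enumerate(doc_ids_in_order)}
    return MessageSpecificCitations(
        citation_map={
            c.citation_num: index_by_doc_id[c.document_id] for c in citations
        }
    )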
class MessageResponseIDInfo(BaseModel):
user_message_id: int | None
reserved_assistant_message_id: int
class StreamingError(BaseModel):
error: str
stack_trace: str | None = None
class DanswerContext(BaseModel):
content: str
document_id: str
semantic_identifier: str
blurb: str
class DanswerContexts(BaseModel):
contexts: list[DanswerContext]
class DanswerAnswer(BaseModel):
answer: str | None
class ThreadMessage(BaseModel):
message: str
sender: str | None = None
role: MessageType = MessageType.USER
class ChatDanswerBotResponse(BaseModel):
answer: str | None = None
citations: list[CitationInfo] | None = None
docs: QADocsResponse | None = None
llm_selected_doc_indices: list[int] | None = None
error_msg: str | None = None
chat_message_id: int | None = None
answer_valid: bool = True # Reflexion result, default True if Reflexion not run
class FileChatDisplay(BaseModel):
file_ids: list[str]
class CustomToolResponse(BaseModel):
response: ToolResultType
tool_name: str
class ToolConfig(BaseModel):
id: int
class PromptOverrideConfig(BaseModel):
name: str
description: str = ""
system_prompt: str
task_prompt: str = ""
include_citations: bool = True
datetime_aware: bool = True
class PersonaOverrideConfig(BaseModel):
name: str
description: str
search_type: SearchType = SearchType.SEMANTIC
num_chunks: float | None = None
llm_relevance_filter: bool = False
llm_filter_extraction: bool = False
recency_bias: RecencyBiasSetting = RecencyBiasSetting.AUTO
llm_model_provider_override: str | None = None
llm_model_version_override: str | None = None
prompts: list[PromptOverrideConfig] = Field(default_factory=list)
prompt_ids: list[int] = Field(default_factory=list)
document_set_ids: list[int] = Field(default_factory=list)
tools: list[ToolConfig] = Field(default_factory=list)
tool_ids: list[int] = Field(default_factory=list)
custom_tools_openapi: list[dict[str, Any]] = Field(default_factory=list)
AnswerQuestionPossibleReturn = (
DanswerAnswerPiece
| CitationInfo
| DanswerContexts
| FileChatDisplay
| CustomToolResponse
| StreamingError
| StreamStopInfo
)
AnswerQuestionStreamReturn = Iterator[AnswerQuestionPossibleReturn]
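
Since AnswerQuestionStreamReturn is just an iterator over this union, consumers dispatch on packet type. An illustrative (non-canonical) consumer using the models defined above:

def consume_stream(stream: AnswerQuestionStreamReturn) -> str:
    answer = ""
    for packet in stream:
        if isinstance(packet, DanswerAnswerPiece) and packet.answer_piece:
            answer += packet.answer_piece  # accumulate streamed answer text
        elif isinstance(packet, StreamingError):
            raise RuntimeError(packet.error)
        elif isinstance(packet, StreamStopInfo):
            break  # backend signaled context-length cutoff or cancellation
    return answer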
class LLMMetricsContainer(BaseModel):
prompt_tokens: int
response_tokens: int

View File

@@ -1,107 +0,0 @@
import json
from typing import cast
from google.auth.transport.requests import Request # type: ignore
from google.oauth2.credentials import Credentials as OAuthCredentials # type: ignore
from google.oauth2.service_account import Credentials as ServiceAccountCredentials # type: ignore
from danswer.configs.constants import DocumentSource
from danswer.connectors.google_utils.shared_constants import (
DB_CREDENTIALS_DICT_SERVICE_ACCOUNT_KEY,
)
from danswer.connectors.google_utils.shared_constants import (
DB_CREDENTIALS_DICT_TOKEN_KEY,
)
from danswer.connectors.google_utils.shared_constants import (
DB_CREDENTIALS_PRIMARY_ADMIN_KEY,
)
from danswer.connectors.google_utils.shared_constants import (
GOOGLE_SCOPES,
)
from danswer.utils.logger import setup_logger
logger = setup_logger()
def get_google_oauth_creds(
token_json_str: str, source: DocumentSource
) -> OAuthCredentials | None:
creds_json = json.loads(token_json_str)
creds = OAuthCredentials.from_authorized_user_info(
info=creds_json,
scopes=GOOGLE_SCOPES[source],
)
if creds.valid:
return creds
if creds.expired and creds.refresh_token:
try:
creds.refresh(Request())
if creds.valid:
logger.notice("Refreshed Google Drive tokens.")
return creds
except Exception:
logger.exception("Failed to refresh google drive access token due to:")
return None
return None
def get_google_creds(
credentials: dict[str, str],
source: DocumentSource,
) -> tuple[ServiceAccountCredentials | OAuthCredentials, dict[str, str] | None]:
"""Checks for two different types of credentials.
(1) A credential which holds a token acquired via a user going through
the Google OAuth flow.
(2) A credential which holds a service account key JSON file, which
can then be used to impersonate any user in the workspace.
"""
oauth_creds = None
service_creds = None
new_creds_dict = None
if DB_CREDENTIALS_DICT_TOKEN_KEY in credentials:
# OAUTH
access_token_json_str = cast(str, credentials[DB_CREDENTIALS_DICT_TOKEN_KEY])
oauth_creds = get_google_oauth_creds(
token_json_str=access_token_json_str, source=source
)
# tell caller to update token stored in DB if it has changed
# (e.g. the token has been refreshed)
new_creds_json_str = oauth_creds.to_json() if oauth_creds else ""
if new_creds_json_str != access_token_json_str:
new_creds_dict = {
DB_CREDENTIALS_DICT_TOKEN_KEY: new_creds_json_str,
DB_CREDENTIALS_PRIMARY_ADMIN_KEY: credentials[
DB_CREDENTIALS_PRIMARY_ADMIN_KEY
],
}
elif DB_CREDENTIALS_DICT_SERVICE_ACCOUNT_KEY in credentials:
# SERVICE ACCOUNT
service_account_key_json_str = credentials[
DB_CREDENTIALS_DICT_SERVICE_ACCOUNT_KEY
]
service_account_key = json.loads(service_account_key_json_str)
service_creds = ServiceAccountCredentials.from_service_account_info(
service_account_key, scopes=GOOGLE_SCOPES[source]
)
# `valid` is False both before the initial token fetch and after expiry,
# so a single validity check covers first-time fetch and renewal
if not service_creds.valid:
service_creds.refresh(Request())
if not service_creds.valid:
raise PermissionError(
f"Unable to access {source} - service account credentials are invalid."
)
creds: ServiceAccountCredentials | OAuthCredentials | None = (
oauth_creds or service_creds
)
if creds is None:
raise PermissionError(
f"Unable to access {source} - unknown credential structure."
)
return creds, new_creds_dict
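
As the docstring notes, the second return value is non-None when the stored credential changed (e.g. an OAuth token was refreshed) and should be written back. A usage sketch, where stored_credentials and save_credentials_to_db are hypothetical stand-ins for the caller's persistence layer:

creds, new_creds_dict = get_google_creds(
    credentials=stored_credentials,  # hypothetical: dict loaded from the DB
    source=DocumentSource.GOOGLE_DRIVE,  # source value shown for illustration
)
if new_creds_dict is not None:
    # hypothetical persistence hook: store the refreshed token so the
    # next run does not need to refresh again
    save_credentials_to_db(new_creds_dict)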

View File

@@ -1,289 +0,0 @@
import os
from collections.abc import Iterator
from datetime import datetime
from datetime import timezone
from typing import Any
from simple_salesforce import Salesforce
from simple_salesforce import SFType
from danswer.configs.app_configs import INDEX_BATCH_SIZE
from danswer.configs.constants import DocumentSource
from danswer.connectors.cross_connector_utils.miscellaneous_utils import time_str_to_utc
from danswer.connectors.interfaces import GenerateDocumentsOutput
from danswer.connectors.interfaces import GenerateSlimDocumentOutput
from danswer.connectors.interfaces import LoadConnector
from danswer.connectors.interfaces import PollConnector
from danswer.connectors.interfaces import SecondsSinceUnixEpoch
from danswer.connectors.interfaces import SlimConnector
from danswer.connectors.models import BasicExpertInfo
from danswer.connectors.models import ConnectorMissingCredentialError
from danswer.connectors.models import Document
from danswer.connectors.models import Section
from danswer.connectors.models import SlimDocument
from danswer.connectors.salesforce.utils import extract_dict_text
from danswer.utils.logger import setup_logger
# TODO: this connector does not work well at large scales
# a single large query against a large Salesforce instance has been reported to take 1.5 hours,
# and memory usage appears to grow over time when the connection is long-running (again a scale issue)
DEFAULT_PARENT_OBJECT_TYPES = ["Account"]
MAX_QUERY_LENGTH = 10000 # stay well below Salesforce's 20,000-character SOQL query limit
ID_PREFIX = "SALESFORCE_"
logger = setup_logger()
class SalesforceConnector(LoadConnector, PollConnector, SlimConnector):
def __init__(
self,
batch_size: int = INDEX_BATCH_SIZE,
requested_objects: list[str] = [],
) -> None:
self.batch_size = batch_size
self.sf_client: Salesforce | None = None
self.parent_object_list = (
[obj.capitalize() for obj in requested_objects]
if requested_objects
else DEFAULT_PARENT_OBJECT_TYPES
)
def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None:
self.sf_client = Salesforce(
username=credentials["sf_username"],
password=credentials["sf_password"],
security_token=credentials["sf_security_token"],
)
return None
def _get_sf_type_object_json(self, type_name: str) -> Any:
if self.sf_client is None:
raise ConnectorMissingCredentialError("Salesforce")
sf_object = SFType(
type_name, self.sf_client.session_id, self.sf_client.sf_instance
)
return sf_object.describe()
def _get_name_from_id(self, id: str) -> str:
if self.sf_client is None:
raise ConnectorMissingCredentialError("Salesforce")
try:
user_object_info = self.sf_client.query(
f"SELECT Name FROM User WHERE Id = '{id}'"
)
# simple_salesforce returns query results under the lowercase "records" key
name = user_object_info.get("records", [{}])[0].get("Name", "Null User")
return name
except Exception:
logger.warning(f"Couldnt find name for object id: {id}")
return "Null User"
def _convert_object_instance_to_document(
self, object_dict: dict[str, Any]
) -> Document:
if self.sf_client is None:
raise ConnectorMissingCredentialError("Salesforce")
salesforce_id = object_dict["Id"]
danswer_salesforce_id = f"{ID_PREFIX}{salesforce_id}"
extracted_link = f"https://{self.sf_client.sf_instance}/{salesforce_id}"
extracted_doc_updated_at = time_str_to_utc(object_dict["LastModifiedDate"])
extracted_object_text = extract_dict_text(object_dict)
extracted_semantic_identifier = object_dict.get("Name", "Unknown Object")
extracted_primary_owners = [
BasicExpertInfo(
display_name=self._get_name_from_id(object_dict["LastModifiedById"])
)
]
doc = Document(
id=danswer_salesforce_id,
sections=[Section(link=extracted_link, text=extracted_object_text)],
source=DocumentSource.SALESFORCE,
semantic_identifier=extracted_semantic_identifier,
doc_updated_at=extracted_doc_updated_at,
primary_owners=extracted_primary_owners,
metadata={},
)
return doc
def _is_valid_child_object(self, child_relationship: dict) -> bool:
if self.sf_client is None:
raise ConnectorMissingCredentialError("Salesforce")
if not child_relationship["childSObject"]:
return False
if not child_relationship["relationshipName"]:
return False
sf_type = child_relationship["childSObject"]
object_description = self._get_sf_type_object_json(sf_type)
if not object_description["queryable"]:
return False
try:
query = f"SELECT Count() FROM {sf_type} LIMIT 1"
result = self.sf_client.query(query)
if result["totalSize"] == 0:
return False
except Exception as e:
logger.warning(f"Object type {sf_type} doesn't support query: {e}")
return False
if child_relationship["field"]:
if child_relationship["field"] == "RelatedToId":
return False
else:
return False
return True
def _get_all_children_of_sf_type(self, sf_type: str) -> list[dict]:
if self.sf_client is None:
raise ConnectorMissingCredentialError("Salesforce")
object_description = self._get_sf_type_object_json(sf_type)
children_objects: list[dict] = []
for child_relationship in object_description["childRelationships"]:
if self._is_valid_child_object(child_relationship):
children_objects.append(
{
"relationship_name": child_relationship["relationshipName"],
"object_type": child_relationship["childSObject"],
}
)
return children_objects
def _get_all_fields_for_sf_type(self, sf_type: str) -> list[str]:
if self.sf_client is None:
raise ConnectorMissingCredentialError("Salesforce")
object_description = self._get_sf_type_object_json(sf_type)
fields = [
field.get("name")
for field in object_description["fields"]
if field.get("type", "base64") != "base64"
]
return fields
def _generate_query_per_parent_type(self, parent_sf_type: str) -> Iterator[str]:
"""
This function takes an object type and generates one or more queries designed
to gather the information associated with objects of that type.
It does so by getting all the fields of the parent object type, then all the
child objects of that type and all of their fields as well.
"""
parent_fields = self._get_all_fields_for_sf_type(parent_sf_type)
child_sf_types = self._get_all_children_of_sf_type(parent_sf_type)
query = f"SELECT {', '.join(parent_fields)}"
for child_object_dict in child_sf_types:
fields = self._get_all_fields_for_sf_type(child_object_dict["object_type"])
query_addition = f", \n(SELECT {', '.join(fields)} FROM {child_object_dict['relationship_name']})"
if len(query_addition) + len(query) > MAX_QUERY_LENGTH:
query += f"\n FROM {parent_sf_type}"
yield query
query = "SELECT Id" + query_addition
else:
query += query_addition
query += f"\n FROM {parent_sf_type}"
yield query
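
Concretely, for a parent type like Account with a child relationship such as Cases, the yielded SOQL has roughly this shape — field lists abbreviated; the real ones come from the object describe calls above:

# illustrative only; actual field lists depend on the org's schema
example_query = """SELECT Id, Name, LastModifiedDate,
(SELECT Id, Subject FROM Cases)
 FROM Account"""

When appending a child sub-select would push the text past MAX_QUERY_LENGTH, the generator yields the query built so far and starts a fresh one with "SELECT Id" plus the overflowing sub-select, so no single query exceeds the limit.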
def _fetch_from_salesforce(
self,
start: datetime | None = None,
end: datetime | None = None,
) -> GenerateDocumentsOutput:
if self.sf_client is None:
raise ConnectorMissingCredentialError("Salesforce")
doc_batch: list[Document] = []
for parent_object_type in self.parent_object_list:
logger.debug(f"Processing: {parent_object_type}")
query_results: dict = {}
for query in self._generate_query_per_parent_type(parent_object_type):
if start is not None and end is not None:
if start and start.tzinfo is None:
start = start.replace(tzinfo=timezone.utc)
if end and end.tzinfo is None:
end = end.replace(tzinfo=timezone.utc)
query += f" WHERE LastModifiedDate > {start.isoformat()} AND LastModifiedDate < {end.isoformat()}"
query_result = self.sf_client.query_all(query)
for record_dict in query_result["records"]:
query_results.setdefault(record_dict["Id"], {}).update(record_dict)
logger.info(
f"Number of {parent_object_type} Objects processed: {len(query_results)}"
)
for combined_object_dict in query_results.values():
doc_batch.append(
self._convert_object_instance_to_document(combined_object_dict)
)
if len(doc_batch) > self.batch_size:
yield doc_batch
doc_batch = []
yield doc_batch
def load_from_state(self) -> GenerateDocumentsOutput:
return self._fetch_from_salesforce()
def poll_source(
self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch
) -> GenerateDocumentsOutput:
if self.sf_client is None:
raise ConnectorMissingCredentialError("Salesforce")
start_datetime = datetime.utcfromtimestamp(start)
end_datetime = datetime.utcfromtimestamp(end)
return self._fetch_from_salesforce(start=start_datetime, end=end_datetime)
def retrieve_all_slim_documents(
self,
start: SecondsSinceUnixEpoch | None = None,
end: SecondsSinceUnixEpoch | None = None,
) -> GenerateSlimDocumentOutput:
if self.sf_client is None:
raise ConnectorMissingCredentialError("Salesforce")
doc_metadata_list: list[SlimDocument] = []
for parent_object_type in self.parent_object_list:
query = f"SELECT Id FROM {parent_object_type}"
query_result = self.sf_client.query_all(query)
doc_metadata_list.extend(
SlimDocument(
id=f"{ID_PREFIX}{instance_dict.get('Id', '')}",
perm_sync_data={},
)
for instance_dict in query_result["records"]
)
yield doc_metadata_list
if __name__ == "__main__":
connector = SalesforceConnector(
requested_objects=os.environ["REQUESTED_OBJECTS"].split(",")
)
connector.load_credentials(
{
"sf_username": os.environ["SF_USERNAME"],
"sf_password": os.environ["SF_PASSWORD"],
"sf_security_token": os.environ["SF_SECURITY_TOKEN"],
}
)
document_batches = connector.load_from_state()
print(next(document_batches))

Some files were not shown because too many files have changed in this diff.