Mirror of https://github.com/onyx-dot-app/onyx.git (synced 2026-02-16 23:35:46 +00:00)

Compare commits: debug-shar...fix/projec (254 commits)
Commit SHAs:

c54752e3ba, bd32795804, 7aa6b01ac0, 93084a3a39, e46f632570, bbb4b9eda3, 12b7c7d4dd,
464967340b, a2308c2f45, 2ee9f79f71, c3904b7c96, 5009dcf911, c7b4a0fad9, 60a402fcab,
c9bb078a37, c36c2a6c8d, f9e2f9cbb4, 0b7c808480, 0a6ff30ee4, dc036eb452, ee950b9cbd,
dd71765849, dc6b97f1b1, d960c23b6a, d9c753ba92, 60234dd6da, f88ef2e9ff, 6b479a01ea,
248fe416e1, cbea4bb75c, 4a147a48dc, a77025cd46, d10914ccc6, 7d44d48f87, 82fd0e0316,
d7e4c47ef1, 799b0df1cb, b31d36564a, 84df0a1bf9, dbc53fe176, 1e4ba93daa, d872715620,
46ad541ebc, 613907a06f, ff723992d1, bda3c6b189, 264d1de994, 335571ce79, 4d3fac2574,
7c229dd103, b5df182a36, 7e7cfa4187, 69d8430288, 467d294b30, ba2dd18233, 891eeb0212,
9085731ff0, f5d88c47f4, 807e5c21b0, 1bcd795011, aae357df40, 4f03e85c57, c3411fb28d,
b3d1b1f4aa, cbb86c12aa, 8fd606b713, d69170ee13, e356c5308c, 3026ac8912, 0cee7c849f,
14bfb7fd0c, 804e48a3da, 907271656e, 1f11dd3e46, 048561ce0b, 8718f10c38, ab4d820089,
77ae4f1a45, 8fd1f42a1c, b94c7e581b, c90ff701dc, b1ad58c5af, 345f9b3497, 4671d18d4f,
f0598be875, eb361c6434, e39b0a921c, 2dd8a8c788, 8b79e2e90b, d05941d1bd, 50070fb264,
5792d8d5ed, e1c4b33cf7, 2c2f6e7c23, 3d30233d46, 875f8cff5c, 6e4686a09f, 237c18e15e,
a71d80329d, 91c392b4fc, a25df4002d, 436a5add88, 3a4bb239b1, 2acb4cfdb6, f1d626adb0,
5ca604f186, c19c76c3ad, 4555f6badc, 71bd643537, 23f70f0a96, c97672559a, 243f0bbdbd,
0a5ca7f1cf, 8d56d213ec, cea2ea924b, 569d205e31, 9feff5002f, a1314e49a3, 463f839154,
5a0fe3c1d1, 8ac5c86c1e, d803b48edd, bc3adcdc89, 95e27f1c30, d0724312db, 5b1021f20b,
55cdbe396f, e8fe0fecd2, 5b4fc91a3e, afd2d8c362, 8a8cf13089, c7e872d4e3, 1dbe926518,
d095bec6df, 58e8d501a1, a39782468b, d747b48d22, 817de23854, 6474d30ba0, 6c9635373a,
1a945b6f94, 526c76fa08, 932e62531f, 83768e2ff1, f23b6506f4, 5f09318302, 674e789036,
cb514e6e34, 965dad785c, c9558224d2, c2dbd3fd1e, d27c2b1b4e, 8c52444bda, b4caa85cd4,
57163dd936, 15f2a0bf60, aeae7ebdef, eaa14a5ce0, b07c834e83, 97cd308ef7, 28cdab7a70,
ad9aa01819, 508a88c8d7, b6f81fbb8e, b9b66396ec, dd20b9ef4c, e1f7e8cacf, fd567279fd,
1427eb3cf0, e70be0f816, 0014c7cff7, 1c23dbeaee, b2b122a24b, 033ae74b0e, c593fb4866,
b9580ef346, 4df3a9204f, e0ad313a60, a2bfb46edd, 25e3371bee, 4b9b306140, ccf55136be,
a13db828f3, b7d56d0645, 9ac70d35a8, 7da792dd27, 136c2f4082, 67bd14e801, 8c9a20be7a,
0427845502, a85a5a324e, 78f1fb5bf4, 6a8a214324, 884266c009, 2c422215e6, 32fe185bb4,
c2758a28d5, 5cda2e0173, 9e885a68b3, 376fc86b0c, 2eb1444d80, bd6ebe4718, 691d63bc0f,
dfd4d9abef, 4cb39bc150, 4e357478e0, b5b1b3287c, 2f58a972eb, 6b39d8eed9, f81c34d040,
0771b1f476, eedd2ba3fe, 98554e5025, dcd2cad6b4, 189f4bb071, 7eeab8fb80, 60f83dd0db,
2618602fd6, b80f96de85, 74a15b2c01, 408b80ce51, e82b68c1b0, af5eec648b, d186c5e82e,
4420a50aed, 9caa6ea7ff, 8d7b217d33, 57908769f1, 600cec7c89, bb8ea536c4, f97869b91e,
aa5be56884, 7580178c95, 2e0bc8caf0, f9bd03c7f0, 77466e1f2b, 8dd79345ed, a049835c49,
d186d8e8ed, 082897eb9b, e38f79dec5, 26e7bba25d, 3cde4ef77f, f4d135d710, 6094f70ac8,
a90e58b39b, e82e3141ed, f8e9060bab, 24831fa1a1, f6a0e69b2a, 0394eaea7f, 898b8c316e,
4b0c6d1e54, da7dc33afa
.github/actions/prepare-build/action.yml | 50 (new file)

@@ -0,0 +1,50 @@
name: "Prepare Build (OpenAPI generation)"
description: "Sets up Python with uv, installs deps, generates OpenAPI schema and Python client, uploads artifact"
runs:
  using: "composite"
  steps:
    - name: Checkout code
      uses: actions/checkout@v4

    - name: Setup uv
      uses: astral-sh/setup-uv@v3

    - name: Setup Python
      uses: actions/setup-python@v5
      with:
        python-version: "3.11"

    - name: Install Python dependencies with uv
      shell: bash
      run: |
        uv pip install --system \
          -r backend/requirements/default.txt \
          -r backend/requirements/dev.txt

    - name: Generate OpenAPI schema
      shell: bash
      working-directory: backend
      env:
        PYTHONPATH: "."
      run: |
        python scripts/onyx_openapi_schema.py --filename generated/openapi.json

    - name: Generate OpenAPI Python client
      shell: bash
      run: |
        docker run --rm \
          -v "${{ github.workspace }}/backend/generated:/local" \
          openapitools/openapi-generator-cli generate \
          -i /local/openapi.json \
          -g python \
          -o /local/onyx_openapi_client \
          --package-name onyx_openapi_client \
          --skip-validate-spec \
          --openapi-normalizer "SIMPLIFY_ONEOF_ANYOF=true,SET_OAS3_NULLABLE=true"

    - name: Upload OpenAPI artifacts
      uses: actions/upload-artifact@v4
      with:
        name: openapi-artifacts
        path: backend/generated/
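The PR test workflows later in this diff collapse their inline OpenAPI steps into a single call to this composite action. A minimal caller would look like the following sketch; the workflow name and trigger are illustrative, while the `uses:` path is the one the pr-integration-tests diff below adopts:

```yaml
# Sketch of a consuming workflow; only the `uses:` path is taken from this diff.
name: Example caller
on: pull_request
jobs:
  prepare-build:
    runs-on: ubuntu-latest
    steps:
      - name: Prepare build
        uses: ./.github/actions/prepare-build
```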
.github/pull_request_template.md | 5

@@ -6,9 +6,6 @@

[Describe the tests you ran to verify your changes]

## Backporting (check the box to trigger backport action)
## Additional Options

Note: You have to check that the action passes, otherwise resolve the conflicts manually and tag the patches.

- [ ] This PR should be backported (make sure to check that the backport attempt succeeds)
- [ ] [Optional] Override Linear Check
.github/workflows/check-lazy-imports.yml | 24 (new file)

@@ -0,0 +1,24 @@
name: Check Lazy Imports

on:
  merge_group:
  pull_request:
    branches:
      - main
      - 'release/**'

jobs:
  check-lazy-imports:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'

      - name: Check lazy imports
        run: python3 backend/scripts/check_lazy_imports.py
@@ -8,9 +8,9 @@ on:
env:
  REGISTRY_IMAGE: ${{ contains(github.ref_name, 'cloud') && 'onyxdotapp/onyx-backend-cloud' || 'onyxdotapp/onyx-backend' }}
  DEPLOYMENT: ${{ contains(github.ref_name, 'cloud') && 'cloud' || 'standalone' }}

  # don't tag cloud images with "latest"
  LATEST_TAG: ${{ contains(github.ref_name, 'latest') && !contains(github.ref_name, 'cloud') }}

  # tag nightly builds with "edge"
  EDGE_TAG: ${{ startsWith(github.ref_name, 'nightly-latest') }}

jobs:
  build-and-push:

@@ -33,7 +33,16 @@ jobs:
        run: |
          platform=${{ matrix.platform }}
          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV

      - name: Check if stable release version
        id: check_version
        run: |
          if [[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && [[ "${{ github.ref_name }}" != *"cloud"* ]]; then
            echo "is_stable=true" >> $GITHUB_OUTPUT
          else
            echo "is_stable=false" >> $GITHUB_OUTPUT
          fi

      - name: Checkout code
        uses: actions/checkout@v4

@@ -46,7 +55,8 @@ jobs:
            latest=false
          tags: |
            type=raw,value=${{ github.ref_name }}
            type=raw,value=${{ env.LATEST_TAG == 'true' && 'latest' || '' }}
            type=raw,value=${{ steps.check_version.outputs.is_stable == 'true' && 'latest' || '' }}
            type=raw,value=${{ env.EDGE_TAG == 'true' && 'edge' || '' }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

@@ -99,6 +109,15 @@ jobs:
      # Needed for trivyignore
      - name: Checkout
        uses: actions/checkout@v4

      - name: Check if stable release version
        id: check_version
        run: |
          if [[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && [[ "${{ github.ref_name }}" != *"cloud"* ]]; then
            echo "is_stable=true" >> $GITHUB_OUTPUT
          else
            echo "is_stable=false" >> $GITHUB_OUTPUT
          fi

      - name: Download digests
        uses: actions/download-artifact@v4

@@ -119,7 +138,8 @@ jobs:
            latest=false
          tags: |
            type=raw,value=${{ github.ref_name }}
            type=raw,value=${{ env.LATEST_TAG == 'true' && 'latest' || '' }}
            type=raw,value=${{ steps.check_version.outputs.is_stable == 'true' && 'latest' || '' }}
            type=raw,value=${{ env.EDGE_TAG == 'true' && 'edge' || '' }}

      - name: Login to Docker Hub
        uses: docker/login-action@v3
@@ -11,8 +11,8 @@ env:
  BUILDKIT_PROGRESS: plain
  DEPLOYMENT: ${{ contains(github.ref_name, 'cloud') && 'cloud' || 'standalone' }}

  # don't tag cloud images with "latest"
  LATEST_TAG: ${{ contains(github.ref_name, 'latest') && !contains(github.ref_name, 'cloud') }}
  # tag nightly builds with "edge"
  EDGE_TAG: ${{ startsWith(github.ref_name, 'nightly-latest') }}

jobs:

@@ -88,7 +88,7 @@ jobs:
          push: true
          tags: ${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}-amd64
          build-args: |
            DANSWER_VERSION=${{ github.ref_name }}
            ONYX_VERSION=${{ github.ref_name }}
          outputs: type=registry
          provenance: false
          cache-from: type=s3,prefix=cache/${{ github.repository }}/${{ env.DEPLOYMENT }}/model-server-${{ env.PLATFORM_PAIR }}/,region=${{ env.RUNS_ON_AWS_REGION }},bucket=${{ env.RUNS_ON_S3_BUCKET_CACHE }}

@@ -134,7 +134,7 @@ jobs:
          push: true
          tags: ${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}-arm64
          build-args: |
            DANSWER_VERSION=${{ github.ref_name }}
            ONYX_VERSION=${{ github.ref_name }}
          outputs: type=registry
          provenance: false
          cache-from: type=s3,prefix=cache/${{ github.repository }}/${{ env.DEPLOYMENT }}/model-server-${{ env.PLATFORM_PAIR }}/,region=${{ env.RUNS_ON_AWS_REGION }},bucket=${{ env.RUNS_ON_S3_BUCKET_CACHE }}

@@ -145,6 +145,15 @@ jobs:
    if: needs.check_model_server_changes.outputs.changed == 'true'
    runs-on: ubuntu-latest
    steps:
      - name: Check if stable release version
        id: check_version
        run: |
          if [[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && [[ "${{ github.ref_name }}" != *"cloud"* ]]; then
            echo "is_stable=true" >> $GITHUB_OUTPUT
          else
            echo "is_stable=false" >> $GITHUB_OUTPUT
          fi

      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:

@@ -157,11 +166,16 @@
          docker buildx imagetools create -t ${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }} \
            ${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}-amd64 \
            ${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}-arm64
          if [[ "${{ env.LATEST_TAG }}" == "true" ]]; then
          if [[ "${{ steps.check_version.outputs.is_stable }}" == "true" ]]; then
            docker buildx imagetools create -t ${{ env.REGISTRY_IMAGE }}:latest \
              ${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}-amd64 \
              ${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}-arm64
          fi
          if [[ "${{ env.EDGE_TAG }}" == "true" ]]; then
            docker buildx imagetools create -t ${{ env.REGISTRY_IMAGE }}:edge \
              ${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}-amd64 \
              ${{ env.REGISTRY_IMAGE }}:${{ github.ref_name }}-arm64
          fi

      - name: Run Trivy vulnerability scanner
        uses: nick-fields/retry@v3
@@ -7,7 +7,10 @@ on:

env:
  REGISTRY_IMAGE: onyxdotapp/onyx-web-server
  LATEST_TAG: ${{ contains(github.ref_name, 'latest') }}

  # tag nightly builds with "edge"
  EDGE_TAG: ${{ startsWith(github.ref_name, 'nightly-latest') }}

  DEPLOYMENT: standalone

jobs:

@@ -45,6 +48,15 @@ jobs:
          platform=${{ matrix.platform }}
          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV

      - name: Check if stable release version
        id: check_version
        run: |
          if [[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
            echo "is_stable=true" >> $GITHUB_OUTPUT
          else
            echo "is_stable=false" >> $GITHUB_OUTPUT
          fi

      - name: Checkout
        uses: actions/checkout@v4

@@ -57,7 +69,8 @@ jobs:
            latest=false
          tags: |
            type=raw,value=${{ github.ref_name }}
            type=raw,value=${{ env.LATEST_TAG == 'true' && 'latest' || '' }}
            type=raw,value=${{ steps.check_version.outputs.is_stable == 'true' && 'latest' || '' }}
            type=raw,value=${{ env.EDGE_TAG == 'true' && 'edge' || '' }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

@@ -107,6 +120,15 @@ jobs:
    if: needs.precheck.outputs.should-run == 'true'
    runs-on: ubuntu-latest
    steps:
      - name: Check if stable release version
        id: check_version
        run: |
          if [[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && [[ "${{ github.ref_name }}" != *"cloud"* ]]; then
            echo "is_stable=true" >> $GITHUB_OUTPUT
          else
            echo "is_stable=false" >> $GITHUB_OUTPUT
          fi

      - name: Download digests
        uses: actions/download-artifact@v4
        with:

@@ -126,7 +148,8 @@ jobs:
            latest=false
          tags: |
            type=raw,value=${{ github.ref_name }}
            type=raw,value=${{ env.LATEST_TAG == 'true' && 'latest' || '' }}
            type=raw,value=${{ steps.check_version.outputs.is_stable == 'true' && 'latest' || '' }}
            type=raw,value=${{ env.EDGE_TAG == 'true' && 'edge' || '' }}

      - name: Login to Docker Hub
        uses: docker/login-action@v3
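All three image workflows above replace the `LATEST_TAG` name heuristic with the `check_version` step, so only plain `vX.Y.Z` tags (excluding cloud refs in most of the checks) are also published as `latest`. The regex logic can be exercised locally with a small sketch; the sample ref names are invented:

```bash
# Mirrors the "Check if stable release version" step; sample refs are invented.
is_stable() {
  if [[ "$1" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && [[ "$1" != *"cloud"* ]]; then
    echo "$1 -> is_stable=true"    # would also be tagged "latest"
  else
    echo "$1 -> is_stable=false"
  fi
}
is_stable "v1.2.3"                  # true: plain stable tag
is_stable "v1.2.3-beta.4"           # false: pre-release
is_stable "v1.2.3-cloud"            # false: fails the regex and the cloud check
is_stable "nightly-latest-20250101" # false: handled by EDGE_TAG instead
```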
.github/workflows/docker-tag-latest.yml | 4

@@ -35,3 +35,7 @@ jobs:
      - name: Pull, Tag and Push API Server Image
        run: |
          docker buildx imagetools create -t onyxdotapp/onyx-backend:latest onyxdotapp/onyx-backend:${{ github.event.inputs.version }}

      - name: Pull, Tag and Push Model Server Image
        run: |
          docker buildx imagetools create -t onyxdotapp/onyx-model-server:latest onyxdotapp/onyx-model-server:${{ github.event.inputs.version }}
.github/workflows/helm-chart-releases.yml | 6

@@ -25,9 +25,11 @@ jobs:

      - name: Add required Helm repositories
        run: |
          helm repo add bitnami https://charts.bitnami.com/bitnami
          helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
          helm repo add onyx-vespa https://onyx-dot-app.github.io/vespa-helm-charts
          helm repo add keda https://kedacore.github.io/charts
          helm repo add cloudnative-pg https://cloudnative-pg.github.io/charts
          helm repo add ot-container-kit https://ot-container-kit.github.io/helm-charts
          helm repo add minio https://charts.min.io/
          helm repo update

      - name: Build chart dependencies
.github/workflows/hotfix-release-branches.yml | 171 (deleted)

@@ -1,171 +0,0 @@
# This workflow is intended to be manually triggered via the GitHub Action tab.
# Given a hotfix branch, it will attempt to open a PR to all release branches and
# by default auto merge them

name: Hotfix release branches

on:
  workflow_dispatch:
    inputs:
      hotfix_commit:
        description: "Hotfix commit hash"
        required: true
      hotfix_suffix:
        description: "Hotfix branch suffix (e.g. hotfix/v0.8-{suffix})"
        required: true
      release_branch_pattern:
        description: "Release branch pattern (regex)"
        required: true
        default: "release/.*"
      auto_merge:
        description: "Automatically merge the hotfix PRs"
        required: true
        type: choice
        default: "true"
        options:
          - true
          - false

jobs:
  hotfix_release_branches:
    permissions: write-all
    # See https://runs-on.com/runners/linux/
    # use a lower powered instance since this just does i/o to docker hub
    runs-on: [runs-on, runner=2cpu-linux-x64, "run-id=${{ github.run_id }}"]
    steps:
      # needs RKUO_DEPLOY_KEY for write access to merge PR's
      - name: Checkout Repository
        uses: actions/checkout@v4
        with:
          ssh-key: "${{ secrets.RKUO_DEPLOY_KEY }}"
          fetch-depth: 0

      - name: Set up Git user
        run: |
          git config user.name "Richard Kuo [bot]"
          git config user.email "rkuo[bot]@onyx.app"

      - name: Fetch All Branches
        run: |
          git fetch --all --prune

      - name: Verify Hotfix Commit Exists
        run: |
          git rev-parse --verify "${{ github.event.inputs.hotfix_commit }}" || { echo "Commit not found: ${{ github.event.inputs.hotfix_commit }}"; exit 1; }

      - name: Get Release Branches
        id: get_release_branches
        run: |
          BRANCHES=$(git branch -r | grep -E "${{ github.event.inputs.release_branch_pattern }}" | sed 's|origin/||' | tr -d ' ')
          if [ -z "$BRANCHES" ]; then
            echo "No release branches found matching pattern '${{ github.event.inputs.release_branch_pattern }}'."
            exit 1
          fi

          echo "Found release branches:"
          echo "$BRANCHES"

          # Join the branches into a single line separated by commas
          BRANCHES_JOINED=$(echo "$BRANCHES" | tr '\n' ',' | sed 's/,$//')

          # Set the branches as an output
          echo "branches=$BRANCHES_JOINED" >> $GITHUB_OUTPUT

      # notes on all the vagaries of wiring up automated PR's
      # https://github.com/peter-evans/create-pull-request/blob/main/docs/concepts-guidelines.md#triggering-further-workflow-runs
      # we must use a custom token for GH_TOKEN to trigger the subsequent PR checks
      - name: Create and Merge Pull Requests to Matching Release Branches
        env:
          HOTFIX_COMMIT: ${{ github.event.inputs.hotfix_commit }}
          HOTFIX_SUFFIX: ${{ github.event.inputs.hotfix_suffix }}
          AUTO_MERGE: ${{ github.event.inputs.auto_merge }}
          GH_TOKEN: ${{ secrets.RKUO_PERSONAL_ACCESS_TOKEN }}
        run: |
          # Get the branches from the previous step
          BRANCHES="${{ steps.get_release_branches.outputs.branches }}"

          # Convert BRANCHES to an array
          IFS=$',' read -ra BRANCH_ARRAY <<< "$BRANCHES"

          # Loop through each release branch and create and merge a PR
          for RELEASE_BRANCH in "${BRANCH_ARRAY[@]}"; do
            echo "Processing $RELEASE_BRANCH..."

            # Parse out the release version by removing "release/" from the branch name
            RELEASE_VERSION=${RELEASE_BRANCH#release/}
            echo "Release version parsed: $RELEASE_VERSION"

            HOTFIX_BRANCH="hotfix/${RELEASE_VERSION}-${HOTFIX_SUFFIX}"
            echo "Creating PR from $HOTFIX_BRANCH to $RELEASE_BRANCH"

            # Checkout the release branch
            echo "Checking out $RELEASE_BRANCH"
            git checkout "$RELEASE_BRANCH"

            # Create the new hotfix branch
            if git rev-parse --verify "$HOTFIX_BRANCH" >/dev/null 2>&1; then
              echo "Hotfix branch $HOTFIX_BRANCH already exists. Skipping branch creation."
            else
              echo "Branching $RELEASE_BRANCH to $HOTFIX_BRANCH"
              git checkout -b "$HOTFIX_BRANCH"
            fi

            # Check if the hotfix commit is a merge commit
            if git rev-list --merges -n 1 "$HOTFIX_COMMIT" >/dev/null 2>&1; then
              # -m 1 uses the target branch as the base (which is what we want)
              echo "Hotfix commit $HOTFIX_COMMIT is a merge commit, using -m 1 for cherry-pick"
              CHERRY_PICK_CMD="git cherry-pick -m 1 $HOTFIX_COMMIT"
            else
              CHERRY_PICK_CMD="git cherry-pick $HOTFIX_COMMIT"
            fi

            # Perform the cherry-pick
            echo "Executing: $CHERRY_PICK_CMD"
            eval "$CHERRY_PICK_CMD"

            if [ $? -ne 0 ]; then
              echo "Cherry-pick failed for $HOTFIX_COMMIT on $HOTFIX_BRANCH. Aborting..."
              git cherry-pick --abort
              continue
            fi

            # Push the hotfix branch to the remote
            echo "Pushing $HOTFIX_BRANCH..."
            git push origin "$HOTFIX_BRANCH"
            echo "Hotfix branch $HOTFIX_BRANCH created and pushed."

            # Check if PR already exists
            EXISTING_PR=$(gh pr list --head "$HOTFIX_BRANCH" --base "$RELEASE_BRANCH" --state open --json number --jq '.[0].number')

            if [ -n "$EXISTING_PR" ]; then
              echo "An open PR already exists: #$EXISTING_PR. Skipping..."
              continue
            fi

            # Create a new PR and capture the output
            PR_OUTPUT=$(gh pr create --title "Merge $HOTFIX_BRANCH into $RELEASE_BRANCH" \
              --body "Automated PR to merge \`$HOTFIX_BRANCH\` into \`$RELEASE_BRANCH\`." \
              --head "$HOTFIX_BRANCH" --base "$RELEASE_BRANCH")

            # Extract the URL from the output
            PR_URL=$(echo "$PR_OUTPUT" | grep -Eo 'https://github.com/[^ ]+')
            echo "Pull request created: $PR_URL"

            # Extract PR number from URL
            PR_NUMBER=$(basename "$PR_URL")
            echo "Pull request created: $PR_NUMBER"

            if [ "$AUTO_MERGE" == "true" ]; then
              echo "Attempting to merge pull request #$PR_NUMBER"

              # Attempt to merge the PR
              gh pr merge "$PR_NUMBER" --merge --auto --delete-branch

              if [ $? -eq 0 ]; then
                echo "Pull request #$PR_NUMBER merged successfully."
              else
                # Optionally, handle the error or continue
                echo "Failed to merge pull request #$PR_NUMBER."
              fi
            fi
          done
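One detail of the deleted workflow worth flagging: `git rev-list --merges -n 1 <commit>` exits 0 for any resolvable commit (it lists merge commits reachable from it), so the detection above was not actually keyed to the hotfix commit itself. A parent-count test does that directly; the sketch below is not from the repo:

```bash
# A commit is a merge iff it has a second parent; ^2 resolves only for merges.
if git rev-parse --verify --quiet "${HOTFIX_COMMIT}^2" >/dev/null; then
  git cherry-pick -m 1 "$HOTFIX_COMMIT"  # merge commit: pick relative to parent 1
else
  git cherry-pick "$HOTFIX_COMMIT"       # ordinary commit
fi
```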
.github/workflows/pr-backport-autotrigger.yml | 124 (deleted)

@@ -1,124 +0,0 @@
name: Backport on Merge

# Note this workflow does not trigger the builds, be sure to manually tag the branches to trigger the builds

on:
  pull_request:
    types: [closed] # Later we check for merge so only PRs that go in can get backported

permissions:
  contents: write
  actions: write

jobs:
  backport:
    if: github.event.pull_request.merged == true
    runs-on: ubuntu-latest
    env:
      GITHUB_TOKEN: ${{ secrets.YUHONG_GH_ACTIONS }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          ssh-key: "${{ secrets.RKUO_DEPLOY_KEY }}"
          fetch-depth: 0

      - name: Set up Git user
        run: |
          git config user.name "Richard Kuo [bot]"
          git config user.email "rkuo[bot]@onyx.app"
          git fetch --prune

      - name: Check for Backport Checkbox
        id: checkbox-check
        run: |
          PR_BODY="${{ github.event.pull_request.body }}"
          if [[ "$PR_BODY" == *"[x] This PR should be backported"* ]]; then
            echo "backport=true" >> $GITHUB_OUTPUT
          else
            echo "backport=false" >> $GITHUB_OUTPUT
          fi

      - name: List and sort release branches
        id: list-branches
        run: |
          git fetch --all --tags
          BRANCHES=$(git for-each-ref --format='%(refname:short)' refs/remotes/origin/release/* | sed 's|origin/release/||' | sort -Vr)
          BETA=$(echo "$BRANCHES" | head -n 1)
          STABLE=$(echo "$BRANCHES" | head -n 2 | tail -n 1)
          echo "beta=release/$BETA" >> $GITHUB_OUTPUT
          echo "stable=release/$STABLE" >> $GITHUB_OUTPUT
          # Fetch latest tags for beta and stable
          LATEST_BETA_TAG=$(git tag -l "v[0-9]*.[0-9]*.[0-9]*-beta.[0-9]*" | grep -E "^v[0-9]+\.[0-9]+\.[0-9]+-beta\.[0-9]+$" | grep -v -- "-cloud" | sort -Vr | head -n 1)
          LATEST_STABLE_TAG=$(git tag -l "v[0-9]*.[0-9]*.[0-9]*" | grep -E "^v[0-9]+\.[0-9]+\.[0-9]+$" | sort -Vr | head -n 1)

          # Handle case where no beta tags exist
          if [[ -z "$LATEST_BETA_TAG" ]]; then
            NEW_BETA_TAG="v1.0.0-beta.1"
          else
            NEW_BETA_TAG=$(echo $LATEST_BETA_TAG | awk -F '[.-]' '{print $1 "." $2 "." $3 "-beta." ($NF+1)}')
          fi

          # Increment latest stable tag
          NEW_STABLE_TAG=$(echo $LATEST_STABLE_TAG | awk -F '.' '{print $1 "." $2 "." ($3+1)}')
          echo "latest_beta_tag=$LATEST_BETA_TAG" >> $GITHUB_OUTPUT
          echo "latest_stable_tag=$LATEST_STABLE_TAG" >> $GITHUB_OUTPUT
          echo "new_beta_tag=$NEW_BETA_TAG" >> $GITHUB_OUTPUT
          echo "new_stable_tag=$NEW_STABLE_TAG" >> $GITHUB_OUTPUT

      - name: Echo branch and tag information
        run: |
          echo "Beta branch: ${{ steps.list-branches.outputs.beta }}"
          echo "Stable branch: ${{ steps.list-branches.outputs.stable }}"
          echo "Latest beta tag: ${{ steps.list-branches.outputs.latest_beta_tag }}"
          echo "Latest stable tag: ${{ steps.list-branches.outputs.latest_stable_tag }}"
          echo "New beta tag: ${{ steps.list-branches.outputs.new_beta_tag }}"
          echo "New stable tag: ${{ steps.list-branches.outputs.new_stable_tag }}"

      - name: Trigger Backport
        if: steps.checkbox-check.outputs.backport == 'true'
        run: |
          set -e
          echo "Backporting to beta ${{ steps.list-branches.outputs.beta }} and stable ${{ steps.list-branches.outputs.stable }}"

          # Echo the merge commit SHA
          echo "Merge commit SHA: ${{ github.event.pull_request.merge_commit_sha }}"

          # Fetch all history for all branches and tags
          git fetch --prune

          # Reset and prepare the beta branch
          git checkout ${{ steps.list-branches.outputs.beta }}
          echo "Last 5 commits on beta branch:"
          git log -n 5 --pretty=format:"%H"
          echo "" # Newline for formatting

          # Cherry-pick the merge commit from the merged PR
          git cherry-pick -m 1 ${{ github.event.pull_request.merge_commit_sha }} || {
            echo "Cherry-pick to beta failed due to conflicts."
            exit 1
          }

          # Create new beta branch/tag
          git tag ${{ steps.list-branches.outputs.new_beta_tag }}
          # Push the changes and tag to the beta branch using PAT
          git push origin ${{ steps.list-branches.outputs.beta }}
          git push origin ${{ steps.list-branches.outputs.new_beta_tag }}

          # Reset and prepare the stable branch
          git checkout ${{ steps.list-branches.outputs.stable }}
          echo "Last 5 commits on stable branch:"
          git log -n 5 --pretty=format:"%H"
          echo "" # Newline for formatting

          # Cherry-pick the merge commit from the merged PR
          git cherry-pick -m 1 ${{ github.event.pull_request.merge_commit_sha }} || {
            echo "Cherry-pick to stable failed due to conflicts."
            exit 1
          }

          # Create new stable branch/tag
          git tag ${{ steps.list-branches.outputs.new_stable_tag }}
          # Push the changes and tag to the stable branch using PAT
          git push origin ${{ steps.list-branches.outputs.stable }}
          git push origin ${{ steps.list-branches.outputs.new_stable_tag }}
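The tag arithmetic in the deleted `list-branches` step is compact awk and easy to misread; applied to invented input tags, the two expressions behave as follows:

```bash
# Same awk expressions as the deleted step, applied to made-up tags.
echo "v0.29.1" | awk -F '.' '{print $1 "." $2 "." ($3+1)}'
# -> v0.29.2  (stable: bump the patch component)
echo "v0.30.0-beta.7" | awk -F '[.-]' '{print $1 "." $2 "." $3 "-beta." ($NF+1)}'
# -> v0.30.0-beta.8  (beta: bump the trailing beta counter)
```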
@@ -14,15 +14,16 @@ env:
  S3_ENDPOINT_URL: "http://localhost:9004"

  # Confluence
  CONFLUENCE_TEST_SPACE_URL: ${{ secrets.CONFLUENCE_TEST_SPACE_URL }}
  CONFLUENCE_TEST_SPACE: ${{ secrets.CONFLUENCE_TEST_SPACE }}
  CONFLUENCE_TEST_SPACE_URL: ${{ vars.CONFLUENCE_TEST_SPACE_URL }}
  CONFLUENCE_TEST_SPACE: ${{ vars.CONFLUENCE_TEST_SPACE }}
  CONFLUENCE_TEST_PAGE_ID: ${{ secrets.CONFLUENCE_TEST_PAGE_ID }}
  CONFLUENCE_IS_CLOUD: ${{ secrets.CONFLUENCE_IS_CLOUD }}
  CONFLUENCE_USER_NAME: ${{ secrets.CONFLUENCE_USER_NAME }}
  CONFLUENCE_USER_NAME: ${{ vars.CONFLUENCE_USER_NAME }}
  CONFLUENCE_ACCESS_TOKEN: ${{ secrets.CONFLUENCE_ACCESS_TOKEN }}
  CONFLUENCE_ACCESS_TOKEN_SCOPED: ${{ secrets.CONFLUENCE_ACCESS_TOKEN_SCOPED }}

  # LLMs
  OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
  ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}

jobs:
  discover-test-dirs:

@@ -42,8 +43,8 @@ jobs:
  external-dependency-unit-tests:
    needs: discover-test-dirs
    # See https://runs-on.com/runners/linux/
    runs-on: [runs-on, runner=8cpu-linux-x64, "run-id=${{ github.run_id }}"]
    # Use larger runner with more resources for Vespa
    runs-on: [runs-on, runner=16cpu-linux-x64, "run-id=${{ github.run_id }}"]

    strategy:
      fail-fast: false

@@ -52,6 +53,7 @@
    env:
      PYTHONPATH: ./backend
      MODEL_SERVER_HOST: "disabled"

    steps:
      - name: Checkout code

@@ -77,12 +79,25 @@
      - name: Set up Standard Dependencies
        run: |
          cd deployment/docker_compose
          docker compose -f docker-compose.dev.yml -p onyx-stack up -d minio relational_db cache index
          docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d minio relational_db cache index

      - name: Wait for services
        run: |
          echo "Waiting for services to be ready..."
          sleep 30

          # Wait for Vespa specifically
          echo "Waiting for Vespa to be ready..."
          timeout 300 bash -c 'until curl -f -s http://localhost:8081/ApplicationStatus > /dev/null 2>&1; do echo "Vespa not ready, waiting..."; sleep 10; done' || echo "Vespa timeout - continuing anyway"

          echo "Services should be ready now"

      - name: Run migrations
        run: |
          cd backend
          # Run migrations to head
          alembic upgrade head
          alembic heads --verbose

      - name: Run Tests for ${{ matrix.test-dir }}
        shell: script -q -e -c "bash --noprofile --norc -eo pipefail {0}"
.github/workflows/pr-helm-chart-testing.yml | 72

@@ -65,35 +65,45 @@ jobs:
        if: steps.list-changed.outputs.changed == 'true'
        run: |
          echo "=== Adding Helm repositories ==="
          helm repo add bitnami https://charts.bitnami.com/bitnami
          helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
          helm repo add vespa https://onyx-dot-app.github.io/vespa-helm-charts
          helm repo add cloudnative-pg https://cloudnative-pg.github.io/charts
          helm repo add ot-container-kit https://ot-container-kit.github.io/helm-charts
          helm repo add minio https://charts.min.io/
          helm repo update

      - name: Pre-pull critical images
      - name: Install Redis operator
        if: steps.list-changed.outputs.changed == 'true'
        shell: bash
        run: |
          echo "=== Installing redis-operator CRDs ==="
          helm upgrade --install redis-operator ot-container-kit/redis-operator \
            --namespace redis-operator --create-namespace --wait --timeout 300s

      - name: Pre-pull required images
        if: steps.list-changed.outputs.changed == 'true'
        run: |
          echo "=== Pre-pulling critical images to avoid timeout ==="
          # Get kind cluster name
          echo "=== Pre-pulling required images to avoid timeout ==="
          KIND_CLUSTER=$(kubectl config current-context | sed 's/kind-//')
          echo "Kind cluster: $KIND_CLUSTER"

          # Pre-pull images that are likely to be used
          echo "Pre-pulling PostgreSQL image..."
          docker pull postgres:15-alpine || echo "Failed to pull postgres:15-alpine"
          kind load docker-image postgres:15-alpine --name $KIND_CLUSTER || echo "Failed to load postgres image"

          echo "Pre-pulling Redis image..."
          docker pull redis:7-alpine || echo "Failed to pull redis:7-alpine"
          kind load docker-image redis:7-alpine --name $KIND_CLUSTER || echo "Failed to load redis image"

          echo "Pre-pulling Onyx images..."
          docker pull docker.io/onyxdotapp/onyx-web-server:latest || echo "Failed to pull onyx web server"
          docker pull docker.io/onyxdotapp/onyx-backend:latest || echo "Failed to pull onyx backend"
          kind load docker-image docker.io/onyxdotapp/onyx-web-server:latest --name $KIND_CLUSTER || echo "Failed to load onyx web server"
          kind load docker-image docker.io/onyxdotapp/onyx-backend:latest --name $KIND_CLUSTER || echo "Failed to load onyx backend"

          IMAGES=(
            "ghcr.io/cloudnative-pg/cloudnative-pg:1.27.0"
            "quay.io/opstree/redis:v7.0.15"
            "docker.io/onyxdotapp/onyx-web-server:latest"
          )

          for image in "${IMAGES[@]}"; do
            echo "Pre-pulling $image"
            if docker pull "$image"; then
              kind load docker-image "$image" --name "$KIND_CLUSTER" || echo "Failed to load $image into kind"
            else
              echo "Failed to pull $image"
            fi
          done

          echo "=== Images loaded into Kind cluster ==="
          docker exec $KIND_CLUSTER-control-plane crictl images | grep -E "(postgres|redis|onyx)" || echo "Some images may still be loading..."
          docker exec "$KIND_CLUSTER"-control-plane crictl images | grep -E "(cloudnative-pg|redis|onyx)" || echo "Some images may still be loading..."

      - name: Validate chart dependencies
        if: steps.list-changed.outputs.changed == 'true'

@@ -149,6 +159,7 @@
          # Run the actual installation with detailed logging
          echo "=== Starting ct install ==="
          set +e
          ct install --all \
            --helm-extra-set-args="\
            --set=nginx.enabled=false \

@@ -156,8 +167,10 @@
            --set=vespa.enabled=false \
            --set=slackbot.enabled=false \
            --set=postgresql.enabled=true \
            --set=postgresql.primary.persistence.enabled=false \
            --set=postgresql.nameOverride=cloudnative-pg \
            --set=postgresql.cluster.storage.storageClass=standard \
            --set=redis.enabled=true \
            --set=redis.storageSpec.volumeClaimTemplate.spec.storageClassName=standard \
            --set=webserver.replicaCount=1 \
            --set=api.replicaCount=0 \
            --set=inferenceCapability.replicaCount=0 \

@@ -169,11 +182,20 @@
            --set=celery_worker_light.replicaCount=0 \
            --set=celery_worker_monitoring.replicaCount=0 \
            --set=celery_worker_primary.replicaCount=0 \
            --set=celery_worker_user_file_processing.replicaCount=0 \
            --set=celery_worker_user_files_indexing.replicaCount=0" \
            --helm-extra-args="--timeout 900s --debug" \
            --debug --config ct.yaml

          echo "=== Installation completed successfully ==="
          CT_EXIT=$?
          set -e

          if [[ $CT_EXIT -ne 0 ]]; then
            echo "ct install failed with exit code $CT_EXIT"
            exit $CT_EXIT
          else
            echo "=== Installation completed successfully ==="
          fi

          kubectl get pods --all-namespaces

      - name: Post-install verification

@@ -198,7 +220,7 @@
          echo "=== Recent logs for debugging ==="
          kubectl logs --all-namespaces --tail=50 | grep -i "error\|timeout\|failed\|pull" || echo "No error logs found"

          echo "=== Helm releases ==="
          helm list --all-namespaces
          # the following would install only changed charts, but we only have one chart so
.github/workflows/pr-integration-tests.yml | 93

@@ -19,12 +19,14 @@ env:
  # Test Environment Variables
  OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
  SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
  CONFLUENCE_TEST_SPACE_URL: ${{ secrets.CONFLUENCE_TEST_SPACE_URL }}
  CONFLUENCE_USER_NAME: ${{ secrets.CONFLUENCE_USER_NAME }}
  CONFLUENCE_TEST_SPACE_URL: ${{ vars.CONFLUENCE_TEST_SPACE_URL }}
  CONFLUENCE_USER_NAME: ${{ vars.CONFLUENCE_USER_NAME }}
  CONFLUENCE_ACCESS_TOKEN: ${{ secrets.CONFLUENCE_ACCESS_TOKEN }}
  CONFLUENCE_ACCESS_TOKEN_SCOPED: ${{ secrets.CONFLUENCE_ACCESS_TOKEN_SCOPED }}
  JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
  JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
  JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
  JIRA_API_TOKEN_SCOPED: ${{ secrets.JIRA_API_TOKEN_SCOPED }}
  PERM_SYNC_SHAREPOINT_CLIENT_ID: ${{ secrets.PERM_SYNC_SHAREPOINT_CLIENT_ID }}
  PERM_SYNC_SHAREPOINT_PRIVATE_KEY: ${{ secrets.PERM_SYNC_SHAREPOINT_PRIVATE_KEY }}
  PERM_SYNC_SHAREPOINT_CERTIFICATE_PASSWORD: ${{ secrets.PERM_SYNC_SHAREPOINT_CERTIFICATE_PASSWORD }}

@@ -65,46 +67,8 @@ jobs:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
          cache: "pip"
          cache-dependency-path: |
            backend/requirements/default.txt
            backend/requirements/dev.txt

      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install --retries 5 --timeout 30 -r backend/requirements/default.txt
          pip install --retries 5 --timeout 30 -r backend/requirements/dev.txt

      - name: Generate OpenAPI schema
        working-directory: ./backend
        env:
          PYTHONPATH: "."
        run: |
          python scripts/onyx_openapi_schema.py --filename generated/openapi.json

      - name: Generate OpenAPI Python client
        working-directory: ./backend
        run: |
          docker run --rm \
            -v "${{ github.workspace }}/backend/generated:/local" \
            openapitools/openapi-generator-cli generate \
            -i /local/openapi.json \
            -g python \
            -o /local/onyx_openapi_client \
            --package-name onyx_openapi_client \
            --skip-validate-spec \
            --openapi-normalizer "SIMPLIFY_ONEOF_ANYOF=true,SET_OAS3_NULLABLE=true"

      - name: Upload OpenAPI artifacts
        uses: actions/upload-artifact@v4
        with:
          name: openapi-artifacts
          path: backend/generated/
      - name: Prepare build
        uses: ./.github/actions/prepare-build

  build-backend-image:
    runs-on: blacksmith-16vcpu-ubuntu-2404-arm

@@ -130,6 +94,9 @@ jobs:
          platforms: linux/arm64
          tags: ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-backend:test-${{ github.run_id }}
          push: true
          outputs: type=registry
          no-cache: ${{ vars.DOCKER_NO_CACHE == 'true' }}

  build-model-server-image:
    runs-on: blacksmith-16vcpu-ubuntu-2404-arm

@@ -157,6 +124,8 @@ jobs:
          push: true
          outputs: type=registry
          provenance: false
          no-cache: ${{ vars.DOCKER_NO_CACHE == 'true' }}

  build-integration-image:
    needs: prepare-build

@@ -189,6 +158,8 @@ jobs:
          platforms: linux/arm64
          tags: ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-integration:test-${{ github.run_id }}
          push: true
          outputs: type=registry
          no-cache: ${{ vars.DOCKER_NO_CACHE == 'true' }}

  integration-tests:
    needs:

@@ -230,9 +201,9 @@ jobs:
          # Pull all images from registry in parallel
          echo "Pulling Docker images in parallel..."
          # Pull images from private registry
          (docker pull ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-backend:test-${{ github.run_id }}) &
          (docker pull ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-model-server:test-${{ github.run_id }}) &
          (docker pull ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-integration:test-${{ github.run_id }}) &
          (docker pull --platform linux/arm64 ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-backend:test-${{ github.run_id }}) &
          (docker pull --platform linux/arm64 ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-model-server:test-${{ github.run_id }}) &
          (docker pull --platform linux/arm64 ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-integration:test-${{ github.run_id }}) &

          # Wait for all background jobs to complete
          wait

@@ -257,7 +228,7 @@ jobs:
          IMAGE_TAG=test \
          INTEGRATION_TESTS_MODE=true \
          CHECK_TTL_MANAGEMENT_TASK_FREQUENCY_IN_HOURS=0.001 \
          docker compose -f docker-compose.dev.yml -p onyx-stack up \
          docker compose -f docker-compose.yml -f docker-compose.dev.yml up \
            relational_db \
            index \
            cache \

@@ -273,7 +244,7 @@ jobs:
        run: |
          echo "Starting wait-for-service script..."

          docker logs -f onyx-stack-api_server-1 &
          docker logs -f onyx-api_server-1 &

          start_time=$(date +%s)
          timeout=300 # 5 minutes in seconds

@@ -317,7 +288,7 @@ jobs:
          retry_wait_seconds: 10
          command: |
            echo "Running integration tests for ${{ matrix.test-dir.path }}..."
            docker run --rm --network onyx-stack_default \
            docker run --rm --network onyx_default \
              --name test-runner \
              -e POSTGRES_HOST=relational_db \
              -e POSTGRES_USER=postgres \

@@ -335,9 +306,11 @@ jobs:
              -e CONFLUENCE_TEST_SPACE_URL=${CONFLUENCE_TEST_SPACE_URL} \
              -e CONFLUENCE_USER_NAME=${CONFLUENCE_USER_NAME} \
              -e CONFLUENCE_ACCESS_TOKEN=${CONFLUENCE_ACCESS_TOKEN} \
              -e CONFLUENCE_ACCESS_TOKEN_SCOPED=${CONFLUENCE_ACCESS_TOKEN_SCOPED} \
              -e JIRA_BASE_URL=${JIRA_BASE_URL} \
              -e JIRA_USER_EMAIL=${JIRA_USER_EMAIL} \
              -e JIRA_API_TOKEN=${JIRA_API_TOKEN} \
              -e JIRA_API_TOKEN_SCOPED=${JIRA_API_TOKEN_SCOPED} \
              -e PERM_SYNC_SHAREPOINT_CLIENT_ID=${PERM_SYNC_SHAREPOINT_CLIENT_ID} \
              -e PERM_SYNC_SHAREPOINT_PRIVATE_KEY="${PERM_SYNC_SHAREPOINT_PRIVATE_KEY}" \
              -e PERM_SYNC_SHAREPOINT_CERTIFICATE_PASSWORD=${PERM_SYNC_SHAREPOINT_CERTIFICATE_PASSWORD} \

@@ -354,13 +327,13 @@ jobs:
        if: always()
        run: |
          cd deployment/docker_compose
          docker compose -f docker-compose.dev.yml -p onyx-stack logs --no-color api_server > $GITHUB_WORKSPACE/api_server.log || true
          docker compose logs --no-color api_server > $GITHUB_WORKSPACE/api_server.log || true

      - name: Dump all-container logs (optional)
        if: always()
        run: |
          cd deployment/docker_compose
          docker compose -f docker-compose.dev.yml -p onyx-stack logs --no-color > $GITHUB_WORKSPACE/docker-compose.log || true
          docker compose logs --no-color > $GITHUB_WORKSPACE/docker-compose.log || true

      - name: Upload logs
        if: always()

@@ -374,7 +347,7 @@ jobs:
        if: always()
        run: |
          cd deployment/docker_compose
          docker compose -f docker-compose.dev.yml -p onyx-stack down -v
          docker compose down -v

  multitenant-tests:

@@ -405,9 +378,9 @@ jobs:

      - name: Pull Docker images
        run: |
          (docker pull ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-backend:test-${{ github.run_id }}) &
          (docker pull ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-model-server:test-${{ github.run_id }}) &
          (docker pull ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-integration:test-${{ github.run_id }}) &
          (docker pull --platform linux/arm64 ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-backend:test-${{ github.run_id }}) &
          (docker pull --platform linux/arm64 ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-model-server:test-${{ github.run_id }}) &
          (docker pull --platform linux/arm64 ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-integration:test-${{ github.run_id }}) &
          wait
          docker tag ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-backend:test-${{ github.run_id }} onyxdotapp/onyx-backend:test
          docker tag ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-model-server:test-${{ github.run_id }} onyxdotapp/onyx-model-server:test

@@ -423,7 +396,7 @@ jobs:
          DISABLE_TELEMETRY=true \
          IMAGE_TAG=test \
          DEV_MODE=true \
          docker compose -f docker-compose.multitenant-dev.yml -p onyx-stack up \
          docker compose -f docker-compose.multitenant-dev.yml up \
            relational_db \
            index \
            cache \

@@ -438,7 +411,7 @@ jobs:
      - name: Wait for service to be ready (multi-tenant)
        run: |
          echo "Starting wait-for-service script for multi-tenant..."
          docker logs -f onyx-stack-api_server-1 &
          docker logs -f onyx-api_server-1 &
          start_time=$(date +%s)
          timeout=300
          while true; do

@@ -464,7 +437,7 @@ jobs:
      - name: Run Multi-Tenant Integration Tests
        run: |
          echo "Running multi-tenant integration tests..."
          docker run --rm --network onyx-stack_default \
          docker run --rm --network onyx_default \
            --name test-runner \
            -e POSTGRES_HOST=relational_db \
            -e POSTGRES_USER=postgres \

@@ -493,13 +466,13 @@ jobs:
        if: always()
        run: |
          cd deployment/docker_compose
          docker compose -f docker-compose.multitenant-dev.yml -p onyx-stack logs --no-color api_server > $GITHUB_WORKSPACE/api_server_multitenant.log || true
          docker compose -f docker-compose.multitenant-dev.yml logs --no-color api_server > $GITHUB_WORKSPACE/api_server_multitenant.log || true

      - name: Dump all-container logs (multi-tenant)
        if: always()
        run: |
          cd deployment/docker_compose
          docker compose -f docker-compose.multitenant-dev.yml -p onyx-stack logs --no-color > $GITHUB_WORKSPACE/docker-compose-multitenant.log || true
          docker compose -f docker-compose.multitenant-dev.yml logs --no-color > $GITHUB_WORKSPACE/docker-compose-multitenant.log || true

      - name: Upload logs (multi-tenant)
        if: always()

@@ -512,7 +485,7 @@ jobs:
        if: always()
        run: |
          cd deployment/docker_compose
          docker compose -f docker-compose.multitenant-dev.yml -p onyx-stack down -v
          docker compose -f docker-compose.multitenant-dev.yml down -v

  required:
    runs-on: blacksmith-2vcpu-ubuntu-2404-arm
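A recurring change in these hunks is dropping the explicit `-p onyx-stack` project flag and layering the base `docker-compose.yml` under the dev override. The renamed containers (`onyx-api_server-1`) and network (`onyx_default`) imply the project now resolves to `onyx`, presumably from a `name:` key in the base compose file, which is not shown in this diff. Compose's naming rule, with that assumed project name:

```bash
# Compose derives resource names from the project name; with project "onyx":
#   container: <project>-<service>-<index>  -> onyx-api_server-1
#   network:   <project>_<network>          -> onyx_default
# Equivalent explicit invocation (project name assumed, not taken from the diff):
docker compose -p onyx -f docker-compose.yml -f docker-compose.dev.yml up -d \
  relational_db index cache
```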
.github/workflows/pr-jest-tests.yml | 35 (new file)

@@ -0,0 +1,35 @@
name: Run Jest Tests
concurrency:
  group: Run-Jest-Tests-${{ github.workflow }}-${{ github.head_ref || github.event.workflow_run.head_branch || github.run_id }}
  cancel-in-progress: true

on: push

jobs:
  jest-tests:
    name: Jest Tests
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup node
        uses: actions/setup-node@v4
        with:
          node-version: 22

      - name: Install node dependencies
        working-directory: ./web
        run: npm ci

      - name: Run Jest tests
        working-directory: ./web
        run: npm test -- --ci --coverage --maxWorkers=50%

      - name: Upload coverage reports
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: jest-coverage-${{ github.run_id }}
          path: ./web/coverage
          retention-days: 7
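The same commands reproduce this CI job locally from the `web` directory, assuming Node 22 as the workflow installs:

```bash
cd web
npm ci                                        # clean install, matches CI
npm test -- --ci --coverage --maxWorkers=50%  # same flags the workflow passes to Jest
# coverage report lands in web/coverage, which CI uploads as an artifact
```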
.github/workflows/pr-mit-integration-tests.yml | 76

@@ -16,12 +16,14 @@ env:
  # Test Environment Variables
  OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
  SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
  CONFLUENCE_TEST_SPACE_URL: ${{ secrets.CONFLUENCE_TEST_SPACE_URL }}
  CONFLUENCE_USER_NAME: ${{ secrets.CONFLUENCE_USER_NAME }}
  CONFLUENCE_TEST_SPACE_URL: ${{ vars.CONFLUENCE_TEST_SPACE_URL }}
  CONFLUENCE_USER_NAME: ${{ vars.CONFLUENCE_USER_NAME }}
  CONFLUENCE_ACCESS_TOKEN: ${{ secrets.CONFLUENCE_ACCESS_TOKEN }}
  CONFLUENCE_ACCESS_TOKEN_SCOPED: ${{ secrets.CONFLUENCE_ACCESS_TOKEN_SCOPED }}
  JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
  JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
  JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
  JIRA_API_TOKEN_SCOPED: ${{ secrets.JIRA_API_TOKEN_SCOPED }}
  PERM_SYNC_SHAREPOINT_CLIENT_ID: ${{ secrets.PERM_SYNC_SHAREPOINT_CLIENT_ID }}
  PERM_SYNC_SHAREPOINT_PRIVATE_KEY: ${{ secrets.PERM_SYNC_SHAREPOINT_PRIVATE_KEY }}
  PERM_SYNC_SHAREPOINT_CERTIFICATE_PASSWORD: ${{ secrets.PERM_SYNC_SHAREPOINT_CERTIFICATE_PASSWORD }}

@@ -62,46 +64,8 @@ jobs:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
          cache: "pip"
          cache-dependency-path: |
            backend/requirements/default.txt
            backend/requirements/dev.txt

      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install --retries 5 --timeout 30 -r backend/requirements/default.txt
          pip install --retries 5 --timeout 30 -r backend/requirements/dev.txt

      - name: Generate OpenAPI schema
        working-directory: ./backend
        env:
          PYTHONPATH: "."
        run: |
          python scripts/onyx_openapi_schema.py --filename generated/openapi.json

      - name: Generate OpenAPI Python client
        working-directory: ./backend
        run: |
          docker run --rm \
            -v "${{ github.workspace }}/backend/generated:/local" \
            openapitools/openapi-generator-cli generate \
            -i /local/openapi.json \
            -g python \
            -o /local/onyx_openapi_client \
            --package-name onyx_openapi_client \
            --skip-validate-spec \
            --openapi-normalizer "SIMPLIFY_ONEOF_ANYOF=true,SET_OAS3_NULLABLE=true"

      - name: Upload OpenAPI artifacts
        uses: actions/upload-artifact@v4
        with:
          name: openapi-artifacts
          path: backend/generated/
      - name: Prepare build
        uses: ./.github/actions/prepare-build

  build-backend-image:
    runs-on: blacksmith-16vcpu-ubuntu-2404-arm

@@ -127,6 +91,9 @@ jobs:
          platforms: linux/arm64
          tags: ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-backend:test-${{ github.run_id }}
          push: true
          outputs: type=registry
          no-cache: ${{ vars.DOCKER_NO_CACHE == 'true' }}

  build-model-server-image:
    runs-on: blacksmith-16vcpu-ubuntu-2404-arm

@@ -154,6 +121,8 @@ jobs:
          push: true
          outputs: type=registry
          provenance: false
          no-cache: ${{ vars.DOCKER_NO_CACHE == 'true' }}

  build-integration-image:
    needs: prepare-build

@@ -186,6 +155,9 @@ jobs:
          platforms: linux/arm64
          tags: ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-integration:test-${{ github.run_id }}
          push: true
          outputs: type=registry
          no-cache: ${{ vars.DOCKER_NO_CACHE == 'true' }}

  integration-tests-mit:
    needs:

@@ -228,9 +200,9 @@ jobs:
          # Pull all images from registry in parallel
          echo "Pulling Docker images in parallel..."
          # Pull images from private registry
          (docker pull ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-backend:test-${{ github.run_id }}) &
          (docker pull ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-model-server:test-${{ github.run_id }}) &
          (docker pull ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-integration:test-${{ github.run_id }}) &
          (docker pull --platform linux/arm64 ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-backend:test-${{ github.run_id }}) &
          (docker pull --platform linux/arm64 ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-model-server:test-${{ github.run_id }}) &
          (docker pull --platform linux/arm64 ${{ env.PRIVATE_REGISTRY }}/integration-test-onyx-integration:test-${{ github.run_id }}) &

          # Wait for all background jobs to complete
          wait

@@ -253,7 +225,7 @@ jobs:
          DISABLE_TELEMETRY=true \
          IMAGE_TAG=test \
          INTEGRATION_TESTS_MODE=true \
          docker compose -f docker-compose.dev.yml -p onyx-stack up \
          docker compose -f docker-compose.yml -f docker-compose.dev.yml up \
            relational_db \
            index \
            cache \

@@ -269,7 +241,7 @@ jobs:
        run: |
          echo "Starting wait-for-service script..."

          docker logs -f onyx-stack-api_server-1 &
          docker logs -f onyx-api_server-1 &

          start_time=$(date +%s)
          timeout=300 # 5 minutes in seconds

@@ -314,7 +286,7 @@ jobs:
          retry_wait_seconds: 10
          command: |
            echo "Running integration tests for ${{ matrix.test-dir.path }}..."
            docker run --rm --network onyx-stack_default \
            docker run --rm --network onyx_default \
              --name test-runner \
              -e POSTGRES_HOST=relational_db \
              -e POSTGRES_USER=postgres \

@@ -332,9 +304,11 @@ jobs:
              -e CONFLUENCE_TEST_SPACE_URL=${CONFLUENCE_TEST_SPACE_URL} \
              -e CONFLUENCE_USER_NAME=${CONFLUENCE_USER_NAME} \
              -e CONFLUENCE_ACCESS_TOKEN=${CONFLUENCE_ACCESS_TOKEN} \
              -e CONFLUENCE_ACCESS_TOKEN_SCOPED=${CONFLUENCE_ACCESS_TOKEN_SCOPED} \
              -e JIRA_BASE_URL=${JIRA_BASE_URL} \
              -e JIRA_USER_EMAIL=${JIRA_USER_EMAIL} \
              -e JIRA_API_TOKEN=${JIRA_API_TOKEN} \
              -e JIRA_API_TOKEN_SCOPED=${JIRA_API_TOKEN_SCOPED} \
              -e PERM_SYNC_SHAREPOINT_CLIENT_ID=${PERM_SYNC_SHAREPOINT_CLIENT_ID} \
              -e PERM_SYNC_SHAREPOINT_PRIVATE_KEY="${PERM_SYNC_SHAREPOINT_PRIVATE_KEY}" \
              -e PERM_SYNC_SHAREPOINT_CERTIFICATE_PASSWORD=${PERM_SYNC_SHAREPOINT_CERTIFICATE_PASSWORD} \

@@ -351,13 +325,13 @@ jobs:
        if: always()
        run: |
          cd deployment/docker_compose
          docker compose -f docker-compose.dev.yml -p onyx-stack logs --no-color api_server > $GITHUB_WORKSPACE/api_server.log || true
          docker compose logs --no-color api_server > $GITHUB_WORKSPACE/api_server.log || true

      - name: Dump all-container logs (optional)
        if: always()
        run: |
          cd deployment/docker_compose
          docker compose -f docker-compose.dev.yml -p onyx-stack logs --no-color > $GITHUB_WORKSPACE/docker-compose.log || true
          docker compose logs --no-color > $GITHUB_WORKSPACE/docker-compose.log || true

      - name: Upload logs
        if: always()

@@ -371,7 +345,7 @@ jobs:
        if: always()
        run: |
          cd deployment/docker_compose
          docker compose -f docker-compose.dev.yml -p onyx-stack down -v
          docker compose down -v

  required:
43
.github/workflows/pr-playwright-tests.yml
vendored
43
.github/workflows/pr-playwright-tests.yml
vendored
@@ -56,6 +56,8 @@ jobs:
          provenance: false
          sbom: false
          push: true
+         outputs: type=registry
+         no-cache: ${{ vars.DOCKER_NO_CACHE == 'true' }}

  build-backend-image:
    runs-on: blacksmith-8vcpu-ubuntu-2404-arm
@@ -87,6 +89,8 @@ jobs:
          provenance: false
          sbom: false
          push: true
+         outputs: type=registry
+         no-cache: ${{ vars.DOCKER_NO_CACHE == 'true' }}

  build-model-server-image:
    runs-on: blacksmith-8vcpu-ubuntu-2404-arm
@@ -118,6 +122,8 @@ jobs:
          provenance: false
          sbom: false
          push: true
+         outputs: type=registry
+         no-cache: ${{ vars.DOCKER_NO_CACHE == 'true' }}

  playwright-tests:
    needs: [build-web-image, build-backend-image, build-model-server-image]
@@ -179,24 +185,29 @@ jobs:
        working-directory: ./web
        run: npx playwright install --with-deps

+     - name: Create .env file for Docker Compose
+       run: |
+         cat <<EOF > deployment/docker_compose/.env
+         ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=true
+         AUTH_TYPE=basic
+         GEN_AI_API_KEY=${{ env.OPENAI_API_KEY }}
+         EXA_API_KEY=${{ env.EXA_API_KEY }}
+         REQUIRE_EMAIL_VERIFICATION=false
+         DISABLE_TELEMETRY=true
+         IMAGE_TAG=test
+         EOF

      - name: Start Docker containers
        run: |
          cd deployment/docker_compose
-         ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=true \
-         AUTH_TYPE=basic \
-         GEN_AI_API_KEY=${{ env.OPENAI_API_KEY }} \
-         EXA_API_KEY=${{ env.EXA_API_KEY }} \
-         REQUIRE_EMAIL_VERIFICATION=false \
-         DISABLE_TELEMETRY=true \
-         IMAGE_TAG=test \
-         docker compose -f docker-compose.dev.yml -p danswer-stack up -d
+         docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d
        id: start_docker

      - name: Wait for service to be ready
        run: |
          echo "Starting wait-for-service script..."

-         docker logs -f danswer-stack-api_server-1 &
+         docker logs -f onyx-api_server-1 &

          start_time=$(date +%s)
          timeout=300 # 5 minutes in seconds
@@ -228,14 +239,16 @@ jobs:

      - name: Run Playwright tests
        working-directory: ./web
-       run: npx playwright test
+       run: |
+         # Create test-results directory to ensure it exists for artifact upload
+         mkdir -p test-results
+         npx playwright test

      - uses: actions/upload-artifact@v4
        if: always()
        with:
-         # Chromatic automatically defaults to the test-results directory.
-         # Replace with the path to your custom directory and adjust the CHROMATIC_ARCHIVE_LOCATION environment variable accordingly.
-         name: test-results
+         # Includes test results and debug screenshots
+         name: playwright-test-results-${{ github.run_id }}
          path: ./web/test-results
          retention-days: 30

@@ -244,7 +257,7 @@ jobs:
        if: success() || failure()
        run: |
          cd deployment/docker_compose
-         docker compose -f docker-compose.dev.yml -p danswer-stack logs > docker-compose.log
+         docker compose logs > docker-compose.log
          mv docker-compose.log ${{ github.workspace }}/docker-compose.log

      - name: Upload logs
@@ -257,7 +270,7 @@ jobs:
      - name: Stop Docker containers
        run: |
          cd deployment/docker_compose
-         docker compose -f docker-compose.dev.yml -p danswer-stack down -v
+         docker compose down -v

      # NOTE: Chromatic UI diff testing is currently disabled.
      # We are using Playwright for local and CI testing without visual regression checks.
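The wait-for-service step appears only as fragments in the hunks above (`start_time=$(date +%s)`, `timeout=300`). For orientation, here is a minimal Python sketch of that polling pattern; the health URL and response handling are illustrative assumptions, not taken from the workflow itself.

```python
import time
import urllib.request

HEALTH_URL = "http://localhost:8080/health"  # assumed endpoint, not from the diff
TIMEOUT_SECONDS = 300  # 5 minutes, matching the workflow's timeout

start_time = time.time()
while time.time() - start_time < TIMEOUT_SECONDS:
    try:
        with urllib.request.urlopen(HEALTH_URL, timeout=5) as resp:
            if resp.status == 200:
                print("Service is ready.")
                break
    except OSError:
        pass  # server not accepting connections yet; keep polling
    time.sleep(5)
else:
    raise SystemExit("Service did not become ready within the timeout.")
```

The `else` branch runs only if the loop exhausts the timeout without hitting `break`, mirroring the fail-the-job behavior a CI wait script needs.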
109  .github/workflows/pr-python-connector-tests.yml  (vendored)
@@ -13,18 +13,28 @@ env:
  AWS_ACCESS_KEY_ID_DAILY_CONNECTOR_TESTS: ${{ secrets.AWS_ACCESS_KEY_ID_DAILY_CONNECTOR_TESTS }}
  AWS_SECRET_ACCESS_KEY_DAILY_CONNECTOR_TESTS: ${{ secrets.AWS_SECRET_ACCESS_KEY_DAILY_CONNECTOR_TESTS }}

+ # Cloudflare R2
+ R2_ACCOUNT_ID_DAILY_CONNECTOR_TESTS: ${{ vars.R2_ACCOUNT_ID_DAILY_CONNECTOR_TESTS }}
+ R2_ACCESS_KEY_ID_DAILY_CONNECTOR_TESTS: ${{ secrets.R2_ACCESS_KEY_ID_DAILY_CONNECTOR_TESTS }}
+ R2_SECRET_ACCESS_KEY_DAILY_CONNECTOR_TESTS: ${{ secrets.R2_SECRET_ACCESS_KEY_DAILY_CONNECTOR_TESTS }}
+
+ # Google Cloud Storage
+ GCS_ACCESS_KEY_ID_DAILY_CONNECTOR_TESTS: ${{ secrets.GCS_ACCESS_KEY_ID_DAILY_CONNECTOR_TESTS }}
+ GCS_SECRET_ACCESS_KEY_DAILY_CONNECTOR_TESTS: ${{ secrets.GCS_SECRET_ACCESS_KEY_DAILY_CONNECTOR_TESTS }}

  # Confluence
- CONFLUENCE_TEST_SPACE_URL: ${{ secrets.CONFLUENCE_TEST_SPACE_URL }}
- CONFLUENCE_TEST_SPACE: ${{ secrets.CONFLUENCE_TEST_SPACE }}
+ CONFLUENCE_TEST_SPACE_URL: ${{ vars.CONFLUENCE_TEST_SPACE_URL }}
+ CONFLUENCE_TEST_SPACE: ${{ vars.CONFLUENCE_TEST_SPACE }}
  CONFLUENCE_TEST_PAGE_ID: ${{ secrets.CONFLUENCE_TEST_PAGE_ID }}
  CONFLUENCE_IS_CLOUD: ${{ secrets.CONFLUENCE_IS_CLOUD }}
- CONFLUENCE_USER_NAME: ${{ secrets.CONFLUENCE_USER_NAME }}
+ CONFLUENCE_USER_NAME: ${{ vars.CONFLUENCE_USER_NAME }}
  CONFLUENCE_ACCESS_TOKEN: ${{ secrets.CONFLUENCE_ACCESS_TOKEN }}
+ CONFLUENCE_ACCESS_TOKEN_SCOPED: ${{ secrets.CONFLUENCE_ACCESS_TOKEN_SCOPED }}

  # Jira
  JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
  JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
  JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
+ JIRA_API_TOKEN_SCOPED: ${{ secrets.JIRA_API_TOKEN_SCOPED }}

  # Gong
  GONG_ACCESS_KEY: ${{ secrets.GONG_ACCESS_KEY }}
@@ -54,22 +64,22 @@ env:
  HUBSPOT_ACCESS_TOKEN: ${{ secrets.HUBSPOT_ACCESS_TOKEN }}

  # IMAP
- IMAP_HOST: ${{ secrets.IMAP_HOST }}
- IMAP_USERNAME: ${{ secrets.IMAP_USERNAME }}
+ IMAP_HOST: ${{ vars.IMAP_HOST }}
+ IMAP_USERNAME: ${{ vars.IMAP_USERNAME }}
  IMAP_PASSWORD: ${{ secrets.IMAP_PASSWORD }}
- IMAP_MAILBOXES: ${{ secrets.IMAP_MAILBOXES }}
+ IMAP_MAILBOXES: ${{ vars.IMAP_MAILBOXES }}

  # Airtable
- AIRTABLE_TEST_BASE_ID: ${{ secrets.AIRTABLE_TEST_BASE_ID }}
- AIRTABLE_TEST_TABLE_ID: ${{ secrets.AIRTABLE_TEST_TABLE_ID }}
- AIRTABLE_TEST_TABLE_NAME: ${{ secrets.AIRTABLE_TEST_TABLE_NAME }}
+ AIRTABLE_TEST_BASE_ID: ${{ vars.AIRTABLE_TEST_BASE_ID }}
+ AIRTABLE_TEST_TABLE_ID: ${{ vars.AIRTABLE_TEST_TABLE_ID }}
+ AIRTABLE_TEST_TABLE_NAME: ${{ vars.AIRTABLE_TEST_TABLE_NAME }}
  AIRTABLE_ACCESS_TOKEN: ${{ secrets.AIRTABLE_ACCESS_TOKEN }}

  # Sharepoint
- SHAREPOINT_CLIENT_ID: ${{ secrets.SHAREPOINT_CLIENT_ID }}
+ SHAREPOINT_CLIENT_ID: ${{ vars.SHAREPOINT_CLIENT_ID }}
  SHAREPOINT_CLIENT_SECRET: ${{ secrets.SHAREPOINT_CLIENT_SECRET }}
- SHAREPOINT_CLIENT_DIRECTORY_ID: ${{ secrets.SHAREPOINT_CLIENT_DIRECTORY_ID }}
- SHAREPOINT_SITE: ${{ secrets.SHAREPOINT_SITE }}
+ SHAREPOINT_CLIENT_DIRECTORY_ID: ${{ vars.SHAREPOINT_CLIENT_DIRECTORY_ID }}
+ SHAREPOINT_SITE: ${{ vars.SHAREPOINT_SITE }}

  # Github
  ACCESS_TOKEN_GITHUB: ${{ secrets.ACCESS_TOKEN_GITHUB }}
@@ -96,6 +106,16 @@ env:
  TEAMS_DIRECTORY_ID: ${{ secrets.TEAMS_DIRECTORY_ID }}
  TEAMS_SECRET: ${{ secrets.TEAMS_SECRET }}

+ # Bitbucket
+ BITBUCKET_WORKSPACE: ${{ secrets.BITBUCKET_WORKSPACE }}
+ BITBUCKET_REPOSITORIES: ${{ secrets.BITBUCKET_REPOSITORIES }}
+ BITBUCKET_PROJECTS: ${{ secrets.BITBUCKET_PROJECTS }}
+ BITBUCKET_EMAIL: ${{ vars.BITBUCKET_EMAIL }}
+ BITBUCKET_API_TOKEN: ${{ secrets.BITBUCKET_API_TOKEN }}
+
+ # Fireflies
+ FIREFLIES_API_KEY: ${{ secrets.FIREFLIES_API_KEY }}

jobs:
  connectors-check:
    # See https://runs-on.com/runners/linux/
@@ -125,7 +145,24 @@ jobs:
          playwright install chromium
          playwright install-deps chromium

-     - name: Run Tests
+     - name: Detect Connector changes
+       id: changes
+       uses: dorny/paths-filter@v3
+       with:
+         filters: |
+           hubspot:
+             - 'backend/onyx/connectors/hubspot/**'
+             - 'backend/tests/daily/connectors/hubspot/**'
+           salesforce:
+             - 'backend/onyx/connectors/salesforce/**'
+             - 'backend/tests/daily/connectors/salesforce/**'
+           github:
+             - 'backend/onyx/connectors/github/**'
+             - 'backend/tests/daily/connectors/github/**'
+           file_processing:
+             - 'backend/onyx/file_processing/**'
+
+     - name: Run Tests (excluding HubSpot, Salesforce, and GitHub)
        shell: script -q -e -c "bash --noprofile --norc -eo pipefail {0}"
        run: |
          py.test \
@@ -135,7 +172,49 @@ jobs:
            -o junit_family=xunit2 \
            -xv \
            --ff \
-           backend/tests/daily/connectors
+           backend/tests/daily/connectors \
+           --ignore backend/tests/daily/connectors/hubspot \
+           --ignore backend/tests/daily/connectors/salesforce \
+           --ignore backend/tests/daily/connectors/github

+     - name: Run HubSpot Connector Tests
+       if: ${{ github.event_name == 'schedule' || steps.changes.outputs.hubspot == 'true' || steps.changes.outputs.file_processing == 'true' }}
+       shell: script -q -e -c "bash --noprofile --norc -eo pipefail {0}"
+       run: |
+         py.test \
+           -n 8 \
+           --dist loadfile \
+           --durations=8 \
+           -o junit_family=xunit2 \
+           -xv \
+           --ff \
+           backend/tests/daily/connectors/hubspot

+     - name: Run Salesforce Connector Tests
+       if: ${{ github.event_name == 'schedule' || steps.changes.outputs.salesforce == 'true' || steps.changes.outputs.file_processing == 'true' }}
+       shell: script -q -e -c "bash --noprofile --norc -eo pipefail {0}"
+       run: |
+         py.test \
+           -n 8 \
+           --dist loadfile \
+           --durations=8 \
+           -o junit_family=xunit2 \
+           -xv \
+           --ff \
+           backend/tests/daily/connectors/salesforce

+     - name: Run GitHub Connector Tests
+       if: ${{ github.event_name == 'schedule' || steps.changes.outputs.github == 'true' || steps.changes.outputs.file_processing == 'true' }}
+       shell: script -q -e -c "bash --noprofile --norc -eo pipefail {0}"
+       run: |
+         py.test \
+           -n 8 \
+           --dist loadfile \
+           --durations=8 \
+           -o junit_family=xunit2 \
+           -xv \
+           --ff \
+           backend/tests/daily/connectors/github

      - name: Alert on Failure
        if: failure() && github.event_name == 'schedule'
10  .github/workflows/pr-python-model-tests.yml  (vendored)
@@ -15,7 +15,7 @@ env:
  # Bedrock
  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
- AWS_REGION_NAME: ${{ secrets.AWS_REGION_NAME }}
+ AWS_REGION_NAME: ${{ vars.AWS_REGION_NAME }}

  # API keys for testing
  COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
@@ -23,7 +23,7 @@ env:
  LITELLM_API_URL: ${{ secrets.LITELLM_API_URL }}
  OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
  AZURE_API_KEY: ${{ secrets.AZURE_API_KEY }}
- AZURE_API_URL: ${{ secrets.AZURE_API_URL }}
+ AZURE_API_URL: ${{ vars.AZURE_API_URL }}

jobs:
  model-check:
@@ -77,7 +77,7 @@ jobs:
          REQUIRE_EMAIL_VERIFICATION=false \
          DISABLE_TELEMETRY=true \
          IMAGE_TAG=test \
-         docker compose -f docker-compose.model-server-test.yml -p onyx-stack up -d indexing_model_server
+         docker compose -f docker-compose.model-server-test.yml up -d indexing_model_server
        id: start_docker

      - name: Wait for service to be ready
@@ -132,7 +132,7 @@ jobs:
        if: always()
        run: |
          cd deployment/docker_compose
-         docker compose -f docker-compose.model-server-test.yml -p onyx-stack logs --no-color > $GITHUB_WORKSPACE/docker-compose.log || true
+         docker compose -f docker-compose.model-server-test.yml logs --no-color > $GITHUB_WORKSPACE/docker-compose.log || true

      - name: Upload logs
        if: always()
@@ -145,5 +145,5 @@ jobs:
        if: always()
        run: |
          cd deployment/docker_compose
-         docker compose -f docker-compose.model-server-test.yml -p onyx-stack down -v
+         docker compose -f docker-compose.model-server-test.yml down -v
2  .github/workflows/pr-python-tests.yml  (vendored)
@@ -31,12 +31,14 @@ jobs:
          cache-dependency-path: |
            backend/requirements/default.txt
            backend/requirements/dev.txt
+           backend/requirements/model_server.txt

      - name: Install Dependencies
        run: |
          python -m pip install --upgrade pip
          pip install --retries 5 --timeout 30 -r backend/requirements/default.txt
          pip install --retries 5 --timeout 30 -r backend/requirements/dev.txt
+         pip install --retries 5 --timeout 30 -r backend/requirements/model_server.txt

      - name: Run Tests
        shell: script -q -e -c "bash --noprofile --norc -eo pipefail {0}"
3  .github/workflows/tag-nightly.yml  (vendored)
@@ -15,6 +15,9 @@ jobs:
      # actions using GITHUB_TOKEN cannot trigger another workflow, but we do want this to trigger docker pushes
      # see https://github.com/orgs/community/discussions/27028#discussioncomment-3254367 for the workaround we
      # implement here which needs an actual user's deploy key
+
+     # Additional NOTE: even though this is named "rkuo", the actual key is tied to the onyx repo
+     # and not rkuo's personal account. It is fine to leave this key as is!
      - name: Checkout code
        uses: actions/checkout@v4
        with:
.pre-commit-config.yaml
@@ -34,8 +34,25 @@ repos:
    hooks:
      - id: prettier
        types_or: [html, css, javascript, ts, tsx]
+       additional_dependencies:
+         - prettier
-       language_version: system

+ - repo: https://github.com/sirwart/ripsecrets
+   rev: v0.1.11
+   hooks:
+     - id: ripsecrets
+       args:
+         - --additional-pattern
+         - ^sk-[A-Za-z0-9_\-]{20,}$
+
+ - repo: local
+   hooks:
+     - id: check-lazy-imports
+       name: Check lazy imports are not directly imported
+       entry: python3 backend/scripts/check_lazy_imports.py
+       language: system
+       files: ^backend/.*\.py$
+       pass_filenames: false

# We would like to have a mypy pre-commit hook, but due to the fact that
# pre-commit runs in its own isolated environment, we would need to install
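As a quick sanity check of the `--additional-pattern` the ripsecrets hook registers above, here is a minimal Python sketch; the sample strings are invented for illustration.

```python
import re

# Anchored pattern from the hook: "sk-" followed by 20+ token characters.
pattern = re.compile(r"^sk-[A-Za-z0-9_\-]{20,}$")

print(bool(pattern.match("sk-" + "A" * 24)))  # True: matches the key shape
print(bool(pattern.match("sk-tooshort")))     # False: fewer than 20 characters
```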
6  .vscode/env_template.txt  (vendored)
@@ -10,7 +10,7 @@ SKIP_WARM_UP=True

# Always keep these on for Dev
# Logs all model prompts to stdout
-LOG_DANSWER_MODEL_INTERACTIONS=True
+LOG_ONYX_MODEL_INTERACTIONS=True
# More verbose logging
LOG_LEVEL=debug

@@ -39,8 +39,8 @@ FAST_GEN_AI_MODEL_VERSION=gpt-4o

# For Danswer Slack Bot, overrides the UI values so no need to set this up via UI every time
# Only needed if using DanswerBot
-#DANSWER_BOT_SLACK_APP_TOKEN=<REPLACE THIS>
-#DANSWER_BOT_SLACK_BOT_TOKEN=<REPLACE THIS>
+#ONYX_BOT_SLACK_APP_TOKEN=<REPLACE THIS>
+#ONYX_BOT_SLACK_BOT_TOKEN=<REPLACE THIS>


# Python stuff
985  .vscode/launch.template.jsonc  (vendored)
File diff suppressed because it is too large

36  AGENTS.md
@@ -4,14 +4,14 @@ This file provides guidance to Codex when working with code in this repository.

## KEY NOTES

- - If you run into any missing python dependency errors, try running your command with `workon onyx &&` in front
+ - If you run into any missing python dependency errors, try running your command with `source backend/.venv/bin/activate` \
+   to assume the python venv.
  - To make tests work, check the `.env` file at the root of the project to find an OpenAI key.
  - If using `playwright` to explore the frontend, you can usually log in with username `a@test.com` and password
    `a`. The app can be accessed at `http://localhost:3000`.
  - You should assume that all Onyx services are running. To verify, you can check the `backend/log` directory to
    make sure we see logs coming out from the relevant service.
- - To connect to the Postgres database, use: `docker exec -it onyx-stack-relational_db-1 psql -U postgres -c "<SQL>"`
+ - To connect to the Postgres database, use: `docker exec -it onyx-relational_db-1 psql -U postgres -c "<SQL>"`
  - When making calls to the backend, always go through the frontend. E.g. make a call to `http://localhost:3000/api/persona` not `http://localhost:8080/api/persona`
  - Put ALL db operations under the `backend/onyx/db` / `backend/ee/onyx/db` directories. Don't run queries
    outside of those directories.
@@ -70,7 +70,12 @@ Onyx uses Celery for asynchronous task processing with multiple specialized work
   - Single thread (monitoring doesn't need parallelism)
   - Cloud-specific monitoring tasks

-8. **Beat Worker** (`beat`)
+8. **User File Processing Worker** (`user_file_processing`)
+   - Processes user-uploaded files
+   - Handles user file indexing and project synchronization
+   - Configurable concurrency
+
+9. **Beat Worker** (`beat`)
   - Celery's scheduler for periodic tasks
   - Uses DynamicTenantScheduler for multi-tenant support
   - Schedules tasks like:
@@ -82,6 +87,31 @@ Onyx uses Celery for asynchronous task processing with multiple specialized work
   - Monitoring tasks (every 5 minutes)
   - Cleanup tasks (hourly)

+#### Worker Deployment Modes
+
+Onyx supports two deployment modes for background workers, controlled by the `USE_LIGHTWEIGHT_BACKGROUND_WORKER` environment variable:
+
+**Lightweight Mode** (default, `USE_LIGHTWEIGHT_BACKGROUND_WORKER=true`):
+- Runs a single consolidated `background` worker that handles all background tasks:
+  - Pruning operations (from `heavy` worker)
+  - Knowledge graph processing (from `kg_processing` worker)
+  - Monitoring tasks (from `monitoring` worker)
+  - User file processing (from `user_file_processing` worker)
+- Lower resource footprint (single worker process)
+- Suitable for smaller deployments or development environments
+- Default concurrency: 6 threads
+
+**Standard Mode** (`USE_LIGHTWEIGHT_BACKGROUND_WORKER=false`):
+- Runs separate specialized workers as documented above (heavy, kg_processing, monitoring, user_file_processing)
+- Better isolation and scalability
+- Can scale individual workers independently based on workload
+- Suitable for production deployments with higher load
+
+The deployment mode affects:
+- **Backend**: Worker processes spawned by supervisord or dev scripts
+- **Helm**: Which Kubernetes deployments are created
+- **Dev Environment**: Which workers `dev_run_background_jobs.py` spawns
+
#### Key Features

- **Thread-based Workers**: All workers use thread pools (not processes) for stability
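The `USE_LIGHTWEIGHT_BACKGROUND_WORKER` split added above is easiest to see as a launcher. Below is a minimal, hypothetical sketch of what a dev script like `dev_run_background_jobs.py` might do; the Celery app path, queue names, and concurrency values are illustrative assumptions, not taken from the diff.

```python
import os
import subprocess

# Assumed app path and queue names for illustration only.
LIGHTWEIGHT = os.environ.get("USE_LIGHTWEIGHT_BACKGROUND_WORKER", "true") == "true"

def celery_worker(queue: str, concurrency: int) -> subprocess.Popen:
    # Thread pools (not processes), matching the "Key Features" note above.
    return subprocess.Popen([
        "celery", "-A", "onyx.background.celery", "worker",
        "--pool=threads", f"--concurrency={concurrency}", "-Q", queue,
    ])

if LIGHTWEIGHT:
    # One consolidated worker covering every background queue.
    procs = [celery_worker("background", concurrency=6)]
else:
    # One specialized worker per queue, scalable independently.
    queues = ["heavy", "kg_processing", "monitoring", "user_file_processing"]
    procs = [celery_worker(q, concurrency=4) for q in queues]

for p in procs:
    p.wait()  # block until workers exit (Ctrl+C to stop in dev)
```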
43  CLAUDE.md
@@ -4,14 +4,14 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co

## KEY NOTES

- - If you run into any missing python dependency errors, try running your command with `workon onyx &&` in front
+ - If you run into any missing python dependency errors, try running your command with `source backend/.venv/bin/activate` \
+   to assume the python venv.
  - To make tests work, check the `.env` file at the root of the project to find an OpenAI key.
  - If using `playwright` to explore the frontend, you can usually log in with username `a@test.com` and password
    `a`. The app can be accessed at `http://localhost:3000`.
  - You should assume that all Onyx services are running. To verify, you can check the `backend/log` directory to
    make sure we see logs coming out from the relevant service.
- - To connect to the Postgres database, use: `docker exec -it onyx-stack-relational_db-1 psql -U postgres -c "<SQL>"`
+ - To connect to the Postgres database, use: `docker exec -it onyx-relational_db-1 psql -U postgres -c "<SQL>"`
  - When making calls to the backend, always go through the frontend. E.g. make a call to `http://localhost:3000/api/persona` not `http://localhost:8080/api/persona`
  - Put ALL db operations under the `backend/onyx/db` / `backend/ee/onyx/db` directories. Don't run queries
    outside of those directories.
@@ -70,7 +70,12 @@ Onyx uses Celery for asynchronous task processing with multiple specialized work
   - Single thread (monitoring doesn't need parallelism)
   - Cloud-specific monitoring tasks

-8. **Beat Worker** (`beat`)
+8. **User File Processing Worker** (`user_file_processing`)
+   - Processes user-uploaded files
+   - Handles user file indexing and project synchronization
+   - Configurable concurrency
+
+9. **Beat Worker** (`beat`)
   - Celery's scheduler for periodic tasks
   - Uses DynamicTenantScheduler for multi-tenant support
   - Schedules tasks like:
@@ -82,11 +87,39 @@ Onyx uses Celery for asynchronous task processing with multiple specialized work
   - Monitoring tasks (every 5 minutes)
   - Cleanup tasks (hourly)

+#### Worker Deployment Modes
+
+Onyx supports two deployment modes for background workers, controlled by the `USE_LIGHTWEIGHT_BACKGROUND_WORKER` environment variable:
+
+**Lightweight Mode** (default, `USE_LIGHTWEIGHT_BACKGROUND_WORKER=true`):
+- Runs a single consolidated `background` worker that handles all background tasks:
+  - Light worker tasks (Vespa operations, permissions sync, deletion)
+  - Document processing (indexing pipeline)
+  - Document fetching (connector data retrieval)
+  - Pruning operations (from `heavy` worker)
+  - Knowledge graph processing (from `kg_processing` worker)
+  - Monitoring tasks (from `monitoring` worker)
+  - User file processing (from `user_file_processing` worker)
+- Lower resource footprint (fewer worker processes)
+- Suitable for smaller deployments or development environments
+- Default concurrency: 20 threads (increased to handle combined workload)
+
+**Standard Mode** (`USE_LIGHTWEIGHT_BACKGROUND_WORKER=false`):
+- Runs separate specialized workers as documented above (light, docprocessing, docfetching, heavy, kg_processing, monitoring, user_file_processing)
+- Better isolation and scalability
+- Can scale individual workers independently based on workload
+- Suitable for production deployments with higher load
+
+The deployment mode affects:
+- **Backend**: Worker processes spawned by supervisord or dev scripts
+- **Helm**: Which Kubernetes deployments are created
+- **Dev Environment**: Which workers `dev_run_background_jobs.py` spawns
+
#### Key Features

- **Thread-based Workers**: All workers use thread pools (not processes) for stability
- **Tenant Awareness**: Multi-tenant support with per-tenant task isolation. There is a
  middleware layer that automatically finds the appropriate tenant ID when sending tasks
  via Celery Beat.
- **Task Prioritization**: High, Medium, Low priority queues
- **Monitoring**: Built-in heartbeat and liveness checking
CONTRIBUTING.md
@@ -13,8 +13,7 @@ As an open source project in a rapidly changing space, we welcome all contributi
The [GitHub Issues](https://github.com/onyx-dot-app/onyx/issues) page is a great place to start for contribution ideas.

To ensure that your contribution is aligned with the project's direction, please reach out to any maintainer on the Onyx team
-via [Slack](https://join.slack.com/t/onyx-dot-app/shared_invite/zt-34lu4m7xg-TsKGO6h8PDvR5W27zTdyhA) /
-[Discord](https://discord.gg/TDJ59cGV2X) or [email](mailto:founders@onyx.app).
+via [Discord](https://discord.gg/4NA5SbzrWb) or [email](mailto:hello@onyx.app).

Issues that have been explicitly approved by the maintainers (aligned with the direction of the project)
will be marked with the `approved by maintainers` label.
@@ -28,8 +27,7 @@ Your input is vital to making sure that Onyx moves in the right direction.
Before starting on implementation, please raise a GitHub issue.

Also, always feel free to message the founders (Chris Weaver / Yuhong Sun) on
-[Slack](https://join.slack.com/t/onyx-dot-app/shared_invite/zt-34lu4m7xg-TsKGO6h8PDvR5W27zTdyhA) /
-[Discord](https://discord.gg/TDJ59cGV2X) directly about anything at all.
+[Discord](https://discord.gg/4NA5SbzrWb) directly about anything at all.

### Contributing Code

@@ -46,9 +44,7 @@ Our goal is to make contributing as easy as possible. If you run into any issues
That way we can help future contributors and users can avoid the same issue.

We also have support channels and generally interesting discussions on our
-[Slack](https://join.slack.com/t/onyx-dot-app/shared_invite/zt-2twesxdr6-5iQitKZQpgq~hYIZ~dv3KA)
-and
-[Discord](https://discord.gg/TDJ59cGV2X).
+[Discord](https://discord.gg/4NA5SbzrWb).

We would love to see you there!

@@ -84,10 +80,6 @@ python -m venv .venv
source .venv/bin/activate
```

-> **Note:**
-> This virtual environment MUST NOT be set up WITHIN the onyx directory if you plan on using mypy within certain IDEs.
-> For simplicity, we recommend setting up the virtual environment outside of the onyx directory.
-
_For Windows, activate the virtual environment using Command Prompt:_

```bash
@@ -109,6 +101,11 @@ pip install -r backend/requirements/ee.txt
pip install -r backend/requirements/model_server.txt
```

+Fix vscode/cursor auto-imports:
+```bash
+pip install -e .
+```
+
Install Playwright for Python (headless browser required by the Web Connector)

In the activated Python virtualenv, install Playwright for Python by running:
@@ -121,8 +118,15 @@ You may have to deactivate and reactivate your virtualenv for `playwright` to ap

#### Frontend: Node dependencies

-Install [Node.js and npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) for the frontend.
-Once the above is done, navigate to `onyx/web` run:
+Onyx uses Node v22.20.0. We highly recommend you use [Node Version Manager (nvm)](https://github.com/nvm-sh/nvm)
+to manage your Node installations. Once installed, you can run
+
+```bash
+nvm install 22 && nvm use 22
+node -v # verify your active version
+```
+
+Navigate to `onyx/web` and run:

```bash
npm i
@@ -133,8 +137,6 @@ npm i
### Backend

For the backend, you'll need to setup pre-commit hooks (black / reorder-python-imports).
-First, install pre-commit (if you don't have it already) following the instructions
-[here](https://pre-commit.com/#installation).

With the virtual environment active, install the pre-commit library with:

@@ -154,15 +156,17 @@ To run the mypy checks manually, run `python -m mypy .` from the `onyx/backend`

### Web

-We use `prettier` for formatting. The desired version (2.8.8) will be installed via a `npm i` from the `onyx/web` directory.
+We use `prettier` for formatting. The desired version will be installed via a `npm i` from the `onyx/web` directory.
To run the formatter, use `npx prettier --write .` from the `onyx/web` directory.
Please double check that prettier passes before creating a pull request.

+Pre-commit will also run prettier automatically on files you've recently touched. If re-formatted, your commit will fail.
+Re-stage your changes and commit again.
+
# Running the application for development

## Developing using VSCode Debugger (recommended)

-We highly recommend using VSCode debugger for development.
+**We highly recommend using VSCode debugger for development.**
See [CONTRIBUTING_VSCODE.md](./CONTRIBUTING_VSCODE.md) for more details.

Otherwise, you can follow the instructions below to run the application for development.
@@ -175,7 +179,7 @@ You will need Docker installed to run these containers.
First navigate to `onyx/deployment/docker_compose`, then start up Postgres/Vespa/Redis/MinIO with:

```bash
-docker compose -f docker-compose.dev.yml -p onyx-stack up -d index relational_db cache minio
+docker compose up -d index relational_db cache minio
```

(index refers to Vespa, relational_db refers to Postgres, and cache refers to Redis)
@@ -257,7 +261,7 @@ You can run the full Onyx application stack from pre-built images including all
Navigate to `onyx/deployment/docker_compose` and run:

```bash
-docker compose -f docker-compose.dev.yml -p onyx-stack up -d
+docker compose up -d
```

After Docker pulls and starts these containers, navigate to `http://localhost:3000` to use Onyx.
@@ -265,7 +269,7 @@ After Docker pulls and starts these containers, navigate to `http://localhost:30
If you want to make changes to Onyx and run those changes in Docker, you can also build a local version of the Onyx container images that incorporates your changes like so:

```bash
-docker compose -f docker-compose.dev.yml -p onyx-stack up -d --build
+docker compose up -d --build
```
CONTRIBUTING_VSCODE.md
@@ -21,6 +21,9 @@ Before starting, make sure the Docker Daemon is running.
5. You can set breakpoints by clicking to the left of line numbers to help debug while the app is running
6. Use the debug toolbar to step through code, inspect variables, etc.

+Note: Clear and Restart External Volumes and Containers will reset your postgres and Vespa (relational-db and index).
+Only run this if you are okay with wiping your data.
+
## Features

- Hot reload is enabled for the web server and API servers
135  README.md
@@ -1,116 +1,103 @@
<!-- ONYX_METADATA={"link": "https://github.com/onyx-dot-app/onyx/blob/main/README.md"} -->

<a name="readme-top"></a>

<h2 align="center">
<a href="https://www.onyx.app/"> <img width="50%" src="https://github.com/onyx-dot-app/onyx/blob/logo/OnyxLogoCropped.jpg?raw=true)" /></a>
</h2>

<p align="center">
-<p align="center">Open Source Gen-AI + Enterprise Search.</p>
+<p align="center">Open Source AI Platform</p>

<p align="center">
-<a href="https://docs.onyx.app/" target="_blank">
-<img src="https://img.shields.io/badge/docs-view-blue" alt="Documentation">
-</a>
-<a href="https://join.slack.com/t/onyx-dot-app/shared_invite/zt-34lu4m7xg-TsKGO6h8PDvR5W27zTdyhA" target="_blank">
-<img src="https://img.shields.io/badge/slack-join-blue.svg?logo=slack" alt="Slack">
-</a>
-<a href="https://discord.gg/TDJ59cGV2X" target="_blank">
-<img src="https://img.shields.io/badge/discord-join-blue.svg?logo=discord&logoColor=white" alt="Discord">
-</a>
-<a href="https://github.com/onyx-dot-app/onyx/blob/main/README.md" target="_blank">
-<img src="https://img.shields.io/static/v1?label=license&message=MIT&color=blue" alt="License">
-</a>
+<a href="https://discord.gg/TDJ59cGV2X" target="_blank">
+<img src="https://img.shields.io/badge/discord-join-blue.svg?logo=discord&logoColor=white" alt="Discord">
+</a>
+<a href="https://docs.onyx.app/" target="_blank">
+<img src="https://img.shields.io/badge/docs-view-blue" alt="Documentation">
+</a>
+<a href="https://docs.onyx.app/" target="_blank">
+<img src="https://img.shields.io/website?url=https://www.onyx.app&up_message=visit&up_color=blue" alt="Documentation">
+</a>
+<a href="https://github.com/onyx-dot-app/onyx/blob/main/LICENSE" target="_blank">
+<img src="https://img.shields.io/static/v1?label=license&message=MIT&color=blue" alt="License">
+</a>
</p>

-<strong>[Onyx](https://www.onyx.app/)</strong> (formerly Danswer) is the AI platform connected to your company's docs, apps, and people.
-Onyx provides a feature rich Chat interface and plugs into any LLM of your choice.
-Keep knowledge and access controls sync-ed across over 40 connectors like Google Drive, Slack, Confluence, Salesforce, etc.
-Create custom AI agents with unique prompts, knowledge, and actions that the agents can take.
-Onyx can be deployed securely anywhere and for any scale - on a laptop, on-premise, or to cloud.
-
-<h3>Feature Highlights</h3>
+**[Onyx](https://www.onyx.app/)** is a feature-rich, self-hostable Chat UI that works with any LLM. It is easy to deploy and can run in a completely airgapped environment.

-**Deep research over your team's knowledge:**
+Onyx comes loaded with advanced features like Agents, Web Search, RAG, MCP, Deep Research, Connectors to 40+ knowledge sources, and more.

-https://private-user-images.githubusercontent.com/32520769/414509312-48392e83-95d0-4fb5-8650-a396e05e0a32.mp4?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3Mzk5Mjg2MzYsIm5iZiI6MTczOTkyODMzNiwicGF0aCI6Ii8zMjUyMDc2OS80MTQ1MDkzMTItNDgzOTJlODMtOTVkMC00ZmI1LTg2NTAtYTM5NmUwNWUwYTMyLm1wND9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNTAyMTklMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjUwMjE5VDAxMjUzNlomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPWFhMzk5Njg2Y2Y5YjFmNDNiYTQ2YzM5ZTg5YWJiYTU2NWMyY2YwNmUyODE2NWUxMDRiMWQxZWJmODI4YTA0MTUmWC1BbXotU2lnbmVkSGVhZGVycz1ob3N0In0.a9D8A0sgKE9AoaoE-mfFbJ6_OKYeqaf7TZ4Han2JfW8
+> [!TIP]
+> Run Onyx with one command (or see deployment section below):
+> ```
+> curl -fsSL https://raw.githubusercontent.com/onyx-dot-app/onyx/main/deployment/docker_compose/install.sh > install.sh && chmod +x install.sh && ./install.sh
+> ```

-**Use Onyx as a secure AI Chat with any LLM:**
+****

-

-**Easily set up connectors to your apps:**
-
-
+## ⭐ Features
+- **🤖 Custom Agents:** Build AI Agents with unique instructions, knowledge and actions.
+- **🌍 Web Search:** Browse the web with Google PSE, Exa, and Serper as well as an in-house scraper or Firecrawl.
+- **🔍 RAG:** Best in class hybrid-search + knowledge graph for uploaded files and ingested documents from connectors.
+- **🔄 Connectors:** Pull knowledge, metadata, and access information from over 40 applications.
+- **🔬 Deep Research:** Get in depth answers with an agentic multi-step search.
+- **▶️ Actions & MCP:** Give AI Agents the ability to interact with external systems.
+- **💻 Code Interpreter:** Execute code to analyze data, render graphs and create files.
+- **🎨 Image Generation:** Generate images based on user prompts.
+- **👥 Collaboration:** Chat sharing, feedback gathering, user management, usage analytics, and more.
+
+Onyx works with all LLMs (like OpenAI, Anthropic, Gemini, etc.) and self-hosted LLMs (like Ollama, vLLM, etc.)
+
+To learn more about the features, check out our [documentation](https://docs.onyx.app/welcome)!

-**Access Onyx where your team already works:**
-
-
+## 🚀 Deployment
+Onyx supports deployments in Docker, Kubernetes, Terraform, along with guides for major cloud providers.
+
+See guides below:
+- [Docker](https://docs.onyx.app/deployment/local/docker) or [Quickstart](https://docs.onyx.app/deployment/getting_started/quickstart) (best for most users)
+- [Kubernetes](https://docs.onyx.app/deployment/local/kubernetes) (best for large teams)
+- [Terraform](https://docs.onyx.app/deployment/local/terraform) (best for teams already using Terraform)
+- Cloud specific guides (best if specifically using [AWS EKS](https://docs.onyx.app/deployment/cloud/aws/eks), [Azure VMs](https://docs.onyx.app/deployment/cloud/azure), etc.)
+
+> [!TIP]
+> **To try Onyx for free without deploying, check out [Onyx Cloud](https://cloud.onyx.app/signup)**.

-## Deployment
-**To try it out for free and get started in seconds, check out [Onyx Cloud](https://cloud.onyx.app/signup)**.
-
-Onyx can also be run locally (even on a laptop) or deployed on a virtual machine with a single
-`docker compose` command. Checkout our [docs](https://docs.onyx.app/deployment/getting_started/quickstart) to learn more.
+## 🔍 Other Notable Benefits
+Onyx is built for teams of all sizes, from individual users to the largest global enterprises.

-We also have built-in support for high-availability/scalable deployment on Kubernetes.
-References [here](https://github.com/onyx-dot-app/onyx/tree/main/deployment).
+- **Enterprise Search**: far more than simple RAG, Onyx has custom indexing and retrieval that remains performant and accurate for scales of up to tens of millions of documents.
+- **Security**: SSO (OIDC/SAML/OAuth2), RBAC, encryption of credentials, etc.
+- **Management UI**: different user roles such as basic, curator, and admin.
+- **Document Permissioning**: mirrors user access from external apps for RAG use cases.

-## 🔍 Other Notable Benefits of Onyx
-- Custom deep learning models for indexing and inference time, only through Onyx + learning from user feedback.
-- Flexible security features like SSO (OIDC/SAML/OAuth2), RBAC, encryption of credentials, etc.
-- Knowledge curation features like document-sets, query history, usage analytics, etc.
-- Scalable deployment options tested up to many tens of thousands users and hundreds of millions of documents.

## 🚧 Roadmap
-- New methods in information retrieval (StructRAG, LightGraphRAG, etc.)
-- Personalized Search
-- Organizational understanding and ability to locate and suggest experts from your team.
-- Code Search
-- SQL and Structured Query Language
+To see ongoing and upcoming projects, check out our [roadmap](https://github.com/orgs/onyx-dot-app/projects/2)!

## 🔌 Connectors
Keep knowledge and access up to sync across 40+ connectors:

- Google Drive
- Confluence
- Slack
- Gmail
- Salesforce
- Microsoft Sharepoint
- Github
- Jira
- Zendesk
- Gong
- Microsoft Teams
- Dropbox
- Local Files
- Websites
- And more ...

See the full list [here](https://docs.onyx.app/admin/connectors/overview).

## 📚 Licensing
There are two editions of Onyx:

- - Onyx Community Edition (CE) is available freely under the MIT Expat license. Simply follow the Deployment guide above.
+ - Onyx Community Edition (CE) is available freely under the MIT license.
- Onyx Enterprise Edition (EE) includes extra features that are primarily useful for larger organizations.
  For feature details, check out [our website](https://www.onyx.app/pricing).

To try the Onyx Enterprise Edition:
1. Checkout [Onyx Cloud](https://cloud.onyx.app/signup).
2. For self-hosting the Enterprise Edition, contact us at [founders@onyx.app](mailto:founders@onyx.app) or book a call with us on our [Cal](https://cal.com/team/onyx/founders).

## 👪 Community
Join our open source community on **[Discord](https://discord.gg/TDJ59cGV2X)**!

## 💡 Contributing
Looking to contribute? Please check out the [Contribution Guide](CONTRIBUTING.md) for more details.
backend/Dockerfile
@@ -15,8 +15,8 @@ ENV ONYX_VERSION=${ONYX_VERSION} \
    DO_NOT_TRACK="true" \
    PLAYWRIGHT_BROWSERS_PATH="/app/.cache/ms-playwright"

+COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/

RUN echo "ONYX_VERSION: ${ONYX_VERSION}"
# Install system dependencies
# cmake needed for psycopg (postgres)
# libpq-dev needed for psycopg (postgres)
@@ -48,22 +48,19 @@ RUN apt-get update && \
# Remove py which is pulled in by retry, py is not needed and is a CVE
COPY ./requirements/default.txt /tmp/requirements.txt
COPY ./requirements/ee.txt /tmp/ee-requirements.txt
-RUN pip install --no-cache-dir --upgrade \
-    --retries 5 \
-    --timeout 30 \
+RUN uv pip install --system --no-cache-dir --upgrade \
    -r /tmp/requirements.txt \
    -r /tmp/ee-requirements.txt && \
    pip uninstall -y py && \
    playwright install chromium && \
    playwright install-deps chromium && \
-   ln -s /usr/local/bin/supervisord /usr/bin/supervisord
-
-# Cleanup for CVEs and size reduction
-# https://github.com/tornadoweb/tornado/issues/3107
-# xserver-common and xvfb included by playwright installation but not needed after
-# perl-base is part of the base Python Debian image but not needed for Onyx functionality
-# perl-base could only be removed with --allow-remove-essential
-RUN apt-get update && \
+   ln -s /usr/local/bin/supervisord /usr/bin/supervisord && \
+   # Cleanup for CVEs and size reduction
+   # https://github.com/tornadoweb/tornado/issues/3107
+   # xserver-common and xvfb included by playwright installation but not needed after
+   # perl-base is part of the base Python Debian image but not needed for Onyx functionality
+   # perl-base could only be removed with --allow-remove-essential
+   apt-get update && \
    apt-get remove -y --allow-remove-essential \
        perl-base \
        xserver-common \
@@ -73,15 +70,16 @@ RUN apt-get update && \
        libxmlsec1-dev \
        pkg-config \
        gcc && \
-   apt-get install -y libxmlsec1-openssl && \
+   # Install here to avoid some packages being cleaned up above
+   apt-get install -y \
+       libxmlsec1-openssl \
+       # Install postgresql-client for easy manual tests
+       postgresql-client && \
    apt-get autoremove -y && \
    rm -rf /var/lib/apt/lists/* && \
+   rm -rf ~/.cache/uv /tmp/*.txt && \
    rm -f /usr/local/lib/python3.11/site-packages/tornado/test/test.key

-# Install postgresql-client for easy manual tests
-# Install it here to avoid it being cleaned up above
-RUN apt-get update && apt-get install -y postgresql-client
-
# Pre-downloading models for setups with limited egress
RUN python -c "from tokenizers import Tokenizer; \
Tokenizer.from_pretrained('nomic-ai/nomic-embed-text-v1')"
@@ -95,36 +93,37 @@ nltk.download('punkt_tab', quiet=True);"
# Set up application files
WORKDIR /app

-# Enterprise Version Files
-COPY ./ee /app/ee
-COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
-
-# Set up application files
-COPY ./onyx /app/onyx
-COPY ./shared_configs /app/shared_configs
-COPY ./alembic /app/alembic
-COPY ./alembic_tenants /app/alembic_tenants
-COPY ./alembic.ini /app/alembic.ini
-COPY supervisord.conf /usr/etc/supervisord.conf
-COPY ./static /app/static
-
-# Escape hatch scripts
-COPY ./scripts/debugging /app/scripts/debugging
-COPY ./scripts/force_delete_connector_by_id.py /app/scripts/force_delete_connector_by_id.py
-
-# Put logo in assets
-COPY ./assets /app/assets
-
-ENV PYTHONPATH=/app
-
+# Create non-root user for security best practices
+RUN groupadd -g 1001 onyx && \
+    useradd -u 1001 -g onyx -m -s /bin/bash onyx && \
+    chown -R onyx:onyx /app && \
+    mkdir -p /var/log/onyx && \
+    chmod 755 /var/log/onyx && \
+    chown onyx:onyx /var/log/onyx
+
+# Enterprise Version Files
+COPY --chown=onyx:onyx ./ee /app/ee
+COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
+
+# Set up application files
+COPY --chown=onyx:onyx ./onyx /app/onyx
+COPY --chown=onyx:onyx ./shared_configs /app/shared_configs
+COPY --chown=onyx:onyx ./alembic /app/alembic
+COPY --chown=onyx:onyx ./alembic_tenants /app/alembic_tenants
+COPY --chown=onyx:onyx ./alembic.ini /app/alembic.ini
+COPY supervisord.conf /usr/etc/supervisord.conf
+COPY --chown=onyx:onyx ./static /app/static
+
+# Escape hatch scripts
+COPY --chown=onyx:onyx ./scripts/debugging /app/scripts/debugging
+COPY --chown=onyx:onyx ./scripts/force_delete_connector_by_id.py /app/scripts/force_delete_connector_by_id.py
+COPY --chown=onyx:onyx ./scripts/supervisord_entrypoint.sh /app/scripts/supervisord_entrypoint.sh
+RUN chmod +x /app/scripts/supervisord_entrypoint.sh
+
+# Put logo in assets
+COPY --chown=onyx:onyx ./assets /app/assets
+
+ENV PYTHONPATH=/app

# Default command which does nothing
# This container is used by api server and background which specify their own CMD
CMD ["tail", "-f", "/dev/null"]
backend/Dockerfile.model_server
@@ -12,7 +12,7 @@ ENV ONYX_VERSION=${ONYX_VERSION} \
    DANSWER_RUNNING_IN_DOCKER="true" \
    HF_HOME=/app/.cache/huggingface

-RUN echo "ONYX_VERSION: ${ONYX_VERSION}"
+COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/

# Create non-root user for security best practices
RUN mkdir -p /app && \
@@ -34,19 +34,17 @@ RUN set -eux; \
    pkg-config \
    curl \
    ca-certificates \
    && rm -rf /var/lib/apt/lists/* \
    # Install latest stable Rust (supports Cargo.lock v4)
    && curl -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal --default-toolchain stable \
-   && rustc --version && cargo --version
+   && rustc --version && cargo --version \
+   && apt-get remove -y --allow-remove-essential perl-base \
+   && apt-get autoremove -y \
+   && rm -rf /var/lib/apt/lists/*

COPY ./requirements/model_server.txt /tmp/requirements.txt
-RUN pip install --no-cache-dir --upgrade \
-    --retries 5 \
-    --timeout 30 \
-    -r /tmp/requirements.txt
-
-RUN apt-get remove -y --allow-remove-essential perl-base && \
-    apt-get autoremove -y
+RUN uv pip install --system --no-cache-dir --upgrade \
+    -r /tmp/requirements.txt && \
+    rm -rf ~/.cache/uv /tmp/*.txt

# Pre-downloading models for setups with limited egress
# Download tokenizers, distilbert for the Onyx model
@@ -61,12 +59,11 @@ snapshot_download(repo_id='onyx-dot-app/information-content-model'); \
snapshot_download('nomic-ai/nomic-embed-text-v1'); \
snapshot_download('mixedbread-ai/mxbai-rerank-xsmall-v1'); \
from sentence_transformers import SentenceTransformer; \
-SentenceTransformer(model_name_or_path='nomic-ai/nomic-embed-text-v1', trust_remote_code=True);"
-
-# In case the user has volumes mounted to /app/.cache/huggingface that they've downloaded while
-# running Onyx, move the current contents of the cache folder to a temporary location to ensure
-# it's preserved in order to combine with the user's cache contents
-RUN mv /app/.cache/huggingface /app/.cache/temp_huggingface && \
+SentenceTransformer(model_name_or_path='nomic-ai/nomic-embed-text-v1', trust_remote_code=True);" && \
+    # In case the user has volumes mounted to /app/.cache/huggingface that they've downloaded while
+    # running Onyx, move the current contents of the cache folder to a temporary location to ensure
+    # it's preserved in order to combine with the user's cache contents
+    mv /app/.cache/huggingface /app/.cache/temp_huggingface && \
    chown -R onyx:onyx /app

WORKDIR /app
@@ -0,0 +1,153 @@
"""add permission sync attempt tables

Revision ID: 03d710ccf29c
Revises: 96a5702df6aa
Create Date: 2025-09-11 13:30:00.000000

"""

from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = "03d710ccf29c"  # Generate a new unique ID
down_revision = "96a5702df6aa"
branch_labels = None
depends_on = None


def upgrade() -> None:
    # Create the permission sync status enum
    permission_sync_status_enum = sa.Enum(
        "not_started",
        "in_progress",
        "success",
        "canceled",
        "failed",
        "completed_with_errors",
        name="permissionsyncstatus",
        native_enum=False,
    )

    # Create doc_permission_sync_attempt table
    op.create_table(
        "doc_permission_sync_attempt",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("connector_credential_pair_id", sa.Integer(), nullable=False),
        sa.Column("status", permission_sync_status_enum, nullable=False),
        sa.Column("total_docs_synced", sa.Integer(), nullable=True),
        sa.Column("docs_with_permission_errors", sa.Integer(), nullable=True),
        sa.Column("error_message", sa.Text(), nullable=True),
        sa.Column(
            "time_created",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.Column("time_started", sa.DateTime(timezone=True), nullable=True),
        sa.Column("time_finished", sa.DateTime(timezone=True), nullable=True),
        sa.ForeignKeyConstraint(
            ["connector_credential_pair_id"],
            ["connector_credential_pair.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )

    # Create indexes for doc_permission_sync_attempt
    op.create_index(
        "ix_doc_permission_sync_attempt_time_created",
        "doc_permission_sync_attempt",
        ["time_created"],
        unique=False,
    )
    op.create_index(
        "ix_permission_sync_attempt_latest_for_cc_pair",
        "doc_permission_sync_attempt",
        ["connector_credential_pair_id", "time_created"],
        unique=False,
    )
    op.create_index(
        "ix_permission_sync_attempt_status_time",
        "doc_permission_sync_attempt",
        ["status", sa.text("time_finished DESC")],
        unique=False,
    )

    # Create external_group_permission_sync_attempt table
    # connector_credential_pair_id is nullable - group syncs can be global (e.g., Confluence)
    op.create_table(
        "external_group_permission_sync_attempt",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("connector_credential_pair_id", sa.Integer(), nullable=True),
        sa.Column("status", permission_sync_status_enum, nullable=False),
        sa.Column("total_users_processed", sa.Integer(), nullable=True),
        sa.Column("total_groups_processed", sa.Integer(), nullable=True),
        sa.Column("total_group_memberships_synced", sa.Integer(), nullable=True),
        sa.Column("error_message", sa.Text(), nullable=True),
        sa.Column(
            "time_created",
            sa.DateTime(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.Column("time_started", sa.DateTime(timezone=True), nullable=True),
        sa.Column("time_finished", sa.DateTime(timezone=True), nullable=True),
        sa.ForeignKeyConstraint(
            ["connector_credential_pair_id"],
            ["connector_credential_pair.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )

    # Create indexes for external_group_permission_sync_attempt
    op.create_index(
        "ix_external_group_permission_sync_attempt_time_created",
        "external_group_permission_sync_attempt",
        ["time_created"],
        unique=False,
    )
    op.create_index(
        "ix_group_sync_attempt_cc_pair_time",
        "external_group_permission_sync_attempt",
        ["connector_credential_pair_id", "time_created"],
        unique=False,
    )
    op.create_index(
        "ix_group_sync_attempt_status_time",
        "external_group_permission_sync_attempt",
        ["status", sa.text("time_finished DESC")],
        unique=False,
    )


def downgrade() -> None:
    # Drop indexes
    op.drop_index(
        "ix_group_sync_attempt_status_time",
        table_name="external_group_permission_sync_attempt",
    )
    op.drop_index(
        "ix_group_sync_attempt_cc_pair_time",
        table_name="external_group_permission_sync_attempt",
    )
    op.drop_index(
        "ix_external_group_permission_sync_attempt_time_created",
        table_name="external_group_permission_sync_attempt",
    )
    op.drop_index(
        "ix_permission_sync_attempt_status_time",
        table_name="doc_permission_sync_attempt",
    )
    op.drop_index(
        "ix_permission_sync_attempt_latest_for_cc_pair",
        table_name="doc_permission_sync_attempt",
    )
    op.drop_index(
        "ix_doc_permission_sync_attempt_time_created",
        table_name="doc_permission_sync_attempt",
    )

    # Drop tables
    op.drop_table("external_group_permission_sync_attempt")
    op.drop_table("doc_permission_sync_attempt")
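To make the index shapes above concrete, here is a hedged sketch of the access pattern `ix_permission_sync_attempt_latest_for_cc_pair` appears designed for: fetching the most recent sync attempt for one connector-credential pair. Table and column names come from the migration; the connection string and the cc-pair id are placeholders, and this is not code from the repository.

```python
from sqlalchemy import create_engine, text

engine = create_engine("postgresql://postgres@localhost/onyx")  # placeholder DSN

with engine.connect() as conn:
    # Latest attempt per cc-pair: filter on the indexed FK, order by the
    # indexed time_created column, take one row.
    latest = conn.execute(
        text(
            """
            SELECT id, status, time_created, time_finished
            FROM doc_permission_sync_attempt
            WHERE connector_credential_pair_id = :cc_pair_id
            ORDER BY time_created DESC
            LIMIT 1
            """
        ),
        {"cc_pair_id": 1},  # placeholder id
    ).first()
    print(latest)
```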
@@ -0,0 +1,389 @@
|
||||
"""Migration 2: User file data preparation and backfill
|
||||
|
||||
Revision ID: 0cd424f32b1d
|
||||
Revises: 9b66d3156fc6
|
||||
Create Date: 2025-09-22 09:44:42.727034
|
||||
|
||||
This migration populates the new columns added in migration 1.
|
||||
It prepares data for the UUID transition and relationship migration.
|
||||
"""
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy import text
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger("alembic.runtime.migration")
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = "0cd424f32b1d"
|
||||
down_revision = "9b66d3156fc6"
|
||||
branch_labels = None
|
||||
depends_on = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
"""Populate new columns with data."""
|
||||
|
||||
bind = op.get_bind()
|
||||
inspector = sa.inspect(bind)
|
||||
|
||||
# === Step 1: Populate user_file.new_id ===
|
||||
user_file_columns = [col["name"] for col in inspector.get_columns("user_file")]
|
||||
has_new_id = "new_id" in user_file_columns
|
||||
|
||||
if has_new_id:
|
||||
logger.info("Populating user_file.new_id with UUIDs...")
|
||||
|
||||
# Count rows needing UUIDs
|
||||
null_count = bind.execute(
|
||||
text("SELECT COUNT(*) FROM user_file WHERE new_id IS NULL")
|
||||
).scalar_one()
|
||||
|
||||
if null_count > 0:
|
||||
logger.info(f"Generating UUIDs for {null_count} user_file records...")
|
||||
|
||||
# Populate in batches to avoid long locks
|
||||
batch_size = 10000
|
||||
total_updated = 0
|
||||
|
||||
while True:
|
||||
result = bind.execute(
|
||||
text(
|
||||
"""
|
||||
UPDATE user_file
|
||||
SET new_id = gen_random_uuid()
|
||||
WHERE new_id IS NULL
|
||||
AND id IN (
|
||||
SELECT id FROM user_file
|
||||
WHERE new_id IS NULL
|
||||
LIMIT :batch_size
|
||||
)
|
||||
"""
|
||||
),
|
||||
{"batch_size": batch_size},
|
||||
)
|
||||
|
||||
updated = result.rowcount
|
||||
total_updated += updated
|
||||
|
||||
if updated < batch_size:
|
||||
break
|
||||
|
||||
logger.info(f" Updated {total_updated}/{null_count} records...")
|
||||
|
||||
logger.info(f"Generated UUIDs for {total_updated} user_file records")
|
||||
|
||||
# Verify all records have UUIDs
|
||||
remaining_null = bind.execute(
|
||||
text("SELECT COUNT(*) FROM user_file WHERE new_id IS NULL")
|
||||
).scalar_one()
|
||||
|
||||
if remaining_null > 0:
|
||||
raise Exception(
|
||||
f"Failed to populate all user_file.new_id values ({remaining_null} NULL)"
|
||||
)
|
||||
|
||||
# Lock down the column
|
||||
op.alter_column("user_file", "new_id", nullable=False)
|
||||
op.alter_column("user_file", "new_id", server_default=None)
|
||||
logger.info("Locked down user_file.new_id column")

    # === Step 2: Populate persona__user_file.user_file_id_uuid ===
    persona_user_file_columns = [
        col["name"] for col in inspector.get_columns("persona__user_file")
    ]

    if has_new_id and "user_file_id_uuid" in persona_user_file_columns:
        logger.info("Populating persona__user_file.user_file_id_uuid...")

        # Count rows needing update
        null_count = bind.execute(
            text(
                """
                SELECT COUNT(*) FROM persona__user_file
                WHERE user_file_id IS NOT NULL AND user_file_id_uuid IS NULL
                """
            )
        ).scalar_one()

        if null_count > 0:
            logger.info(f"Updating {null_count} persona__user_file records...")

            # Update in batches
            batch_size = 10000
            total_updated = 0

            while True:
                result = bind.execute(
                    text(
                        """
                        UPDATE persona__user_file p
                        SET user_file_id_uuid = uf.new_id
                        FROM user_file uf
                        WHERE p.user_file_id = uf.id
                        AND p.user_file_id_uuid IS NULL
                        AND p.persona_id IN (
                            SELECT persona_id
                            FROM persona__user_file
                            WHERE user_file_id_uuid IS NULL
                            LIMIT :batch_size
                        )
                        """
                    ),
                    {"batch_size": batch_size},
                )

                updated = result.rowcount
                total_updated += updated

                if updated < batch_size:
                    break

                logger.info(f" Updated {total_updated}/{null_count} records...")

            logger.info(f"Updated {total_updated} persona__user_file records")

        # Verify all records are populated
        remaining_null = bind.execute(
            text(
                """
                SELECT COUNT(*) FROM persona__user_file
                WHERE user_file_id IS NOT NULL AND user_file_id_uuid IS NULL
                """
            )
        ).scalar_one()

        if remaining_null > 0:
            raise Exception(
                f"Failed to populate all persona__user_file.user_file_id_uuid values ({remaining_null} NULL)"
            )

        op.alter_column("persona__user_file", "user_file_id_uuid", nullable=False)
        logger.info("Locked down persona__user_file.user_file_id_uuid column")

    # === Step 3: Create user_project records from chat_folder ===
    if "chat_folder" in inspector.get_table_names():
        logger.info("Creating user_project records from chat_folder...")

        result = bind.execute(
            text(
                """
                INSERT INTO user_project (user_id, name)
                SELECT cf.user_id, cf.name
                FROM chat_folder cf
                WHERE NOT EXISTS (
                    SELECT 1
                    FROM user_project up
                    WHERE up.user_id = cf.user_id AND up.name = cf.name
                )
                """
            )
        )

        logger.info(f"Created {result.rowcount} user_project records from chat_folder")

    # === Step 4: Populate chat_session.project_id ===
    chat_session_columns = [
        col["name"] for col in inspector.get_columns("chat_session")
    ]

    if "folder_id" in chat_session_columns and "project_id" in chat_session_columns:
        logger.info("Populating chat_session.project_id...")

        # Count sessions needing update
        null_count = bind.execute(
            text(
                """
                SELECT COUNT(*) FROM chat_session
                WHERE project_id IS NULL AND folder_id IS NOT NULL
                """
            )
        ).scalar_one()

        if null_count > 0:
            logger.info(f"Updating {null_count} chat_session records...")

            result = bind.execute(
                text(
                    """
                    UPDATE chat_session cs
                    SET project_id = up.id
                    FROM chat_folder cf
                    JOIN user_project up ON up.user_id = cf.user_id AND up.name = cf.name
                    WHERE cs.folder_id = cf.id AND cs.project_id IS NULL
                    """
                )
            )

            logger.info(f"Updated {result.rowcount} chat_session records")

        # Verify all records are populated
        remaining_null = bind.execute(
            text(
                """
                SELECT COUNT(*) FROM chat_session
                WHERE project_id IS NULL AND folder_id IS NOT NULL
                """
            )
        ).scalar_one()

        if remaining_null > 0:
            logger.warning(
                f"Warning: {remaining_null} chat_session records could not be mapped to projects"
            )

    # === Step 5: Update plaintext FileRecord IDs/display names to UUID scheme ===
    # Prior to the UUID migration, plaintext cache files were keyed by the integer
    # user_file id ('plaintext_<int_id>'); after it, they are keyed by the UUID
    # ('plaintext_<uuid>').
    # This step remaps existing FileRecord rows to the new naming while preserving object_key/bucket.
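    # e.g. a row with file_id 'plaintext_42' is renamed to 'plaintext_<new_id
    # of user_file 42>' (hypothetical ids, for illustration).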
    logger.info("Updating plaintext FileRecord ids and display names to UUID scheme...")

    # Count legacy plaintext records that can be mapped to UUID user_file ids
    count_query = text(
        """
        SELECT COUNT(*)
        FROM file_record fr
        JOIN user_file uf ON fr.file_id = CONCAT('plaintext_', uf.id::text)
        WHERE LOWER(fr.file_origin::text) = 'plaintext_cache'
        """
    )
    legacy_count = bind.execute(count_query).scalar_one()

    if legacy_count and legacy_count > 0:
        logger.info(f"Found {legacy_count} legacy plaintext file records to update")

        # Update display_name first for readability (safe regardless of rename)
        bind.execute(
            text(
                """
                UPDATE file_record fr
                SET display_name = CONCAT('Plaintext for user file ', uf.new_id::text)
                FROM user_file uf
                WHERE LOWER(fr.file_origin::text) = 'plaintext_cache'
                AND fr.file_id = CONCAT('plaintext_', uf.id::text)
                """
            )
        )

        # Remap file_id from 'plaintext_<int>' -> 'plaintext_<uuid>' using the transitional new_id.
        # A single UPDATE joined to user_file (on file_id = 'plaintext_' || id)
        # ensures only rows tied to an existing user_file are renamed.
        result = bind.execute(
            text(
                """
                UPDATE file_record fr
                SET file_id = CONCAT('plaintext_', uf.new_id::text)
                FROM user_file uf
                WHERE LOWER(fr.file_origin::text) = 'plaintext_cache'
                AND fr.file_id = CONCAT('plaintext_', uf.id::text)
                """
            )
        )
        logger.info(
            f"Updated {result.rowcount} plaintext file_record ids to UUID scheme"
        )

    # === Step 6: Backfill document_id_migrated (its default of TRUE was set in migration 1) ===
    # New records default to migrated=True so the migration task won't run for them.
    # Existing rows that have a legacy document_id are marked not migrated so they get processed.

    # Backfill existing records: if document_id is not null, set to FALSE
    bind.execute(
        text(
            """
            UPDATE user_file
            SET document_id_migrated = FALSE
            WHERE document_id IS NOT NULL
            """
        )
    )

    # === Step 7: Backfill user_file.status from index_attempt ===
    logger.info("Backfilling user_file.status from index_attempt...")

    # Update user_file status based on latest index attempt
    # Using CTEs instead of temp tables for asyncpg compatibility
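    # DISTINCT ON returns one row per connector_credential_pair_id; the
    # ORDER BY ... time_updated DESC makes that row the most recent attempt.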
    result = bind.execute(
        text(
            """
            WITH latest_attempt AS (
                SELECT DISTINCT ON (ia.connector_credential_pair_id)
                    ia.connector_credential_pair_id,
                    ia.status
                FROM index_attempt ia
                ORDER BY ia.connector_credential_pair_id, ia.time_updated DESC
            ),
            uf_to_ccp AS (
                SELECT DISTINCT uf.id AS uf_id, ccp.id AS cc_pair_id
                FROM user_file uf
                JOIN document_by_connector_credential_pair dcc
                    ON dcc.id = REPLACE(uf.document_id, 'USER_FILE_CONNECTOR__', 'FILE_CONNECTOR__')
                JOIN connector_credential_pair ccp
                    ON ccp.connector_id = dcc.connector_id
                    AND ccp.credential_id = dcc.credential_id
            )
            UPDATE user_file uf
            SET status = CASE
                WHEN la.status IN ('NOT_STARTED', 'IN_PROGRESS') THEN 'PROCESSING'
                WHEN la.status = 'SUCCESS' THEN 'COMPLETED'
                ELSE 'FAILED'
            END
            FROM uf_to_ccp ufc
            LEFT JOIN latest_attempt la
                ON la.connector_credential_pair_id = ufc.cc_pair_id
            WHERE uf.id = ufc.uf_id
            AND uf.status = 'PROCESSING'
            """
        )
    )

    logger.info(f"Updated status for {result.rowcount} user_file records")

    logger.info("Migration 2 (data preparation) completed successfully")


def downgrade() -> None:
    """Reset populated data to allow clean downgrade of schema."""

    bind = op.get_bind()
    inspector = sa.inspect(bind)

    logger.info("Starting downgrade of data preparation...")

    # Reset user_file columns to allow nulls before data removal
    if "user_file" in inspector.get_table_names():
        columns = [col["name"] for col in inspector.get_columns("user_file")]

        if "new_id" in columns:
            op.alter_column(
                "user_file",
                "new_id",
                nullable=True,
                server_default=sa.text("gen_random_uuid()"),
            )
            # Optionally clear the data
            # bind.execute(text("UPDATE user_file SET new_id = NULL"))
            logger.info("Reset user_file.new_id to nullable")

    # Reset persona__user_file.user_file_id_uuid
    if "persona__user_file" in inspector.get_table_names():
        columns = [col["name"] for col in inspector.get_columns("persona__user_file")]

        if "user_file_id_uuid" in columns:
            op.alter_column("persona__user_file", "user_file_id_uuid", nullable=True)
            # Optionally clear the data
            # bind.execute(text("UPDATE persona__user_file SET user_file_id_uuid = NULL"))
            logger.info("Reset persona__user_file.user_file_id_uuid to nullable")

    # Note: We don't delete user_project records or reset chat_session.project_id
    # as these might be in use and can be handled by the schema downgrade

    # Reset user_file.status to default
    if "user_file" in inspector.get_table_names():
        columns = [col["name"] for col in inspector.get_columns("user_file")]
        if "status" in columns:
            bind.execute(text("UPDATE user_file SET status = 'PROCESSING'"))
            logger.info("Reset user_file.status to default")

    logger.info("Downgrade completed successfully")
@@ -0,0 +1,261 @@
"""Migration 3: User file relationship migration

Revision ID: 16c37a30adf2
Revises: 0cd424f32b1d
Create Date: 2025-09-22 09:47:34.175596

This migration converts folder-based relationships to project-based relationships.
It migrates persona__user_folder to persona__user_file and populates project__user_file.
"""

from alembic import op
import sqlalchemy as sa
from sqlalchemy import text
import logging

logger = logging.getLogger("alembic.runtime.migration")

# revision identifiers, used by Alembic.
revision = "16c37a30adf2"
down_revision = "0cd424f32b1d"
branch_labels = None
depends_on = None


def upgrade() -> None:
    """Migrate folder-based relationships to project-based relationships."""

    bind = op.get_bind()
    inspector = sa.inspect(bind)

    # === Step 1: Migrate persona__user_folder to persona__user_file ===
    table_names = inspector.get_table_names()

    if "persona__user_folder" in table_names and "user_file" in table_names:
        user_file_columns = [col["name"] for col in inspector.get_columns("user_file")]
        has_new_id = "new_id" in user_file_columns

        if has_new_id and "folder_id" in user_file_columns:
            logger.info(
                "Migrating persona__user_folder relationships to persona__user_file..."
            )

            # Count relationships to migrate (asyncpg-compatible)
            count_query = text(
                """
                SELECT COUNT(*)
                FROM (
                    SELECT DISTINCT puf.persona_id, uf.id
                    FROM persona__user_folder puf
                    JOIN user_file uf ON uf.folder_id = puf.user_folder_id
                    WHERE NOT EXISTS (
                        SELECT 1
                        FROM persona__user_file p2
                        WHERE p2.persona_id = puf.persona_id
                        AND p2.user_file_id = uf.id
                    )
                ) AS distinct_pairs
                """
            )
            to_migrate = bind.execute(count_query).scalar_one()

            if to_migrate > 0:
                logger.info(f"Creating {to_migrate} persona-file relationships...")

                # Migrate in batches to avoid memory issues
                batch_size = 10000
                total_inserted = 0

                while True:
                    # Insert batch directly using subquery (asyncpg compatible)
                    result = bind.execute(
                        text(
                            """
                            INSERT INTO persona__user_file (persona_id, user_file_id, user_file_id_uuid)
                            SELECT DISTINCT puf.persona_id, uf.id as file_id, uf.new_id
                            FROM persona__user_folder puf
                            JOIN user_file uf ON uf.folder_id = puf.user_folder_id
                            WHERE NOT EXISTS (
                                SELECT 1
                                FROM persona__user_file p2
                                WHERE p2.persona_id = puf.persona_id
                                AND p2.user_file_id = uf.id
                            )
                            LIMIT :batch_size
                            """
                        ),
                        {"batch_size": batch_size},
                    )

                    inserted = result.rowcount
                    total_inserted += inserted

                    if inserted < batch_size:
                        break

                    logger.info(
                        f" Migrated {total_inserted}/{to_migrate} relationships..."
                    )

                logger.info(
                    f"Created {total_inserted} persona__user_file relationships"
                )

    # === Step 2: Add foreign key for chat_session.project_id ===
    chat_session_fks = inspector.get_foreign_keys("chat_session")
    fk_exists = any(
        fk["name"] == "fk_chat_session_project_id" for fk in chat_session_fks
    )

    if not fk_exists:
        logger.info("Adding foreign key constraint for chat_session.project_id...")
        op.create_foreign_key(
            "fk_chat_session_project_id",
            "chat_session",
            "user_project",
            ["project_id"],
            ["id"],
        )
        logger.info("Added foreign key constraint")

    # === Step 3: Populate project__user_file from user_file.folder_id ===
    user_file_columns = [col["name"] for col in inspector.get_columns("user_file")]
    has_new_id = "new_id" in user_file_columns

    if has_new_id and "folder_id" in user_file_columns:
        logger.info("Populating project__user_file from folder relationships...")

        # Count relationships to create
        count_query = text(
            """
            SELECT COUNT(*)
            FROM user_file uf
            WHERE uf.folder_id IS NOT NULL
            AND NOT EXISTS (
                SELECT 1
                FROM project__user_file puf
                WHERE puf.project_id = uf.folder_id
                AND puf.user_file_id = uf.new_id
            )
            """
        )
        to_create = bind.execute(count_query).scalar_one()

        if to_create > 0:
            logger.info(f"Creating {to_create} project-file relationships...")

            # Insert in batches
            batch_size = 10000
            total_inserted = 0

            while True:
                result = bind.execute(
                    text(
                        """
                        INSERT INTO project__user_file (project_id, user_file_id)
                        SELECT uf.folder_id, uf.new_id
                        FROM user_file uf
                        WHERE uf.folder_id IS NOT NULL
                        AND NOT EXISTS (
                            SELECT 1
                            FROM project__user_file puf
                            WHERE puf.project_id = uf.folder_id
                            AND puf.user_file_id = uf.new_id
                        )
                        LIMIT :batch_size
                        ON CONFLICT (project_id, user_file_id) DO NOTHING
                        """
                    ),
                    {"batch_size": batch_size},
                )

                inserted = result.rowcount
                total_inserted += inserted

                if inserted < batch_size:
                    break

                logger.info(f" Created {total_inserted}/{to_create} relationships...")

            logger.info(f"Created {total_inserted} project__user_file relationships")

    # === Step 4: Create index on chat_session.project_id ===
    try:
        indexes = [ix.get("name") for ix in inspector.get_indexes("chat_session")]
    except Exception:
        indexes = []

    if "ix_chat_session_project_id" not in indexes:
        logger.info("Creating index on chat_session.project_id...")
        op.create_index(
            "ix_chat_session_project_id", "chat_session", ["project_id"], unique=False
        )
        logger.info("Created index")

    logger.info("Migration 3 (relationship migration) completed successfully")


def downgrade() -> None:
    """Remove migrated relationships and constraints."""

    bind = op.get_bind()
    inspector = sa.inspect(bind)

    logger.info("Starting downgrade of relationship migration...")

    # Drop index on chat_session.project_id
    try:
        indexes = [ix.get("name") for ix in inspector.get_indexes("chat_session")]
        if "ix_chat_session_project_id" in indexes:
            op.drop_index("ix_chat_session_project_id", "chat_session")
            logger.info("Dropped index on chat_session.project_id")
    except Exception:
        pass

    # Drop foreign key constraint
    try:
        chat_session_fks = inspector.get_foreign_keys("chat_session")
        fk_exists = any(
            fk["name"] == "fk_chat_session_project_id" for fk in chat_session_fks
        )
        if fk_exists:
            op.drop_constraint(
                "fk_chat_session_project_id", "chat_session", type_="foreignkey"
            )
            logger.info("Dropped foreign key constraint on chat_session.project_id")
    except Exception:
        pass

    # Clear project__user_file relationships (but keep the table for migration 1 to handle)
    if "project__user_file" in inspector.get_table_names():
        result = bind.execute(text("DELETE FROM project__user_file"))
        logger.info(f"Cleared {result.rowcount} records from project__user_file")

    # Remove migrated persona__user_file relationships
    # Only remove those that came from folder relationships
    if all(
        table in inspector.get_table_names()
        for table in ["persona__user_file", "persona__user_folder", "user_file"]
    ):
        user_file_columns = [col["name"] for col in inspector.get_columns("user_file")]
        if "folder_id" in user_file_columns:
            result = bind.execute(
                text(
                    """
                    DELETE FROM persona__user_file puf
                    WHERE EXISTS (
                        SELECT 1
                        FROM user_file uf
                        JOIN persona__user_folder puf2
                            ON puf2.user_folder_id = uf.folder_id
                        WHERE puf.persona_id = puf2.persona_id
                        AND puf.user_file_id = uf.id
                    )
                    """
                )
            )
            logger.info(
                f"Removed {result.rowcount} migrated persona__user_file relationships"
            )

    logger.info("Downgrade completed successfully")
@@ -0,0 +1,218 @@
"""Migration 6: User file schema cleanup

Revision ID: 2b75d0a8ffcb
Revises: 3a78dba1080a
Create Date: 2025-09-22 10:09:26.375377

This migration removes legacy columns and tables after data migration is complete.
It should only be run after verifying all data has been successfully migrated.
"""

from alembic import op
import sqlalchemy as sa
from sqlalchemy import text
import logging

logger = logging.getLogger("alembic.runtime.migration")

# revision identifiers, used by Alembic.
revision = "2b75d0a8ffcb"
down_revision = "3a78dba1080a"
branch_labels = None
depends_on = None


def upgrade() -> None:
    """Remove legacy columns and tables."""

    bind = op.get_bind()
    inspector = sa.inspect(bind)

    logger.info("Starting schema cleanup...")

    # === Step 1: Verify data migration is complete ===
    logger.info("Verifying data migration completion...")

    # Check if any chat sessions still have folder_id references
    chat_session_columns = [
        col["name"] for col in inspector.get_columns("chat_session")
    ]
    if "folder_id" in chat_session_columns:
        orphaned_count = bind.execute(
            text(
                """
                SELECT COUNT(*) FROM chat_session
                WHERE folder_id IS NOT NULL AND project_id IS NULL
                """
            )
        ).scalar_one()

        if orphaned_count > 0:
            logger.warning(
                f"WARNING: {orphaned_count} chat_session records still have "
                f"folder_id without project_id. Proceeding anyway."
            )

    # === Step 2: Drop chat_session.folder_id ===
    if "folder_id" in chat_session_columns:
        logger.info("Dropping chat_session.folder_id...")

        # Drop foreign key constraint first
        op.execute(
            "ALTER TABLE chat_session DROP CONSTRAINT IF EXISTS chat_session_folder_fk"
        )

        # Drop the column
        op.drop_column("chat_session", "folder_id")
        logger.info("Dropped chat_session.folder_id")

    # === Step 3: Drop persona__user_folder table ===
    if "persona__user_folder" in inspector.get_table_names():
        logger.info("Dropping persona__user_folder table...")

        # Check for any remaining data
        remaining = bind.execute(
            text("SELECT COUNT(*) FROM persona__user_folder")
        ).scalar_one()

        if remaining > 0:
            logger.warning(
                f"WARNING: Dropping persona__user_folder with {remaining} records"
            )

        op.drop_table("persona__user_folder")
        logger.info("Dropped persona__user_folder table")

    # === Step 4: Drop chat_folder table ===
    if "chat_folder" in inspector.get_table_names():
        logger.info("Dropping chat_folder table...")

        # Check for any remaining data
        remaining = bind.execute(text("SELECT COUNT(*) FROM chat_folder")).scalar_one()

        if remaining > 0:
            logger.warning(f"WARNING: Dropping chat_folder with {remaining} records")

        op.drop_table("chat_folder")
        logger.info("Dropped chat_folder table")

    # === Step 5: Drop user_file legacy columns ===
    user_file_columns = [col["name"] for col in inspector.get_columns("user_file")]

    # Drop folder_id
    if "folder_id" in user_file_columns:
        logger.info("Dropping user_file.folder_id...")
        op.drop_column("user_file", "folder_id")
        logger.info("Dropped user_file.folder_id")

    # Drop cc_pair_id (already handled in migration 5, but be sure)
    if "cc_pair_id" in user_file_columns:
        logger.info("Dropping user_file.cc_pair_id...")

        # Drop any remaining foreign key constraints
        bind.execute(
            text(
                """
                DO $$
                DECLARE r RECORD;
                BEGIN
                    FOR r IN (
                        SELECT conname
                        FROM pg_constraint c
                        JOIN pg_class t ON c.conrelid = t.oid
                        WHERE c.contype = 'f'
                        AND t.relname = 'user_file'
                        AND EXISTS (
                            SELECT 1 FROM pg_attribute a
                            WHERE a.attrelid = t.oid
                            AND a.attname = 'cc_pair_id'
                        )
                    ) LOOP
                        EXECUTE format('ALTER TABLE user_file DROP CONSTRAINT IF EXISTS %I', r.conname);
                    END LOOP;
                END$$;
                """
            )
        )
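        # Note: the EXISTS clause only checks that the cc_pair_id column is
        # present, so the loop drops every FK constraint on user_file in that
        # case; format('%I') safely quotes each constraint name.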
        op.drop_column("user_file", "cc_pair_id")
        logger.info("Dropped user_file.cc_pair_id")

    # === Step 6: Clean up any remaining constraints ===
    logger.info("Cleaning up remaining constraints...")

    # Drop any unique constraints on removed columns
    op.execute(
        "ALTER TABLE user_file DROP CONSTRAINT IF EXISTS user_file_cc_pair_id_key"
    )

    logger.info("Migration 6 (schema cleanup) completed successfully")
    logger.info("Legacy schema has been fully removed")


def downgrade() -> None:
    """Recreate dropped columns and tables (structure only, no data)."""

    bind = op.get_bind()
    inspector = sa.inspect(bind)

    logger.warning("Downgrading schema cleanup - recreating structure only, no data!")

    # Recreate user_file columns
    if "user_file" in inspector.get_table_names():
        columns = [col["name"] for col in inspector.get_columns("user_file")]

        if "cc_pair_id" not in columns:
            op.add_column(
                "user_file", sa.Column("cc_pair_id", sa.Integer(), nullable=True)
            )

        if "folder_id" not in columns:
            op.add_column(
                "user_file", sa.Column("folder_id", sa.Integer(), nullable=True)
            )

    # Recreate chat_folder table
    if "chat_folder" not in inspector.get_table_names():
        op.create_table(
            "chat_folder",
            sa.Column("id", sa.Integer(), nullable=False),
            sa.Column("user_id", sa.UUID(), nullable=False),
            sa.Column("name", sa.String(), nullable=False),
            sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
            sa.PrimaryKeyConstraint("id"),
            sa.ForeignKeyConstraint(
                ["user_id"], ["user.id"], name="chat_folder_user_fk"
            ),
        )

    # Recreate persona__user_folder table
    if "persona__user_folder" not in inspector.get_table_names():
        op.create_table(
            "persona__user_folder",
            sa.Column("persona_id", sa.Integer(), nullable=False),
            sa.Column("user_folder_id", sa.Integer(), nullable=False),
            sa.PrimaryKeyConstraint("persona_id", "user_folder_id"),
            sa.ForeignKeyConstraint(["persona_id"], ["persona.id"]),
            sa.ForeignKeyConstraint(["user_folder_id"], ["user_project.id"]),
        )

    # Add folder_id back to chat_session
    if "chat_session" in inspector.get_table_names():
        columns = [col["name"] for col in inspector.get_columns("chat_session")]
        if "folder_id" not in columns:
            op.add_column(
                "chat_session", sa.Column("folder_id", sa.Integer(), nullable=True)
            )

            # Add foreign key if chat_folder exists
            if "chat_folder" in inspector.get_table_names():
                op.create_foreign_key(
                    "chat_session_folder_fk",
                    "chat_session",
                    "chat_folder",
                    ["folder_id"],
                    ["id"],
                )

    logger.info("Downgrade completed - structure recreated but data is lost")
@@ -0,0 +1,298 @@
"""Migration 5: User file legacy data cleanup

Revision ID: 3a78dba1080a
Revises: 7cc3fcc116c1
Create Date: 2025-09-22 10:04:27.986294

This migration removes legacy user-file documents and connector_credential_pairs.
It performs bulk deletions of obsolete data after the UUID migration.
"""

from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql as psql
from sqlalchemy import text
import logging
from typing import List
import uuid

logger = logging.getLogger("alembic.runtime.migration")

# revision identifiers, used by Alembic.
revision = "3a78dba1080a"
down_revision = "7cc3fcc116c1"
branch_labels = None
depends_on = None


def batch_delete(
    bind: sa.engine.Connection,
    table_name: str,
    id_column: str,
    ids: List[str | int | uuid.UUID],
    batch_size: int = 1000,
    id_type: str = "int",
) -> int:
    """Delete records in batches to avoid memory issues and timeouts."""
    total_count = len(ids)
    if total_count == 0:
        return 0

    logger.info(
        f"Starting batch deletion of {total_count} records from {table_name}..."
    )

    # Determine appropriate ARRAY type
    if id_type == "uuid":
        array_type = psql.ARRAY(psql.UUID(as_uuid=True))
    elif id_type == "int":
        array_type = psql.ARRAY(sa.Integer())
    else:
        array_type = psql.ARRAY(sa.String())

    total_deleted = 0
    failed_batches = []

    for i in range(0, total_count, batch_size):
        batch_ids = ids[i : i + batch_size]
        try:
            stmt = text(
                f"DELETE FROM {table_name} WHERE {id_column} = ANY(:ids)"
            ).bindparams(sa.bindparam("ids", value=batch_ids, type_=array_type))
            result = bind.execute(stmt)
            total_deleted += result.rowcount

            # Log progress every 10 batches or at completion
            batch_num = (i // batch_size) + 1
            if batch_num % 10 == 0 or i + batch_size >= total_count:
                logger.info(
                    f" Deleted {min(i + batch_size, total_count)}/{total_count} records "
                    f"({total_deleted} actual) from {table_name}"
                )
        except Exception as e:
            logger.error(f"Failed to delete batch {(i // batch_size) + 1}: {e}")
            failed_batches.append((i, min(i + batch_size, total_count)))

    if failed_batches:
        logger.warning(
            f"Failed to delete {len(failed_batches)} batches from {table_name}. "
            f"Total deleted: {total_deleted}/{total_count}"
        )
        # Fail the migration to avoid silently succeeding on partial cleanup
        raise RuntimeError(
            f"Batch deletion failed for {table_name}: "
            f"{len(failed_batches)} failed batches out of "
            f"{(total_count + batch_size - 1) // batch_size}."
        )

    return total_deleted
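
# Illustrative usage only (hypothetical table and ids), showing the call shape:
#
#     deleted = batch_delete(bind, "document", "id", ["doc-1", "doc-2"], id_type="str")
#
# Passing each batch as a typed ARRAY bound to ANY(:ids) keeps it to a single
# round-trip instead of building large IN (...) literals.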


def upgrade() -> None:
    """Remove legacy user-file documents and connector_credential_pairs."""

    bind = op.get_bind()
    inspector = sa.inspect(bind)

    logger.info("Starting legacy data cleanup...")

    # === Step 1: Identify and delete user-file documents ===
    logger.info("Identifying user-file documents to delete...")

    # Get document IDs to delete
    doc_rows = bind.execute(
        text(
            """
            SELECT DISTINCT dcc.id AS document_id
            FROM document_by_connector_credential_pair dcc
            JOIN connector_credential_pair u
                ON u.connector_id = dcc.connector_id
                AND u.credential_id = dcc.credential_id
            WHERE u.is_user_file IS TRUE
            """
        )
    ).fetchall()

    doc_ids = [r[0] for r in doc_rows]

    if doc_ids:
        logger.info(f"Found {len(doc_ids)} user-file documents to delete")

        # Delete dependent rows first
        tables_to_clean = [
            ("document_retrieval_feedback", "document_id"),
            ("document__tag", "document_id"),
            ("chunk_stats", "document_id"),
        ]

        for table_name, column_name in tables_to_clean:
            if table_name in inspector.get_table_names():
                # document_id is a string in these tables
                deleted = batch_delete(
                    bind, table_name, column_name, doc_ids, id_type="str"
                )
                logger.info(f"Deleted {deleted} records from {table_name}")

        # Delete document_by_connector_credential_pair entries
        deleted = batch_delete(
            bind, "document_by_connector_credential_pair", "id", doc_ids, id_type="str"
        )
        logger.info(f"Deleted {deleted} document_by_connector_credential_pair records")

        # Delete documents themselves
        deleted = batch_delete(bind, "document", "id", doc_ids, id_type="str")
        logger.info(f"Deleted {deleted} document records")
    else:
        logger.info("No user-file documents found to delete")

    # === Step 2: Clean up user-file connector_credential_pairs ===
    logger.info("Cleaning up user-file connector_credential_pairs...")

    # Get cc_pair IDs
    cc_pair_rows = bind.execute(
        text(
            """
            SELECT id AS cc_pair_id
            FROM connector_credential_pair
            WHERE is_user_file IS TRUE
            """
        )
    ).fetchall()

    cc_pair_ids = [r[0] for r in cc_pair_rows]

    if cc_pair_ids:
        logger.info(
            f"Found {len(cc_pair_ids)} user-file connector_credential_pairs to clean up"
        )

        # Delete related records
        # Clean child tables first to satisfy foreign key constraints,
        # then the parent tables
        tables_to_clean = [
            ("index_attempt_errors", "connector_credential_pair_id"),
            ("index_attempt", "connector_credential_pair_id"),
            ("background_error", "cc_pair_id"),
            ("document_set__connector_credential_pair", "connector_credential_pair_id"),
            ("user_group__connector_credential_pair", "cc_pair_id"),
        ]

        for table_name, column_name in tables_to_clean:
            if table_name in inspector.get_table_names():
                deleted = batch_delete(
                    bind, table_name, column_name, cc_pair_ids, id_type="int"
                )
                logger.info(f"Deleted {deleted} records from {table_name}")

    # === Step 3: Identify connectors and credentials to delete ===
    logger.info("Identifying orphaned connectors and credentials...")

    # Get connectors used only by user-file cc_pairs
    connector_rows = bind.execute(
        text(
            """
            SELECT DISTINCT ccp.connector_id
            FROM connector_credential_pair ccp
            WHERE ccp.is_user_file IS TRUE
            AND ccp.connector_id != 0 -- Exclude system default
            AND NOT EXISTS (
                SELECT 1
                FROM connector_credential_pair c2
                WHERE c2.connector_id = ccp.connector_id
                AND c2.is_user_file IS NOT TRUE
            )
            """
        )
    ).fetchall()

    userfile_only_connector_ids = [r[0] for r in connector_rows]

    # Get credentials used only by user-file cc_pairs
    credential_rows = bind.execute(
        text(
            """
            SELECT DISTINCT ccp.credential_id
            FROM connector_credential_pair ccp
            WHERE ccp.is_user_file IS TRUE
            AND ccp.credential_id != 0 -- Exclude public/default
            AND NOT EXISTS (
                SELECT 1
                FROM connector_credential_pair c2
                WHERE c2.credential_id = ccp.credential_id
                AND c2.is_user_file IS NOT TRUE
            )
            """
        )
    ).fetchall()

    userfile_only_credential_ids = [r[0] for r in credential_rows]

    # === Step 4: Delete the cc_pairs themselves ===
    if cc_pair_ids:
        # Remove FK dependency from user_file first
        bind.execute(
            text(
                """
                DO $$
                DECLARE r RECORD;
                BEGIN
                    FOR r IN (
                        SELECT conname
                        FROM pg_constraint c
                        JOIN pg_class t ON c.conrelid = t.oid
                        JOIN pg_class ft ON c.confrelid = ft.oid
                        WHERE c.contype = 'f'
                        AND t.relname = 'user_file'
                        AND ft.relname = 'connector_credential_pair'
                    ) LOOP
                        EXECUTE format('ALTER TABLE user_file DROP CONSTRAINT IF EXISTS %I', r.conname);
                    END LOOP;
                END$$;
                """
            )
        )

        # Delete cc_pairs
        deleted = batch_delete(
            bind, "connector_credential_pair", "id", cc_pair_ids, id_type="int"
        )
        logger.info(f"Deleted {deleted} connector_credential_pair records")

    # === Step 5: Delete orphaned connectors ===
    if userfile_only_connector_ids:
        deleted = batch_delete(
            bind, "connector", "id", userfile_only_connector_ids, id_type="int"
        )
        logger.info(f"Deleted {deleted} orphaned connector records")

    # === Step 6: Delete orphaned credentials ===
    if userfile_only_credential_ids:
        # Clean up credential__user_group mappings first
        deleted = batch_delete(
            bind,
            "credential__user_group",
            "credential_id",
            userfile_only_credential_ids,
            id_type="int",
        )
        logger.info(f"Deleted {deleted} credential__user_group records")

        # Delete credentials
        deleted = batch_delete(
            bind, "credential", "id", userfile_only_credential_ids, id_type="int"
        )
        logger.info(f"Deleted {deleted} orphaned credential records")

    logger.info("Migration 5 (legacy data cleanup) completed successfully")


def downgrade() -> None:
    """Cannot restore deleted data - requires backup restoration."""

    logger.error("CRITICAL: Downgrading data cleanup cannot restore deleted data!")
    logger.error("Data restoration requires backup files or database backup.")

    raise NotImplementedError(
        "Downgrade of legacy data cleanup is not supported. "
        "Deleted data must be restored from backups."
    )
@@ -0,0 +1,28 @@
"""reset userfile document_id_migrated field

Revision ID: 40926a4dab77
Revises: 64bd5677aeb6
Create Date: 2025-10-06 16:10:32.898668

"""

from alembic import op

# revision identifiers, used by Alembic.
revision = "40926a4dab77"
down_revision = "64bd5677aeb6"
branch_labels = None
depends_on = None


def upgrade() -> None:
    # Set all existing records to not migrated
    op.execute(
        "UPDATE user_file SET document_id_migrated = FALSE "
        "WHERE document_id_migrated IS DISTINCT FROM FALSE;"
    )
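    # IS DISTINCT FROM FALSE matches TRUE and NULL alike, so NULL rows are
    # normalized too, unlike a plain != FALSE predicate which skips NULLs.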


def downgrade() -> None:
    # No-op
    pass
@@ -0,0 +1,37 @@
"""Add image input support to model config

Revision ID: 64bd5677aeb6
Revises: b30353be4eec
Create Date: 2025-09-28 15:48:12.003612

"""

from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = "64bd5677aeb6"
down_revision = "b30353be4eec"
branch_labels = None
depends_on = None


def upgrade() -> None:
    op.add_column(
        "model_configuration",
        sa.Column("supports_image_input", sa.Boolean(), nullable=True),
    )

    # is_visible appears to be left over from when model visibility was
    # introduced as a nullable field; set any NULL values to False.
    connection = op.get_bind()
    connection.execute(
        sa.text(
            "UPDATE model_configuration SET is_visible = false WHERE is_visible IS NULL"
        )
    )


def downgrade() -> None:
    op.drop_column("model_configuration", "supports_image_input")
@@ -0,0 +1,37 @@
"""add queries and is web fetch to iteration answer

Revision ID: 6f4f86aef280
Revises: 03d710ccf29c
Create Date: 2025-10-14 18:08:30.920123

"""

from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql


# revision identifiers, used by Alembic.
revision = "6f4f86aef280"
down_revision = "03d710ccf29c"
branch_labels = None
depends_on = None


def upgrade() -> None:
    # Add is_web_fetch column
    op.add_column(
        "research_agent_iteration_sub_step",
        sa.Column("is_web_fetch", sa.Boolean(), nullable=True),
    )

    # Add queries column
    op.add_column(
        "research_agent_iteration_sub_step",
        sa.Column("queries", postgresql.JSONB(), nullable=True),
    )


def downgrade() -> None:
    op.drop_column("research_agent_iteration_sub_step", "queries")
    op.drop_column("research_agent_iteration_sub_step", "is_web_fetch")
@@ -0,0 +1,193 @@
"""Migration 4: User file UUID primary key swap

Revision ID: 7cc3fcc116c1
Revises: 16c37a30adf2
Create Date: 2025-09-22 09:54:38.292952

This migration performs the critical UUID primary key swap on the user_file table.
It updates all foreign key references to use UUIDs instead of integers.
"""

from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql as psql
import logging

logger = logging.getLogger("alembic.runtime.migration")

# revision identifiers, used by Alembic.
revision = "7cc3fcc116c1"
down_revision = "16c37a30adf2"
branch_labels = None
depends_on = None


def upgrade() -> None:
    """Swap user_file primary key from integer to UUID."""

    bind = op.get_bind()
    inspector = sa.inspect(bind)

    # Verify we're in the expected state
    user_file_columns = [col["name"] for col in inspector.get_columns("user_file")]
    if "new_id" not in user_file_columns:
        logger.warning(
            "user_file.new_id not found - migration may have already been applied"
        )
        return

    logger.info("Starting UUID primary key swap...")

    # === Step 1: Update persona__user_file foreign key to UUID ===
    logger.info("Updating persona__user_file foreign key...")

    # Drop existing foreign key constraints
    op.execute(
        "ALTER TABLE persona__user_file DROP CONSTRAINT IF EXISTS persona__user_file_user_file_id_uuid_fkey"
    )
    op.execute(
        "ALTER TABLE persona__user_file DROP CONSTRAINT IF EXISTS persona__user_file_user_file_id_fkey"
    )

    # Create new foreign key to user_file.new_id
    op.create_foreign_key(
        "persona__user_file_user_file_id_fkey",
        "persona__user_file",
        "user_file",
        local_cols=["user_file_id_uuid"],
        remote_cols=["new_id"],
    )

    # Drop the old integer column and rename the UUID column
    op.execute("ALTER TABLE persona__user_file DROP COLUMN IF EXISTS user_file_id")
    op.alter_column(
        "persona__user_file",
        "user_file_id_uuid",
        new_column_name="user_file_id",
        existing_type=psql.UUID(as_uuid=True),
        nullable=False,
    )

    # Recreate composite primary key
    op.execute(
        "ALTER TABLE persona__user_file DROP CONSTRAINT IF EXISTS persona__user_file_pkey"
    )
    op.execute(
        "ALTER TABLE persona__user_file ADD PRIMARY KEY (persona_id, user_file_id)"
    )

    logger.info("Updated persona__user_file to use UUID foreign key")

    # === Step 2: Perform the primary key swap on user_file ===
    logger.info("Swapping user_file primary key to UUID...")

    # Drop the primary key constraint
    op.execute("ALTER TABLE user_file DROP CONSTRAINT IF EXISTS user_file_pkey")

    # Drop the old id column and rename new_id to id
    op.execute("ALTER TABLE user_file DROP COLUMN IF EXISTS id")
    op.alter_column(
        "user_file",
        "new_id",
        new_column_name="id",
        existing_type=psql.UUID(as_uuid=True),
        nullable=False,
    )

    # Set default for new inserts
    op.alter_column(
        "user_file",
        "id",
        existing_type=psql.UUID(as_uuid=True),
        server_default=sa.text("gen_random_uuid()"),
    )

    # Create new primary key
    op.execute("ALTER TABLE user_file ADD PRIMARY KEY (id)")

    logger.info("Swapped user_file primary key to UUID")

    # === Step 3: Update foreign key constraints ===
    logger.info("Updating foreign key constraints...")

    # Recreate persona__user_file foreign key to point to user_file.id
    # Drop existing FK first to break dependency on the unique constraint
    op.execute(
        "ALTER TABLE persona__user_file DROP CONSTRAINT IF EXISTS persona__user_file_user_file_id_fkey"
    )
    # Drop the unique constraint on (formerly) new_id BEFORE recreating the FK,
    # so the FK will bind to the primary key instead of the unique index.
    op.execute("ALTER TABLE user_file DROP CONSTRAINT IF EXISTS uq_user_file_new_id")
    # Now recreate FK to the primary key column
    op.create_foreign_key(
        "persona__user_file_user_file_id_fkey",
        "persona__user_file",
        "user_file",
        local_cols=["user_file_id"],
        remote_cols=["id"],
    )

    # Add foreign keys for project__user_file
    existing_fks = inspector.get_foreign_keys("project__user_file")

    has_user_file_fk = any(
        fk.get("referred_table") == "user_file"
        and fk.get("constrained_columns") == ["user_file_id"]
        for fk in existing_fks
    )

    if not has_user_file_fk:
        op.create_foreign_key(
            "fk_project__user_file_user_file_id",
            "project__user_file",
            "user_file",
            ["user_file_id"],
            ["id"],
        )
        logger.info("Added project__user_file -> user_file foreign key")

    has_project_fk = any(
        fk.get("referred_table") == "user_project"
        and fk.get("constrained_columns") == ["project_id"]
        for fk in existing_fks
    )

    if not has_project_fk:
        op.create_foreign_key(
            "fk_project__user_file_project_id",
            "project__user_file",
            "user_project",
            ["project_id"],
            ["id"],
        )
        logger.info("Added project__user_file -> user_project foreign key")

    # === Step 4: Mark files for document_id migration ===
    logger.info("Marking files for background document_id migration...")

    logger.info("Migration 4 (UUID primary key swap) completed successfully")
    logger.info(
        "NOTE: Background task will update document IDs in Vespa and search_doc"
    )


def downgrade() -> None:
    """Revert UUID primary key back to integer (data destructive!)."""

    logger.error("CRITICAL: Downgrading UUID primary key swap is data destructive!")
    logger.error(
        "This will break all UUID-based references created after the migration."
    )
    logger.error("Only proceed if absolutely necessary and you have backups.")

    # The downgrade would need to:
    # 1. Add back integer columns
    # 2. Generate new sequential IDs
    # 3. Update all foreign key references
    # 4. Swap primary keys back
    # This is complex and risky, so we raise an error instead

    raise NotImplementedError(
        "Downgrade of UUID primary key swap is not supported due to data loss risk. "
        "Manual intervention with data backup/restore is required."
    )
backend/alembic/versions/96a5702df6aa_mcp_tool_enabled.py
@@ -0,0 +1,45 @@
"""mcp_tool_enabled

Revision ID: 96a5702df6aa
Revises: 40926a4dab77
Create Date: 2025-10-09 12:10:21.733097

"""

from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = "96a5702df6aa"
down_revision = "40926a4dab77"
branch_labels = None
depends_on = None


DELETE_DISABLED_TOOLS_SQL = "DELETE FROM tool WHERE enabled = false"


def upgrade() -> None:
    op.add_column(
        "tool",
        sa.Column(
            "enabled",
            sa.Boolean(),
            nullable=False,
            server_default=sa.true(),
        ),
    )
    op.create_index(
        "ix_tool_mcp_server_enabled",
        "tool",
        ["mcp_server_id", "enabled"],
    )
    # Remove the server default so the application controls defaulting
    op.alter_column("tool", "enabled", server_default=None)


def downgrade() -> None:
    op.execute(DELETE_DISABLED_TOOLS_SQL)
    op.drop_index("ix_tool_mcp_server_enabled", table_name="tool")
    op.drop_column("tool", "enabled")
@@ -0,0 +1,257 @@
"""Migration 1: User file schema additions

Revision ID: 9b66d3156fc6
Revises: b4ef3ae0bf6e
Create Date: 2025-09-22 09:42:06.086732

This migration adds new columns and tables without modifying existing data.
It is safe to run and can be easily rolled back.
"""

from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql as psql
import logging

logger = logging.getLogger("alembic.runtime.migration")
# revision identifiers, used by Alembic.
revision = "9b66d3156fc6"
down_revision = "b4ef3ae0bf6e"
branch_labels = None
depends_on = None


def upgrade() -> None:
    """Add new columns and tables without modifying existing data."""

    # Enable pgcrypto for UUID generation
    op.execute("CREATE EXTENSION IF NOT EXISTS pgcrypto")

    bind = op.get_bind()
    inspector = sa.inspect(bind)

    # === USER_FILE: Add new columns ===
    logger.info("Adding new columns to user_file table...")

    user_file_columns = [col["name"] for col in inspector.get_columns("user_file")]

    # Check if ID is already UUID (in case of re-run after partial migration)
    id_is_uuid = any(
        col["name"] == "id" and "uuid" in str(col["type"]).lower()
        for col in inspector.get_columns("user_file")
    )

    # Add transitional UUID column only if ID is not already UUID
    if "new_id" not in user_file_columns and not id_is_uuid:
        op.add_column(
            "user_file",
            sa.Column(
                "new_id",
                psql.UUID(as_uuid=True),
                nullable=True,
                server_default=sa.text("gen_random_uuid()"),
            ),
        )
        op.create_unique_constraint("uq_user_file_new_id", "user_file", ["new_id"])
        logger.info("Added new_id column to user_file")

    # Add status column
    if "status" not in user_file_columns:
        op.add_column(
            "user_file",
            sa.Column(
                "status",
                sa.Enum(
                    "PROCESSING",
                    "COMPLETED",
                    "FAILED",
                    "CANCELED",
                    name="userfilestatus",
                    native_enum=False,
                ),
                nullable=False,
                server_default="PROCESSING",
            ),
        )
        logger.info("Added status column to user_file")

    # Add other tracking columns
    if "chunk_count" not in user_file_columns:
        op.add_column(
            "user_file", sa.Column("chunk_count", sa.Integer(), nullable=True)
        )
        logger.info("Added chunk_count column to user_file")

    if "last_accessed_at" not in user_file_columns:
        op.add_column(
            "user_file",
            sa.Column("last_accessed_at", sa.DateTime(timezone=True), nullable=True),
        )
        logger.info("Added last_accessed_at column to user_file")

    if "needs_project_sync" not in user_file_columns:
        op.add_column(
            "user_file",
            sa.Column(
                "needs_project_sync",
                sa.Boolean(),
                nullable=False,
                server_default=sa.text("false"),
            ),
        )
        logger.info("Added needs_project_sync column to user_file")

    if "last_project_sync_at" not in user_file_columns:
        op.add_column(
            "user_file",
            sa.Column(
                "last_project_sync_at", sa.DateTime(timezone=True), nullable=True
            ),
        )
        logger.info("Added last_project_sync_at column to user_file")

    if "document_id_migrated" not in user_file_columns:
        op.add_column(
            "user_file",
            sa.Column(
                "document_id_migrated",
                sa.Boolean(),
                nullable=False,
                server_default=sa.text("true"),
            ),
        )
        logger.info("Added document_id_migrated column to user_file")

    # === USER_FOLDER -> USER_PROJECT rename ===
    table_names = set(inspector.get_table_names())

    if "user_folder" in table_names:
        logger.info("Updating user_folder table...")
        # Make description nullable first
        op.alter_column("user_folder", "description", nullable=True)

        # Rename table if user_project doesn't exist
        if "user_project" not in table_names:
            op.execute("ALTER TABLE user_folder RENAME TO user_project")
            logger.info("Renamed user_folder to user_project")
    elif "user_project" in table_names:
        # If already renamed, ensure column nullability
        project_cols = [col["name"] for col in inspector.get_columns("user_project")]
        if "description" in project_cols:
            op.alter_column("user_project", "description", nullable=True)

    # Add instructions column to user_project
    inspector = sa.inspect(bind)  # Refresh after rename
    if "user_project" in inspector.get_table_names():
        project_columns = [col["name"] for col in inspector.get_columns("user_project")]
        if "instructions" not in project_columns:
            op.add_column(
                "user_project",
                sa.Column("instructions", sa.String(), nullable=True),
            )
            logger.info("Added instructions column to user_project")

    # === CHAT_SESSION: Add project_id ===
    chat_session_columns = [
        col["name"] for col in inspector.get_columns("chat_session")
    ]
    if "project_id" not in chat_session_columns:
        op.add_column(
            "chat_session",
            sa.Column("project_id", sa.Integer(), nullable=True),
        )
        logger.info("Added project_id column to chat_session")

    # === PERSONA__USER_FILE: Add UUID column ===
    persona_user_file_columns = [
        col["name"] for col in inspector.get_columns("persona__user_file")
    ]
    if "user_file_id_uuid" not in persona_user_file_columns:
        op.add_column(
            "persona__user_file",
            sa.Column("user_file_id_uuid", psql.UUID(as_uuid=True), nullable=True),
        )
        logger.info("Added user_file_id_uuid column to persona__user_file")

    # === PROJECT__USER_FILE: Create new table ===
    if "project__user_file" not in inspector.get_table_names():
        op.create_table(
            "project__user_file",
            sa.Column("project_id", sa.Integer(), nullable=False),
            sa.Column("user_file_id", psql.UUID(as_uuid=True), nullable=False),
            sa.PrimaryKeyConstraint("project_id", "user_file_id"),
        )
        op.create_index(
            "idx_project__user_file_user_file_id",
            "project__user_file",
            ["user_file_id"],
        )
        logger.info("Created project__user_file table")

    logger.info("Migration 1 (schema additions) completed successfully")


def downgrade() -> None:
    """Remove added columns and tables."""

    bind = op.get_bind()
    inspector = sa.inspect(bind)

    logger.info("Starting downgrade of schema additions...")

    # Drop project__user_file table
    if "project__user_file" in inspector.get_table_names():
        op.drop_index("idx_project__user_file_user_file_id", "project__user_file")
        op.drop_table("project__user_file")
        logger.info("Dropped project__user_file table")

    # Remove columns from persona__user_file
    if "persona__user_file" in inspector.get_table_names():
        columns = [col["name"] for col in inspector.get_columns("persona__user_file")]
        if "user_file_id_uuid" in columns:
            op.drop_column("persona__user_file", "user_file_id_uuid")
            logger.info("Dropped user_file_id_uuid from persona__user_file")

    # Remove columns from chat_session
    if "chat_session" in inspector.get_table_names():
        columns = [col["name"] for col in inspector.get_columns("chat_session")]
        if "project_id" in columns:
            op.drop_column("chat_session", "project_id")
            logger.info("Dropped project_id from chat_session")

    # Rename user_project back to user_folder and remove instructions
    if "user_project" in inspector.get_table_names():
        columns = [col["name"] for col in inspector.get_columns("user_project")]
        if "instructions" in columns:
            op.drop_column("user_project", "instructions")
        op.execute("ALTER TABLE user_project RENAME TO user_folder")
        op.alter_column("user_folder", "description", nullable=False)
        logger.info("Renamed user_project back to user_folder")

    # Remove columns from user_file
    if "user_file" in inspector.get_table_names():
        columns = [col["name"] for col in inspector.get_columns("user_file")]

        columns_to_drop = [
            "document_id_migrated",
            "last_project_sync_at",
            "needs_project_sync",
            "last_accessed_at",
            "chunk_count",
            "status",
        ]

        for col in columns_to_drop:
            if col in columns:
                op.drop_column("user_file", col)
                logger.info(f"Dropped {col} from user_file")

        if "new_id" in columns:
            op.drop_constraint("uq_user_file_new_id", "user_file", type_="unique")
            op.drop_column("user_file", "new_id")
            logger.info("Dropped new_id from user_file")

    # Drop the enum type if one exists (defensive; native_enum=False normally creates none)
    bind.execute(sa.text("DROP TYPE IF EXISTS userfilestatus"))

    logger.info("Downgrade completed successfully")
||||
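Both directions above lean on the same inspect-before-alter idiom. A minimal standalone sketch of that idiom, with a hypothetical table and column that are not part of this migration:

import sqlalchemy as sa
from alembic import op


def add_column_if_missing(table_name: str, column: sa.Column) -> None:
    # Check the live schema instead of trusting migration history, so the
    # migration stays safe to re-run against partially-migrated databases.
    inspector = sa.inspect(op.get_bind())
    existing = {col["name"] for col in inspector.get_columns(table_name)}
    if column.name not in existing:
        op.add_column(table_name, column)


# Hypothetical call inside an upgrade():
# add_column_if_missing("user_project", sa.Column("notes", sa.String(), nullable=True))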
backend/alembic/versions/b30353be4eec_add_mcp_auth_performer.py (new file, 123 lines)
@@ -0,0 +1,123 @@
"""add_mcp_auth_performer
|
||||
|
||||
Revision ID: b30353be4eec
|
||||
Revises: 2b75d0a8ffcb
|
||||
Create Date: 2025-09-13 14:58:08.413534
|
||||
|
||||
"""
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
from onyx.db.enums import MCPAuthenticationPerformer, MCPTransport
|
||||
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = "b30353be4eec"
|
||||
down_revision = "2b75d0a8ffcb"
|
||||
branch_labels = None
|
||||
depends_on = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
"""moving to a better way of handling auth performer and transport"""
|
||||
# Add nullable column first for backward compatibility
|
||||
op.add_column(
|
||||
"mcp_server",
|
||||
sa.Column(
|
||||
"auth_performer",
|
||||
sa.Enum(MCPAuthenticationPerformer, native_enum=False),
|
||||
nullable=True,
|
||||
),
|
||||
)
|
||||
|
||||
op.add_column(
|
||||
"mcp_server",
|
||||
sa.Column(
|
||||
"transport",
|
||||
sa.Enum(MCPTransport, native_enum=False),
|
||||
nullable=True,
|
||||
),
|
||||
)
|
||||
|
||||
# # Backfill values using existing data and inference rules
|
||||
bind = op.get_bind()
|
||||
|
||||
# 1) OAUTH servers are always PER_USER
|
||||
bind.execute(
|
||||
sa.text(
|
||||
"""
|
||||
UPDATE mcp_server
|
||||
SET auth_performer = 'PER_USER'
|
||||
WHERE auth_type = 'OAUTH'
|
||||
"""
|
||||
)
|
||||
)
|
||||
|
||||
# 2) If there is no admin connection config, mark as ADMIN (and not set yet)
|
||||
bind.execute(
|
||||
sa.text(
|
||||
"""
|
||||
UPDATE mcp_server
|
||||
SET auth_performer = 'ADMIN'
|
||||
WHERE admin_connection_config_id IS NULL
|
||||
AND auth_performer IS NULL
|
||||
"""
|
||||
)
|
||||
)
|
||||
|
||||
# 3) If there exists any user-specific connection config (user_email != ''), mark as PER_USER
|
||||
bind.execute(
|
||||
sa.text(
|
||||
"""
|
||||
UPDATE mcp_server AS ms
|
||||
SET auth_performer = 'PER_USER'
|
||||
FROM mcp_connection_config AS mcc
|
||||
WHERE mcc.mcp_server_id = ms.id
|
||||
AND COALESCE(mcc.user_email, '') <> ''
|
||||
AND ms.auth_performer IS NULL
|
||||
"""
|
||||
)
|
||||
)
|
||||
|
||||
# 4) Default any remaining nulls to ADMIN (covers API_TOKEN admin-managed and NONE)
|
||||
bind.execute(
|
||||
sa.text(
|
||||
"""
|
||||
UPDATE mcp_server
|
||||
SET auth_performer = 'ADMIN'
|
||||
WHERE auth_performer IS NULL
|
||||
"""
|
||||
)
|
||||
)
|
||||
|
||||
# Finally, make the column non-nullable
|
||||
op.alter_column(
|
||||
"mcp_server",
|
||||
"auth_performer",
|
||||
existing_type=sa.Enum(MCPAuthenticationPerformer, native_enum=False),
|
||||
nullable=False,
|
||||
)
|
||||
|
||||
# Backfill transport for existing rows to STREAMABLE_HTTP, then make non-nullable
|
||||
bind.execute(
|
||||
sa.text(
|
||||
"""
|
||||
UPDATE mcp_server
|
||||
SET transport = 'STREAMABLE_HTTP'
|
||||
WHERE transport IS NULL
|
||||
"""
|
||||
)
|
||||
)
|
||||
|
||||
op.alter_column(
|
||||
"mcp_server",
|
||||
"transport",
|
||||
existing_type=sa.Enum(MCPTransport, native_enum=False),
|
||||
nullable=False,
|
||||
)
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
"""remove cols"""
|
||||
op.drop_column("mcp_server", "transport")
|
||||
op.drop_column("mcp_server", "auth_performer")
|
||||
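For readers tracing the backfill, the four passes are order-dependent: pass 2 claims config-less servers before pass 3 can see them. A single-statement equivalent that makes that precedence explicit (illustrative only, not part of the migration; the column starts all-NULL, so the outer filter is equivalent to pass 1's unconditional update):

UPDATE mcp_server AS ms
SET auth_performer = CASE
    WHEN ms.auth_type = 'OAUTH' THEN 'PER_USER'
    WHEN ms.admin_connection_config_id IS NULL THEN 'ADMIN'
    WHEN EXISTS (
        SELECT 1
        FROM mcp_connection_config AS mcc
        WHERE mcc.mcp_server_id = ms.id
          AND COALESCE(mcc.user_email, '') <> ''
    ) THEN 'PER_USER'
    ELSE 'ADMIN'
END
WHERE ms.auth_performer IS NULL;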
@@ -0,0 +1,27 @@
"""add_user_oauth_token_to_slack_bot

Revision ID: b4ef3ae0bf6e
Revises: 505c488f6662
Create Date: 2025-08-26 17:47:41.788462

"""

from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = "b4ef3ae0bf6e"
down_revision = "505c488f6662"
branch_labels = None
depends_on = None


def upgrade() -> None:
    # Add user_token column to slack_bot table
    op.add_column("slack_bot", sa.Column("user_token", sa.LargeBinary(), nullable=True))


def downgrade() -> None:
    # Remove user_token column from slack_bot table
    op.drop_column("slack_bot", "user_token")
@@ -0,0 +1,72 @@
"""personalization_user_info

Revision ID: c8a93a2af083
Revises: 6f4f86aef280
Create Date: 2025-10-14 15:59:03.577343

"""

from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql


# revision identifiers, used by Alembic.
revision = "c8a93a2af083"
down_revision = "6f4f86aef280"
branch_labels = None
depends_on = None


def upgrade() -> None:
    op.add_column(
        "user",
        sa.Column("personal_name", sa.String(), nullable=True),
    )
    op.add_column(
        "user",
        sa.Column("personal_role", sa.String(), nullable=True),
    )
    op.add_column(
        "user",
        sa.Column(
            "use_memories",
            sa.Boolean(),
            nullable=False,
            server_default=sa.true(),
        ),
    )

    op.create_table(
        "memory",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("user_id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("memory_text", sa.Text(), nullable=False),
        sa.Column("conversation_id", postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column("message_id", sa.Integer(), nullable=True),
        sa.Column(
            "created_at",
            sa.DateTime(timezone=True),
            server_default=sa.func.now(),
            nullable=False,
        ),
        sa.Column(
            "updated_at",
            sa.DateTime(timezone=True),
            server_default=sa.func.now(),
            nullable=False,
        ),
        sa.ForeignKeyConstraint(["user_id"], ["user.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
    )

    op.create_index("ix_memory_user_id", "memory", ["user_id"])


def downgrade() -> None:
    op.drop_index("ix_memory_user_id", table_name="memory")
    op.drop_table("memory")

    op.drop_column("user", "use_memories")
    op.drop_column("user", "personal_role")
    op.drop_column("user", "personal_name")
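A SQLAlchemy 2.x ORM mapping consistent with the memory table created above, shown for orientation only; the real model lives in onyx.db.models and may differ:

import uuid
from datetime import datetime

from sqlalchemy import DateTime, ForeignKey, Integer, Text, func
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


class Base(DeclarativeBase):
    pass


class Memory(Base):
    __tablename__ = "memory"

    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    # index=True mirrors the ix_memory_user_id index from the migration
    user_id: Mapped[uuid.UUID] = mapped_column(
        UUID(as_uuid=True), ForeignKey("user.id", ondelete="CASCADE"), index=True
    )
    memory_text: Mapped[str] = mapped_column(Text)
    conversation_id: Mapped[uuid.UUID | None] = mapped_column(
        UUID(as_uuid=True), nullable=True
    )
    message_id: Mapped[int | None] = mapped_column(Integer, nullable=True)
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), server_default=func.now()
    )
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), server_default=func.now()
    )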
@@ -1,29 +1,17 @@
from datetime import datetime
from functools import lru_cache

import jwt
import requests
from fastapi import Depends
from fastapi import HTTPException
from fastapi import Request
from fastapi import status
from jwt import decode as jwt_decode
from jwt import InvalidTokenError
from jwt import PyJWTError
from sqlalchemy import func
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession

from ee.onyx.configs.app_configs import JWT_PUBLIC_KEY_URL
from ee.onyx.configs.app_configs import SUPER_CLOUD_API_KEY
from ee.onyx.configs.app_configs import SUPER_USERS
from ee.onyx.db.saml import get_saml_account
from ee.onyx.server.seeding import get_seed_config
from ee.onyx.utils.secrets import extract_hashed_cookie
from onyx.auth.users import current_admin_user
from onyx.configs.app_configs import AUTH_TYPE
from onyx.configs.app_configs import USER_AUTH_SECRET
from onyx.configs.constants import AuthType
from onyx.db.models import User
from onyx.utils.logger import setup_logger

@@ -31,75 +19,11 @@ from onyx.utils.logger import setup_logger
logger = setup_logger()


@lru_cache()
def get_public_key() -> str | None:
    if JWT_PUBLIC_KEY_URL is None:
        logger.error("JWT_PUBLIC_KEY_URL is not set")
        return None

    response = requests.get(JWT_PUBLIC_KEY_URL)
    response.raise_for_status()
    return response.text


async def verify_jwt_token(token: str, async_db_session: AsyncSession) -> User | None:
    try:
        public_key_pem = get_public_key()
        if public_key_pem is None:
            logger.error("Failed to retrieve public key")
            return None

        payload = jwt_decode(
            token,
            public_key_pem,
            algorithms=["RS256"],
            audience=None,
        )
        email = payload.get("email")
        if email:
            result = await async_db_session.execute(
                select(User).where(func.lower(User.email) == func.lower(email))
            )
            return result.scalars().first()
    except InvalidTokenError:
        logger.error("Invalid JWT token")
        get_public_key.cache_clear()
    except PyJWTError as e:
        logger.error(f"JWT decoding error: {str(e)}")
        get_public_key.cache_clear()
    return None


def verify_auth_setting() -> None:
    # All the Auth flows are valid for EE version
    logger.notice(f"Using Auth Type: {AUTH_TYPE.value}")


async def optional_user_(
    request: Request,
    user: User | None,
    async_db_session: AsyncSession,
) -> User | None:
    # Check if the user has a session cookie from SAML
    if AUTH_TYPE == AuthType.SAML:
        saved_cookie = extract_hashed_cookie(request)

        if saved_cookie:
            saml_account = await get_saml_account(
                cookie=saved_cookie, async_db_session=async_db_session
            )
            user = saml_account.user if saml_account else None

    # If user is still None, check for JWT in Authorization header
    if user is None and JWT_PUBLIC_KEY_URL is not None:
        auth_header = request.headers.get("Authorization")
        if auth_header and auth_header.startswith("Bearer "):
            token = auth_header[len("Bearer ") :].strip()
            user = await verify_jwt_token(token, async_db_session)

    return user


def get_default_admin_user_emails_() -> list[str]:
    seed_config = get_seed_config()
    if seed_config and seed_config.admin_user_emails:
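An illustrative round trip for the RS256 path above, substituting a locally generated key pair for the key normally fetched from JWT_PUBLIC_KEY_URL:

import jwt
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

# Generate a throwaway key pair standing in for the issuer's real keys.
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
public_pem = private_key.public_key().public_bytes(
    serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo
)

# The issuer signs a token carrying the email claim that verify_jwt_token reads.
token = jwt.encode({"email": "user@example.com"}, private_key, algorithm="RS256")
payload = jwt.decode(token, public_pem, algorithms=["RS256"], audience=None)
assert payload["email"] == "user@example.com"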
backend/ee/onyx/background/celery/apps/background.py (new file, 12 lines)
@@ -0,0 +1,12 @@
from onyx.background.celery.apps.background import celery_app


celery_app.autodiscover_tasks(
    [
        "ee.onyx.background.celery.tasks.doc_permission_syncing",
        "ee.onyx.background.celery.tasks.external_group_syncing",
        "ee.onyx.background.celery.tasks.cleanup",
        "ee.onyx.background.celery.tasks.tenant_provisioning",
        "ee.onyx.background.celery.tasks.query_history",
    ]
)
@@ -1,123 +1,4 @@
import csv
import io
from datetime import datetime

from celery import shared_task
from celery import Task

from ee.onyx.server.query_history.api import fetch_and_process_chat_session_history
from ee.onyx.server.query_history.api import ONYX_ANONYMIZED_EMAIL
from ee.onyx.server.query_history.models import QuestionAnswerPairSnapshot
from onyx.background.celery.apps.heavy import celery_app
from onyx.background.task_utils import construct_query_history_report_name
from onyx.configs.app_configs import JOB_TIMEOUT
from onyx.configs.app_configs import ONYX_QUERY_HISTORY_TYPE
from onyx.configs.constants import FileOrigin
from onyx.configs.constants import FileType
from onyx.configs.constants import OnyxCeleryTask
from onyx.configs.constants import QueryHistoryType
from onyx.db.engine.sql_engine import get_session_with_current_tenant
from onyx.db.tasks import delete_task_with_id
from onyx.db.tasks import mark_task_as_finished_with_id
from onyx.db.tasks import mark_task_as_started_with_id
from onyx.file_store.file_store import get_default_file_store
from onyx.utils.logger import setup_logger


logger = setup_logger()


@shared_task(
    name=OnyxCeleryTask.EXPORT_QUERY_HISTORY_TASK,
    ignore_result=True,
    soft_time_limit=JOB_TIMEOUT,
    bind=True,
    trail=False,
)
def export_query_history_task(
    self: Task,
    *,
    start: datetime,
    end: datetime,
    start_time: datetime,
    # Need to include the tenant_id since the TenantAwareTask needs this
    tenant_id: str,
) -> None:
    if not self.request.id:
        raise RuntimeError("No task id defined for this task; cannot identify it")

    task_id = self.request.id
    stream = io.StringIO()
    writer = csv.DictWriter(
        stream,
        fieldnames=list(QuestionAnswerPairSnapshot.model_fields.keys()),
    )
    writer.writeheader()

    with get_session_with_current_tenant() as db_session:
        try:
            mark_task_as_started_with_id(
                db_session=db_session,
                task_id=task_id,
            )

            snapshot_generator = fetch_and_process_chat_session_history(
                db_session=db_session,
                start=start,
                end=end,
            )

            for snapshot in snapshot_generator:
                if ONYX_QUERY_HISTORY_TYPE == QueryHistoryType.ANONYMIZED:
                    snapshot.user_email = ONYX_ANONYMIZED_EMAIL

                writer.writerows(
                    qa_pair.to_json()
                    for qa_pair in QuestionAnswerPairSnapshot.from_chat_session_snapshot(
                        snapshot
                    )
                )

        except Exception:
            logger.exception(f"Failed to export query history with {task_id=}")
            mark_task_as_finished_with_id(
                db_session=db_session,
                task_id=task_id,
                success=False,
            )
            raise

    report_name = construct_query_history_report_name(task_id)
    with get_session_with_current_tenant() as db_session:
        try:
            stream.seek(0)
            get_default_file_store().save_file(
                content=stream,
                display_name=report_name,
                file_origin=FileOrigin.QUERY_HISTORY_CSV,
                file_type=FileType.CSV,
                file_metadata={
                    "start": start.isoformat(),
                    "end": end.isoformat(),
                    "start_time": start_time.isoformat(),
                },
                file_id=report_name,
            )

            delete_task_with_id(
                db_session=db_session,
                task_id=task_id,
            )
        except Exception:
            logger.exception(
                f"Failed to save query history export file; {report_name=}"
            )
            mark_task_as_finished_with_id(
                db_session=db_session,
                task_id=task_id,
                success=False,
            )
            raise


celery_app.autodiscover_tasks(
@@ -125,5 +6,6 @@ celery_app.autodiscover_tasks(
        "ee.onyx.background.celery.tasks.doc_permission_syncing",
        "ee.onyx.background.celery.tasks.external_group_syncing",
        "ee.onyx.background.celery.tasks.cleanup",
        "ee.onyx.background.celery.tasks.query_history",
    ]
)
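With the task body moved out (it reappears under backend/ee/onyx/background/celery/tasks/query_history/ below), the heavy app only autodiscovers. A hypothetical enqueue of the relocated task; the keyword names mirror the signature above, the values are placeholders:

from datetime import datetime, timedelta, timezone

from onyx.background.celery.apps.heavy import celery_app
from onyx.configs.constants import OnyxCeleryTask

end = datetime.now(tz=timezone.utc)
celery_app.send_task(
    OnyxCeleryTask.EXPORT_QUERY_HISTORY_TASK,
    kwargs={
        "start": end - timedelta(days=30),
        "end": end,
        "start_time": end,
        "tenant_id": "public",  # placeholder tenant id
    },
)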
@@ -5,7 +5,6 @@ from celery import Task
from celery.exceptions import SoftTimeLimitExceeded
from redis.lock import Lock as RedisLock

from ee.onyx.server.tenants.product_gating import get_gated_tenants
from onyx.background.celery.apps.app_base import task_logger
from onyx.background.celery.tasks.beat_schedule import BEAT_EXPIRES_DEFAULT
from onyx.configs.constants import CELERY_GENERIC_BEAT_LOCK_TIMEOUT
@@ -52,10 +51,18 @@ def cloud_beat_task_generator(

    try:
        tenant_ids = get_all_tenant_ids()
        gated_tenants = get_gated_tenants()

        # NOTE: for now, we are running tasks for gated tenants, since we want to allow
        # connector deletion to run successfully. The new plan is to continuously prune
        # the gated tenants set, so we won't have a build-up of old, unused gated tenants.
        # Keeping this around in case we want to revert to the previous behavior.
        # gated_tenants = get_gated_tenants()

        for tenant_id in tenant_ids:
            if tenant_id in gated_tenants:
                continue

            # Same comment here as the above NOTE
            # if tenant_id in gated_tenants:
            #     continue

            current_time = time.monotonic()
            if current_time - last_lock_time >= (CELERY_GENERIC_BEAT_LOCK_TIMEOUT / 4):
@@ -56,6 +56,12 @@ from onyx.db.enums import ConnectorCredentialPairStatus
from onyx.db.enums import SyncStatus
from onyx.db.enums import SyncType
from onyx.db.models import ConnectorCredentialPair
from onyx.db.permission_sync_attempt import complete_doc_permission_sync_attempt
from onyx.db.permission_sync_attempt import create_doc_permission_sync_attempt
from onyx.db.permission_sync_attempt import mark_doc_permission_sync_attempt_failed
from onyx.db.permission_sync_attempt import (
    mark_doc_permission_sync_attempt_in_progress,
)
from onyx.db.sync_record import insert_sync_record
from onyx.db.sync_record import update_sync_record_status
from onyx.db.users import batch_add_ext_perm_user_if_not_exists
@@ -113,6 +119,14 @@ def _get_fence_validation_block_expiration() -> int:
"""Jobs / utils for kicking off doc permissions sync tasks."""


def _fail_doc_permission_sync_attempt(attempt_id: int, error_msg: str) -> None:
    """Helper to mark a doc permission sync attempt as failed with an error message."""
    with get_session_with_current_tenant() as db_session:
        mark_doc_permission_sync_attempt_failed(
            attempt_id, db_session, error_message=error_msg
        )


def _is_external_doc_permissions_sync_due(cc_pair: ConnectorCredentialPair) -> bool:
    """Returns boolean indicating if external doc permissions sync is due."""

@@ -379,6 +393,15 @@ def connector_permission_sync_generator_task(
    doc_permission_sync_ctx_dict["request_id"] = self.request.id
    doc_permission_sync_ctx.set(doc_permission_sync_ctx_dict)

    with get_session_with_current_tenant() as db_session:
        attempt_id = create_doc_permission_sync_attempt(
            connector_credential_pair_id=cc_pair_id,
            db_session=db_session,
        )
        task_logger.info(
            f"Created doc permission sync attempt: {attempt_id} for cc_pair={cc_pair_id}"
        )

    redis_connector = RedisConnector(tenant_id, cc_pair_id)

    r = get_redis_client()
@@ -389,22 +412,28 @@ def connector_permission_sync_generator_task(
    start = time.monotonic()
    while True:
        if time.monotonic() - start > CELERY_TASK_WAIT_FOR_FENCE_TIMEOUT:
            raise ValueError(
            error_msg = (
                f"connector_permission_sync_generator_task - timed out waiting for fence to be ready: "
                f"fence={redis_connector.permissions.fence_key}"
            )
            _fail_doc_permission_sync_attempt(attempt_id, error_msg)
            raise ValueError(error_msg)

        if not redis_connector.permissions.fenced:  # The fence must exist
            raise ValueError(
            error_msg = (
                f"connector_permission_sync_generator_task - fence not found: "
                f"fence={redis_connector.permissions.fence_key}"
            )
            _fail_doc_permission_sync_attempt(attempt_id, error_msg)
            raise ValueError(error_msg)

        payload = redis_connector.permissions.payload  # The payload must exist
        if not payload:
            raise ValueError(
            error_msg = (
                "connector_permission_sync_generator_task: payload invalid or not found"
            )
            _fail_doc_permission_sync_attempt(attempt_id, error_msg)
            raise ValueError(error_msg)

        if payload.celery_task_id is None:
            logger.info(
@@ -432,9 +461,11 @@ def connector_permission_sync_generator_task(

    acquired = lock.acquire(blocking=False)
    if not acquired:
        task_logger.warning(
        error_msg = (
            f"Permission sync task already running, exiting...: cc_pair={cc_pair_id}"
        )
        task_logger.warning(error_msg)
        _fail_doc_permission_sync_attempt(attempt_id, error_msg)
        return None

    try:
@@ -470,11 +501,15 @@ def connector_permission_sync_generator_task(
        source_type = cc_pair.connector.source
        sync_config = get_source_perm_sync_config(source_type)
        if sync_config is None:
            logger.error(f"No sync config found for {source_type}")
            error_msg = f"No sync config found for {source_type}"
            logger.error(error_msg)
            _fail_doc_permission_sync_attempt(attempt_id, error_msg)
            return None

        if sync_config.doc_sync_config is None:
            if sync_config.censoring_config:
                error_msg = f"Doc sync config is None but censoring config exists for {source_type}"
                _fail_doc_permission_sync_attempt(attempt_id, error_msg)
                return None

            raise ValueError(
@@ -483,6 +518,8 @@ def connector_permission_sync_generator_task(

        logger.info(f"Syncing docs for {source_type} with cc_pair={cc_pair_id}")

        mark_doc_permission_sync_attempt_in_progress(attempt_id, db_session)

        payload = redis_connector.permissions.payload
        if not payload:
            raise ValueError(f"No fence payload found: cc_pair={cc_pair_id}")
@@ -533,8 +570,9 @@ def connector_permission_sync_generator_task(
        )

        tasks_generated = 0
        docs_with_errors = 0
        for doc_external_access in document_external_accesses:
            redis_connector.permissions.update_db(
            result = redis_connector.permissions.update_db(
                lock=lock,
                new_permissions=[doc_external_access],
                source_string=source_type,
@@ -542,11 +580,23 @@ def connector_permission_sync_generator_task(
                credential_id=cc_pair.credential.id,
                task_logger=task_logger,
            )
            tasks_generated += 1
            tasks_generated += result.num_updated
            docs_with_errors += result.num_errors

        task_logger.info(
            f"RedisConnector.permissions.generate_tasks finished. "
            f"cc_pair={cc_pair_id} tasks_generated={tasks_generated}"
            f"cc_pair={cc_pair_id} tasks_generated={tasks_generated} docs_with_errors={docs_with_errors}"
        )

        complete_doc_permission_sync_attempt(
            db_session=db_session,
            attempt_id=attempt_id,
            total_docs_synced=tasks_generated,
            docs_with_permission_errors=docs_with_errors,
        )
        task_logger.info(
            f"Completed doc permission sync attempt {attempt_id}: "
            f"{tasks_generated} docs, {docs_with_errors} errors"
        )

        redis_connector.permissions.generator_complete = tasks_generated
@@ -561,6 +611,11 @@ def connector_permission_sync_generator_task(
            f"Permission sync exceptioned: cc_pair={cc_pair_id} payload_id={payload_id}"
        )

        with get_session_with_current_tenant() as db_session:
            mark_doc_permission_sync_attempt_failed(
                attempt_id, db_session, error_message=error_msg
            )

        redis_connector.permissions.generator_clear()
        redis_connector.permissions.taskset_clear()
        redis_connector.permissions.set_fence(None)
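Stripped of the Redis fencing, the attempt record added in these hunks follows a create, in-progress, then complete-or-failed lifecycle. A compact sketch using the helpers imported above; cc_pair_id and the counters are placeholders:

from onyx.db.engine.sql_engine import get_session_with_current_tenant
from onyx.db.permission_sync_attempt import (
    complete_doc_permission_sync_attempt,
    create_doc_permission_sync_attempt,
    mark_doc_permission_sync_attempt_failed,
    mark_doc_permission_sync_attempt_in_progress,
)

cc_pair_id = 42  # placeholder

with get_session_with_current_tenant() as db_session:
    attempt_id = create_doc_permission_sync_attempt(
        connector_credential_pair_id=cc_pair_id, db_session=db_session
    )
    try:
        mark_doc_permission_sync_attempt_in_progress(attempt_id, db_session)
        tasks_generated, docs_with_errors = 0, 0  # filled in by the real sync loop
        complete_doc_permission_sync_attempt(
            db_session=db_session,
            attempt_id=attempt_id,
            total_docs_synced=tasks_generated,
            docs_with_permission_errors=docs_with_errors,
        )
    except Exception as exc:
        mark_doc_permission_sync_attempt_failed(
            attempt_id, db_session, error_message=str(exc)
        )
        raise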
@@ -49,6 +49,16 @@ from onyx.db.enums import ConnectorCredentialPairStatus
from onyx.db.enums import SyncStatus
from onyx.db.enums import SyncType
from onyx.db.models import ConnectorCredentialPair
from onyx.db.permission_sync_attempt import complete_external_group_sync_attempt
from onyx.db.permission_sync_attempt import (
    create_external_group_sync_attempt,
)
from onyx.db.permission_sync_attempt import (
    mark_external_group_sync_attempt_failed,
)
from onyx.db.permission_sync_attempt import (
    mark_external_group_sync_attempt_in_progress,
)
from onyx.db.sync_record import insert_sync_record
from onyx.db.sync_record import update_sync_record_status
from onyx.redis.redis_connector import RedisConnector
@@ -70,6 +80,14 @@ logger = setup_logger()
_EXTERNAL_GROUP_BATCH_SIZE = 100


def _fail_external_group_sync_attempt(attempt_id: int, error_msg: str) -> None:
    """Helper to mark an external group sync attempt as failed with an error message."""
    with get_session_with_current_tenant() as db_session:
        mark_external_group_sync_attempt_failed(
            attempt_id, db_session, error_message=error_msg
        )


def _get_fence_validation_block_expiration() -> int:
    """
    Compute the expiration time for the fence validation block signal.

@@ -93,7 +111,7 @@ def _is_external_group_sync_due(cc_pair: ConnectorCredentialPair) -> bool:

    if cc_pair.access_type != AccessType.SYNC:
        task_logger.error(
            f"Recieved non-sync CC Pair {cc_pair.id} for external "
            f"Received non-sync CC Pair {cc_pair.id} for external "
            f"group sync. Actual access type: {cc_pair.access_type}"
        )
        return False

@@ -449,6 +467,16 @@ def _perform_external_group_sync(
    cc_pair_id: int,
    tenant_id: str,
) -> None:
    # Create attempt record at the start
    with get_session_with_current_tenant() as db_session:
        attempt_id = create_external_group_sync_attempt(
            connector_credential_pair_id=cc_pair_id,
            db_session=db_session,
        )
        logger.info(
            f"Created external group sync attempt: {attempt_id} for cc_pair={cc_pair_id}"
        )

    with get_session_with_current_tenant() as db_session:
        cc_pair = get_connector_credential_pair_from_id(
            db_session=db_session,
@@ -463,11 +491,13 @@ def _perform_external_group_sync(
        if sync_config is None:
            msg = f"No sync config found for {source_type} for cc_pair: {cc_pair_id}"
            emit_background_error(msg, cc_pair_id=cc_pair_id)
            _fail_external_group_sync_attempt(attempt_id, msg)
            raise ValueError(msg)

        if sync_config.group_sync_config is None:
            msg = f"No group sync config found for {source_type} for cc_pair: {cc_pair_id}"
            emit_background_error(msg, cc_pair_id=cc_pair_id)
            _fail_external_group_sync_attempt(attempt_id, msg)
            raise ValueError(msg)

        ext_group_sync_func = sync_config.group_sync_config.group_sync_func
@@ -477,14 +507,27 @@ def _perform_external_group_sync(
        )
        mark_old_external_groups_as_stale(db_session, cc_pair_id)

        # Mark attempt as in progress
        mark_external_group_sync_attempt_in_progress(attempt_id, db_session)
        logger.info(f"Marked external group sync attempt {attempt_id} as in progress")

        logger.info(
            f"Syncing external groups for {source_type} for cc_pair: {cc_pair_id}"
        )
        external_user_group_batch: list[ExternalUserGroup] = []
        seen_users: set[str] = set()  # Track unique users across all groups
        total_groups_processed = 0
        total_group_memberships_synced = 0
        try:
            external_user_group_generator = ext_group_sync_func(tenant_id, cc_pair)
            for external_user_group in external_user_group_generator:
                external_user_group_batch.append(external_user_group)

                # Track progress
                total_groups_processed += 1
                total_group_memberships_synced += len(external_user_group.user_emails)
                seen_users = seen_users.union(external_user_group.user_emails)

                if len(external_user_group_batch) >= _EXTERNAL_GROUP_BATCH_SIZE:
                    logger.debug(
                        f"New external user groups: {external_user_group_batch}"
@@ -506,6 +549,13 @@ def _perform_external_group_sync(
                source=cc_pair.connector.source,
            )
        except Exception as e:
            format_error_for_logging(e)

            # Mark as failed (this also updates progress to show partial progress)
            mark_external_group_sync_attempt_failed(
                attempt_id, db_session, error_message=str(e)
            )

            # TODO: add some notification to the admins here
            logger.exception(
                f"Error syncing external groups for {source_type} for cc_pair: {cc_pair_id} {e}"
@@ -517,6 +567,24 @@ def _perform_external_group_sync(
        )
        remove_stale_external_groups(db_session, cc_pair_id)

        # Calculate total unique users processed
        total_users_processed = len(seen_users)

        # Complete the sync attempt with final progress
        complete_external_group_sync_attempt(
            db_session=db_session,
            attempt_id=attempt_id,
            total_users_processed=total_users_processed,
            total_groups_processed=total_groups_processed,
            total_group_memberships_synced=total_group_memberships_synced,
            errors_encountered=0,
        )
        logger.info(
            f"Completed external group sync attempt {attempt_id}: "
            f"{total_groups_processed} groups, {total_users_processed} users, "
            f"{total_group_memberships_synced} memberships"
        )

        mark_all_relevant_cc_pairs_as_external_group_synced(db_session, cc_pair)
backend/ee/onyx/background/celery/tasks/query_history/tasks.py (new file, 119 lines)
@@ -0,0 +1,119 @@
import csv
import io
from datetime import datetime

from celery import shared_task
from celery import Task

from ee.onyx.server.query_history.api import fetch_and_process_chat_session_history
from ee.onyx.server.query_history.api import ONYX_ANONYMIZED_EMAIL
from ee.onyx.server.query_history.models import QuestionAnswerPairSnapshot
from onyx.background.task_utils import construct_query_history_report_name
from onyx.configs.app_configs import JOB_TIMEOUT
from onyx.configs.app_configs import ONYX_QUERY_HISTORY_TYPE
from onyx.configs.constants import FileOrigin
from onyx.configs.constants import FileType
from onyx.configs.constants import OnyxCeleryTask
from onyx.configs.constants import QueryHistoryType
from onyx.db.engine.sql_engine import get_session_with_current_tenant
from onyx.db.tasks import delete_task_with_id
from onyx.db.tasks import mark_task_as_finished_with_id
from onyx.db.tasks import mark_task_as_started_with_id
from onyx.file_store.file_store import get_default_file_store
from onyx.utils.logger import setup_logger


logger = setup_logger()


@shared_task(
    name=OnyxCeleryTask.EXPORT_QUERY_HISTORY_TASK,
    ignore_result=True,
    soft_time_limit=JOB_TIMEOUT,
    bind=True,
    trail=False,
)
def export_query_history_task(
    self: Task,
    *,
    start: datetime,
    end: datetime,
    start_time: datetime,
    # Need to include the tenant_id since the TenantAwareTask needs this
    tenant_id: str,
) -> None:
    if not self.request.id:
        raise RuntimeError("No task id defined for this task; cannot identify it")

    task_id = self.request.id
    stream = io.StringIO()
    writer = csv.DictWriter(
        stream,
        fieldnames=list(QuestionAnswerPairSnapshot.model_fields.keys()),
    )
    writer.writeheader()

    with get_session_with_current_tenant() as db_session:
        try:
            mark_task_as_started_with_id(
                db_session=db_session,
                task_id=task_id,
            )

            snapshot_generator = fetch_and_process_chat_session_history(
                db_session=db_session,
                start=start,
                end=end,
            )

            for snapshot in snapshot_generator:
                if ONYX_QUERY_HISTORY_TYPE == QueryHistoryType.ANONYMIZED:
                    snapshot.user_email = ONYX_ANONYMIZED_EMAIL

                writer.writerows(
                    qa_pair.to_json()
                    for qa_pair in QuestionAnswerPairSnapshot.from_chat_session_snapshot(
                        snapshot
                    )
                )

        except Exception:
            logger.exception(f"Failed to export query history with {task_id=}")
            mark_task_as_finished_with_id(
                db_session=db_session,
                task_id=task_id,
                success=False,
            )
            raise

    report_name = construct_query_history_report_name(task_id)
    with get_session_with_current_tenant() as db_session:
        try:
            stream.seek(0)
            get_default_file_store().save_file(
                content=stream,
                display_name=report_name,
                file_origin=FileOrigin.QUERY_HISTORY_CSV,
                file_type=FileType.CSV,
                file_metadata={
                    "start": start.isoformat(),
                    "end": end.isoformat(),
                    "start_time": start_time.isoformat(),
                },
                file_id=report_name,
            )

            delete_task_with_id(
                db_session=db_session,
                task_id=task_id,
            )
        except Exception:
            logger.exception(
                f"Failed to save query history export file; {report_name=}"
            )
            mark_task_as_finished_with_id(
                db_session=db_session,
                task_id=task_id,
                success=False,
            )
            raise
@@ -1,26 +1,6 @@
import json
import os

# Applicable for OIDC Auth
OPENID_CONFIG_URL = os.environ.get("OPENID_CONFIG_URL", "")

# Applicable for OIDC Auth, allows you to override the scopes that
# are requested from the OIDC provider. Currently used when passing
# over access tokens to tool calls and the tool needs more scopes
OIDC_SCOPE_OVERRIDE: list[str] | None = None
_OIDC_SCOPE_OVERRIDE = os.environ.get("OIDC_SCOPE_OVERRIDE")

if _OIDC_SCOPE_OVERRIDE:
    try:
        OIDC_SCOPE_OVERRIDE = [
            scope.strip() for scope in _OIDC_SCOPE_OVERRIDE.split(",")
        ]
    except Exception:
        pass
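For example, with the environment variable set as below, the comma-splitting above yields a four-element list (values illustrative):

# OIDC_SCOPE_OVERRIDE="openid, email, profile, offline_access"   (environment)
# parsed result:
OIDC_SCOPE_OVERRIDE = ["openid", "email", "profile", "offline_access"]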
# Applicable for SAML Auth
SAML_CONF_DIR = os.environ.get("SAML_CONF_DIR") or "/app/ee/onyx/configs/saml_config"


#####
# Auto Permission Sync
@@ -73,6 +73,12 @@ def fetch_per_user_query_analytics(
        ChatSession.user_id,
    )
    .join(ChatSession, ChatSession.id == ChatMessage.chat_session_id)
    # Include chats that have no explicit feedback instead of dropping them
    .join(
        ChatMessageFeedback,
        ChatMessageFeedback.chat_message_id == ChatMessage.id,
        isouter=True,
    )
    .where(
        ChatMessage.time_sent >= start,
    )
@@ -50,6 +50,25 @@ def get_empty_chat_messages_entries__paginated(
        if message.message_type != MessageType.USER:
            continue

        # Get user email
        user_email = chat_session.user.email if chat_session.user else None

        # Get assistant name (from session persona, or alternate if specified)
        assistant_name = None
        if message.alternate_assistant_id:
            # If there's an alternate assistant, we need to fetch it
            from onyx.db.models import Persona

            alternate_persona = (
                db_session.query(Persona)
                .filter(Persona.id == message.alternate_assistant_id)
                .first()
            )
            if alternate_persona:
                assistant_name = alternate_persona.name
        elif chat_session.persona:
            assistant_name = chat_session.persona.name

        message_skeletons.append(
            ChatMessageSkeleton(
                message_id=message.id,
@@ -57,6 +76,9 @@ def get_empty_chat_messages_entries__paginated(
                user_id=str(chat_session.user_id) if chat_session.user_id else None,
                flow_type=flow_type,
                time_sent=message.time_sent,
                assistant_name=assistant_name,
                user_email=user_email,
                number_of_tokens=message.token_count,
            )
        )
    if len(chat_sessions) == 0:
@@ -124,9 +124,9 @@ def get_space_permission(
        and not space_permissions.external_user_group_ids
    ):
        logger.warning(
            f"No permissions found for space '{space_key}'. This is very unlikely"
            "to be correct and is more likely caused by an access token with"
            "insufficient permissions. Make sure that the access token has Admin"
            f"No permissions found for space '{space_key}'. This is very unlikely "
            "to be correct and is more likely caused by an access token with "
            "insufficient permissions. Make sure that the access token has Admin "
            f"permissions for space '{space_key}'"
        )
@@ -26,7 +26,7 @@ def _get_slim_doc_generator(
        else 0.0
    )

    return gmail_connector.retrieve_all_slim_documents(
    return gmail_connector.retrieve_all_slim_docs_perm_sync(
        start=start_time,
        end=current_time.timestamp(),
        callback=callback,
@@ -34,7 +34,7 @@ def _get_slim_doc_generator(
        else 0.0
    )

    return google_drive_connector.retrieve_all_slim_documents(
    return google_drive_connector.retrieve_all_slim_docs_perm_sync(
        start=start_time,
        end=current_time.timestamp(),
        callback=callback,
@@ -59,7 +59,7 @@ def _build_holder_map(permissions: list[dict]) -> dict[str, list[Holder]]:

    for raw_perm in permissions:
        if not hasattr(raw_perm, "raw"):
            logger.warn(f"Expected a 'raw' field, but none was found: {raw_perm=}")
            logger.warning(f"Expected a 'raw' field, but none was found: {raw_perm=}")
            continue

        permission = Permission(**raw_perm.raw)
@@ -71,14 +71,14 @@ def _build_holder_map(permissions: list[dict]) -> dict[str, list[Holder]]:
        # In order to associate this permission to some Atlassian entity, we need the "Holder".
        # If this doesn't exist, then we cannot associate this permission to anyone; just skip.
        if not permission.holder:
            logger.warn(
            logger.warning(
                f"Expected to find a permission holder, but none was found: {permission=}"
            )
            continue

        type = permission.holder.get("type")
        if not type:
            logger.warn(
            logger.warning(
                f"Expected to find the type of permission holder, but none was found: {permission=}"
            )
            continue

@@ -105,7 +105,9 @@ def _get_slack_document_access(
    channel_permissions: dict[str, ExternalAccess],
    callback: IndexingHeartbeatInterface | None,
) -> Generator[DocExternalAccess, None, None]:
    slim_doc_generator = slack_connector.retrieve_all_slim_documents(callback=callback)
    slim_doc_generator = slack_connector.retrieve_all_slim_docs_perm_sync(
        callback=callback
    )

    for doc_metadata_batch in slim_doc_generator:
        for doc_metadata in doc_metadata_batch:
@@ -4,7 +4,7 @@ from ee.onyx.external_permissions.perm_sync_types import FetchAllDocumentsIdsFunction
from onyx.access.models import DocExternalAccess
from onyx.access.models import ExternalAccess
from onyx.configs.constants import DocumentSource
from onyx.connectors.interfaces import SlimConnector
from onyx.connectors.interfaces import SlimConnectorWithPermSync
from onyx.db.models import ConnectorCredentialPair
from onyx.indexing.indexing_heartbeat import IndexingHeartbeatInterface
from onyx.utils.logger import setup_logger
@@ -17,7 +17,7 @@ def generic_doc_sync(
    fetch_all_existing_docs_ids_fn: FetchAllDocumentsIdsFunction,
    callback: IndexingHeartbeatInterface | None,
    doc_source: DocumentSource,
    slim_connector: SlimConnector,
    slim_connector: SlimConnectorWithPermSync,
    label: str,
) -> Generator[DocExternalAccess, None, None]:
    """
@@ -40,7 +40,7 @@ def generic_doc_sync(
    newly_fetched_doc_ids: set[str] = set()

    logger.info(f"Fetching all slim documents from {doc_source}")
    for doc_batch in slim_connector.retrieve_all_slim_documents(callback=callback):
    for doc_batch in slim_connector.retrieve_all_slim_docs_perm_sync(callback=callback):
        logger.info(f"Got {len(doc_batch)} slim documents from {doc_source}")

        if callback:
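The SlimConnectorWithPermSync bound above only requires the renamed retrieval method. A toy connector satisfying that shape — purely hypothetical; the real interface in onyx.connectors.interfaces carries more structure:

from collections.abc import Generator
from typing import Any


class ToyPermSyncConnector:
    """Hypothetical stand-in yielding pre-canned 'slim document' batches."""

    def __init__(self, docs: list[Any], batch_size: int = 2) -> None:
        self._docs = docs
        self._batch_size = batch_size

    def retrieve_all_slim_docs_perm_sync(
        self, callback: Any = None
    ) -> Generator[list[Any], None, None]:
        # Yield in small batches, mirroring how generic_doc_sync consumes them.
        for i in range(0, len(self._docs), self._batch_size):
            yield self._docs[i : i + self._batch_size]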
backend/ee/onyx/feature_flags/factory.py (new file, 15 lines)
@@ -0,0 +1,15 @@
from ee.onyx.feature_flags.posthog_provider import PostHogFeatureFlagProvider
from onyx.feature_flags.interface import FeatureFlagProvider


def get_posthog_feature_flag_provider() -> FeatureFlagProvider:
    """
    Get the PostHog feature flag provider instance.

    This is the EE implementation that gets loaded by the versioned
    implementation loader.

    Returns:
        PostHogFeatureFlagProvider: The PostHog-based feature flag provider
    """
    return PostHogFeatureFlagProvider()
backend/ee/onyx/feature_flags/posthog_provider.py (new file, 54 lines)
@@ -0,0 +1,54 @@
from typing import Any
from uuid import UUID

from ee.onyx.utils.posthog_client import posthog
from onyx.feature_flags.interface import FeatureFlagProvider
from onyx.utils.logger import setup_logger

logger = setup_logger()


class PostHogFeatureFlagProvider(FeatureFlagProvider):
    """
    PostHog-based feature flag provider.

    Uses PostHog's feature flag API to determine if features are enabled
    for specific users. Only active in multi-tenant mode.
    """

    def feature_enabled(
        self,
        flag_key: str,
        user_id: UUID,
        user_properties: dict[str, Any] | None = None,
    ) -> bool:
        """
        Check if a feature flag is enabled for a user via PostHog.

        Args:
            flag_key: The identifier for the feature flag to check
            user_id: The unique identifier for the user
            user_properties: Optional dictionary of user properties/attributes
                that may influence flag evaluation

        Returns:
            True if the feature is enabled for the user, False otherwise.
        """
        try:
            posthog.set(
                distinct_id=user_id,
                properties=user_properties,
            )
            is_enabled = posthog.feature_enabled(
                flag_key,
                str(user_id),
                person_properties=user_properties,
            )

            return bool(is_enabled) if is_enabled is not None else False

        except Exception as e:
            logger.error(
                f"Error checking feature flag {flag_key} for user {user_id}: {e}"
            )
            return False
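A hypothetical call site for the provider above; the flag key and properties are illustrative:

from uuid import uuid4

from ee.onyx.feature_flags.posthog_provider import PostHogFeatureFlagProvider

provider = PostHogFeatureFlagProvider()
if provider.feature_enabled(
    "new-chat-ui", uuid4(), user_properties={"plan": "enterprise"}
):
    pass  # serve the gated experience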
@@ -3,11 +3,7 @@ from contextlib import asynccontextmanager

from fastapi import FastAPI
from httpx_oauth.clients.google import GoogleOAuth2
from httpx_oauth.clients.openid import BASE_SCOPES
from httpx_oauth.clients.openid import OpenID

from ee.onyx.configs.app_configs import OIDC_SCOPE_OVERRIDE
from ee.onyx.configs.app_configs import OPENID_CONFIG_URL
from ee.onyx.server.analytics.api import router as analytics_router
from ee.onyx.server.auth_check import check_ee_router_auth
from ee.onyx.server.documents.cc_pair import router as ee_document_cc_pair_router
@@ -31,7 +27,6 @@ from ee.onyx.server.query_and_chat.query_backend import (
)
from ee.onyx.server.query_history.api import router as query_history_router
from ee.onyx.server.reporting.usage_export_api import router as usage_export_router
from ee.onyx.server.saml import router as saml_router
from ee.onyx.server.seeding import seed_db
from ee.onyx.server.tenants.api import router as tenants_router
from ee.onyx.server.token_rate_limits.api import (
@@ -117,49 +112,6 @@ def get_application() -> FastAPI:
        prefix="/auth",
    )

    if AUTH_TYPE == AuthType.OIDC:
        # Ensure we request offline_access for refresh tokens
        try:
            oidc_scopes = list(OIDC_SCOPE_OVERRIDE or BASE_SCOPES)
            if "offline_access" not in oidc_scopes:
                oidc_scopes.append("offline_access")
        except Exception as e:
            logger.warning(f"Error configuring OIDC scopes: {e}")
            # Fall back to default scopes if there's an error
            oidc_scopes = BASE_SCOPES

        include_auth_router_with_prefix(
            application,
            create_onyx_oauth_router(
                OpenID(
                    OAUTH_CLIENT_ID,
                    OAUTH_CLIENT_SECRET,
                    OPENID_CONFIG_URL,
                    # Use the configured scopes
                    base_scopes=oidc_scopes,
                ),
                auth_backend,
                USER_AUTH_SECRET,
                associate_by_email=True,
                is_verified_by_default=True,
                redirect_url=f"{WEB_DOMAIN}/auth/oidc/callback",
            ),
            prefix="/auth/oidc",
        )

        # need basic auth router for `logout` endpoint
        include_auth_router_with_prefix(
            application,
            fastapi_users.get_auth_router(auth_backend),
            prefix="/auth",
        )

    elif AUTH_TYPE == AuthType.SAML:
        include_auth_router_with_prefix(
            application,
            saml_router,
        )

    # RBAC / group access control
    include_router_with_global_prefix_prepended(application, user_group_router)
    # Analytics endpoints
@@ -8,7 +8,7 @@ from sqlalchemy.orm import Session
from ee.onyx.db.standard_answer import fetch_standard_answer_categories_by_names
from ee.onyx.db.standard_answer import find_matching_standard_answers
from onyx.configs.constants import MessageType
from onyx.configs.onyxbot_configs import DANSWER_REACT_EMOJI
from onyx.configs.onyxbot_configs import ONYX_BOT_REACT_EMOJI
from onyx.db.chat import create_chat_session
from onyx.db.chat import create_new_chat_message
from onyx.db.chat import get_chat_messages_by_sessions
@@ -193,7 +193,7 @@ def _handle_standard_answers(
        db_session.commit()

        update_emote_react(
            emoji=DANSWER_REACT_EMOJI,
            emoji=ONYX_BOT_REACT_EMOJI,
            channel=message_info.channel_to_respond,
            message_ts=message_info.msg_to_respond,
            remove=True,
@@ -1,45 +0,0 @@
import json
import os
from typing import cast
from typing import List

from cohere import Client

from ee.onyx.configs.app_configs import COHERE_DEFAULT_API_KEY

Embedding = List[float]


def load_processed_docs(cohere_enabled: bool) -> list[dict]:
    base_path = os.path.join(os.getcwd(), "onyx", "seeding")

    if cohere_enabled and COHERE_DEFAULT_API_KEY:
        initial_docs_path = os.path.join(base_path, "initial_docs_cohere.json")
        processed_docs = json.load(open(initial_docs_path))

        cohere_client = Client(api_key=COHERE_DEFAULT_API_KEY)
        embed_model = "embed-english-v3.0"

        for doc in processed_docs:
            title_embed_response = cohere_client.embed(
                texts=[doc["title"]],
                model=embed_model,
                input_type="search_document",
            )
            content_embed_response = cohere_client.embed(
                texts=[doc["content"]],
                model=embed_model,
                input_type="search_document",
            )

            doc["title_embedding"] = cast(
                List[Embedding], title_embed_response.embeddings
            )[0]
            doc["content_embedding"] = cast(
                List[Embedding], content_embed_response.embeddings
            )[0]
    else:
        initial_docs_path = os.path.join(base_path, "initial_docs.json")
        processed_docs = json.load(open(initial_docs_path))

    return processed_docs
@@ -10,13 +10,6 @@ EE_PUBLIC_ENDPOINT_SPECS = PUBLIC_ENDPOINT_SPECS + [
    ("/enterprise-settings/logo", {"GET"}),
    ("/enterprise-settings/logotype", {"GET"}),
    ("/enterprise-settings/custom-analytics-script", {"GET"}),
    # oidc
    ("/auth/oidc/authorize", {"GET"}),
    ("/auth/oidc/callback", {"GET"}),
    # saml
    ("/auth/saml/authorize", {"GET"}),
    ("/auth/saml/callback", {"POST"}),
    ("/auth/saml/logout", {"POST"}),
]
@@ -182,7 +182,6 @@ def admin_get_chat_sessions(
            time_created=chat.time_created.isoformat(),
            time_updated=chat.time_updated.isoformat(),
            shared_status=chat.shared_status,
            folder_id=chat.folder_id,
            current_alternate_model=chat.current_alternate_model,
        )
        for chat in chat_sessions
@@ -48,7 +48,17 @@ def generate_chat_messages_report(
        max_size=MAX_IN_MEMORY_SIZE, mode="w+"
    ) as temp_file:
        csvwriter = csv.writer(temp_file, delimiter=",")
        csvwriter.writerow(["session_id", "user_id", "flow_type", "time_sent"])
        csvwriter.writerow(
            [
                "session_id",
                "user_id",
                "flow_type",
                "time_sent",
                "assistant_name",
                "user_email",
                "number_of_tokens",
            ]
        )
        for chat_message_skeleton_batch in get_all_empty_chat_message_entries(
            db_session, period
        ):
@@ -59,6 +69,9 @@ def generate_chat_messages_report(
                    chat_message_skeleton.user_id,
                    chat_message_skeleton.flow_type,
                    chat_message_skeleton.time_sent.isoformat(),
                    chat_message_skeleton.assistant_name,
                    chat_message_skeleton.user_email,
                    chat_message_skeleton.number_of_tokens,
                ]
            )
@@ -16,6 +16,9 @@ class ChatMessageSkeleton(BaseModel):
    user_id: str | None
    flow_type: FlowType
    time_sent: datetime
    assistant_name: str | None
    user_email: str | None
    number_of_tokens: int


class UserSkeleton(BaseModel):
@@ -37,9 +37,9 @@ from onyx.db.models import AvailableTenant
from onyx.db.models import IndexModelStatus
from onyx.db.models import SearchSettings
from onyx.db.models import UserTenantMapping
from onyx.llm.llm_provider_options import ANTHROPIC_MODEL_NAMES
from onyx.llm.llm_provider_options import ANTHROPIC_PROVIDER_NAME
from onyx.llm.llm_provider_options import ANTHROPIC_VISIBLE_MODEL_NAMES
from onyx.llm.llm_provider_options import get_anthropic_model_names
from onyx.llm.llm_provider_options import OPEN_AI_MODEL_NAMES
from onyx.llm.llm_provider_options import OPEN_AI_VISIBLE_MODEL_NAMES
from onyx.llm.llm_provider_options import OPENAI_PROVIDER_NAME
@@ -278,7 +278,7 @@ def configure_default_api_keys(db_session: Session) -> None:
                is_visible=name in ANTHROPIC_VISIBLE_MODEL_NAMES,
                max_input_tokens=None,
            )
            for name in ANTHROPIC_MODEL_NAMES
            for name in get_anthropic_model_names()
        ],
        api_key_changed=True,
    )
backend/ee/onyx/utils/posthog_client.py (new file, 22 lines)
@@ -0,0 +1,22 @@
from typing import Any

from posthog import Posthog

from ee.onyx.configs.app_configs import POSTHOG_API_KEY
from ee.onyx.configs.app_configs import POSTHOG_HOST
from onyx.utils.logger import setup_logger

logger = setup_logger()


def posthog_on_error(error: Any, items: Any) -> None:
    """Log any PostHog delivery errors."""
    logger.error(f"PostHog error: {error}, items: {items}")


posthog = Posthog(
    project_api_key=POSTHOG_API_KEY,
    host=POSTHOG_HOST,
    debug=True,
    on_error=posthog_on_error,
)
@@ -1,27 +1,9 @@
from typing import Any

from posthog import Posthog

from ee.onyx.configs.app_configs import POSTHOG_API_KEY
from ee.onyx.configs.app_configs import POSTHOG_HOST
from ee.onyx.utils.posthog_client import posthog
from onyx.utils.logger import setup_logger

logger = setup_logger()


def posthog_on_error(error: Any, items: Any) -> None:
    """Log any PostHog delivery errors."""
    logger.error(f"PostHog error: {error}, items: {items}")


posthog = Posthog(
    project_api_key=POSTHOG_API_KEY,
    host=POSTHOG_HOST,
    debug=True,
    on_error=posthog_on_error,
)


def event_telemetry(
    distinct_id: str, event: str, properties: dict | None = None
) -> None:
@@ -1,14 +1,12 @@
from typing import cast
+from typing import Optional
+from typing import TYPE_CHECKING

import numpy as np
import torch
import torch.nn.functional as F
from fastapi import APIRouter
from huggingface_hub import snapshot_download  # type: ignore
-from setfit import SetFitModel  # type: ignore[import]
-from transformers import AutoTokenizer  # type: ignore
-from transformers import BatchEncoding  # type: ignore
-from transformers import PreTrainedTokenizer  # type: ignore

from model_server.constants import INFORMATION_CONTENT_MODEL_WARM_UP_STRING
from model_server.constants import MODEL_WARM_UP_STRING
@@ -37,23 +35,30 @@ from shared_configs.model_server_models import ContentClassificationPrediction
from shared_configs.model_server_models import IntentRequest
from shared_configs.model_server_models import IntentResponse

+if TYPE_CHECKING:
+    from setfit import SetFitModel  # type: ignore
+    from transformers import PreTrainedTokenizer, BatchEncoding  # type: ignore


logger = setup_logger()

router = APIRouter(prefix="/custom")

-_CONNECTOR_CLASSIFIER_TOKENIZER: PreTrainedTokenizer | None = None
+_CONNECTOR_CLASSIFIER_TOKENIZER: Optional["PreTrainedTokenizer"] = None
_CONNECTOR_CLASSIFIER_MODEL: ConnectorClassifier | None = None

-_INTENT_TOKENIZER: PreTrainedTokenizer | None = None
+_INTENT_TOKENIZER: Optional["PreTrainedTokenizer"] = None
_INTENT_MODEL: HybridClassifier | None = None

-_INFORMATION_CONTENT_MODEL: SetFitModel | None = None
+_INFORMATION_CONTENT_MODEL: Optional["SetFitModel"] = None

_INFORMATION_CONTENT_MODEL_PROMPT_PREFIX: str = ""  # spec to model version!


-def get_connector_classifier_tokenizer() -> PreTrainedTokenizer:
+def get_connector_classifier_tokenizer() -> "PreTrainedTokenizer":
    global _CONNECTOR_CLASSIFIER_TOKENIZER
+    from transformers import AutoTokenizer, PreTrainedTokenizer

    if _CONNECTOR_CLASSIFIER_TOKENIZER is None:
        # The tokenizer details are not uploaded to the HF hub since it's just the
        # unmodified distilbert tokenizer.
@@ -95,7 +100,9 @@ def get_local_connector_classifier(
    return _CONNECTOR_CLASSIFIER_MODEL


-def get_intent_model_tokenizer() -> PreTrainedTokenizer:
+def get_intent_model_tokenizer() -> "PreTrainedTokenizer":
+    from transformers import AutoTokenizer, PreTrainedTokenizer

    global _INTENT_TOKENIZER
    if _INTENT_TOKENIZER is None:
        # The tokenizer details are not uploaded to the HF hub since it's just the
@@ -141,7 +148,9 @@ def get_local_intent_model(
def get_local_information_content_model(
    model_name_or_path: str = INFORMATION_CONTENT_MODEL_VERSION,
    tag: str | None = INFORMATION_CONTENT_MODEL_TAG,
-) -> SetFitModel:
+) -> "SetFitModel":
+    from setfit import SetFitModel

    global _INFORMATION_CONTENT_MODEL
    if _INFORMATION_CONTENT_MODEL is None:
        try:
@@ -179,7 +188,7 @@ def get_local_information_content_model(
def tokenize_connector_classification_query(
    connectors: list[str],
    query: str,
-    tokenizer: PreTrainedTokenizer,
+    tokenizer: "PreTrainedTokenizer",
    connector_token_end_id: int,
) -> tuple[torch.Tensor, torch.Tensor]:
    """
@@ -267,7 +276,7 @@ def warm_up_information_content_model() -> None:


@simple_log_function_time()
-def run_inference(tokens: BatchEncoding) -> tuple[list[float], list[float]]:
+def run_inference(tokens: "BatchEncoding") -> tuple[list[float], list[float]]:
    intent_model = get_local_intent_model()
    device = intent_model.device

@@ -401,7 +410,7 @@ def run_content_classification_inference(


def map_keywords(
-    input_ids: torch.Tensor, tokenizer: PreTrainedTokenizer, is_keyword: list[bool]
+    input_ids: torch.Tensor, tokenizer: "PreTrainedTokenizer", is_keyword: list[bool]
) -> list[str]:
    tokens = tokenizer.convert_ids_to_tokens(input_ids)  # type: ignore

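The pattern running through this hunk defers heavy ML imports (transformers, setfit) to call time while keeping annotations checkable via TYPE_CHECKING and string annotations. A minimal self-contained sketch of the same pattern, with generic names that are not from the codebase:

    from typing import TYPE_CHECKING, Optional

    if TYPE_CHECKING:
        # Only evaluated by type checkers; no runtime import cost.
        from transformers import PreTrainedTokenizer  # type: ignore

    _TOKENIZER: Optional["PreTrainedTokenizer"] = None


    def get_tokenizer() -> "PreTrainedTokenizer":
        # Import inside the function so the module itself loads fast.
        from transformers import AutoTokenizer

        global _TOKENIZER
        if _TOKENIZER is None:
            _TOKENIZER = AutoTokenizer.from_pretrained("distilbert-base-uncased")
        return _TOKENIZER
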
@@ -2,13 +2,11 @@ import asyncio
import time
from typing import Any
+from typing import Optional
+from typing import TYPE_CHECKING

from fastapi import APIRouter
from fastapi import HTTPException
from fastapi import Request
-from litellm.exceptions import RateLimitError
-from sentence_transformers import CrossEncoder  # type: ignore
-from sentence_transformers import SentenceTransformer  # type: ignore

from model_server.utils import simple_log_function_time
from onyx.utils.logger import setup_logger
@@ -20,6 +18,9 @@ from shared_configs.model_server_models import EmbedResponse
from shared_configs.model_server_models import RerankRequest
from shared_configs.model_server_models import RerankResponse

+if TYPE_CHECKING:
+    from sentence_transformers import CrossEncoder, SentenceTransformer
+
logger = setup_logger()

router = APIRouter(prefix="/encoder")
@@ -88,8 +89,10 @@ def get_embedding_model(

def get_local_reranking_model(
    model_name: str,
-) -> CrossEncoder:
+) -> "CrossEncoder":
    global _RERANK_MODEL
+    from sentence_transformers import CrossEncoder  # type: ignore
+
    if _RERANK_MODEL is None:
        logger.notice(f"Loading {model_name}")
        model = CrossEncoder(model_name)
@@ -207,6 +210,8 @@ async def route_bi_encoder_embed(
async def process_embed_request(
    embed_request: EmbedRequest, gpu_type: str = "UNKNOWN"
) -> EmbedResponse:
+    from litellm.exceptions import RateLimitError
+
    # Only local models should use this endpoint - API providers should make direct API calls
    if embed_request.provider_type is not None:
        raise ValueError(

@@ -30,6 +30,7 @@ from shared_configs.configs import MIN_THREADS_ML_MODELS
from shared_configs.configs import MODEL_SERVER_ALLOWED_HOST
from shared_configs.configs import MODEL_SERVER_PORT
from shared_configs.configs import SENTRY_DSN
+from shared_configs.configs import SKIP_WARM_UP

os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ["HF_HUB_DISABLE_TELEMETRY"] = "1"
@@ -91,16 +92,17 @@ async def lifespan(app: FastAPI) -> AsyncGenerator:
        torch.set_num_threads(max(MIN_THREADS_ML_MODELS, torch.get_num_threads()))
        logger.notice(f"Torch Threads: {torch.get_num_threads()}")

-    if not INDEXING_ONLY:
-        logger.notice(
-            "The intent model should run on the model server. The information content model should not run here."
-        )
-        warm_up_intent_model()
-    else:
-        logger.notice(
-            "The content information model should run on the indexing model server. The intent model should not run here."
-        )
-        warm_up_information_content_model()
+    if not SKIP_WARM_UP:
+        if not INDEXING_ONLY:
+            logger.notice("Warming up intent model for inference model server")
+            warm_up_intent_model()
+        else:
+            logger.notice(
+                "Warming up content information model for indexing model server"
+            )
+            warm_up_information_content_model()
+    else:
+        logger.notice("Skipping model warmup due to SKIP_WARM_UP=true")

    yield


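The log message suggests SKIP_WARM_UP is driven by an environment variable, but its definition in shared_configs.configs is not shown in this diff. A plausible shape, stated purely as an assumption:

    import os

    # Assumed flag parsing; the actual code in shared_configs.configs may differ.
    SKIP_WARM_UP = os.environ.get("SKIP_WARM_UP", "").lower() == "true"
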
@@ -1,16 +1,20 @@
import json
import os
from typing import cast
+from typing import TYPE_CHECKING

import torch
import torch.nn as nn
-from transformers import DistilBertConfig  # type: ignore
-from transformers import DistilBertModel  # type: ignore
-from transformers import DistilBertTokenizer  # type: ignore


+if TYPE_CHECKING:
+    from transformers import DistilBertConfig  # type: ignore


class HybridClassifier(nn.Module):
    def __init__(self) -> None:
+        from transformers import DistilBertConfig, DistilBertModel
+
        super().__init__()
        config = DistilBertConfig()
        self.distilbert = DistilBertModel(config)
@@ -74,7 +78,9 @@ class HybridClassifier(nn.Module):


class ConnectorClassifier(nn.Module):
-    def __init__(self, config: DistilBertConfig) -> None:
+    def __init__(self, config: "DistilBertConfig") -> None:
+        from transformers import DistilBertTokenizer, DistilBertModel
+
        super().__init__()

        self.config = config
@@ -115,6 +121,8 @@ class ConnectorClassifier(nn.Module):

    @classmethod
    def from_pretrained(cls, repo_dir: str) -> "ConnectorClassifier":
+        from transformers import DistilBertConfig
+
        config = cast(
            DistilBertConfig,
            DistilBertConfig.from_pretrained(os.path.join(repo_dir, "config.json")),

@@ -1,6 +1,7 @@
from collections.abc import Callable
from typing import cast

+from sqlalchemy.orm import joinedload
from sqlalchemy.orm import Session

from onyx.access.models import DocumentAccess
@@ -10,6 +11,7 @@ from onyx.configs.constants import PUBLIC_DOC_PAT
from onyx.db.document import get_access_info_for_document
from onyx.db.document import get_access_info_for_documents
from onyx.db.models import User
+from onyx.db.models import UserFile
from onyx.utils.variable_functionality import fetch_ee_implementation_or_noop
from onyx.utils.variable_functionality import fetch_versioned_implementation

@@ -124,3 +126,25 @@ def source_should_fetch_permissions_during_indexing(source: DocumentSource) -> b
        ),
    )
    return _source_should_fetch_permissions_during_indexing_func(source)


+def get_access_for_user_files(
+    user_file_ids: list[str],
+    db_session: Session,
+) -> dict[str, DocumentAccess]:
+    user_files = (
+        db_session.query(UserFile)
+        .options(joinedload(UserFile.user))  # Eager load the user relationship
+        .filter(UserFile.id.in_(user_file_ids))
+        .all()
+    )
+    return {
+        str(user_file.id): DocumentAccess.build(
+            user_emails=[user_file.user.email] if user_file.user else [],
+            user_groups=[],
+            is_public=True if user_file.user is None else False,
+            external_user_emails=[],
+            external_user_group_ids=[],
+        )
+        for user_file in user_files
+    }

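A hedged usage sketch for the new helper; the file IDs are invented, and the session factory is borrowed from elsewhere in this diff (basic_search uses get_session_with_current_tenant), so treat the setup as illustrative:

    # Hypothetical call site; IDs and session handling are illustrative.
    with get_session_with_current_tenant() as db_session:
        access_by_file = get_access_for_user_files(
            user_file_ids=["d3b07384-...", "91af1c2e-..."],
            db_session=db_session,
        )
        for file_id, access in access_by_file.items():
            print(file_id, access.is_public)
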
backend/onyx/agents/agent_sdk/message_format.py (new file, 78 lines)
@@ -0,0 +1,78 @@
from collections.abc import Sequence

from langchain.schema.messages import BaseMessage


# TODO: Currently, we only support native API input for images. For other
# files, we process the content and share it as text in the message. In
# the future, we might support native file uploads for other types of files.
def base_messages_to_agent_sdk_msgs(msgs: Sequence[BaseMessage]) -> list[dict]:
    return [_base_message_to_agent_sdk_msg(msg) for msg in msgs]


def _base_message_to_agent_sdk_msg(msg: BaseMessage) -> dict:
    message_type_to_agent_sdk_role = {
        "human": "user",
        "system": "system",
        "ai": "assistant",
    }
    role = message_type_to_agent_sdk_role[msg.type]

    # Convert content to Agent SDK format
    content = msg.content
    if isinstance(content, str):
        # Convert string to structured text format
        structured_content = [
            {
                "type": "input_text",
                "text": content,
            }
        ]
    elif isinstance(content, list):
        # Content is already a list, process each item
        structured_content = []
        for item in content:
            if isinstance(item, str):
                structured_content.append(
                    {
                        "type": "input_text",
                        "text": item,
                    }
                )
            elif isinstance(item, dict):
                # Handle different item types
                item_type = item.get("type")

                if item_type == "text":
                    # Convert text type to input_text
                    structured_content.append(
                        {
                            "type": "input_text",
                            "text": item.get("text", ""),
                        }
                    )
                elif item_type == "image_url":
                    # Convert image_url to input_image format
                    image_url = item.get("image_url", {})
                    if isinstance(image_url, dict):
                        url = image_url.get("url", "")
                    else:
                        url = image_url
                    structured_content.append(
                        {
                            "type": "input_image",
                            "image_url": url,
                            "detail": "auto",
                        }
                    )
            else:
                raise ValueError(f"Unexpected item type: {type(item)}. Item: {item}")
    else:
        raise ValueError(
            f"Unexpected content type: {type(content)}. Content: {content}"
        )

    return {
        "role": role,
        "content": structured_content,
    }
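A quick sketch of what the converter produces for a mixed text-and-image message; the URL and text are illustrative, and the expected output is shown as a comment:

    from langchain.schema.messages import HumanMessage

    msg = HumanMessage(
        content=[
            {"type": "text", "text": "Describe this chart."},
            {"type": "image_url", "image_url": {"url": "https://example.com/chart.png"}},
        ]
    )
    # base_messages_to_agent_sdk_msgs([msg]) would yield roughly:
    # [{"role": "user", "content": [
    #     {"type": "input_text", "text": "Describe this chart."},
    #     {"type": "input_image", "image_url": "https://example.com/chart.png", "detail": "auto"},
    # ]}]
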
backend/onyx/agents/agent_sdk/sync_agent_stream_adapter.py (new file, 176 lines)
@@ -0,0 +1,176 @@
import asyncio
import queue
import threading
from collections.abc import Iterator
from typing import Generic
from typing import Optional
from typing import TypeVar

from agents import Agent
from agents import RunResultStreaming
from agents import TContext
from agents.run import Runner

from onyx.utils.threadpool_concurrency import run_in_background

T = TypeVar("T")


class SyncAgentStream(Generic[T]):
    """
    Convert an async streamed run into a sync iterator with cooperative cancellation.
    Runs the Agent in a background thread.

    Usage:
        adapter = SyncAgentStream(
            agent=agent,
            input=input,
            context=context,
            max_turns=100,
            queue_maxsize=0,  # optional backpressure
        )
        for ev in adapter:  # sync iteration
            ...
        # or cancel from elsewhere:
        adapter.cancel()
    """

    _SENTINEL = object()

    def __init__(
        self,
        *,
        agent: Agent,
        input: list[dict],
        context: TContext | None = None,
        max_turns: int = 100,
        queue_maxsize: int = 0,
    ) -> None:
        self._agent = agent
        self._input = input
        self._context = context
        self._max_turns = max_turns

        self._q: "queue.Queue[object]" = queue.Queue(maxsize=queue_maxsize)
        self._loop: Optional[asyncio.AbstractEventLoop] = None
        self._thread: Optional[threading.Thread] = None
        self.streamed: RunResultStreaming | None = None
        self._exc: Optional[BaseException] = None
        self._cancel_requested = threading.Event()
        self._started = threading.Event()
        self._done = threading.Event()

        self._start_thread()

    # ---------- public sync API ----------

    def __iter__(self) -> Iterator[T]:
        try:
            while True:
                item = self._q.get()
                if item is self._SENTINEL:
                    # If the consumer thread raised, surface it now
                    if self._exc is not None:
                        raise self._exc
                    # Normal completion
                    return
                yield item  # type: ignore[misc,return-value]
        finally:
            # Ensure we fully clean up whether we exited due to exception,
            # StopIteration, or external cancel.
            self.close()

    def cancel(self) -> bool:
        """
        Cooperatively cancel the underlying streamed run and shut down.
        Safe to call multiple times and from any thread.
        """
        self._cancel_requested.set()
        loop = self._loop
        streamed = self.streamed
        if loop is not None and streamed is not None and not self._done.is_set():
            loop.call_soon_threadsafe(streamed.cancel)
            return True
        return False

    def close(self, *, wait: bool = True) -> None:
        """Idempotent shutdown."""
        self.cancel()
        # ask the loop to stop if it's still running
        loop = self._loop
        if loop is not None and loop.is_running():
            try:
                loop.call_soon_threadsafe(loop.stop)
            except Exception:
                pass
        # join the thread
        if wait and self._thread is not None and self._thread.is_alive():
            self._thread.join(timeout=5.0)

    # ---------- internals ----------

    def _start_thread(self) -> None:
        t = run_in_background(self._thread_main)
        self._thread = t
        # Optionally wait until the loop/worker is started so .cancel() is safe soon after init
        self._started.wait(timeout=1.0)

    def _thread_main(self) -> None:
        loop = asyncio.new_event_loop()
        self._loop = loop
        asyncio.set_event_loop(loop)

        async def worker() -> None:
            try:
                # Start the streamed run inside the loop thread
                self.streamed = Runner.run_streamed(
                    self._agent,
                    self._input,  # type: ignore[arg-type]
                    context=self._context,
                    max_turns=self._max_turns,
                )

                # If cancel was requested before we created _streamed, honor it now
                if self._cancel_requested.is_set():
                    await self.streamed.cancel()  # type: ignore[func-returns-value]

                # Consume async events and forward into the thread-safe queue
                async for ev in self.streamed.stream_events():
                    # Early exit if a late cancel arrives
                    if self._cancel_requested.is_set():
                        # Try to cancel gracefully; don't break until cancel takes effect
                        try:
                            await self.streamed.cancel()  # type: ignore[func-returns-value]
                        except Exception:
                            pass
                        break
                    # This put() may block if queue_maxsize > 0 (backpressure)
                    self._q.put(ev)

            except BaseException as e:
                # Save exception to surface on the sync iterator side
                self._exc = e
            finally:
                # Signal end-of-stream
                self._q.put(self._SENTINEL)
                self._done.set()

        # Mark started and run the worker to completion
        self._started.set()
        try:
            loop.run_until_complete(worker())
        finally:
            try:
                # Drain pending tasks/callbacks safely
                pending = asyncio.all_tasks(loop=loop)
                for task in pending:
                    task.cancel()
                if pending:
                    loop.run_until_complete(
                        asyncio.gather(*pending, return_exceptions=True)
                    )
            except Exception:
                pass
            finally:
                loop.close()
                self._loop = None
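A hedged consumer sketch: iterate events on the calling thread and cancel cooperatively from a timer. The agent, input messages, and handler are assumed to exist and depend on the Agents SDK setup:

    import threading

    # `agent`, `input_msgs`, and `handle` are assumptions, not from the diff.
    stream = SyncAgentStream(agent=agent, input=input_msgs, max_turns=10)

    # Request cancellation after 30s from another thread; safe per the docstring.
    threading.Timer(30.0, stream.cancel).start()

    for event in stream:
        handle(event)  # hypothetical event handler

The sentinel-plus-queue design means the sync side never touches the event loop directly: the worker thread owns the loop, and the only shared state is the thread-safe queue and a few Events.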
@@ -24,6 +24,8 @@ def decision_router(state: MainState) -> list[Send | Hashable] | DRPath | str:
        return END
    elif next_tool_name == DRPath.LOGGER.value:
        return DRPath.LOGGER
+    elif next_tool_name == DRPath.CLOSER.value:
+        return DRPath.CLOSER
    else:
        return DRPath.ORCHESTRATOR


@@ -100,9 +100,14 @@ class IterationAnswer(BaseModel):
    response_type: str | None = None
    data: dict | list | str | int | float | bool | None = None
    file_ids: list[str] | None = None

    # TODO: This is not ideal, but we can rework the schema
    # for deep research later
    is_web_fetch: bool = False
    # for image generation step-types
    generated_images: list[GeneratedImage] | None = None
    # for multi-query search tools (v2 web search and internal search)
    # TODO: Clean this up to be more flexible to tools
    queries: list[str] | None = None


class AggregatedDRContext(BaseModel):

@@ -3,6 +3,7 @@ from datetime import datetime
from typing import Any
from typing import cast

+from braintrust import traced
from langchain_core.messages import HumanMessage
from langchain_core.messages import merge_content
from langchain_core.runnables import RunnableConfig
@@ -22,6 +23,9 @@ from onyx.agents.agent_search.dr.models import DecisionResponse
from onyx.agents.agent_search.dr.models import DRPromptPurpose
from onyx.agents.agent_search.dr.models import OrchestrationClarificationInfo
from onyx.agents.agent_search.dr.models import OrchestratorTool
+from onyx.agents.agent_search.dr.process_llm_stream import (
+    BasicSearchProcessedStreamResults,
+)
from onyx.agents.agent_search.dr.process_llm_stream import process_llm_stream
from onyx.agents.agent_search.dr.states import MainState
from onyx.agents.agent_search.dr.states import OrchestrationSetup
@@ -35,14 +39,25 @@ from onyx.agents.agent_search.shared_graph_utils.utils import (
from onyx.agents.agent_search.shared_graph_utils.utils import run_with_timeout
from onyx.agents.agent_search.shared_graph_utils.utils import write_custom_event
from onyx.agents.agent_search.utils import create_question_prompt
+from onyx.chat.chat_utils import build_citation_map_from_numbers
+from onyx.chat.chat_utils import saved_search_docs_from_llm_docs
+from onyx.chat.memories import make_memories_callback
+from onyx.chat.models import PromptConfig
+from onyx.chat.prompt_builder.citations_prompt import build_citations_system_message
+from onyx.chat.prompt_builder.citations_prompt import build_citations_user_message
+from onyx.chat.stream_processing.citation_processing import (
+    normalize_square_bracket_citations_to_double_with_links,
+)
from onyx.configs.agent_configs import TF_DR_TIMEOUT_LONG
from onyx.configs.agent_configs import TF_DR_TIMEOUT_SHORT
from onyx.configs.constants import DocumentSource
from onyx.configs.constants import DocumentSourceDescription
from onyx.configs.constants import TMP_DRALPHA_PERSONA_NAME
+from onyx.db.chat import create_search_doc_from_saved_search_doc
from onyx.db.chat import update_db_session_with_messages
from onyx.db.connector import fetch_unique_document_sources
from onyx.db.kg_config import get_kg_config_settings
+from onyx.db.models import SearchDoc
from onyx.db.models import Tool
from onyx.db.tools import get_tools
from onyx.file_store.models import ChatFileType
@@ -52,6 +67,7 @@ from onyx.kg.utils.extraction_utils import get_relationship_types_str
from onyx.llm.utils import check_number_of_tokens
from onyx.llm.utils import get_max_input_tokens
from onyx.natural_language_processing.utils import get_tokenizer
+from onyx.prompts.chat_prompts import PROJECT_INSTRUCTIONS_SEPARATOR
from onyx.prompts.dr_prompts import ANSWER_PROMPT_WO_TOOL_CALLING
from onyx.prompts.dr_prompts import DECISION_PROMPT_W_TOOL_CALLING
from onyx.prompts.dr_prompts import DECISION_PROMPT_WO_TOOL_CALLING
@@ -59,6 +75,8 @@ from onyx.prompts.dr_prompts import DEFAULT_DR_SYSTEM_PROMPT
from onyx.prompts.dr_prompts import REPEAT_PROMPT
from onyx.prompts.dr_prompts import TOOL_DESCRIPTION
from onyx.prompts.prompt_template import PromptTemplate
+from onyx.prompts.prompt_utils import handle_company_awareness
+from onyx.prompts.prompt_utils import handle_memories
from onyx.server.query_and_chat.streaming_models import MessageStart
from onyx.server.query_and_chat.streaming_models import OverallStop
from onyx.server.query_and_chat.streaming_models import SectionEnd
@@ -105,7 +123,9 @@ def _get_available_tools(
    else:
        include_kg = False

-    tool_dict: dict[int, Tool] = {tool.id: tool for tool in get_tools(db_session)}
+    tool_dict: dict[int, Tool] = {
+        tool.id: tool for tool in get_tools(db_session, only_enabled=True)
+    }

    for tool in graph_config.tooling.tools:

@@ -310,6 +330,52 @@ def _get_existing_clarification_request(
    return clarification, original_question, chat_history_string


+def _persist_final_docs_and_citations(
+    db_session: Session,
+    context_llm_docs: list[Any] | None,
+    full_answer: str | None,
+) -> tuple[list[SearchDoc], dict[int, int] | None]:
+    """Persist final documents from in-context docs and derive citation mapping.
+
+    Returns the list of persisted `SearchDoc` records and an optional
+    citation map translating inline [[n]] references to DB doc indices.
+    """
+    final_documents_db: list[SearchDoc] = []
+    citations_map: dict[int, int] | None = None
+
+    if not context_llm_docs:
+        return final_documents_db, citations_map
+
+    saved_search_docs = saved_search_docs_from_llm_docs(context_llm_docs)
+    for saved_doc in saved_search_docs:
+        db_doc = create_search_doc_from_saved_search_doc(saved_doc)
+        db_session.add(db_doc)
+        final_documents_db.append(db_doc)
+    db_session.flush()
+
+    cited_numbers: set[int] = set()
+    try:
+        # Match [[1]] or [[1, 2]] optionally followed by a link like ([[1]](http...))
+        matches = re.findall(
+            r"\[\[(\d+(?:,\s*\d+)*)\]\](?:\([^)]*\))?", full_answer or ""
+        )
+        for match in matches:
+            for num_str in match.split(","):
+                num = int(num_str.strip())
+                cited_numbers.add(num)
+    except Exception:
+        cited_numbers = set()
+
+    if cited_numbers and final_documents_db:
+        translations = build_citation_map_from_numbers(
+            cited_numbers=cited_numbers,
+            db_docs=final_documents_db,
+        )
+        citations_map = translations or None
+
+    return final_documents_db, citations_map
+
+
_ARTIFICIAL_ALL_ENCOMPASSING_TOOL = {
    "type": "function",
    "function": {
@@ -421,6 +487,23 @@ def clarifier(
    assistant_system_prompt = PromptTemplate(DEFAULT_DR_SYSTEM_PROMPT).build()
    assistant_task_prompt = ""

+    if graph_config.inputs.project_instructions:
+        assistant_system_prompt = (
+            assistant_system_prompt
+            + PROJECT_INSTRUCTIONS_SEPARATOR
+            + graph_config.inputs.project_instructions
+        )
+    user = (
+        graph_config.tooling.search_tool.user
+        if graph_config.tooling.search_tool
+        else None
+    )
+    memories_callback = make_memories_callback(user, db_session)
+    assistant_system_prompt = handle_company_awareness(assistant_system_prompt)
+    assistant_system_prompt = handle_memories(
+        assistant_system_prompt, memories_callback
+    )

    chat_history_string = (
        get_chat_history_string(
            graph_config.inputs.prompt_builder.message_history,
@@ -449,6 +532,11 @@ def clarifier(
        graph_config.inputs.files
    )

+    # Use project/search context docs if available to enable citation mapping
+    context_llm_docs = getattr(
+        graph_config.inputs.prompt_builder, "context_llm_docs", None
+    )
+
    if not (force_use_tool and force_use_tool.force_use):

        if not use_tool_calling_llm or len(available_tools) == 1:
@@ -563,42 +651,93 @@ def clarifier(
                active_source_type_descriptions_str=active_source_type_descriptions_str,
            )

-            stream = graph_config.tooling.primary_llm.stream(
-                prompt=create_question_prompt(
-                    assistant_system_prompt,
-                    decision_prompt + assistant_task_prompt,
-                    uploaded_image_context=uploaded_image_context,
-                ),
-                tools=([_ARTIFICIAL_ALL_ENCOMPASSING_TOOL]),
-                tool_choice=(None),
-                structured_response_format=graph_config.inputs.structured_response_format,
-            )
-            full_response = process_llm_stream(
-                messages=stream,
-                should_stream_answer=True,
-                writer=writer,
-                ind=0,
-                generate_final_answer=True,
-                chat_message_id=str(graph_config.persistence.chat_session_id),
-            )
+            if context_llm_docs:
+                persona = graph_config.inputs.persona
+                if persona is not None:
+                    prompt_config = PromptConfig.from_model(persona)
+                else:
+                    prompt_config = PromptConfig(
+                        system_prompt=assistant_system_prompt,
+                        task_prompt="",
+                        datetime_aware=True,
+                    )
+
+                system_prompt_to_use_content = build_citations_system_message(
+                    prompt_config
+                ).content
+                system_prompt_to_use: str = cast(str, system_prompt_to_use_content)
+                if graph_config.inputs.project_instructions:
+                    system_prompt_to_use = (
+                        system_prompt_to_use
+                        + PROJECT_INSTRUCTIONS_SEPARATOR
+                        + graph_config.inputs.project_instructions
+                    )
+                user_prompt_to_use = build_citations_user_message(
+                    user_query=original_question,
+                    files=[],
+                    prompt_config=prompt_config,
+                    context_docs=context_llm_docs,
+                    all_doc_useful=False,
+                    history_message=chat_history_string,
+                    context_type="user files",
+                ).content
+            else:
+                system_prompt_to_use = assistant_system_prompt
+                user_prompt_to_use = decision_prompt + assistant_task_prompt
+
+            @traced(name="clarifier stream and process", type="llm")
+            def stream_and_process() -> BasicSearchProcessedStreamResults:
+                stream = graph_config.tooling.primary_llm.stream(
+                    prompt=create_question_prompt(
+                        cast(str, system_prompt_to_use),
+                        cast(str, user_prompt_to_use),
+                        uploaded_image_context=uploaded_image_context,
+                    ),
+                    tools=([_ARTIFICIAL_ALL_ENCOMPASSING_TOOL]),
+                    tool_choice=(None),
+                    structured_response_format=graph_config.inputs.structured_response_format,
+                )
+                return process_llm_stream(
+                    messages=stream,
+                    should_stream_answer=True,
+                    writer=writer,
+                    ind=0,
+                    final_search_results=context_llm_docs,
+                    displayed_search_results=context_llm_docs,
+                    generate_final_answer=True,
+                    chat_message_id=str(graph_config.persistence.chat_session_id),
+                )
+
+            full_response = stream_and_process()
            if len(full_response.ai_message_chunk.tool_calls) == 0:

                if isinstance(full_response.full_answer, str):
-                    full_answer = full_response.full_answer
+                    full_answer = (
+                        normalize_square_bracket_citations_to_double_with_links(
+                            full_response.full_answer
+                        )
+                    )
                else:
                    full_answer = None

+                # Persist final documents and derive citations when using in-context docs
+                final_documents_db, citations_map = _persist_final_docs_and_citations(
+                    db_session=db_session,
+                    context_llm_docs=context_llm_docs,
+                    full_answer=full_answer,
+                )
+
                update_db_session_with_messages(
                    db_session=db_session,
                    chat_message_id=message_id,
                    chat_session_id=graph_config.persistence.chat_session_id,
                    is_agentic=graph_config.behavior.use_agentic_search,
                    message=full_answer,
+                    token_count=len(llm_tokenizer.encode(full_answer or "")),
+                    citations=citations_map,
+                    final_documents=final_documents_db or None,
                    update_parent_message=True,
                    research_answer_purpose=ResearchAnswerPurpose.ANSWER,
-                    token_count=len(llm_tokenizer.encode(full_answer or "")),
                )

                db_session.commit()

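To make the citation pattern above concrete, a small demonstration of what the regex extracts; the answer string is invented:

    import re

    answer = "See [[1]](https://a.example) and [[2, 3]] for details."
    matches = re.findall(r"\[\[(\d+(?:,\s*\d+)*)\]\](?:\([^)]*\))?", answer)
    # matches == ["1", "2, 3"]; splitting on commas yields the cited numbers
    cited = {int(n) for m in matches for n in m.split(",")}
    # cited == {1, 2, 3}
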
@@ -181,6 +181,15 @@ def orchestrator(
        remaining_time_budget = DR_TIME_BUDGET_BY_TYPE[research_type]

+    elif remaining_time_budget <= 0:
+
+        write_custom_event(
+            current_step_nr,
+            SectionEnd(),
+            writer,
+        )
+
+        current_step_nr += 1

        return OrchestrationUpdate(
            tools_used=[DRPath.CLOSER.value],
            current_step_nr=current_step_nr,

@@ -42,6 +42,7 @@ from onyx.db.models import ResearchAgentIteration
from onyx.db.models import ResearchAgentIterationSubStep
from onyx.db.models import SearchDoc as DbSearchDoc
from onyx.llm.utils import check_number_of_tokens
+from onyx.prompts.chat_prompts import PROJECT_INSTRUCTIONS_SEPARATOR
from onyx.prompts.dr_prompts import FINAL_ANSWER_PROMPT_W_SUB_ANSWERS
from onyx.prompts.dr_prompts import FINAL_ANSWER_PROMPT_WITHOUT_SUB_ANSWERS
from onyx.prompts.dr_prompts import TEST_INFO_COMPLETE_PROMPT
@@ -198,6 +199,7 @@ def save_iteration(
            else None
        ),
        additional_data=iteration_answer.additional_data,
+        queries=iteration_answer.queries,
    )
    db_session.add(research_agent_iteration_sub_step)

@@ -225,7 +227,7 @@ def closer(

    research_type = graph_config.behavior.research_type

-    assistant_system_prompt = state.assistant_system_prompt
+    assistant_system_prompt: str = state.assistant_system_prompt or ""
    assistant_task_prompt = state.assistant_task_prompt

    uploaded_context = state.uploaded_test_context or ""
@@ -349,6 +351,13 @@ def closer(
        uploaded_context=uploaded_context,
    )

+    if graph_config.inputs.project_instructions:
+        assistant_system_prompt = (
+            assistant_system_prompt
+            + PROJECT_INSTRUCTIONS_SEPARATOR
+            + (graph_config.inputs.project_instructions or "")
+        )
+
    all_context_llmdocs = [
        llm_doc_from_inference_section(inference_section)
        for inference_section in all_cited_documents

@@ -180,6 +180,7 @@ def save_iteration(
            else None
        ),
        additional_data=iteration_answer.additional_data,
+        queries=iteration_answer.queries,
    )
    db_session.add(research_agent_iteration_sub_step)


@@ -9,6 +9,7 @@ from pydantic import BaseModel
from onyx.agents.agent_search.shared_graph_utils.utils import write_custom_event
from onyx.chat.chat_utils import saved_search_docs_from_llm_docs
from onyx.chat.models import AgentAnswerPiece
+from onyx.chat.models import CitationInfo
from onyx.chat.models import LlmDoc
from onyx.chat.models import OnyxAnswerPiece
from onyx.chat.stream_processing.answer_response_handler import AnswerResponseHandler
@@ -18,6 +19,8 @@ from onyx.chat.stream_processing.answer_response_handler import (
)
from onyx.chat.stream_processing.utils import map_document_id_order
from onyx.context.search.models import InferenceSection
+from onyx.server.query_and_chat.streaming_models import CitationDelta
+from onyx.server.query_and_chat.streaming_models import CitationStart
from onyx.server.query_and_chat.streaming_models import MessageDelta
from onyx.server.query_and_chat.streaming_models import MessageStart
from onyx.server.query_and_chat.streaming_models import SectionEnd
@@ -56,6 +59,9 @@ def process_llm_stream(

    full_answer = ""
    start_final_answer_streaming_set = False
+    # Accumulate citation infos if handler emits them
+    collected_citation_infos: list[CitationInfo] = []

    # This stream will be the llm answer if no tool is chosen. When a tool is chosen,
    # the stream will contain AIMessageChunks with tool call information.
    for message in messages:
@@ -102,6 +108,9 @@ def process_llm_stream(
                MessageDelta(content=response_part.answer_piece),
                writer,
            )
+        # collect citation info objects
+        elif isinstance(response_part, CitationInfo):
+            collected_citation_infos.append(response_part)

    if generate_final_answer and start_final_answer_streaming_set:
        # start_final_answer_streaming_set is only set if the answer is verbal and not a tool call
@@ -111,6 +120,14 @@ def process_llm_stream(
            writer,
        )

+    # Emit citations section if any were collected
+    if collected_citation_infos:
+        write_custom_event(ind, CitationStart(), writer)
+        write_custom_event(
+            ind, CitationDelta(citations=collected_citation_infos), writer
+        )
+        write_custom_event(ind, SectionEnd(), writer)
+
    logger.debug(f"Full answer: {full_answer}")
    return BasicSearchProcessedStreamResults(
        ai_message_chunk=cast(AIMessageChunk, tool_call_chunk), full_answer=full_answer

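In effect, whenever any CitationInfo objects are collected during streaming, the stream now closes with a three-event sequence at the same section index: a CitationStart to open the section, a single CitationDelta carrying every collected citation at once, and a SectionEnd to close it. Citations are batched rather than streamed incrementally, which keeps the frontend protocol simple.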
@@ -1,6 +1,7 @@
import re
from datetime import datetime
from typing import cast
+from uuid import UUID

from langchain_core.runnables import RunnableConfig
from langgraph.types import StreamWriter
@@ -73,6 +74,7 @@ def basic_search(

    search_tool_info = state.available_tools[state.tools_used[-1]]
    search_tool = cast(SearchTool, search_tool_info.tool_object)
+    force_use_tool = graph_config.tooling.force_use_tool

    # sanity check
    if search_tool != graph_config.tooling.search_tool:
@@ -141,6 +143,15 @@ def basic_search(
    retrieved_docs: list[InferenceSection] = []
    callback_container: list[list[InferenceSection]] = []

+    user_file_ids: list[UUID] | None = None
+    project_id: int | None = None
+    if force_use_tool.override_kwargs and isinstance(
+        force_use_tool.override_kwargs, SearchToolOverrideKwargs
+    ):
+        override_kwargs = force_use_tool.override_kwargs
+        user_file_ids = override_kwargs.user_file_ids
+        project_id = override_kwargs.project_id
+
    # new db session to avoid concurrency issues
    with get_session_with_current_tenant() as search_db_session:
        for tool_response in search_tool.run(
@@ -153,6 +164,8 @@ def basic_search(
            retrieved_sections_callback=callback_container.append,
            skip_query_analysis=True,
            original_query=rewritten_query,
+            user_file_ids=user_file_ids,
+            project_id=project_id,
        ),
    ):
        # get retrieved docs to send to the rest of the graph

@@ -5,12 +5,12 @@ from langgraph.types import StreamWriter

from onyx.agents.agent_search.dr.sub_agents.states import SubAgentMainState
from onyx.agents.agent_search.dr.sub_agents.states import SubAgentUpdate
-from onyx.agents.agent_search.dr.utils import chunks_or_sections_to_search_docs
from onyx.agents.agent_search.shared_graph_utils.utils import (
    get_langgraph_node_log_string,
)
from onyx.agents.agent_search.shared_graph_utils.utils import write_custom_event
from onyx.context.search.models import SavedSearchDoc
+from onyx.context.search.models import SearchDoc
from onyx.server.query_and_chat.streaming_models import SectionEnd
from onyx.utils.logger import setup_logger

@@ -47,7 +47,7 @@ def is_reducer(
            doc_list.append(x)

    # Convert InferenceSections to SavedSearchDocs
-    search_docs = chunks_or_sections_to_search_docs(doc_list)
+    search_docs = SearchDoc.from_chunks_or_sections(doc_list)
    retrieved_saved_search_docs = [
        SavedSearchDoc.from_search_doc(search_doc, db_doc_id=0)
        for search_doc in search_docs

@@ -1,3 +1,4 @@
+import json
from datetime import datetime
from typing import cast

@@ -28,6 +29,7 @@ from onyx.tools.tool_implementations.images.image_generation_tool import (
from onyx.tools.tool_implementations.images.image_generation_tool import (
    ImageGenerationTool,
)
+from onyx.tools.tool_implementations.images.image_generation_tool import ImageShape
from onyx.utils.logger import setup_logger

logger = setup_logger()
@@ -62,6 +64,29 @@ def image_generation(
    image_tool_info = state.available_tools[state.tools_used[-1]]
    image_tool = cast(ImageGenerationTool, image_tool_info.tool_object)

+    image_prompt = branch_query
+    requested_shape: ImageShape | None = None
+
+    try:
+        parsed_query = json.loads(branch_query)
+    except json.JSONDecodeError:
+        parsed_query = None
+
+    if isinstance(parsed_query, dict):
+        prompt_from_llm = parsed_query.get("prompt")
+        if isinstance(prompt_from_llm, str) and prompt_from_llm.strip():
+            image_prompt = prompt_from_llm.strip()
+
+        raw_shape = parsed_query.get("shape")
+        if isinstance(raw_shape, str):
+            try:
+                requested_shape = ImageShape(raw_shape)
+            except ValueError:
+                logger.warning(
+                    "Received unsupported image shape '%s' from LLM. Falling back to square.",
+                    raw_shape,
+                )
+
    logger.debug(
        f"Image generation start for {iteration_nr}.{parallelization_nr} at {datetime.now()}"
    )
@@ -69,7 +94,15 @@ def image_generation(
    # Generate images using the image generation tool
    image_generation_responses: list[ImageGenerationResponse] = []

-    for tool_response in image_tool.run(prompt=branch_query):
+    if requested_shape is not None:
+        tool_iterator = image_tool.run(
+            prompt=image_prompt,
+            shape=requested_shape.value,
+        )
+    else:
+        tool_iterator = image_tool.run(prompt=image_prompt)
+
+    for tool_response in tool_iterator:
        if tool_response.id == IMAGE_GENERATION_HEARTBEAT_ID:
            # Stream heartbeat to frontend
            write_custom_event(
@@ -95,6 +128,7 @@ def image_generation(
                file_id=file_id,
                url=build_frontend_file_url(file_id),
                revised_prompt=img.revised_prompt,
+                shape=(requested_shape or ImageShape.SQUARE).value,
            )
            for file_id, img in zip(file_ids, image_generation_responses)
        ]
@@ -107,15 +141,29 @@ def image_generation(
    if final_generated_images:
        image_descriptions = []
        for i, img in enumerate(final_generated_images, 1):
-            image_descriptions.append(f"Image {i}: {img.revised_prompt}")
+            if img.shape and img.shape != ImageShape.SQUARE.value:
+                image_descriptions.append(
+                    f"Image {i}: {img.revised_prompt} (shape: {img.shape})"
+                )
+            else:
+                image_descriptions.append(f"Image {i}: {img.revised_prompt}")

        answer_string = (
-            f"Generated {len(final_generated_images)} image(s) based on the request: {branch_query}\n\n"
+            f"Generated {len(final_generated_images)} image(s) based on the request: {image_prompt}\n\n"
            + "\n".join(image_descriptions)
        )
-        reasoning = f"Used image generation tool to create {len(final_generated_images)} image(s) based on the user's request."
+        if requested_shape:
+            reasoning = (
+                "Used image generation tool to create "
+                f"{len(final_generated_images)} image(s) in {requested_shape.value} orientation."
+            )
+        else:
+            reasoning = (
+                "Used image generation tool to create "
+                f"{len(final_generated_images)} image(s) based on the user's request."
+            )
    else:
-        answer_string = f"Failed to generate images for request: {branch_query}"
+        answer_string = f"Failed to generate images for request: {image_prompt}"
        reasoning = "Image generation tool did not return any results."

    return BranchUpdate(

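The parsing above accepts an optional JSON payload from the LLM; a sketch of a payload it would handle, where the prompt text is invented and "landscape" is assumed to be a valid ImageShape value:

    import json

    branch_query = json.dumps(
        {
            "prompt": "a lighthouse at dawn, watercolor",
            # Must be a valid ImageShape value; unknown shapes fall back to square.
            "shape": "landscape",
        }
    )
    # Plain non-JSON strings also work: they are used verbatim as the prompt.
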
@@ -5,6 +5,7 @@ class GeneratedImage(BaseModel):
    file_id: str
    url: str
    revised_prompt: str
+    shape: str | None = None


# Needed for PydanticType

@@ -2,30 +2,28 @@ from exa_py import Exa
from exa_py.api import HighlightsContentsOptions

from onyx.agents.agent_search.dr.sub_agents.web_search.models import (
-    InternetContent,
+    WebContent,
)
from onyx.agents.agent_search.dr.sub_agents.web_search.models import (
-    InternetSearchProvider,
+    WebSearchProvider,
)
from onyx.agents.agent_search.dr.sub_agents.web_search.models import (
-    InternetSearchResult,
+    WebSearchResult,
)
from onyx.configs.chat_configs import EXA_API_KEY
from onyx.connectors.cross_connector_utils.miscellaneous_utils import time_str_to_utc
from onyx.utils.retry_wrapper import retry_builder


# TODO Dependency inject for testing
-class ExaClient(InternetSearchProvider):
+class ExaClient(WebSearchProvider):
    def __init__(self, api_key: str | None = EXA_API_KEY) -> None:
        self.exa = Exa(api_key=api_key)

    @retry_builder(tries=3, delay=1, backoff=2)
-    def search(self, query: str) -> list[InternetSearchResult]:
+    def search(self, query: str) -> list[WebSearchResult]:
        response = self.exa.search_and_contents(
            query,
-            type="fast",
-            livecrawl="never",
+            type="auto",
            highlights=HighlightsContentsOptions(
                num_sentences=2,
                highlights_per_url=1,
@@ -34,7 +32,7 @@ class ExaClient(InternetSearchProvider):
        )

        return [
-            InternetSearchResult(
+            WebSearchResult(
                title=result.title or "",
                link=result.url,
                snippet=result.highlights[0] if result.highlights else "",
@@ -49,7 +47,7 @@ class ExaClient(InternetSearchProvider):
        ]

    @retry_builder(tries=3, delay=1, backoff=2)
-    def contents(self, urls: list[str]) -> list[InternetContent]:
+    def contents(self, urls: list[str]) -> list[WebContent]:
        response = self.exa.get_contents(
            urls=urls,
            text=True,
@@ -57,7 +55,7 @@ class ExaClient(InternetSearchProvider):
        )

        return [
-            InternetContent(
+            WebContent(
                title=result.title or "",
                link=result.url,
                full_content=result.text or "",

backend/onyx/agents/agent_search/dr/sub_agents/web_search/clients/serper_client.py (new file, 147 lines)
@@ -0,0 +1,147 @@
import json
from concurrent.futures import ThreadPoolExecutor

import requests

from onyx.agents.agent_search.dr.sub_agents.web_search.models import (
    WebContent,
)
from onyx.agents.agent_search.dr.sub_agents.web_search.models import (
    WebSearchProvider,
)
from onyx.agents.agent_search.dr.sub_agents.web_search.models import (
    WebSearchResult,
)
from onyx.configs.chat_configs import SERPER_API_KEY
from onyx.connectors.cross_connector_utils.miscellaneous_utils import time_str_to_utc
from onyx.utils.retry_wrapper import retry_builder

SERPER_SEARCH_URL = "https://google.serper.dev/search"
SERPER_CONTENTS_URL = "https://scrape.serper.dev"


class SerperClient(WebSearchProvider):
    def __init__(self, api_key: str | None = SERPER_API_KEY) -> None:
        self.headers = {
            "X-API-KEY": api_key,
            "Content-Type": "application/json",
        }

    @retry_builder(tries=3, delay=1, backoff=2)
    def search(self, query: str) -> list[WebSearchResult]:
        payload = {
            "q": query,
        }

        response = requests.post(
            SERPER_SEARCH_URL,
            headers=self.headers,
            data=json.dumps(payload),
        )

        response.raise_for_status()

        results = response.json()
        organic_results = results["organic"]

        return [
            WebSearchResult(
                title=result["title"],
                link=result["link"],
                snippet=result["snippet"],
                author=None,
                published_date=None,
            )
            for result in organic_results
        ]

    def contents(self, urls: list[str]) -> list[WebContent]:
        if not urls:
            return []

        # Serper can respond with 500s regularly. We want to retry,
        # but in the event of failure, return an unsuccessful scrape.
        def safe_get_webpage_content(url: str) -> WebContent:
            try:
                return self._get_webpage_content(url)
            except Exception:
                return WebContent(
                    title="",
                    link=url,
                    full_content="",
                    published_date=None,
                    scrape_successful=False,
                )

        with ThreadPoolExecutor(max_workers=min(8, len(urls))) as e:
            return list(e.map(safe_get_webpage_content, urls))

    @retry_builder(tries=3, delay=1, backoff=2)
    def _get_webpage_content(self, url: str) -> WebContent:
        payload = {
            "url": url,
        }

        response = requests.post(
            SERPER_CONTENTS_URL,
            headers=self.headers,
            data=json.dumps(payload),
        )

        # 400 returned when serper cannot scrape
        if response.status_code == 400:
            return WebContent(
                title="",
                link=url,
                full_content="",
                published_date=None,
                scrape_successful=False,
            )

        response.raise_for_status()

        response_json = response.json()

        # Response only guarantees text
        text = response_json["text"]

        # metadata & jsonld are not guaranteed to be present
        metadata = response_json.get("metadata", {})
        jsonld = response_json.get("jsonld", {})

        title = extract_title_from_metadata(metadata)

        # Serper does not provide a reliable mechanism to extract the url
        response_url = url
        published_date_str = extract_published_date_from_jsonld(jsonld)
        published_date = None

        if published_date_str:
            try:
                published_date = time_str_to_utc(published_date_str)
            except Exception:
                published_date = None

        return WebContent(
            title=title or "",
            link=response_url,
            full_content=text or "",
            published_date=published_date,
        )


def extract_title_from_metadata(metadata: dict[str, str]) -> str | None:
    keys = ["title", "og:title"]
    return extract_value_from_dict(metadata, keys)


def extract_published_date_from_jsonld(jsonld: dict[str, str]) -> str | None:
    keys = ["dateModified"]
    return extract_value_from_dict(jsonld, keys)


def extract_value_from_dict(data: dict[str, str], keys: list[str]) -> str | None:
    for key in keys:
        if key in data:
            return data[key]
    return None
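A hedged usage sketch for the new client; it needs a real SERPER_API_KEY and network access, and the query string is illustrative:

    client = SerperClient()  # falls back to the SERPER_API_KEY config value
    results = client.search("onyx open source rag")
    pages = client.contents([r.link for r in results[:3]])
    for page in pages:
        print(page.link, page.scrape_successful, len(page.full_content))

Note the design choice in contents(): scraping is fanned out over a thread pool capped at eight workers, and per-URL failures degrade to a WebContent with scrape_successful=False instead of failing the whole batch.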
@@ -7,7 +7,7 @@ from langsmith import traceable

from onyx.agents.agent_search.dr.models import WebSearchAnswer
from onyx.agents.agent_search.dr.sub_agents.web_search.models import (
-    InternetSearchResult,
+    WebSearchResult,
)
from onyx.agents.agent_search.dr.sub_agents.web_search.providers import (
    get_default_provider,
@@ -75,15 +75,15 @@ def web_search(
        raise ValueError("No internet search provider found")

    @traceable(name="Search Provider API Call")
-    def _search(search_query: str) -> list[InternetSearchResult]:
-        search_results: list[InternetSearchResult] = []
+    def _search(search_query: str) -> list[WebSearchResult]:
+        search_results: list[WebSearchResult] = []
        try:
            search_results = provider.search(search_query)
        except Exception as e:
            logger.error(f"Error performing search: {e}")
        return search_results

-    search_results: list[InternetSearchResult] = _search(search_query)
+    search_results: list[WebSearchResult] = _search(search_query)
    search_results_text = "\n\n".join(
        [
            f"{i}. {result.title}\n URL: {result.link}\n"

@@ -4,7 +4,7 @@ from langchain_core.runnables import RunnableConfig
from langgraph.types import StreamWriter

from onyx.agents.agent_search.dr.sub_agents.web_search.models import (
-    InternetSearchResult,
+    WebSearchResult,
)
from onyx.agents.agent_search.dr.sub_agents.web_search.states import (
    InternetSearchInput,
@@ -23,7 +23,7 @@ def dedup_urls(
    writer: StreamWriter = lambda _: None,
) -> InternetSearchInput:
    branch_questions_to_urls: dict[str, list[str]] = defaultdict(list)
-    unique_results_by_link: dict[str, InternetSearchResult] = {}
+    unique_results_by_link: dict[str, WebSearchResult] = {}
    for query, result in state.results_to_open:
        branch_questions_to_urls[query].append(result.link)
        if result.link not in unique_results_by_link:

@@ -13,7 +13,7 @@ class ProviderType(Enum):
    EXA = "exa"


-class InternetSearchResult(BaseModel):
+class WebSearchResult(BaseModel):
    title: str
    link: str
    author: str | None = None
@@ -21,18 +21,19 @@ class InternetSearchResult(BaseModel):
    snippet: str | None = None


-class InternetContent(BaseModel):
+class WebContent(BaseModel):
    title: str
    link: str
    full_content: str
    published_date: datetime | None = None
+    scrape_successful: bool = True


-class InternetSearchProvider(ABC):
+class WebSearchProvider(ABC):
    @abstractmethod
-    def search(self, query: str) -> list[InternetSearchResult]:
+    def search(self, query: str) -> list[WebSearchResult]:
        pass

    @abstractmethod
-    def contents(self, urls: list[str]) -> list[InternetContent]:
+    def contents(self, urls: list[str]) -> list[WebContent]:
        pass

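Because WebSearchProvider is an ABC with just two methods, a stub implementation is enough for tests, which is presumably what the "TODO Dependency inject for testing" note in exa_client is pointing at. A minimal sketch with made-up data:

    class FakeSearchProvider(WebSearchProvider):
        """Deterministic provider for unit tests; all values are invented."""

        def search(self, query: str) -> list[WebSearchResult]:
            return [
                WebSearchResult(title="stub", link="https://example.com", snippet=query)
            ]

        def contents(self, urls: list[str]) -> list[WebContent]:
            return [
                WebContent(title="stub", link=url, full_content="lorem ipsum")
                for url in urls
            ]
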
@@ -1,13 +1,19 @@
from onyx.agents.agent_search.dr.sub_agents.web_search.clients.exa_client import (
    ExaClient,
)
+from onyx.agents.agent_search.dr.sub_agents.web_search.clients.serper_client import (
+    SerperClient,
+)
from onyx.agents.agent_search.dr.sub_agents.web_search.models import (
-    InternetSearchProvider,
+    WebSearchProvider,
)
from onyx.configs.chat_configs import EXA_API_KEY
+from onyx.configs.chat_configs import SERPER_API_KEY


-def get_default_provider() -> InternetSearchProvider | None:
+def get_default_provider() -> WebSearchProvider | None:
    if EXA_API_KEY:
        return ExaClient()
+    if SERPER_API_KEY:
+        return SerperClient()
    return None

@@ -4,13 +4,13 @@ from typing import Annotated
from onyx.agents.agent_search.dr.states import LoggerUpdate
from onyx.agents.agent_search.dr.sub_agents.states import SubAgentInput
from onyx.agents.agent_search.dr.sub_agents.web_search.models import (
-    InternetSearchResult,
+    WebSearchResult,
)
from onyx.context.search.models import InferenceSection


class InternetSearchInput(SubAgentInput):
-    results_to_open: Annotated[list[tuple[str, InternetSearchResult]], add] = []
+    results_to_open: Annotated[list[tuple[str, WebSearchResult]], add] = []
    parallelization_nr: int = 0
    branch_question: Annotated[str, lambda x, y: y] = ""
    branch_questions_to_urls: Annotated[dict[str, list[str]], lambda x, y: y] = {}
@@ -18,7 +18,7 @@ class InternetSearchInput(SubAgentInput):


class InternetSearchUpdate(LoggerUpdate):
-    results_to_open: Annotated[list[tuple[str, InternetSearchResult]], add] = []
+    results_to_open: Annotated[list[tuple[str, WebSearchResult]], add] = []


class FetchInput(SubAgentInput):

@@ -1,8 +1,8 @@
from onyx.agents.agent_search.dr.sub_agents.web_search.models import (
-    InternetContent,
+    WebContent,
)
from onyx.agents.agent_search.dr.sub_agents.web_search.models import (
-    InternetSearchResult,
+    WebSearchResult,
)
from onyx.configs.constants import DocumentSource
from onyx.context.search.models import InferenceChunk
@@ -17,7 +17,7 @@ def truncate_search_result_content(content: str, max_chars: int = 10000) -> str:


def dummy_inference_section_from_internet_content(
-    result: InternetContent,
+    result: WebContent,
) -> InferenceSection:
    truncated_content = truncate_search_result_content(result.full_content)
    return InferenceSection(
@@ -34,7 +34,7 @@ def dummy_inference_section_from_internet_content(
        boost=1,
        recency_bias=1.0,
        score=1.0,
-        hidden=False,
+        hidden=(not result.scrape_successful),
        metadata={},
        match_highlights=[],
        doc_summary=truncated_content,
@@ -48,7 +48,7 @@ def dummy_inference_section_from_internet_content(


def dummy_inference_section_from_internet_search_result(
-    result: InternetSearchResult,
+    result: WebSearchResult,
) -> InferenceSection:
    return InferenceSection(
        center_chunk=InferenceChunk(

Some files were not shown because too many files have changed in this diff.