Compare commits

...

7 Commits

Author SHA1 Message Date
Nikolas Garza
f5d66f389c fix(input): differentiate attach file and add model icons (#10024) 2026-04-09 03:30:10 +00:00
Nikolas Garza
82d89f78c6 fix(chat): resolve model selector showing stale model on agent switch (#10022) 2026-04-09 03:06:24 +00:00
Jamison Lahman
6f49c5e32c chore: update generic LLM configuration help copy (#10011) 2026-04-09 01:08:41 +00:00
Justin Tahara
41f2bd2f19 chore(edge): Skip edge tag (#10019) 2026-04-09 00:56:51 +00:00
Jamison Lahman
bfa2f672f9 fix: /api/admin/llm/built-in/options/custom 404 (#10009) 2026-04-08 17:47:13 -07:00
Justin Tahara
a823c3ead1 chore(ods): Bump from v0.7.2 -> v0.7.3 (#10018) 2026-04-09 00:30:22 +00:00
Justin Tahara
bd7d378a9a chore(python sandbox): Bump to v0.3.3 (#10016) 2026-04-09 00:10:19 +00:00
22 changed files with 238 additions and 168 deletions

View File

@@ -156,7 +156,7 @@ jobs:
check-version-tag:
runs-on: ubuntu-slim
timeout-minutes: 10
if: ${{ !startsWith(github.ref_name, 'nightly-latest') && github.event_name != 'workflow_dispatch' }}
if: ${{ !startsWith(github.ref_name, 'nightly-latest') && github.ref_name != 'edge' && github.event_name != 'workflow_dispatch' }}
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # ratchet:actions/checkout@v6

View File

@@ -66,7 +66,7 @@ PROVIDER_DISPLAY_NAMES: dict[str, str] = {
LlmProviderNames.LM_STUDIO: "LM Studio",
LlmProviderNames.LITELLM_PROXY: "LiteLLM Proxy",
LlmProviderNames.BIFROST: "Bifrost",
LlmProviderNames.OPENAI_COMPATIBLE: "OpenAI Compatible",
LlmProviderNames.OPENAI_COMPATIBLE: "OpenAI-Compatible",
"groq": "Groq",
"anyscale": "Anyscale",
"deepseek": "DeepSeek",

View File

@@ -338,7 +338,7 @@ def get_provider_display_name(provider_name: str) -> str:
VERTEXAI_PROVIDER_NAME: "Google Vertex AI",
OPENROUTER_PROVIDER_NAME: "OpenRouter",
LITELLM_PROXY_PROVIDER_NAME: "LiteLLM Proxy",
OPENAI_COMPATIBLE_PROVIDER_NAME: "OpenAI Compatible",
OPENAI_COMPATIBLE_PROVIDER_NAME: "OpenAI-Compatible",
}
if provider_name in _ONYX_PROVIDER_DISPLAY_NAMES:

View File

@@ -1674,7 +1674,7 @@ def get_openai_compatible_server_available_models(
)
for r in sorted_results
],
source_label="OpenAI Compatible",
source_label="OpenAI-Compatible",
)
return sorted_results
@@ -1693,6 +1693,6 @@ def _get_openai_compatible_server_response(
return _get_openai_compatible_models_response(
url=url,
source_name="OpenAI Compatible",
source_name="OpenAI-Compatible",
api_key=api_key,
)

View File

@@ -263,7 +263,7 @@ oauthlib==3.2.2
# via
# kubernetes
# requests-oauthlib
onyx-devtools==0.7.2
onyx-devtools==0.7.3
# via onyx
openai==2.14.0
# via

View File

@@ -19,6 +19,6 @@ dependencies:
version: 5.4.0
- name: code-interpreter
repository: https://onyx-dot-app.github.io/python-sandbox/
version: 0.3.2
digest: sha256:74908ea45ace2b4be913ff762772e6d87e40bab64e92c6662aa51730eaeb9d87
generated: "2026-04-06T15:34:02.597166-07:00"
version: 0.3.3
digest: sha256:a57f29088b1624a72f6c70e4c3ccc2f2aad675e4624278c4e9be92083d6d5dad
generated: "2026-04-08T16:47:29.33368-07:00"

View File

@@ -45,6 +45,6 @@ dependencies:
repository: https://charts.min.io/
condition: minio.enabled
- name: code-interpreter
version: 0.3.2
version: 0.3.3
repository: https://onyx-dot-app.github.io/python-sandbox/
condition: codeInterpreter.enabled

View File

@@ -148,7 +148,7 @@ dev = [
"matplotlib==3.10.8",
"mypy-extensions==1.0.0",
"mypy==1.13.0",
"onyx-devtools==0.7.2",
"onyx-devtools==0.7.3",
"openapi-generator-cli==7.17.0",
"pandas-stubs~=2.3.3",
"pre-commit==3.2.2",

16
uv.lock generated
View File

@@ -4459,7 +4459,7 @@ requires-dist = [
{ name = "numpy", marker = "extra == 'model-server'", specifier = "==2.4.1" },
{ name = "oauthlib", marker = "extra == 'backend'", specifier = "==3.2.2" },
{ name = "office365-rest-python-client", marker = "extra == 'backend'", specifier = "==2.6.2" },
{ name = "onyx-devtools", marker = "extra == 'dev'", specifier = "==0.7.2" },
{ name = "onyx-devtools", marker = "extra == 'dev'", specifier = "==0.7.3" },
{ name = "openai", specifier = "==2.14.0" },
{ name = "openapi-generator-cli", marker = "extra == 'dev'", specifier = "==7.17.0" },
{ name = "openinference-instrumentation", marker = "extra == 'backend'", specifier = "==0.1.42" },
@@ -4564,19 +4564,19 @@ requires-dist = [{ name = "onyx", extras = ["backend", "dev", "ee"], editable =
[[package]]
name = "onyx-devtools"
version = "0.7.2"
version = "0.7.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "fastapi" },
{ name = "openapi-generator-cli" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/22/b0/765ed49157470e8ccc8ab89e6a896ade50cde3aa2a494662ad4db92a48c4/onyx_devtools-0.7.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:553a2b5e61b29b7913c991c8d5aed78f930f0f81a0f42229c6a8de2b1e8ff57e", size = 4203859, upload-time = "2026-03-27T15:09:49.63Z" },
{ url = "https://files.pythonhosted.org/packages/f7/9d/bba0a44a16d2fc27e5441aaf10727e10514e7a49bce70eca02bced566eb9/onyx_devtools-0.7.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:5cf0782dca8b3d861de9e18e65e990cfce5161cd559df44d8fabd3fefd54fdcd", size = 3879750, upload-time = "2026-03-27T15:09:42.413Z" },
{ url = "https://files.pythonhosted.org/packages/4d/d8/c5725e8af14c74fe0aeed29e4746400bb3c0a078fd1240df729dc6432b84/onyx_devtools-0.7.2-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:9a0d67373e16b4fbb38a5290c0d9dfd4cfa837e5da0c165b32841b9d37f7455b", size = 3743529, upload-time = "2026-03-27T15:09:44.546Z" },
{ url = "https://files.pythonhosted.org/packages/1a/82/b7c398a21dbc3e14fd7a29e49caa86b1bc0f8d7c75c051514785441ab779/onyx_devtools-0.7.2-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:794af14b2de575d0ae41b94551399eca8f8ba9b950c5db7acb7612767fd228f9", size = 4166562, upload-time = "2026-03-27T15:09:49.471Z" },
{ url = "https://files.pythonhosted.org/packages/26/76/be129e2baafc91fe792d919b1f4d73fc943ba9c2b728a60f1fb98e0c115a/onyx_devtools-0.7.2-py3-none-win_amd64.whl", hash = "sha256:83b3eb84df58d865e4f714222a5fab3ea464836e2c8690569454a940bbb651ff", size = 4282270, upload-time = "2026-03-27T15:09:44.676Z" },
{ url = "https://files.pythonhosted.org/packages/3b/72/29b8c8dbcf069c56475f00511f04c4aaa5ba3faba1dfc8276107d4b3ef7f/onyx_devtools-0.7.2-py3-none-win_arm64.whl", hash = "sha256:62f0836624ee6a5b31e64fd93162e7fce142ac8a4f959607e411824bc2b88174", size = 3823053, upload-time = "2026-03-27T15:09:43.546Z" },
{ url = "https://files.pythonhosted.org/packages/72/64/c75be8ab325896cc64bccd0e1e139a03ce305bf05598967922d380fc4694/onyx_devtools-0.7.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:675e2fdbd8d291fba4b8a6dfcf2bc94c56d22d11f395a9f0d0c3c0e5b39d7f9b", size = 4220613, upload-time = "2026-04-09T00:04:36.624Z" },
{ url = "https://files.pythonhosted.org/packages/ae/1f/589ff6bd446c4498f5bcdfd2a315709e91fc15edf5440c91ff64cbf0800f/onyx_devtools-0.7.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:bf3993de8ba02d6c2f1ab12b5b9b965e005040b37502f97db8a7d88d9b0cde4b", size = 3897867, upload-time = "2026-04-09T00:04:40.781Z" },
{ url = "https://files.pythonhosted.org/packages/10/c0/53c9173eefc13218707282c5b99753960d039684994c3b3caf90ce286094/onyx_devtools-0.7.3-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:6138a94084bed05c674ad210a0bc4006c43bc4384e8eb54d469233de85c72bd7", size = 3762408, upload-time = "2026-04-09T00:04:41.592Z" },
{ url = "https://files.pythonhosted.org/packages/d2/37/69fadb65112854a596d200f704da94b837817d4dd0f46cb4482dc0309c94/onyx_devtools-0.7.3-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:90dac91b0cdc32eb8861f6e83545009a34c439fd3c41fc7dd499acd0105b660e", size = 4184427, upload-time = "2026-04-09T00:04:41.525Z" },
{ url = "https://files.pythonhosted.org/packages/bd/45/91c829ccb45f1a15e7c9641eccc6dd154adb540e03c7dee2a8f28cea24d0/onyx_devtools-0.7.3-py3-none-win_amd64.whl", hash = "sha256:abc68d70bec06e349481beec4b212de28a1a8b7ed6ef3b41daf7093ee10b44f3", size = 4299935, upload-time = "2026-04-09T00:04:40.262Z" },
{ url = "https://files.pythonhosted.org/packages/cc/30/c5adcb8e3b46b71d8d92c3f9ee0c1d0bc5e2adc9f46e93931f21b36a3ee4/onyx_devtools-0.7.3-py3-none-win_arm64.whl", hash = "sha256:9e4411cadc5e81fabc9ed991402e3b4b40f02800681299c277b2142e5af0dcee", size = 3840228, upload-time = "2026-04-09T00:04:39.708Z" },
]
[[package]]

View File

@@ -171,6 +171,7 @@ export { default as SvgTrash } from "@opal/icons/trash";
export { default as SvgTwoLineSmall } from "@opal/icons/two-line-small";
export { default as SvgUnplug } from "@opal/icons/unplug";
export { default as SvgUploadCloud } from "@opal/icons/upload-cloud";
export { default as SvgUploadSquare } from "@opal/icons/upload-square";
export { default as SvgUser } from "@opal/icons/user";
export { default as SvgUserCheck } from "@opal/icons/user-check";
export { default as SvgUserEdit } from "@opal/icons/user-edit";

View File

@@ -0,0 +1,22 @@
import type { IconProps } from "@opal/types";

/**
 * Upload-square icon: an upward arrow rising out of a rounded-square outline.
 *
 * Renders a 16x16 stroke-based SVG. The `size` prop sets both width and
 * height; all remaining props are spread onto the root `<svg>` element so
 * callers can override stroke color, add class names, aria attributes, etc.
 * Stroke defaults to `currentColor`, so the icon inherits the text color.
 */
function SvgUploadSquare({ size, ...props }: IconProps) {
  return (
    <svg
      width={size}
      height={size}
      viewBox="0 0 16 16"
      fill="none"
      xmlns="http://www.w3.org/2000/svg"
      stroke="currentColor"
      {...props}
    >
      {/* Single path: rounded-square frame (broken at the top for the arrow)
          plus the up-arrow head and shaft. */}
      <path
        d="M11 14H12.6667C13.3929 14 14 13.3929 14 12.6667V3.33333C14 2.60711 13.3929 2 12.6667 2H3.33333C2.60711 2 2 2.60711 2 3.33333V12.6667C2 13.3929 2.60711 14 3.33333 14H5M10.6666 8.16667L7.99998 5.5M7.99998 5.5L5.33331 8.16667M7.99998 5.5V14"
        strokeWidth={1.5}
        strokeLinecap="round"
        strokeLinejoin="round"
      />
    </svg>
  );
}

export default SvgUploadSquare;

View File

@@ -144,7 +144,9 @@ export function useAdminLLMProviders() {
*/
export function useWellKnownLLMProvider(providerName: LLMProviderName) {
const { data, error, isLoading } = useSWR<WellKnownLLMProviderDescriptor>(
providerName ? SWR_KEYS.wellKnownLlmProvider(providerName) : null,
providerName && providerName !== LLMProviderName.CUSTOM
? SWR_KEYS.wellKnownLlmProvider(providerName)
: null,
errorHandlingFetcher,
{
revalidateOnFocus: false,

View File

@@ -1,6 +1,6 @@
"use client";
import { useState, useCallback, useEffect, useMemo, useRef } from "react";
import { useState, useCallback, useMemo } from "react";
import {
MAX_MODELS,
SelectedModel,
@@ -40,7 +40,6 @@ export default function useMultiModelChat(
llmManager: LlmManager
): UseMultiModelChatReturn {
const [selectedModels, setSelectedModels] = useState<SelectedModel[]>([]);
const [defaultInitialized, setDefaultInitialized] = useState(false);
// Initialize with the default model from llmManager once providers load
const llmOptions = useMemo(
@@ -49,89 +48,99 @@ export default function useMultiModelChat(
[llmManager.llmProviders]
);
// Sync selectedModels[0] with llmManager.currentLlm when in single-model
// mode. This handles both initial load and session override changes (e.g.
// page reload restores the persisted model after providers load).
// Skip when user has manually added multiple models (multi-model mode).
const selectedModelsRef = useRef(selectedModels);
selectedModelsRef.current = selectedModels;
useEffect(() => {
if (llmOptions.length === 0) return;
// In single-model mode, derive the displayed model directly from
// llmManager.currentLlm so it always stays in sync (no stale state).
// Only use the selectedModels state array when the user has manually
// added multiple models (multi-model mode).
const currentLlmModel = useMemo((): SelectedModel | null => {
if (llmOptions.length === 0) return null;
const { currentLlm } = llmManager;
if (!currentLlm.modelName) return;
const current = selectedModelsRef.current;
// Don't override multi-model selections
if (current.length > 1) return;
// Skip if already showing the correct model
if (
current.length === 1 &&
current[0]!.provider === currentLlm.provider &&
current[0]!.modelName === currentLlm.modelName
) {
return;
}
if (!currentLlm.modelName) return null;
const match = llmOptions.find(
(opt) =>
opt.provider === currentLlm.provider &&
opt.modelName === currentLlm.modelName
);
if (match) {
setSelectedModels([
{
name: match.name,
provider: match.provider,
modelName: match.modelName,
displayName: match.displayName,
},
]);
setDefaultInitialized(true);
}
if (!match) return null;
return {
name: match.name,
provider: match.provider,
modelName: match.modelName,
displayName: match.displayName,
};
}, [llmOptions, llmManager.currentLlm]);
const isMultiModelActive = selectedModels.length > 1;
const addModel = useCallback((model: SelectedModel) => {
setSelectedModels((prev) => {
if (prev.length >= MAX_MODELS) return prev;
if (
prev.some(
(m) =>
m.provider === model.provider && m.modelName === model.modelName
)
) {
return prev;
}
return [...prev, model];
});
}, []);
// Expose the effective selection: multi-model state when active,
// otherwise the single model derived from llmManager.
const effectiveSelectedModels = useMemo(
() =>
isMultiModelActive
? selectedModels
: currentLlmModel
? [currentLlmModel]
: [],
[isMultiModelActive, selectedModels, currentLlmModel]
);
const addModel = useCallback(
(model: SelectedModel) => {
setSelectedModels((prev) => {
// When in effective single-model mode (prev <= 1), always re-seed from
// the current derived model so stale state from a prior remove doesn't persist.
const base =
prev.length <= 1 && currentLlmModel ? [currentLlmModel] : prev;
if (base.length >= MAX_MODELS) return base;
if (
base.some(
(m) =>
m.provider === model.provider && m.modelName === model.modelName
)
) {
return base;
}
return [...base, model];
});
},
[currentLlmModel]
);
const removeModel = useCallback((index: number) => {
setSelectedModels((prev) => prev.filter((_, i) => i !== index));
}, []);
const replaceModel = useCallback((index: number, model: SelectedModel) => {
setSelectedModels((prev) => {
// Don't replace with a model that's already selected elsewhere
if (
prev.some(
(m, i) =>
i !== index &&
m.provider === model.provider &&
m.modelName === model.modelName
)
) {
return prev;
const replaceModel = useCallback(
(index: number, model: SelectedModel) => {
// In single-model mode, update llmManager directly so currentLlm
// (and thus effectiveSelectedModels) reflects the change immediately.
if (!isMultiModelActive) {
llmManager.updateCurrentLlm({
name: model.name,
provider: model.provider,
modelName: model.modelName,
});
return;
}
const next = [...prev];
next[index] = model;
return next;
});
}, []);
setSelectedModels((prev) => {
// Don't replace with a model that's already selected elsewhere
if (
prev.some(
(m, i) =>
i !== index &&
m.provider === model.provider &&
m.modelName === model.modelName
)
) {
return prev;
}
const next = [...prev];
next[index] = model;
return next;
});
},
[isMultiModelActive, llmManager]
);
const clearModels = useCallback(() => {
setSelectedModels([]);
@@ -161,7 +170,6 @@ export default function useMultiModelChat(
}
if (restored.length >= 2) {
setSelectedModels(restored.slice(0, MAX_MODELS));
setDefaultInitialized(true);
}
},
[llmOptions]
@@ -191,15 +199,15 @@ export default function useMultiModelChat(
);
const buildLlmOverrides = useCallback((): LLMOverride[] => {
return selectedModels.map((m) => ({
return effectiveSelectedModels.map((m) => ({
model_provider: m.name,
model_version: m.modelName,
display_name: m.displayName,
}));
}, [selectedModels]);
}, [effectiveSelectedModels]);
return {
selectedModels,
selectedModels: effectiveSelectedModels,
isMultiModelActive,
addModel,
removeModel,

View File

@@ -671,7 +671,8 @@ export function useLlmManager(
const [userHasManuallyOverriddenLLM, setUserHasManuallyOverriddenLLM] =
useState(false);
const [chatSession, setChatSession] = useState<ChatSession | null>(null);
const [currentLlm, setCurrentLlm] = useState<LlmDescriptor>({
// Manual override value — only used when userHasManuallyOverriddenLLM is true
const [manualLlm, setManualLlm] = useState<LlmDescriptor>({
name: "",
provider: "",
modelName: "",
@@ -693,55 +694,77 @@ export function useLlmManager(
prevAgentIdRef.current = liveAgent?.id;
}, [liveAgent?.id]);
const llmUpdate = () => {
/* Should be called when the live assistant or current chat session changes */
// Don't update if providers haven't loaded yet (undefined/null)
// Empty arrays are valid (user has no provider access for this assistant)
if (llmProviders === undefined || llmProviders === null) {
return;
}
// separate function so we can `return` to break out
const _llmUpdate = () => {
// if the user has overridden in this session and just switched to a brand
// new session, use their manually specified model
if (userHasManuallyOverriddenLLM && !currentChatSession) {
return;
}
if (currentChatSession?.current_alternate_model) {
setCurrentLlm(
getValidLlmDescriptor(currentChatSession.current_alternate_model)
);
} else if (liveAgent?.llm_model_version_override) {
setCurrentLlm(
getValidLlmDescriptor(liveAgent.llm_model_version_override)
);
} else if (userHasManuallyOverriddenLLM) {
// if the user has an override and there's nothing special about the
// current chat session, use the override
return;
} else if (user?.preferences?.default_model) {
setCurrentLlm(getValidLlmDescriptor(user.preferences.default_model));
} else {
const defaultLlm = getDefaultLlmDescriptor(llmProviders, defaultText);
if (defaultLlm) {
setCurrentLlm(defaultLlm);
}
}
};
_llmUpdate();
setChatSession(currentChatSession || null);
};
function getValidLlmDescriptor(
modelName: string | null | undefined
): LlmDescriptor {
return getValidLlmDescriptorForProviders(modelName, llmProviders);
}
// Compute the resolved LLM synchronously so it's never one render behind.
// This replaces the old llmUpdate() effect for model resolution.
// Wrapped with a ref for referential stability — returns the same object
// when the resolved name/provider/modelName haven't actually changed,
// preventing unnecessary re-creation of downstream callbacks (e.g. onSubmit).
const prevLlmRef = useRef<LlmDescriptor>({
name: "",
provider: "",
modelName: "",
});
const currentLlm = useMemo((): LlmDescriptor => {
let resolved: LlmDescriptor;
if (llmProviders === undefined || llmProviders === null) {
resolved = manualLlm;
} else if (userHasManuallyOverriddenLLM && !currentChatSession) {
// User has overridden in this session and switched to a new session
resolved = manualLlm;
} else if (currentChatSession?.current_alternate_model) {
resolved = getValidLlmDescriptorForProviders(
currentChatSession.current_alternate_model,
llmProviders
);
} else if (liveAgent?.llm_model_version_override) {
resolved = getValidLlmDescriptorForProviders(
liveAgent.llm_model_version_override,
llmProviders
);
} else if (userHasManuallyOverriddenLLM) {
resolved = manualLlm;
} else if (user?.preferences?.default_model) {
resolved = getValidLlmDescriptorForProviders(
user.preferences.default_model,
llmProviders
);
} else {
resolved =
getDefaultLlmDescriptor(llmProviders, defaultText) ?? manualLlm;
}
const prev = prevLlmRef.current;
if (
prev.name === resolved.name &&
prev.provider === resolved.provider &&
prev.modelName === resolved.modelName
) {
return prev;
}
prevLlmRef.current = resolved;
return resolved;
}, [
llmProviders,
defaultText,
currentChatSession,
liveAgent?.llm_model_version_override,
userHasManuallyOverriddenLLM,
manualLlm,
user?.preferences?.default_model,
]);
// Keep chatSession state in sync (used by temperature effect)
useEffect(() => {
setChatSession(currentChatSession || null);
}, [currentChatSession]);
const [imageFilesPresent, setImageFilesPresent] = useState(false);
const updateImageFilesPresent = (present: boolean) => {
@@ -750,18 +773,18 @@ export function useLlmManager(
// Manually set the LLM
const updateCurrentLlm = (newLlm: LlmDescriptor) => {
setCurrentLlm(newLlm);
setManualLlm(newLlm);
setUserHasManuallyOverriddenLLM(true);
};
const updateCurrentLlmToModelName = (modelName: string) => {
setCurrentLlm(getValidLlmDescriptor(modelName));
setManualLlm(getValidLlmDescriptor(modelName));
setUserHasManuallyOverriddenLLM(true);
};
const updateModelOverrideBasedOnChatSession = (chatSession?: ChatSession) => {
if (chatSession && chatSession.current_alternate_model?.length > 0) {
setCurrentLlm(getValidLlmDescriptor(chatSession.current_alternate_model));
setManualLlm(getValidLlmDescriptor(chatSession.current_alternate_model));
}
};
@@ -811,8 +834,6 @@ export function useLlmManager(
}, [currentLlm]);
useEffect(() => {
llmUpdate();
if (!chatSession && currentChatSession) {
if (temperature) {
updateTemperatureOverrideForChatSession(

View File

@@ -62,7 +62,7 @@ const PROVIDER_PRODUCT_NAMES: Record<string, string> = {
[LLMProviderName.OPENROUTER]: "OpenRouter",
[LLMProviderName.LM_STUDIO]: "LM Studio",
[LLMProviderName.BIFROST]: "Bifrost",
[LLMProviderName.OPENAI_COMPATIBLE]: "OpenAI Compatible",
[LLMProviderName.OPENAI_COMPATIBLE]: "OpenAI-Compatible",
// fallback
[LLMProviderName.CUSTOM]: "Custom Models",
@@ -80,10 +80,10 @@ const PROVIDER_DISPLAY_NAMES: Record<string, string> = {
[LLMProviderName.OPENROUTER]: "OpenRouter",
[LLMProviderName.LM_STUDIO]: "LM Studio",
[LLMProviderName.BIFROST]: "Bifrost",
[LLMProviderName.OPENAI_COMPATIBLE]: "OpenAI Compatible",
[LLMProviderName.OPENAI_COMPATIBLE]: "OpenAI-Compatible",
// fallback
[LLMProviderName.CUSTOM]: "Other providers or self-hosted",
[LLMProviderName.CUSTOM]: "models from other LiteLLM-compatible providers",
};
export function getProviderProductName(providerName: string): string {

View File

@@ -22,7 +22,7 @@ import {
SvgImage,
SvgLoader,
SvgMoreHorizontal,
SvgPaperclip,
SvgUploadSquare,
} from "@opal/icons";
const getFileExtension = (fileName: string): string => {
const idx = fileName.lastIndexOf(".");
@@ -125,7 +125,7 @@ function FilePickerPopoverContents({
// Action button to upload more files
<LineItem
key="upload-files"
icon={SvgPaperclip}
icon={SvgUploadSquare}
description="Upload a file from your device"
onClick={triggerUploadPicker}
>

View File

@@ -44,8 +44,8 @@ import {
SvgGlobe,
SvgHourglass,
SvgMicrophone,
SvgPaperclip,
SvgPlus,
SvgPlusCircle,
SvgSearch,
SvgStop,
SvgX,
@@ -507,7 +507,7 @@ const AppInputBar = React.memo(
trigger={(open) => (
<Button
disabled={disabled}
icon={SvgPlusCircle}
icon={SvgPaperclip}
tooltip="Attach Files"
interaction={open ? "hover" : "rest"}
prominence="tertiary"

View File

@@ -87,7 +87,9 @@ describe("Custom LLM Provider Configuration Workflow", () => {
await user.type(nameInput, options.name);
// Select provider from the combo box dropdown
const providerInput = screen.getByPlaceholderText("Select a provider");
const providerInput = screen.getByPlaceholderText(
"Provider ID string as shown on LiteLLM"
);
await user.click(providerInput);
const providerOption = await screen.findByRole("option", {
name: new RegExp(options.provider, "i"),
@@ -524,7 +526,9 @@ describe("Custom LLM Provider Configuration Workflow", () => {
await user.type(nameInput, "Cloudflare Provider");
// Select provider from the combo box dropdown
const providerInput = screen.getByPlaceholderText("Select a provider");
const providerInput = screen.getByPlaceholderText(
"Provider ID string as shown on LiteLLM"
);
await user.click(providerInput);
const providerOption = await screen.findByRole("option", {
name: /cloudflare/i,
@@ -538,7 +542,9 @@ describe("Custom LLM Provider Configuration Workflow", () => {
await user.click(addLineButton);
// Fill in custom config key-value pair
const keyInputs = screen.getAllByRole("textbox", { name: /Key \d+/ });
const keyInputs = screen.getAllByRole("textbox", {
name: /e\.g\. OPENAI_ORGANIZATION \d+/,
});
const valueInputs = screen.getAllByRole("textbox", { name: /Value \d+/ });
await user.type(keyInputs[0]!, "CLOUDFLARE_ACCOUNT_ID");

View File

@@ -184,6 +184,7 @@ function CustomConfigKeyValue() {
return (
<KeyValueInput
items={formikProps.values.custom_config_list}
keyPlaceholder="e.g. OPENAI_ORGANIZATION"
onChange={(items) =>
formikProps.setFieldValue("custom_config_list", items)
}
@@ -213,7 +214,7 @@ function ProviderNameSelect({ disabled }: { disabled?: boolean }) {
value={values.provider}
onValueChange={(value) => setFieldValue("provider", value)}
options={options}
placeholder="Select a provider"
placeholder="Provider ID string as shown on LiteLLM"
disabled={disabled}
createPrefix="Use"
dropdownMaxHeight="60vh"
@@ -307,6 +308,7 @@ export default function CustomModal({
onClose={onClose}
initialValues={initialValues}
validationSchema={validationSchema}
description="Connect models from other LiteLLM-compatible providers."
onSubmit={async (values, { setSubmitting, setStatus }) => {
setSubmitting(true);
@@ -370,15 +372,20 @@ export default function CustomModal({
<InputLayouts.FieldPadder>
<InputLayouts.Vertical
name="provider"
title="Provider Name"
title="Provider"
subDescription={markdown(
"Should be one of the providers listed at [LiteLLM](https://docs.litellm.ai/docs/providers)."
"See full list of supported LLM providers at [LiteLLM](https://docs.litellm.ai/docs/providers)."
)}
>
<ProviderNameSelect disabled={!!existingLlmProvider} />
</InputLayouts.Vertical>
</InputLayouts.FieldPadder>
<APIKeyField
optional
subDescription="Paste your API key if your model provider requires authentication."
/>
<APIBaseField optional />
<InputLayouts.FieldPadder>
@@ -391,15 +398,10 @@ export default function CustomModal({
</InputLayouts.Vertical>
</InputLayouts.FieldPadder>
<APIKeyField
optional
subDescription="Paste your API key if your model provider requires authentication."
/>
<InputLayouts.FieldPadder>
<Section gap={0.75}>
<Content
title="Additional Configs"
title="Environment Variables"
description={markdown(
"Add extra properties as needed by the model provider. These are passed to LiteLLM's `completion()` call as [environment variables](https://docs.litellm.ai/docs/set_keys#environment-variables). See [documentation](https://docs.onyx.app/admins/ai_models/custom_inference_provider) for more instructions."
)}

View File

@@ -61,15 +61,15 @@ function OpenAICompatibleModalInternals({
return (
<>
<APIBaseField
subDescription="The base URL of your OpenAI-compatible server."
subDescription={markdown(
"Paste your OpenAI-compatible endpoint URL. [Learn More](https://docs.litellm.ai/docs/providers/openai_compatible)"
)}
placeholder="http://localhost:8000/v1"
/>
<APIKeyField
optional
subDescription={markdown(
"Provide an API key if your server requires authentication."
)}
subDescription="Paste your API key if your model provider requires authentication."
/>
{!isOnboarding && (
@@ -123,6 +123,7 @@ export default function OpenAICompatibleModal({
llmProvider={existingLlmProvider}
onClose={onClose}
initialValues={initialValues}
description="Connect from other cloud or self-hosted models via OpenAI-compatible endpoints."
validationSchema={validationSchema}
onSubmit={async (values, { setSubmitting, setStatus }) => {
await submitProvider({

View File

@@ -640,6 +640,7 @@ export interface ModalWrapperProps<
validationSchema: FormikConfig<T>["validationSchema"];
onSubmit: FormikConfig<T>["onSubmit"];
children: React.ReactNode;
description?: string;
}
export function ModalWrapper<T extends BaseLLMFormValues = BaseLLMFormValues>({
providerName,
@@ -649,6 +650,7 @@ export function ModalWrapper<T extends BaseLLMFormValues = BaseLLMFormValues>({
validationSchema,
onSubmit,
children,
description,
}: ModalWrapperProps<T>) {
return (
<Formik
@@ -663,6 +665,7 @@ export function ModalWrapper<T extends BaseLLMFormValues = BaseLLMFormValues>({
llmProvider={llmProvider}
onClose={onClose}
modelConfigurations={initialValues.model_configurations}
description={description}
>
{children}
</ModalWrapperInner>
@@ -677,6 +680,7 @@ interface ModalWrapperInnerProps {
onClose: () => void;
modelConfigurations?: ModelConfiguration[];
children: React.ReactNode;
description?: string;
}
function ModalWrapperInner({
providerName,
@@ -684,6 +688,7 @@ function ModalWrapperInner({
onClose,
modelConfigurations,
children,
description: descriptionOverride,
}: ModalWrapperInnerProps) {
const { isValid, dirty, isSubmitting, status, setFieldValue, values } =
useFormikContext<BaseLLMFormValues>();
@@ -719,7 +724,9 @@ function ModalWrapperInner({
const title = llmProvider
? `Configure "${llmProvider.name}"`
: `Set up ${providerProductName}`;
const description = `Connect to ${providerDisplayName} and set up your ${providerProductName} models.`;
const description =
descriptionOverride ??
`Connect to ${providerDisplayName} and set up your ${providerProductName} models.`;
return (
<Modal open onOpenChange={onClose}>

View File

@@ -50,8 +50,8 @@ const PROVIDER_DISPLAY_INFO: Record<
displayName: "LiteLLM Proxy",
},
[LLMProviderName.OPENAI_COMPATIBLE]: {
title: "OpenAI Compatible",
displayName: "OpenAI Compatible",
title: "OpenAI-Compatible",
displayName: "OpenAI-Compatible",
},
};