Compare commits

..

1 Commit

Author SHA1 Message Date
Jamison Lahman
583236391f fix(onboarding): disabled button UX and auto_mode nits 2026-04-08 09:00:04 -07:00
32 changed files with 1109 additions and 843 deletions

View File

@@ -90,7 +90,6 @@ from onyx.onyxbot.slack.utils import respond_in_thread_or_channel
from onyx.onyxbot.slack.utils import TenantSocketModeClient
from onyx.redis.redis_pool import get_redis_client
from onyx.server.manage.models import SlackBotTokens
from onyx.tracing.setup import setup_tracing
from onyx.utils.logger import setup_logger
from onyx.utils.variable_functionality import fetch_ee_implementation_or_noop
from onyx.utils.variable_functionality import set_is_ee_based_on_env_variable
@@ -1207,7 +1206,6 @@ if __name__ == "__main__":
tenant_handler = SlackbotHandler()
set_is_ee_based_on_env_variable()
setup_tracing()
try:
# Keep the main thread alive

View File

@@ -0,0 +1,163 @@
"use client";
import { ArrayHelpers, FieldArray, FormikProps, useField } from "formik";
import { ModelConfiguration } from "@/interfaces/llm";
import { ManualErrorMessage, TextFormField } from "@/components/Field";
import { useEffect, useState } from "react";
import CreateButton from "@/refresh-components/buttons/CreateButton";
import { Button } from "@opal/components";
import { SvgX } from "@opal/icons";
import Text from "@/refresh-components/texts/Text";
/**
 * One editable row in the model-configuration list: a model-name input,
 * a max-input-tokens input, and a remove button.
 *
 * The row's Formik validation error is reported upward through `setError`
 * so the parent can aggregate per-row errors into a single message.
 */
function ModelConfigurationRow({
  name,
  index,
  arrayHelpers,
  formikProps,
  setError,
}: {
  // Formik field-array name (e.g. "model_configurations").
  name: string;
  // Position of this row within the array.
  index: number;
  // Formik array helpers; used to remove this row.
  arrayHelpers: ArrayHelpers;
  formikProps: FormikProps<{ model_configurations: ModelConfiguration[] }>;
  // Reports this row's current `name` validation error, or null when cleared.
  setError: (value: string | null) => void;
}) {
  // Subscribe to this row's field meta (touched/error state).
  const [, input] = useField(`${name}[${index}]`);
  useEffect(() => {
    // Only surface errors after the user has interacted with the field.
    if (!input.touched) return;
    // The row error is an object keyed by sub-field; report the name error.
    setError((input.error as { name: string } | undefined)?.name ?? null);
  }, [input.touched, input.error]);
  return (
    <div key={index} className="flex flex-row w-full gap-4">
      {/* Highlight the name field with an error border once touched+invalid. */}
      <div
        className={`flex flex-[2] ${
          input.touched && input.error ? "border-2 border-error rounded-lg" : ""
        }`}
      >
        <TextFormField
          name={`${name}[${index}].name`}
          label=""
          placeholder={`model-name-${index + 1}`}
          removeLabel
          hideError
        />
      </div>
      <div className="flex flex-[1]">
        <TextFormField
          name={`${name}[${index}].max_input_tokens`}
          label=""
          placeholder="Default"
          removeLabel
          hideError
          type="number"
          min={1}
        />
      </div>
      <div className="flex flex-col justify-center">
        {/* Removal is disabled when only one row remains. */}
        <Button
          disabled={formikProps.values.model_configurations.length <= 1}
          onClick={() => {
            if (formikProps.values.model_configurations.length > 1) {
              setError(null);
              arrayHelpers.remove(index);
            }
          }}
          icon={SvgX}
          prominence="secondary"
        />
      </div>
    </div>
  );
}
export function ModelConfigurationField({
name,
formikProps,
}: {
name: string;
formikProps: FormikProps<{ model_configurations: ModelConfiguration[] }>;
}) {
const [errorMap, setErrorMap] = useState<{ [index: number]: string }>({});
const [finalError, setFinalError] = useState<string | undefined>();
return (
<div className="pb-5 flex flex-col w-full">
<div className="flex flex-col">
<Text as="p" mainUiAction>
Model Configurations
</Text>
<Text as="p" secondaryBody text03>
Add models and customize the number of input tokens that they accept.
</Text>
</div>
<FieldArray
name={name}
render={(arrayHelpers: ArrayHelpers) => (
<div className="flex flex-col">
<div className="flex flex-col gap-4 py-4">
<div className="flex">
<Text as="p" secondaryBody className="flex flex-[2]">
Model Name
</Text>
<Text as="p" secondaryBody className="flex flex-[1]">
Max Input Tokens
</Text>
<div className="w-10" />
</div>
{formikProps.values.model_configurations.map((_, index) => (
<ModelConfigurationRow
key={index}
name={name}
formikProps={formikProps}
arrayHelpers={arrayHelpers}
index={index}
setError={(message: string | null) => {
const newErrors = { ...errorMap };
if (message) {
newErrors[index] = message;
} else {
delete newErrors[index];
for (const key in newErrors) {
const numKey = Number(key);
if (numKey > index) {
const errorValue = newErrors[key];
if (errorValue !== undefined) {
// Ensure the value is not undefined
newErrors[numKey - 1] = errorValue;
delete newErrors[numKey];
}
}
}
}
setErrorMap(newErrors);
setFinalError(
Object.values(newErrors).filter((item) => item)[0]
);
}}
/>
))}
</div>
{finalError && (
<ManualErrorMessage>{finalError}</ManualErrorMessage>
)}
<div className="mt-3">
<CreateButton
onClick={() => {
arrayHelpers.push({
name: "",
is_visible: true,
// Use null so Yup.number().nullable() accepts empty inputs
max_input_tokens: null,
});
}}
>
Add New
</CreateButton>
</div>
</div>
)}
/>
</div>
);
}

View File

@@ -1,18 +0,0 @@
import { defaultTailwindCSS } from "@/components/icons/icons";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { IconProps } from "@opal/types";
export interface ModelIconProps extends IconProps {
provider: string;
modelName?: string;
}
export default function ModelIcon({
provider,
modelName,
size = 16,
className = defaultTailwindCSS,
}: ModelIconProps) {
const Icon = getModelIcon(provider, modelName);
return <Icon size={size} className={className} />;
}

View File

@@ -0,0 +1,17 @@
import { defaultTailwindCSS, IconProps } from "@/components/icons/icons";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
export interface ProviderIconProps extends IconProps {
  provider: string;
  modelName?: string;
}

/**
 * Renders the icon for an LLM provider, optionally refined by model name
 * (e.g. a vendor-specific icon for an aggregator-hosted model).
 */
export const ProviderIcon = (props: ProviderIconProps) => {
  const {
    provider,
    modelName,
    size = 16,
    className = defaultTailwindCSS,
  } = props;
  const ResolvedIcon = getProviderIcon(provider, modelName);
  return <ResolvedIcon size={size} className={className} />;
};

View File

@@ -0,0 +1,622 @@
import { JSX } from "react";
import {
AnthropicIcon,
AmazonIcon,
AzureIcon,
CPUIcon,
MicrosoftIconSVG,
MistralIcon,
MetaIcon,
GeminiIcon,
IconProps,
DeepseekIcon,
OpenAISVG,
QwenIcon,
OllamaIcon,
LMStudioIcon,
LiteLLMIcon,
ZAIIcon,
} from "@/components/icons/icons";
import {
OllamaModelResponse,
OpenRouterModelResponse,
BedrockModelResponse,
LMStudioModelResponse,
LiteLLMProxyModelResponse,
BifrostModelResponse,
ModelConfiguration,
LLMProviderName,
BedrockFetchParams,
OllamaFetchParams,
LMStudioFetchParams,
OpenRouterFetchParams,
LiteLLMProxyFetchParams,
BifrostFetchParams,
OpenAICompatibleFetchParams,
OpenAICompatibleModelResponse,
} from "@/interfaces/llm";
import { SvgAws, SvgBifrost, SvgOpenrouter, SvgPlug } from "@opal/icons";
// Aggregator providers that host models from multiple vendors (e.g. Bedrock
// serves Anthropic/Meta/Mistral models). For these, icon resolution prefers
// the model name over the provider name — see getProviderIcon below.
export const AGGREGATOR_PROVIDERS = new Set([
  "bedrock",
  "bedrock_converse",
  "openrouter",
  "ollama_chat",
  "lm_studio",
  "litellm_proxy",
  "bifrost",
  "openai_compatible",
  "vertex_ai",
]);
// Provider/vendor keyword → icon lookup table. Hoisted to module scope so it
// is built once rather than on every getProviderIcon call.
const PROVIDER_ICON_MAP: Record<
  string,
  ({ size, className }: IconProps) => JSX.Element
> = {
  amazon: AmazonIcon,
  phi: MicrosoftIconSVG,
  mistral: MistralIcon,
  ministral: MistralIcon,
  llama: MetaIcon,
  ollama_chat: OllamaIcon,
  ollama: OllamaIcon,
  lm_studio: LMStudioIcon,
  gemini: GeminiIcon,
  deepseek: DeepseekIcon,
  claude: AnthropicIcon,
  anthropic: AnthropicIcon,
  openai: OpenAISVG,
  // Azure OpenAI should display the Azure logo
  azure: AzureIcon,
  microsoft: MicrosoftIconSVG,
  meta: MetaIcon,
  google: GeminiIcon,
  qwen: QwenIcon,
  qwq: QwenIcon,
  zai: ZAIIcon,
  // Cloud providers - use AWS icon for Bedrock
  bedrock: SvgAws,
  bedrock_converse: SvgAws,
  openrouter: SvgOpenrouter,
  litellm_proxy: LiteLLMIcon,
  bifrost: SvgBifrost,
  openai_compatible: SvgPlug,
  vertex_ai: GeminiIcon,
};

// Returns the first icon whose key appears in the (lowercased) model name,
// or undefined when nothing matches.
const findIconByModelName = (
  modelName: string
): (({ size, className }: IconProps) => JSX.Element) | undefined => {
  const lowerModelName = modelName.toLowerCase();
  for (const [key, icon] of Object.entries(PROVIDER_ICON_MAP)) {
    if (lowerModelName.includes(key)) {
      return icon;
    }
  }
  return undefined;
};

/**
 * Resolves the icon for a provider/model pair.
 *
 * For aggregator providers (bedrock, openrouter, vertex_ai, ...) the model
 * name is matched first so the vendor icon wins (e.g. the Claude icon for a
 * Bedrock-hosted Claude model); otherwise the provider name is matched
 * directly, then the model name, and finally the CPU icon is returned.
 */
export const getProviderIcon = (
  providerName: string,
  modelName?: string
): (({ size, className }: IconProps) => JSX.Element) => {
  const lowerProviderName = providerName.toLowerCase();
  // Aggregators: prioritize the vendor icon derived from the model name.
  if (AGGREGATOR_PROVIDERS.has(lowerProviderName) && modelName) {
    const vendorIcon = findIconByModelName(modelName);
    if (vendorIcon) {
      return vendorIcon;
    }
  }
  // Direct provider-name match.
  const providerIcon = PROVIDER_ICON_MAP[lowerProviderName];
  if (providerIcon) {
    return providerIcon;
  }
  // Non-aggregator providers: fall back to matching on the model name.
  if (modelName) {
    const vendorIcon = findIconByModelName(modelName);
    if (vendorIcon) {
      return vendorIcon;
    }
  }
  // Fallback to CPU icon if no matches
  return CPUIcon;
};
// True when the provider is Anthropic, or the model name mentions "claude"
// (covers Anthropic models hosted behind aggregator providers).
export const isAnthropic = (provider: string, modelName?: string) => {
  if (provider === LLMProviderName.ANTHROPIC) {
    return true;
  }
  return (modelName ?? "").toLowerCase().includes("claude");
};
/**
 * Fetches Bedrock models directly without any form state dependencies.
 * Uses snake_case params to match API structure.
 *
 * Returns `{ models }` on success, or `{ models: [], error }` on any failure
 * (missing region, HTTP error, network/JSON error) — never throws.
 */
export const fetchBedrockModels = async (
  params: BedrockFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  if (!params.aws_region_name) {
    return { models: [], error: "AWS region is required" };
  }
  try {
    const response = await fetch("/api/admin/llm/bedrock/available-models", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        aws_region_name: params.aws_region_name,
        aws_access_key_id: params.aws_access_key_id,
        aws_secret_access_key: params.aws_secret_access_key,
        aws_bearer_token_bedrock: params.aws_bearer_token_bedrock,
        provider_name: params.provider_name,
      }),
    });
    if (!response.ok) {
      // Prefer the API's error detail when the error body is JSON.
      let errorMessage = "Failed to fetch models";
      try {
        const errorData = await response.json();
        errorMessage = errorData.detail || errorData.message || errorMessage;
      } catch {
        // ignore JSON parsing errors
      }
      return { models: [], error: errorMessage };
    }
    const data: BedrockModelResponse[] = await response.json();
    // Bedrock models start hidden (is_visible: false) so admins opt in.
    const models: ModelConfiguration[] = data.map((modelData) => ({
      name: modelData.name,
      display_name: modelData.display_name,
      is_visible: false,
      max_input_tokens: modelData.max_input_tokens,
      supports_image_input: modelData.supports_image_input,
      supports_reasoning: false,
    }));
    return { models };
  } catch (error) {
    const errorMessage =
      error instanceof Error ? error.message : "Unknown error";
    return { models: [], error: errorMessage };
  }
};
/**
 * Fetches Ollama models directly without any form state dependencies.
 * Uses snake_case params to match API structure.
 *
 * Resolves to `{ models }` on success, or `{ models: [], error }` on any
 * failure — never throws.
 */
export const fetchOllamaModels = async (
  params: OllamaFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  const { api_base: apiBase, provider_name: providerName, signal } = params;
  if (!apiBase) {
    return { models: [], error: "API Base is required" };
  }
  try {
    const response = await fetch("/api/admin/llm/ollama/available-models", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        api_base: apiBase,
        provider_name: providerName,
      }),
      signal,
    });
    if (!response.ok) {
      // Prefer the API's error detail when the error body is JSON.
      let errorMessage = "Failed to fetch models";
      try {
        const errorData = await response.json();
        errorMessage = errorData.detail || errorData.message || errorMessage;
      } catch {
        // ignore JSON parsing errors
      }
      return { models: [], error: errorMessage };
    }
    const data: OllamaModelResponse[] = await response.json();
    return {
      models: data.map((modelData) => ({
        name: modelData.name,
        display_name: modelData.display_name,
        is_visible: true,
        max_input_tokens: modelData.max_input_tokens,
        supports_image_input: modelData.supports_image_input,
        supports_reasoning: false,
      })),
    };
  } catch (error) {
    return {
      models: [],
      error: error instanceof Error ? error.message : "Unknown error",
    };
  }
};
/**
 * Fetches OpenRouter models directly without any form state dependencies.
 * Uses snake_case params to match API structure.
 *
 * Requires both an API base and an API key. Returns `{ models }` on success,
 * or `{ models: [], error }` on any failure — never throws.
 */
export const fetchOpenRouterModels = async (
  params: OpenRouterFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  const apiBase = params.api_base;
  const apiKey = params.api_key;
  if (!apiBase) {
    return { models: [], error: "API Base is required" };
  }
  if (!apiKey) {
    return { models: [], error: "API Key is required" };
  }
  try {
    const response = await fetch("/api/admin/llm/openrouter/available-models", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        api_base: apiBase,
        api_key: apiKey,
        provider_name: params.provider_name,
      }),
    });
    if (!response.ok) {
      // Prefer the API's error detail when the error body is JSON.
      let errorMessage = "Failed to fetch models";
      try {
        const errorData = await response.json();
        errorMessage = errorData.detail || errorData.message || errorMessage;
      } catch (jsonError) {
        console.warn(
          "Failed to parse OpenRouter model fetch error response",
          jsonError
        );
      }
      return { models: [], error: errorMessage };
    }
    const data: OpenRouterModelResponse[] = await response.json();
    const models: ModelConfiguration[] = data.map((modelData) => ({
      name: modelData.name,
      display_name: modelData.display_name,
      is_visible: true,
      max_input_tokens: modelData.max_input_tokens,
      supports_image_input: modelData.supports_image_input,
      supports_reasoning: false,
    }));
    return { models };
  } catch (error) {
    const errorMessage =
      error instanceof Error ? error.message : "Unknown error";
    return { models: [], error: errorMessage };
  }
};
/**
 * Fetches LM Studio models directly without any form state dependencies.
 * Uses snake_case params to match API structure.
 *
 * `api_key_changed` tells the backend whether the supplied key is new or a
 * stored placeholder. Returns `{ models }` on success, or
 * `{ models: [], error }` on any failure — never throws.
 */
export const fetchLMStudioModels = async (
  params: LMStudioFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  const apiBase = params.api_base;
  if (!apiBase) {
    return { models: [], error: "API Base is required" };
  }
  try {
    const response = await fetch("/api/admin/llm/lm-studio/available-models", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        api_base: apiBase,
        api_key: params.api_key,
        api_key_changed: params.api_key_changed ?? false,
        provider_name: params.provider_name,
      }),
      signal: params.signal,
    });
    if (!response.ok) {
      // Prefer the API's error detail when the error body is JSON.
      let errorMessage = "Failed to fetch models";
      try {
        const errorData = await response.json();
        errorMessage = errorData.detail || errorData.message || errorMessage;
      } catch (jsonError) {
        console.warn(
          "Failed to parse LM Studio model fetch error response",
          jsonError
        );
      }
      return { models: [], error: errorMessage };
    }
    const data: LMStudioModelResponse[] = await response.json();
    // LM Studio reports reasoning support per model; pass it through.
    const models: ModelConfiguration[] = data.map((modelData) => ({
      name: modelData.name,
      display_name: modelData.display_name,
      is_visible: true,
      max_input_tokens: modelData.max_input_tokens,
      supports_image_input: modelData.supports_image_input,
      supports_reasoning: modelData.supports_reasoning,
    }));
    return { models };
  } catch (error) {
    const errorMessage =
      error instanceof Error ? error.message : "Unknown error";
    return { models: [], error: errorMessage };
  }
};
/**
 * Fetches Bifrost models directly without any form state dependencies.
 * Uses snake_case params to match API structure.
 *
 * Returns `{ models }` on success, or `{ models: [], error }` on any
 * failure — never throws.
 */
export const fetchBifrostModels = async (
  params: BifrostFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  const apiBase = params.api_base;
  if (!apiBase) {
    return { models: [], error: "API Base is required" };
  }
  try {
    const response = await fetch("/api/admin/llm/bifrost/available-models", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        api_base: apiBase,
        api_key: params.api_key,
        provider_name: params.provider_name,
      }),
      signal: params.signal,
    });
    if (!response.ok) {
      // Prefer the API's error detail when the error body is JSON.
      let errorMessage = "Failed to fetch models";
      try {
        const errorData = await response.json();
        errorMessage = errorData.detail || errorData.message || errorMessage;
      } catch (jsonError) {
        console.warn(
          "Failed to parse Bifrost model fetch error response",
          jsonError
        );
      }
      return { models: [], error: errorMessage };
    }
    const data: BifrostModelResponse[] = await response.json();
    // Bifrost reports reasoning support per model; pass it through.
    const models: ModelConfiguration[] = data.map((modelData) => ({
      name: modelData.name,
      display_name: modelData.display_name,
      is_visible: true,
      max_input_tokens: modelData.max_input_tokens,
      supports_image_input: modelData.supports_image_input,
      supports_reasoning: modelData.supports_reasoning,
    }));
    return { models };
  } catch (error) {
    const errorMessage =
      error instanceof Error ? error.message : "Unknown error";
    return { models: [], error: errorMessage };
  }
};
/**
 * Fetches models from a generic OpenAI-compatible server.
 * Uses snake_case params to match API structure.
 *
 * Returns `{ models }` on success, or `{ models: [], error }` on any
 * failure — never throws.
 */
export const fetchOpenAICompatibleModels = async (
  params: OpenAICompatibleFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  const apiBase = params.api_base;
  if (!apiBase) {
    return { models: [], error: "API Base is required" };
  }
  try {
    const response = await fetch(
      "/api/admin/llm/openai-compatible/available-models",
      {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
        },
        body: JSON.stringify({
          api_base: apiBase,
          api_key: params.api_key,
          provider_name: params.provider_name,
        }),
        signal: params.signal,
      }
    );
    if (!response.ok) {
      // Prefer the API's error detail when the error body is JSON.
      let errorMessage = "Failed to fetch models";
      try {
        const errorData = await response.json();
        errorMessage = errorData.detail || errorData.message || errorMessage;
      } catch {
        // ignore JSON parsing errors
      }
      return { models: [], error: errorMessage };
    }
    const data: OpenAICompatibleModelResponse[] = await response.json();
    const models: ModelConfiguration[] = data.map((modelData) => ({
      name: modelData.name,
      display_name: modelData.display_name,
      is_visible: true,
      max_input_tokens: modelData.max_input_tokens,
      supports_image_input: modelData.supports_image_input,
      supports_reasoning: modelData.supports_reasoning,
    }));
    return { models };
  } catch (error) {
    const errorMessage =
      error instanceof Error ? error.message : "Unknown error";
    return { models: [], error: errorMessage };
  }
};
/**
 * Fetches LiteLLM Proxy models directly without any form state dependencies.
 * Uses snake_case params to match API structure.
 *
 * Requires both an API base and an API key. Returns `{ models }` on success,
 * or `{ models: [], error }` on any failure — never throws.
 */
export const fetchLiteLLMProxyModels = async (
  params: LiteLLMProxyFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  const apiBase = params.api_base;
  const apiKey = params.api_key;
  if (!apiBase) {
    return { models: [], error: "API Base is required" };
  }
  if (!apiKey) {
    return { models: [], error: "API Key is required" };
  }
  try {
    const response = await fetch("/api/admin/llm/litellm/available-models", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        api_base: apiBase,
        api_key: apiKey,
        provider_name: params.provider_name,
      }),
      signal: params.signal,
    });
    if (!response.ok) {
      // Prefer the API's error detail when the error body is JSON.
      let errorMessage = "Failed to fetch models";
      try {
        const errorData = await response.json();
        errorMessage = errorData.detail || errorData.message || errorMessage;
      } catch {
        // ignore JSON parsing errors
      }
      return { models: [], error: errorMessage };
    }
    const data: LiteLLMProxyModelResponse[] = await response.json();
    // LiteLLM's listing exposes only the model name, so token limits and
    // capability flags default to conservative values here.
    const models: ModelConfiguration[] = data.map((modelData) => ({
      name: modelData.model_name,
      display_name: modelData.model_name,
      is_visible: true,
      max_input_tokens: null,
      supports_image_input: false,
      supports_reasoning: false,
    }));
    return { models };
  } catch (error) {
    const errorMessage =
      error instanceof Error ? error.message : "Unknown error";
    return { models: [], error: errorMessage };
  }
};
/**
 * Fetches models for a provider. Accepts form values directly and maps them
 * to the expected fetch params format internally.
 *
 * Dispatches to the provider-specific fetcher; unknown providers resolve to
 * `{ models: [], error }` rather than throwing.
 */
export const fetchModels = async (
  providerName: string,
  formValues: {
    api_base?: string;
    api_key?: string;
    api_key_changed?: boolean;
    name?: string;
    custom_config?: Record<string, string>;
    model_configurations?: ModelConfiguration[];
  },
  signal?: AbortSignal
) => {
  // Normalize once so every branch reads credentials the same way.
  const customConfig = formValues.custom_config || {};
  switch (providerName) {
    case LLMProviderName.BEDROCK:
      return fetchBedrockModels({
        aws_region_name: customConfig.AWS_REGION_NAME || "",
        aws_access_key_id: customConfig.AWS_ACCESS_KEY_ID,
        aws_secret_access_key: customConfig.AWS_SECRET_ACCESS_KEY,
        aws_bearer_token_bedrock: customConfig.AWS_BEARER_TOKEN_BEDROCK,
        provider_name: formValues.name,
      });
    case LLMProviderName.OLLAMA_CHAT:
      return fetchOllamaModels({
        api_base: formValues.api_base,
        provider_name: formValues.name,
        signal,
      });
    case LLMProviderName.LM_STUDIO:
      return fetchLMStudioModels({
        api_base: formValues.api_base,
        // Read from the normalized customConfig like the Bedrock branch
        // (previously reached through formValues.custom_config directly).
        api_key: customConfig.LM_STUDIO_API_KEY,
        api_key_changed: formValues.api_key_changed ?? false,
        provider_name: formValues.name,
        signal,
      });
    case LLMProviderName.OPENROUTER:
      return fetchOpenRouterModels({
        api_base: formValues.api_base,
        api_key: formValues.api_key,
        provider_name: formValues.name,
      });
    case LLMProviderName.LITELLM_PROXY:
      return fetchLiteLLMProxyModels({
        api_base: formValues.api_base,
        api_key: formValues.api_key,
        provider_name: formValues.name,
        signal,
      });
    case LLMProviderName.BIFROST:
      return fetchBifrostModels({
        api_base: formValues.api_base,
        api_key: formValues.api_key,
        provider_name: formValues.name,
        signal,
      });
    case LLMProviderName.OPENAI_COMPATIBLE:
      return fetchOpenAICompatibleModels({
        api_base: formValues.api_base,
        api_key: formValues.api_key,
        provider_name: formValues.name,
        signal,
      });
    default:
      return { models: [], error: `Unknown provider: ${providerName}` };
  }
};
/**
 * Whether the given provider supports fetching its model list from the
 * backend (i.e. has a dedicated available-models endpoint).
 */
export function canProviderFetchModels(providerName?: string) {
  if (!providerName) return false;
  const fetchableProviders: string[] = [
    LLMProviderName.BEDROCK,
    LLMProviderName.OLLAMA_CHAT,
    LLMProviderName.LM_STUDIO,
    LLMProviderName.OPENROUTER,
    LLMProviderName.LITELLM_PROXY,
    LLMProviderName.BIFROST,
    LLMProviderName.OPENAI_COMPATIBLE,
  ];
  return fetchableProviders.includes(providerName);
}

View File

@@ -5,7 +5,7 @@ import { Button } from "@opal/components";
import { Text } from "@opal/components";
import { ContentAction } from "@opal/layouts";
import { SvgEyeOff, SvgX } from "@opal/icons";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
import AgentMessage, {
AgentMessageProps,
} from "@/app/app/message/messageComponents/AgentMessage";
@@ -71,7 +71,7 @@ export default function MultiModelPanel({
errorStackTrace,
errorDetails,
}: MultiModelPanelProps) {
const ModelIcon = getModelIcon(provider, modelName);
const ProviderIcon = getProviderIcon(provider, modelName);
const handlePanelClick = useCallback(() => {
if (!isHidden && !isPreferred) onSelect();
@@ -88,7 +88,7 @@ export default function MultiModelPanel({
sizePreset="main-ui"
variant="body"
paddingVariant="lg"
icon={ModelIcon}
icon={ProviderIcon}
title={isHidden ? markdown(`~~${displayName}~~`) : displayName}
rightChildren={
<div className="flex items-center gap-1 px-2">

View File

@@ -18,7 +18,7 @@ import {
isRecommendedModel,
} from "@/app/craft/onboarding/constants";
import { ToggleWarningModal } from "./ToggleWarningModal";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
import { Section } from "@/layouts/general-layouts";
import {
Accordion,
@@ -365,7 +365,9 @@ export function BuildLLMPopover({
const isExpanded = expandedGroups.includes(
group.providerKey
);
const ModelIcon = getModelIcon(group.providerKey);
const ProviderIcon = getProviderIcon(
group.providerKey
);
return (
<AccordionItem
@@ -377,7 +379,7 @@ export function BuildLLMPopover({
<AccordionTrigger className="flex items-center rounded-08 hover:no-underline hover:bg-background-tint-02 group [&>svg]:hidden w-full py-1">
<div className="flex items-center gap-1 shrink-0">
<div className="flex items-center justify-center size-5 shrink-0">
<ModelIcon size={16} />
<ProviderIcon size={16} />
</div>
<Text
secondaryBody

View File

@@ -48,7 +48,7 @@ import NotAllowedModal from "@/app/craft/onboarding/components/NotAllowedModal";
import { useOnboarding } from "@/app/craft/onboarding/BuildOnboardingProvider";
import { useLLMProviders } from "@/hooks/useLLMProviders";
import { useUser } from "@/providers/UserProvider";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
import {
getBuildUserPersona,
getPersonaInfo,
@@ -475,10 +475,10 @@ export default function BuildConfigPage() {
>
{pendingLlmSelection?.provider &&
(() => {
const ModelIcon = getModelIcon(
const ProviderIcon = getProviderIcon(
pendingLlmSelection.provider
);
return <ModelIcon className="w-4 h-4" />;
return <ProviderIcon className="w-4 h-4" />;
})()}
<Text mainUiAction>{pendingLlmDisplayName}</Text>
<SvgChevronDown className="w-4 h-4 text-text-03" />

View File

@@ -3,14 +3,14 @@
import { useMemo } from "react";
import { parseLlmDescriptor, structureValue } from "@/lib/llmConfig/utils";
import { DefaultModel, LLMProviderDescriptor } from "@/interfaces/llm";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import { createIcon } from "@/components/icons/icons";
interface LLMOption {
name: string;
value: string;
icon: ReturnType<typeof getModelIcon>;
icon: ReturnType<typeof getProviderIcon>;
modelName: string;
providerName: string;
provider: string;
@@ -85,7 +85,7 @@ export default function LLMSelector({
provider.provider,
modelConfiguration.name
),
icon: getModelIcon(provider.provider, modelConfiguration.name),
icon: getProviderIcon(provider.provider, modelConfiguration.name),
modelName: modelConfiguration.name,
providerName: provider.name,
provider: provider.provider,

View File

@@ -1,4 +1,7 @@
import type { OnboardingActions } from "@/interfaces/onboarding";
import type {
OnboardingState,
OnboardingActions,
} from "@/interfaces/onboarding";
export enum LLMProviderName {
OPENAI = "openai",

View File

@@ -32,7 +32,7 @@ import {
PersonaLabel,
} from "@/app/admin/agents/interfaces";
import { DefaultModel, LLMProviderDescriptor } from "@/interfaces/llm";
import { isAnthropic } from "@/lib/llmConfig/svc";
import { isAnthropic } from "@/app/admin/configuration/llm/utils";
import { getSourceMetadataForSources } from "./sources";
import { AuthType, NEXT_PUBLIC_CLOUD_ENABLED } from "./constants";
import { useUser } from "@/providers/UserProvider";

View File

@@ -14,28 +14,8 @@ import {
SvgLitellm,
SvgLmStudio,
} from "@opal/icons";
import {
MicrosoftIconSVG,
MistralIcon,
MetaIcon,
DeepseekIcon,
QwenIcon,
ZAIIcon,
} from "@/components/icons/icons";
import { LLMProviderName } from "@/interfaces/llm";
export const AGGREGATOR_PROVIDERS = new Set([
LLMProviderName.BEDROCK,
"bedrock_converse",
LLMProviderName.OPENROUTER,
LLMProviderName.OLLAMA_CHAT,
LLMProviderName.LM_STUDIO,
LLMProviderName.LITELLM_PROXY,
LLMProviderName.BIFROST,
LLMProviderName.OPENAI_COMPATIBLE,
LLMProviderName.VERTEX_AI,
]);
const PROVIDER_ICONS: Record<string, IconFunctionComponent> = {
[LLMProviderName.OPENAI]: SvgOpenai,
[LLMProviderName.ANTHROPIC]: SvgClaude,
@@ -101,80 +81,3 @@ export function getProviderDisplayName(providerName: string): string {
export function getProviderIcon(providerName: string): IconFunctionComponent {
return PROVIDER_ICONS[providerName] ?? SvgCpu;
}
// ---------------------------------------------------------------------------
// Model-aware icon resolver (legacy icon set)
// ---------------------------------------------------------------------------
const MODEL_ICON_MAP: Record<string, IconFunctionComponent> = {
[LLMProviderName.OPENAI]: SvgOpenai,
[LLMProviderName.ANTHROPIC]: SvgClaude,
[LLMProviderName.OLLAMA_CHAT]: SvgOllama,
[LLMProviderName.LM_STUDIO]: SvgLmStudio,
[LLMProviderName.OPENROUTER]: SvgOpenrouter,
[LLMProviderName.VERTEX_AI]: SvgGemini,
[LLMProviderName.BEDROCK]: SvgAws,
[LLMProviderName.LITELLM_PROXY]: SvgLitellm,
[LLMProviderName.BIFROST]: SvgBifrost,
[LLMProviderName.OPENAI_COMPATIBLE]: SvgPlug,
amazon: SvgAws,
phi: MicrosoftIconSVG,
mistral: MistralIcon,
ministral: MistralIcon,
llama: MetaIcon,
ollama: SvgOllama,
gemini: SvgGemini,
deepseek: DeepseekIcon,
claude: SvgClaude,
azure: SvgAzure,
microsoft: MicrosoftIconSVG,
meta: MetaIcon,
google: SvgGemini,
qwen: QwenIcon,
qwq: QwenIcon,
zai: ZAIIcon,
bedrock_converse: SvgAws,
};
/**
* Model-aware icon resolver that checks both provider name and model name
* to pick the most specific icon (e.g. Claude icon for a Bedrock Claude model).
*/
export const getModelIcon = (
providerName: string,
modelName?: string
): IconFunctionComponent => {
const lowerProviderName = providerName.toLowerCase();
// For aggregator providers, prioritise showing the vendor icon based on model name
if (AGGREGATOR_PROVIDERS.has(lowerProviderName) && modelName) {
const lowerModelName = modelName.toLowerCase();
for (const [key, icon] of Object.entries(MODEL_ICON_MAP)) {
if (lowerModelName.includes(key)) {
return icon;
}
}
}
// Check if provider name directly matches an icon
if (lowerProviderName in MODEL_ICON_MAP) {
const icon = MODEL_ICON_MAP[lowerProviderName];
if (icon) {
return icon;
}
}
// For non-aggregator providers, check if model name contains any of the keys
if (modelName) {
const lowerModelName = modelName.toLowerCase();
for (const [key, icon] of Object.entries(MODEL_ICON_MAP)) {
if (lowerModelName.includes(key)) {
return icon;
}
}
}
// Fallback to CPU icon if no matches
return SvgCpu;
};

View File

@@ -1,5 +1,5 @@
/**
* LLM action functions for mutations and model fetching.
* LLM action functions for mutations.
*
* These are async functions for one-off actions that don't need SWR caching.
*
@@ -7,31 +7,12 @@
* - /api/admin/llm/test/default - Test the default LLM provider connection
* - /api/admin/llm/default - Set the default LLM model
* - /api/admin/llm/provider/{id} - Delete an LLM provider
* - /api/admin/llm/{provider}/available-models - Fetch available models for a provider
*/
import {
LLM_ADMIN_URL,
LLM_PROVIDERS_ADMIN_URL,
} from "@/lib/llmConfig/constants";
import {
OllamaModelResponse,
OpenRouterModelResponse,
BedrockModelResponse,
LMStudioModelResponse,
LiteLLMProxyModelResponse,
BifrostModelResponse,
ModelConfiguration,
LLMProviderName,
BedrockFetchParams,
OllamaFetchParams,
LMStudioFetchParams,
OpenRouterFetchParams,
LiteLLMProxyFetchParams,
BifrostFetchParams,
OpenAICompatibleFetchParams,
OpenAICompatibleModelResponse,
} from "@/interfaces/llm";
/**
* Test the default LLM provider.
@@ -76,522 +57,15 @@ export async function setDefaultLlmModel(
/**
* Delete an LLM provider.
* @param providerId - The provider ID to delete
* @param force - Force delete even if this is the default provider
* @throws Error with the detail message from the API on failure
*/
export async function deleteLlmProvider(
providerId: number,
force = false
): Promise<void> {
const url = force
? `${LLM_PROVIDERS_ADMIN_URL}/${providerId}?force=true`
: `${LLM_PROVIDERS_ADMIN_URL}/${providerId}`;
const response = await fetch(url, { method: "DELETE" });
export async function deleteLlmProvider(providerId: number): Promise<void> {
const response = await fetch(`${LLM_PROVIDERS_ADMIN_URL}/${providerId}`, {
method: "DELETE",
});
if (!response.ok) {
const errorMsg = (await response.json()).detail;
throw new Error(errorMsg);
}
}
// ---------------------------------------------------------------------------
// Aggregator providers & helpers
// ---------------------------------------------------------------------------
/** Aggregator providers that host models from multiple vendors. */
export const AGGREGATOR_PROVIDERS = new Set([
"bedrock",
"bedrock_converse",
"openrouter",
"ollama_chat",
"lm_studio",
"litellm_proxy",
"bifrost",
"openai_compatible",
"vertex_ai",
]);
export const isAnthropic = (provider: string, modelName?: string) =>
provider === LLMProviderName.ANTHROPIC ||
!!modelName?.toLowerCase().includes("claude");
// ---------------------------------------------------------------------------
// Model fetching
// ---------------------------------------------------------------------------
/**
* Fetches Bedrock models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchBedrockModels = async (
  params: BedrockFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  // A region is mandatory for any Bedrock listing call; bail before the network.
  if (!params.aws_region_name) {
    return { models: [], error: "AWS region is required" };
  }
  try {
    const resp = await fetch("/api/admin/llm/bedrock/available-models", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        aws_region_name: params.aws_region_name,
        aws_access_key_id: params.aws_access_key_id,
        aws_secret_access_key: params.aws_secret_access_key,
        aws_bearer_token_bedrock: params.aws_bearer_token_bedrock,
        provider_name: params.provider_name,
      }),
    });
    if (!resp.ok) {
      // Prefer the backend's detail/message fields; fall back to a generic error.
      let message = "Failed to fetch models";
      try {
        const errBody = await resp.json();
        message = errBody.detail || errBody.message || message;
      } catch {
        // Error body was not JSON; keep the generic message.
      }
      return { models: [], error: message };
    }
    const payload: BedrockModelResponse[] = await resp.json();
    return {
      // Bedrock models start hidden; admins opt them in individually.
      models: payload.map((m) => ({
        name: m.name,
        display_name: m.display_name,
        is_visible: false,
        max_input_tokens: m.max_input_tokens,
        supports_image_input: m.supports_image_input,
        supports_reasoning: false,
      })),
    };
  } catch (err) {
    return {
      models: [],
      error: err instanceof Error ? err.message : "Unknown error",
    };
  }
};
/**
* Fetches Ollama models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchOllamaModels = async (
  params: OllamaFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  // An API base is mandatory; bail out before touching the network.
  if (!params.api_base) {
    return { models: [], error: "API Base is required" };
  }
  try {
    const resp = await fetch("/api/admin/llm/ollama/available-models", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        api_base: params.api_base,
        provider_name: params.provider_name,
      }),
      signal: params.signal,
    });
    if (!resp.ok) {
      // Prefer the backend's detail/message fields; fall back to a generic error.
      let message = "Failed to fetch models";
      try {
        const errBody = await resp.json();
        message = errBody.detail || errBody.message || message;
      } catch {
        // Error body was not JSON; keep the generic message.
      }
      return { models: [], error: message };
    }
    const payload: OllamaModelResponse[] = await resp.json();
    return {
      models: payload.map((m) => ({
        name: m.name,
        display_name: m.display_name,
        is_visible: true,
        max_input_tokens: m.max_input_tokens,
        supports_image_input: m.supports_image_input,
        supports_reasoning: false,
      })),
    };
  } catch (err) {
    // Covers network failures and aborts triggered via params.signal.
    return {
      models: [],
      error: err instanceof Error ? err.message : "Unknown error",
    };
  }
};
/**
* Fetches OpenRouter models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchOpenRouterModels = async (
  params: OpenRouterFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  // Both the base URL and the key are required before calling out.
  if (!params.api_base) {
    return { models: [], error: "API Base is required" };
  }
  if (!params.api_key) {
    return { models: [], error: "API Key is required" };
  }
  try {
    const resp = await fetch("/api/admin/llm/openrouter/available-models", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        api_base: params.api_base,
        api_key: params.api_key,
        provider_name: params.provider_name,
      }),
    });
    if (!resp.ok) {
      let message = "Failed to fetch models";
      try {
        const errBody = await resp.json();
        message = errBody.detail || errBody.message || message;
      } catch (jsonError) {
        // Error body was not JSON — log it and keep the generic message.
        console.warn(
          "Failed to parse OpenRouter model fetch error response",
          jsonError
        );
      }
      return { models: [], error: message };
    }
    const payload: OpenRouterModelResponse[] = await resp.json();
    return {
      models: payload.map((m) => ({
        name: m.name,
        display_name: m.display_name,
        is_visible: true,
        max_input_tokens: m.max_input_tokens,
        supports_image_input: m.supports_image_input,
        supports_reasoning: false,
      })),
    };
  } catch (err) {
    return {
      models: [],
      error: err instanceof Error ? err.message : "Unknown error",
    };
  }
};
/**
* Fetches LM Studio models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchLMStudioModels = async (
  params: LMStudioFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  // An API base is mandatory; bail out before touching the network.
  if (!params.api_base) {
    return { models: [], error: "API Base is required" };
  }
  try {
    const resp = await fetch("/api/admin/llm/lm-studio/available-models", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        api_base: params.api_base,
        api_key: params.api_key,
        api_key_changed: params.api_key_changed ?? false,
        provider_name: params.provider_name,
      }),
      signal: params.signal,
    });
    if (!resp.ok) {
      let message = "Failed to fetch models";
      try {
        const errBody = await resp.json();
        message = errBody.detail || errBody.message || message;
      } catch (jsonError) {
        // Error body was not JSON — log it and keep the generic message.
        console.warn(
          "Failed to parse LM Studio model fetch error response",
          jsonError
        );
      }
      return { models: [], error: message };
    }
    const payload: LMStudioModelResponse[] = await resp.json();
    return {
      models: payload.map((m) => ({
        name: m.name,
        display_name: m.display_name,
        is_visible: true,
        max_input_tokens: m.max_input_tokens,
        supports_image_input: m.supports_image_input,
        supports_reasoning: m.supports_reasoning,
      })),
    };
  } catch (err) {
    // Covers network failures and aborts triggered via params.signal.
    return {
      models: [],
      error: err instanceof Error ? err.message : "Unknown error",
    };
  }
};
/**
* Fetches Bifrost models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchBifrostModels = async (
  params: BifrostFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  // An API base is mandatory; bail out before touching the network.
  if (!params.api_base) {
    return { models: [], error: "API Base is required" };
  }
  try {
    const resp = await fetch("/api/admin/llm/bifrost/available-models", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        api_base: params.api_base,
        api_key: params.api_key,
        provider_name: params.provider_name,
      }),
      signal: params.signal,
    });
    if (!resp.ok) {
      let message = "Failed to fetch models";
      try {
        const errBody = await resp.json();
        message = errBody.detail || errBody.message || message;
      } catch (jsonError) {
        // Error body was not JSON — log it and keep the generic message.
        console.warn(
          "Failed to parse Bifrost model fetch error response",
          jsonError
        );
      }
      return { models: [], error: message };
    }
    const payload: BifrostModelResponse[] = await resp.json();
    return {
      models: payload.map((m) => ({
        name: m.name,
        display_name: m.display_name,
        is_visible: true,
        max_input_tokens: m.max_input_tokens,
        supports_image_input: m.supports_image_input,
        supports_reasoning: m.supports_reasoning,
      })),
    };
  } catch (err) {
    // Covers network failures and aborts triggered via params.signal.
    return {
      models: [],
      error: err instanceof Error ? err.message : "Unknown error",
    };
  }
};
/**
* Fetches models from a generic OpenAI-compatible server.
* Uses snake_case params to match API structure.
*/
export const fetchOpenAICompatibleModels = async (
  params: OpenAICompatibleFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  // An API base is mandatory; bail out before touching the network.
  if (!params.api_base) {
    return { models: [], error: "API Base is required" };
  }
  try {
    const resp = await fetch(
      "/api/admin/llm/openai-compatible/available-models",
      {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
          api_base: params.api_base,
          api_key: params.api_key,
          provider_name: params.provider_name,
        }),
        signal: params.signal,
      }
    );
    if (!resp.ok) {
      // Prefer the backend's detail/message fields; fall back to a generic error.
      let message = "Failed to fetch models";
      try {
        const errBody = await resp.json();
        message = errBody.detail || errBody.message || message;
      } catch {
        // Error body was not JSON; keep the generic message.
      }
      return { models: [], error: message };
    }
    const payload: OpenAICompatibleModelResponse[] = await resp.json();
    return {
      models: payload.map((m) => ({
        name: m.name,
        display_name: m.display_name,
        is_visible: true,
        max_input_tokens: m.max_input_tokens,
        supports_image_input: m.supports_image_input,
        supports_reasoning: m.supports_reasoning,
      })),
    };
  } catch (err) {
    // Covers network failures and aborts triggered via params.signal.
    return {
      models: [],
      error: err instanceof Error ? err.message : "Unknown error",
    };
  }
};
/**
* Fetches LiteLLM Proxy models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchLiteLLMProxyModels = async (
  params: LiteLLMProxyFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  // Both the base URL and the key are required before calling out.
  if (!params.api_base) {
    return { models: [], error: "API Base is required" };
  }
  if (!params.api_key) {
    return { models: [], error: "API Key is required" };
  }
  try {
    const resp = await fetch("/api/admin/llm/litellm/available-models", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        api_base: params.api_base,
        api_key: params.api_key,
        provider_name: params.provider_name,
      }),
      signal: params.signal,
    });
    if (!resp.ok) {
      // Prefer the backend's detail/message fields; fall back to a generic error.
      let message = "Failed to fetch models";
      try {
        const errBody = await resp.json();
        message = errBody.detail || errBody.message || message;
      } catch {
        // Error body was not JSON; keep the generic message.
      }
      return { models: [], error: message };
    }
    const payload: LiteLLMProxyModelResponse[] = await resp.json();
    return {
      models: payload.map((m) => ({
        // The proxy only reports a model name; reuse it for both identifiers.
        name: m.model_name,
        display_name: m.model_name,
        is_visible: true,
        max_input_tokens: null,
        supports_image_input: false,
        supports_reasoning: false,
      })),
    };
  } catch (err) {
    // Covers network failures and aborts triggered via params.signal.
    return {
      models: [],
      error: err instanceof Error ? err.message : "Unknown error",
    };
  }
};
/**
* Fetches models for a provider. Accepts form values directly and maps them
* to the expected fetch params format internally.
*/
export const fetchModels = async (
  providerName: string,
  formValues: {
    api_base?: string;
    api_key?: string;
    api_key_changed?: boolean;
    name?: string;
    custom_config?: Record<string, string>;
    model_configurations?: ModelConfiguration[];
  },
  signal?: AbortSignal
) => {
  const cfg = formValues.custom_config || {};
  // Dispatch to the provider-specific fetcher, mapping form values to its
  // snake_case params. Unknown providers yield an explicit error result.
  if (providerName === LLMProviderName.BEDROCK) {
    return fetchBedrockModels({
      aws_region_name: cfg.AWS_REGION_NAME || "",
      aws_access_key_id: cfg.AWS_ACCESS_KEY_ID,
      aws_secret_access_key: cfg.AWS_SECRET_ACCESS_KEY,
      aws_bearer_token_bedrock: cfg.AWS_BEARER_TOKEN_BEDROCK,
      provider_name: formValues.name,
    });
  }
  if (providerName === LLMProviderName.OLLAMA_CHAT) {
    return fetchOllamaModels({
      api_base: formValues.api_base,
      provider_name: formValues.name,
      signal,
    });
  }
  if (providerName === LLMProviderName.LM_STUDIO) {
    return fetchLMStudioModels({
      api_base: formValues.api_base,
      api_key: cfg.LM_STUDIO_API_KEY,
      api_key_changed: formValues.api_key_changed ?? false,
      provider_name: formValues.name,
      signal,
    });
  }
  if (providerName === LLMProviderName.OPENROUTER) {
    return fetchOpenRouterModels({
      api_base: formValues.api_base,
      api_key: formValues.api_key,
      provider_name: formValues.name,
    });
  }
  if (providerName === LLMProviderName.LITELLM_PROXY) {
    return fetchLiteLLMProxyModels({
      api_base: formValues.api_base,
      api_key: formValues.api_key,
      provider_name: formValues.name,
      signal,
    });
  }
  if (providerName === LLMProviderName.BIFROST) {
    return fetchBifrostModels({
      api_base: formValues.api_base,
      api_key: formValues.api_key,
      provider_name: formValues.name,
      signal,
    });
  }
  if (providerName === LLMProviderName.OPENAI_COMPATIBLE) {
    return fetchOpenAICompatibleModels({
      api_base: formValues.api_base,
      api_key: formValues.api_key,
      provider_name: formValues.name,
      signal,
    });
  }
  return { models: [], error: `Unknown provider: ${providerName}` };
};

View File

@@ -0,0 +1,38 @@
import { LLMProviderResponse, VisionProvider } from "@/interfaces/llm";
import { LLM_ADMIN_URL } from "@/lib/llmConfig/constants";
/**
 * Fetch all vision-capable providers from the admin API.
 * @throws Error including the raw response text when the request fails.
 */
export async function fetchVisionProviders(): Promise<VisionProvider[]> {
  const res = await fetch(`${LLM_ADMIN_URL}/vision-providers`, {
    headers: {
      "Content-Type": "application/json",
    },
  });
  if (!res.ok) {
    const body = await res.text();
    throw new Error(`Failed to fetch vision providers: ${body}`);
  }
  const payload = (await res.json()) as LLMProviderResponse<VisionProvider>;
  return payload.providers;
}
/**
 * Mark a provider/model pair as the default for vision (image input).
 * @throws Error carrying the API's raw error text on failure.
 */
export async function setDefaultVisionProvider(
  providerId: number,
  visionModel: string
): Promise<void> {
  const res = await fetch(`${LLM_ADMIN_URL}/default-vision`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      provider_id: providerId,
      model_name: visionModel,
    }),
  });
  if (!res.ok) {
    throw new Error(await res.text());
  }
}

View File

@@ -1,7 +1,7 @@
"use client";
import React from "react";
import type { IconProps, RichStr } from "@opal/types";
import type { IconProps } from "@opal/types";
import Text from "@/refresh-components/texts/Text";
import { Button } from "@opal/components";
import Modal from "@/refresh-components/Modal";
@@ -9,8 +9,8 @@ import { useModalClose } from "../contexts/ModalContext";
export interface ConfirmationModalProps {
icon: React.FunctionComponent<IconProps>;
title: string | RichStr;
description?: string | RichStr;
title: string;
description?: string;
children?: React.ReactNode;
submit: React.ReactNode;

View File

@@ -4,9 +4,11 @@ import { useState, useEffect, useCallback, useMemo, useRef } from "react";
import Popover from "@/refresh-components/Popover";
import { LlmDescriptor, LlmManager } from "@/lib/hooks";
import { structureValue } from "@/lib/llmConfig/utils";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { AGGREGATOR_PROVIDERS } from "@/lib/llmConfig/svc";
import {
getProviderIcon,
AGGREGATOR_PROVIDERS,
} from "@/app/admin/configuration/llm/utils";
import { LLMProviderDescriptor } from "@/interfaces/llm";
import { Slider } from "@/components/ui/slider";
import { useUser } from "@/providers/UserProvider";
import Text from "@/refresh-components/texts/Text";
@@ -53,7 +55,7 @@ export function groupLlmOptions(
groups.set(groupKey, {
displayName,
options: [],
Icon: getModelIcon(provider),
Icon: getProviderIcon(provider),
});
}
@@ -191,7 +193,7 @@ export default function LLMPopover({
icon={
foldable
? SvgRefreshCw
: getModelIcon(
: getProviderIcon(
llmManager.currentLlm.provider,
llmManager.currentLlm.modelName
)

View File

@@ -3,7 +3,7 @@
import { useState, useMemo, useRef } from "react";
import Popover from "@/refresh-components/Popover";
import { LlmManager } from "@/lib/hooks";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
import { Button, SelectButton, OpenButton } from "@opal/components";
import { SvgPlusCircle, SvgX } from "@opal/icons";
import { LLMOption } from "@/refresh-components/popovers/interfaces";
@@ -152,7 +152,7 @@ export default function ModelSelector({
)}
<div className="flex items-center shrink-0">
{selectedModels.map((model, index) => {
const ProviderIcon = getModelIcon(
const ProviderIcon = getProviderIcon(
model.provider,
model.modelName
);

View File

@@ -18,7 +18,7 @@ import {
unsetDefaultImageGenerationConfig,
deleteImageGenerationConfig,
} from "@/refresh-pages/admin/ImageGenerationPage/svc";
import ModelIcon from "@/app/admin/configuration/llm/ModelIcon";
import { ProviderIcon } from "@/app/admin/configuration/llm/ProviderIcon";
import Message from "@/refresh-components/messages/Message";
import ConfirmationModalLayout from "@/refresh-components/layouts/ConfirmationModalLayout";
import InputSelect from "@/refresh-components/inputs/InputSelect";
@@ -264,7 +264,7 @@ export default function ImageGenerationContent() {
sizePreset="main-ui"
variant="section"
icon={() => (
<ModelIcon
<ProviderIcon
provider={provider.provider_name}
size={16}
/>
@@ -391,7 +391,7 @@ export default function ImageGenerationContent() {
key={p.image_provider_id}
value={p.image_provider_id}
icon={() => (
<ModelIcon
<ProviderIcon
provider={p.provider_name}
size={16}
/>

View File

@@ -3,7 +3,7 @@
import React, { useState, useMemo, useEffect } from "react";
import { Form, Formik, FormikProps } from "formik";
import ProviderModal from "@/components/modals/ProviderModal";
import ModelIcon from "@/app/admin/configuration/llm/ModelIcon";
import { ProviderIcon } from "@/app/admin/configuration/llm/ProviderIcon";
import ConnectionProviderIcon from "@/refresh-components/ConnectionProviderIcon";
import {
testImageGenerationApiKey,
@@ -246,7 +246,7 @@ export function ImageGenFormWrapper<T extends FormValues>({
const icon = () => (
<ConnectionProviderIcon
icon={<ModelIcon provider={imageProvider.provider_name} size={24} />}
icon={<ProviderIcon provider={imageProvider.provider_name} size={24} />}
/>
);

View File

@@ -8,8 +8,8 @@ import {
useWellKnownLLMProviders,
} from "@/hooks/useLLMProviders";
import { ThreeDotsLoader } from "@/components/Loading";
import { Content, Card as CardLayout } from "@opal/layouts";
import { Button, SelectCard, Text, Card } from "@opal/components";
import { Content, Card } from "@opal/layouts";
import { Button, SelectCard } from "@opal/components";
import { Hoverable } from "@opal/core";
import { SvgArrowExchange, SvgSettings, SvgTrash } from "@opal/icons";
import * as SettingsLayouts from "@/layouts/settings-layouts";
@@ -22,7 +22,9 @@ import {
} from "@/lib/llmConfig/providers";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
import { deleteLlmProvider, setDefaultLlmModel } from "@/lib/llmConfig/svc";
import Text from "@/refresh-components/texts/Text";
import { Horizontal as HorizontalInput } from "@/layouts/input-layouts";
import LegacyCard from "@/refresh-components/cards/Card";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import Message from "@/refresh-components/messages/Message";
import ConfirmationModalLayout from "@/refresh-components/layouts/ConfirmationModalLayout";
@@ -47,7 +49,6 @@ import LiteLLMProxyModal from "@/sections/modals/llmConfig/LiteLLMProxyModal";
import BifrostModal from "@/sections/modals/llmConfig/BifrostModal";
import OpenAICompatibleModal from "@/sections/modals/llmConfig/OpenAICompatibleModal";
import { Section } from "@/layouts/general-layouts";
import { markdown } from "@opal/utils";
const route = ADMIN_ROUTES.LLM_MODELS;
@@ -140,7 +141,7 @@ function ExistingProviderCard({
const handleDelete = async () => {
try {
await deleteLlmProvider(provider.id, isLastProvider);
await deleteLlmProvider(provider.id);
await refreshLlmProviderCaches(mutate);
deleteModal.toggle(false);
toast.success("Provider deleted successfully!");
@@ -155,37 +156,24 @@ function ExistingProviderCard({
{deleteModal.isOpen && (
<ConfirmationModalLayout
icon={SvgTrash}
title={markdown(`Delete *${provider.name}*`)}
title={`Delete ${provider.name}`}
onClose={() => deleteModal.toggle(false)}
submit={
<Button
variant="danger"
onClick={handleDelete}
disabled={isDefault && !isLastProvider}
>
<Button variant="danger" onClick={handleDelete}>
Delete
</Button>
}
>
<Section alignItems="start" gap={0.5}>
{isDefault && !isLastProvider ? (
<Text font="main-ui-body" color="text-03">
Cannot delete the default provider. Select another provider as
the default prior to deleting this one.
<Text text03>
All LLM models from provider <b>{provider.name}</b> will be
removed and unavailable for future chats. Chat history will be
preserved.
</Text>
{isLastProvider && (
<Text text03>
Connect another provider to continue using chats.
</Text>
) : (
<>
<Text font="main-ui-body" color="text-03">
{markdown(
`All LLM models from provider **${provider.name}** will be removed and unavailable for future chats. Chat history will be preserved.`
)}
</Text>
{isLastProvider && (
<Text font="main-ui-body" color="text-03">
Connect another provider to continue using chats.
</Text>
)}
</>
)}
</Section>
</ConfirmationModalLayout>
@@ -201,7 +189,7 @@ function ExistingProviderCard({
rounding="lg"
onClick={() => setIsOpen(true)}
>
<CardLayout.Header
<Card.Header
icon={getProviderIcon(provider.provider)}
title={provider.name}
description={getProviderDisplayName(provider.provider)}
@@ -271,7 +259,7 @@ function NewProviderCard({
rounding="lg"
onClick={() => setIsOpen(true)}
>
<CardLayout.Header
<Card.Header
icon={getProviderIcon(provider.name)}
title={getProviderProductName(provider.name)}
description={getProviderDisplayName(provider.name)}
@@ -315,7 +303,7 @@ function NewCustomProviderCard({
rounding="lg"
onClick={() => setIsOpen(true)}
>
<CardLayout.Header
<Card.Header
icon={getProviderIcon("custom")}
title={getProviderProductName("custom")}
description={getProviderDisplayName("custom")}
@@ -404,7 +392,7 @@ export default function LLMProviderConfigurationPage() {
<SettingsLayouts.Body>
{hasProviders ? (
<Card border="solid" rounding="lg">
<LegacyCard>
<HorizontalInput
title="Default Model"
description="This model will be used by Onyx by default in your chats."
@@ -435,7 +423,7 @@ export default function LLMProviderConfigurationPage() {
</InputSelect.Content>
</InputSelect>
</HorizontalInput>
</Card>
</LegacyCard>
) : (
<Message
info

View File

@@ -27,13 +27,14 @@ import {
ModelAccessField,
ModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import { fetchBedrockModels } from "@/lib/llmConfig/svc";
import { fetchBedrockModels } from "@/app/admin/configuration/llm/utils";
import { Card } from "@opal/components";
import { Section } from "@/layouts/general-layouts";
import { SvgAlertCircle } from "@opal/icons";
import { Content } from "@opal/layouts";
import { toast } from "@/hooks/useToast";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
import useOnMount from "@/hooks/useOnMount";
const AWS_REGION_OPTIONS = [
{ name: "us-east-1", value: "us-east-1" },
@@ -122,6 +123,17 @@ function BedrockModalInternals({
formikProps.setFieldValue("model_configurations", models);
};
// Auto-fetch models on initial load when editing an existing provider
useOnMount(() => {
if (existingLlmProvider && !isFetchDisabled) {
handleFetchModels().catch((err) => {
toast.error(
err instanceof Error ? err.message : "Failed to fetch models"
);
});
}
});
return (
<>
<InputLayouts.FieldPadder>

View File

@@ -1,5 +1,6 @@
"use client";
import { useEffect } from "react";
import { markdown } from "@opal/utils";
import { useSWRConfig } from "swr";
import { useFormikContext } from "formik";
@@ -9,7 +10,7 @@ import {
LLMProviderName,
LLMProviderView,
} from "@/interfaces/llm";
import { fetchBifrostModels } from "@/lib/llmConfig/svc";
import { fetchBifrostModels } from "@/app/admin/configuration/llm/utils";
import {
useInitialValues,
buildValidationSchema,
@@ -58,6 +59,19 @@ function BifrostModalInternals({
formikProps.setFieldValue("model_configurations", models);
};
// Auto-fetch models on initial load when editing an existing provider
useEffect(() => {
if (existingLlmProvider && !isFetchDisabled) {
handleFetchModels().catch((err) => {
console.error("Failed to fetch Bifrost models:", err);
toast.error(
err instanceof Error ? err.message : "Failed to fetch models"
);
});
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
return (
<>
<APIBaseField

View File

@@ -99,6 +99,7 @@ function ModelConfigurationItem({
/>
<Button
disabled={!canRemove}
tooltip={!canRemove ? "At least one model is required" : undefined}
prominence="tertiary"
icon={SvgMinusCircle}
onClick={onRemove}
@@ -335,21 +336,23 @@ export default function CustomModal({
});
}}
>
<InputLayouts.FieldPadder>
<InputLayouts.Vertical
name="provider"
title="Provider Name"
subDescription={markdown(
"Should be one of the providers listed at [LiteLLM](https://docs.litellm.ai/docs/providers)."
)}
>
<InputTypeInField
{!isOnboarding && (
<InputLayouts.FieldPadder>
<InputLayouts.Vertical
name="provider"
placeholder="Provider Name as shown on LiteLLM"
variant={existingLlmProvider ? "disabled" : undefined}
/>
</InputLayouts.Vertical>
</InputLayouts.FieldPadder>
title="Provider Name"
subDescription={markdown(
"Should be one of the providers listed at [LiteLLM](https://docs.litellm.ai/docs/providers)."
)}
>
<InputTypeInField
name="provider"
placeholder="Provider Name as shown on LiteLLM"
variant={existingLlmProvider ? "disabled" : undefined}
/>
</InputLayouts.Vertical>
</InputLayouts.FieldPadder>
)}
<APIBaseField optional />

View File

@@ -1,5 +1,6 @@
"use client";
import { useCallback, useEffect, useMemo } from "react";
import { useSWRConfig } from "swr";
import { useFormikContext } from "formik";
import * as InputLayouts from "@/layouts/input-layouts";
@@ -23,7 +24,8 @@ import {
ModelAccessField,
ModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import { fetchModels } from "@/lib/llmConfig/svc";
import { fetchModels } from "@/app/admin/configuration/llm/utils";
import debounce from "lodash/debounce";
import { toast } from "@/hooks/useToast";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
@@ -46,23 +48,54 @@ function LMStudioModalInternals({
isOnboarding,
}: LMStudioModalInternalsProps) {
const formikProps = useFormikContext<LMStudioModalValues>();
const initialApiKey = existingLlmProvider?.custom_config?.LM_STUDIO_API_KEY;
const isFetchDisabled = !formikProps.values.api_base;
const doFetchModels = useCallback(
(apiBase: string, apiKey: string | undefined, signal: AbortSignal) => {
fetchModels(
LLMProviderName.LM_STUDIO,
{
api_base: apiBase,
custom_config: apiKey ? { LM_STUDIO_API_KEY: apiKey } : {},
api_key_changed: apiKey !== initialApiKey,
name: existingLlmProvider?.name,
},
signal
).then((data) => {
if (signal.aborted) return;
if (data.error) {
toast.error(data.error);
formikProps.setFieldValue("model_configurations", []);
return;
}
formikProps.setFieldValue("model_configurations", data.models);
});
},
// eslint-disable-next-line react-hooks/exhaustive-deps
[existingLlmProvider?.name, initialApiKey]
);
const handleFetchModels = async () => {
const apiKey = formikProps.values.custom_config?.LM_STUDIO_API_KEY;
const initialApiKey = existingLlmProvider?.custom_config?.LM_STUDIO_API_KEY;
const data = await fetchModels(LLMProviderName.LM_STUDIO, {
api_base: formikProps.values.api_base,
custom_config: apiKey ? { LM_STUDIO_API_KEY: apiKey } : {},
api_key_changed: apiKey !== initialApiKey,
name: existingLlmProvider?.name,
});
if (data.error) {
throw new Error(data.error);
const debouncedFetchModels = useMemo(
() => debounce(doFetchModels, 500),
[doFetchModels]
);
const apiBase = formikProps.values.api_base;
const apiKey = formikProps.values.custom_config?.LM_STUDIO_API_KEY;
useEffect(() => {
if (apiBase) {
const controller = new AbortController();
debouncedFetchModels(apiBase, apiKey, controller.signal);
return () => {
debouncedFetchModels.cancel();
controller.abort();
};
} else {
formikProps.setFieldValue("model_configurations", []);
}
formikProps.setFieldValue("model_configurations", data.models);
};
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [apiBase, apiKey, debouncedFetchModels]);
return (
<>
@@ -72,7 +105,6 @@ function LMStudioModalInternals({
/>
<APIKeyField
name="custom_config.LM_STUDIO_API_KEY"
optional
subDescription="Optional API key if your LM Studio server requires authentication."
/>
@@ -85,10 +117,7 @@ function LMStudioModalInternals({
)}
<InputLayouts.FieldSeparator />
<ModelSelectionField
shouldShowAutoUpdateToggle={false}
onRefetch={isFetchDisabled ? undefined : handleFetchModels}
/>
<ModelSelectionField shouldShowAutoUpdateToggle={false} />
{!isOnboarding && (
<>

View File

@@ -1,5 +1,6 @@
"use client";
import { useEffect } from "react";
import { useSWRConfig } from "swr";
import { useFormikContext } from "formik";
import * as InputLayouts from "@/layouts/input-layouts";
@@ -8,7 +9,7 @@ import {
LLMProviderName,
LLMProviderView,
} from "@/interfaces/llm";
import { fetchLiteLLMProxyModels } from "@/lib/llmConfig/svc";
import { fetchLiteLLMProxyModels } from "@/app/admin/configuration/llm/utils";
import {
useInitialValues,
buildValidationSchema,
@@ -60,6 +61,18 @@ function LiteLLMProxyModalInternals({
formikProps.setFieldValue("model_configurations", models);
};
// Auto-fetch models on initial load when editing an existing provider
useEffect(() => {
if (existingLlmProvider && !isFetchDisabled) {
handleFetchModels().catch((err) => {
toast.error(
err instanceof Error ? err.message : "Failed to fetch models"
);
});
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
return (
<>
<APIBaseField

View File

@@ -24,12 +24,13 @@ import {
ModelAccessField,
ModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import { fetchOllamaModels } from "@/lib/llmConfig/svc";
import { fetchOllamaModels } from "@/app/admin/configuration/llm/utils";
import Tabs from "@/refresh-components/Tabs";
import { Card } from "@opal/components";
import { toast } from "@/hooks/useToast";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
import InputTypeInField from "@/refresh-components/form/InputTypeInField";
import useOnMount from "@/hooks/useOnMount";
const DEFAULT_API_BASE = "http://127.0.0.1:11434";
const CLOUD_API_BASE = "https://ollama.com";
@@ -86,6 +87,17 @@ function OllamaModalInternals({
formikProps.setFieldValue("model_configurations", models);
};
// Auto-fetch models on initial load when editing an existing provider
useOnMount(() => {
if (existingLlmProvider) {
handleFetchModels().catch((err) => {
toast.error(
err instanceof Error ? err.message : "Failed to fetch models"
);
});
}
});
return (
<>
<Card background="light" border="none" padding="sm">

View File

@@ -1,5 +1,6 @@
"use client";
import { useEffect } from "react";
import { markdown } from "@opal/utils";
import { useSWRConfig } from "swr";
import { useFormikContext } from "formik";
@@ -9,7 +10,7 @@ import {
LLMProviderName,
LLMProviderView,
} from "@/interfaces/llm";
import { fetchOpenAICompatibleModels } from "@/lib/llmConfig/svc";
import { fetchOpenAICompatibleModels } from "@/app/admin/configuration/llm/utils";
import {
useInitialValues,
buildValidationSchema,
@@ -58,6 +59,18 @@ function OpenAICompatibleModalInternals({
formikProps.setFieldValue("model_configurations", models);
};
// Auto-fetch models on initial load when editing an existing provider
useEffect(() => {
if (existingLlmProvider && !isFetchDisabled) {
handleFetchModels().catch((err) => {
toast.error(
err instanceof Error ? err.message : "Failed to fetch models"
);
});
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
return (
<>
<APIBaseField

View File

@@ -1,5 +1,6 @@
"use client";
import { useEffect } from "react";
import { useSWRConfig } from "swr";
import { useFormikContext } from "formik";
import * as InputLayouts from "@/layouts/input-layouts";
@@ -8,7 +9,7 @@ import {
LLMProviderName,
LLMProviderView,
} from "@/interfaces/llm";
import { fetchOpenRouterModels } from "@/lib/llmConfig/svc";
import { fetchOpenRouterModels } from "@/app/admin/configuration/llm/utils";
import {
useInitialValues,
buildValidationSchema,
@@ -60,6 +61,18 @@ function OpenRouterModalInternals({
formikProps.setFieldValue("model_configurations", models);
};
// Auto-fetch models on initial load when editing an existing provider
useEffect(() => {
if (existingLlmProvider && !isFetchDisabled) {
handleFetchModels().catch((err) => {
toast.error(
err instanceof Error ? err.message : "Failed to fetch models"
);
});
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
return (
<>
<APIBaseField

View File

@@ -3,8 +3,6 @@
import React, { useEffect, useRef, useState } from "react";
import { Formik, Form, useFormikContext } from "formik";
import type { FormikConfig } from "formik";
import { cn } from "@/lib/utils";
import { Interactive } from "@opal/core";
import { usePaidEnterpriseFeaturesEnabled } from "@/components/settings/usePaidEnterpriseFeaturesEnabled";
import { useAgents } from "@/hooks/useAgents";
import { useUserGroups } from "@/lib/hooks";
@@ -25,7 +23,6 @@ import { Section } from "@/layouts/general-layouts";
import { Content } from "@opal/layouts";
import {
SvgArrowExchange,
SvgChevronDown,
SvgOnyxOctagon,
SvgOrganization,
SvgPlusCircle,
@@ -76,14 +73,11 @@ export function DisplayNameField({ disabled = false }: DisplayNameFieldProps) {
// ─── APIKeyField ─────────────────────────────────────────────────────────────
export interface APIKeyFieldProps {
/** Formik field name. @default "api_key" */
name?: string;
optional?: boolean;
providerName?: string;
subDescription?: string | RichStr;
}
export function APIKeyField({
name = "api_key",
optional = false,
providerName,
subDescription,
@@ -91,7 +85,7 @@ export function APIKeyField({
return (
<InputLayouts.FieldPadder>
<InputLayouts.Vertical
name={name}
name="api_key"
title="API Key"
subDescription={
subDescription
@@ -102,7 +96,7 @@ export function APIKeyField({
}
suffix={optional ? "optional" : undefined}
>
<PasswordInputTypeInField name={name} />
<PasswordInputTypeInField name="api_key" />
</InputLayouts.Vertical>
</InputLayouts.FieldPadder>
);
@@ -404,14 +398,13 @@ function RefetchButton({ onRefetch }: RefetchButtonProps) {
}
}}
disabled={isFetching}
tooltip={isFetching ? "Fetching models..." : "Refresh models"}
/>
);
}
// ─── ModelsField ─────────────────────────────────────────────────────
const FOLD_THRESHOLD = 3;
export interface ModelSelectionFieldProps {
shouldShowAutoUpdateToggle: boolean;
onRefetch?: (signal: AbortSignal) => Promise<void> | void;
@@ -425,11 +418,7 @@ export function ModelSelectionField({
}: ModelSelectionFieldProps) {
const formikProps = useFormikContext<BaseLLMFormValues>();
const [newModelName, setNewModelName] = useState("");
const [isExpanded, setIsExpanded] = useState(false);
// When the auto-update toggle is hidden, auto mode should have no effect —
// otherwise models can't be deselected and "Select All" stays disabled.
const isAutoMode =
shouldShowAutoUpdateToggle && formikProps.values.is_auto_mode;
const isAutoMode = formikProps.values.is_auto_mode;
const models = formikProps.values.model_configurations;
// Snapshot the original model visibility so we can restore it when
@@ -480,6 +469,13 @@ export function ModelSelectionField({
const visibleModels = models.filter((m) => m.is_visible);
const selectAllDisabled = isAutoMode || models.length === 0;
const selectAllTooltip = isAutoMode
? "Disable Auto Update to manually select models"
: models.length === 0
? "No models available"
: undefined;
return (
<Card background="light" border="none" padding="sm">
<Section gap={0.5}>
@@ -491,12 +487,13 @@ export function ModelSelectionField({
>
<Section flexDirection="row" gap={0}>
<Button
disabled={isAutoMode || models.length === 0}
disabled={selectAllDisabled}
tooltip={selectAllTooltip}
prominence="tertiary"
size="md"
onClick={handleToggleSelectAll}
>
{allSelected ? "Deselect All" : "Select All"}
{allSelected ? "Unselect All" : "Select All"}
</Button>
{onRefetch && <RefetchButton onRefetch={onRefetch} />}
</Section>
@@ -506,68 +503,30 @@ export function ModelSelectionField({
<EmptyMessageCard title="No models available." padding="sm" />
) : (
<Section gap={0.25}>
{(() => {
const displayModels = isAutoMode ? visibleModels : models;
const isFoldable = displayModels.length > FOLD_THRESHOLD;
const shownModels =
isFoldable && !isExpanded
? displayModels.slice(0, FOLD_THRESHOLD)
: displayModels;
return (
<>
{shownModels.map((model) =>
isAutoMode ? (
<LineItemButton
key={model.name}
variant="section"
sizePreset="main-ui"
selectVariant="select-heavy"
state="selected"
icon={() => <Checkbox checked />}
title={model.display_name || model.name}
/>
) : (
<LineItemButton
key={model.name}
variant="section"
sizePreset="main-ui"
selectVariant="select-heavy"
state={model.is_visible ? "selected" : "empty"}
icon={() => <Checkbox checked={model.is_visible} />}
title={model.name}
onClick={() =>
setVisibility(model.name, !model.is_visible)
}
/>
)
)}
{isFoldable && (
<Interactive.Stateless
prominence="tertiary"
onClick={() => setIsExpanded(!isExpanded)}
>
<Interactive.Container type="button" widthVariant="full">
<Content
sizePreset="secondary"
variant="body"
title={isExpanded ? "Fold Models" : "More Models"}
icon={() => (
<SvgChevronDown
className={cn(
"transition-transform",
isExpanded && "-rotate-180"
)}
size={14}
/>
)}
/>
</Interactive.Container>
</Interactive.Stateless>
)}
</>
);
})()}
{isAutoMode
? visibleModels.map((model) => (
<LineItemButton
key={model.name}
variant="section"
sizePreset="main-ui"
selectVariant="select-heavy"
state="selected"
icon={() => <Checkbox checked />}
title={model.display_name || model.name}
/>
))
: models.map((model) => (
<LineItemButton
key={model.name}
variant="section"
sizePreset="main-ui"
selectVariant="select-heavy"
state={model.is_visible ? "selected" : "empty"}
icon={() => <Checkbox checked={model.is_visible} />}
title={model.name}
onClick={() => setVisibility(model.name, !model.is_visible)}
/>
))}
</Section>
)}
@@ -599,6 +558,13 @@ export function ModelSelectionField({
!newModelName.trim() ||
models.some((m) => m.name === newModelName.trim())
}
tooltip={
!newModelName.trim()
? "Enter a model name first"
: models.some((m) => m.name === newModelName.trim())
? "This model already exists"
: undefined
}
onClick={() => {
const trimmed = newModelName.trim();
if (trimmed && !models.some((m) => m.name === trimmed)) {
@@ -703,15 +669,6 @@ function ModalWrapperInner({
const isTesting = status?.isTesting === true;
const busy = isTesting || isSubmitting;
const disabledTooltip = busy
? undefined
: !isValid
? "Please fill in all required fields."
: !dirty
? "No changes to save."
: undefined;
const providerIcon = getProviderIcon(providerName);
const providerDisplayName = getProviderDisplayName(providerName);
const providerProductName = getProviderProductName(providerName);
@@ -742,9 +699,17 @@ function ModalWrapperInner({
</Button>
<Button
disabled={!isValid || !dirty || busy}
tooltip={
busy
? "Testing provider connection..."
: !dirty
? "No changes to save"
: !isValid
? "Please fill in all required fields"
: undefined
}
type="submit"
icon={busy ? SimpleLoader : undefined}
tooltip={disabledTooltip}
>
{llmProvider?.name
? busy

View File

@@ -47,7 +47,7 @@ export function useInitialValues(
api_key: existingLlmProvider?.api_key ?? undefined,
api_base: existingLlmProvider?.api_base ?? undefined,
is_public: existingLlmProvider?.is_public ?? true,
is_auto_mode: existingLlmProvider?.is_auto_mode ?? true,
is_auto_mode: existingLlmProvider?.is_auto_mode ?? false,
groups: existingLlmProvider?.groups ?? [],
personas: existingLlmProvider?.personas ?? [],
model_configurations: modelConfigurations,

View File

@@ -12,7 +12,7 @@ import {
SvgServer,
SvgSettings,
} from "@opal/icons";
import ModelIcon from "@/app/admin/configuration/llm/ModelIcon";
import { ProviderIcon } from "@/app/admin/configuration/llm/ProviderIcon";
export interface LLMProviderCardProps {
title: string;
@@ -75,7 +75,7 @@ function LLMProviderCardInner({
<div className="flex gap-1 p-1 flex-1 min-w-0">
<div className="flex items-start h-full pt-0.5">
{providerName ? (
<ModelIcon provider={providerName} size={16} className="" />
<ProviderIcon provider={providerName} size={16} className="" />
) : (
<SvgServer className="w-4 h-4 stroke-text-04" />
)}

View File

@@ -16,7 +16,7 @@ import {
getProviderDisplayInfo,
} from "../forms/getOnboardingForm";
import { Disabled } from "@opal/core";
import ModelIcon from "@/app/admin/configuration/llm/ModelIcon";
import { ProviderIcon } from "@/app/admin/configuration/llm/ProviderIcon";
import { SvgCheckCircle, SvgCpu, SvgExternalLink } from "@opal/icons";
import { ContentAction } from "@opal/layouts";
import { useLLMProviderOptions } from "@/lib/hooks/useLLMProviderOptions";
@@ -69,7 +69,7 @@ const StackedProviderIcons = ({ providers }: StackedProviderIconsProps) => {
zIndex: providers.length - index,
}}
>
<ModelIcon provider={provider} size={16} />
<ProviderIcon provider={provider} size={16} />
</div>
))}
{providers.length > 3 && (