Compare commits

...

3 Commits

Author          SHA1        Message                                                      Date
Jamison Lahman  ab4d1dce01  fix: Custom LLM Provider requires a Provider Name (#10003)  2026-04-08 20:33:43 +00:00
Raunak Bhagat   80c928eb58  fix: enable force-delete for last LLM provider (#9998)      2026-04-08 20:09:38 +00:00
Raunak Bhagat   77528876b1  chore: delete unused files (#10001)                          2026-04-08 19:53:47 +00:00
30 changed files with 746 additions and 929 deletions

View File

@@ -1,163 +0,0 @@
"use client";
import { ArrayHelpers, FieldArray, FormikProps, useField } from "formik";
import { ModelConfiguration } from "@/interfaces/llm";
import { ManualErrorMessage, TextFormField } from "@/components/Field";
import { useEffect, useState } from "react";
import CreateButton from "@/refresh-components/buttons/CreateButton";
import { Button } from "@opal/components";
import { SvgX } from "@opal/icons";
import Text from "@/refresh-components/texts/Text";
function ModelConfigurationRow({
name,
index,
arrayHelpers,
formikProps,
setError,
}: {
name: string;
index: number;
arrayHelpers: ArrayHelpers;
formikProps: FormikProps<{ model_configurations: ModelConfiguration[] }>;
setError: (value: string | null) => void;
}) {
const [, input] = useField(`${name}[${index}]`);
useEffect(() => {
if (!input.touched) return;
setError((input.error as { name: string } | undefined)?.name ?? null);
}, [input.touched, input.error]);
return (
<div key={index} className="flex flex-row w-full gap-4">
<div
className={`flex flex-[2] ${
input.touched && input.error ? "border-2 border-error rounded-lg" : ""
}`}
>
<TextFormField
name={`${name}[${index}].name`}
label=""
placeholder={`model-name-${index + 1}`}
removeLabel
hideError
/>
</div>
<div className="flex flex-[1]">
<TextFormField
name={`${name}[${index}].max_input_tokens`}
label=""
placeholder="Default"
removeLabel
hideError
type="number"
min={1}
/>
</div>
<div className="flex flex-col justify-center">
<Button
disabled={formikProps.values.model_configurations.length <= 1}
onClick={() => {
if (formikProps.values.model_configurations.length > 1) {
setError(null);
arrayHelpers.remove(index);
}
}}
icon={SvgX}
prominence="secondary"
/>
</div>
</div>
);
}
export function ModelConfigurationField({
name,
formikProps,
}: {
name: string;
formikProps: FormikProps<{ model_configurations: ModelConfiguration[] }>;
}) {
const [errorMap, setErrorMap] = useState<{ [index: number]: string }>({});
const [finalError, setFinalError] = useState<string | undefined>();
return (
<div className="pb-5 flex flex-col w-full">
<div className="flex flex-col">
<Text as="p" mainUiAction>
Model Configurations
</Text>
<Text as="p" secondaryBody text03>
Add models and customize the number of input tokens that they accept.
</Text>
</div>
<FieldArray
name={name}
render={(arrayHelpers: ArrayHelpers) => (
<div className="flex flex-col">
<div className="flex flex-col gap-4 py-4">
<div className="flex">
<Text as="p" secondaryBody className="flex flex-[2]">
Model Name
</Text>
<Text as="p" secondaryBody className="flex flex-[1]">
Max Input Tokens
</Text>
<div className="w-10" />
</div>
{formikProps.values.model_configurations.map((_, index) => (
<ModelConfigurationRow
key={index}
name={name}
formikProps={formikProps}
arrayHelpers={arrayHelpers}
index={index}
setError={(message: string | null) => {
const newErrors = { ...errorMap };
if (message) {
newErrors[index] = message;
} else {
delete newErrors[index];
for (const key in newErrors) {
const numKey = Number(key);
if (numKey > index) {
const errorValue = newErrors[key];
if (errorValue !== undefined) {
// Ensure the value is not undefined
newErrors[numKey - 1] = errorValue;
delete newErrors[numKey];
}
}
}
}
setErrorMap(newErrors);
setFinalError(
Object.values(newErrors).filter((item) => item)[0]
);
}}
/>
))}
</div>
{finalError && (
<ManualErrorMessage>{finalError}</ManualErrorMessage>
)}
<div className="mt-3">
<CreateButton
onClick={() => {
arrayHelpers.push({
name: "",
is_visible: true,
// Use null so Yup.number().nullable() accepts empty inputs
max_input_tokens: null,
});
}}
>
Add New
</CreateButton>
</div>
</div>
)}
/>
</div>
);
}
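
The index-shifting in the setError callback above is the subtle part: when a row is removed, every error recorded for a later row must move up one slot so messages stay attached to the right rows. A standalone sketch of that logic (editor's illustration; the function name is invented):

// Editor's sketch, not part of the diff: re-key row errors after removing
// the row at `removedIndex`, mirroring the loop in the setError callback.
function shiftErrorsAfterRemove(
  errors: Record<number, string>,
  removedIndex: number
): Record<number, string> {
  const shifted: Record<number, string> = {};
  for (const [key, message] of Object.entries(errors)) {
    const i = Number(key);
    if (i < removedIndex) shifted[i] = message; // earlier rows keep their slot
    else if (i > removedIndex) shifted[i - 1] = message; // later rows move up one
    // i === removedIndex is dropped along with its row
  }
  return shifted;
}
// e.g. shiftErrorsAfterRemove({ 0: "required", 2: "duplicate" }, 1)
//      => { 0: "required", 1: "duplicate" }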

View File

@@ -0,0 +1,18 @@
import { defaultTailwindCSS } from "@/components/icons/icons";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { IconProps } from "@opal/types";
export interface ModelIconProps extends IconProps {
provider: string;
modelName?: string;
}
export default function ModelIcon({
provider,
modelName,
size = 16,
className = defaultTailwindCSS,
}: ModelIconProps) {
const Icon = getModelIcon(provider, modelName);
return <Icon size={size} className={className} />;
}
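
Usage mirrors the ProviderIcon component this replaces; a hypothetical call site (editor's example, values invented):

<ModelIcon provider="bedrock" modelName="anthropic.claude-3-sonnet" size={20} />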

View File

@@ -1,17 +0,0 @@
import { defaultTailwindCSS, IconProps } from "@/components/icons/icons";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
export interface ProviderIconProps extends IconProps {
provider: string;
modelName?: string;
}
export const ProviderIcon = ({
provider,
modelName,
size = 16,
className = defaultTailwindCSS,
}: ProviderIconProps) => {
const Icon = getProviderIcon(provider, modelName);
return <Icon size={size} className={className} />;
};

View File

@@ -1,622 +0,0 @@
import { JSX } from "react";
import {
AnthropicIcon,
AmazonIcon,
AzureIcon,
CPUIcon,
MicrosoftIconSVG,
MistralIcon,
MetaIcon,
GeminiIcon,
IconProps,
DeepseekIcon,
OpenAISVG,
QwenIcon,
OllamaIcon,
LMStudioIcon,
LiteLLMIcon,
ZAIIcon,
} from "@/components/icons/icons";
import {
OllamaModelResponse,
OpenRouterModelResponse,
BedrockModelResponse,
LMStudioModelResponse,
LiteLLMProxyModelResponse,
BifrostModelResponse,
ModelConfiguration,
LLMProviderName,
BedrockFetchParams,
OllamaFetchParams,
LMStudioFetchParams,
OpenRouterFetchParams,
LiteLLMProxyFetchParams,
BifrostFetchParams,
OpenAICompatibleFetchParams,
OpenAICompatibleModelResponse,
} from "@/interfaces/llm";
import { SvgAws, SvgBifrost, SvgOpenrouter, SvgPlug } from "@opal/icons";
// Aggregator providers that host models from multiple vendors
export const AGGREGATOR_PROVIDERS = new Set([
"bedrock",
"bedrock_converse",
"openrouter",
"ollama_chat",
"lm_studio",
"litellm_proxy",
"bifrost",
"openai_compatible",
"vertex_ai",
]);
export const getProviderIcon = (
providerName: string,
modelName?: string
): (({ size, className }: IconProps) => JSX.Element) => {
const iconMap: Record<
string,
({ size, className }: IconProps) => JSX.Element
> = {
amazon: AmazonIcon,
phi: MicrosoftIconSVG,
mistral: MistralIcon,
ministral: MistralIcon,
llama: MetaIcon,
ollama_chat: OllamaIcon,
ollama: OllamaIcon,
lm_studio: LMStudioIcon,
gemini: GeminiIcon,
deepseek: DeepseekIcon,
claude: AnthropicIcon,
anthropic: AnthropicIcon,
openai: OpenAISVG,
// Azure OpenAI should display the Azure logo
azure: AzureIcon,
microsoft: MicrosoftIconSVG,
meta: MetaIcon,
google: GeminiIcon,
qwen: QwenIcon,
qwq: QwenIcon,
zai: ZAIIcon,
// Cloud providers - use AWS icon for Bedrock
bedrock: SvgAws,
bedrock_converse: SvgAws,
openrouter: SvgOpenrouter,
litellm_proxy: LiteLLMIcon,
bifrost: SvgBifrost,
openai_compatible: SvgPlug,
vertex_ai: GeminiIcon,
};
const lowerProviderName = providerName.toLowerCase();
// For aggregator providers (bedrock, openrouter, vertex_ai), prioritize showing
// the vendor icon based on model name (e.g., show Claude icon for Bedrock Claude models)
if (AGGREGATOR_PROVIDERS.has(lowerProviderName) && modelName) {
const lowerModelName = modelName.toLowerCase();
for (const [key, icon] of Object.entries(iconMap)) {
if (lowerModelName.includes(key)) {
return icon;
}
}
}
// Check if provider name directly matches an icon
if (lowerProviderName in iconMap) {
const icon = iconMap[lowerProviderName];
if (icon) {
return icon;
}
}
// For non-aggregator providers, check if model name contains any of the keys
if (modelName) {
const lowerModelName = modelName.toLowerCase();
for (const [key, icon] of Object.entries(iconMap)) {
if (lowerModelName.includes(key)) {
return icon;
}
}
}
// Fallback to CPU icon if no matches
return CPUIcon;
};
export const isAnthropic = (provider: string, modelName?: string) =>
provider === LLMProviderName.ANTHROPIC ||
!!modelName?.toLowerCase().includes("claude");
/**
* Fetches Bedrock models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchBedrockModels = async (
params: BedrockFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
if (!params.aws_region_name) {
return { models: [], error: "AWS region is required" };
}
try {
const response = await fetch("/api/admin/llm/bedrock/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
aws_region_name: params.aws_region_name,
aws_access_key_id: params.aws_access_key_id,
aws_secret_access_key: params.aws_secret_access_key,
aws_bearer_token_bedrock: params.aws_bearer_token_bedrock,
provider_name: params.provider_name,
}),
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch {
// ignore JSON parsing errors
}
return { models: [], error: errorMessage };
}
const data: BedrockModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: false,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: false,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches Ollama models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchOllamaModels = async (
params: OllamaFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
try {
const response = await fetch("/api/admin/llm/ollama/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
provider_name: params.provider_name,
}),
signal: params.signal,
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch {
// ignore JSON parsing errors
}
return { models: [], error: errorMessage };
}
const data: OllamaModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: false,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches OpenRouter models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchOpenRouterModels = async (
params: OpenRouterFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
const apiKey = params.api_key;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
if (!apiKey) {
return { models: [], error: "API Key is required" };
}
try {
const response = await fetch("/api/admin/llm/openrouter/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: apiKey,
provider_name: params.provider_name,
}),
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch (jsonError) {
console.warn(
"Failed to parse OpenRouter model fetch error response",
jsonError
);
}
return { models: [], error: errorMessage };
}
const data: OpenRouterModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: false,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches LM Studio models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchLMStudioModels = async (
params: LMStudioFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
try {
const response = await fetch("/api/admin/llm/lm-studio/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: params.api_key,
api_key_changed: params.api_key_changed ?? false,
provider_name: params.provider_name,
}),
signal: params.signal,
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch (jsonError) {
console.warn(
"Failed to parse LM Studio model fetch error response",
jsonError
);
}
return { models: [], error: errorMessage };
}
const data: LMStudioModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: modelData.supports_reasoning,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches Bifrost models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchBifrostModels = async (
params: BifrostFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
try {
const response = await fetch("/api/admin/llm/bifrost/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: params.api_key,
provider_name: params.provider_name,
}),
signal: params.signal,
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch (jsonError) {
console.warn(
"Failed to parse Bifrost model fetch error response",
jsonError
);
}
return { models: [], error: errorMessage };
}
const data: BifrostModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: modelData.supports_reasoning,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches models from a generic OpenAI-compatible server.
* Uses snake_case params to match API structure.
*/
export const fetchOpenAICompatibleModels = async (
params: OpenAICompatibleFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
try {
const response = await fetch(
"/api/admin/llm/openai-compatible/available-models",
{
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: params.api_key,
provider_name: params.provider_name,
}),
signal: params.signal,
}
);
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch {
// ignore JSON parsing errors
}
return { models: [], error: errorMessage };
}
const data: OpenAICompatibleModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: modelData.supports_reasoning,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches LiteLLM Proxy models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchLiteLLMProxyModels = async (
params: LiteLLMProxyFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
const apiKey = params.api_key;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
if (!apiKey) {
return { models: [], error: "API Key is required" };
}
try {
const response = await fetch("/api/admin/llm/litellm/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: apiKey,
provider_name: params.provider_name,
}),
signal: params.signal,
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch {
// ignore JSON parsing errors
}
return { models: [], error: errorMessage };
}
const data: LiteLLMProxyModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.model_name,
display_name: modelData.model_name,
is_visible: true,
max_input_tokens: null,
supports_image_input: false,
supports_reasoning: false,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches models for a provider. Accepts form values directly and maps them
* to the expected fetch params format internally.
*/
export const fetchModels = async (
providerName: string,
formValues: {
api_base?: string;
api_key?: string;
api_key_changed?: boolean;
name?: string;
custom_config?: Record<string, string>;
model_configurations?: ModelConfiguration[];
},
signal?: AbortSignal
) => {
const customConfig = formValues.custom_config || {};
switch (providerName) {
case LLMProviderName.BEDROCK:
return fetchBedrockModels({
aws_region_name: customConfig.AWS_REGION_NAME || "",
aws_access_key_id: customConfig.AWS_ACCESS_KEY_ID,
aws_secret_access_key: customConfig.AWS_SECRET_ACCESS_KEY,
aws_bearer_token_bedrock: customConfig.AWS_BEARER_TOKEN_BEDROCK,
provider_name: formValues.name,
});
case LLMProviderName.OLLAMA_CHAT:
return fetchOllamaModels({
api_base: formValues.api_base,
provider_name: formValues.name,
signal,
});
case LLMProviderName.LM_STUDIO:
return fetchLMStudioModels({
api_base: formValues.api_base,
api_key: formValues.custom_config?.LM_STUDIO_API_KEY,
api_key_changed: formValues.api_key_changed ?? false,
provider_name: formValues.name,
signal,
});
case LLMProviderName.OPENROUTER:
return fetchOpenRouterModels({
api_base: formValues.api_base,
api_key: formValues.api_key,
provider_name: formValues.name,
});
case LLMProviderName.LITELLM_PROXY:
return fetchLiteLLMProxyModels({
api_base: formValues.api_base,
api_key: formValues.api_key,
provider_name: formValues.name,
signal,
});
case LLMProviderName.BIFROST:
return fetchBifrostModels({
api_base: formValues.api_base,
api_key: formValues.api_key,
provider_name: formValues.name,
signal,
});
case LLMProviderName.OPENAI_COMPATIBLE:
return fetchOpenAICompatibleModels({
api_base: formValues.api_base,
api_key: formValues.api_key,
provider_name: formValues.name,
signal,
});
default:
return { models: [], error: `Unknown provider: ${providerName}` };
}
};
export function canProviderFetchModels(providerName?: string) {
if (!providerName) return false;
switch (providerName) {
case LLMProviderName.BEDROCK:
case LLMProviderName.OLLAMA_CHAT:
case LLMProviderName.LM_STUDIO:
case LLMProviderName.OPENROUTER:
case LLMProviderName.LITELLM_PROXY:
case LLMProviderName.BIFROST:
case LLMProviderName.OPENAI_COMPATIBLE:
return true;
default:
return false;
}
}

View File

@@ -5,7 +5,7 @@ import { Button } from "@opal/components";
import { Text } from "@opal/components";
import { ContentAction } from "@opal/layouts";
import { SvgEyeOff, SvgX } from "@opal/icons";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
import { getModelIcon } from "@/lib/llmConfig/providers";
import AgentMessage, {
AgentMessageProps,
} from "@/app/app/message/messageComponents/AgentMessage";
@@ -71,7 +71,7 @@ export default function MultiModelPanel({
errorStackTrace,
errorDetails,
}: MultiModelPanelProps) {
const ProviderIcon = getProviderIcon(provider, modelName);
const ModelIcon = getModelIcon(provider, modelName);
const handlePanelClick = useCallback(() => {
if (!isHidden && !isPreferred) onSelect();
@@ -88,7 +88,7 @@ export default function MultiModelPanel({
sizePreset="main-ui"
variant="body"
paddingVariant="lg"
icon={ProviderIcon}
icon={ModelIcon}
title={isHidden ? markdown(`~~${displayName}~~`) : displayName}
rightChildren={
<div className="flex items-center gap-1 px-2">

View File

@@ -18,7 +18,7 @@ import {
isRecommendedModel,
} from "@/app/craft/onboarding/constants";
import { ToggleWarningModal } from "./ToggleWarningModal";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { Section } from "@/layouts/general-layouts";
import {
Accordion,
@@ -365,9 +365,7 @@ export function BuildLLMPopover({
const isExpanded = expandedGroups.includes(
group.providerKey
);
const ProviderIcon = getProviderIcon(
group.providerKey
);
const ModelIcon = getModelIcon(group.providerKey);
return (
<AccordionItem
@@ -379,7 +377,7 @@ export function BuildLLMPopover({
<AccordionTrigger className="flex items-center rounded-08 hover:no-underline hover:bg-background-tint-02 group [&>svg]:hidden w-full py-1">
<div className="flex items-center gap-1 shrink-0">
<div className="flex items-center justify-center size-5 shrink-0">
<ProviderIcon size={16} />
<ModelIcon size={16} />
</div>
<Text
secondaryBody

View File

@@ -48,7 +48,7 @@ import NotAllowedModal from "@/app/craft/onboarding/components/NotAllowedModal";
import { useOnboarding } from "@/app/craft/onboarding/BuildOnboardingProvider";
import { useLLMProviders } from "@/hooks/useLLMProviders";
import { useUser } from "@/providers/UserProvider";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
import { getModelIcon } from "@/lib/llmConfig/providers";
import {
getBuildUserPersona,
getPersonaInfo,
@@ -475,10 +475,10 @@ export default function BuildConfigPage() {
>
{pendingLlmSelection?.provider &&
(() => {
- const ProviderIcon = getProviderIcon(
+ const ModelIcon = getModelIcon(
pendingLlmSelection.provider
);
- return <ProviderIcon className="w-4 h-4" />;
+ return <ModelIcon className="w-4 h-4" />;
})()}
<Text mainUiAction>{pendingLlmDisplayName}</Text>
<SvgChevronDown className="w-4 h-4 text-text-03" />

View File

@@ -3,14 +3,14 @@
import { useMemo } from "react";
import { parseLlmDescriptor, structureValue } from "@/lib/llmConfig/utils";
import { DefaultModel, LLMProviderDescriptor } from "@/interfaces/llm";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
import { getModelIcon } from "@/lib/llmConfig/providers";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import { createIcon } from "@/components/icons/icons";
interface LLMOption {
name: string;
value: string;
icon: ReturnType<typeof getProviderIcon>;
icon: ReturnType<typeof getModelIcon>;
modelName: string;
providerName: string;
provider: string;
@@ -85,7 +85,7 @@ export default function LLMSelector({
provider.provider,
modelConfiguration.name
),
icon: getProviderIcon(provider.provider, modelConfiguration.name),
icon: getModelIcon(provider.provider, modelConfiguration.name),
modelName: modelConfiguration.name,
providerName: provider.name,
provider: provider.provider,

View File

@@ -1,7 +1,4 @@
import type {
OnboardingState,
OnboardingActions,
} from "@/interfaces/onboarding";
import type { OnboardingActions } from "@/interfaces/onboarding";
export enum LLMProviderName {
OPENAI = "openai",

View File

@@ -32,7 +32,7 @@ import {
PersonaLabel,
} from "@/app/admin/agents/interfaces";
import { DefaultModel, LLMProviderDescriptor } from "@/interfaces/llm";
import { isAnthropic } from "@/app/admin/configuration/llm/utils";
import { isAnthropic } from "@/lib/llmConfig/svc";
import { getSourceMetadataForSources } from "./sources";
import { AuthType, NEXT_PUBLIC_CLOUD_ENABLED } from "./constants";
import { useUser } from "@/providers/UserProvider";

View File

@@ -14,8 +14,28 @@ import {
SvgLitellm,
SvgLmStudio,
} from "@opal/icons";
import {
MicrosoftIconSVG,
MistralIcon,
MetaIcon,
DeepseekIcon,
QwenIcon,
ZAIIcon,
} from "@/components/icons/icons";
import { LLMProviderName } from "@/interfaces/llm";
export const AGGREGATOR_PROVIDERS = new Set([
LLMProviderName.BEDROCK,
"bedrock_converse",
LLMProviderName.OPENROUTER,
LLMProviderName.OLLAMA_CHAT,
LLMProviderName.LM_STUDIO,
LLMProviderName.LITELLM_PROXY,
LLMProviderName.BIFROST,
LLMProviderName.OPENAI_COMPATIBLE,
LLMProviderName.VERTEX_AI,
]);
const PROVIDER_ICONS: Record<string, IconFunctionComponent> = {
[LLMProviderName.OPENAI]: SvgOpenai,
[LLMProviderName.ANTHROPIC]: SvgClaude,
@@ -81,3 +101,80 @@ export function getProviderDisplayName(providerName: string): string {
export function getProviderIcon(providerName: string): IconFunctionComponent {
return PROVIDER_ICONS[providerName] ?? SvgCpu;
}
// ---------------------------------------------------------------------------
// Model-aware icon resolver (legacy icon set)
// ---------------------------------------------------------------------------
const MODEL_ICON_MAP: Record<string, IconFunctionComponent> = {
[LLMProviderName.OPENAI]: SvgOpenai,
[LLMProviderName.ANTHROPIC]: SvgClaude,
[LLMProviderName.OLLAMA_CHAT]: SvgOllama,
[LLMProviderName.LM_STUDIO]: SvgLmStudio,
[LLMProviderName.OPENROUTER]: SvgOpenrouter,
[LLMProviderName.VERTEX_AI]: SvgGemini,
[LLMProviderName.BEDROCK]: SvgAws,
[LLMProviderName.LITELLM_PROXY]: SvgLitellm,
[LLMProviderName.BIFROST]: SvgBifrost,
[LLMProviderName.OPENAI_COMPATIBLE]: SvgPlug,
amazon: SvgAws,
phi: MicrosoftIconSVG,
mistral: MistralIcon,
ministral: MistralIcon,
llama: MetaIcon,
ollama: SvgOllama,
gemini: SvgGemini,
deepseek: DeepseekIcon,
claude: SvgClaude,
azure: SvgAzure,
microsoft: MicrosoftIconSVG,
meta: MetaIcon,
google: SvgGemini,
qwen: QwenIcon,
qwq: QwenIcon,
zai: ZAIIcon,
bedrock_converse: SvgAws,
};
/**
* Model-aware icon resolver that checks both provider name and model name
* to pick the most specific icon (e.g. Claude icon for a Bedrock Claude model).
*/
export const getModelIcon = (
providerName: string,
modelName?: string
): IconFunctionComponent => {
const lowerProviderName = providerName.toLowerCase();
// For aggregator providers, prioritize showing the vendor icon based on model name
if (AGGREGATOR_PROVIDERS.has(lowerProviderName) && modelName) {
const lowerModelName = modelName.toLowerCase();
for (const [key, icon] of Object.entries(MODEL_ICON_MAP)) {
if (lowerModelName.includes(key)) {
return icon;
}
}
}
// Check if provider name directly matches an icon
if (lowerProviderName in MODEL_ICON_MAP) {
const icon = MODEL_ICON_MAP[lowerProviderName];
if (icon) {
return icon;
}
}
// For non-aggregator providers, check if model name contains any of the keys
if (modelName) {
const lowerModelName = modelName.toLowerCase();
for (const [key, icon] of Object.entries(MODEL_ICON_MAP)) {
if (lowerModelName.includes(key)) {
return icon;
}
}
}
// Fallback to CPU icon if no matches
return SvgCpu;
};
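
Given the mappings above, the resolution order (model-name match for aggregators, then direct provider match, then model-name fallback, then CPU icon) plays out like this; illustrative calls, not part of the diff:

getModelIcon("bedrock", "anthropic.claude-3-haiku"); // SvgClaude: aggregator, model name matched first
getModelIcon("azure");                               // SvgAzure: direct provider-name match
getModelIcon("totally-custom", "gemini-1.5-pro");    // SvgGemini: non-aggregator, model-name fallback
getModelIcon("totally-custom");                      // SvgCpu: no match anywhere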

View File

@@ -1,5 +1,5 @@
/**
* LLM action functions for mutations.
* LLM action functions for mutations and model fetching.
*
* These are async functions for one-off actions that don't need SWR caching.
*
@@ -7,12 +7,31 @@
* - /api/admin/llm/test/default - Test the default LLM provider connection
* - /api/admin/llm/default - Set the default LLM model
* - /api/admin/llm/provider/{id} - Delete an LLM provider
* - /api/admin/llm/{provider}/available-models - Fetch available models for a provider
*/
import {
LLM_ADMIN_URL,
LLM_PROVIDERS_ADMIN_URL,
} from "@/lib/llmConfig/constants";
import {
OllamaModelResponse,
OpenRouterModelResponse,
BedrockModelResponse,
LMStudioModelResponse,
LiteLLMProxyModelResponse,
BifrostModelResponse,
ModelConfiguration,
LLMProviderName,
BedrockFetchParams,
OllamaFetchParams,
LMStudioFetchParams,
OpenRouterFetchParams,
LiteLLMProxyFetchParams,
BifrostFetchParams,
OpenAICompatibleFetchParams,
OpenAICompatibleModelResponse,
} from "@/interfaces/llm";
/**
* Test the default LLM provider.
@@ -57,15 +76,522 @@ export async function setDefaultLlmModel(
/**
* Delete an LLM provider.
* @param providerId - The provider ID to delete
* @param force - Force delete even if this is the default provider
* @throws Error with the detail message from the API on failure
*/
export async function deleteLlmProvider(providerId: number): Promise<void> {
const response = await fetch(`${LLM_PROVIDERS_ADMIN_URL}/${providerId}`, {
method: "DELETE",
});
export async function deleteLlmProvider(
providerId: number,
force = false
): Promise<void> {
const url = force
? `${LLM_PROVIDERS_ADMIN_URL}/${providerId}?force=true`
: `${LLM_PROVIDERS_ADMIN_URL}/${providerId}`;
const response = await fetch(url, { method: "DELETE" });
if (!response.ok) {
const errorMsg = (await response.json()).detail;
throw new Error(errorMsg);
}
}
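// Editor's note, not part of the diff: the admin UI below passes `force`
// only for the last remaining provider, e.g.
//   await deleteLlmProvider(provider.id, /* force */ isLastProvider);
// leaving the usual server-side checks in place for ordinary deletes.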
// ---------------------------------------------------------------------------
// Aggregator providers & helpers
// ---------------------------------------------------------------------------
/** Aggregator providers that host models from multiple vendors. */
export const AGGREGATOR_PROVIDERS = new Set([
"bedrock",
"bedrock_converse",
"openrouter",
"ollama_chat",
"lm_studio",
"litellm_proxy",
"bifrost",
"openai_compatible",
"vertex_ai",
]);
export const isAnthropic = (provider: string, modelName?: string) =>
provider === LLMProviderName.ANTHROPIC ||
!!modelName?.toLowerCase().includes("claude");
// ---------------------------------------------------------------------------
// Model fetching
// ---------------------------------------------------------------------------
/**
* Fetches Bedrock models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchBedrockModels = async (
params: BedrockFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
if (!params.aws_region_name) {
return { models: [], error: "AWS region is required" };
}
try {
const response = await fetch("/api/admin/llm/bedrock/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
aws_region_name: params.aws_region_name,
aws_access_key_id: params.aws_access_key_id,
aws_secret_access_key: params.aws_secret_access_key,
aws_bearer_token_bedrock: params.aws_bearer_token_bedrock,
provider_name: params.provider_name,
}),
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch {
// ignore JSON parsing errors
}
return { models: [], error: errorMessage };
}
const data: BedrockModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: false,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: false,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches Ollama models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchOllamaModels = async (
params: OllamaFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
try {
const response = await fetch("/api/admin/llm/ollama/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
provider_name: params.provider_name,
}),
signal: params.signal,
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch {
// ignore JSON parsing errors
}
return { models: [], error: errorMessage };
}
const data: OllamaModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: false,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches OpenRouter models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchOpenRouterModels = async (
params: OpenRouterFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
const apiKey = params.api_key;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
if (!apiKey) {
return { models: [], error: "API Key is required" };
}
try {
const response = await fetch("/api/admin/llm/openrouter/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: apiKey,
provider_name: params.provider_name,
}),
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch (jsonError) {
console.warn(
"Failed to parse OpenRouter model fetch error response",
jsonError
);
}
return { models: [], error: errorMessage };
}
const data: OpenRouterModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: false,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches LM Studio models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchLMStudioModels = async (
params: LMStudioFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
try {
const response = await fetch("/api/admin/llm/lm-studio/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: params.api_key,
api_key_changed: params.api_key_changed ?? false,
provider_name: params.provider_name,
}),
signal: params.signal,
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch (jsonError) {
console.warn(
"Failed to parse LM Studio model fetch error response",
jsonError
);
}
return { models: [], error: errorMessage };
}
const data: LMStudioModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: modelData.supports_reasoning,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches Bifrost models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchBifrostModels = async (
params: BifrostFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
try {
const response = await fetch("/api/admin/llm/bifrost/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: params.api_key,
provider_name: params.provider_name,
}),
signal: params.signal,
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch (jsonError) {
console.warn(
"Failed to parse Bifrost model fetch error response",
jsonError
);
}
return { models: [], error: errorMessage };
}
const data: BifrostModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: modelData.supports_reasoning,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches models from a generic OpenAI-compatible server.
* Uses snake_case params to match API structure.
*/
export const fetchOpenAICompatibleModels = async (
params: OpenAICompatibleFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
try {
const response = await fetch(
"/api/admin/llm/openai-compatible/available-models",
{
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: params.api_key,
provider_name: params.provider_name,
}),
signal: params.signal,
}
);
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch {
// ignore JSON parsing errors
}
return { models: [], error: errorMessage };
}
const data: OpenAICompatibleModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: modelData.supports_reasoning,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches LiteLLM Proxy models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchLiteLLMProxyModels = async (
params: LiteLLMProxyFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
const apiKey = params.api_key;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
if (!apiKey) {
return { models: [], error: "API Key is required" };
}
try {
const response = await fetch("/api/admin/llm/litellm/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: apiKey,
provider_name: params.provider_name,
}),
signal: params.signal,
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch {
// ignore JSON parsing errors
}
return { models: [], error: errorMessage };
}
const data: LiteLLMProxyModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.model_name,
display_name: modelData.model_name,
is_visible: true,
max_input_tokens: null,
supports_image_input: false,
supports_reasoning: false,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches models for a provider. Accepts form values directly and maps them
* to the expected fetch params format internally.
*/
export const fetchModels = async (
providerName: string,
formValues: {
api_base?: string;
api_key?: string;
api_key_changed?: boolean;
name?: string;
custom_config?: Record<string, string>;
model_configurations?: ModelConfiguration[];
},
signal?: AbortSignal
) => {
const customConfig = formValues.custom_config || {};
switch (providerName) {
case LLMProviderName.BEDROCK:
return fetchBedrockModels({
aws_region_name: customConfig.AWS_REGION_NAME || "",
aws_access_key_id: customConfig.AWS_ACCESS_KEY_ID,
aws_secret_access_key: customConfig.AWS_SECRET_ACCESS_KEY,
aws_bearer_token_bedrock: customConfig.AWS_BEARER_TOKEN_BEDROCK,
provider_name: formValues.name,
});
case LLMProviderName.OLLAMA_CHAT:
return fetchOllamaModels({
api_base: formValues.api_base,
provider_name: formValues.name,
signal,
});
case LLMProviderName.LM_STUDIO:
return fetchLMStudioModels({
api_base: formValues.api_base,
api_key: formValues.custom_config?.LM_STUDIO_API_KEY,
api_key_changed: formValues.api_key_changed ?? false,
provider_name: formValues.name,
signal,
});
case LLMProviderName.OPENROUTER:
return fetchOpenRouterModels({
api_base: formValues.api_base,
api_key: formValues.api_key,
provider_name: formValues.name,
});
case LLMProviderName.LITELLM_PROXY:
return fetchLiteLLMProxyModels({
api_base: formValues.api_base,
api_key: formValues.api_key,
provider_name: formValues.name,
signal,
});
case LLMProviderName.BIFROST:
return fetchBifrostModels({
api_base: formValues.api_base,
api_key: formValues.api_key,
provider_name: formValues.name,
signal,
});
case LLMProviderName.OPENAI_COMPATIBLE:
return fetchOpenAICompatibleModels({
api_base: formValues.api_base,
api_key: formValues.api_key,
provider_name: formValues.name,
signal,
});
default:
return { models: [], error: `Unknown provider: ${providerName}` };
}
};
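
Several of these fetchers accept an optional AbortSignal, so callers can cancel an in-flight request (for example when the user edits the API base mid-fetch). A minimal sketch, with hypothetical field values:

const controller = new AbortController();
const resultPromise = fetchModels(
  LLMProviderName.OLLAMA_CHAT,
  { api_base: "http://localhost:11434", name: "Local Ollama" },
  controller.signal
);
controller.abort();
// The aborted fetch rejects inside fetchModels, whose catch block turns it
// into a resolved { models: [], error } result instead of a thrown error.
const { models, error } = await resultPromise;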

View File

@@ -1,38 +0,0 @@
import { LLMProviderResponse, VisionProvider } from "@/interfaces/llm";
import { LLM_ADMIN_URL } from "@/lib/llmConfig/constants";
export async function fetchVisionProviders(): Promise<VisionProvider[]> {
const response = await fetch(`${LLM_ADMIN_URL}/vision-providers`, {
headers: {
"Content-Type": "application/json",
},
});
if (!response.ok) {
throw new Error(
`Failed to fetch vision providers: ${await response.text()}`
);
}
const data = (await response.json()) as LLMProviderResponse<VisionProvider>;
return data.providers;
}
export async function setDefaultVisionProvider(
providerId: number,
visionModel: string
): Promise<void> {
const response = await fetch(`${LLM_ADMIN_URL}/default-vision`, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
provider_id: providerId,
model_name: visionModel,
}),
});
if (!response.ok) {
const errorMsg = await response.text();
throw new Error(errorMsg);
}
}

View File

@@ -1,7 +1,7 @@
"use client";
import React from "react";
import type { IconProps } from "@opal/types";
import type { IconProps, RichStr } from "@opal/types";
import Text from "@/refresh-components/texts/Text";
import { Button } from "@opal/components";
import Modal from "@/refresh-components/Modal";
@@ -9,8 +9,8 @@ import { useModalClose } from "../contexts/ModalContext";
export interface ConfirmationModalProps {
icon: React.FunctionComponent<IconProps>;
title: string;
description?: string;
title: string | RichStr;
description?: string | RichStr;
children?: React.ReactNode;
submit: React.ReactNode;

View File

@@ -4,11 +4,9 @@ import { useState, useEffect, useCallback, useMemo, useRef } from "react";
import Popover from "@/refresh-components/Popover";
import { LlmDescriptor, LlmManager } from "@/lib/hooks";
import { structureValue } from "@/lib/llmConfig/utils";
import {
getProviderIcon,
AGGREGATOR_PROVIDERS,
} from "@/app/admin/configuration/llm/utils";
import { LLMProviderDescriptor } from "@/interfaces/llm";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { AGGREGATOR_PROVIDERS } from "@/lib/llmConfig/svc";
import { Slider } from "@/components/ui/slider";
import { useUser } from "@/providers/UserProvider";
import Text from "@/refresh-components/texts/Text";
@@ -55,7 +53,7 @@ export function groupLlmOptions(
groups.set(groupKey, {
displayName,
options: [],
Icon: getProviderIcon(provider),
Icon: getModelIcon(provider),
});
}
@@ -193,7 +191,7 @@ export default function LLMPopover({
icon={
foldable
? SvgRefreshCw
: getProviderIcon(
: getModelIcon(
llmManager.currentLlm.provider,
llmManager.currentLlm.modelName
)

View File

@@ -3,7 +3,7 @@
import { useState, useMemo, useRef } from "react";
import Popover from "@/refresh-components/Popover";
import { LlmManager } from "@/lib/hooks";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { Button, SelectButton, OpenButton } from "@opal/components";
import { SvgPlusCircle, SvgX } from "@opal/icons";
import { LLMOption } from "@/refresh-components/popovers/interfaces";
@@ -152,7 +152,7 @@ export default function ModelSelector({
)}
<div className="flex items-center shrink-0">
{selectedModels.map((model, index) => {
const ProviderIcon = getProviderIcon(
const ProviderIcon = getModelIcon(
model.provider,
model.modelName
);

View File

@@ -18,7 +18,7 @@ import {
unsetDefaultImageGenerationConfig,
deleteImageGenerationConfig,
} from "@/refresh-pages/admin/ImageGenerationPage/svc";
import { ProviderIcon } from "@/app/admin/configuration/llm/ProviderIcon";
import ModelIcon from "@/app/admin/configuration/llm/ModelIcon";
import Message from "@/refresh-components/messages/Message";
import ConfirmationModalLayout from "@/refresh-components/layouts/ConfirmationModalLayout";
import InputSelect from "@/refresh-components/inputs/InputSelect";
@@ -264,7 +264,7 @@ export default function ImageGenerationContent() {
sizePreset="main-ui"
variant="section"
icon={() => (
<ProviderIcon
<ModelIcon
provider={provider.provider_name}
size={16}
/>
@@ -391,7 +391,7 @@ export default function ImageGenerationContent() {
key={p.image_provider_id}
value={p.image_provider_id}
icon={() => (
<ProviderIcon
<ModelIcon
provider={p.provider_name}
size={16}
/>

View File

@@ -3,7 +3,7 @@
import React, { useState, useMemo, useEffect } from "react";
import { Form, Formik, FormikProps } from "formik";
import ProviderModal from "@/components/modals/ProviderModal";
import { ProviderIcon } from "@/app/admin/configuration/llm/ProviderIcon";
import ModelIcon from "@/app/admin/configuration/llm/ModelIcon";
import ConnectionProviderIcon from "@/refresh-components/ConnectionProviderIcon";
import {
testImageGenerationApiKey,
@@ -246,7 +246,7 @@ export function ImageGenFormWrapper<T extends FormValues>({
const icon = () => (
<ConnectionProviderIcon
icon={<ProviderIcon provider={imageProvider.provider_name} size={24} />}
icon={<ModelIcon provider={imageProvider.provider_name} size={24} />}
/>
);

View File

@@ -8,8 +8,8 @@ import {
useWellKnownLLMProviders,
} from "@/hooks/useLLMProviders";
import { ThreeDotsLoader } from "@/components/Loading";
import { Content, Card } from "@opal/layouts";
import { Button, SelectCard } from "@opal/components";
import { Content, Card as CardLayout } from "@opal/layouts";
import { Button, SelectCard, Text, Card } from "@opal/components";
import { Hoverable } from "@opal/core";
import { SvgArrowExchange, SvgSettings, SvgTrash } from "@opal/icons";
import * as SettingsLayouts from "@/layouts/settings-layouts";
@@ -22,9 +22,7 @@ import {
} from "@/lib/llmConfig/providers";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
import { deleteLlmProvider, setDefaultLlmModel } from "@/lib/llmConfig/svc";
- import Text from "@/refresh-components/texts/Text";
import { Horizontal as HorizontalInput } from "@/layouts/input-layouts";
- import LegacyCard from "@/refresh-components/cards/Card";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import Message from "@/refresh-components/messages/Message";
import ConfirmationModalLayout from "@/refresh-components/layouts/ConfirmationModalLayout";
@@ -49,6 +47,7 @@ import LiteLLMProxyModal from "@/sections/modals/llmConfig/LiteLLMProxyModal";
import BifrostModal from "@/sections/modals/llmConfig/BifrostModal";
import OpenAICompatibleModal from "@/sections/modals/llmConfig/OpenAICompatibleModal";
import { Section } from "@/layouts/general-layouts";
import { markdown } from "@opal/utils";
const route = ADMIN_ROUTES.LLM_MODELS;
@@ -141,7 +140,7 @@ function ExistingProviderCard({
const handleDelete = async () => {
try {
await deleteLlmProvider(provider.id);
await deleteLlmProvider(provider.id, isLastProvider);
await refreshLlmProviderCaches(mutate);
deleteModal.toggle(false);
toast.success("Provider deleted successfully!");
@@ -156,24 +155,37 @@
{deleteModal.isOpen && (
<ConfirmationModalLayout
icon={SvgTrash}
- title={`Delete ${provider.name}`}
+ title={markdown(`Delete *${provider.name}*`)}
onClose={() => deleteModal.toggle(false)}
submit={
- <Button variant="danger" onClick={handleDelete}>
+ <Button
+ variant="danger"
+ onClick={handleDelete}
+ disabled={isDefault && !isLastProvider}
+ >
Delete
</Button>
}
>
<Section alignItems="start" gap={0.5}>
- <Text text03>
- All LLM models from provider <b>{provider.name}</b> will be
- removed and unavailable for future chats. Chat history will be
- preserved.
- </Text>
- {isLastProvider && (
- <Text text03>
- Connect another provider to continue using chats.
- </Text>
- )}
+ {isDefault && !isLastProvider ? (
+ <Text font="main-ui-body" color="text-03">
+ Cannot delete the default provider. Select another provider as
+ the default prior to deleting this one.
+ </Text>
+ ) : (
+ <>
+ <Text font="main-ui-body" color="text-03">
+ {markdown(
+ `All LLM models from provider **${provider.name}** will be removed and unavailable for future chats. Chat history will be preserved.`
+ )}
+ </Text>
+ {isLastProvider && (
+ <Text font="main-ui-body" color="text-03">
+ Connect another provider to continue using chats.
+ </Text>
+ )}
+ </>
+ )}
</Section>
</ConfirmationModalLayout>
@@ -189,7 +201,7 @@ function ExistingProviderCard({
rounding="lg"
onClick={() => setIsOpen(true)}
>
<Card.Header
<CardLayout.Header
icon={getProviderIcon(provider.provider)}
title={provider.name}
description={getProviderDisplayName(provider.provider)}
@@ -259,7 +271,7 @@ function NewProviderCard({
rounding="lg"
onClick={() => setIsOpen(true)}
>
<Card.Header
<CardLayout.Header
icon={getProviderIcon(provider.name)}
title={getProviderProductName(provider.name)}
description={getProviderDisplayName(provider.name)}
@@ -303,7 +315,7 @@ function NewCustomProviderCard({
rounding="lg"
onClick={() => setIsOpen(true)}
>
<Card.Header
<CardLayout.Header
icon={getProviderIcon("custom")}
title={getProviderProductName("custom")}
description={getProviderDisplayName("custom")}
@@ -392,7 +404,7 @@ export default function LLMProviderConfigurationPage() {
<SettingsLayouts.Body>
{hasProviders ? (
<LegacyCard>
<Card border="solid" rounding="lg">
<HorizontalInput
title="Default Model"
description="This model will be used by Onyx by default in your chats."
@@ -423,7 +435,7 @@ export default function LLMProviderConfigurationPage() {
</InputSelect.Content>
</InputSelect>
</HorizontalInput>
</LegacyCard>
</Card>
) : (
<Message
info

View File

@@ -27,7 +27,7 @@ import {
ModelAccessField,
ModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import { fetchBedrockModels } from "@/app/admin/configuration/llm/utils";
import { fetchBedrockModels } from "@/lib/llmConfig/svc";
import { Card } from "@opal/components";
import { Section } from "@/layouts/general-layouts";
import { SvgAlertCircle } from "@opal/icons";

View File

@@ -9,7 +9,7 @@ import {
LLMProviderName,
LLMProviderView,
} from "@/interfaces/llm";
import { fetchBifrostModels } from "@/app/admin/configuration/llm/utils";
import { fetchBifrostModels } from "@/lib/llmConfig/svc";
import {
useInitialValues,
buildValidationSchema,

View File

@@ -335,23 +335,21 @@ export default function CustomModal({
});
}}
>
- {!isOnboarding && (
- <InputLayouts.FieldPadder>
- <InputLayouts.Vertical
- name="provider"
- title="Provider Name"
- subDescription={markdown(
- "Should be one of the providers listed at [LiteLLM](https://docs.litellm.ai/docs/providers)."
- )}
- >
- <InputTypeInField
- name="provider"
- placeholder="Provider Name as shown on LiteLLM"
- variant={existingLlmProvider ? "disabled" : undefined}
- />
- </InputLayouts.Vertical>
- </InputLayouts.FieldPadder>
- )}
+ <InputLayouts.FieldPadder>
+ <InputLayouts.Vertical
+ name="provider"
+ title="Provider Name"
+ subDescription={markdown(
+ "Should be one of the providers listed at [LiteLLM](https://docs.litellm.ai/docs/providers)."
+ )}
+ >
+ <InputTypeInField
+ name="provider"
+ placeholder="Provider Name as shown on LiteLLM"
+ variant={existingLlmProvider ? "disabled" : undefined}
+ />
+ </InputLayouts.Vertical>
+ </InputLayouts.FieldPadder>
<APIBaseField optional />

View File

@@ -23,7 +23,7 @@ import {
ModelAccessField,
ModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import { fetchModels } from "@/app/admin/configuration/llm/utils";
import { fetchModels } from "@/lib/llmConfig/svc";
import { toast } from "@/hooks/useToast";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";

View File

@@ -8,7 +8,7 @@ import {
LLMProviderName,
LLMProviderView,
} from "@/interfaces/llm";
import { fetchLiteLLMProxyModels } from "@/app/admin/configuration/llm/utils";
import { fetchLiteLLMProxyModels } from "@/lib/llmConfig/svc";
import {
useInitialValues,
buildValidationSchema,

View File

@@ -24,7 +24,7 @@ import {
ModelAccessField,
ModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import { fetchOllamaModels } from "@/app/admin/configuration/llm/utils";
import { fetchOllamaModels } from "@/lib/llmConfig/svc";
import Tabs from "@/refresh-components/Tabs";
import { Card } from "@opal/components";
import { toast } from "@/hooks/useToast";

View File

@@ -9,7 +9,7 @@ import {
LLMProviderName,
LLMProviderView,
} from "@/interfaces/llm";
import { fetchOpenAICompatibleModels } from "@/app/admin/configuration/llm/utils";
import { fetchOpenAICompatibleModels } from "@/lib/llmConfig/svc";
import {
useInitialValues,
buildValidationSchema,

View File

@@ -8,7 +8,7 @@ import {
LLMProviderName,
LLMProviderView,
} from "@/interfaces/llm";
import { fetchOpenRouterModels } from "@/app/admin/configuration/llm/utils";
import { fetchOpenRouterModels } from "@/lib/llmConfig/svc";
import {
useInitialValues,
buildValidationSchema,

View File

@@ -426,7 +426,10 @@ export function ModelSelectionField({
const formikProps = useFormikContext<BaseLLMFormValues>();
const [newModelName, setNewModelName] = useState("");
const [isExpanded, setIsExpanded] = useState(false);
const isAutoMode = formikProps.values.is_auto_mode;
// When the auto-update toggle is hidden, auto mode should have no effect —
// otherwise models can't be deselected and "Select All" stays disabled.
const isAutoMode =
shouldShowAutoUpdateToggle && formikProps.values.is_auto_mode;
const models = formikProps.values.model_configurations;
// Snapshot the original model visibility so we can restore it when
@@ -700,6 +703,15 @@ function ModalWrapperInner({
const isTesting = status?.isTesting === true;
const busy = isTesting || isSubmitting;
const disabledTooltip = busy
? undefined
: !isValid
? "Please fill in all required fields."
: !dirty
? "No changes to save."
: undefined;
const providerIcon = getProviderIcon(providerName);
const providerDisplayName = getProviderDisplayName(providerName);
const providerProductName = getProviderProductName(providerName);
@@ -732,6 +744,7 @@ function ModalWrapperInner({
disabled={!isValid || !dirty || busy}
type="submit"
icon={busy ? SimpleLoader : undefined}
tooltip={disabledTooltip}
>
{llmProvider?.name
? busy

View File

@@ -12,7 +12,7 @@ import {
SvgServer,
SvgSettings,
} from "@opal/icons";
import { ProviderIcon } from "@/app/admin/configuration/llm/ProviderIcon";
import ModelIcon from "@/app/admin/configuration/llm/ModelIcon";
export interface LLMProviderCardProps {
title: string;
@@ -75,7 +75,7 @@ function LLMProviderCardInner({
<div className="flex gap-1 p-1 flex-1 min-w-0">
<div className="flex items-start h-full pt-0.5">
{providerName ? (
<ProviderIcon provider={providerName} size={16} className="" />
<ModelIcon provider={providerName} size={16} className="" />
) : (
<SvgServer className="w-4 h-4 stroke-text-04" />
)}

View File

@@ -16,7 +16,7 @@ import {
getProviderDisplayInfo,
} from "../forms/getOnboardingForm";
import { Disabled } from "@opal/core";
import { ProviderIcon } from "@/app/admin/configuration/llm/ProviderIcon";
import ModelIcon from "@/app/admin/configuration/llm/ModelIcon";
import { SvgCheckCircle, SvgCpu, SvgExternalLink } from "@opal/icons";
import { ContentAction } from "@opal/layouts";
import { useLLMProviderOptions } from "@/lib/hooks/useLLMProviderOptions";
@@ -69,7 +69,7 @@ const StackedProviderIcons = ({ providers }: StackedProviderIconsProps) => {
zIndex: providers.length - index,
}}
>
<ProviderIcon provider={provider} size={16} />
<ModelIcon provider={provider} size={16} />
</div>
))}
{providers.length > 3 && (