Compare commits

...

5 Commits

Author          SHA1        Message                                                      Date
Jamison Lahman  069dd181a4  nit                                                          2026-04-08 14:55:03 -07:00
Jamison Lahman  3c5f77f5a4  fix: fetch Custom Models provider names (#10004)             2026-04-08 14:22:42 -07:00
Jamison Lahman  ab4d1dce01  fix: Custom LLM Provider requires a Provider Name (#10003)   2026-04-08 20:33:43 +00:00
Raunak Bhagat   80c928eb58  fix: enable force-delete for last LLM provider (#9998)       2026-04-08 20:09:38 +00:00
Raunak Bhagat   77528876b1  chore: delete unused files (#10001)                          2026-04-08 19:53:47 +00:00
41 changed files with 1236 additions and 974 deletions

View File

@@ -87,6 +87,44 @@ PROVIDER_DISPLAY_NAMES: dict[str, str] = {
"gemini": "Gemini",
"stability": "Stability",
"writer": "Writer",
# Custom provider display names (used in the custom provider picker)
"aiml": "AI/ML",
"assemblyai": "AssemblyAI",
"aws_polly": "AWS Polly",
"azure_ai": "Azure AI",
"chatgpt": "ChatGPT",
"cohere_chat": "Cohere Chat",
"datarobot": "DataRobot",
"deepgram": "Deepgram",
"deepinfra": "DeepInfra",
"elevenlabs": "ElevenLabs",
"fal_ai": "fal.ai",
"featherless_ai": "Featherless AI",
"fireworks_ai": "Fireworks AI",
"friendliai": "FriendliAI",
"gigachat": "GigaChat",
"github_copilot": "GitHub Copilot",
"gradient_ai": "Gradient AI",
"huggingface": "HuggingFace",
"jina_ai": "Jina AI",
"lambda_ai": "Lambda AI",
"llamagate": "LlamaGate",
"meta_llama": "Meta Llama",
"minimax": "MiniMax",
"nlp_cloud": "NLP Cloud",
"nvidia_nim": "NVIDIA NIM",
"oci": "OCI",
"ovhcloud": "OVHcloud",
"palm": "PaLM",
"publicai": "PublicAI",
"runwayml": "RunwayML",
"sambanova": "SambaNova",
"together_ai": "Together AI",
"vercel_ai_gateway": "Vercel AI Gateway",
"volcengine": "Volcengine",
"wandb": "W&B",
"watsonx": "IBM watsonx",
"zai": "ZAI",
}
# Map vendors to their brand names (used for provider_display_name generation)
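
Slugs absent from this map fall back to a title-cased form of the snake_case slug (see the picker endpoint later in this diff). A minimal TypeScript mirror of that fallback, using a hypothetical slug, shows why curated entries like "fal.ai" are needed:

function fallbackDisplayName(slug: string): string {
  // Mirrors the backend fallback `name.replace("_", " ").title()`.
  return slug
    .split("_")
    .map((word) => word.charAt(0).toUpperCase() + word.slice(1).toLowerCase())
    .join(" ");
}

fallbackDisplayName("novita_ai"); // "Novita Ai" -- curated names avoid this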

View File

@@ -40,11 +40,15 @@ from onyx.db.models import User
from onyx.db.persona import user_can_access_persona
from onyx.error_handling.error_codes import OnyxErrorCode
from onyx.error_handling.exceptions import OnyxError
from onyx.llm.constants import PROVIDER_DISPLAY_NAMES
from onyx.llm.constants import WELL_KNOWN_PROVIDER_NAMES
from onyx.llm.factory import get_default_llm
from onyx.llm.factory import get_llm
from onyx.llm.factory import get_max_input_tokens_from_llm_provider
from onyx.llm.utils import get_bedrock_token_limit
from onyx.llm.utils import get_llm_contextual_cost
from onyx.llm.utils import get_max_input_tokens
from onyx.llm.utils import litellm_thinks_model_supports_image_input
from onyx.llm.utils import test_llm
from onyx.llm.well_known_providers.auto_update_service import (
fetch_llm_recommendations_from_github,
@@ -60,6 +64,9 @@ from onyx.server.manage.llm.models import BedrockFinalModelResponse
from onyx.server.manage.llm.models import BedrockModelsRequest
from onyx.server.manage.llm.models import BifrostFinalModelResponse
from onyx.server.manage.llm.models import BifrostModelsRequest
from onyx.server.manage.llm.models import CustomProviderModelResponse
from onyx.server.manage.llm.models import CustomProviderModelsRequest
from onyx.server.manage.llm.models import CustomProviderOption
from onyx.server.manage.llm.models import DefaultModel
from onyx.server.manage.llm.models import LitellmFinalModelResponse
from onyx.server.manage.llm.models import LitellmModelDetails
@@ -250,6 +257,181 @@ def _validate_llm_provider_change(
)
@admin_router.get("/custom-provider-names")
def fetch_custom_provider_names(
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
) -> list[CustomProviderOption]:
"""Returns the sorted list of LiteLLM provider names that can be used
with the custom provider modal (i.e. everything that is not already
covered by a well-known provider modal)."""
import litellm
well_known = {p.value for p in WELL_KNOWN_PROVIDER_NAMES}
return sorted(
(
CustomProviderOption(
value=name,
label=PROVIDER_DISPLAY_NAMES.get(name, name.replace("_", " ").title()),
)
for name in litellm.models_by_provider.keys()
if name not in well_known
),
key=lambda o: o.label.lower(),
)
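
For illustration, a client could consume this endpoint as follows (a sketch only; the response items match the CustomProviderOption shape defined later in this diff):

// Hypothetical helper; assumes an authenticated admin session.
async function loadCustomProviderNames(): Promise<
  { value: string; label: string }[]
> {
  const response = await fetch("/api/admin/llm/custom-provider-names");
  if (!response.ok) throw new Error(await response.text());
  return response.json(); // already sorted by label on the server
}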
@admin_router.post("/custom/available-models")
def fetch_custom_provider_models(
request: CustomProviderModelsRequest,
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
) -> list[CustomProviderModelResponse]:
"""Fetch models for a custom provider.
When ``api_base`` is provided, the endpoint hits the provider's
OpenAI-compatible ``/v1/models`` (or ``/{api_version}/models``) to
discover live models. Otherwise it falls back to the static list
that LiteLLM ships for the given provider slug.
In both cases the response is enriched with metadata from LiteLLM
(display name, max input tokens, vision support) when available.
"""
if request.api_base:
return _fetch_custom_models_from_api(
provider=request.provider,
api_base=request.api_base,
api_key=request.api_key,
api_version=request.api_version,
)
return _fetch_custom_models_from_litellm(request.provider)
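
A hedged sketch of calling this endpoint with a live ``api_base`` (the provider slug, URL, and key below are example values):

// Hypothetical request; setting api_base triggers live discovery.
async function loadCustomModels() {
  const response = await fetch("/api/admin/llm/custom/available-models", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      provider: "deepseek", // LiteLLM provider slug
      api_base: "https://api.example.com", // live /v1/models discovery
      api_key: "sk-example",
      api_version: null, // null defaults to /v1
    }),
  });
  if (!response.ok) throw new Error(await response.text());
  return response.json(); // CustomProviderModelResponse[]
}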
def _enrich_custom_model(
name: str,
provider: str,
*,
api_display_name: str | None = None,
api_max_input_tokens: int | None = None,
api_supports_image_input: bool | None = None,
) -> CustomProviderModelResponse:
"""Build a ``CustomProviderModelResponse`` enriched with LiteLLM metadata.
Values explicitly provided by the source API take precedence; LiteLLM
metadata is used as a fallback.
"""
from onyx.llm.model_name_parser import parse_litellm_model_name
# LiteLLM keys are typically "provider/model"
litellm_key = f"{provider}/{name}" if not name.startswith(f"{provider}/") else name
parsed = parse_litellm_model_name(litellm_key)
# display_name: prefer API-provided name, then LiteLLM enrichment, then raw name
if api_display_name and api_display_name != name:
display_name = api_display_name
else:
display_name = parsed.display_name or name
# max_input_tokens: prefer API value, then LiteLLM lookup
if api_max_input_tokens is not None:
max_input_tokens: int | None = api_max_input_tokens
else:
try:
max_input_tokens = get_max_input_tokens(name, provider)
except Exception:
max_input_tokens = None
# supports_image_input: prefer API value, then LiteLLM inference
if api_supports_image_input is not None:
supports_image = api_supports_image_input
else:
supports_image = litellm_thinks_model_supports_image_input(name, provider)
return CustomProviderModelResponse(
name=name,
display_name=display_name,
max_input_tokens=max_input_tokens,
supports_image_input=supports_image,
)
def _fetch_custom_models_from_api(
provider: str,
api_base: str,
api_key: str | None,
api_version: str | None,
) -> list[CustomProviderModelResponse]:
"""Hit an OpenAI-compatible ``/v1/models`` (or versioned variant)."""
cleaned = api_base.strip().rstrip("/")
if api_version:
url = f"{cleaned}/{api_version.strip().strip('/')}/models"
elif cleaned.endswith("/v1"):
url = f"{cleaned}/models"
else:
url = f"{cleaned}/v1/models"
response_json = _get_openai_compatible_models_response(
url=url,
source_name="Custom provider",
api_key=api_key,
)
models = response_json.get("data", [])
if not isinstance(models, list) or len(models) == 0:
raise OnyxError(
OnyxErrorCode.VALIDATION_ERROR,
"No models found from the provider's API.",
)
results: list[CustomProviderModelResponse] = []
for model in models:
try:
model_id = model.get("id", "")
if not model_id:
continue
if is_embedding_model(model_id):
continue
results.append(
_enrich_custom_model(
model_id,
provider,
api_display_name=model.get("name"),
api_max_input_tokens=model.get("context_length"),
api_supports_image_input=infer_vision_support(model_id),
)
)
except Exception as e:
logger.warning(
"Failed to parse custom provider model entry",
extra={"error": str(e), "item": str(model)[:1000]},
)
if not results:
raise OnyxError(
OnyxErrorCode.VALIDATION_ERROR,
"No compatible models found from the provider's API.",
)
return sorted(results, key=lambda m: m.name.lower())
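
For reference, the URL construction above resolves as follows (bases are hypothetical):

// api_base = "https://api.example.com"     -> https://api.example.com/v1/models
// api_base = "https://api.example.com/v1"  -> https://api.example.com/v1/models (no duplicate /v1)
// api_base = "https://api.example.com" with api_version = "v2"
//                                          -> https://api.example.com/v2/models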
def _fetch_custom_models_from_litellm(
provider: str,
) -> list[CustomProviderModelResponse]:
"""Fall back to litellm's static ``models_by_provider`` mapping."""
import litellm
model_names = litellm.models_by_provider.get(provider)
if model_names is None:
raise OnyxError(
OnyxErrorCode.NOT_FOUND,
f"Unknown provider: {provider}",
)
return sorted(
(_enrich_custom_model(name, provider) for name in model_names),
key=lambda m: m.name.lower(),
)
@admin_router.get("/built-in/options")
def fetch_llm_options(
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),

View File

@@ -28,6 +28,13 @@ if TYPE_CHECKING:
T = TypeVar("T", "LLMProviderDescriptor", "LLMProviderView", "VisionProviderResponse")
class CustomProviderOption(BaseModel):
"""A provider slug + human-friendly label for the custom-provider picker."""
value: str
label: str
class TestLLMRequest(BaseModel):
# provider level
id: int | None = None
@@ -470,6 +477,21 @@ class BifrostFinalModelResponse(BaseModel):
supports_reasoning: bool
# Custom provider dynamic models fetch
class CustomProviderModelsRequest(BaseModel):
provider: str # LiteLLM provider slug (e.g. "deepseek", "fireworks_ai")
api_base: str | None = None # If set, fetches live models via /v1/models
api_key: str | None = None
api_version: str | None = None # If set, used to construct the models URL
class CustomProviderModelResponse(BaseModel):
name: str
display_name: str
max_input_tokens: int | None
supports_image_input: bool
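
An illustrative response body for the custom models endpoint (values are hypothetical):

[
  {
    "name": "deepseek-chat",
    "display_name": "DeepSeek Chat",
    "max_input_tokens": 65536,
    "supports_image_input": false
  }
]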
# OpenAI Compatible dynamic models fetch
class OpenAICompatibleModelsRequest(BaseModel):
api_base: str

View File

@@ -1,163 +0,0 @@
"use client";
import { ArrayHelpers, FieldArray, FormikProps, useField } from "formik";
import { ModelConfiguration } from "@/interfaces/llm";
import { ManualErrorMessage, TextFormField } from "@/components/Field";
import { useEffect, useState } from "react";
import CreateButton from "@/refresh-components/buttons/CreateButton";
import { Button } from "@opal/components";
import { SvgX } from "@opal/icons";
import Text from "@/refresh-components/texts/Text";
function ModelConfigurationRow({
name,
index,
arrayHelpers,
formikProps,
setError,
}: {
name: string;
index: number;
arrayHelpers: ArrayHelpers;
formikProps: FormikProps<{ model_configurations: ModelConfiguration[] }>;
setError: (value: string | null) => void;
}) {
const [, input] = useField(`${name}[${index}]`);
useEffect(() => {
if (!input.touched) return;
setError((input.error as { name: string } | undefined)?.name ?? null);
}, [input.touched, input.error]);
return (
<div key={index} className="flex flex-row w-full gap-4">
<div
className={`flex flex-[2] ${
input.touched && input.error ? "border-2 border-error rounded-lg" : ""
}`}
>
<TextFormField
name={`${name}[${index}].name`}
label=""
placeholder={`model-name-${index + 1}`}
removeLabel
hideError
/>
</div>
<div className="flex flex-[1]">
<TextFormField
name={`${name}[${index}].max_input_tokens`}
label=""
placeholder="Default"
removeLabel
hideError
type="number"
min={1}
/>
</div>
<div className="flex flex-col justify-center">
<Button
disabled={formikProps.values.model_configurations.length <= 1}
onClick={() => {
if (formikProps.values.model_configurations.length > 1) {
setError(null);
arrayHelpers.remove(index);
}
}}
icon={SvgX}
prominence="secondary"
/>
</div>
</div>
);
}
export function ModelConfigurationField({
name,
formikProps,
}: {
name: string;
formikProps: FormikProps<{ model_configurations: ModelConfiguration[] }>;
}) {
const [errorMap, setErrorMap] = useState<{ [index: number]: string }>({});
const [finalError, setFinalError] = useState<string | undefined>();
return (
<div className="pb-5 flex flex-col w-full">
<div className="flex flex-col">
<Text as="p" mainUiAction>
Model Configurations
</Text>
<Text as="p" secondaryBody text03>
Add models and customize the number of input tokens that they accept.
</Text>
</div>
<FieldArray
name={name}
render={(arrayHelpers: ArrayHelpers) => (
<div className="flex flex-col">
<div className="flex flex-col gap-4 py-4">
<div className="flex">
<Text as="p" secondaryBody className="flex flex-[2]">
Model Name
</Text>
<Text as="p" secondaryBody className="flex flex-[1]">
Max Input Tokens
</Text>
<div className="w-10" />
</div>
{formikProps.values.model_configurations.map((_, index) => (
<ModelConfigurationRow
key={index}
name={name}
formikProps={formikProps}
arrayHelpers={arrayHelpers}
index={index}
setError={(message: string | null) => {
const newErrors = { ...errorMap };
if (message) {
newErrors[index] = message;
} else {
delete newErrors[index];
for (const key in newErrors) {
const numKey = Number(key);
if (numKey > index) {
const errorValue = newErrors[key];
if (errorValue !== undefined) {
// Ensure the value is not undefined
newErrors[numKey - 1] = errorValue;
delete newErrors[numKey];
}
}
}
}
setErrorMap(newErrors);
setFinalError(
Object.values(newErrors).filter((item) => item)[0]
);
}}
/>
))}
</div>
{finalError && (
<ManualErrorMessage>{finalError}</ManualErrorMessage>
)}
<div className="mt-3">
<CreateButton
onClick={() => {
arrayHelpers.push({
name: "",
is_visible: true,
// Use null so Yup.number().nullable() accepts empty inputs
max_input_tokens: null,
});
}}
>
Add New
</CreateButton>
</div>
</div>
)}
/>
</div>
);
}

View File

@@ -0,0 +1,18 @@
import { defaultTailwindCSS } from "@/components/icons/icons";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { IconProps } from "@opal/types";
export interface ModelIconProps extends IconProps {
provider: string;
modelName?: string;
}
export default function ModelIcon({
provider,
modelName,
size = 16,
className = defaultTailwindCSS,
}: ModelIconProps) {
const Icon = getModelIcon(provider, modelName);
return <Icon size={size} className={className} />;
}
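
Usage is a drop-in replacement for the removed ProviderIcon (next file); the model name below is illustrative:

<ModelIcon provider="bedrock" modelName="anthropic.claude-3-5-sonnet" size={16} />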

View File

@@ -1,17 +0,0 @@
import { defaultTailwindCSS, IconProps } from "@/components/icons/icons";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
export interface ProviderIconProps extends IconProps {
provider: string;
modelName?: string;
}
export const ProviderIcon = ({
provider,
modelName,
size = 16,
className = defaultTailwindCSS,
}: ProviderIconProps) => {
const Icon = getProviderIcon(provider, modelName);
return <Icon size={size} className={className} />;
};

View File

@@ -1,622 +0,0 @@
import { JSX } from "react";
import {
AnthropicIcon,
AmazonIcon,
AzureIcon,
CPUIcon,
MicrosoftIconSVG,
MistralIcon,
MetaIcon,
GeminiIcon,
IconProps,
DeepseekIcon,
OpenAISVG,
QwenIcon,
OllamaIcon,
LMStudioIcon,
LiteLLMIcon,
ZAIIcon,
} from "@/components/icons/icons";
import {
OllamaModelResponse,
OpenRouterModelResponse,
BedrockModelResponse,
LMStudioModelResponse,
LiteLLMProxyModelResponse,
BifrostModelResponse,
ModelConfiguration,
LLMProviderName,
BedrockFetchParams,
OllamaFetchParams,
LMStudioFetchParams,
OpenRouterFetchParams,
LiteLLMProxyFetchParams,
BifrostFetchParams,
OpenAICompatibleFetchParams,
OpenAICompatibleModelResponse,
} from "@/interfaces/llm";
import { SvgAws, SvgBifrost, SvgOpenrouter, SvgPlug } from "@opal/icons";
// Aggregator providers that host models from multiple vendors
export const AGGREGATOR_PROVIDERS = new Set([
"bedrock",
"bedrock_converse",
"openrouter",
"ollama_chat",
"lm_studio",
"litellm_proxy",
"bifrost",
"openai_compatible",
"vertex_ai",
]);
export const getProviderIcon = (
providerName: string,
modelName?: string
): (({ size, className }: IconProps) => JSX.Element) => {
const iconMap: Record<
string,
({ size, className }: IconProps) => JSX.Element
> = {
amazon: AmazonIcon,
phi: MicrosoftIconSVG,
mistral: MistralIcon,
ministral: MistralIcon,
llama: MetaIcon,
ollama_chat: OllamaIcon,
ollama: OllamaIcon,
lm_studio: LMStudioIcon,
gemini: GeminiIcon,
deepseek: DeepseekIcon,
claude: AnthropicIcon,
anthropic: AnthropicIcon,
openai: OpenAISVG,
// Azure OpenAI should display the Azure logo
azure: AzureIcon,
microsoft: MicrosoftIconSVG,
meta: MetaIcon,
google: GeminiIcon,
qwen: QwenIcon,
qwq: QwenIcon,
zai: ZAIIcon,
// Cloud providers - use AWS icon for Bedrock
bedrock: SvgAws,
bedrock_converse: SvgAws,
openrouter: SvgOpenrouter,
litellm_proxy: LiteLLMIcon,
bifrost: SvgBifrost,
openai_compatible: SvgPlug,
vertex_ai: GeminiIcon,
};
const lowerProviderName = providerName.toLowerCase();
// For aggregator providers (bedrock, openrouter, vertex_ai), prioritize showing
// the vendor icon based on model name (e.g., show Claude icon for Bedrock Claude models)
if (AGGREGATOR_PROVIDERS.has(lowerProviderName) && modelName) {
const lowerModelName = modelName.toLowerCase();
for (const [key, icon] of Object.entries(iconMap)) {
if (lowerModelName.includes(key)) {
return icon;
}
}
}
// Check if provider name directly matches an icon
if (lowerProviderName in iconMap) {
const icon = iconMap[lowerProviderName];
if (icon) {
return icon;
}
}
// For non-aggregator providers, check if model name contains any of the keys
if (modelName) {
const lowerModelName = modelName.toLowerCase();
for (const [key, icon] of Object.entries(iconMap)) {
if (lowerModelName.includes(key)) {
return icon;
}
}
}
// Fallback to CPU icon if no matches
return CPUIcon;
};
export const isAnthropic = (provider: string, modelName?: string) =>
provider === LLMProviderName.ANTHROPIC ||
!!modelName?.toLowerCase().includes("claude");
/**
* Fetches Bedrock models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchBedrockModels = async (
params: BedrockFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
if (!params.aws_region_name) {
return { models: [], error: "AWS region is required" };
}
try {
const response = await fetch("/api/admin/llm/bedrock/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
aws_region_name: params.aws_region_name,
aws_access_key_id: params.aws_access_key_id,
aws_secret_access_key: params.aws_secret_access_key,
aws_bearer_token_bedrock: params.aws_bearer_token_bedrock,
provider_name: params.provider_name,
}),
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch {
// ignore JSON parsing errors
}
return { models: [], error: errorMessage };
}
const data: BedrockModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: false,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: false,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches Ollama models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchOllamaModels = async (
params: OllamaFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
try {
const response = await fetch("/api/admin/llm/ollama/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
provider_name: params.provider_name,
}),
signal: params.signal,
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch {
// ignore JSON parsing errors
}
return { models: [], error: errorMessage };
}
const data: OllamaModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: false,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches OpenRouter models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchOpenRouterModels = async (
params: OpenRouterFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
const apiKey = params.api_key;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
if (!apiKey) {
return { models: [], error: "API Key is required" };
}
try {
const response = await fetch("/api/admin/llm/openrouter/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: apiKey,
provider_name: params.provider_name,
}),
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch (jsonError) {
console.warn(
"Failed to parse OpenRouter model fetch error response",
jsonError
);
}
return { models: [], error: errorMessage };
}
const data: OpenRouterModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: false,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches LM Studio models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchLMStudioModels = async (
params: LMStudioFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
try {
const response = await fetch("/api/admin/llm/lm-studio/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: params.api_key,
api_key_changed: params.api_key_changed ?? false,
provider_name: params.provider_name,
}),
signal: params.signal,
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch (jsonError) {
console.warn(
"Failed to parse LM Studio model fetch error response",
jsonError
);
}
return { models: [], error: errorMessage };
}
const data: LMStudioModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: modelData.supports_reasoning,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches Bifrost models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchBifrostModels = async (
params: BifrostFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
try {
const response = await fetch("/api/admin/llm/bifrost/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: params.api_key,
provider_name: params.provider_name,
}),
signal: params.signal,
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch (jsonError) {
console.warn(
"Failed to parse Bifrost model fetch error response",
jsonError
);
}
return { models: [], error: errorMessage };
}
const data: BifrostModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: modelData.supports_reasoning,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches models from a generic OpenAI-compatible server.
* Uses snake_case params to match API structure.
*/
export const fetchOpenAICompatibleModels = async (
params: OpenAICompatibleFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
try {
const response = await fetch(
"/api/admin/llm/openai-compatible/available-models",
{
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: params.api_key,
provider_name: params.provider_name,
}),
signal: params.signal,
}
);
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch {
// ignore JSON parsing errors
}
return { models: [], error: errorMessage };
}
const data: OpenAICompatibleModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: modelData.supports_reasoning,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches LiteLLM Proxy models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchLiteLLMProxyModels = async (
params: LiteLLMProxyFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
const apiKey = params.api_key;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
if (!apiKey) {
return { models: [], error: "API Key is required" };
}
try {
const response = await fetch("/api/admin/llm/litellm/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: apiKey,
provider_name: params.provider_name,
}),
signal: params.signal,
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch {
// ignore JSON parsing errors
}
return { models: [], error: errorMessage };
}
const data: LiteLLMProxyModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.model_name,
display_name: modelData.model_name,
is_visible: true,
max_input_tokens: null,
supports_image_input: false,
supports_reasoning: false,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches models for a provider. Accepts form values directly and maps them
* to the expected fetch params format internally.
*/
export const fetchModels = async (
providerName: string,
formValues: {
api_base?: string;
api_key?: string;
api_key_changed?: boolean;
name?: string;
custom_config?: Record<string, string>;
model_configurations?: ModelConfiguration[];
},
signal?: AbortSignal
) => {
const customConfig = formValues.custom_config || {};
switch (providerName) {
case LLMProviderName.BEDROCK:
return fetchBedrockModels({
aws_region_name: customConfig.AWS_REGION_NAME || "",
aws_access_key_id: customConfig.AWS_ACCESS_KEY_ID,
aws_secret_access_key: customConfig.AWS_SECRET_ACCESS_KEY,
aws_bearer_token_bedrock: customConfig.AWS_BEARER_TOKEN_BEDROCK,
provider_name: formValues.name,
});
case LLMProviderName.OLLAMA_CHAT:
return fetchOllamaModels({
api_base: formValues.api_base,
provider_name: formValues.name,
signal,
});
case LLMProviderName.LM_STUDIO:
return fetchLMStudioModels({
api_base: formValues.api_base,
api_key: formValues.custom_config?.LM_STUDIO_API_KEY,
api_key_changed: formValues.api_key_changed ?? false,
provider_name: formValues.name,
signal,
});
case LLMProviderName.OPENROUTER:
return fetchOpenRouterModels({
api_base: formValues.api_base,
api_key: formValues.api_key,
provider_name: formValues.name,
});
case LLMProviderName.LITELLM_PROXY:
return fetchLiteLLMProxyModels({
api_base: formValues.api_base,
api_key: formValues.api_key,
provider_name: formValues.name,
signal,
});
case LLMProviderName.BIFROST:
return fetchBifrostModels({
api_base: formValues.api_base,
api_key: formValues.api_key,
provider_name: formValues.name,
signal,
});
case LLMProviderName.OPENAI_COMPATIBLE:
return fetchOpenAICompatibleModels({
api_base: formValues.api_base,
api_key: formValues.api_key,
provider_name: formValues.name,
signal,
});
default:
return { models: [], error: `Unknown provider: ${providerName}` };
}
};
export function canProviderFetchModels(providerName?: string) {
if (!providerName) return false;
switch (providerName) {
case LLMProviderName.BEDROCK:
case LLMProviderName.OLLAMA_CHAT:
case LLMProviderName.LM_STUDIO:
case LLMProviderName.OPENROUTER:
case LLMProviderName.LITELLM_PROXY:
case LLMProviderName.BIFROST:
case LLMProviderName.OPENAI_COMPATIBLE:
return true;
default:
return false;
}
}

View File

@@ -401,7 +401,7 @@ export default function VoiceProviderSetupModal({
options={existingApiKeyOptions}
separatorLabel="Reuse OpenAI API Keys"
strict={false}
showAddPrefix
createPrefix="Add"
/>
) : (
<PasswordInputTypeIn

View File

@@ -5,7 +5,7 @@ import { Button } from "@opal/components";
import { Text } from "@opal/components";
import { ContentAction } from "@opal/layouts";
import { SvgEyeOff, SvgX } from "@opal/icons";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
import { getModelIcon } from "@/lib/llmConfig/providers";
import AgentMessage, {
AgentMessageProps,
} from "@/app/app/message/messageComponents/AgentMessage";
@@ -71,7 +71,7 @@ export default function MultiModelPanel({
errorStackTrace,
errorDetails,
}: MultiModelPanelProps) {
const ProviderIcon = getProviderIcon(provider, modelName);
const ModelIcon = getModelIcon(provider, modelName);
const handlePanelClick = useCallback(() => {
if (!isHidden && !isPreferred) onSelect();
@@ -88,7 +88,7 @@ export default function MultiModelPanel({
sizePreset="main-ui"
variant="body"
paddingVariant="lg"
icon={ProviderIcon}
icon={ModelIcon}
title={isHidden ? markdown(`~~${displayName}~~`) : displayName}
rightChildren={
<div className="flex items-center gap-1 px-2">

View File

@@ -18,7 +18,7 @@ import {
isRecommendedModel,
} from "@/app/craft/onboarding/constants";
import { ToggleWarningModal } from "./ToggleWarningModal";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { Section } from "@/layouts/general-layouts";
import {
Accordion,
@@ -365,9 +365,7 @@ export function BuildLLMPopover({
const isExpanded = expandedGroups.includes(
group.providerKey
);
const ProviderIcon = getProviderIcon(
group.providerKey
);
const ModelIcon = getModelIcon(group.providerKey);
return (
<AccordionItem
@@ -379,7 +377,7 @@ export function BuildLLMPopover({
<AccordionTrigger className="flex items-center rounded-08 hover:no-underline hover:bg-background-tint-02 group [&>svg]:hidden w-full py-1">
<div className="flex items-center gap-1 shrink-0">
<div className="flex items-center justify-center size-5 shrink-0">
<ProviderIcon size={16} />
<ModelIcon size={16} />
</div>
<Text
secondaryBody

View File

@@ -48,7 +48,7 @@ import NotAllowedModal from "@/app/craft/onboarding/components/NotAllowedModal";
import { useOnboarding } from "@/app/craft/onboarding/BuildOnboardingProvider";
import { useLLMProviders } from "@/hooks/useLLMProviders";
import { useUser } from "@/providers/UserProvider";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
import { getModelIcon } from "@/lib/llmConfig/providers";
import {
getBuildUserPersona,
getPersonaInfo,
@@ -475,10 +475,10 @@ export default function BuildConfigPage() {
>
{pendingLlmSelection?.provider &&
(() => {
const ProviderIcon = getProviderIcon(
const ModelIcon = getModelIcon(
pendingLlmSelection.provider
);
return <ProviderIcon className="w-4 h-4" />;
return <ModelIcon className="w-4 h-4" />;
})()}
<Text mainUiAction>{pendingLlmDisplayName}</Text>
<SvgChevronDown className="w-4 h-4 text-text-03" />

View File

@@ -3,14 +3,14 @@
import { useMemo } from "react";
import { parseLlmDescriptor, structureValue } from "@/lib/llmConfig/utils";
import { DefaultModel, LLMProviderDescriptor } from "@/interfaces/llm";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
import { getModelIcon } from "@/lib/llmConfig/providers";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import { createIcon } from "@/components/icons/icons";
interface LLMOption {
name: string;
value: string;
icon: ReturnType<typeof getProviderIcon>;
icon: ReturnType<typeof getModelIcon>;
modelName: string;
providerName: string;
provider: string;
@@ -85,7 +85,7 @@ export default function LLMSelector({
provider.provider,
modelConfiguration.name
),
icon: getProviderIcon(provider.provider, modelConfiguration.name),
icon: getModelIcon(provider.provider, modelConfiguration.name),
modelName: modelConfiguration.name,
providerName: provider.name,
provider: provider.provider,

View File

@@ -160,6 +160,35 @@ export function useWellKnownLLMProvider(providerName: LLMProviderName) {
};
}
export interface CustomProviderOption {
value: string;
label: string;
}
/**
* Fetches the list of LiteLLM provider names available for custom provider
* configuration (i.e. providers that don't have a dedicated well-known modal).
*
* Hits `GET /api/admin/llm/custom-provider-names`.
*/
export function useCustomProviderNames() {
const { data, error, isLoading } = useSWR<CustomProviderOption[]>(
SWR_KEYS.customProviderNames,
errorHandlingFetcher,
{
revalidateOnFocus: false,
revalidateIfStale: false,
dedupingInterval: 60000,
}
);
return {
customProviderNames: data ?? null,
isLoading,
error,
};
}
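
A minimal sketch of consuming the hook in a picker (the component below is hypothetical):

function CustomProviderPicker() {
  const { customProviderNames, isLoading, error } = useCustomProviderNames();
  if (isLoading) return <p>Loading…</p>;
  if (error || !customProviderNames) return null;
  return (
    <select>
      {customProviderNames.map((option) => (
        <option key={option.value} value={option.value}>
          {option.label}
        </option>
      ))}
    </select>
  );
}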
export function useWellKnownLLMProviders() {
const {
data: wellKnownLLMProviders,

View File

@@ -1,7 +1,4 @@
import type {
OnboardingState,
OnboardingActions,
} from "@/interfaces/onboarding";
import type { OnboardingActions } from "@/interfaces/onboarding";
export enum LLMProviderName {
OPENAI = "openai",

View File

@@ -32,7 +32,7 @@ import {
PersonaLabel,
} from "@/app/admin/agents/interfaces";
import { DefaultModel, LLMProviderDescriptor } from "@/interfaces/llm";
import { isAnthropic } from "@/app/admin/configuration/llm/utils";
import { isAnthropic } from "@/lib/llmConfig/svc";
import { getSourceMetadataForSources } from "./sources";
import { AuthType, NEXT_PUBLIC_CLOUD_ENABLED } from "./constants";
import { useUser } from "@/providers/UserProvider";

View File

@@ -14,8 +14,28 @@ import {
SvgLitellm,
SvgLmStudio,
} from "@opal/icons";
import {
MicrosoftIconSVG,
MistralIcon,
MetaIcon,
DeepseekIcon,
QwenIcon,
ZAIIcon,
} from "@/components/icons/icons";
import { LLMProviderName } from "@/interfaces/llm";
export const AGGREGATOR_PROVIDERS = new Set([
LLMProviderName.BEDROCK,
"bedrock_converse",
LLMProviderName.OPENROUTER,
LLMProviderName.OLLAMA_CHAT,
LLMProviderName.LM_STUDIO,
LLMProviderName.LITELLM_PROXY,
LLMProviderName.BIFROST,
LLMProviderName.OPENAI_COMPATIBLE,
LLMProviderName.VERTEX_AI,
]);
const PROVIDER_ICONS: Record<string, IconFunctionComponent> = {
[LLMProviderName.OPENAI]: SvgOpenai,
[LLMProviderName.ANTHROPIC]: SvgClaude,
@@ -81,3 +101,80 @@ export function getProviderDisplayName(providerName: string): string {
export function getProviderIcon(providerName: string): IconFunctionComponent {
return PROVIDER_ICONS[providerName] ?? SvgCpu;
}
// ---------------------------------------------------------------------------
// Model-aware icon resolver (legacy icon set)
// ---------------------------------------------------------------------------
const MODEL_ICON_MAP: Record<string, IconFunctionComponent> = {
[LLMProviderName.OPENAI]: SvgOpenai,
[LLMProviderName.ANTHROPIC]: SvgClaude,
[LLMProviderName.OLLAMA_CHAT]: SvgOllama,
[LLMProviderName.LM_STUDIO]: SvgLmStudio,
[LLMProviderName.OPENROUTER]: SvgOpenrouter,
[LLMProviderName.VERTEX_AI]: SvgGemini,
[LLMProviderName.BEDROCK]: SvgAws,
[LLMProviderName.LITELLM_PROXY]: SvgLitellm,
[LLMProviderName.BIFROST]: SvgBifrost,
[LLMProviderName.OPENAI_COMPATIBLE]: SvgPlug,
amazon: SvgAws,
phi: MicrosoftIconSVG,
mistral: MistralIcon,
ministral: MistralIcon,
llama: MetaIcon,
ollama: SvgOllama,
gemini: SvgGemini,
deepseek: DeepseekIcon,
claude: SvgClaude,
azure: SvgAzure,
microsoft: MicrosoftIconSVG,
meta: MetaIcon,
google: SvgGemini,
qwen: QwenIcon,
qwq: QwenIcon,
zai: ZAIIcon,
bedrock_converse: SvgAws,
};
/**
* Model-aware icon resolver that checks both provider name and model name
* to pick the most specific icon (e.g. Claude icon for a Bedrock Claude model).
*/
export const getModelIcon = (
providerName: string,
modelName?: string
): IconFunctionComponent => {
const lowerProviderName = providerName.toLowerCase();
// For aggregator providers, prioritize showing the vendor icon based on model name
if (AGGREGATOR_PROVIDERS.has(lowerProviderName) && modelName) {
const lowerModelName = modelName.toLowerCase();
for (const [key, icon] of Object.entries(MODEL_ICON_MAP)) {
if (lowerModelName.includes(key)) {
return icon;
}
}
}
// Check if provider name directly matches an icon
if (lowerProviderName in MODEL_ICON_MAP) {
const icon = MODEL_ICON_MAP[lowerProviderName];
if (icon) {
return icon;
}
}
// For non-aggregator providers, check if model name contains any of the keys
if (modelName) {
const lowerModelName = modelName.toLowerCase();
for (const [key, icon] of Object.entries(MODEL_ICON_MAP)) {
if (lowerModelName.includes(key)) {
return icon;
}
}
}
// Fallback to CPU icon if no matches
return SvgCpu;
};
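
Example lookups (model names are illustrative):

getModelIcon("bedrock", "anthropic.claude-3-5-sonnet"); // SvgClaude: vendor match wins for aggregators
getModelIcon("openai"); // SvgOpenai: direct provider match
getModelIcon("mystery_provider"); // SvgCpu fallback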

View File

@@ -1,5 +1,5 @@
/**
* LLM action functions for mutations.
* LLM action functions for mutations and model fetching.
*
* These are async functions for one-off actions that don't need SWR caching.
*
@@ -7,12 +7,31 @@
* - /api/admin/llm/test/default - Test the default LLM provider connection
* - /api/admin/llm/default - Set the default LLM model
* - /api/admin/llm/provider/{id} - Delete an LLM provider
* - /api/admin/llm/{provider}/available-models - Fetch available models for a provider
*/
import {
LLM_ADMIN_URL,
LLM_PROVIDERS_ADMIN_URL,
} from "@/lib/llmConfig/constants";
import {
OllamaModelResponse,
OpenRouterModelResponse,
BedrockModelResponse,
LMStudioModelResponse,
LiteLLMProxyModelResponse,
BifrostModelResponse,
ModelConfiguration,
LLMProviderName,
BedrockFetchParams,
OllamaFetchParams,
LMStudioFetchParams,
OpenRouterFetchParams,
LiteLLMProxyFetchParams,
BifrostFetchParams,
OpenAICompatibleFetchParams,
OpenAICompatibleModelResponse,
} from "@/interfaces/llm";
/**
* Test the default LLM provider.
@@ -57,15 +76,522 @@ export async function setDefaultLlmModel(
/**
* Delete an LLM provider.
* @param providerId - The provider ID to delete
* @param force - Force delete even if this is the default provider
* @throws Error with the detail message from the API on failure
*/
export async function deleteLlmProvider(providerId: number): Promise<void> {
const response = await fetch(`${LLM_PROVIDERS_ADMIN_URL}/${providerId}`, {
method: "DELETE",
});
export async function deleteLlmProvider(
providerId: number,
force = false
): Promise<void> {
const url = force
? `${LLM_PROVIDERS_ADMIN_URL}/${providerId}?force=true`
: `${LLM_PROVIDERS_ADMIN_URL}/${providerId}`;
const response = await fetch(url, { method: "DELETE" });
if (!response.ok) {
const errorMsg = (await response.json()).detail;
throw new Error(errorMsg);
}
}
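
A sketch of the new force path (the provider id is hypothetical):

// Hypothetical handler; issues DELETE /api/admin/llm/provider/42?force=true.
async function onForceDelete() {
  try {
    await deleteLlmProvider(42, true);
  } catch (error) {
    console.error("Failed to delete provider:", error);
  }
}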
// ---------------------------------------------------------------------------
// Aggregator providers & helpers
// ---------------------------------------------------------------------------
/** Aggregator providers that host models from multiple vendors. */
export const AGGREGATOR_PROVIDERS = new Set([
"bedrock",
"bedrock_converse",
"openrouter",
"ollama_chat",
"lm_studio",
"litellm_proxy",
"bifrost",
"openai_compatible",
"vertex_ai",
]);
export const isAnthropic = (provider: string, modelName?: string) =>
provider === LLMProviderName.ANTHROPIC ||
!!modelName?.toLowerCase().includes("claude");
// ---------------------------------------------------------------------------
// Model fetching
// ---------------------------------------------------------------------------
/**
* Fetches Bedrock models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchBedrockModels = async (
params: BedrockFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
if (!params.aws_region_name) {
return { models: [], error: "AWS region is required" };
}
try {
const response = await fetch("/api/admin/llm/bedrock/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
aws_region_name: params.aws_region_name,
aws_access_key_id: params.aws_access_key_id,
aws_secret_access_key: params.aws_secret_access_key,
aws_bearer_token_bedrock: params.aws_bearer_token_bedrock,
provider_name: params.provider_name,
}),
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch {
// ignore JSON parsing errors
}
return { models: [], error: errorMessage };
}
const data: BedrockModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: false,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: false,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches Ollama models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchOllamaModels = async (
params: OllamaFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
try {
const response = await fetch("/api/admin/llm/ollama/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
provider_name: params.provider_name,
}),
signal: params.signal,
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch {
// ignore JSON parsing errors
}
return { models: [], error: errorMessage };
}
const data: OllamaModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: false,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches OpenRouter models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchOpenRouterModels = async (
params: OpenRouterFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
const apiKey = params.api_key;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
if (!apiKey) {
return { models: [], error: "API Key is required" };
}
try {
const response = await fetch("/api/admin/llm/openrouter/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: apiKey,
provider_name: params.provider_name,
}),
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch (jsonError) {
console.warn(
"Failed to parse OpenRouter model fetch error response",
jsonError
);
}
return { models: [], error: errorMessage };
}
const data: OpenRouterModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: false,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches LM Studio models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchLMStudioModels = async (
params: LMStudioFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
try {
const response = await fetch("/api/admin/llm/lm-studio/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: params.api_key,
api_key_changed: params.api_key_changed ?? false,
provider_name: params.provider_name,
}),
signal: params.signal,
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch (jsonError) {
console.warn(
"Failed to parse LM Studio model fetch error response",
jsonError
);
}
return { models: [], error: errorMessage };
}
const data: LMStudioModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: modelData.supports_reasoning,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches Bifrost models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchBifrostModels = async (
params: BifrostFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
try {
const response = await fetch("/api/admin/llm/bifrost/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: params.api_key,
provider_name: params.provider_name,
}),
signal: params.signal,
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch (jsonError) {
console.warn(
"Failed to parse Bifrost model fetch error response",
jsonError
);
}
return { models: [], error: errorMessage };
}
const data: BifrostModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: modelData.supports_reasoning,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches models from a generic OpenAI-compatible server.
* Uses snake_case params to match API structure.
*/
export const fetchOpenAICompatibleModels = async (
params: OpenAICompatibleFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
try {
const response = await fetch(
"/api/admin/llm/openai-compatible/available-models",
{
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: params.api_key,
provider_name: params.provider_name,
}),
signal: params.signal,
}
);
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch {
// ignore JSON parsing errors
}
return { models: [], error: errorMessage };
}
const data: OpenAICompatibleModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: modelData.supports_reasoning,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches LiteLLM Proxy models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchLiteLLMProxyModels = async (
params: LiteLLMProxyFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
const apiKey = params.api_key;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
if (!apiKey) {
return { models: [], error: "API Key is required" };
}
try {
const response = await fetch("/api/admin/llm/litellm/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: apiKey,
provider_name: params.provider_name,
}),
signal: params.signal,
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch {
// ignore JSON parsing errors
}
return { models: [], error: errorMessage };
}
const data: LiteLLMProxyModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.model_name,
display_name: modelData.model_name,
is_visible: true,
max_input_tokens: null,
supports_image_input: false,
supports_reasoning: false,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches models for a provider. Accepts form values directly and maps them
* to the expected fetch params format internally.
*/
export const fetchModels = async (
providerName: string,
formValues: {
api_base?: string;
api_key?: string;
api_key_changed?: boolean;
name?: string;
custom_config?: Record<string, string>;
model_configurations?: ModelConfiguration[];
},
signal?: AbortSignal
) => {
const customConfig = formValues.custom_config || {};
switch (providerName) {
case LLMProviderName.BEDROCK:
return fetchBedrockModels({
aws_region_name: customConfig.AWS_REGION_NAME || "",
aws_access_key_id: customConfig.AWS_ACCESS_KEY_ID,
aws_secret_access_key: customConfig.AWS_SECRET_ACCESS_KEY,
aws_bearer_token_bedrock: customConfig.AWS_BEARER_TOKEN_BEDROCK,
provider_name: formValues.name,
});
case LLMProviderName.OLLAMA_CHAT:
return fetchOllamaModels({
api_base: formValues.api_base,
provider_name: formValues.name,
signal,
});
case LLMProviderName.LM_STUDIO:
return fetchLMStudioModels({
api_base: formValues.api_base,
api_key: formValues.custom_config?.LM_STUDIO_API_KEY,
api_key_changed: formValues.api_key_changed ?? false,
provider_name: formValues.name,
signal,
});
case LLMProviderName.OPENROUTER:
return fetchOpenRouterModels({
api_base: formValues.api_base,
api_key: formValues.api_key,
provider_name: formValues.name,
});
case LLMProviderName.LITELLM_PROXY:
return fetchLiteLLMProxyModels({
api_base: formValues.api_base,
api_key: formValues.api_key,
provider_name: formValues.name,
signal,
});
case LLMProviderName.BIFROST:
return fetchBifrostModels({
api_base: formValues.api_base,
api_key: formValues.api_key,
provider_name: formValues.name,
signal,
});
case LLMProviderName.OPENAI_COMPATIBLE:
return fetchOpenAICompatibleModels({
api_base: formValues.api_base,
api_key: formValues.api_key,
provider_name: formValues.name,
signal,
});
default:
return { models: [], error: `Unknown provider: ${providerName}` };
}
};
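
An illustrative call into the dispatcher with hypothetical form values:

async function loadOllamaModels() {
  const { models, error } = await fetchModels(LLMProviderName.OLLAMA_CHAT, {
    api_base: "http://localhost:11434",
    name: "Local Ollama",
  });
  if (error) console.warn(error);
  return models;
}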

View File

@@ -1,38 +0,0 @@
import { LLMProviderResponse, VisionProvider } from "@/interfaces/llm";
import { LLM_ADMIN_URL } from "@/lib/llmConfig/constants";
export async function fetchVisionProviders(): Promise<VisionProvider[]> {
const response = await fetch(`${LLM_ADMIN_URL}/vision-providers`, {
headers: {
"Content-Type": "application/json",
},
});
if (!response.ok) {
throw new Error(
`Failed to fetch vision providers: ${await response.text()}`
);
}
const data = (await response.json()) as LLMProviderResponse<VisionProvider>;
return data.providers;
}
export async function setDefaultVisionProvider(
providerId: number,
visionModel: string
): Promise<void> {
const response = await fetch(`${LLM_ADMIN_URL}/default-vision`, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
provider_id: providerId,
model_name: visionModel,
}),
});
if (!response.ok) {
const errorMsg = await response.text();
throw new Error(errorMsg);
}
}

View File

@@ -32,6 +32,7 @@ export const SWR_KEYS = {
`/api/llm/persona/${personaId}/providers`,
adminLlmProviders: "/api/admin/llm/provider",
llmProvidersWithImageGen: "/api/admin/llm/provider?include_image_gen=true",
customProviderNames: "/api/admin/llm/custom-provider-names",
wellKnownLlmProviders: "/api/admin/llm/built-in/options",
wellKnownLlmProvider: (providerEndpoint: string) =>
`/api/admin/llm/built-in/options/${providerEndpoint}`,

View File

@@ -129,8 +129,9 @@ const InputComboBox = ({
leftSearchIcon = false,
rightSection,
separatorLabel = "Other options",
showAddPrefix = false,
createPrefix,
showOtherOptions = false,
dropdownMaxHeight,
...rest
}: WithoutStyles<InputComboBoxProps>) => {
const inputRef = useRef<HTMLInputElement>(null);
@@ -446,7 +447,8 @@ const InputComboBox = ({
inputValue={inputValue}
allowCreate={!strict}
showCreateOption={showCreateOption}
showAddPrefix={showAddPrefix}
createPrefix={createPrefix}
dropdownMaxHeight={dropdownMaxHeight}
/>
</>

View File

@@ -27,8 +27,10 @@ interface ComboBoxDropdownProps {
allowCreate: boolean;
/** Whether to show create option (pre-computed by parent) */
showCreateOption: boolean;
/** Show "Add" prefix in create option */
showAddPrefix: boolean;
/** Prefix shown before the typed value in the create option (e.g., "Use", "Add") */
createPrefix?: string;
/** Max height of the dropdown in CSS units. Defaults to "15rem". */
dropdownMaxHeight?: string;
}
/**
@@ -60,7 +62,8 @@ export const ComboBoxDropdown = forwardRef<
inputValue,
allowCreate,
showCreateOption,
showAddPrefix,
createPrefix,
dropdownMaxHeight,
},
ref
) => {
@@ -104,12 +107,14 @@ export const ComboBoxDropdown = forwardRef<
role="listbox"
aria-label={placeholder}
className={cn(
"z-[10000] bg-background-neutral-00 border border-border-02 rounded-12 shadow-02 max-h-60 overflow-y-auto overflow-x-hidden p-1 pointer-events-auto touch-auto"
"z-[10000] bg-background-neutral-00 border border-border-02 rounded-12 shadow-02 overflow-y-auto overflow-x-hidden p-1 pointer-events-auto touch-auto",
!dropdownMaxHeight && "max-h-60"
)}
style={{
...floatingStyles,
// Ensure the dropdown can scroll independently
overscrollBehavior: "contain",
...(dropdownMaxHeight ? { maxHeight: dropdownMaxHeight } : {}),
}}
onWheel={(e) => {
// Prevent event from bubbling to prevent any parent scroll blocking
@@ -135,7 +140,7 @@ export const ComboBoxDropdown = forwardRef<
inputValue={inputValue}
allowCreate={allowCreate}
showCreateOption={showCreateOption}
showAddPrefix={showAddPrefix}
createPrefix={createPrefix}
/>
</div>,
document.body

View File

@@ -24,8 +24,8 @@ interface OptionsListProps {
allowCreate: boolean;
/** Whether to show create option (pre-computed by parent) */
showCreateOption: boolean;
/** Show "Add" prefix in create option */
showAddPrefix: boolean;
/** Prefix shown before the typed value in the create option (e.g., "Use", "Add") */
createPrefix?: string;
}
/**
@@ -47,7 +47,7 @@ export const OptionsList: React.FC<OptionsListProps> = ({
inputValue,
allowCreate,
showCreateOption,
showAddPrefix,
createPrefix,
}) => {
// Index offset for other options when create option is shown
const indexOffset = showCreateOption ? 1 : 0;
@@ -73,7 +73,7 @@ export const OptionsList: React.FC<OptionsListProps> = ({
data-index={0}
role="option"
aria-selected={false}
aria-label={`${showAddPrefix ? "Add" : "Create"} "${inputValue}"`}
aria-label={`${createPrefix ?? "Create"} "${inputValue}"`}
onClick={(e) => {
e.stopPropagation();
onSelect({ value: inputValue, label: inputValue });
@@ -88,18 +88,18 @@ export const OptionsList: React.FC<OptionsListProps> = ({
"flex items-center justify-between rounded-08",
highlightedIndex === 0 && "bg-background-tint-02",
"hover:bg-background-tint-02",
showAddPrefix ? "px-1.5 py-1.5" : "px-3 py-2"
createPrefix ? "px-1.5 py-1.5" : "px-3 py-2"
)}
>
<span
className={cn(
"font-main-ui-action truncate min-w-0",
showAddPrefix ? "px-1" : ""
createPrefix ? "px-1" : ""
)}
>
{showAddPrefix ? (
{createPrefix ? (
<>
<span className="text-text-03">Add</span>
<span className="text-text-03">{createPrefix}</span>
<span className="text-text-04">{` ${inputValue}`}</span>
</>
) : (
@@ -109,7 +109,7 @@ export const OptionsList: React.FC<OptionsListProps> = ({
<SvgPlus
className={cn(
"w-4 h-4 flex-shrink-0",
showAddPrefix ? "text-text-04 mx-1" : "text-text-03 ml-2"
createPrefix ? "text-text-04 mx-1" : "text-text-03 ml-2"
)}
/>
</div>

View File

@@ -40,11 +40,13 @@ export interface InputComboBoxProps
rightSection?: React.ReactNode;
/** Label for the separator between matched and unmatched options */
separatorLabel?: string;
/** Show "Add" prefix in create option (e.g., "Add [value]") */
showAddPrefix?: boolean;
/** Prefix shown before the typed value in the create option (e.g., "Use", "Add"). When omitted, the raw value is shown without a prefix. */
createPrefix?: string;
/**
* When true, keep non-matching options visible under a separator while searching.
* Defaults to false so search results are strictly filtered.
*/
showOtherOptions?: boolean;
/** Max height of the dropdown in CSS units. Defaults to "15rem". */
dropdownMaxHeight?: string;
}
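
Call sites migrate by replacing the boolean with a caller-supplied string. A before/after sketch (the surrounding props are illustrative); the new props mirror the ProviderNameSelect usage later in this diff:

// Before: boolean flag hard-codes the "Add" prefix on the create option.
<InputComboBox value={value} onValueChange={setValue} options={options} showAddPrefix />

// After: any prefix works ("Use", "Add", ...), and the dropdown height is tunable.
<InputComboBox
  value={value}
  onValueChange={setValue}
  options={options}
  createPrefix="Add"
  dropdownMaxHeight="60vh"
/>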

View File

@@ -1,7 +1,7 @@
"use client";
import React from "react";
import type { IconProps } from "@opal/types";
import type { IconProps, RichStr } from "@opal/types";
import Text from "@/refresh-components/texts/Text";
import { Button } from "@opal/components";
import Modal from "@/refresh-components/Modal";
@@ -9,8 +9,8 @@ import { useModalClose } from "../contexts/ModalContext";
export interface ConfirmationModalProps {
icon: React.FunctionComponent<IconProps>;
title: string;
description?: string;
title: string | RichStr;
description?: string | RichStr;
children?: React.ReactNode;
submit: React.ReactNode;
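
Widening `title` and `description` to accept a `RichStr` lets callers pass formatted text without breaking existing plain-string call sites. A sketch based on the delete modal later in this diff; `provider` and `submitButton` are illustrative:

import { markdown } from "@opal/utils";

// Plain strings still type-check unchanged:
<ConfirmationModal icon={SvgTrash} title="Delete provider" submit={submitButton} />

// RichStr via markdown() italicizes the provider name:
<ConfirmationModal
  icon={SvgTrash}
  title={markdown(`Delete *${provider.name}*`)}
  submit={submitButton}
/>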

View File

@@ -4,11 +4,9 @@ import { useState, useEffect, useCallback, useMemo, useRef } from "react";
import Popover from "@/refresh-components/Popover";
import { LlmDescriptor, LlmManager } from "@/lib/hooks";
import { structureValue } from "@/lib/llmConfig/utils";
import {
getProviderIcon,
AGGREGATOR_PROVIDERS,
} from "@/app/admin/configuration/llm/utils";
import { LLMProviderDescriptor } from "@/interfaces/llm";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { AGGREGATOR_PROVIDERS } from "@/lib/llmConfig/svc";
import { Slider } from "@/components/ui/slider";
import { useUser } from "@/providers/UserProvider";
import Text from "@/refresh-components/texts/Text";
@@ -55,7 +53,7 @@ export function groupLlmOptions(
groups.set(groupKey, {
displayName,
options: [],
Icon: getProviderIcon(provider),
Icon: getModelIcon(provider),
});
}
@@ -193,7 +191,7 @@ export default function LLMPopover({
icon={
foldable
? SvgRefreshCw
: getProviderIcon(
: getModelIcon(
llmManager.currentLlm.provider,
llmManager.currentLlm.modelName
)

View File

@@ -3,7 +3,7 @@
import { useState, useMemo, useRef } from "react";
import Popover from "@/refresh-components/Popover";
import { LlmManager } from "@/lib/hooks";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { Button, SelectButton, OpenButton } from "@opal/components";
import { SvgPlusCircle, SvgX } from "@opal/icons";
import { LLMOption } from "@/refresh-components/popovers/interfaces";
@@ -152,7 +152,7 @@ export default function ModelSelector({
)}
<div className="flex items-center shrink-0">
{selectedModels.map((model, index) => {
const ProviderIcon = getProviderIcon(
const ProviderIcon = getModelIcon(
model.provider,
model.modelName
);

View File

@@ -18,7 +18,7 @@ import {
unsetDefaultImageGenerationConfig,
deleteImageGenerationConfig,
} from "@/refresh-pages/admin/ImageGenerationPage/svc";
import { ProviderIcon } from "@/app/admin/configuration/llm/ProviderIcon";
import ModelIcon from "@/app/admin/configuration/llm/ModelIcon";
import Message from "@/refresh-components/messages/Message";
import ConfirmationModalLayout from "@/refresh-components/layouts/ConfirmationModalLayout";
import InputSelect from "@/refresh-components/inputs/InputSelect";
@@ -264,7 +264,7 @@ export default function ImageGenerationContent() {
sizePreset="main-ui"
variant="section"
icon={() => (
<ProviderIcon
<ModelIcon
provider={provider.provider_name}
size={16}
/>
@@ -391,7 +391,7 @@ export default function ImageGenerationContent() {
key={p.image_provider_id}
value={p.image_provider_id}
icon={() => (
<ProviderIcon
<ModelIcon
provider={p.provider_name}
size={16}
/>

View File

@@ -3,7 +3,7 @@
import React, { useState, useMemo, useEffect } from "react";
import { Form, Formik, FormikProps } from "formik";
import ProviderModal from "@/components/modals/ProviderModal";
import { ProviderIcon } from "@/app/admin/configuration/llm/ProviderIcon";
import ModelIcon from "@/app/admin/configuration/llm/ModelIcon";
import ConnectionProviderIcon from "@/refresh-components/ConnectionProviderIcon";
import {
testImageGenerationApiKey,
@@ -246,7 +246,7 @@ export function ImageGenFormWrapper<T extends FormValues>({
const icon = () => (
<ConnectionProviderIcon
icon={<ProviderIcon provider={imageProvider.provider_name} size={24} />}
icon={<ModelIcon provider={imageProvider.provider_name} size={24} />}
/>
);

View File

@@ -8,8 +8,8 @@ import {
useWellKnownLLMProviders,
} from "@/hooks/useLLMProviders";
import { ThreeDotsLoader } from "@/components/Loading";
import { Content, Card } from "@opal/layouts";
import { Button, SelectCard } from "@opal/components";
import { Content, Card as CardLayout } from "@opal/layouts";
import { Button, SelectCard, Text, Card } from "@opal/components";
import { Hoverable } from "@opal/core";
import { SvgArrowExchange, SvgSettings, SvgTrash } from "@opal/icons";
import * as SettingsLayouts from "@/layouts/settings-layouts";
@@ -22,9 +22,7 @@ import {
} from "@/lib/llmConfig/providers";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
import { deleteLlmProvider, setDefaultLlmModel } from "@/lib/llmConfig/svc";
import Text from "@/refresh-components/texts/Text";
import { Horizontal as HorizontalInput } from "@/layouts/input-layouts";
import LegacyCard from "@/refresh-components/cards/Card";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import Message from "@/refresh-components/messages/Message";
import ConfirmationModalLayout from "@/refresh-components/layouts/ConfirmationModalLayout";
@@ -49,6 +47,7 @@ import LiteLLMProxyModal from "@/sections/modals/llmConfig/LiteLLMProxyModal";
import BifrostModal from "@/sections/modals/llmConfig/BifrostModal";
import OpenAICompatibleModal from "@/sections/modals/llmConfig/OpenAICompatibleModal";
import { Section } from "@/layouts/general-layouts";
import { markdown } from "@opal/utils";
const route = ADMIN_ROUTES.LLM_MODELS;
@@ -141,7 +140,7 @@ function ExistingProviderCard({
const handleDelete = async () => {
try {
await deleteLlmProvider(provider.id);
await deleteLlmProvider(provider.id, isLastProvider);
await refreshLlmProviderCaches(mutate);
deleteModal.toggle(false);
toast.success("Provider deleted successfully!");
@@ -156,24 +155,37 @@ function ExistingProviderCard({
{deleteModal.isOpen && (
<ConfirmationModalLayout
icon={SvgTrash}
title={`Delete ${provider.name}`}
title={markdown(`Delete *${provider.name}*`)}
onClose={() => deleteModal.toggle(false)}
submit={
<Button variant="danger" onClick={handleDelete}>
<Button
variant="danger"
onClick={handleDelete}
disabled={isDefault && !isLastProvider}
>
Delete
</Button>
}
>
<Section alignItems="start" gap={0.5}>
<Text text03>
All LLM models from provider <b>{provider.name}</b> will be
removed and unavailable for future chats. Chat history will be
preserved.
</Text>
{isLastProvider && (
<Text text03>
Connect another provider to continue using chats.
{isDefault && !isLastProvider ? (
<Text font="main-ui-body" color="text-03">
Cannot delete the default provider. Select another provider as
the default prior to deleting this one.
</Text>
) : (
<>
<Text font="main-ui-body" color="text-03">
{markdown(
`All LLM models from provider **${provider.name}** will be removed and unavailable for future chats. Chat history will be preserved.`
)}
</Text>
{isLastProvider && (
<Text font="main-ui-body" color="text-03">
Connect another provider to continue using chats.
</Text>
)}
</>
)}
</Section>
</ConfirmationModalLayout>
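
The second argument to `deleteLlmProvider` threads the force-delete behavior from #9998 through the svc layer. A sketch of what the updated function plausibly looks like; the query-parameter name is an assumption, not confirmed by this diff:

// Sketch only: the `force` flag is what lets the last remaining provider be deleted.
export async function deleteLlmProvider(
  providerId: number,
  force: boolean = false
): Promise<void> {
  const response = await fetch(
    `${LLM_ADMIN_URL}/provider/${providerId}?force=${force}`,
    { method: "DELETE" }
  );
  if (!response.ok) {
    throw new Error(await response.text());
  }
}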
@@ -189,7 +201,7 @@ function ExistingProviderCard({
rounding="lg"
onClick={() => setIsOpen(true)}
>
<Card.Header
<CardLayout.Header
icon={getProviderIcon(provider.provider)}
title={provider.name}
description={getProviderDisplayName(provider.provider)}
@@ -259,7 +271,7 @@ function NewProviderCard({
rounding="lg"
onClick={() => setIsOpen(true)}
>
<Card.Header
<CardLayout.Header
icon={getProviderIcon(provider.name)}
title={getProviderProductName(provider.name)}
description={getProviderDisplayName(provider.name)}
@@ -303,7 +315,7 @@ function NewCustomProviderCard({
rounding="lg"
onClick={() => setIsOpen(true)}
>
<Card.Header
<CardLayout.Header
icon={getProviderIcon("custom")}
title={getProviderProductName("custom")}
description={getProviderDisplayName("custom")}
@@ -392,7 +404,7 @@ export default function LLMProviderConfigurationPage() {
<SettingsLayouts.Body>
{hasProviders ? (
<LegacyCard>
<Card border="solid" rounding="lg">
<HorizontalInput
title="Default Model"
description="This model will be used by Onyx by default in your chats."
@@ -423,7 +435,7 @@ export default function LLMProviderConfigurationPage() {
</InputSelect.Content>
</InputSelect>
</HorizontalInput>
</LegacyCard>
</Card>
) : (
<Message
info

View File

@@ -27,7 +27,7 @@ import {
ModelAccessField,
ModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import { fetchBedrockModels } from "@/app/admin/configuration/llm/utils";
import { fetchBedrockModels } from "@/lib/llmConfig/svc";
import { Card } from "@opal/components";
import { Section } from "@/layouts/general-layouts";
import { SvgAlertCircle } from "@opal/icons";

View File

@@ -9,7 +9,7 @@ import {
LLMProviderName,
LLMProviderView,
} from "@/interfaces/llm";
import { fetchBifrostModels } from "@/app/admin/configuration/llm/utils";
import { fetchBifrostModels } from "@/lib/llmConfig/svc";
import {
useInitialValues,
buildValidationSchema,

View File

@@ -6,18 +6,32 @@
*/
import { render, screen, setupUser, waitFor } from "@tests/setup/test-utils";
import { PointerEventsCheckLevel } from "@testing-library/user-event";
import CustomModal from "@/sections/modals/llmConfig/CustomModal";
import { toast } from "@/hooks/useToast";
import { SWR_KEYS } from "@/lib/swr-keys";
// Mock SWR's mutate function and useSWR
const mockMutate = jest.fn();
const MOCK_CUSTOM_PROVIDER_OPTIONS = [
{ value: "anthropic", label: "Anthropic" },
{ value: "cloudflare", label: "Cloudflare" },
{ value: "openai", label: "OpenAI" },
];
jest.mock("swr", () => {
const actual = jest.requireActual("swr");
return {
...actual,
useSWRConfig: () => ({ mutate: mockMutate }),
__esModule: true,
default: () => ({ data: undefined, error: undefined, isLoading: false }),
default: (key: string | null) => ({
data:
key === SWR_KEYS.customProviderNames
? MOCK_CUSTOM_PROVIDER_OPTIONS
: undefined,
error: undefined,
isLoading: false,
}),
};
});
@@ -70,12 +84,15 @@ describe("Custom LLM Provider Configuration Workflow", () => {
}
) {
const nameInput = screen.getByPlaceholderText("Display Name");
const providerInput = screen.getByPlaceholderText(
"Provider Name as shown on LiteLLM"
);
await user.type(nameInput, options.name);
await user.type(providerInput, options.provider);
// Select provider from the combo box dropdown
const providerInput = screen.getByPlaceholderText("Select a provider");
await user.click(providerInput);
const providerOption = await screen.findByRole("option", {
name: new RegExp(options.provider, "i"),
});
await user.click(providerOption);
// Fill in model name (first model row)
const modelNameInput = screen.getByPlaceholderText("Model name");
@@ -83,7 +100,9 @@ describe("Custom LLM Provider Configuration Workflow", () => {
}
test("creates a new custom LLM provider successfully", async () => {
const user = setupUser();
const user = setupUser({
pointerEventsCheck: PointerEventsCheckLevel.Never,
});
// Mock POST /api/admin/llm/test
fetchSpy.mockResolvedValueOnce({
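
Note on `PointerEventsCheckLevel.Never`: the provider picker now renders its options through a floating portal, and user-event's default pointer-events check can reject clicks on portal-rendered options whose ancestors it cannot resolve. Disabling the check here is likely a workaround for that, though the diff itself does not say so.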
@@ -159,7 +178,9 @@ describe("Custom LLM Provider Configuration Workflow", () => {
});
test("shows error when test configuration fails", async () => {
const user = setupUser();
const user = setupUser({
pointerEventsCheck: PointerEventsCheckLevel.Never,
});
// Mock POST /api/admin/llm/test (failure)
fetchSpy.mockResolvedValueOnce({
@@ -204,7 +225,9 @@ describe("Custom LLM Provider Configuration Workflow", () => {
});
test("updates an existing LLM provider", async () => {
const user = setupUser();
const user = setupUser({
pointerEventsCheck: PointerEventsCheckLevel.Never,
});
const existingProvider = {
id: 1,
@@ -285,7 +308,9 @@ describe("Custom LLM Provider Configuration Workflow", () => {
});
test("preserves additional models when updating a provider", async () => {
const user = setupUser();
const user = setupUser({
pointerEventsCheck: PointerEventsCheckLevel.Never,
});
const existingProvider = {
id: 7,
@@ -382,7 +407,9 @@ describe("Custom LLM Provider Configuration Workflow", () => {
});
test("sets provider as default when shouldMarkAsDefault is true", async () => {
const user = setupUser();
const user = setupUser({
pointerEventsCheck: PointerEventsCheckLevel.Never,
});
// Mock POST /api/admin/llm/test
fetchSpy.mockResolvedValueOnce({
@@ -436,7 +463,9 @@ describe("Custom LLM Provider Configuration Workflow", () => {
});
test("shows error when provider creation fails", async () => {
const user = setupUser();
const user = setupUser({
pointerEventsCheck: PointerEventsCheckLevel.Never,
});
// Mock POST /api/admin/llm/test
fetchSpy.mockResolvedValueOnce({
@@ -472,7 +501,9 @@ describe("Custom LLM Provider Configuration Workflow", () => {
});
test("adds custom configuration key-value pairs", async () => {
const user = setupUser();
const user = setupUser({
pointerEventsCheck: PointerEventsCheckLevel.Never,
});
// Mock POST /api/admin/llm/test
fetchSpy.mockResolvedValueOnce({
@@ -492,10 +523,13 @@ describe("Custom LLM Provider Configuration Workflow", () => {
const nameInput = screen.getByPlaceholderText("Display Name");
await user.type(nameInput, "Cloudflare Provider");
const providerInput = screen.getByPlaceholderText(
"Provider Name as shown on LiteLLM"
);
await user.type(providerInput, "cloudflare");
// Select provider from the combo box dropdown
const providerInput = screen.getByPlaceholderText("Select a provider");
await user.click(providerInput);
const providerOption = await screen.findByRole("option", {
name: /cloudflare/i,
});
await user.click(providerOption);
// Click "Add Line" button for custom config (aria-label from KeyValueInput)
const addLineButton = screen.getByRole("button", {

View File

@@ -1,5 +1,6 @@
"use client";
import { useEffect, useMemo, useRef, useState } from "react";
import { useSWRConfig } from "swr";
import { useFormikContext } from "formik";
import {
@@ -18,16 +19,19 @@ import {
ModelAccessField,
ModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import { useCustomProviderNames } from "@/hooks/useLLMProviders";
import InputTypeInField from "@/refresh-components/form/InputTypeInField";
import * as InputLayouts from "@/layouts/input-layouts";
import KeyValueInput, {
KeyValue,
} from "@/refresh-components/inputs/InputKeyValue";
import InputComboBox from "@/refresh-components/inputs/InputComboBox";
import InputTypeIn from "@/refresh-components/inputs/InputTypeIn";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import Text from "@/refresh-components/texts/Text";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import { Button, Card, EmptyMessageCard } from "@opal/components";
import { SvgMinusCircle, SvgPlusCircle } from "@opal/icons";
import { SvgMinusCircle, SvgPlusCircle, SvgRefreshCw } from "@opal/icons";
import { markdown } from "@opal/utils";
import { toast } from "@/hooks/useToast";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
@@ -107,6 +111,95 @@ function ModelConfigurationItem({
);
}
interface FetchedModel {
name: string;
display_name: string;
max_input_tokens: number | null;
supports_image_input: boolean;
}
function FetchModelsButton({ provider }: { provider: string }) {
const abortRef = useRef<AbortController | null>(null);
const [isFetching, setIsFetching] = useState(false);
const formikProps = useFormikContext<{
api_base?: string;
api_key?: string;
api_version?: string;
model_configurations: CustomModelConfiguration[];
}>();
useEffect(() => {
return () => abortRef.current?.abort();
}, []);
async function handleFetch() {
abortRef.current?.abort();
const controller = new AbortController();
abortRef.current = controller;
setIsFetching(true);
try {
const response = await fetch("/api/admin/llm/custom/available-models", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
provider,
api_base: formikProps.values.api_base || undefined,
api_key: formikProps.values.api_key || undefined,
api_version: formikProps.values.api_version || undefined,
}),
signal: controller.signal,
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorMessage;
} catch {
// ignore JSON parsing errors
}
throw new Error(errorMessage);
}
const fetched: FetchedModel[] = await response.json();
const existing = formikProps.values.model_configurations;
const existingNames = new Set(existing.map((m) => m.name));
const newModels: CustomModelConfiguration[] = fetched
.filter((m) => !existingNames.has(m.name))
.map((m) => ({
name: m.name,
display_name: m.display_name !== m.name ? m.display_name : "",
max_input_tokens: m.max_input_tokens,
supports_image_input: m.supports_image_input,
}));
// Replace empty placeholder rows, then merge
const nonEmpty = existing.filter((m) => m.name.trim() !== "");
formikProps.setFieldValue("model_configurations", [
...nonEmpty,
...newModels,
]);
toast.success(`Fetched ${fetched.length} models`);
} catch (err) {
if (err instanceof DOMException && err.name === "AbortError") return;
toast.error(
err instanceof Error ? err.message : "Failed to fetch models"
);
} finally {
if (!controller.signal.aborted) {
setIsFetching(false);
}
}
}
return (
<Button
prominence="tertiary"
icon={isFetching ? SimpleLoader : SvgRefreshCw}
onClick={handleFetch}
disabled={isFetching || !provider}
type="button"
/>
);
}
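
The merge step above drops blank placeholder rows and skips models the admin already configured. A self-contained worked example of that logic (model names illustrative):

const existing = [{ name: "" }, { name: "gpt-4o" }];
const fetched = [{ name: "gpt-4o" }, { name: "o3-mini" }];

const existingNames = new Set(existing.map((m) => m.name));
const merged = [
  ...existing.filter((m) => m.name.trim() !== ""), // drop empty placeholder rows
  ...fetched.filter((m) => !existingNames.has(m.name)), // skip already-configured models
];
// merged: [{ name: "gpt-4o" }, { name: "o3-mini" }]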
function ModelConfigurationList() {
const formikProps = useFormikContext<{
model_configurations: CustomModelConfiguration[];
@@ -189,6 +282,53 @@ function CustomConfigKeyValue() {
);
}
// ─── Provider Name Select ─────────────────────────────────────────────────────
function ProviderNameSelect({ disabled }: { disabled?: boolean }) {
const { customProviderNames } = useCustomProviderNames();
const { values, setFieldValue } = useFormikContext<{ provider: string }>();
const options = useMemo(
() =>
(customProviderNames ?? []).map((opt) => ({
value: opt.value,
label: opt.value,
description: opt.label,
})),
[customProviderNames]
);
return (
<InputComboBox
value={values.provider}
onValueChange={(value) => setFieldValue("provider", value)}
options={options}
placeholder="Select a provider"
disabled={disabled}
createPrefix="Use"
dropdownMaxHeight="60vh"
/>
);
}
function ModelsHeader() {
const { values } = useFormikContext<{ provider: string }>();
return (
<InputLayouts.Horizontal
title="Models"
description="List LLM models you wish to use and their configurations for this provider. See full list of models at LiteLLM."
nonInteractive
center
>
{values.provider ? (
<FetchModelsButton provider={values.provider} />
) : (
<div />
)}
</InputLayouts.Horizontal>
);
}
// ─── Custom Config Processing ─────────────────────────────────────────────────
function keyValueListToDict(items: KeyValue[]): Record<string, string> {
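
The hunk boundary cuts this helper off at its signature. A minimal sketch of a body consistent with it, assuming `KeyValue` is `{ key: string; value: string }` and that rows with blank keys are dropped:

function keyValueListToDict(items: KeyValue[]): Record<string, string> {
  const dict: Record<string, string> = {};
  for (const { key, value } of items) {
    if (key.trim() !== "") {
      dict[key] = value; // later duplicates overwrite earlier ones
    }
  }
  return dict;
}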
@@ -335,23 +475,17 @@ export default function CustomModal({
});
}}
>
{!isOnboarding && (
<InputLayouts.FieldPadder>
<InputLayouts.Vertical
name="provider"
title="Provider Name"
subDescription={markdown(
"Should be one of the providers listed at [LiteLLM](https://docs.litellm.ai/docs/providers)."
)}
>
<InputTypeInField
name="provider"
placeholder="Provider Name as shown on LiteLLM"
variant={existingLlmProvider ? "disabled" : undefined}
/>
</InputLayouts.Vertical>
</InputLayouts.FieldPadder>
)}
<InputLayouts.FieldPadder>
<InputLayouts.Vertical
name="provider"
title="Provider Name"
subDescription={markdown(
"Should be one of the providers listed at [LiteLLM](https://docs.litellm.ai/docs/providers)."
)}
>
<ProviderNameSelect disabled={!!existingLlmProvider} />
</InputLayouts.Vertical>
</InputLayouts.FieldPadder>
<APIBaseField optional />
@@ -396,13 +530,7 @@ export default function CustomModal({
<InputLayouts.FieldSeparator />
<Section gap={0.5}>
<InputLayouts.FieldPadder>
<Content
title="Models"
description="List LLM models you wish to use and their configurations for this provider. See full list of models at LiteLLM."
variant="section"
sizePreset="main-content"
widthVariant="full"
/>
<ModelsHeader />
</InputLayouts.FieldPadder>
<Card padding="sm">

View File

@@ -23,7 +23,7 @@ import {
ModelAccessField,
ModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import { fetchModels } from "@/app/admin/configuration/llm/utils";
import { fetchModels } from "@/lib/llmConfig/svc";
import { toast } from "@/hooks/useToast";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";

View File

@@ -8,7 +8,7 @@ import {
LLMProviderName,
LLMProviderView,
} from "@/interfaces/llm";
import { fetchLiteLLMProxyModels } from "@/app/admin/configuration/llm/utils";
import { fetchLiteLLMProxyModels } from "@/lib/llmConfig/svc";
import {
useInitialValues,
buildValidationSchema,

View File

@@ -24,7 +24,7 @@ import {
ModelAccessField,
ModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import { fetchOllamaModels } from "@/app/admin/configuration/llm/utils";
import { fetchOllamaModels } from "@/lib/llmConfig/svc";
import Tabs from "@/refresh-components/Tabs";
import { Card } from "@opal/components";
import { toast } from "@/hooks/useToast";

View File

@@ -9,7 +9,7 @@ import {
LLMProviderName,
LLMProviderView,
} from "@/interfaces/llm";
import { fetchOpenAICompatibleModels } from "@/app/admin/configuration/llm/utils";
import { fetchOpenAICompatibleModels } from "@/lib/llmConfig/svc";
import {
useInitialValues,
buildValidationSchema,

View File

@@ -8,7 +8,7 @@ import {
LLMProviderName,
LLMProviderView,
} from "@/interfaces/llm";
import { fetchOpenRouterModels } from "@/app/admin/configuration/llm/utils";
import { fetchOpenRouterModels } from "@/lib/llmConfig/svc";
import {
useInitialValues,
buildValidationSchema,

View File

@@ -426,7 +426,10 @@ export function ModelSelectionField({
const formikProps = useFormikContext<BaseLLMFormValues>();
const [newModelName, setNewModelName] = useState("");
const [isExpanded, setIsExpanded] = useState(false);
const isAutoMode = formikProps.values.is_auto_mode;
// When the auto-update toggle is hidden, auto mode should have no effect —
// otherwise models can't be deselected and "Select All" stays disabled.
const isAutoMode =
shouldShowAutoUpdateToggle && formikProps.values.is_auto_mode;
const models = formikProps.values.model_configurations;
// Snapshot the original model visibility so we can restore it when
@@ -700,6 +703,15 @@ function ModalWrapperInner({
const isTesting = status?.isTesting === true;
const busy = isTesting || isSubmitting;
const disabledTooltip = busy
? undefined
: !isValid
? "Please fill in all required fields."
: !dirty
? "No changes to save."
: undefined;
const providerIcon = getProviderIcon(providerName);
const providerDisplayName = getProviderDisplayName(providerName);
const providerProductName = getProviderProductName(providerName);
@@ -732,6 +744,7 @@ function ModalWrapperInner({
disabled={!isValid || !dirty || busy}
type="submit"
icon={busy ? SimpleLoader : undefined}
tooltip={disabledTooltip}
>
{llmProvider?.name
? busy
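
The tooltip mirrors the `disabled={!isValid || !dirty || busy}` condition but stays silent while busy, so the loading state is not annotated with a misleading hint. The same logic as a standalone function, for reference:

function disabledTooltipFor(busy: boolean, isValid: boolean, dirty: boolean) {
  if (busy) return undefined; // the loader icon already signals state
  if (!isValid) return "Please fill in all required fields.";
  if (!dirty) return "No changes to save.";
  return undefined; // button is enabled; no tooltip needed
}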

View File

@@ -12,7 +12,7 @@ import {
SvgServer,
SvgSettings,
} from "@opal/icons";
import { ProviderIcon } from "@/app/admin/configuration/llm/ProviderIcon";
import ModelIcon from "@/app/admin/configuration/llm/ModelIcon";
export interface LLMProviderCardProps {
title: string;
@@ -75,7 +75,7 @@ function LLMProviderCardInner({
<div className="flex gap-1 p-1 flex-1 min-w-0">
<div className="flex items-start h-full pt-0.5">
{providerName ? (
<ProviderIcon provider={providerName} size={16} className="" />
<ModelIcon provider={providerName} size={16} className="" />
) : (
<SvgServer className="w-4 h-4 stroke-text-04" />
)}

View File

@@ -16,7 +16,7 @@ import {
getProviderDisplayInfo,
} from "../forms/getOnboardingForm";
import { Disabled } from "@opal/core";
import { ProviderIcon } from "@/app/admin/configuration/llm/ProviderIcon";
import ModelIcon from "@/app/admin/configuration/llm/ModelIcon";
import { SvgCheckCircle, SvgCpu, SvgExternalLink } from "@opal/icons";
import { ContentAction } from "@opal/layouts";
import { useLLMProviderOptions } from "@/lib/hooks/useLLMProviderOptions";
@@ -69,7 +69,7 @@ const StackedProviderIcons = ({ providers }: StackedProviderIconsProps) => {
zIndex: providers.length - index,
}}
>
<ProviderIcon provider={provider} size={16} />
<ModelIcon provider={provider} size={16} />
</div>
))}
{providers.length > 3 && (