Compare commits

..

1 Commit

Author SHA1 Message Date
Jamison Lahman
0a64c7c8ff fix(fe): editing an LLM provider uses the global default model 2026-03-19 14:30:20 -07:00
21 changed files with 118 additions and 1355 deletions

View File

@@ -157,7 +157,9 @@ def _execute_single_retrieval(
logger.error(f"Error executing request: {e}")
raise e
elif _is_rate_limit_error(e):
results = _execute_with_retry(retrieval_function(**request_kwargs))
results = _execute_with_retry(
lambda: retrieval_function(**request_kwargs).execute()
)
elif e.resp.status == 404 or e.resp.status == 403:
if continue_on_404_or_403:
logger.debug(f"Error executing request: {e}")

View File

@@ -530,11 +530,6 @@ class LitellmLLM(LLM):
):
messages = _strip_tool_content_from_messages(messages)
# Only pass tool_choice when tools are present — some providers (e.g. Fireworks)
# reject requests where tool_choice is explicitly null.
if tools and tool_choice is not None:
optional_kwargs["tool_choice"] = tool_choice
response = litellm.completion(
mock_response=get_llm_mock_response() or MOCK_LLM_RESPONSE,
model=model,
@@ -543,6 +538,7 @@ class LitellmLLM(LLM):
custom_llm_provider=self._custom_llm_provider or None,
messages=messages,
tools=tools,
tool_choice=tool_choice,
stream=stream,
temperature=temperature,
timeout=timeout_override or self._timeout,

View File

@@ -256,6 +256,7 @@ def test_multiple_tool_calls(default_multi_llm: LitellmLLM) -> None:
{"role": "user", "content": "What's the weather and time in New York?"}
],
tools=tools,
tool_choice=None,
stream=True,
temperature=0.0, # Default value from GEN_AI_TEMPERATURE
timeout=30,
@@ -411,6 +412,7 @@ def test_multiple_tool_calls_streaming(default_multi_llm: LitellmLLM) -> None:
{"role": "user", "content": "What's the weather and time in New York?"}
],
tools=tools,
tool_choice=None,
stream=True,
temperature=0.0, # Default value from GEN_AI_TEMPERATURE
timeout=30,
@@ -1429,36 +1431,3 @@ def test_strip_tool_content_merges_consecutive_tool_results() -> None:
assert "sunny 72F" in merged
assert "tc_2" in merged
assert "headline news" in merged
def test_no_tool_choice_sent_when_no_tools(default_multi_llm: LitellmLLM) -> None:
    """Regression test for providers (e.g. Fireworks) that reject tool_choice=null.

    Verifies that when the caller supplies no tools, the ``tool_choice`` key is
    omitted entirely from the kwargs forwarded to ``litellm.completion()`` —
    forwarding an explicit ``None`` would still break those providers.
    """
    messages: LanguageModelInput = [UserMessage(content="Hello!")]

    # One well-formed assistant chunk so invoke() has a response to consume.
    assistant_choice = litellm.Choices(
        delta=_create_delta(role="assistant", content="Hello!"),
        finish_reason="stop",
        index=0,
    )
    fake_chunks = [
        litellm.ModelResponse(
            id="chatcmpl-123",
            choices=[assistant_choice],
            model="gpt-3.5-turbo",
        ),
    ]

    with patch("litellm.completion") as completion_mock:
        completion_mock.return_value = fake_chunks
        default_multi_llm.invoke(messages, tools=None)

        # Inspect exactly what was forwarded to the provider call.
        _, kwargs = completion_mock.call_args
        assert (
            "tool_choice" not in kwargs
        ), "tool_choice must not be sent to providers when no tools are provided"

File diff suppressed because it is too large Load Diff

View File

@@ -207,16 +207,6 @@ prompt_yn_or_default() {
fi
}
# Ask the user whether to install the named component.
# Returns 0 to proceed, or 1 (after printing a warning) if the user declined.
# Relies on prompt_yn_or_default setting $REPLY.
confirm_action() {
    local description="$1"
    prompt_yn_or_default "Install ${description}? (Y/n) [default: Y] " "Y"
    case "$REPLY" in
        [Nn]*)
            print_warning "Skipping: ${description}"
            return 1
            ;;
    esac
    return 0
}
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
@@ -405,11 +395,6 @@ fi
if ! command -v docker &> /dev/null; then
if [[ "$OSTYPE" == "linux-gnu"* ]] || [[ -n "${WSL_DISTRO_NAME:-}" ]]; then
print_info "Docker is required but not installed."
if ! confirm_action "Docker Engine"; then
print_error "Docker is required to run Onyx."
exit 1
fi
install_docker_linux
if ! command -v docker &> /dev/null; then
print_error "Docker installation failed."
@@ -426,11 +411,7 @@ if command -v docker &> /dev/null \
&& ! command -v docker-compose &> /dev/null \
&& { [[ "$OSTYPE" == "linux-gnu"* ]] || [[ -n "${WSL_DISTRO_NAME:-}" ]]; }; then
print_info "Docker Compose is required but not installed."
if ! confirm_action "Docker Compose plugin"; then
print_error "Docker Compose is required to run Onyx."
exit 1
fi
print_info "Docker Compose not found — installing plugin..."
COMPOSE_ARCH="$(uname -m)"
COMPOSE_URL="https://github.com/docker/compose/releases/latest/download/docker-compose-linux-${COMPOSE_ARCH}"
COMPOSE_DIR="/usr/local/lib/docker/cli-plugins"
@@ -581,31 +562,10 @@ version_compare() {
# Check Docker daemon
if ! docker info &> /dev/null; then
if [[ "$OSTYPE" == "darwin"* ]]; then
print_info "Docker daemon is not running. Starting Docker Desktop..."
open -a Docker
# Wait up to 120 seconds for Docker to be ready
DOCKER_WAIT=0
DOCKER_MAX_WAIT=120
while ! docker info &> /dev/null; do
if [ $DOCKER_WAIT -ge $DOCKER_MAX_WAIT ]; then
print_error "Docker Desktop did not start within ${DOCKER_MAX_WAIT} seconds."
print_info "Please start Docker Desktop manually and re-run this script."
exit 1
fi
printf "\r\033[KWaiting for Docker Desktop to start... (%ds)" "$DOCKER_WAIT"
sleep 2
DOCKER_WAIT=$((DOCKER_WAIT + 2))
done
echo ""
print_success "Docker Desktop is now running"
else
print_error "Docker daemon is not running. Please start Docker."
exit 1
fi
else
print_success "Docker daemon is running"
print_error "Docker daemon is not running. Please start Docker."
exit 1
fi
print_success "Docker daemon is running"
# Check Docker resources
print_step "Verifying Docker resources"
@@ -785,7 +745,6 @@ print_success "All configuration files ready"
# Set up deployment configuration
print_step "Setting up deployment configs"
ENV_FILE="${INSTALL_ROOT}/deployment/.env"
ENV_TEMPLATE="${INSTALL_ROOT}/deployment/env.template"
# Check if services are already running
if [ -d "${INSTALL_ROOT}/deployment" ] && [ -f "${INSTALL_ROOT}/deployment/docker-compose.yml" ]; then
# Determine compose command
@@ -1125,25 +1084,6 @@ else
USE_LATEST=false
fi
# For pinned version tags, re-download config files from that tag so the
# compose file matches the images being pulled (the initial download used main).
if [[ "$USE_LATEST" = false ]] && [[ "$USE_LOCAL_FILES" = false ]]; then
PINNED_BASE="https://raw.githubusercontent.com/onyx-dot-app/onyx/${CURRENT_IMAGE_TAG}/deployment"
print_info "Fetching config files matching tag ${CURRENT_IMAGE_TAG}..."
if download_file "${PINNED_BASE}/docker_compose/docker-compose.yml" "${INSTALL_ROOT}/deployment/docker-compose.yml" 2>/dev/null; then
download_file "${PINNED_BASE}/data/nginx/app.conf.template" "${INSTALL_ROOT}/data/nginx/app.conf.template" 2>/dev/null || true
download_file "${PINNED_BASE}/data/nginx/run-nginx.sh" "${INSTALL_ROOT}/data/nginx/run-nginx.sh" 2>/dev/null || true
chmod +x "${INSTALL_ROOT}/data/nginx/run-nginx.sh"
if [[ "$LITE_MODE" = true ]]; then
download_file "${PINNED_BASE}/docker_compose/${LITE_COMPOSE_FILE}" \
"${INSTALL_ROOT}/deployment/${LITE_COMPOSE_FILE}" 2>/dev/null || true
fi
print_success "Config files updated to match ${CURRENT_IMAGE_TAG}"
else
print_warning "Tag ${CURRENT_IMAGE_TAG} not found on GitHub — using main branch configs"
fi
fi
# Pull Docker images with reduced output
print_step "Pulling Docker images"
print_info "This may take several minutes depending on your internet connection..."

View File

@@ -17,7 +17,6 @@ import (
// RunCIOptions holds the command-line flag values for the run-ci command.
type RunCIOptions struct {
	DryRun bool // perform all local operations but skip pushing to remote and creating PRs
	Yes    bool // skip confirmation prompts and automatically proceed
	Rerun  bool // update an existing CI PR with the latest fork changes to re-trigger CI
}
// NewRunCICommand creates a new run-ci command
@@ -50,7 +49,6 @@ Example usage:
cmd.Flags().BoolVar(&opts.DryRun, "dry-run", false, "Perform all local operations but skip pushing to remote and creating PRs")
cmd.Flags().BoolVar(&opts.Yes, "yes", false, "Skip confirmation prompts and automatically proceed")
cmd.Flags().BoolVar(&opts.Rerun, "rerun", false, "Update an existing CI PR with the latest fork changes to re-trigger CI")
return cmd
}
@@ -109,44 +107,19 @@ func runCI(cmd *cobra.Command, args []string, opts *RunCIOptions) {
log.Fatalf("PR #%s is not from a fork - CI should already run automatically", prNumber)
}
// Create the CI branch
ciBranch := fmt.Sprintf("run-ci/%s", prNumber)
prTitle := fmt.Sprintf("chore: [Running GitHub actions for #%s]", prNumber)
prBody := fmt.Sprintf("This PR runs GitHub Actions CI for #%s.\n\n- [x] Override Linear Check\n\n**This PR should be closed (not merged) after CI completes.**", prNumber)
// Check if a CI PR already exists for this branch
existingPRURL, err := findExistingCIPR(ciBranch)
if err != nil {
log.Fatalf("Failed to check for existing CI PR: %v", err)
}
if existingPRURL != "" && !opts.Rerun {
log.Infof("A CI PR already exists for #%s: %s", prNumber, existingPRURL)
log.Info("Run with --rerun to update it with the latest fork changes and re-trigger CI.")
return
}
if opts.Rerun && existingPRURL == "" {
log.Warn("--rerun was specified but no existing open CI PR was found. A new PR will be created.")
}
if existingPRURL != "" && opts.Rerun {
log.Infof("Existing CI PR found: %s", existingPRURL)
log.Info("Will update the CI branch with the latest fork changes to re-trigger CI.")
}
// Confirm before proceeding
if !opts.Yes {
action := "Create CI branch"
if existingPRURL != "" {
action = "Update existing CI branch"
}
if !prompt.Confirm(fmt.Sprintf("%s for PR #%s? (yes/no): ", action, prNumber)) {
if !prompt.Confirm(fmt.Sprintf("Create CI branch for PR #%s? (yes/no): ", prNumber)) {
log.Info("Exiting...")
return
}
}
// Create the CI branch
ciBranch := fmt.Sprintf("run-ci/%s", prNumber)
prTitle := fmt.Sprintf("chore: [Running GitHub actions for #%s]", prNumber)
prBody := fmt.Sprintf("This PR runs GitHub Actions CI for #%s.\n\n- [x] Override Linear Check\n\n**This PR should be closed (not merged) after CI completes.**", prNumber)
// Fetch the fork's branch
if forkRepo == "" {
log.Fatalf("Could not determine fork repository - headRepositoryOwner or headRepository.name is empty")
@@ -185,11 +158,7 @@ func runCI(cmd *cobra.Command, args []string, opts *RunCIOptions) {
if opts.DryRun {
log.Warnf("[DRY RUN] Would push CI branch: %s", ciBranch)
if existingPRURL == "" {
log.Warnf("[DRY RUN] Would create PR: %s", prTitle)
} else {
log.Warnf("[DRY RUN] Would update existing PR: %s", existingPRURL)
}
log.Warnf("[DRY RUN] Would create PR: %s", prTitle)
// Switch back to original branch
if err := git.RunCommand("switch", "--quiet", originalBranch); err != nil {
log.Warnf("Failed to switch back to original branch: %v", err)
@@ -207,17 +176,6 @@ func runCI(cmd *cobra.Command, args []string, opts *RunCIOptions) {
log.Fatalf("Failed to push CI branch: %v", err)
}
if existingPRURL != "" {
// PR already exists - force push is enough to re-trigger CI
log.Infof("Switching back to original branch: %s", originalBranch)
if err := git.RunCommand("switch", "--quiet", originalBranch); err != nil {
log.Warnf("Failed to switch back to original branch: %v", err)
}
log.Infof("CI PR updated successfully: %s", existingPRURL)
log.Info("The force push will re-trigger CI. Remember to close (not merge) this PR after CI completes!")
return
}
// Create PR using GitHub CLI
log.Info("Creating PR...")
prURL, err := createCIPR(ciBranch, prInfo.BaseRefName, prTitle, prBody)
@@ -259,39 +217,6 @@ func getPRInfo(prNumber string) (*PRInfo, error) {
return &prInfo, nil
}
// findExistingCIPR checks if an open PR already exists for the given CI branch.
// Returns the PR URL if found, or empty string if not.
// findExistingCIPR looks up an open PR whose head is the given CI branch,
// using the GitHub CLI. It returns that PR's URL, or "" when none exists.
func findExistingCIPR(headBranch string) (string, error) {
	listCmd := exec.Command("gh", "pr", "list",
		"--head", headBranch,
		"--state", "open",
		"--json", "url",
	)

	raw, err := listCmd.Output()
	if err != nil {
		// Surface gh's captured stderr when available for easier debugging.
		if exitErr, ok := err.(*exec.ExitError); ok {
			return "", fmt.Errorf("%w: %s", err, string(exitErr.Stderr))
		}
		return "", err
	}

	var matches []struct {
		URL string `json:"url"`
	}
	if err := json.Unmarshal(raw, &matches); err != nil {
		log.Debugf("Failed to parse PR list JSON: %v (raw: %s)", err, string(raw))
		return "", fmt.Errorf("failed to parse PR list: %w", err)
	}

	if len(matches) > 0 {
		log.Debugf("Found existing PR for branch %s: %s", headBranch, matches[0].URL)
		return matches[0].URL, nil
	}
	log.Debugf("No existing open PRs found for branch %s", headBranch)
	return "", nil
}
// createCIPR creates a pull request for CI using the GitHub CLI
func createCIPR(headBranch, baseBranch, title, body string) (string, error) {
cmd := exec.Command("gh", "pr", "create",

View File

@@ -7,10 +7,8 @@ import { processRawChatHistory } from "@/app/app/services/lib";
import { getLatestMessageChain } from "@/app/app/services/messageTree";
import HumanMessage from "@/app/app/message/HumanMessage";
import AgentMessage from "@/app/app/message/messageComponents/AgentMessage";
import { Callout } from "@/components/ui/callout";
import OnyxInitializingLoader from "@/components/OnyxInitializingLoader";
import { IllustrationContent } from "@opal/layouts";
import SvgNotFound from "@opal/illustrations/not-found";
import { Button } from "@opal/components";
import { Persona } from "@/app/admin/agents/interfaces";
import { MinimalOnyxDocument } from "@/lib/search/interfaces";
import PreviewModal from "@/sections/modals/PreviewModal";
@@ -35,16 +33,11 @@ export default function SharedChatDisplay({
if (!chatSession) {
return (
<div className="h-full w-full flex flex-col items-center justify-center">
<div className="flex flex-col items-center gap-4">
<IllustrationContent
illustration={SvgNotFound}
title="Shared chat not found"
description="Did not find a shared chat with the specified ID."
/>
<Button href="/app" prominence="secondary">
Start a new chat
</Button>
<div className="min-h-full w-full">
<div className="mx-auto w-fit pt-8">
<Callout type="danger" title="Shared Chat Not Found">
Did not find a shared chat with the specified ID.
</Callout>
</div>
</div>
);
@@ -58,16 +51,11 @@ export default function SharedChatDisplay({
if (firstMessage === undefined) {
return (
<div className="h-full w-full flex flex-col items-center justify-center">
<div className="flex flex-col items-center gap-4">
<IllustrationContent
illustration={SvgNotFound}
title="Shared chat not found"
description="No messages found in shared chat."
/>
<Button href="/app" prominence="secondary">
Start a new chat
</Button>
<div className="min-h-full w-full">
<div className="mx-auto w-fit pt-8">
<Callout type="danger" title="Shared Chat Not Found">
No messages found in shared chat.
</Callout>
</div>
</div>
);

View File

@@ -123,6 +123,9 @@ export interface LLMProviderFormProps {
open?: boolean;
onOpenChange?: (open: boolean) => void;
/** The current default model name for this provider (from the global default). */
defaultModelName?: string;
// Onboarding-specific (only when variant === "onboarding")
onboardingState?: OnboardingState;
onboardingActions?: OnboardingActions;

View File

@@ -148,12 +148,14 @@ interface ExistingProviderCardProps {
provider: LLMProviderView;
isDefault: boolean;
isLastProvider: boolean;
defaultModelName?: string;
}
function ExistingProviderCard({
provider,
isDefault,
isLastProvider,
defaultModelName,
}: ExistingProviderCardProps) {
const { mutate } = useSWRConfig();
const [isOpen, setIsOpen] = useState(false);
@@ -230,7 +232,12 @@ function ExistingProviderCard({
</Section>
}
/>
{getModalForExistingProvider(provider, isOpen, setIsOpen)}
{getModalForExistingProvider(
provider,
isOpen,
setIsOpen,
defaultModelName
)}
</Card>
</Hoverable.Root>
</>
@@ -446,6 +453,11 @@ export default function LLMConfigurationPage() {
provider={provider}
isDefault={defaultText?.provider_id === provider.id}
isLastProvider={sortedProviders.length === 1}
defaultModelName={
defaultText?.provider_id === provider.id
? defaultText.model_name
: undefined
}
/>
))}
</div>

View File

@@ -35,6 +35,7 @@ export default function AnthropicModal({
shouldMarkAsDefault,
open,
onOpenChange,
defaultModelName,
onboardingState,
onboardingActions,
llmDescriptor,
@@ -64,10 +65,15 @@ export default function AnthropicModal({
default_model_name: DEFAULT_DEFAULT_MODEL_NAME,
}
: {
...buildDefaultInitialValues(existingLlmProvider, modelConfigurations),
...buildDefaultInitialValues(
existingLlmProvider,
modelConfigurations,
defaultModelName
),
api_key: existingLlmProvider?.api_key ?? "",
api_base: existingLlmProvider?.api_base ?? undefined,
default_model_name:
defaultModelName ??
wellKnownLLMProvider?.recommended_default_model?.name ??
DEFAULT_DEFAULT_MODEL_NAME,
is_auto_mode: existingLlmProvider?.is_auto_mode ?? true,

View File

@@ -81,6 +81,7 @@ export default function AzureModal({
shouldMarkAsDefault,
open,
onOpenChange,
defaultModelName,
onboardingState,
onboardingActions,
llmDescriptor,
@@ -109,7 +110,11 @@ export default function AzureModal({
default_model_name: "",
} as AzureModalValues)
: {
...buildDefaultInitialValues(existingLlmProvider, modelConfigurations),
...buildDefaultInitialValues(
existingLlmProvider,
modelConfigurations,
defaultModelName
),
api_key: existingLlmProvider?.api_key ?? "",
target_uri: buildTargetUri(existingLlmProvider),
};

View File

@@ -315,6 +315,7 @@ export default function BedrockModal({
shouldMarkAsDefault,
open,
onOpenChange,
defaultModelName,
onboardingState,
onboardingActions,
llmDescriptor,
@@ -351,7 +352,11 @@ export default function BedrockModal({
},
} as BedrockModalValues)
: {
...buildDefaultInitialValues(existingLlmProvider, modelConfigurations),
...buildDefaultInitialValues(
existingLlmProvider,
modelConfigurations,
defaultModelName
),
custom_config: {
AWS_REGION_NAME:
(existingLlmProvider?.custom_config?.AWS_REGION_NAME as string) ??

View File

@@ -197,6 +197,7 @@ export default function CustomModal({
shouldMarkAsDefault,
open,
onOpenChange,
defaultModelName,
onboardingState,
onboardingActions,
}: LLMProviderFormProps) {
@@ -209,7 +210,11 @@ export default function CustomModal({
const onClose = () => onOpenChange?.(false);
const initialValues = {
...buildDefaultInitialValues(existingLlmProvider),
...buildDefaultInitialValues(
existingLlmProvider,
undefined,
defaultModelName
),
...(isOnboarding ? buildOnboardingInitialValues() : {}),
provider: existingLlmProvider?.provider ?? "",
model_configurations: existingLlmProvider?.model_configurations.map(

View File

@@ -192,6 +192,7 @@ export default function LMStudioForm({
shouldMarkAsDefault,
open,
onOpenChange,
defaultModelName,
onboardingState,
onboardingActions,
llmDescriptor,
@@ -225,7 +226,11 @@ export default function LMStudioForm({
},
} as LMStudioFormValues)
: {
...buildDefaultInitialValues(existingLlmProvider, modelConfigurations),
...buildDefaultInitialValues(
existingLlmProvider,
modelConfigurations,
defaultModelName
),
api_base: existingLlmProvider?.api_base ?? DEFAULT_API_BASE,
custom_config: {
LM_STUDIO_API_KEY:

View File

@@ -159,6 +159,7 @@ export default function LiteLLMProxyModal({
shouldMarkAsDefault,
open,
onOpenChange,
defaultModelName,
onboardingState,
onboardingActions,
llmDescriptor,
@@ -190,7 +191,11 @@ export default function LiteLLMProxyModal({
default_model_name: "",
} as LiteLLMProxyModalValues)
: {
...buildDefaultInitialValues(existingLlmProvider, modelConfigurations),
...buildDefaultInitialValues(
existingLlmProvider,
modelConfigurations,
defaultModelName
),
api_key: existingLlmProvider?.api_key ?? "",
api_base: existingLlmProvider?.api_base ?? DEFAULT_API_BASE,
};

View File

@@ -212,6 +212,7 @@ export default function OllamaModal({
shouldMarkAsDefault,
open,
onOpenChange,
defaultModelName,
onboardingState,
onboardingActions,
llmDescriptor,
@@ -244,7 +245,11 @@ export default function OllamaModal({
},
} as OllamaModalValues)
: {
...buildDefaultInitialValues(existingLlmProvider, modelConfigurations),
...buildDefaultInitialValues(
existingLlmProvider,
modelConfigurations,
defaultModelName
),
api_base: existingLlmProvider?.api_base ?? DEFAULT_API_BASE,
custom_config: {
OLLAMA_API_KEY:

View File

@@ -35,6 +35,7 @@ export default function OpenAIModal({
shouldMarkAsDefault,
open,
onOpenChange,
defaultModelName,
onboardingState,
onboardingActions,
llmDescriptor,
@@ -63,9 +64,14 @@ export default function OpenAIModal({
default_model_name: DEFAULT_DEFAULT_MODEL_NAME,
}
: {
...buildDefaultInitialValues(existingLlmProvider, modelConfigurations),
...buildDefaultInitialValues(
existingLlmProvider,
modelConfigurations,
defaultModelName
),
api_key: existingLlmProvider?.api_key ?? "",
default_model_name:
defaultModelName ??
wellKnownLLMProvider?.recommended_default_model?.name ??
DEFAULT_DEFAULT_MODEL_NAME,
is_auto_mode: existingLlmProvider?.is_auto_mode ?? true,

View File

@@ -158,6 +158,7 @@ export default function OpenRouterModal({
shouldMarkAsDefault,
open,
onOpenChange,
defaultModelName,
onboardingState,
onboardingActions,
llmDescriptor,
@@ -189,7 +190,11 @@ export default function OpenRouterModal({
default_model_name: "",
} as OpenRouterModalValues)
: {
...buildDefaultInitialValues(existingLlmProvider, modelConfigurations),
...buildDefaultInitialValues(
existingLlmProvider,
modelConfigurations,
defaultModelName
),
api_key: existingLlmProvider?.api_key ?? "",
api_base: existingLlmProvider?.api_base ?? DEFAULT_API_BASE,
};

View File

@@ -48,6 +48,7 @@ export default function VertexAIModal({
shouldMarkAsDefault,
open,
onOpenChange,
defaultModelName,
onboardingState,
onboardingActions,
llmDescriptor,
@@ -80,8 +81,13 @@ export default function VertexAIModal({
},
} as VertexAIModalValues)
: {
...buildDefaultInitialValues(existingLlmProvider, modelConfigurations),
...buildDefaultInitialValues(
existingLlmProvider,
modelConfigurations,
defaultModelName
),
default_model_name:
defaultModelName ??
wellKnownLLMProvider?.recommended_default_model?.name ??
VERTEXAI_DEFAULT_MODEL,
is_auto_mode: existingLlmProvider?.is_auto_mode ?? true,

View File

@@ -22,9 +22,15 @@ function detectIfRealOpenAIProvider(provider: LLMProviderView) {
export function getModalForExistingProvider(
provider: LLMProviderView,
open?: boolean,
onOpenChange?: (open: boolean) => void
onOpenChange?: (open: boolean) => void,
defaultModelName?: string
) {
const props = { existingLlmProvider: provider, open, onOpenChange };
const props = {
existingLlmProvider: provider,
open,
onOpenChange,
defaultModelName,
};
switch (provider.provider) {
case LLMProviderName.OPENAI:

View File

@@ -12,9 +12,11 @@ export const LLM_FORM_CLASS_NAME = "flex flex-col gap-y-4 items-stretch mt-6";
export const buildDefaultInitialValues = (
existingLlmProvider?: LLMProviderView,
modelConfigurations?: ModelConfiguration[]
modelConfigurations?: ModelConfiguration[],
currentDefaultModelName?: string
) => {
const defaultModelName =
currentDefaultModelName ??
existingLlmProvider?.model_configurations?.[0]?.name ??
modelConfigurations?.[0]?.name ??
"";