Compare commits

..

2 Commits

Author SHA1 Message Date
Jessica Singh
8b9a6301e7 fix(fe): lift fallback icon to module-level constant 2026-03-19 15:05:54 -07:00
Jessica Singh
1a4fb6f1a1 refactor(fe): migrate voice admin page to opal components 2026-03-19 14:53:06 -07:00
8 changed files with 150 additions and 1655 deletions

View File

@@ -157,7 +157,9 @@ def _execute_single_retrieval(
logger.error(f"Error executing request: {e}")
raise e
elif _is_rate_limit_error(e):
results = _execute_with_retry(retrieval_function(**request_kwargs))
results = _execute_with_retry(
lambda: retrieval_function(**request_kwargs).execute()
)
elif e.resp.status == 404 or e.resp.status == 403:
if continue_on_404_or_403:
logger.debug(f"Error executing request: {e}")

View File

@@ -530,11 +530,6 @@ class LitellmLLM(LLM):
):
messages = _strip_tool_content_from_messages(messages)
# Only pass tool_choice when tools are present — some providers (e.g. Fireworks)
# reject requests where tool_choice is explicitly null.
if tools and tool_choice is not None:
optional_kwargs["tool_choice"] = tool_choice
response = litellm.completion(
mock_response=get_llm_mock_response() or MOCK_LLM_RESPONSE,
model=model,
@@ -543,6 +538,7 @@ class LitellmLLM(LLM):
custom_llm_provider=self._custom_llm_provider or None,
messages=messages,
tools=tools,
tool_choice=tool_choice,
stream=stream,
temperature=temperature,
timeout=timeout_override or self._timeout,

View File

@@ -256,6 +256,7 @@ def test_multiple_tool_calls(default_multi_llm: LitellmLLM) -> None:
{"role": "user", "content": "What's the weather and time in New York?"}
],
tools=tools,
tool_choice=None,
stream=True,
temperature=0.0, # Default value from GEN_AI_TEMPERATURE
timeout=30,
@@ -411,6 +412,7 @@ def test_multiple_tool_calls_streaming(default_multi_llm: LitellmLLM) -> None:
{"role": "user", "content": "What's the weather and time in New York?"}
],
tools=tools,
tool_choice=None,
stream=True,
temperature=0.0, # Default value from GEN_AI_TEMPERATURE
timeout=30,
@@ -1429,36 +1431,3 @@ def test_strip_tool_content_merges_consecutive_tool_results() -> None:
assert "sunny 72F" in merged
assert "tc_2" in merged
assert "headline news" in merged
def test_no_tool_choice_sent_when_no_tools(default_multi_llm: LitellmLLM) -> None:
    """Regression test for providers (e.g. Fireworks) that reject tool_choice=null.

    When no tools are provided, tool_choice must not be forwarded to
    litellm.completion() at all — not even as None.
    """
    messages: LanguageModelInput = [UserMessage(content="Hello!")]
    # Minimal canned completion so invoke() can run without a real provider call.
    mock_stream_chunks = [
        litellm.ModelResponse(
            id="chatcmpl-123",
            choices=[
                litellm.Choices(
                    delta=_create_delta(role="assistant", content="Hello!"),
                    finish_reason="stop",
                    index=0,
                )
            ],
            model="gpt-3.5-turbo",
        ),
    ]
    with patch("litellm.completion") as mock_completion:
        mock_completion.return_value = mock_stream_chunks
        # tools=None is the case under test: no tools means no tool_choice kwarg.
        default_multi_llm.invoke(messages, tools=None)
        # Inspect the keyword arguments actually forwarded to litellm.completion().
        _, kwargs = mock_completion.call_args
        assert (
            "tool_choice" not in kwargs
        ), "tool_choice must not be sent to providers when no tools are provided"

File diff suppressed because it is too large Load Diff

View File

@@ -207,16 +207,6 @@ prompt_yn_or_default() {
fi
}
# Prompt the user before installing a component.
# $1 - human-readable description of what would be installed.
# Returns 0 to proceed, 1 if the user declined (prints a skip warning).
confirm_action() {
    local description="$1"
    prompt_yn_or_default "Install ${description}? (Y/n) [default: Y] " "Y"
    case "$REPLY" in
        [Nn]*)
            print_warning "Skipping: ${description}"
            return 1
            ;;
    esac
    return 0
}
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
@@ -405,11 +395,6 @@ fi
if ! command -v docker &> /dev/null; then
if [[ "$OSTYPE" == "linux-gnu"* ]] || [[ -n "${WSL_DISTRO_NAME:-}" ]]; then
print_info "Docker is required but not installed."
if ! confirm_action "Docker Engine"; then
print_error "Docker is required to run Onyx."
exit 1
fi
install_docker_linux
if ! command -v docker &> /dev/null; then
print_error "Docker installation failed."
@@ -426,11 +411,7 @@ if command -v docker &> /dev/null \
&& ! command -v docker-compose &> /dev/null \
&& { [[ "$OSTYPE" == "linux-gnu"* ]] || [[ -n "${WSL_DISTRO_NAME:-}" ]]; }; then
print_info "Docker Compose is required but not installed."
if ! confirm_action "Docker Compose plugin"; then
print_error "Docker Compose is required to run Onyx."
exit 1
fi
print_info "Docker Compose not found — installing plugin..."
COMPOSE_ARCH="$(uname -m)"
COMPOSE_URL="https://github.com/docker/compose/releases/latest/download/docker-compose-linux-${COMPOSE_ARCH}"
COMPOSE_DIR="/usr/local/lib/docker/cli-plugins"
@@ -581,31 +562,10 @@ version_compare() {
# Check Docker daemon
if ! docker info &> /dev/null; then
if [[ "$OSTYPE" == "darwin"* ]]; then
print_info "Docker daemon is not running. Starting Docker Desktop..."
open -a Docker
# Wait up to 120 seconds for Docker to be ready
DOCKER_WAIT=0
DOCKER_MAX_WAIT=120
while ! docker info &> /dev/null; do
if [ $DOCKER_WAIT -ge $DOCKER_MAX_WAIT ]; then
print_error "Docker Desktop did not start within ${DOCKER_MAX_WAIT} seconds."
print_info "Please start Docker Desktop manually and re-run this script."
exit 1
fi
printf "\r\033[KWaiting for Docker Desktop to start... (%ds)" "$DOCKER_WAIT"
sleep 2
DOCKER_WAIT=$((DOCKER_WAIT + 2))
done
echo ""
print_success "Docker Desktop is now running"
else
print_error "Docker daemon is not running. Please start Docker."
exit 1
fi
else
print_success "Docker daemon is running"
print_error "Docker daemon is not running. Please start Docker."
exit 1
fi
print_success "Docker daemon is running"
# Check Docker resources
print_step "Verifying Docker resources"
@@ -785,7 +745,6 @@ print_success "All configuration files ready"
# Set up deployment configuration
print_step "Setting up deployment configs"
ENV_FILE="${INSTALL_ROOT}/deployment/.env"
ENV_TEMPLATE="${INSTALL_ROOT}/deployment/env.template"
# Check if services are already running
if [ -d "${INSTALL_ROOT}/deployment" ] && [ -f "${INSTALL_ROOT}/deployment/docker-compose.yml" ]; then
# Determine compose command
@@ -1125,25 +1084,6 @@ else
USE_LATEST=false
fi
# For pinned version tags, re-download config files from that tag so the
# compose file matches the images being pulled (the initial download used main).
if [[ "$USE_LATEST" = false ]] && [[ "$USE_LOCAL_FILES" = false ]]; then
PINNED_BASE="https://raw.githubusercontent.com/onyx-dot-app/onyx/${CURRENT_IMAGE_TAG}/deployment"
print_info "Fetching config files matching tag ${CURRENT_IMAGE_TAG}..."
if download_file "${PINNED_BASE}/docker_compose/docker-compose.yml" "${INSTALL_ROOT}/deployment/docker-compose.yml" 2>/dev/null; then
download_file "${PINNED_BASE}/data/nginx/app.conf.template" "${INSTALL_ROOT}/data/nginx/app.conf.template" 2>/dev/null || true
download_file "${PINNED_BASE}/data/nginx/run-nginx.sh" "${INSTALL_ROOT}/data/nginx/run-nginx.sh" 2>/dev/null || true
chmod +x "${INSTALL_ROOT}/data/nginx/run-nginx.sh"
if [[ "$LITE_MODE" = true ]]; then
download_file "${PINNED_BASE}/docker_compose/${LITE_COMPOSE_FILE}" \
"${INSTALL_ROOT}/deployment/${LITE_COMPOSE_FILE}" 2>/dev/null || true
fi
print_success "Config files updated to match ${CURRENT_IMAGE_TAG}"
else
print_warning "Tag ${CURRENT_IMAGE_TAG} not found on GitHub — using main branch configs"
fi
fi
# Pull Docker images with reduced output
print_step "Pulling Docker images"
print_info "This may take several minutes depending on your internet connection..."

View File

@@ -17,7 +17,6 @@ import (
type RunCIOptions struct {
DryRun bool
Yes bool
Rerun bool
}
// NewRunCICommand creates a new run-ci command
@@ -50,7 +49,6 @@ Example usage:
cmd.Flags().BoolVar(&opts.DryRun, "dry-run", false, "Perform all local operations but skip pushing to remote and creating PRs")
cmd.Flags().BoolVar(&opts.Yes, "yes", false, "Skip confirmation prompts and automatically proceed")
cmd.Flags().BoolVar(&opts.Rerun, "rerun", false, "Update an existing CI PR with the latest fork changes to re-trigger CI")
return cmd
}
@@ -109,44 +107,19 @@ func runCI(cmd *cobra.Command, args []string, opts *RunCIOptions) {
log.Fatalf("PR #%s is not from a fork - CI should already run automatically", prNumber)
}
// Create the CI branch
ciBranch := fmt.Sprintf("run-ci/%s", prNumber)
prTitle := fmt.Sprintf("chore: [Running GitHub actions for #%s]", prNumber)
prBody := fmt.Sprintf("This PR runs GitHub Actions CI for #%s.\n\n- [x] Override Linear Check\n\n**This PR should be closed (not merged) after CI completes.**", prNumber)
// Check if a CI PR already exists for this branch
existingPRURL, err := findExistingCIPR(ciBranch)
if err != nil {
log.Fatalf("Failed to check for existing CI PR: %v", err)
}
if existingPRURL != "" && !opts.Rerun {
log.Infof("A CI PR already exists for #%s: %s", prNumber, existingPRURL)
log.Info("Run with --rerun to update it with the latest fork changes and re-trigger CI.")
return
}
if opts.Rerun && existingPRURL == "" {
log.Warn("--rerun was specified but no existing open CI PR was found. A new PR will be created.")
}
if existingPRURL != "" && opts.Rerun {
log.Infof("Existing CI PR found: %s", existingPRURL)
log.Info("Will update the CI branch with the latest fork changes to re-trigger CI.")
}
// Confirm before proceeding
if !opts.Yes {
action := "Create CI branch"
if existingPRURL != "" {
action = "Update existing CI branch"
}
if !prompt.Confirm(fmt.Sprintf("%s for PR #%s? (yes/no): ", action, prNumber)) {
if !prompt.Confirm(fmt.Sprintf("Create CI branch for PR #%s? (yes/no): ", prNumber)) {
log.Info("Exiting...")
return
}
}
// Create the CI branch
ciBranch := fmt.Sprintf("run-ci/%s", prNumber)
prTitle := fmt.Sprintf("chore: [Running GitHub actions for #%s]", prNumber)
prBody := fmt.Sprintf("This PR runs GitHub Actions CI for #%s.\n\n- [x] Override Linear Check\n\n**This PR should be closed (not merged) after CI completes.**", prNumber)
// Fetch the fork's branch
if forkRepo == "" {
log.Fatalf("Could not determine fork repository - headRepositoryOwner or headRepository.name is empty")
@@ -185,11 +158,7 @@ func runCI(cmd *cobra.Command, args []string, opts *RunCIOptions) {
if opts.DryRun {
log.Warnf("[DRY RUN] Would push CI branch: %s", ciBranch)
if existingPRURL == "" {
log.Warnf("[DRY RUN] Would create PR: %s", prTitle)
} else {
log.Warnf("[DRY RUN] Would update existing PR: %s", existingPRURL)
}
log.Warnf("[DRY RUN] Would create PR: %s", prTitle)
// Switch back to original branch
if err := git.RunCommand("switch", "--quiet", originalBranch); err != nil {
log.Warnf("Failed to switch back to original branch: %v", err)
@@ -207,17 +176,6 @@ func runCI(cmd *cobra.Command, args []string, opts *RunCIOptions) {
log.Fatalf("Failed to push CI branch: %v", err)
}
if existingPRURL != "" {
// PR already exists - force push is enough to re-trigger CI
log.Infof("Switching back to original branch: %s", originalBranch)
if err := git.RunCommand("switch", "--quiet", originalBranch); err != nil {
log.Warnf("Failed to switch back to original branch: %v", err)
}
log.Infof("CI PR updated successfully: %s", existingPRURL)
log.Info("The force push will re-trigger CI. Remember to close (not merge) this PR after CI completes!")
return
}
// Create PR using GitHub CLI
log.Info("Creating PR...")
prURL, err := createCIPR(ciBranch, prInfo.BaseRefName, prTitle, prBody)
@@ -259,39 +217,6 @@ func getPRInfo(prNumber string) (*PRInfo, error) {
return &prInfo, nil
}
// findExistingCIPR checks if an open PR already exists for the given CI branch.
// Returns the PR URL if found, or empty string if not.
// findExistingCIPR checks if an open PR already exists for the given CI branch.
// Returns the PR URL if found, or empty string if not.
func findExistingCIPR(headBranch string) (string, error) {
	listCmd := exec.Command("gh", "pr", "list",
		"--head", headBranch,
		"--state", "open",
		"--json", "url",
	)
	raw, err := listCmd.Output()
	if err != nil {
		// Surface gh's stderr when available for a more actionable error.
		if exitErr, ok := err.(*exec.ExitError); ok {
			return "", fmt.Errorf("%w: %s", err, string(exitErr.Stderr))
		}
		return "", err
	}

	var openPRs []struct {
		URL string `json:"url"`
	}
	if jsonErr := json.Unmarshal(raw, &openPRs); jsonErr != nil {
		log.Debugf("Failed to parse PR list JSON: %v (raw: %s)", jsonErr, string(raw))
		return "", fmt.Errorf("failed to parse PR list: %w", jsonErr)
	}

	if len(openPRs) > 0 {
		log.Debugf("Found existing PR for branch %s: %s", headBranch, openPRs[0].URL)
		return openPRs[0].URL, nil
	}
	log.Debugf("No existing open PRs found for branch %s", headBranch)
	return "", nil
}
// createCIPR creates a pull request for CI using the GitHub CLI
func createCIPR(headBranch, baseBranch, title, body string) (string, error) {
cmd := exec.Command("gh", "pr", "create",

View File

@@ -1,16 +1,16 @@
"use client";
import Image from "next/image";
import { useMemo, useState } from "react";
import { AdminPageTitle } from "@/components/admin/Title";
import {
AzureIcon,
ElevenLabsIcon,
InfoIcon,
IconProps,
OpenAIIcon,
} from "@/components/icons/icons";
import Text from "@/refresh-components/texts/Text";
import Separator from "@/refresh-components/Separator";
import { Select } from "@/refresh-components/cards";
import Message from "@/refresh-components/messages/Message";
import * as SettingsLayouts from "@/layouts/settings-layouts";
import { FetchError } from "@/lib/fetcher";
import {
useVoiceProviders,
@@ -22,32 +22,21 @@ import {
} from "@/lib/admin/voice/svc";
import { ThreeDotsLoader } from "@/components/Loading";
import { Callout } from "@/components/ui/callout";
import Button from "@/refresh-components/buttons/Button";
import { Button as OpalButton } from "@opal/components";
import { cn } from "@/lib/utils";
import {
SvgArrowExchange,
SvgArrowRightCircle,
SvgAudio,
SvgCheckSquare,
SvgEdit,
SvgMicrophone,
SvgX,
} from "@opal/icons";
import { Content } from "@opal/layouts";
import { SvgMicrophone } from "@opal/icons";
import { ADMIN_ROUTES } from "@/lib/admin-routes";
import VoiceProviderSetupModal from "./VoiceProviderSetupModal";
interface ModelDetails {
id: string;
label: string;
subtitle: string;
logoSrc?: string;
providerType: string;
}
interface ProviderGroup {
providerType: string;
providerLabel: string;
logoSrc?: string;
models: ModelDetails[];
}
@@ -57,21 +46,18 @@ const STT_MODELS: ModelDetails[] = [
id: "whisper",
label: "Whisper",
subtitle: "OpenAI's general purpose speech recognition model.",
logoSrc: "/Openai.svg",
providerType: "openai",
},
{
id: "azure-speech-stt",
label: "Azure Speech",
subtitle: "Speech to text in Microsoft Foundry Tools.",
logoSrc: "/Azure.png",
providerType: "azure",
},
{
id: "elevenlabs-stt",
label: "ElevenAPI",
subtitle: "ElevenLabs Speech to Text API.",
logoSrc: "/ElevenLabs.svg",
providerType: "elevenlabs",
},
];
@@ -81,20 +67,17 @@ const TTS_PROVIDER_GROUPS: ProviderGroup[] = [
{
providerType: "openai",
providerLabel: "OpenAI",
logoSrc: "/Openai.svg",
models: [
{
id: "tts-1",
label: "TTS-1",
subtitle: "OpenAI's text-to-speech model optimized for speed.",
logoSrc: "/Openai.svg",
providerType: "openai",
},
{
id: "tts-1-hd",
label: "TTS-1 HD",
subtitle: "OpenAI's text-to-speech model optimized for quality.",
logoSrc: "/Openai.svg",
providerType: "openai",
},
],
@@ -102,13 +85,11 @@ const TTS_PROVIDER_GROUPS: ProviderGroup[] = [
{
providerType: "azure",
providerLabel: "Azure",
logoSrc: "/Azure.png",
models: [
{
id: "azure-speech-tts",
label: "Azure Speech",
subtitle: "Text to speech in Microsoft Foundry Tools.",
logoSrc: "/Azure.png",
providerType: "azure",
},
],
@@ -116,44 +97,42 @@ const TTS_PROVIDER_GROUPS: ProviderGroup[] = [
{
providerType: "elevenlabs",
providerLabel: "ElevenLabs",
logoSrc: "/ElevenLabs.svg",
models: [
{
id: "elevenlabs-tts",
label: "ElevenAPI",
subtitle: "ElevenLabs Text to Speech API.",
logoSrc: "/ElevenLabs.svg",
providerType: "elevenlabs",
},
],
},
];
interface HoverIconButtonProps extends React.ComponentProps<typeof Button> {
isHovered: boolean;
onMouseEnter: () => void;
onMouseLeave: () => void;
children: React.ReactNode;
}
const FallbackMicrophoneIcon = ({ size, className }: IconProps) => (
<SvgMicrophone size={size} className={className} />
);
function HoverIconButton({
isHovered,
onMouseEnter,
onMouseLeave,
children,
...buttonProps
}: HoverIconButtonProps) {
return (
<div onMouseEnter={onMouseEnter} onMouseLeave={onMouseLeave}>
<Button {...buttonProps} rightIcon={isHovered ? SvgX : SvgCheckSquare}>
{children}
</Button>
</div>
);
// Resolve the icon component for a voice provider type, falling back to the
// generic microphone icon for unrecognized providers.
function getProviderIcon(
  providerType: string
): React.FunctionComponent<IconProps> {
  const iconsByProvider: Record<string, React.FunctionComponent<IconProps>> = {
    openai: OpenAIIcon,
    azure: AzureIcon,
    elevenlabs: ElevenLabsIcon,
  };
  return iconsByProvider[providerType] ?? FallbackMicrophoneIcon;
}
type ProviderMode = "stt" | "tts";
const route = ADMIN_ROUTES.VOICE;
const pageDescription =
"Configure speech-to-text and text-to-speech providers for voice input and spoken responses.";
export default function VoiceConfigurationPage() {
const [modalOpen, setModalOpen] = useState(false);
const [selectedProvider, setSelectedProvider] = useState<string | null>(null);
@@ -167,7 +146,6 @@ export default function VoiceConfigurationPage() {
const [ttsActivationError, setTTSActivationError] = useState<string | null>(
null
);
const [hoveredButtonKey, setHoveredButtonKey] = useState<string | null>(null);
const { providers, error, isLoading, refresh: mutate } = useVoiceProviders();
@@ -261,7 +239,6 @@ export default function VoiceConfigurationPage() {
return !!provider?.has_api_key;
};
// Map provider types to their configured provider data
const providersByType = useMemo(() => {
return new Map((providers ?? []).map((p) => [p.provider_type, p] as const));
}, [providers]);
@@ -271,186 +248,46 @@ export default function VoiceConfigurationPage() {
const hasActiveTTSProvider =
providers?.some((p) => p.is_default_tts) ?? false;
const renderLogo = ({
logoSrc,
providerType,
alt,
size = 16,
}: {
logoSrc?: string;
providerType: string;
alt: string;
size?: number;
}) => {
const containerSizeClass = size === 24 ? "size-7" : "size-5";
return (
<div
className={cn(
"flex items-center justify-center px-0.5 py-0 shrink-0 overflow-clip",
containerSizeClass
)}
>
{providerType === "openai" ? (
<OpenAIIcon size={size} />
) : providerType === "azure" ? (
<AzureIcon size={size} />
) : providerType === "elevenlabs" ? (
<ElevenLabsIcon size={size} />
) : logoSrc ? (
<Image
src={logoSrc}
alt={alt}
width={size}
height={size}
className="object-contain"
/>
) : (
<SvgMicrophone size={size} className="text-text-02" />
)}
</div>
);
};
const renderModelCard = ({
model,
mode,
}: {
model: ModelDetails;
mode: ProviderMode;
}) => {
const getModelStatus = (
model: ModelDetails,
mode: ProviderMode
): "disconnected" | "connected" | "selected" => {
const provider = providersByType.get(model.providerType);
const isConfigured = isProviderConfigured(provider);
// For TTS, also check that this specific model is the default (not just the provider)
if (!provider || !isProviderConfigured(provider)) return "disconnected";
const isActive =
mode === "stt"
? provider?.is_default_stt
: provider?.is_default_tts && provider?.tts_model === model.id;
const isHighlighted = isActive ?? false;
const providerId = provider?.id;
? provider.is_default_stt
: provider.is_default_tts && provider.tts_model === model.id;
const buttonState = (() => {
if (!provider || !isConfigured) {
return {
label: "Connect",
disabled: false,
icon: "arrow" as const,
onClick: () => handleConnect(model.providerType, mode, model.id),
};
}
if (isActive) return "selected";
return "connected";
};
if (isActive) {
return {
label: "Current Default",
disabled: false,
icon: "check" as const,
onClick: providerId
? () => handleDeactivate(providerId, mode)
: undefined,
};
}
return {
label: "Set as Default",
disabled: false,
icon: "arrow-circle" as const,
onClick: providerId
? () => handleSetDefault(providerId, mode, model.id)
: undefined,
};
})();
const buttonKey = `${mode}-${model.id}`;
const isButtonHovered = hoveredButtonKey === buttonKey;
const isCardClickable =
buttonState.icon === "arrow" &&
typeof buttonState.onClick === "function" &&
!buttonState.disabled;
const handleCardClick = () => {
if (isCardClickable) {
buttonState.onClick?.();
}
};
const renderModelSelect = (model: ModelDetails, mode: ProviderMode) => {
const provider = providersByType.get(model.providerType);
const status = getModelStatus(model, mode);
const Icon = getProviderIcon(model.providerType);
return (
<div
<Select
key={`${mode}-${model.id}`}
onClick={isCardClickable ? handleCardClick : undefined}
className={cn(
"flex items-start justify-between gap-4 rounded-16 border p-2 bg-background-neutral-01",
isHighlighted ? "border-action-link-05" : "border-border-01",
isCardClickable &&
"cursor-pointer hover:bg-background-tint-01 transition-colors"
)}
>
<div className="flex flex-1 items-start gap-2.5 p-2">
{renderLogo({
logoSrc: model.logoSrc,
providerType: model.providerType,
alt: `${model.label} logo`,
size: 16,
})}
<div className="flex flex-col gap-0.5">
<Text as="p" mainUiAction text04>
{model.label}
</Text>
<Text as="p" secondaryBody text03>
{model.subtitle}
</Text>
</div>
</div>
<div className="flex items-center justify-end gap-1.5 self-center">
{isConfigured && (
<OpalButton
icon={SvgEdit}
tooltip="Edit"
prominence="tertiary"
size="sm"
onClick={(e) => {
e.stopPropagation();
if (provider) handleEdit(provider, mode, model.id);
}}
aria-label={`Edit ${model.label}`}
/>
)}
{buttonState.icon === "check" ? (
<HoverIconButton
isHovered={isButtonHovered}
onMouseEnter={() => setHoveredButtonKey(buttonKey)}
onMouseLeave={() => setHoveredButtonKey(null)}
action={true}
tertiary
disabled={buttonState.disabled}
onClick={(e) => {
e.stopPropagation();
buttonState.onClick?.();
}}
>
{buttonState.label}
</HoverIconButton>
) : (
<Button
action={false}
tertiary
disabled={buttonState.disabled || !buttonState.onClick}
onClick={(e) => {
e.stopPropagation();
buttonState.onClick?.();
}}
rightIcon={
buttonState.icon === "arrow"
? SvgArrowExchange
: buttonState.icon === "arrow-circle"
? SvgArrowRightCircle
: undefined
}
>
{buttonState.label}
</Button>
)}
</div>
</div>
aria-label={`voice-${mode}-${model.id}`}
icon={Icon}
title={model.label}
description={model.subtitle}
status={status}
onConnect={() => handleConnect(model.providerType, mode, model.id)}
onSelect={() => {
if (provider?.id) handleSetDefault(provider.id, mode, model.id);
}}
onDeselect={() => {
if (provider?.id) handleDeactivate(provider.id, mode);
}}
onEdit={() => {
if (provider) handleEdit(provider, mode, model.id);
}}
/>
);
};
@@ -462,61 +299,56 @@ export default function VoiceConfigurationPage() {
: undefined;
return (
<>
<AdminPageTitle
title="Voice"
icon={SvgMicrophone}
includeDivider={false}
<SettingsLayouts.Root>
<SettingsLayouts.Header
icon={route.icon}
title={route.title}
description={pageDescription}
/>
<Callout type="danger" title="Failed to load voice settings">
{message}
{detail && (
<Text as="p" className="mt-2 text-text-03" mainContentBody text03>
{detail}
</Text>
)}
</Callout>
</>
<SettingsLayouts.Body>
<Callout type="danger" title="Failed to load voice settings">
{message}
{detail && (
<Text as="p" mainContentBody text03>
{detail}
</Text>
)}
</Callout>
</SettingsLayouts.Body>
</SettingsLayouts.Root>
);
}
if (isLoading) {
return (
<>
<AdminPageTitle
title="Voice"
icon={SvgMicrophone}
includeDivider={false}
<SettingsLayouts.Root>
<SettingsLayouts.Header
icon={route.icon}
title={route.title}
description={pageDescription}
/>
<div className="mt-8">
<SettingsLayouts.Body>
<ThreeDotsLoader />
</div>
</>
</SettingsLayouts.Body>
</SettingsLayouts.Root>
);
}
return (
<>
<AdminPageTitle icon={SvgAudio} title="Voice" />
<div className="pt-4 pb-4">
<Text as="p" secondaryBody text03>
Speech to text (STT) and text to speech (TTS) capabilities.
</Text>
</div>
<Separator />
<div className="flex w-full flex-col gap-8 pb-6">
{/* Speech-to-Text Section */}
<div className="flex w-full max-w-[960px] flex-col gap-3">
<div className="flex flex-col">
<Text as="p" mainContentEmphasis text04>
Speech to Text
</Text>
<Text as="p" secondaryBody text03>
Select a model to transcribe speech to text in chats.
</Text>
</div>
<SettingsLayouts.Root>
<SettingsLayouts.Header
icon={route.icon}
title={route.title}
description={pageDescription}
/>
<SettingsLayouts.Body>
<div className="flex flex-col gap-6">
<Content
title="Speech to Text"
description="Select a model to transcribe speech to text in chats."
sizePreset="main-content"
variant="section"
/>
{sttActivationError && (
<Callout type="danger" title="Unable to update STT provider">
@@ -525,46 +357,28 @@ export default function VoiceConfigurationPage() {
)}
{!hasActiveSTTProvider && (
<div
className="flex items-start rounded-16 border p-2"
style={{
backgroundColor: "var(--status-info-00)",
borderColor: "var(--status-info-02)",
}}
>
<div className="flex items-start gap-1 p-2">
<div
className="flex size-5 items-center justify-center rounded-full p-0.5"
style={{
backgroundColor: "var(--status-info-01)",
}}
>
<div style={{ color: "var(--status-text-info-05)" }}>
<InfoIcon size={16} />
</div>
</div>
<Text as="p" className="flex-1 px-0.5" mainUiBody text04>
Connect a speech to text provider to use in chat.
</Text>
</div>
</div>
<Message
info
static
large
close={false}
text="Connect a speech to text provider to use in chat."
className="w-full"
/>
)}
<div className="flex flex-col gap-2">
{STT_MODELS.map((model) => renderModelCard({ model, mode: "stt" }))}
{STT_MODELS.map((model) => renderModelSelect(model, "stt"))}
</div>
</div>
{/* Text-to-Speech Section */}
<div className="flex w-full max-w-[960px] flex-col gap-3">
<div className="flex flex-col">
<Text as="p" mainContentEmphasis text04>
Text to Speech
</Text>
<Text as="p" secondaryBody text03>
Select a model to speak out chat responses.
</Text>
</div>
<div className="flex flex-col gap-6">
<Content
title="Text to Speech"
description="Select a model to speak out chat responses."
sizePreset="main-content"
variant="section"
/>
{ttsActivationError && (
<Callout type="danger" title="Unable to update TTS provider">
@@ -573,47 +387,28 @@ export default function VoiceConfigurationPage() {
)}
{!hasActiveTTSProvider && (
<div
className="flex items-start rounded-16 border p-2"
style={{
backgroundColor: "var(--status-info-00)",
borderColor: "var(--status-info-02)",
}}
>
<div className="flex items-start gap-1 p-2">
<div
className="flex size-5 items-center justify-center rounded-full p-0.5"
style={{
backgroundColor: "var(--status-info-01)",
}}
>
<div style={{ color: "var(--status-text-info-05)" }}>
<InfoIcon size={16} />
</div>
</div>
<Text as="p" className="flex-1 px-0.5" mainUiBody text04>
Connect a text to speech provider to use in chat.
</Text>
</div>
</div>
<Message
info
static
large
close={false}
text="Connect a text to speech provider to use in chat."
className="w-full"
/>
)}
<div className="flex flex-col gap-4">
{TTS_PROVIDER_GROUPS.map((group) => (
<div key={group.providerType} className="flex flex-col gap-2">
<Text as="p" secondaryBody text03 className="px-0.5">
{group.providerLabel}
</Text>
<div className="flex flex-col gap-2">
{group.models.map((model) =>
renderModelCard({ model, mode: "tts" })
)}
</div>
{TTS_PROVIDER_GROUPS.map((group) => (
<div key={group.providerType} className="flex flex-col gap-2">
<Text secondaryBody text03>
{group.providerLabel}
</Text>
<div className="flex flex-col gap-2">
{group.models.map((model) => renderModelSelect(model, "tts"))}
</div>
))}
</div>
</div>
))}
</div>
</div>
</SettingsLayouts.Body>
{modalOpen && selectedProvider && (
<VoiceProviderSetupModal
@@ -625,6 +420,6 @@ export default function VoiceConfigurationPage() {
onSuccess={handleModalSuccess}
/>
)}
</>
</SettingsLayouts.Root>
);
}

View File

@@ -45,6 +45,7 @@ const SETTINGS_LAYOUT_PREFIXES = [
ADMIN_ROUTES.GROUPS.path,
ADMIN_ROUTES.PERFORMANCE.path,
ADMIN_ROUTES.SCIM.path,
ADMIN_ROUTES.VOICE.path,
];
export function ClientLayout({ children, enableCloud }: ClientLayoutProps) {