diff --git a/crates/goose-cli/Cargo.toml b/crates/goose-cli/Cargo.toml
index a69c702cbb1f..b4262268e2f1 100644
--- a/crates/goose-cli/Cargo.toml
+++ b/crates/goose-cli/Cargo.toml
@@ -68,9 +68,10 @@ sigstore-verification = { version = "0.1", default-features = false, features =
winapi = { version = "0.3", features = ["wincred"] }
[features]
-default = ["code-mode"]
+default = ["code-mode", "local-inference"]
code-mode = ["goose/code-mode", "goose-acp/code-mode"]
-cuda = ["goose/cuda"]
+local-inference = ["goose/local-inference"]
+cuda = ["goose/cuda", "local-inference"]
# disables the update command
disable-update = []
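
Reviewer note: `cuda` now implies `local-inference`, so GPU builds keep the runtime while `--no-default-features` builds drop it. A minimal sketch (not part of this patch) of how the flags surface at compile time:

```rust
// Prints the compile-time flags this patch wires up. Try `cargo run`,
// `cargo run --no-default-features --features code-mode`, or
// `cargo run --features cuda` to see how the feature graph resolves.
fn main() {
    println!("local-inference: {}", cfg!(feature = "local-inference"));
    println!("code-mode:       {}", cfg!(feature = "code-mode"));
}
```
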
diff --git a/crates/goose-cli/src/cli.rs b/crates/goose-cli/src/cli.rs
index 063e73b8f207..143e3db27e00 100644
--- a/crates/goose-cli/src/cli.rs
+++ b/crates/goose-cli/src/cli.rs
@@ -865,6 +865,7 @@ enum Command {
command: TermCommand,
},
/// Manage local inference models
+ #[cfg(feature = "local-inference")]
#[command(about = "Manage local inference models", visible_alias = "lm")]
LocalModels {
#[command(subcommand)]
@@ -892,6 +893,7 @@ enum Command {
},
}
+#[cfg(feature = "local-inference")]
#[derive(Subcommand)]
enum LocalModelsCommand {
/// Search HuggingFace for GGUF models
@@ -1013,6 +1015,7 @@ fn get_command_name(command: &Option<Command>) -> &'static str {
Some(Command::Update { .. }) => "update",
Some(Command::Recipe { .. }) => "recipe",
Some(Command::Term { .. }) => "term",
+ #[cfg(feature = "local-inference")]
Some(Command::LocalModels { .. }) => "local-models",
Some(Command::Completion { .. }) => "completion",
Some(Command::ValidateExtensions { .. }) => "validate-extensions",
@@ -1473,6 +1476,7 @@ async fn handle_term_subcommand(command: TermCommand) -> Result<()> {
}
}
+#[cfg(feature = "local-inference")]
async fn handle_local_models_command(command: LocalModelsCommand) -> Result<()> {
use goose::providers::local_inference::hf_models;
use goose::providers::local_inference::local_model_registry::{
@@ -1759,6 +1763,7 @@ pub async fn cli() -> anyhow::Result<()> {
}
Some(Command::Recipe { command }) => handle_recipe_subcommand(command),
Some(Command::Term { command }) => handle_term_subcommand(command).await,
+ #[cfg(feature = "local-inference")]
Some(Command::LocalModels { command }) => handle_local_models_command(command).await,
Some(Command::ValidateExtensions { file }) => {
use goose::agents::validate_extensions::validate_bundled_extensions;
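
Reviewer note: the same `#[cfg]` attribute works on enum variants, match arms, and statements, which is why the `Command` enum, `get_command_name`, and the dispatcher can each be gated line by line. A standalone sketch of the pattern, with hypothetical names:

```rust
#[derive(Debug)]
enum Command {
    Status,
    // This variant only exists when the feature is compiled in.
    #[cfg(feature = "local-inference")]
    LocalModels,
}

fn name(command: &Command) -> &'static str {
    match command {
        Command::Status => "status",
        // The arm carries the same gate: without it the match would fail to
        // compile when the variant exists, and be unreachable otherwise.
        #[cfg(feature = "local-inference")]
        Command::LocalModels => "local-models",
    }
}

fn main() {
    println!("{}", name(&Command::Status));
}
```
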
diff --git a/crates/goose-server/Cargo.toml b/crates/goose-server/Cargo.toml
index c8902014b017..f8774920ac9d 100644
--- a/crates/goose-server/Cargo.toml
+++ b/crates/goose-server/Cargo.toml
@@ -11,9 +11,10 @@ description.workspace = true
workspace = true
[features]
-default = ["code-mode"]
+default = ["code-mode", "local-inference"]
code-mode = ["goose/code-mode"]
-cuda = ["goose/cuda"]
+local-inference = ["goose/local-inference"]
+cuda = ["goose/cuda", "local-inference"]
[dependencies]
goose = { path = "../goose", default-features = false }
diff --git a/crates/goose-server/src/auth.rs b/crates/goose-server/src/auth.rs
index 66a2d2a1eef7..d174306e6f25 100644
--- a/crates/goose-server/src/auth.rs
+++ b/crates/goose-server/src/auth.rs
@@ -12,6 +12,7 @@ pub async fn check_token(
next: Next,
) -> Result<Response, StatusCode> {
if request.uri().path() == "/status"
+ || request.uri().path() == "/features"
|| request.uri().path() == "/mcp-ui-proxy"
|| request.uri().path() == "/mcp-app-proxy"
|| request.uri().path() == "/mcp-app-guest"
diff --git a/crates/goose-server/src/openapi.rs b/crates/goose-server/src/openapi.rs
index eec8aeefe2ac..6eaaa1bc6140 100644
--- a/crates/goose-server/src/openapi.rs
+++ b/crates/goose-server/src/openapi.rs
@@ -479,20 +479,7 @@ derive_utoipa!(Icon as IconSchema);
super::routes::telemetry::send_telemetry_event,
super::routes::dictation::transcribe_dictation,
super::routes::dictation::get_dictation_config,
- super::routes::dictation::list_models,
- super::routes::dictation::download_model,
- super::routes::dictation::get_download_progress,
- super::routes::dictation::cancel_download,
- super::routes::dictation::delete_model,
- super::routes::local_inference::list_local_models,
- super::routes::local_inference::search_hf_models,
- super::routes::local_inference::get_repo_files,
- super::routes::local_inference::download_hf_model,
- super::routes::local_inference::get_local_model_download_progress,
- super::routes::local_inference::cancel_local_model_download,
- super::routes::local_inference::delete_local_model,
- super::routes::local_inference::get_model_settings,
- super::routes::local_inference::update_model_settings,
+ super::routes::features::get_features,
),
components(schemas(
super::routes::config_management::UpsertConfigQuery,
@@ -671,6 +658,33 @@ derive_utoipa!(Icon as IconSchema);
super::routes::dictation::TranscribeResponse,
goose::dictation::providers::DictationProvider,
super::routes::dictation::DictationProviderStatus,
+ super::routes::features::FeaturesResponse,
+ DownloadProgress,
+ DownloadStatus,
+ ))
+)]
+pub struct ApiDoc;
+
+#[cfg(feature = "local-inference")]
+#[derive(OpenApi)]
+#[openapi(
+ paths(
+ super::routes::dictation::list_models,
+ super::routes::dictation::download_model,
+ super::routes::dictation::get_download_progress,
+ super::routes::dictation::cancel_download,
+ super::routes::dictation::delete_model,
+ super::routes::local_inference::list_local_models,
+ super::routes::local_inference::search_hf_models,
+ super::routes::local_inference::get_repo_files,
+ super::routes::local_inference::download_hf_model,
+ super::routes::local_inference::get_local_model_download_progress,
+ super::routes::local_inference::cancel_local_model_download,
+ super::routes::local_inference::delete_local_model,
+ super::routes::local_inference::get_model_settings,
+ super::routes::local_inference::update_model_settings,
+ ),
+ components(schemas(
super::routes::dictation::WhisperModelResponse,
super::routes::local_inference::LocalModelResponse,
super::routes::local_inference::ModelDownloadStatus,
@@ -681,14 +695,17 @@ derive_utoipa!(Icon as IconSchema);
super::routes::local_inference::RepoVariantsResponse,
goose::providers::local_inference::local_model_registry::ModelSettings,
goose::providers::local_inference::local_model_registry::SamplingConfig,
- DownloadProgress,
- DownloadStatus,
))
)]
-pub struct ApiDoc;
+pub struct LocalInferenceApiDoc;
#[allow(dead_code)] // Used by generate_schema binary
pub fn generate_schema() -> String {
- let api_doc = ApiDoc::openapi();
+ #[allow(unused_mut)]
+ let mut api_doc = ApiDoc::openapi();
+
+ #[cfg(feature = "local-inference")]
+ api_doc.merge(LocalInferenceApiDoc::openapi());
+
serde_json::to_string_pretty(&api_doc).unwrap()
}
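
Reviewer note: splitting the spec into `ApiDoc` plus an optional `LocalInferenceApiDoc` leans on `utoipa::openapi::OpenApi::merge`, which folds the second document's paths and schemas into the first. A minimal sketch of that call, with empty placeholder docs:

```rust
use utoipa::OpenApi;

#[derive(OpenApi)]
#[openapi()]
struct Base;

#[derive(OpenApi)]
#[openapi()]
struct Extra;

fn main() {
    let mut doc = Base::openapi();
    // merge() folds Extra's paths, schemas, and tags into `doc`,
    // mirroring how generate_schema() conditionally widens the spec.
    doc.merge(Extra::openapi());
    println!("{}", doc.to_pretty_json().unwrap());
}
```
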
diff --git a/crates/goose-server/src/routes/dictation.rs b/crates/goose-server/src/routes/dictation.rs
index b4aaf8b71b46..e7c8111451c5 100644
--- a/crates/goose-server/src/routes/dictation.rs
+++ b/crates/goose-server/src/routes/dictation.rs
@@ -1,16 +1,22 @@
use crate::routes::errors::ErrorResponse;
use crate::state::AppState;
use axum::{
- extract::{DefaultBodyLimit, Path},
+ extract::DefaultBodyLimit,
http::StatusCode,
- routing::{delete, get, post},
+ routing::{get, post},
Json, Router,
};
+#[cfg(feature = "local-inference")]
+use axum::{extract::Path, routing::delete};
use base64::{engine::general_purpose::STANDARD as BASE64, Engine};
+#[cfg(feature = "local-inference")]
+use goose::dictation::providers::transcribe_local;
use goose::dictation::providers::{
- is_configured, transcribe_local, transcribe_with_provider, DictationProvider, PROVIDERS,
+ all_providers, is_configured, transcribe_with_provider, DictationProvider,
};
+#[cfg(feature = "local-inference")]
use goose::dictation::whisper;
+#[cfg(feature = "local-inference")]
use goose::download_manager::{get_download_manager, DownloadProgress};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
@@ -19,6 +25,7 @@ use utoipa::ToSchema;
const MAX_AUDIO_SIZE_BYTES: usize = 50 * 1024 * 1024;
+#[cfg(feature = "local-inference")]
#[derive(Debug, Serialize, ToSchema)]
pub struct WhisperModelResponse {
#[serde(flatten)]
@@ -171,6 +178,7 @@ pub async fn transcribe_dictation(
)
.await
.map_err(convert_error)?,
+ #[cfg(feature = "local-inference")]
DictationProvider::Local => transcribe_local(audio_bytes).await.map_err(convert_error)?,
};
@@ -189,7 +197,7 @@ pub async fn get_dictation_config(
let config = goose::config::Config::global();
let mut providers = HashMap::new();
- for def in PROVIDERS {
+ for def in all_providers() {
let provider = def.provider;
let configured = is_configured(provider);
@@ -222,6 +230,7 @@ pub async fn get_dictation_config(
Ok(Json(providers))
}
+#[cfg(feature = "local-inference")]
#[utoipa::path(
get,
path = "/dictation/models",
pub async fn list_models() -> Result<Json<Vec<WhisperModelResponse>>, ErrorResponse> {
Ok(Json(models))
}
+#[cfg(feature = "local-inference")]
#[utoipa::path(
post,
path = "/dictation/models/{model_id}/download",
pub async fn download_model(Path(model_id): Path<String>) -> Result<...> {
@@ ... @@
+#[cfg(feature = "local-inference")]
 pub async fn get_download_progress(Path(model_id): Path<String>) -> Result<...> {
@@ ... @@
+#[cfg(feature = "local-inference")]
 pub async fn cancel_download(Path(model_id): Path<String>) -> Result<...> {
@@ ... @@
+#[cfg(feature = "local-inference")]
 pub async fn delete_model(Path(model_id): Path<String>) -> Result<...> {
@@ ... @@ pub fn routes(state: Arc<AppState>) -> Router {
- Router::new()
+ let router = Router::new()
.route("/dictation/transcribe", post(transcribe_dictation))
- .route("/dictation/config", get(get_dictation_config))
+ .route("/dictation/config", get(get_dictation_config));
+
+ #[cfg(feature = "local-inference")]
+ let router = router
.route("/dictation/models", get(list_models))
.route(
"/dictation/models/{model_id}/download",
@@ -350,7 +366,9 @@ pub fn routes(state: Arc<AppState>) -> Router {
"/dictation/models/{model_id}/download",
delete(cancel_download),
)
- .route("/dictation/models/{model_id}", delete(delete_model))
+ .route("/dictation/models/{model_id}", delete(delete_model));
+
+ router
.layer(DefaultBodyLimit::max(MAX_AUDIO_SIZE_BYTES))
.with_state(state)
}
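
Reviewer note: rebinding `router` instead of using one long builder chain is what lets the model-management routes drop out cleanly, since `#[cfg]` on a `let` statement removes the whole rebinding. A self-contained sketch of the shape, independent of the goose handlers:

```rust
use axum::{routing::get, Router};

async fn transcribe() -> &'static str {
    "transcribed"
}

#[cfg(feature = "local-inference")]
async fn list_models() -> &'static str {
    "[]"
}

fn routes() -> Router {
    let router = Router::new().route("/transcribe", get(transcribe));

    // The rebinding (and its routes) exists only when the feature is on;
    // otherwise `router` keeps its first value.
    #[cfg(feature = "local-inference")]
    let router = router.route("/models", get(list_models));

    router
}

fn main() {
    let _app = routes();
}
```
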
diff --git a/crates/goose-server/src/routes/features.rs b/crates/goose-server/src/routes/features.rs
new file mode 100644
index 000000000000..f974df9fa9ca
--- /dev/null
+++ b/crates/goose-server/src/routes/features.rs
@@ -0,0 +1,33 @@
+use axum::{routing::get, Json, Router};
+use serde::Serialize;
+use std::collections::HashMap;
+use utoipa::ToSchema;
+
+#[derive(Serialize, ToSchema)]
+pub struct FeaturesResponse {
+ /// Map of feature name to enabled status
+ pub features: HashMap<String, bool>,
+}
+
+#[utoipa::path(
+ get,
+ path = "/features",
+ responses(
+ (status = 200, description = "Compile-time feature flags", body = FeaturesResponse),
+ )
+)]
+pub async fn get_features() -> Json<FeaturesResponse> {
+ let mut features = HashMap::new();
+
+ features.insert(
+ "local-inference".to_string(),
+ cfg!(feature = "local-inference"),
+ );
+ features.insert("code-mode".to_string(), cfg!(feature = "code-mode"));
+
+ Json(FeaturesResponse { features })
+}
+
+pub fn routes() -> Router {
+ Router::new().route("/features", get(get_features))
+}
diff --git a/crates/goose-server/src/routes/mod.rs b/crates/goose-server/src/routes/mod.rs
index c440a0d99157..00777d80acc4 100644
--- a/crates/goose-server/src/routes/mod.rs
+++ b/crates/goose-server/src/routes/mod.rs
@@ -3,7 +3,9 @@ pub mod agent;
pub mod config_management;
pub mod dictation;
pub mod errors;
+pub mod features;
pub mod gateway;
+#[cfg(feature = "local-inference")]
pub mod local_inference;
pub mod mcp_app_proxy;
pub mod mcp_ui_proxy;
@@ -27,13 +29,11 @@ use axum::Router;
// Function to configure all routes
pub fn configure(state: Arc<AppState>, secret_key: String) -> Router {
- Router::new()
+ let router = Router::new()
.merge(status::routes(state.clone()))
.merge(reply::routes(state.clone()))
.merge(action_required::routes(state.clone()))
.merge(agent::routes(state.clone()))
- .merge(dictation::routes(state.clone()))
- .merge(local_inference::routes(state.clone()))
.merge(config_management::routes(state.clone()))
.merge(prompts::routes())
.merge(recipe::routes(state.clone()))
@@ -46,5 +46,12 @@ pub fn configure(state: Arc<AppState>, secret_key: String) -> Router {
.merge(mcp_ui_proxy::routes(secret_key.clone()))
.merge(mcp_app_proxy::routes(secret_key))
.merge(session_events::routes(state.clone()))
- .merge(sampling::routes(state))
+ .merge(sampling::routes(state.clone()))
+ .merge(dictation::routes(state.clone()))
+ .merge(features::routes());
+
+ #[cfg(feature = "local-inference")]
+ let router = router.merge(local_inference::routes(state));
+
+ router
}
diff --git a/crates/goose-server/src/state.rs b/crates/goose-server/src/state.rs
index 09d903c9521d..73bb1726acad 100644
--- a/crates/goose-server/src/state.rs
+++ b/crates/goose-server/src/state.rs
@@ -13,6 +13,7 @@ use crate::session_event_bus::SessionEventBus;
use crate::tunnel::TunnelManager;
use goose::agents::ExtensionLoadResult;
use goose::gateway::manager::GatewayManager;
+#[cfg(feature = "local-inference")]
use goose::providers::local_inference::InferenceRuntime;
type ExtensionLoadingTasks =
@@ -26,6 +27,7 @@ pub struct AppState {
pub tunnel_manager: Arc<TunnelManager>,
pub gateway_manager: Arc<GatewayManager>,
pub extension_loading_tasks: ExtensionLoadingTasks,
+ #[cfg(feature = "local-inference")]
pub inference_runtime: Arc<InferenceRuntime>,
session_buses: Arc<Mutex<HashMap<String, Arc<SessionEventBus>>>>,
}
@@ -45,6 +47,7 @@ impl AppState {
tunnel_manager,
gateway_manager,
extension_loading_tasks: Arc::new(Mutex::new(HashMap::new())),
+ #[cfg(feature = "local-inference")]
inference_runtime: InferenceRuntime::get_or_init(),
session_buses: Arc::new(Mutex::new(HashMap::new())),
}))
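
Reviewer note: one subtlety of the gated `inference_runtime` field is that the struct literal in `AppState::new` must carry the same `#[cfg]` on the initializer, or the build breaks in exactly one of the two configurations. A small standalone illustration:

```rust
#[cfg(feature = "local-inference")]
struct InferenceRuntime;

struct AppState {
    sessions: Vec<String>,
    // Field exists only in local-inference builds.
    #[cfg(feature = "local-inference")]
    inference_runtime: InferenceRuntime,
}

fn new_state() -> AppState {
    AppState {
        sessions: Vec::new(),
        // The initializer is gated identically, so both configurations compile.
        #[cfg(feature = "local-inference")]
        inference_runtime: InferenceRuntime,
    }
}

fn main() {
    let state = new_state();
    assert!(state.sessions.is_empty());
}
```
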
diff --git a/crates/goose/Cargo.toml b/crates/goose/Cargo.toml
index ca94fe8d8b04..01eaf708dc71 100644
--- a/crates/goose/Cargo.toml
+++ b/crates/goose/Cargo.toml
@@ -8,9 +8,19 @@ repository.workspace = true
description.workspace = true
[features]
-default = ["code-mode"]
+default = ["code-mode", "local-inference"]
code-mode = ["dep:pctx_code_mode"]
-cuda = ["candle-core/cuda", "candle-nn/cuda", "llama-cpp-2/cuda"]
+local-inference = [
+ "dep:candle-core",
+ "dep:candle-nn",
+ "dep:candle-transformers",
+ "dep:llama-cpp-2",
+ "dep:tokenizers",
+ "dep:symphonia",
+ "dep:rubato",
+ "dep:byteorder",
+]
+cuda = ["local-inference", "candle-core/cuda", "candle-nn/cuda", "llama-cpp-2/cuda"]
[lints]
workspace = true
@@ -107,14 +117,14 @@ sacp = { workspace = true }
agent-client-protocol-schema = { version = "0.10", features = ["unstable"] }
unicode-normalization = "0.1"
-# For local Whisper transcription
-candle-core = { version = "0.9", default-features = false }
-candle-nn = { version = "0.9", default-features = false }
-candle-transformers = { version = "0.9", default-features = false }
-byteorder = "1.5.0"
-tokenizers = { version = "0.21.0", default-features = false, features = ["onig"] }
-symphonia = { version = "0.5", features = ["all"] }
-rubato = "0.16"
+# For local Whisper transcription (optional, behind "local-inference" feature)
+candle-core = { version = "0.9", default-features = false, optional = true }
+candle-nn = { version = "0.9", default-features = false, optional = true }
+candle-transformers = { version = "0.9", default-features = false, optional = true }
+byteorder = { version = "1.5.0", optional = true }
+tokenizers = { version = "0.21.0", default-features = false, features = ["onig"], optional = true }
+symphonia = { version = "0.5", features = ["all"], optional = true }
+rubato = { version = "0.16", optional = true }
zip = "0.6"
sys-info = "0.9"
@@ -139,7 +149,7 @@ tree-sitter-typescript = { workspace = true }
which = { workspace = true }
pctx_code_mode = { version = "^0.3.0", optional = true }
pulldown-cmark = "0.13.0"
-llama-cpp-2 = { version = "0.1.137", features = ["sampler"] }
+llama-cpp-2 = { version = "0.1.137", features = ["sampler"], optional = true }
encoding_rs = "0.8.35"
pastey = "0.2.1"
shell-words = "1.1.1"
@@ -149,9 +159,9 @@ winapi = { version = "0.3", features = ["wincred"] }
# Platform-specific GPU acceleration for Whisper and local inference
[target.'cfg(target_os = "macos")'.dependencies]
-candle-core = { version = "0.9", default-features = false, features = ["metal"] }
-candle-nn = { version = "0.9", default-features = false, features = ["metal"] }
-llama-cpp-2 = { version = "0.1.137", features = ["sampler", "metal"] }
+candle-core = { version = "0.9", default-features = false, features = ["metal"], optional = true }
+candle-nn = { version = "0.9", default-features = false, features = ["metal"], optional = true }
+llama-cpp-2 = { version = "0.1.137", features = ["sampler", "metal"], optional = true }
[dev-dependencies]
serial_test = { workspace = true }
@@ -175,6 +185,10 @@ path = "examples/agent.rs"
name = "databricks_oauth"
path = "examples/databricks_oauth.rs"
+[[example]]
+name = "test_whisper"
+path = "examples/test_whisper.rs"
+required-features = ["local-inference"]
[[bin]]
name = "analyze_cli"
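
Reviewer note: with every Whisper/llama dependency now `optional = true`, the `dep:` entries keep `local-inference` from leaking implicit per-dependency feature names, and `cuda = ["local-inference", ...]` encodes the relationship between the two flags in Cargo itself. If extra safety were wanted, a crate-level guard like the following (not in this patch) would turn any future feature-graph mismatch into an explicit error:

```rust
// Hypothetical guard, e.g. at the top of crates/goose/src/lib.rs. Redundant
// while Cargo already makes `cuda` imply `local-inference`, but it documents
// the invariant and fails fast if the feature graph is ever rearranged.
#[cfg(all(feature = "cuda", not(feature = "local-inference")))]
compile_error!("the `cuda` feature requires `local-inference`");

fn main() {}
```
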
diff --git a/crates/goose/src/dictation/mod.rs b/crates/goose/src/dictation/mod.rs
index d14fb2164201..8e70c8204578 100644
--- a/crates/goose/src/dictation/mod.rs
+++ b/crates/goose/src/dictation/mod.rs
@@ -1,2 +1,3 @@
pub mod providers;
+#[cfg(feature = "local-inference")]
pub mod whisper;
diff --git a/crates/goose/src/dictation/providers.rs b/crates/goose/src/dictation/providers.rs
index d1d88202dcd6..a23d971f946b 100644
--- a/crates/goose/src/dictation/providers.rs
+++ b/crates/goose/src/dictation/providers.rs
@@ -1,18 +1,22 @@
use crate::config::Config;
+#[cfg(feature = "local-inference")]
use crate::dictation::whisper::LOCAL_WHISPER_MODEL_CONFIG_KEY;
use crate::providers::api_client::{ApiClient, AuthMethod};
use anyhow::Result;
use serde::{Deserialize, Serialize};
+#[cfg(feature = "local-inference")]
use std::sync::Mutex;
use std::time::Duration;
use utoipa::ToSchema;
const REQUEST_TIMEOUT: Duration = Duration::from_secs(30);
+#[cfg(feature = "local-inference")]
static LOCAL_TRANSCRIBER: once_cell::sync::Lazy<
Mutex<...>,
> = ...;
diff --git a/... b/...
@@ ... @@
-          <...>
-            Use a Local Model
-            Free & Private
-          </...>
+          {localInference && (
+            <...>
+              Use a Local Model
+              Free & Private
+            </...>
+            <...>
+              Download a model and run entirely on your machine. No API keys, no accounts.
+            </...>
-          <...>
-            Download a model and run entirely on your machine. No API keys, no accounts.
-          </...>
+          )}
{error && (
diff --git a/ui/desktop/src/components/settings/SettingsView.tsx b/ui/desktop/src/components/settings/SettingsView.tsx
index ed655af406f0..181b3d4015dd 100644
--- a/ui/desktop/src/components/settings/SettingsView.tsx
+++ b/ui/desktop/src/components/settings/SettingsView.tsx
@@ -19,6 +19,7 @@ import KeyboardShortcutsSection from './keyboard/KeyboardShortcutsSection';
import LocalInferenceSection from './localInference/LocalInferenceSection';
import { CONFIGURATION_ENABLED } from '../../updates';
import { trackSettingsTabViewed } from '../../utils/analytics';
+import { useFeatures } from '../../contexts/FeaturesContext';
export type SettingsViewOptions = {
deepLinkConfig?: ExtensionConfig;
@@ -39,6 +40,7 @@ export default function SettingsView({
const [activeTab, setActiveTab] = useState('models');
const [tunnelDisabled, setTunnelDisabled] = useState(false);
const hasTrackedInitialTab = useRef(false);
+ const { localInference } = useFeatures();
const handleTabChange = (tab: string) => {
setActiveTab(tab);
@@ -65,11 +67,18 @@ export default function SettingsView({
};
const targetTab = sectionToTab[viewOptions.section];
- if (targetTab) {
+ if (targetTab && (targetTab !== 'local-inference' || localInference)) {
setActiveTab(targetTab);
}
}
- }, [viewOptions.section]);
+ }, [viewOptions.section, localInference]);
+
+ // Reset active tab if local-inference becomes unavailable
+ useEffect(() => {
+ if (!localInference && activeTab === 'local-inference') {
+ setActiveTab('models');
+ }
+ }, [localInference, activeTab]);
useEffect(() => {
if (!hasTrackedInitialTab.current) {
@@ -130,14 +139,16 @@ export default function SettingsView({
Models
-          <...>
-            <...>
-            Local Inference
-          </...>
+          {localInference && (
+            <...>
+              <...>
+              Local Inference
+            </...>
+          )}
Chat
@@ -181,12 +192,14 @@ export default function SettingsView({
-          <...>
-            <...>
-          </...>
+          {localInference && (
+            <...>
+              <...>
+            </...>
+          )}
diff --git a/.../DictationSettings.tsx b/.../DictationSettings.tsx
@@ ... @@
 export const DictationSettings = () => {
+ const { localInference, isLoading: isFeaturesLoading } = useFeatures();
const [provider, setProvider] = useState<DictationProvider | null>(null);
const [providerStatuses, setProviderStatuses] = useState<Record<string, DictationProviderStatus>>(
{}
);
@@ -32,6 +34,8 @@ export const DictationSettings = () => {
};
useEffect(() => {
+ if (isFeaturesLoading) return;
+
const loadSettings = async () => {
const providerValue = await read('voice_dictation_provider', false);
let loadedProvider: DictationProvider | null = (providerValue as DictationProvider) || null;
@@ -45,6 +49,11 @@ export const DictationSettings = () => {
await upsert('voice_dictation_provider', '', false);
}
+ if (!localInference && loadedProvider === 'local') {
+ loadedProvider = null;
+ await upsert('voice_dictation_provider', '', false);
+ }
+
setProvider(loadedProvider);
const micValue = await read('voice_dictation_preferred_mic', false);
@@ -54,7 +63,7 @@ export const DictationSettings = () => {
};
loadSettings();
- }, [read, upsert]);
+ }, [read, upsert, localInference, isFeaturesLoading]);
const handleProviderChange = (value: string) => {
const newProvider = value === 'disabled' ? null : (value as DictationProvider);
diff --git a/ui/desktop/src/contexts/FeaturesContext.tsx b/ui/desktop/src/contexts/FeaturesContext.tsx
new file mode 100644
index 000000000000..c0467cab52e9
--- /dev/null
+++ b/ui/desktop/src/contexts/FeaturesContext.tsx
@@ -0,0 +1,49 @@
+import { createContext, useContext, useEffect, useState, useMemo } from 'react';
+import { getFeatures } from '../api';
+
+interface FeaturesContextValue {
+ localInference: boolean;
+ codeMode: boolean;
+ isLoading: boolean;
+}
+
+const FeaturesContext = createContext<FeaturesContextValue | null>(null);
+
+export function FeaturesProvider({ children }: { children: React.ReactNode }) {
+ const [features, setFeatures] = useState<Record<string, boolean>>({});
+ const [isLoading, setIsLoading] = useState(true);
+
+ useEffect(() => {
+ (async () => {
+ try {
+ const response = await getFeatures({ throwOnError: false });
+ if (response.data) {
+ setFeatures(response.data.features);
+ }
+ } catch (error) {
+ console.warn('[FeaturesContext] Failed to fetch features:', error);
+ } finally {
+ setIsLoading(false);
+ }
+ })();
+ }, []);
+
+ const value = useMemo(
+ () => ({
+ localInference: features['local-inference'] ?? false,
+ codeMode: features['code-mode'] ?? true,
+ isLoading,
+ }),
+ [features, isLoading]
+ );
+
+ return <FeaturesContext.Provider value={value}>{children}</FeaturesContext.Provider>;
+}
+
+export function useFeatures(): FeaturesContextValue {
+ const context = useContext(FeaturesContext);
+ if (!context) {
+ throw new Error('useFeatures must be used within a FeaturesProvider');
+ }
+ return context;
+}