diff --git a/x-pack/platform/plugins/shared/search_inference_endpoints/public/components/settings/add_model_popover.test.tsx b/x-pack/platform/plugins/shared/search_inference_endpoints/public/components/settings/add_model_popover.test.tsx index ea22b1fd6ae49..db98ad49101fb 100644 --- a/x-pack/platform/plugins/shared/search_inference_endpoints/public/components/settings/add_model_popover.test.tsx +++ b/x-pack/platform/plugins/shared/search_inference_endpoints/public/components/settings/add_model_popover.test.tsx @@ -10,12 +10,14 @@ import { render, screen, fireEvent } from '@testing-library/react'; import { EuiThemeProvider } from '@elastic/eui'; import { I18nProvider } from '@kbn/i18n-react'; import { QueryClient, QueryClientProvider } from '@kbn/react-query'; +import { InferenceConnectorType } from '@kbn/inference-common'; +import type { InferenceConnector } from '@kbn/inference-common'; import { AddModelPopover } from './add_model_popover'; -import { useQueryInferenceEndpoints } from '../../hooks/use_inference_endpoints'; +import { useConnectors } from '../../hooks/use_connectors'; -jest.mock('../../hooks/use_inference_endpoints'); +jest.mock('../../hooks/use_connectors'); -const mockUseQueryInferenceEndpoints = useQueryInferenceEndpoints as jest.Mock; +const mockUseConnectors = useConnectors as jest.Mock; const Wrapper = ({ children }: { children: React.ReactNode }) => { const queryClient = new QueryClient(); @@ -28,43 +30,66 @@ const Wrapper = ({ children }: { children: React.ReactNode }) => { ); }; -const mockEndpoints = [ - { - inference_id: 'ep-1', - service: 'openai', - task_type: 'chat_completion', - service_settings: { model_id: 'gpt-4o' }, - }, - { - inference_id: 'ep-2', - service: 'openai', - task_type: 'chat_completion', - service_settings: { model_id: 'gpt-4o-mini' }, - }, - { - inference_id: 'ep-eis', - service: 'elastic', - task_type: 'chat_completion', - service_settings: { model_id: 'claude-sonnet' }, - metadata: { - display: { - name: 'Claude 
Sonnet', -          model_creator: 'Anthropic', -        }, -      }, -  }, -  { -    inference_id: 'ep-eis-no-meta', -    service: 'elastic', -    task_type: 'chat_completion', -    service_settings: { model_id: 'some-model' }, -  }, -  { -    inference_id: 'ep-embed', -    service: 'elastic', -    task_type: 'text_embedding', -    service_settings: { model_id: 'e5' }, -  }, +const createConnector = (overrides: Partial<InferenceConnector>): InferenceConnector => ({ +  type: InferenceConnectorType.Inference, +  name: 'test-connector', +  connectorId: 'test-id', +  config: {}, +  capabilities: {}, +  isInferenceEndpoint: true, +  isPreconfigured: false, +  ...overrides, +}); + +/** + * Connectors returned by useConnectors(): + * - Stack connectors (OpenAI, Bedrock, Gemini) — always chat_completion + * - ES inference endpoints with chat_completion task type + */ +const mockConnectors: InferenceConnector[] = [ +  createConnector({ +    connectorId: 'ep-1', +    name: 'OpenAI GPT-4o', +    type: InferenceConnectorType.Inference, +    config: { taskType: 'chat_completion', service: 'openai' }, +    isInferenceEndpoint: true, +  }), +  createConnector({ +    connectorId: 'ep-2', +    name: 'OpenAI GPT-4o-mini', +    type: InferenceConnectorType.Inference, +    config: { taskType: 'chat_completion', service: 'openai' }, +    isInferenceEndpoint: true, +  }), +  createConnector({ +    connectorId: 'ep-eis', +    name: 'Claude Sonnet', +    type: InferenceConnectorType.Inference, +    config: { taskType: 'chat_completion', service: 'elastic', modelCreator: 'Anthropic' }, +    isInferenceEndpoint: true, +    isEis: true, +  }), +  createConnector({ +    connectorId: 'stack-openai-1', +    name: 'My OpenAI Connector', +    type: InferenceConnectorType.OpenAI, +    config: { apiProvider: 'OpenAI' }, +    isInferenceEndpoint: false, +  }), +  createConnector({ +    connectorId: 'stack-bedrock-1', +    name: 'My Bedrock Connector', +    type: InferenceConnectorType.Bedrock, +    config: {}, +    isInferenceEndpoint: false, +  }), +  createConnector({ +    connectorId: 'stack-gemini-1', +    name: 'My Gemini Connector', +    type: 
InferenceConnectorType.Gemini, + config: {}, + isInferenceEndpoint: false, + }), ]; describe('AddModelPopover', () => { @@ -72,7 +97,7 @@ describe('AddModelPopover', () => { beforeEach(() => { jest.clearAllMocks(); - mockUseQueryInferenceEndpoints.mockReturnValue({ data: mockEndpoints }); + mockUseConnectors.mockReturnValue({ data: mockConnectors }); }); it('renders the add model button', () => { @@ -106,23 +131,11 @@ describe('AddModelPopover', () => { fireEvent.click(screen.getByTestId('add-model-button')); - expect(screen.queryByText('gpt-4o')).not.toBeInTheDocument(); - expect(screen.getByText('gpt-4o-mini')).toBeInTheDocument(); - }); - - it('filters by task type when provided', () => { - render( - - - - ); - - fireEvent.click(screen.getByTestId('add-model-button')); - - expect(screen.queryByText('gpt-4o')).not.toBeInTheDocument(); + expect(screen.queryByText('OpenAI GPT-4o')).not.toBeInTheDocument(); + expect(screen.getByText('OpenAI GPT-4o-mini')).toBeInTheDocument(); }); - it('uses display name for EIS endpoints with metadata', () => { + it('lists all connectors returned by useConnectors (stack OpenAI, Bedrock, Gemini, and chat_completion endpoints)', () => { render( @@ -131,10 +144,13 @@ describe('AddModelPopover', () => { fireEvent.click(screen.getByTestId('add-model-button')); - expect(screen.getByText('Claude Sonnet')).toBeInTheDocument(); + expect(screen.getByText('My OpenAI Connector')).toBeInTheDocument(); + expect(screen.getByText('My Bedrock Connector')).toBeInTheDocument(); + expect(screen.getByText('My Gemini Connector')).toBeInTheDocument(); + expect(screen.getByText('OpenAI GPT-4o')).toBeInTheDocument(); }); - it('falls back to model_id for EIS endpoints without display metadata', () => { + it('calls onAdd with the selected connector ID', () => { render( @@ -142,11 +158,30 @@ describe('AddModelPopover', () => { ); fireEvent.click(screen.getByTestId('add-model-button')); + fireEvent.click(screen.getByText('My Gemini Connector')); - 
expect(screen.getByText('some-model')).toBeInTheDocument(); + expect(onAdd).toHaveBeenCalledWith('stack-gemini-1'); }); - it('calls onAdd with the selected endpoint inference_id', () => { + it('shows disambiguation suffix when multiple connectors share a name', () => { + const duplicateConnectors = [ + createConnector({ + connectorId: 'ep-a', + name: 'My Connector', + type: InferenceConnectorType.Inference, + config: { taskType: 'chat_completion', service: 'openai' }, + isInferenceEndpoint: true, + }), + createConnector({ + connectorId: 'ep-b', + name: 'My Connector', + type: InferenceConnectorType.Inference, + config: { taskType: 'chat_completion', service: 'openai' }, + isInferenceEndpoint: true, + }), + ]; + mockUseConnectors.mockReturnValue({ data: duplicateConnectors }); + render( @@ -154,27 +189,13 @@ describe('AddModelPopover', () => { ); fireEvent.click(screen.getByTestId('add-model-button')); - fireEvent.click(screen.getByText('gpt-4o-mini')); - expect(onAdd).toHaveBeenCalledWith('ep-2'); + expect(screen.getByText('My Connector (ep-a)')).toBeInTheDocument(); + expect(screen.getByText('My Connector (ep-b)')).toBeInTheDocument(); }); - it('shows disambiguation suffix when multiple endpoints share a model name', () => { - const duplicateEndpoints = [ - { - inference_id: 'ep-a', - service: 'openai', - task_type: 'chat_completion', - service_settings: { model_id: 'gpt-4o' }, - }, - { - inference_id: 'ep-b', - service: 'openai', - task_type: 'chat_completion', - service_settings: { model_id: 'gpt-4o' }, - }, - ]; - mockUseQueryInferenceEndpoints.mockReturnValue({ data: duplicateEndpoints }); + it('handles empty connectors gracefully', () => { + mockUseConnectors.mockReturnValue({ data: [] }); render( @@ -184,7 +205,6 @@ describe('AddModelPopover', () => { fireEvent.click(screen.getByTestId('add-model-button')); - expect(screen.getByText('gpt-4o (ep-a)')).toBeInTheDocument(); - expect(screen.getByText('gpt-4o (ep-b)')).toBeInTheDocument(); + 
expect(screen.getByTestId('add-model-search')).toBeInTheDocument(); }); }); diff --git a/x-pack/platform/plugins/shared/search_inference_endpoints/public/components/settings/add_model_popover.tsx b/x-pack/platform/plugins/shared/search_inference_endpoints/public/components/settings/add_model_popover.tsx index 92364727c112b..4465394f0729b 100644 --- a/x-pack/platform/plugins/shared/search_inference_endpoints/public/components/settings/add_model_popover.tsx +++ b/x-pack/platform/plugins/shared/search_inference_endpoints/public/components/settings/add_model_popover.tsx @@ -10,69 +10,42 @@ import { css } from '@emotion/react'; import { EuiButtonEmpty, EuiIcon, EuiPopover, EuiSelectable } from '@elastic/eui'; import { i18n } from '@kbn/i18n'; import type { EuiSelectableOption } from '@elastic/eui'; -import { SERVICE_PROVIDERS } from '@kbn/inference-endpoint-ui-common'; -import type { ServiceProviderKeys } from '@kbn/inference-endpoint-ui-common'; -import { useQueryInferenceEndpoints } from '../../hooks/use_inference_endpoints'; -import { getModelId } from '../../utils/get_model_id'; -import { - isEisEndpoint, - getModelName, - getModelCreator, - getProviderKeyForCreator, -} from '../../utils/eis_utils'; +import { useConnectors } from '../../hooks/use_connectors'; +import { getConnectorIcon } from '../../utils/connector_display'; interface AddModelPopoverProps { existingEndpointIds: string[]; onAdd: (endpointId: string) => void; - taskType?: string; panelWidth?: number; } export const AddModelPopover: React.FC = ({ existingEndpointIds, onAdd, - taskType, panelWidth, }) => { - const { data: inferenceEndpoints = [] } = useQueryInferenceEndpoints(); + const { data: connectors = [] } = useConnectors(); const [isOpen, setIsOpen] = useState(false); const options: EuiSelectableOption[] = useMemo(() => { const existingSet = new Set(existingEndpointIds); - const available = inferenceEndpoints.filter( - (endpoint) => - !existingSet.has(endpoint.inference_id) && (!taskType || 
endpoint.task_type === taskType) - ); + const available = connectors.filter((connector) => !existingSet.has(connector.connectorId)); - const modelToCount = inferenceEndpoints.reduce>((acc, ep) => { - const modelId = getModelId(ep) ?? ep.inference_id; - acc.set(modelId, (acc.get(modelId) ?? 0) + 1); + const nameToCount = connectors.reduce>((acc, connector) => { + acc.set(connector.name, (acc.get(connector.name) ?? 0) + 1); return acc; }, new Map()); - return available.map((endpoint) => { - const modelId = getModelId(endpoint) ?? endpoint.inference_id; - const count = modelToCount.get(modelId) ?? 1; - let icon: string; - let baseName: string; - if (isEisEndpoint(endpoint)) { - const creator = getModelCreator(endpoint); - const providerKey = getProviderKeyForCreator(creator); - icon = (providerKey && SERVICE_PROVIDERS[providerKey]?.icon) ?? 'compute'; - baseName = getModelName(endpoint); - } else { - const provider = SERVICE_PROVIDERS[endpoint.service as ServiceProviderKeys]; - icon = provider?.icon ?? 'compute'; - baseName = modelId; - } - const label = count > 1 ? `${baseName} (${endpoint.inference_id})` : baseName; + return available.map((connector) => { + const count = nameToCount.get(connector.name) ?? 1; + const label = count > 1 ? 
`${connector.name} (${connector.connectorId})` : connector.name; return { label, - key: endpoint.inference_id, - prepend: , + key: connector.connectorId, + prepend: , }; }); - }, [inferenceEndpoints, existingEndpointIds, taskType]); + }, [connectors, existingEndpointIds]); const handleChange = useCallback( (newOptions: EuiSelectableOption[]) => { diff --git a/x-pack/platform/plugins/shared/search_inference_endpoints/public/components/settings/sub_feature_card.tsx b/x-pack/platform/plugins/shared/search_inference_endpoints/public/components/settings/sub_feature_card.tsx index c061b3dd6f5a3..4c6bce47d11e3 100644 --- a/x-pack/platform/plugins/shared/search_inference_endpoints/public/components/settings/sub_feature_card.tsx +++ b/x-pack/platform/plugins/shared/search_inference_endpoints/public/components/settings/sub_feature_card.tsx @@ -28,14 +28,10 @@ import { euiDragDropReorder, } from '@elastic/eui'; import { i18n } from '@kbn/i18n'; -import type { InferenceConnector } from '@kbn/inference-common'; -import { InferenceConnectorType } from '@kbn/inference-common'; -import { SERVICE_PROVIDERS } from '@kbn/inference-endpoint-ui-common'; -import type { ServiceProviderKeys } from '@kbn/inference-endpoint-ui-common'; import { css } from '@emotion/react'; import { NO_DEFAULT_MODEL } from '../../../common/constants'; import { useRegisteredFeatures } from '../../hooks/use_registered_features'; -import { getProviderKeyForCreator } from '../../utils/eis_utils'; +import { getConnectorIcon } from '../../utils/connector_display'; import type { InferenceFeatureResponse as InferenceFeatureConfig } from '../../../common/types'; import { AddModelPopover } from './add_model_popover'; import { CopyToModal } from './copy_to_modal'; @@ -43,28 +39,6 @@ import { useConnectors } from '../../hooks/use_connectors'; const COLLAPSED_COUNT = 5; -const getConnectorIcon = (connector: InferenceConnector): string => { - let key: string | undefined; - switch (connector.type) { - case 
InferenceConnectorType.OpenAI: - key = connector.config?.apiProvider === 'Azure OpenAI' ? 'azureopenai' : 'openai'; - break; - case InferenceConnectorType.Bedrock: - key = 'amazonbedrock'; - break; - case InferenceConnectorType.Gemini: - key = 'googlevertexai'; - break; - case InferenceConnectorType.Inference: - key = - getProviderKeyForCreator(connector.config?.modelCreator) ?? - connector.config?.service ?? - connector.config?.provider; - break; - } - return SERVICE_PROVIDERS[key as ServiceProviderKeys]?.icon ?? 'compute'; -}; - interface SubFeatureCardProps { featureId: string; feature: InferenceFeatureConfig; @@ -442,7 +416,6 @@ export const SubFeatureCard: React.FC = ({ diff --git a/x-pack/platform/plugins/shared/search_inference_endpoints/public/utils/connector_display.test.ts b/x-pack/platform/plugins/shared/search_inference_endpoints/public/utils/connector_display.test.ts new file mode 100644 index 0000000000000..a742d7886e793 --- /dev/null +++ b/x-pack/platform/plugins/shared/search_inference_endpoints/public/utils/connector_display.test.ts @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
 */ + +import { InferenceConnectorType } from '@kbn/inference-common'; +import type { InferenceConnector } from '@kbn/inference-common'; +import { getConnectorIcon } from './connector_display'; + +const createConnector = (overrides: Partial<InferenceConnector>): InferenceConnector => ({ +  type: InferenceConnectorType.Inference, +  name: 'test-connector', +  connectorId: 'test-id', +  config: {}, +  capabilities: {}, +  isInferenceEndpoint: true, +  isPreconfigured: false, +  ...overrides, +}); + +describe('getConnectorIcon', () => { +  it('returns openai icon for OpenAI connectors', () => { +    const connector = createConnector({ +      type: InferenceConnectorType.OpenAI, +      config: { apiProvider: 'OpenAI' }, +    }); +    expect(getConnectorIcon(connector)).not.toBe('compute'); +  }); + +  it('returns azureopenai icon for Azure OpenAI connectors', () => { +    const connector = createConnector({ +      type: InferenceConnectorType.OpenAI, +      config: { apiProvider: 'Azure OpenAI' }, +    }); +    expect(getConnectorIcon(connector)).not.toBe('compute'); +  }); + +  it('returns compute fallback for unknown provider', () => { +    const connector = createConnector({ +      type: InferenceConnectorType.Inference, +      config: { service: 'unknown_service_xyz' }, +    }); +    expect(getConnectorIcon(connector)).toBe('compute'); +  }); +}); diff --git a/x-pack/platform/plugins/shared/search_inference_endpoints/public/utils/connector_display.ts b/x-pack/platform/plugins/shared/search_inference_endpoints/public/utils/connector_display.ts new file mode 100644 index 0000000000000..282a7531940d0 --- /dev/null +++ b/x-pack/platform/plugins/shared/search_inference_endpoints/public/utils/connector_display.ts @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +import type { InferenceConnector } from '@kbn/inference-common'; +import { InferenceConnectorType } from '@kbn/inference-common'; +import { SERVICE_PROVIDERS } from '@kbn/inference-endpoint-ui-common'; +import type { ServiceProviderKeys } from '@kbn/inference-endpoint-ui-common'; +import { getProviderKeyForCreator } from './eis_utils'; + +/** + * Returns the icon identifier for a given connector, suitable for use with EuiIcon. + */ +export const getConnectorIcon = (connector: InferenceConnector): string => { + let key: string | undefined; + switch (connector.type) { + case InferenceConnectorType.OpenAI: + key = connector.config?.apiProvider === 'Azure OpenAI' ? 'azureopenai' : 'openai'; + break; + case InferenceConnectorType.Bedrock: + key = 'amazonbedrock'; + break; + case InferenceConnectorType.Gemini: + key = 'googlevertexai'; + break; + case InferenceConnectorType.Inference: + key = + getProviderKeyForCreator(connector.config?.modelCreator) ?? + connector.config?.service ?? + connector.config?.provider; + break; + } + return SERVICE_PROVIDERS[key as ServiceProviderKeys]?.icon ?? 
'compute'; +}; diff --git a/x-pack/platform/plugins/shared/search_inference_endpoints/test/scout_inference_test/ui/fixtures/mocks.ts b/x-pack/platform/plugins/shared/search_inference_endpoints/test/scout_inference_test/ui/fixtures/mocks.ts index d2e3e8b6d24c1..48bde55c58100 100644 --- a/x-pack/platform/plugins/shared/search_inference_endpoints/test/scout_inference_test/ui/fixtures/mocks.ts +++ b/x-pack/platform/plugins/shared/search_inference_endpoints/test/scout_inference_test/ui/fixtures/mocks.ts @@ -10,42 +10,77 @@ import type { ScoutPage } from '@kbn/scout'; const CONNECTORS_ROUTE = '**/internal/inference/connectors'; const ENDPOINTS_ROUTE = '**/internal/inference_endpoints/endpoints'; -export async function mockConnectors(page: ScoutPage) { +interface MockEndpoint { +  inference_id: string; +  task_type: string; +  service: string; +  service_settings?: Record<string, unknown>; +  metadata?: { +    display?: { name?: string; model_creator?: string }; +    [key: string]: unknown; +  }; +} + +const STACK_CONNECTOR = { +  connectorId: 'mock-connector', +  name: 'Mock Connector', +  type: '.gen-ai', +  config: {}, +  capabilities: {}, +  isPreconfigured: false, +}; + +// Mirrors the server-side transformation in getConnectorList: chat_completion +// inference endpoints surface in /internal/inference/connectors as Inference-type +// connectors so the Add Model popover can list them alongside stack connectors. +const endpointsAsConnectors = (endpoints: MockEndpoint[]) => +  endpoints +    .filter((ep) => ep.task_type === 'chat_completion') +    .map((ep) => ({ +      connectorId: ep.inference_id, +      name: ep.metadata?.display?.name ?? 
ep.inference_id, + type: '.inference', + config: { + inferenceId: ep.inference_id, + taskType: ep.task_type, + service: ep.service, + serviceSettings: ep.service_settings, + modelCreator: ep.metadata?.display?.model_creator, + }, + capabilities: {}, + isInferenceEndpoint: true, + isPreconfigured: !!ep.metadata?.display?.name, + isEis: ep.service === 'elastic', + })); + +const fulfillConnectors = async (page: ScoutPage, connectors: unknown[]) => { await page.route(CONNECTORS_ROUTE, async (route) => { await route.fulfill({ status: 200, contentType: 'application/json', - body: JSON.stringify({ - connectors: [ - { - connectorId: 'mock-connector', - name: 'Mock Connector', - type: '.gen-ai', - config: {}, - capabilities: {}, - isPreconfigured: false, - }, - ], - }), + body: JSON.stringify({ connectors }), }); }); +}; + +export async function mockConnectors(page: ScoutPage) { + await fulfillConnectors(page, [STACK_CONNECTOR]); } export async function mockEmptyConnectors(page: ScoutPage) { - await page.route(CONNECTORS_ROUTE, async (route) => { - await route.fulfill({ - status: 200, - contentType: 'application/json', - body: JSON.stringify({ connectors: [] }), - }); - }); + await fulfillConnectors(page, []); } export async function unmockConnectors(page: ScoutPage) { await page.unroute(CONNECTORS_ROUTE); } -export async function mockInferenceEndpoints(page: ScoutPage, endpoints: unknown[]) { +export async function mockInferenceEndpoints(page: ScoutPage, endpoints: MockEndpoint[]) { + // Re-mock the connectors route so the Add Model popover (which reads from + // /internal/inference/connectors only) sees the same endpoint set. + await page.unroute(CONNECTORS_ROUTE); + await fulfillConnectors(page, [STACK_CONNECTOR, ...endpointsAsConnectors(endpoints)]); + await page.route(ENDPOINTS_ROUTE, async (route) => { await route.fulfill({ status: 200,