Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,14 @@ import { render, screen, fireEvent } from '@testing-library/react';
import { EuiThemeProvider } from '@elastic/eui';
import { I18nProvider } from '@kbn/i18n-react';
import { QueryClient, QueryClientProvider } from '@kbn/react-query';
import { InferenceConnectorType } from '@kbn/inference-common';
import type { InferenceConnector } from '@kbn/inference-common';
import { AddModelPopover } from './add_model_popover';
import { useQueryInferenceEndpoints } from '../../hooks/use_inference_endpoints';
import { useConnectors } from '../../hooks/use_connectors';

jest.mock('../../hooks/use_inference_endpoints');
jest.mock('../../hooks/use_connectors');

const mockUseQueryInferenceEndpoints = useQueryInferenceEndpoints as jest.Mock;
const mockUseConnectors = useConnectors as jest.Mock;

const Wrapper = ({ children }: { children: React.ReactNode }) => {
const queryClient = new QueryClient();
Expand All @@ -28,51 +30,74 @@ const Wrapper = ({ children }: { children: React.ReactNode }) => {
);
};

const mockEndpoints = [
{
inference_id: 'ep-1',
service: 'openai',
task_type: 'chat_completion',
service_settings: { model_id: 'gpt-4o' },
},
{
inference_id: 'ep-2',
service: 'openai',
task_type: 'chat_completion',
service_settings: { model_id: 'gpt-4o-mini' },
},
{
inference_id: 'ep-eis',
service: 'elastic',
task_type: 'chat_completion',
service_settings: { model_id: 'claude-sonnet' },
metadata: {
display: {
name: 'Claude Sonnet',
model_creator: 'Anthropic',
},
},
},
{
inference_id: 'ep-eis-no-meta',
service: 'elastic',
task_type: 'chat_completion',
service_settings: { model_id: 'some-model' },
},
{
inference_id: 'ep-embed',
service: 'elastic',
task_type: 'text_embedding',
service_settings: { model_id: 'e5' },
},
/**
 * Builds an InferenceConnector test fixture.
 * Every field carries a sensible default; pass `overrides` to customise
 * individual properties per test case.
 */
const createConnector = (overrides: Partial<InferenceConnector>): InferenceConnector => {
  const defaults: InferenceConnector = {
    type: InferenceConnectorType.Inference,
    name: 'test-connector',
    connectorId: 'test-id',
    config: {},
    capabilities: {},
    isInferenceEndpoint: true,
    isPreconfigured: false,
  };
  return { ...defaults, ...overrides };
};

/**
 * Connectors returned by useConnectors():
 * - Stack connectors (OpenAI, Bedrock, Gemini) — always chat_completion
 * - ES inference endpoints with chat_completion task type
 *
 * The factory defaults already produce an Inference-type endpoint
 * (type: Inference, isInferenceEndpoint: true), so each fixture only
 * spells out the fields that deviate from those defaults.
 */
const mockConnectors: InferenceConnector[] = [
  // Plain ES inference endpoints.
  createConnector({
    connectorId: 'ep-1',
    name: 'OpenAI GPT-4o',
    config: { taskType: 'chat_completion', service: 'openai' },
  }),
  createConnector({
    connectorId: 'ep-2',
    name: 'OpenAI GPT-4o-mini',
    config: { taskType: 'chat_completion', service: 'openai' },
  }),
  // EIS endpoint carrying the model creator used for display/branding.
  createConnector({
    connectorId: 'ep-eis',
    name: 'Claude Sonnet',
    config: { taskType: 'chat_completion', service: 'elastic', modelCreator: 'Anthropic' },
    isEis: true,
  }),
  // Stack connectors: not inference endpoints, each with its own connector type.
  createConnector({
    connectorId: 'stack-openai-1',
    name: 'My OpenAI Connector',
    type: InferenceConnectorType.OpenAI,
    config: { apiProvider: 'OpenAI' },
    isInferenceEndpoint: false,
  }),
  createConnector({
    connectorId: 'stack-bedrock-1',
    name: 'My Bedrock Connector',
    type: InferenceConnectorType.Bedrock,
    config: {},
    isInferenceEndpoint: false,
  }),
  createConnector({
    connectorId: 'stack-gemini-1',
    name: 'My Gemini Connector',
    type: InferenceConnectorType.Gemini,
    config: {},
    isInferenceEndpoint: false,
  }),
];

describe('AddModelPopover', () => {
const onAdd = jest.fn();

beforeEach(() => {
jest.clearAllMocks();
mockUseQueryInferenceEndpoints.mockReturnValue({ data: mockEndpoints });
mockUseConnectors.mockReturnValue({ data: mockConnectors });
});

it('renders the add model button', () => {
Expand Down Expand Up @@ -106,23 +131,11 @@ describe('AddModelPopover', () => {

fireEvent.click(screen.getByTestId('add-model-button'));

expect(screen.queryByText('gpt-4o')).not.toBeInTheDocument();
expect(screen.getByText('gpt-4o-mini')).toBeInTheDocument();
});

it('filters by task type when provided', () => {
render(
<Wrapper>
<AddModelPopover existingEndpointIds={[]} onAdd={onAdd} taskType="text_embedding" />
</Wrapper>
);

fireEvent.click(screen.getByTestId('add-model-button'));

expect(screen.queryByText('gpt-4o')).not.toBeInTheDocument();
expect(screen.queryByText('OpenAI GPT-4o')).not.toBeInTheDocument();
expect(screen.getByText('OpenAI GPT-4o-mini')).toBeInTheDocument();
});

it('uses display name for EIS endpoints with metadata', () => {
it('lists all connectors returned by useConnectors (stack OpenAI, Bedrock, Gemini, and chat_completion endpoints)', () => {
render(
<Wrapper>
<AddModelPopover existingEndpointIds={[]} onAdd={onAdd} />
Expand All @@ -131,50 +144,58 @@ describe('AddModelPopover', () => {

fireEvent.click(screen.getByTestId('add-model-button'));

expect(screen.getByText('Claude Sonnet')).toBeInTheDocument();
expect(screen.getByText('My OpenAI Connector')).toBeInTheDocument();
expect(screen.getByText('My Bedrock Connector')).toBeInTheDocument();
expect(screen.getByText('My Gemini Connector')).toBeInTheDocument();
expect(screen.getByText('OpenAI GPT-4o')).toBeInTheDocument();
});

it('falls back to model_id for EIS endpoints without display metadata', () => {
it('calls onAdd with the selected connector ID', () => {
render(
<Wrapper>
<AddModelPopover existingEndpointIds={[]} onAdd={onAdd} />
</Wrapper>
);

fireEvent.click(screen.getByTestId('add-model-button'));
fireEvent.click(screen.getByText('My Gemini Connector'));

expect(screen.getByText('some-model')).toBeInTheDocument();
expect(onAdd).toHaveBeenCalledWith('stack-gemini-1');
});

it('calls onAdd with the selected endpoint inference_id', () => {
it('shows disambiguation suffix when multiple connectors share a name', () => {
const duplicateConnectors = [
createConnector({
connectorId: 'ep-a',
name: 'My Connector',
type: InferenceConnectorType.Inference,
config: { taskType: 'chat_completion', service: 'openai' },
isInferenceEndpoint: true,
}),
createConnector({
connectorId: 'ep-b',
name: 'My Connector',
type: InferenceConnectorType.Inference,
config: { taskType: 'chat_completion', service: 'openai' },
isInferenceEndpoint: true,
}),
];
mockUseConnectors.mockReturnValue({ data: duplicateConnectors });

render(
<Wrapper>
<AddModelPopover existingEndpointIds={[]} onAdd={onAdd} />
</Wrapper>
);

fireEvent.click(screen.getByTestId('add-model-button'));
fireEvent.click(screen.getByText('gpt-4o-mini'));

expect(onAdd).toHaveBeenCalledWith('ep-2');
expect(screen.getByText('My Connector (ep-a)')).toBeInTheDocument();
expect(screen.getByText('My Connector (ep-b)')).toBeInTheDocument();
});

it('shows disambiguation suffix when multiple endpoints share a model name', () => {
const duplicateEndpoints = [
{
inference_id: 'ep-a',
service: 'openai',
task_type: 'chat_completion',
service_settings: { model_id: 'gpt-4o' },
},
{
inference_id: 'ep-b',
service: 'openai',
task_type: 'chat_completion',
service_settings: { model_id: 'gpt-4o' },
},
];
mockUseQueryInferenceEndpoints.mockReturnValue({ data: duplicateEndpoints });
it('handles empty connectors gracefully', () => {
mockUseConnectors.mockReturnValue({ data: [] });

render(
<Wrapper>
Expand All @@ -184,7 +205,6 @@ describe('AddModelPopover', () => {

fireEvent.click(screen.getByTestId('add-model-button'));

expect(screen.getByText('gpt-4o (ep-a)')).toBeInTheDocument();
expect(screen.getByText('gpt-4o (ep-b)')).toBeInTheDocument();
expect(screen.getByTestId('add-model-search')).toBeInTheDocument();
});
});
Original file line number Diff line number Diff line change
Expand Up @@ -10,69 +10,42 @@ import { css } from '@emotion/react';
import { EuiButtonEmpty, EuiIcon, EuiPopover, EuiSelectable } from '@elastic/eui';
import { i18n } from '@kbn/i18n';
import type { EuiSelectableOption } from '@elastic/eui';
import { SERVICE_PROVIDERS } from '@kbn/inference-endpoint-ui-common';
import type { ServiceProviderKeys } from '@kbn/inference-endpoint-ui-common';
import { useQueryInferenceEndpoints } from '../../hooks/use_inference_endpoints';
import { getModelId } from '../../utils/get_model_id';
import {
isEisEndpoint,
getModelName,
getModelCreator,
getProviderKeyForCreator,
} from '../../utils/eis_utils';
import { useConnectors } from '../../hooks/use_connectors';
import { getConnectorIcon } from '../../utils/connector_display';

interface AddModelPopoverProps {
existingEndpointIds: string[];
onAdd: (endpointId: string) => void;
taskType?: string;
panelWidth?: number;
}

export const AddModelPopover: React.FC<AddModelPopoverProps> = ({
existingEndpointIds,
onAdd,
taskType,
panelWidth,
}) => {
const { data: inferenceEndpoints = [] } = useQueryInferenceEndpoints();
const { data: connectors = [] } = useConnectors();
const [isOpen, setIsOpen] = useState(false);

const options: EuiSelectableOption[] = useMemo(() => {
const existingSet = new Set(existingEndpointIds);
const available = inferenceEndpoints.filter(
(endpoint) =>
!existingSet.has(endpoint.inference_id) && (!taskType || endpoint.task_type === taskType)
);
const available = connectors.filter((connector) => !existingSet.has(connector.connectorId));

const modelToCount = inferenceEndpoints.reduce<Map<string, number>>((acc, ep) => {
const modelId = getModelId(ep) ?? ep.inference_id;
acc.set(modelId, (acc.get(modelId) ?? 0) + 1);
const nameToCount = connectors.reduce<Map<string, number>>((acc, connector) => {
acc.set(connector.name, (acc.get(connector.name) ?? 0) + 1);
return acc;
}, new Map());

return available.map((endpoint) => {
const modelId = getModelId(endpoint) ?? endpoint.inference_id;
const count = modelToCount.get(modelId) ?? 1;
let icon: string;
let baseName: string;
if (isEisEndpoint(endpoint)) {
const creator = getModelCreator(endpoint);
const providerKey = getProviderKeyForCreator(creator);
icon = (providerKey && SERVICE_PROVIDERS[providerKey]?.icon) ?? 'compute';
baseName = getModelName(endpoint);
} else {
const provider = SERVICE_PROVIDERS[endpoint.service as ServiceProviderKeys];
icon = provider?.icon ?? 'compute';
baseName = modelId;
}
const label = count > 1 ? `${baseName} (${endpoint.inference_id})` : baseName;
return available.map((connector) => {
const count = nameToCount.get(connector.name) ?? 1;
const label = count > 1 ? `${connector.name} (${connector.connectorId})` : connector.name;
return {
label,
key: endpoint.inference_id,
prepend: <EuiIcon type={icon} size="s" aria-hidden />,
key: connector.connectorId,
prepend: <EuiIcon type={getConnectorIcon(connector)} size="s" aria-hidden />,
};
});
}, [inferenceEndpoints, existingEndpointIds, taskType]);
}, [connectors, existingEndpointIds]);

const handleChange = useCallback(
(newOptions: EuiSelectableOption[]) => {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,43 +28,17 @@ import {
euiDragDropReorder,
} from '@elastic/eui';
import { i18n } from '@kbn/i18n';
import type { InferenceConnector } from '@kbn/inference-common';
import { InferenceConnectorType } from '@kbn/inference-common';
import { SERVICE_PROVIDERS } from '@kbn/inference-endpoint-ui-common';
import type { ServiceProviderKeys } from '@kbn/inference-endpoint-ui-common';
import { css } from '@emotion/react';
import { NO_DEFAULT_MODEL } from '../../../common/constants';
import { useRegisteredFeatures } from '../../hooks/use_registered_features';
import { getProviderKeyForCreator } from '../../utils/eis_utils';
import { getConnectorIcon } from '../../utils/connector_display';
import type { InferenceFeatureResponse as InferenceFeatureConfig } from '../../../common/types';
import { AddModelPopover } from './add_model_popover';
import { CopyToModal } from './copy_to_modal';
import { useConnectors } from '../../hooks/use_connectors';

const COLLAPSED_COUNT = 5;

/**
 * Resolves the provider icon to render next to a connector.
 * Falls back to the generic 'compute' icon when no SERVICE_PROVIDERS entry
 * matches the resolved provider key.
 */
const getConnectorIcon = (connector: InferenceConnector): string => {
  const { type, config } = connector;
  let providerKey: string | undefined;
  if (type === InferenceConnectorType.OpenAI) {
    // Azure-hosted OpenAI gets its own branding.
    providerKey = config?.apiProvider === 'Azure OpenAI' ? 'azureopenai' : 'openai';
  } else if (type === InferenceConnectorType.Bedrock) {
    providerKey = 'amazonbedrock';
  } else if (type === InferenceConnectorType.Gemini) {
    providerKey = 'googlevertexai';
  } else if (type === InferenceConnectorType.Inference) {
    // Prefer the model creator's branding, then the service, then the raw provider.
    providerKey =
      getProviderKeyForCreator(config?.modelCreator) ?? config?.service ?? config?.provider;
  }
  return SERVICE_PROVIDERS[providerKey as ServiceProviderKeys]?.icon ?? 'compute';
};

interface SubFeatureCardProps {
featureId: string;
feature: InferenceFeatureConfig;
Expand Down Expand Up @@ -442,7 +416,6 @@ export const SubFeatureCard: React.FC<SubFeatureCardProps> = ({
<AddModelPopover
existingEndpointIds={endpointIds}
onAdd={handleAdd}
taskType={feature.taskType}
panelWidth={listWidth}
/>
</EuiFlexItem>
Expand Down
Loading
Loading