From 513c7317547d2db22c4f1d6a758ed5dd5eeb7e88 Mon Sep 17 00:00:00 2001 From: Apostolos Matsagkas Date: Thu, 29 Jan 2026 11:57:21 +0200 Subject: [PATCH 01/11] Add Google Drive data source with OAuth and file extraction workflows - Add Google Drive data source with OAuth and file extraction workflows - Refactor generateWorkflows to use ConnectorReference for type-safe connector matching - Add i18n translations to stackConnector descriptions - Remove accidental trailing newline - Fix misleading comment about quote escaping - Remove mimeType param, improve query description - Extract Google Drive API base URL to constant - Simplify ConnectorReference comment - Revert unnecessary comment edit - Add connector processing order logic Co-Authored-By: Claude Opus 4.5 --- .../kbn-connector-specs/src/all_specs.ts | 1 + .../src/connector_icons_map.ts | 6 + .../src/specs/google_drive/google_drive.ts | 293 +++++ .../specs/google_drive/icon/google_drive.png | Bin 0 -> 25087 bytes .../src/specs/google_drive/icon/index.tsx | 19 + .../src/specs/notion/notion.ts | 2 +- .../included_operations.ts | 1 + .../elasticsearch.ingest_simulate.gen.ts | 83 ++ .../spec/elasticsearch/generated/index.ts | 8 +- .../generated/schemas/es_openapi_zod.gen.ts | 1012 ++++++++++++++++- .../data_catalog/common/data_source_spec.ts | 44 +- .../shared/data_catalog/common/index.ts | 2 + .../plugins/shared/data_catalog/index.ts | 2 + .../shared/data_catalog/server/routes.ts | 4 +- .../components/active_sources_view.tsx | 35 +- .../components/connectors_view.tsx | 23 +- .../components/optional_connector_prompt.tsx | 136 +++ .../hooks/use_add_connector_flyout.ts | 393 +++++-- .../hooks/use_clone_active_source_flyout.ts | 78 -- .../application/hooks/use_connectors.ts | 13 +- .../routes/data_sources_helpers.test.ts | 8 +- .../server/routes/data_sources_helpers.ts | 92 +- .../data_sources/server/routes/index.test.ts | 28 +- .../data_sources/server/routes/index.ts | 44 +- .../data_sources/server/routes/schema.ts | 22 +- .../server/sources/github/data_type.ts | 80 +- .../server/sources/google_drive/data_type.ts | 97 ++ .../server/sources/google_drive/index.ts | 8 + .../server/sources/google_drive/workflows.ts | 149 +++ .../data_sources/server/sources/index.ts | 2 + .../server/sources/notion/data_type.ts | 34 +- 31 files changed, 2422 insertions(+), 297 deletions(-) create mode 100644 src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts create mode 100644 src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/icon/google_drive.png create mode 100644 src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/icon/index.tsx create mode 100644 src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/elasticsearch.ingest_simulate.gen.ts create mode 100644 x-pack/platform/plugins/shared/data_sources/public/application/components/optional_connector_prompt.tsx delete mode 100644 x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_clone_active_source_flyout.ts create mode 100644 x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/data_type.ts create mode 100644 x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/index.ts create mode 100644 x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows.ts diff --git a/src/platform/packages/shared/kbn-connector-specs/src/all_specs.ts b/src/platform/packages/shared/kbn-connector-specs/src/all_specs.ts index 21b506295a48c..2f9d132d68ddf 100644 --- 
a/src/platform/packages/shared/kbn-connector-specs/src/all_specs.ts +++ b/src/platform/packages/shared/kbn-connector-specs/src/all_specs.ts @@ -11,6 +11,7 @@ export * from './specs/abuseipdb/abuseipdb'; export * from './specs/alienvault_otx/alienvault_otx'; export * from './specs/brave_search/brave_search'; export * from './specs/github/github'; +export * from './specs/google_drive/google_drive'; export * from './specs/greynoise/greynoise'; export * from './specs/notion/notion'; export * from './specs/shodan/shodan'; diff --git a/src/platform/packages/shared/kbn-connector-specs/src/connector_icons_map.ts b/src/platform/packages/shared/kbn-connector-specs/src/connector_icons_map.ts index fd7dacd4c1d31..7a426dccad92b 100644 --- a/src/platform/packages/shared/kbn-connector-specs/src/connector_icons_map.ts +++ b/src/platform/packages/shared/kbn-connector-specs/src/connector_icons_map.ts @@ -64,4 +64,10 @@ export const ConnectorIconsMap: Map< '.urlvoid', lazy(() => import(/* webpackChunkName: "connectorIconUrlvoid" */ './specs/urlvoid/icon')), ], + [ + '.google_drive', + lazy( + () => import(/* webpackChunkName: "connectorIconGoogleDrive" */ './specs/google_drive/icon') + ), + ], ]); diff --git a/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts new file mode 100644 index 0000000000000..bb1134b3d955d --- /dev/null +++ b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts @@ -0,0 +1,293 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ +import { i18n } from '@kbn/i18n'; +import { z } from '@kbn/zod/v4'; +import type { ConnectorSpec } from '../../connector_spec'; + +// Google Drive API constants +const GOOGLE_DRIVE_API_BASE = 'https://www.googleapis.com/drive/v3'; +const DEFAULT_PAGE_SIZE = 250; +const MAX_PAGE_SIZE = 1000; +const DEFAULT_FOLDER_ID = 'root'; +const GOOGLE_WORKSPACE_MIME_PREFIX = 'application/vnd.google-apps.'; +const DEFAULT_EXPORT_MIME_TYPE = 'application/pdf'; +// XLSX preserves tabular structure better than PDF for spreadsheets +const SHEETS_EXPORT_MIME_TYPE = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'; + +/** + * Escapes single quotes in a string for use in Google Drive query syntax. + * Google Drive queries use single quotes for string values, so any single + * quotes in the value must be escaped to avoid syntax errors. 
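+ *
+ * For example (illustrative value only): escapeQueryValue("O'Brien") returns "O\'Brien",
+ * which can then be embedded safely in a query such as: name contains 'O\'Brien'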
+ */ +function escapeQueryValue(value: string): string { + return value.replace(/'/g, "\\'"); +} + +export const GoogleDriveConnector: ConnectorSpec = { + metadata: { + id: '.google_drive', + displayName: 'Google Drive', + description: i18n.translate('core.kibanaConnectorSpecs.googleDrive.metadata.description', { + defaultMessage: 'Search and access files and folders in Google Drive', + }), + minimumLicense: 'enterprise', + supportedFeatureIds: ['workflows'], + }, + auth: { + types: ['bearer'], + headers: { + Accept: 'application/json', + }, + }, + + actions: { + searchFiles: { + isTool: true, + input: z.object({ + query: z + .string() + .min(1) + .describe( + "Google Drive query. Use fullText contains 'term' for content search, " + + "name contains 'term' for filename search, mimeType='application/pdf' for type filtering, " + + "modifiedTime > '2024-01-01' for date filtering. Combine with 'and'/'or'." + ), + pageSize: z + .number() + .optional() + .default(DEFAULT_PAGE_SIZE) + .describe('Maximum number of files to return (1-1000)'), + pageToken: z.string().optional().describe('Token for pagination'), + }), + handler: async (ctx, input) => { + const typedInput = input as { + query: string; + pageSize: number; + pageToken?: string; + }; + + ctx.log.debug(`[google_drive.searchFiles] input: ${JSON.stringify(input)}`); + + const params: Record = { + q: typedInput.query, + pageSize: Math.min(typedInput.pageSize || DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE), + fields: 'nextPageToken, files(id, name, mimeType, size, modifiedTime, webViewLink)', + }; + + if (typedInput.pageToken) { + params.pageToken = typedInput.pageToken; + } + + ctx.log.debug(`[google_drive.searchFiles] API params: ${JSON.stringify(params)}`); + + try { + const response = await ctx.client.get(`${GOOGLE_DRIVE_API_BASE}/files`, { + params, + }); + + return { + files: response.data.files || [], + nextPageToken: response.data.nextPageToken, + }; + } catch (error: unknown) { + // Extract detailed error from Google API response + const axiosError = error as { + response?: { data?: { error?: { message?: string; code?: number } } }; + }; + const googleError = axiosError.response?.data?.error; + if (googleError) { + throw new Error(`Google Drive API error: ${googleError.message}`); + } + throw error; + } + }, + }, + + listFiles: { + isTool: true, + input: z.object({ + folderId: z + .string() + .optional() + .default(DEFAULT_FOLDER_ID) + .describe("Parent folder ID ('root' for root folder)"), + pageSize: z + .number() + .optional() + .default(DEFAULT_PAGE_SIZE) + .describe('Maximum number of files to return (1-1000)'), + pageToken: z.string().optional().describe('Token for pagination'), + orderBy: z + .enum(['name', 'modifiedTime', 'createdTime']) + .optional() + .describe('Field to order results by'), + }), + handler: async (ctx, input) => { + const typedInput = input as { + folderId: string; + pageSize: number; + pageToken?: string; + orderBy?: string; + }; + + const folderId = typedInput.folderId || DEFAULT_FOLDER_ID; + const params: Record = { + q: `'${escapeQueryValue(folderId)}' in parents and trashed=false`, + pageSize: Math.min(typedInput.pageSize || DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE), + fields: 'nextPageToken, files(id, name, mimeType, size, modifiedTime, webViewLink)', + }; + + if (typedInput.pageToken) { + params.pageToken = typedInput.pageToken; + } + + if (typedInput.orderBy) { + params.orderBy = typedInput.orderBy; + } + + try { + const response = await ctx.client.get(`${GOOGLE_DRIVE_API_BASE}/files`, { + params, + }); + + return { + 
files: response.data.files || [], + nextPageToken: response.data.nextPageToken, + }; + } catch (error: unknown) { + const axiosError = error as { + response?: { data?: { error?: { message?: string; code?: number } } }; + }; + const googleError = axiosError.response?.data?.error; + if (googleError) { + throw new Error(`Google Drive API error: ${googleError.message}`); + } + throw error; + } + }, + }, + + downloadFile: { + isTool: true, + input: z.object({ + fileId: z.string().min(1).describe('The ID of the file to download'), + mimeType: z.string().optional().describe('Export MIME type for Google Workspace documents'), + }), + handler: async (ctx, input) => { + const typedInput = input as { + fileId: string; + mimeType?: string; + }; + + try { + // First, get file metadata to determine if it's a Google Workspace document + const metadataResponse = await ctx.client.get( + `${GOOGLE_DRIVE_API_BASE}/files/${typedInput.fileId}`, + { + params: { + fields: 'id, name, mimeType, size', + }, + } + ); + + const fileMetadata = metadataResponse.data; + const isGoogleDoc = fileMetadata.mimeType?.startsWith(GOOGLE_WORKSPACE_MIME_PREFIX); + + let contentResponse; + if (isGoogleDoc) { + // Export Google Workspace documents + // Use XLSX for Sheets (preserves tabular structure), PDF for everything else + const defaultExport = + fileMetadata.mimeType === 'application/vnd.google-apps.spreadsheet' + ? SHEETS_EXPORT_MIME_TYPE + : DEFAULT_EXPORT_MIME_TYPE; + const exportMimeType = typedInput.mimeType || defaultExport; + contentResponse = await ctx.client.get( + `${GOOGLE_DRIVE_API_BASE}/files/${typedInput.fileId}/export`, + { + params: { + mimeType: exportMimeType, + }, + responseType: 'arraybuffer', + } + ); + } else { + // Download native files + contentResponse = await ctx.client.get( + `${GOOGLE_DRIVE_API_BASE}/files/${typedInput.fileId}`, + { + params: { + alt: 'media', + }, + responseType: 'arraybuffer', + } + ); + } + + const buffer = Buffer.from(contentResponse.data); + const base64Content = buffer.toString('base64'); + + return { + id: fileMetadata.id, + name: fileMetadata.name, + mimeType: isGoogleDoc + ? typedInput.mimeType || DEFAULT_EXPORT_MIME_TYPE + : fileMetadata.mimeType, + size: fileMetadata.size, + content: base64Content, + encoding: 'base64', + }; + } catch (error: unknown) { + const axiosError = error as { + response?: { data?: { error?: { message?: string; code?: number } } }; + }; + const googleError = axiosError.response?.data?.error; + if (googleError) { + throw new Error(`Google Drive API error: ${googleError.message}`); + } + throw error; + } + }, + }, + }, + + test: { + description: i18n.translate('core.kibanaConnectorSpecs.googleDrive.test.description', { + defaultMessage: 'Verifies Google Drive connection by fetching user information', + }), + handler: async (ctx) => { + ctx.log.debug('Google Drive test handler'); + try { + const response = await ctx.client.get(`${GOOGLE_DRIVE_API_BASE}/about`, { + params: { + fields: 'user', + }, + }); + + if (response.status !== 200) { + return { ok: false, message: 'Failed to connect to Google Drive API' }; + } + + return { + ok: true, + message: `Successfully connected to Google Drive API as ${ + response.data.user?.emailAddress || 'user' + }`, + }; + } catch (error) { + return { + ok: false, + message: `Failed to connect to Google Drive API: ${ + error instanceof Error ? 
error.message : 'Unknown error' + }`, + }; + } + }, + }, +}; diff --git a/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/icon/google_drive.png b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/icon/google_drive.png new file mode 100644 index 0000000000000000000000000000000000000000..5385d299d8c81d73bbff9f109d8d93ca89fb210a GIT binary patch literal 25087 zcmYIwby!sG7wyb2#7Ga_Lw9${&?zC^t)ze;A~~agv95z2Z|)`O^>0GZcI z$FVoaJ} z@cZ{zp|4*c=7soc zK?TtRnS9n_*C0Q=7{6JGe#P8<(`|n(z=5Gb7g!cIUsdz_jwg2=KGs&^z{ml&@PWNp zS&=}%?PFKiiqTDKetX4B`Evx?4t9A^t-9MaMgzVkZ=X6~gZ1m7V5o0}s@@^whEu;5 zTTXF*SRRPAIntz#A-Jk_T7)a9C6{^tQM?611R>}N}!f0w?ctwjacvOnla zKl!RXDwviYmbXSqhaK$=O&j9-R~bfp(HYV6h~--6cPW~RHr-nJJ_m*$U5Edtu;QR_ zA*@jtuVaR#td(X7ue`NxS)LcR3qz%KsTj?}Bu?sMJJ0`h%OBZ$b=FUJvMbXFYZEvUA0ALLU3)0_?48a;Rs_fe`|>dSkyx@(n>~ zy`K^ZDxJ!Mm%X%xhkUXPtVzPN501Pn(MqdgaSPj5^7p)&ECNZ zUL_C#izF#3o+vMEKgo9-h=_=)T-l2#a#bzEy)a9BRWCdL8mR}pR$!(1_?Gzf39%P$ zM;62@RDS=imo`L>+yR22aBGQ8!1|Z}?BmT}h#WRFaA68`Ke7yxykmS2_~}aq2ZkB_ zb#`3_aTtDxr@FHHUbYmpnIGRF9FmNR1|FUpK}Ja-`|d{e%~G!LKV<}C+{e|E%2qPu z*wTLHc`XHuNNuV3HLHw@=v!fO641d2SgK7fnPDmw^$3Xm8=akW&c54pF5FbPN6OYn zdb8cAL|W0E38`VuiGrGN_&#%G#d{LUSlWXS`KKqUyzApP=lB$E_!2|`dLlEf!wBv8 zVe`URshv*IagXqR)cET>(96?TQP4@m5?o`4$L3=+Z;cY_)WdW5J#r#rW2TS_9`gc3 zfs$FO@w|jiJ&{-`GKh#zN}ka-Exym-hW(`@dXWvRjM&|Zyw4Fh^}%61grb1tH8d9* z@X>gLOYk+42)OWWP@?T5D`so6D)h8!(kRss}sva zW%-y##^VAVcxCNk!A4uk5Wd@I1=q@sO~P7l-tGC9al_f7Z`63rQ4e>eQ#U$x@`s-$ zVaLuaX(uSUYCGLPAI}s=cg-XeXVRf2>{uvLGEIV#7ahFUe{)&2^Q!*Yw{3`%y0XR? zCi9}MupOqLSZ~XC#Gn4p-In?-X~1{S?WkjI2Vnp=j#a)8T2VAR4YSWoE>tw+cprC? zRDAB}DSWc0&OOEIJFyG@6`26KDJoxK_X?-zUc^VJ{YU;iIP?JMOLz9RpD_*VLn=$Pf%g=jodA z6=V3n_d=sFIOWIR7lfIzQ?Y{-cb2!U3OY~KJITr+-Ed`U_-HJU40*_j#2LNfHCF*o zYafC*ADg3o9X;`G>AdWh*wHi>#?A9rqR-{i!B3tttmfExYXh()_|oMONQ9&1c5}ZUC^_Fo$RRNXOwk zS96RxVGr@fPEaFaZxwsT7UtKN3jBqqO7BW9gfd;~Z>fagVBFKL}`&?#zBn+`u5+9Ji znu`H$k%738ydb*EQIBHq+J)CYWNH~T9_I2|l_`ZRQ!!X%T)yMATB4SUM@j697aUp| z9IPsIb7Vl;5Sv?Fy~VCRiJ10kU9@1W+JDOSc{{#A{=zZ=8Qu57zUb@z@{Qt;2Q~Pg zTS^5qvX}ccfa~;PcVbnnM3YyOI{HNyOF!MiCJV9F z6>8ZAqc@zlQQaZ~G~N4CM?t=`^ZT$1J0s0zd{@`+;~Rw{VFYb*b1+S21$cRGyubkFt;ui>b2CsJb?VOXUr8{6=>4%~sZWLU}N|0pH;nnyS(R@$EDJqUR* zcTs?E4V0-v`SR4=8T^cnsIwvdhNngEUS>yfowjT9SrzeS!6-RSck7BqT+^GI8mFFN z=?^%jLh!NtVsHltVzn&OWFmcs`bXR8vx)bMv>rXD1AZkF_deH<2l=Z=*(eg%JdvuM zu4O>Kt<>Pd_{86?Rs)t4%iUa4iqd!u&8!K9+eM|+L&SAT*LVmRvvbfe^vS!wtVR=W z?_p~hIN)l$54f>V>=rvu>~+*i%?*$hscQEQ#e&JyelC3;d+I`JOdjMIB+cjqD|3(E zmqc!|O$?O!eggI)*;7HUTtjwSn2aRLtds3>@U1DWv|ICgXUu5_>|2G9hTY4rq_Z%5 zl$8+SR!Ev91u?MaW1~*OPXZ!hR%`A@KO&tt0)!-A zEuqtdvS3;rV^c`h>YqUXTqUsJ81pwb9ZL#$Q-q{jmwh|(QVUjzi(;#N%Jchctjz3% zy{EDpje)pXtoL(EEd&V=vx0hf@{vKVX(=f?L-3`eXERPApQg2tP%GZy+Np#L4z=F|NJEI=Z4s%`SMue{ULnfy6-l+diTj?+zo{^ z0T7m+eAhOjA~_#QpM@V}|R!E<-4`kfNQ-pBH}Qe|*wCs)ZwLfJ1`yel1`5@t2l z>(~u0zo{3W%u5gD@iyN=vOCwCVEo#cTO>Yb>)O16XL221kjCPeR@ii`TP9xV^jgTO z>GlQo^}}5PFGt2jrm%DyVyJvz+iya6OeAoJT0AvFB#>%EmE`GXG)+#<{=jG!M+?`iAjvB@F#&KeE#8-x~ zfEu;=DPNiogD3q$+Lt?IG!~=CeO9OU5g*XOb;atoZ{8Tv*Wr0EEYor7h%ywYh`s zs6wvi8zPxVn)k)JQ^cq(VKUtx((nhRVnl0?H`2n-e8^J4P2uA{6lN*dKw!Pb)TPca zIKj)VOsPN1cQ?21iz-!78z;r zs?Y3_*xtKgRGH5&VZF9UisKo4r5gLuimZR{nIC#_!}FS*V_HC_qHaVdkKlc^r7&ck2$G57~cznO)<1eAZQ)fE1%^UxRjJe`BlrLQ5s;HN~ zU5WYTZsEH+l& zrod1?R-I#7kUTN#{>+lyOOQJv-QQh59sJq-oSK2D2#((q)gh>L#M$SFwsmGT%*{lxT-;48F26djk2NMY zzxf&NC%ys(0%^%!x6^y=Jo+pc|F+3|)K6`m)}w1?pvKR_VcLSAO_mjB80jovyFL4G zHW4{O9nw2ocEN##VhkjXD-4s)7k)T*y)ap!-7!$%H%hmo1u8Bv8k^SUm9V`GP_#_e zYC4NxRTJBpnRDQoSwtkTC;2M-Gcnumoy5%7EkA`q2w2OTV`Ku@cE42z$~4Py2G4o+ 
zw7s2*8ar_&9RyAchuMO}yVd;N-JO_I5$D*q*c(W>XhD$4A=4~7AEcAcEFm{(q!2)) zMGC_dE>{%fn2;Izhz~%!|$%Ms~;gC zIX5tgHd#adsd=ETK{-SL5srZ`gME)Kxz9bnv=9(f5J;^cg$NT+CSLdKvJIg=({VF$ zb<_Hg26J%1PTs$>fyP@#7x?!nUij0Ya*L_P@ISa6aamaDonRNvI8Cs?2RLnZO2Vqm zjoJ4S@5CnpxMn|n=6C5%^?^zYtgZfCem*?ZTzBkh-TP5}bja!AyCW5QgNV0hj!%Ma zH2aK~C*B9V&J`()=ZgNJr za96UB(1kH^nUj`wNxEJ62W@(B79b(l7{tF*-uUPWUf0K6M3&5ApOvuFE7=%=c}D z72>hCuXwJ4f%Iz|?pi%%&kOepCBaVYqlAdzNG(-|Yll*8sd9Oy@O`sEY# z+4Yu;z)aodX@UtO{WbRO=S`+3(;k=|lJ@rTd?>JqS|S9&Iu_r5qAoJpK9=fQUu+PZ zU@Ge8rih$i*Wbq;$*5eGOHc=j9=&g6_)8fJim$w7a^HP%CVIken10~uO&i@vSiF4w z%(UfPqOR_}D=yFWW&bzUOoP@GFJ5_Q?M&Q%g!UvtYN%^-^KPS2_kJY(w|nHevle)4 z7=e@4s>sC)(r)e4b7CjeYC)&_h^*l4z)jv)GqnE z7pAlP&jyjj$MDrC9XD=~O5hj)pcC_If>OmPe}}u(gH*TQM0H#>uyCK0+!YQ()8wTt zS#5hQOm{Ebs=F8InO`po3W*wqV)0fNeoRC!3WQGKy&hm%={4lCjkDgw6UM zNgSFEZ3dHD1S*m(eIg8AmKrCe0I{!c{<1#X_4(^D{*__jP;+Tw*CVZn>TtR{{mIL< z2M$iCiz{8x(}k5}%x`I3)+n}mPAn8@-4ShVKVC}>;O)FG6Xh-w`VlnQUMy4Q7_ob~ z3u1J9Yt)HPz54TGFvyLj53kN58`w5oQehG2ox;B`d_m^oPhT$Bs_(4g5ld&d5q}>B zXjph3H+?W>y$r_l)ne$dthMIFEqfE04pJ{Z&O6Fq6+X%}p`wz=qnCadycDq8H?r!x zRPx*^uWaI;2~_F|*X{D*EhgDOPZ|1cR;g;{0Km;UV1CuC$grX7VCl=wW&YT5$)7?e zdGxF1(D~0WqafpS+-QhVfP=C4Eh8-%i&NRe(Y;Af7yL0Ne#fOnI&X6}@-UKe;c8%| zT0xe)H2BkAg23unc~WB(Hhb~JswaN!mX%&QH=9EfZHJm&_XtC5|KNTi@FITd5YXfH zNo~DsZRDcpGu_OGLd}-ASb)o--wugBapk@R_~ejKr~6O0FS%=qOC?J>wu*$&uT-Cu z^$UzmzlZ+R=js34&n$G2>cE4SPBsG(Xb)cs^m3E~5Jpeh<%;m_bUnmis(n6!eM(0a z!E@YUAXvND0Wu(QCm+0SmQ&AW@I$bAUW4j~S<0w3@7?xBU-7&UpUl5KgDOYjdGrRy z5`<3F(NpO5Y87c&^EM^&D4X?V%*hozc>{}axtB%O@5Bo%CLobpRdFW$b5W*Gg?yGf z?R*lUpx^WxM?X;3&y@NmMdCNoC!EwRs4H!px8*4i5d6ofrQG)OBOCUPi-)4zXw%O_ znI^7RuIQ>AYpB7rjz0<96sJMFrxq)eQcVD07&TpgtEYX>eq(UFn)8TA$K&tp+`JRD zpI_-5tn&QPOivu4Mn4h20U$QsaGQ*S0z{awf1acPD0a2DY&A~_Dwl;!GDzzLBBy(~ zu;jlsC?`mFKgmY$nZK4;#ecy!LJl{VLf0J#R`iY?I2qa?JbElQ2b#aBWqJV0uOPFMqn}_6`e!na|ZXRedV>M*)>h1mjDRG12sIyM>{G(+slniYz0PS+Rc z4e0d!68Te_83I*AR=~c~4JklEjPURo+*T)*K z>b{=*`knW%LmJKwF8Ahi7<)iIgv)Eie9@F z>6_k2JsBLEJh+l}s+C2sBAJ`i`=E=j4#W^)PHlBcM9H`SJ-YlIoV$?iNwamQh;t&hV@%Uy%ag50aSNaxuw zEGRsbyv>$B0jZoXMp2eW60_(iuzxEO{F`w&9#?Yqi=XiX6w+gaNZ$miqPt%4iuRjj$)z=)Xb50On{kbv)K|_@q7bqp^s^lZr4?v&S{%({$2F>nfzUbml$S2%A zmr=@9{-`f{9-cgP^SFi$6OvlLza^9aLlr&6HXPi$G5gx^ zlX|WpnbbEWrkqGDz38j=u5Y!w=XAL!K(IdR*?&si_7(7?gwT{V=#(fbx zqNkyit@d&f?BJS6OIdD|`YmpAFr;Kmhenc0553W&`rWVR(C{CJl8$wMBrT+SVaHVA7@M0smF;G+X;t~{Pu9wnVvwFkM@<0l!mnytA{ zZ$LL5;kQg#E9y*i_d$^<1@{9lJqh>sGC8^&b?EYsE3o}-d_(mHg8WGLKbqOIrH%|_ z1dR~gk2T7eHG9*BrJ%~vMDeev!$^$5IIFHC<3-~psa z5hvm}$?%p%g$eZD_89^Z3Jv@Dr=hxupZWg&)PbgRG$3YE%dE7X-4!0bcWomXBB2Ix zgKKJ5-B2jg-U>KqoKgC}*dxNXVuoN_>(@xtYFvsB$3}wnN(TMt@>1phBIXGM+NlX1 zxB9dH!{Y+sjd!|^9av0%nnmIUc$-47w)hA5>-nLH;?-zVU?brz<+VH&AuHhP( zSJ%=n$oT|+iT|e^0*1sPqFSPwJeB_>aqfFQ(5tHaNh!Oi!GU$FIn38$4fSkid2i(f zr-wAgBbw=*FFp-98F<}_+gO_suA;O1Y{zDAb7LmjZejrps0sII9l6D|_4lE1 zheZJT4~I0qXZH;K2d!lusl8f=`w-=t*#IlY�jrnIcnx3qq2aBw{Tj1*Dt>I@c`z zfy*mIbO4H>;jyyGzI#Fr{gV6Z9j^6ULIQRgEtzTYA^TPq#O=gN- za-yMHFQ*+4vPTIi8-&{%c8o8>y39RtiWT88*~l;}GUn2ds_?Hu+z%d%EN;pPC!=P`!me{|B*>$0`RLWOM+L63jGdIH|>EoCroYdAho2%)|jErJ+2| zl7_pLF&+gdk$l8zAJ4WjPi7HLF)C<= zSM7F7Bp(VH&HL9+vN_SG1d%;th#C*Ij0fKlB8?6__a`pwsLP^B=Mcazh$)^JkOd25-?|0hDH`*Lm28NlL#2l26-j!bW6g5EznMvw(UUC{x!-ADz}Mxb23mea2O1@#4?ed@>0tm=gdv&wE*w*F8V^l zJ_efcwA9fYVi>kKd=jb-5rbR!$&Y?jG@WFm=K}GKw|VS1yO9RIcY9r;g|vkz^)tsB z-{Jn}En%QQu66VI=k&8Es0%BZ2Tr;Tdpy1FU*P`7MZ)Kr$%H?WrR;waAz%{+TkfRYW;LO-Wn?iR>ZKGZj{U-;KQwfYgVQ!jBUuvC2q}t#4 z;J;r*y(7H%_>dSztB>5WED#{1?DV%>0h%2|c=YM&z|+0o=~n-XQbl~vi(DkRnZ@?< zhZ8nW;63W2=5U}yuL+}$MreO8+z-zJb<5@gj*Ly`(z;A#wLHvC7kRcja!-q8CRW;9 
zfUwOE=xdZA8gPqXCbFe7syz}g;B&%$*8i2Q`1gaclq!%LyiH(ohQB->x61z1FqAjh zgz8yHBL}!07Ep;}_M@c9eDKT}Le`fo7&}BX-~~vMQu61``5>O zJ8O_ULsxyQ)81wYbj&E)mq+cA{ zRwS6T0hn-{F^=UsE-W|5h#iO7?wDjvx+5nwEOiM>J;RhqRIQkfx+lSur&@+0#9j81@%Fi2(2> zqe`IZacYvI74Hf8=^sMKewY2MUZRtT<3Isv#VNwqKuME!&&a@T85b3i7JTd$3!UT| zW!HDG4Nsjj6ZClnLsF`?87dq{1oX%o9Xc4pNAs~2qBLEH`<2Ux%-OgdmTMJ+AQ@yT@0kDAh5BLqy-7Eo14E7W zZL*-2Q5E!TtnPv$zBq>aFL2*_Fa*|hbg;h$SZ=A^Po70VHdUtYTXY2o58Yk!0<{t0 zkeq9X#I<<1yi^U4q|ebi)Hzq*a{DYx$T3b7I4~R2Fus8++&Nw?K75_3@!`9IsVI7= zM@c-Ow@M0F{=}m|5k#L}bNV=Fm(PR%Mmh&)^1nf>r0Vdfo$b0LO4>d|=@1F2fxB6Q zD#B%^el~9OK^;)bkN*CMo2TxSLGqiWqH3T!^iS?y?x=06H(#OlO+kdfoQcD%59+O; z7=M}y0|g2ky_|h!fRP=&`(J<`l&i>mxgw?Oh{Dj9I$2IxDdJEKG6pAzp|HxhlAE(@ z^c;a=@>y?!2jL3s`}SA$4pcB&1Bc@o-6da{LyC8&^kUZk#ng~l91x7Q@T9+OT<-!x z3f@ZnA&(^6Da9?Yu$u>2}K3Vgl{#Qn6Y0kNrNH)xt-0J0oI803ibF70D`2Aou2QXRMvu@q0>JQG& z|Ajmz1Ibp*%fkxb7t1X?Z?{!zNCKA_-d(@7wA{e`2)Jw@ifl{ole6$!7;$c6( z%D0$;vVra}g|Wf`-z1tQ!y?!stZG?|2L(OntX_G5$%&ZU{u%?rGOIlp4k6lJ;RjK} zliy&KE!q97#+3K_2jz*iGPGVPxRj|;L(p6f>qoU^uQl)~P`1%4tM>?RJ^z1?qlC4Q zLJ)l!k&vyP$FcV(z2pUxJN17t(DKbGJ8V-R2Y)d6oT+?U=8XJD5D8e@IbGZA&y@P_ zEj_R0rOmM8&F`NKL#YsxvZV++@rf~^5r+Oz>n5JOnK0~0Kz`YUe8*giiSAVWJ%}H< z))VgKwdFtEuBsEmZ&pIg%s-R}x#dDv*s6cyIN z;#;Nt_p~zZ&r2Eow85xseI8bUh*4PVM>ts`{ygJV1sXeKv-_C;%xHt@nsxe|mp*1d ziJ-GWK{@?)WD4^-AVYGObAGW&4`FNfiq}wCfNh%QLPs0(a4cRBplstC| z%40r@+Yn|@8pDf6i zh9xRP?=*qYC9lv&!ZV1`pQ`n)km^sZxSh7Ar9WeL`CwWqOH;3vG^3@};gX{LyDp$N zO}(S9ZMBe^5Zx;9b0#^tw7xfj6ex!7?3*FQ%KZe$n`A6AR3onM+ITSO=jsR8BK1Ao z6wPcy8P54=!)wDELXr2SKP{8~x_oSJN>~X@6Zsqk*M?S12yos8@d#+fFO?TQ>=PqX0)v^!YSoXH&0!h#?}_CTjS zLc26Wo6)ER+K`uCZKU4|dI_K`mv3L$LGp@riiKnD_zx#-=kH8@^uVChXeM@i6NOSV zi=AZyH$V89>IR$f&nxX!;*+R?qxaU|$h4~IrPl@X$}nir`5f`kr+&qk2&TW?DJWNE`2in zx0*T>FQ61dG};DDvAO%>?!LkS9$ESRhk0;m)F1iofvBrrtfRa1)o1L_4&5-_wXoX} zq3LfC_idS@nJnZGm6IjriF8t(7sOX{XwYJ! 
zMtjXV{bIY^B>sXdC|jemahc`z}oCT4p7+|k-$lTGTkO_EVn z@`M&@P^vzF@uy#y7U9v&B}ZXDEP}UnU5yH}Np!C!qq8^JtI9ESdbvsJeacg zp^3ZWT&|RHPcJg+PnNN~>HG_%g@Vz?EXPk4f^R?yMkl196ZPL7zvxC)s3LmuVyrcC zo%2wWaYZZCSkA^f4_LvLFt1<+x69Is4X1I}EGXtvDe<&$5FZ!8mKr3O{B)?gE;%E) zAVdD(bniF#cc1-yscq@PNNE<8MeAFF%6b%;LN)xMgcEz$nl4wIWW>_mbIiVKirG;?Wxe~ z$4NM1-oAYA&NQOqnkfFdL(?r+H~A*GLH{0|-uX8fQ4{8NDh$)o#B!!L57GO-4rnQ& zs`s|U=vqCa)PPv9ci_3%x&)Jz=z|~yh>w|Ft`Q}jgo4c2VyZr8HIMf_g(jj3oicPa zr}wFd$0=cDddO!hHvC-B?6yofaFE-qwZ&xZ+%Hw+nHg8|`rfK^*GTQ-n7M8~}BueUe%j zizcY)d?LDzZMb?GEuItQ5=y>bibm2RojbHVdhfzV}&Sf!n>!Cl~7bR ze{faG>4*1lR7gf!^l$GY_-c_vD@Ff7@oGzfQlmWH(W-DO$?n>FpA2ekq~QO=zmMsX zwp*E8V2j1En0cDcB0@?zRZ(&=Kg-U-J5m6NUlB>F1dlY{yNGjTd!{y~H zMbsg?zt`v$S~nE)l1GVQ-5L&!GTS5TB0aB6yukDex{mTLjbnU&^P%V5)hY__H^lnP zzmMNE-T~T2uJo&@3(a9GxJVk-AdckoudUb79@S9?315-d243WRFC=);4usXetc+ta z?t+6J9bKFT)SVgFiuEiYUyG7!QT~}(6y0b8W^Rp%pnU&8xv`=FE;P6L(RnF)tt#L# zbt}Vr3REMK`<7>Jjn#0&{vww)SW62zc+K$*0%{VGPB6n@NylVVH?&)PvuA>1>LgDB z2$}b^>z*6dSZBSD34>8`KX86<KQ-G#bVuf6e)c0`E~j^#L285KrnPwWTc8L^5@p$KK!*eB%x>K% zGucS6PA$YuHwQ!{H^N*arsEGMXaKsH$NE-pgup>wJ&U{EsWbOt|Lb5@^t%PwyFUg9 zJSHn^!}TrhFNosP!bn4oghAFy+m*eep7%LH_Y}RG9Y2dlLkj%iv@uv>)e*IDl{Ms$ zMHLO${;py83%j3xUqYP_h;-a3U_8aiSw1Vy1cvpYxrQ;c=D4h=3UX94pyPf)^vdPj zn><6=!D#s2pk$2-gf$Hb2_tj6hFb_vvow!$fEolncb96L&Jlfsw@C{KAp}(u>@R6Q z%xELGdSg5e7{AmS%h=DxVek7@Ji(Y|Cl_mjN=Cvc6_+s*4m^?2V-DY-VT)XmzbHlF zgLVPvV*`*FX@)xSlVt2qXvdN6u^IBxFV{TN5I1nqi8N#u}IM4e`Cqkzze-xQR`jN?SBsa z#YwgSpjnDa{xS@;-+tq?clF<|Ms%(AC3b~#l`Cw_r6SJ~QS#0s<)H>ZBjiRl|HJ3H z-&Zl33zXB6?K|=A57A2`m}8U$ofDm(C}8gqanFLA0_;O==$EP&R|YsDTtJIpK}|b% z(UN3NbPx2=epjL~uHgpAIDYgpFK2V}rG@2hBXfIbT({KF*RG2Xzn{QG-Xb@fuyfo* z-yJO=BIaAiC#P|p4DH1BUnj7{js##hM~_dV+Q(f53C5^p9IgKa5LPnZ&$+t<_=UKFXqW1_L! z>?5ilxMj12y!gezO%`7)IY*Am#R}g^;lL*KXa*ri3ncc_(4~!M1{lE^SA-)_^4G8g z-E!;*KFb*3s%O8#V)Ai#SHG3!#BHl$yusCLDa9r zny&TH?LIcEXI?D)U{}4D-;x@nFJ1$RP5Kqy-`RJsgqPJb76Sl-c%mbW)^Xv>JNDa= zc{+6^tnzH(yMO)6Ylvh&vgD`Hvi0|i6=8=G5FI$ic#IPq6!<;l*1phZ9V3RR86b>u zxg(~_2>%OVGklV9TE{(Rll^|m4mzbkR4+8n`C#aOSebWIB=mU{2J6;Yx{1Cd;MC6L zW#(6!IMfvY>)eb;Jm#H@hJFG2Sz$8C%2dY`z|G!!@qmS4`O@(3b3nT?Ov_BXDZe)I z%9|Ywg{DEV<}P;^OuShBYyUe6pS5Lkf%xY({1)MUTdV+L=`DRU`osP;4^g9{WBdli zuMG$b_PEu_sBch@o;73v^F1n$Y>QNZvM z;eT4ipbw-)+Jj3!6^h^q6u+;3;rw3<#c;iqBdhixcCv&MT_1*4R+=8i@lHjCLLv9C z;|wcxV+KC=;HcaAM*kf1!_{^ybUF*vm*g-g?7GG&B?!nI$6-1IXE&h++J)Pbw5o zK5&Dih0ZCYo=AJLrE6#-i6Otgo;APQ=aAGJi>4wVqVBYz7_20NIal+c1zA80cIJw5 zrWB}Kn_{%K$%IDx1qX%;9inMVkUv;nqCF=j4lX4v>zMvVZrI91AdtnIjvM%%{{06S z_VCjy1R2SDzT{pl8GW z`d4wo7_qqf=EI6x0(6pN3I5Q2Xr)$3^0n3ARI()-b4G)clf)ETH88GLZr6}$q74dG z0Rw}p_}|sW$Y5^DOYiobMffW92q)^@aUr~gsCV%d|GO23cnj*FJdd&WDWJp5lK zw&mG80Qql#xddG{yPwY9x*_G{Wz8_(hZcf-z0RDF+u7mo&*qq)LaJ|V39G~0H@jlg zgz#p+Q_KGSVk;4`=pw%VV6zaIoCB_xlMIjAz_`cd2Mk*UOp^na1fP>zB(xqOB2Y}* z$)jXA0!4=9mkJ6@i;dyXQH$t)gAl5NH4>e|4t^|GYw&^?*ww}-5+&9xfi8ztrY zPtrIA(OZS6t<|D8nvNW(VN>M7f*%?fYYTE6>fHNx?_l-u@G!6l_*GX_h^Bp6G2vXJ z7}(Vx@v?7#TuiEtqO6tTuMLGir6?yT@AIQ?x!fc?4y@ovZHm!*{270Kg8T*)@X zX=UQGIvX;)!);I%r|Z0ZQ)B{X33O>^&b+#|omC@aO9L`T(SKB|EV~Qz*eh%pM#^7b z69=L4setgiVGn8yr%#P9qd+w<<@i1v8Wfdv=7x{PV=LLilOzmk?RNh5eSj*sqaM46 z0F|wa{MKcxPe`Px#3H=$f6=mR2Mry6Tr6S?r_C`-I?(Ci5Tb?~zCbQNLq$~3r@Os? 
z*0YXY;#ObJmU(Nw0fhp_1Taj4@ZjD;LW1F$ifeEgCZmP9;DJKnI&#CE;)1%NlI-AZ z;9zP%VkAG5v0n?)QNN-o8G05OfBo*uU~e#mO#3eWsdCcHL>};t6i4zN4&q;cq~JI; ztV|cE#F|@>9T{P0B!p~04`~k^s0@!6TW*v z&3@tsOsZGNy@O&TN70^tZbY35y|f@2WSN#%>_qy*;$I}g=FrF$2Zvofd9?xBAsxgd zY*Hiql|~xqb{YXNCcMlzCcSYWQ83HqU;>u0Ift@D;u^h*PsddB<>;T<7$pdC6(mP# zqpByV1u!&zS8n>U1J`+xcS~UC{+# zDw0e9E&quIL_b4u+~ap8hw_d-H6$bAX=25OaFsy^#cfyj{=?liChLoh%6=OvJc$CZX%E0AKX5YD;etyI^a$FfsSCuk>fi)%g3o(|(%aL3h>WSw$cDVc zc8TABTKNK7AY177cp3dTrr-{|Uy-g~CXG=GX$RTN$7zw;D)y;@F8!RFKKAG)xDn56fSM_^?n-hkk**s(1-4r2g&X)pydJV(%vCQeCh5X`}m@T_W&WsSB$}!On+BwpV&=4QhPnC4%OoZk@FJhmg5PUlEu<9W? zYrxHmcoiS23rQg}@mwzjV7=#D$%?rB zcN_l&1Mi!@hNhK*7BjqISaa)iOhLK(h2*s^WSneQ#-`c8>%2<^Qo?2^D24wxNRKni z$y>!?!XE7#WL-TEM@9H#`=s{MmCO>@m_1|r&~pOtW(PjV*OkbDmBIi0G*Xtc`l_`O z-NXrEkrt}f{zWvQ2YuY;etJ}$pRFy{kb>2R{O-h4kdg?&8OIS|S=I_FFbrPc@gk@$S zZ~e1`EOtyFKiuv5fwjqSlwf%UEwi(jEPB>i2t84=4Sw^Ly&%+@!f&CVm4LhR-*orZKMQnx1!Drj?J1rq|> zvR816Sew+$uKtnK8KSCR(bNTNh&AZs$$TM&-{#6k{>tuQsFLmUdQ=;02gW zUF~v3|K8u7Op=!+2q({8;QjB-cDH+2$ufdr$1}UitI@l# zErjpim{5tQJ3)gq97xD>q_GUxz{FOCnghcCHrJ-h?ZOuYk0v$`F%b_LP*U6kbkD{ZewH)Tv zu=vq-+HekxELhVCo_qFUDL{^CrYb?R+C%@x8VLzQdTbAIuCK?zJZhN~xfEXyJ2f~~ z#;JFbgmr6T6}_O0`hT8!8jw`zC5+IAq#~7|-GMg*pT^JKKl1_ixD93gkR8p8pYhP= z&*zBm+)H$#4|2x5;|;jQ!G`%Ff;$M`t(njY&$VeidDJd0>`>L=o(iiS-?g8TpoKbQ zk9;?Qv2_UB2wlPqKu={!5qIBhE%2i+O-MJgQ-XonUSN%%Fa_gtf3IVgL%Xl#9Au>E zNVOo1?G(_df6A-Z#)Yb#BWE)0qG_5CQ?Nph|&=P(ku|^AS$3Bpi%_s9i@n1=)L!T9(>QY^B2x_@g*da z-JPAe=iZr}Ck=mgL<6PvHG{A878!FB$In@%4GjWD##g0wWJtV)mzW(6*c~BSb{H); z+`d5xSkJ;lA0O)r&eSt0V-b!ScntbsUc4MJ3;P)wo;ug?j2`5sXPN?bO44 zdRXgDggps-;i;0!EtV|iNB;jR$9RPI+{|~mP29NbN*bv8hw^GVGOf=1QXN~kGwE}O zBmJ?q*POYwEma&MRN?b+z0Fsw*MOv!9wY>=DSz1NUG0B;R>I~1rV zH(_qC0i}IGNKhomoN4RXm1PA*Ah$&{e)0AI?|hfkK3{mQ9=#LNCpVaB+qq4WA(og<&B! z*7?1FJ_+s=61E~zgavYz50{LCVoXOGf}iX|U<+Er%ACP6*>X+2^c}1iWNpcIgZaL0 zeF;EYnJ7sdv+;u;z;}PYY}lBxfZp13Y*^4z@`8X1qvoLtSH~UBPSUMlv*KUt2fQEz z-&aQX=xoeP)%#QBPj2JNzZOk@HUzQzGl#W!2!!`ma81U=$F#%}0(fBul-ZEp=ES0Mq65swHA?t_`8Sx^U& zRDEIs_ZHpXI3;crO~vwo^#y>2&(x5=W-D3*0Y$nuWeWCAKQD5QNg1TlIzTbHoEKdF zNBEFVJckm!1huiyg7hdR{gqO?{Tsp@D$Pz_$(UP*6`+z)VFe$;9;Jw~AZM8P>h<}I zUgLazekgR+{ONChZ}joD2BLCQ?L3t!n%JK z)A|Ci@pjF#Lp9OMbc2O~o+XU+Xi0Ids1*myMeJ&gqz&D-@9EEV{X%7$Elt{+e)3aj zQVgTay69v$*%iIIEnNUrdJ{ToyI9nNEpUmFD-#)y!Q5`*w7Oup*4c*B!tGbMiSm2POJ~0h7Z%Fz>c&SxOSK72z%4fu z)hfg*V}=pV_cL)Ha81m|XY!)lATpx8I;VX{bY0?&Po1o$GCiC5py3>pTLQ%tAn#*dQDi@b?mNEX>CuZAu$-y|=>}%}zJrWKLa;{b59g{2N&HY&ZRdmg~d4h^T0Ym;q1i(TyI&M%kHo6H)@DYQQ|$IRr~8LO+pQ5p&D8uoJO zr&?l6&y^vf)XqDz#KnOJ_Dh`@AGjuZ9WH&kbaKBy8$z(DwejIor+YZdp>BMKnx}K` zjU(#ZHJa=OV+t-0Y9%ClX&Fw&VXR@+PvxCz%s#n*$Xy)pGXDh!rAxEgq)Bt*ew;vC zp3mmWIiE?>gJDUazSXVC#*WUI?-1xodJnE?)<6&rd|a*njY_ijl61Z6l(S|SB!zFV z%sb+tRK^)~9}1S#kOe(^{eZL%^Vg}j^N|-jWFmqxB47n-O!M{yh^J!>Ey&nWw<$1( zV%_tGtm^woc$a$2Aua8*~9ioS(!_U8W8kyuggy^TOHX3Vqn-P{! 
z`4NrwEpv9iXt}&(2?gZvzSj+6VVn~J@>&7)zVwRWdC1q7O_XgmT5VMJ;mAJ5Lz`2~D$7 zw)*MoLgYUknkB>@9<)7@@N;!|<*0>&=CSt8+-{x58s_)iqQ+BeW<6wJiuvtc^Fa$j zc;Yt{$~*F*i_YwcJC-N>&0&mbS-6h$Rc5FxH~2N_Nl+gzz`T9|J%7zA4GIp@kD~Nq zT2<>qZt8%TI;?&=%{>0gv)^eQ+p&~m0j+VVzHYBf%|W;sP8due;VX1mjodS4CD!j~ z^JM4Wqv;0rVIvd^I~YA%S~1O|0eu-$@HOS4mGZ8O9ACfG;w22Rc34e3cl;5M{OViu zBJ$mOv^|jlt!>x}zoCm&<>cE1+LSpq5r``ts|GyRssbL9=8V_;-HzH%|W`YN3-N` zCHczWlD+K27uSVDIO1g-DG?M%E2?zi{inXv=S=n5k z!SzMnJ878oHd&JI@67x6VoJXV!IU1i0=PVgei%kmkMmle2jDILT@np7J0t+(*Zq?c6^<630ogTYY)5b=oInxES~w&LvFAc=@VuQ z&pfn@UROzAfBW1n@;$rnZIC+Mp~x<4kLu+#gz&H*l5vdT0 zv&`Tp?iKF&+>;|Ilkba$ZaR)D!aUKU@PEGLamh4(Hnpnep}-_fa<#*8ap`i!GC*h%Zi&mJazfa0T_~W zI1t^RNYd38{IbhnwrdF+Fr#y}mF0UG$qnj=`uUD~liJTHm)3R~`K+rojq+?4$tEO; z0>ek1Hqd%;W$`I^rSm^3rVXFj29gV2CrBqij;wE+E8osSREnU#JZG?CG{o9l9c*Aj_yI?-SY2C&v-WC5%-$){JMc$LSjBZ0IOuZQcp(9*6f?$>BM^Yxn3n zEt6LR>~HJ%2w09rL4ma$ac;B@Z1>O1x#XPQTFM>BOpVAB?^(+os_q=%1Zi1uugRx& zLhNfeDH*a7)83mtthjbD#~nR@n~`C?Kxqpw4!u)OrkI%ifS=V-z5wQ|XCe$mi(hdM ztBLQS$Wod^&nNz!C;%~4HR7r=x1E`fno-Eg#E61B2Y|zPc*JuU?v}P0ZJSpGs@|z% z`vEnX#zdAN;LR6}TWq4#-c=b?ZEzXzdA`Z_$)xQ;-QQ4~TP~0u8b)Y2g)VVpGP7uu zXH}n)31kk8k`dW3%z)Ui-D@o9cgxfNi*Nld?)wA}KiU=CEo|}Nc4GU%?KJmO*qE6= z#FOM9lZ))bJbu9JzaS1mX0Z$Yfh*76oF5?I_(x~?r=LZu?=C@Eg?}YjBU@qNr{k#B z_->65@|3(Y`toOhlUYKR;pCfk?U_qADl;l)va`rd?pFQ0qxBlZGxL1=Mmpy;F ztJ?c^hqQ-|X?q2!!+_pP)rh0XAIsZ275g4C8ap+za%Ay^J?0335NQ&t*A*?-1WG1F1`8;Czt7!dY2|X`dco24n#}`UhvVCvX1k1y z>ko2@xz+r>NN8THs2m+F*S<-2OBK+xJ6jhe9IbW zVTP`a;G@rh0rmjb&s6%{8AZZhSnU3bY3?LlH>wF+XTNeg9mMmwi4EXkztN!&{xI+r z%M;kIl1o+_Rj7*tYr1H9N%Wy?9A2FMDJ4*@wtki}Ild2yq1hAc1}q)kGhXP3Qg(+s zL`}hMxsJvEqv~)GAWLd2PW8k^@m@~ux@ zY6LYZx=3|L5+Hp0(6mGT%{>CbqY?EFb@@a+N(*j-u1^i?wkz4zRwCn^IBLq@j9Xa; zWb75jd0}a;4^`KEsV`m2yjR_{bNf(T(-Ty27 zXf`k!2%=obc+J$;%w`L4cDB7!sbJACs2!B+IT!JdR8ZhQwRCUGf_u~ebn_A~A< z5)kr%qI+dQNGE3J;_63HCsY%6hD?`$HV&nlFP?HTVwbA4BxE_&rXO>U7Ux(6K1PzU@jfesMr#VQer-6OuG`uM`zWk4Bab^My-zgvf zoUNf*LJD-|@^cfPe`Kb_H{Id%61*YTt{zQO6AiX85#0 z?s9AHkhO<4eHPvx@4bL8_&Jg7+{p>jF8he-4Ig`gr_A#$#8_DvRPf?|niOP9WtYdf(Y$UE>oj){2s8GgV z+AsQDXiD$#Q!GgvQ+ARlQBTijat_f|F5ulUA}{gDXVjXasY?j(BQ8`K8ym7W^%a)v zm)5)msnd&^EG~^pLlrO_#CA#9h2C-@Au?*sBToF0>FT~wZ=0SE>w;}a349Ei0TJ-?p!15}Df4h$%$&9TMm|GIA_JUsN*O4XF9HgF} zS$bx2w!G^e0AkX75}TH^2T~@{{gr82>>*=mNW_njsDTR^PS^?mx@OmHMnSCmgq9*M z;3U+*)zI+yECO_S=zkrY!|={ehrCStXODIN0L35823e>haCdD$Fk~-~SbspathB}r zH|GH5kFr?!BwyGgC=@)EjsPjvaFgcyjc)LSkd-wSG+A9!;fSkwyZ^oO7K+me92%BJw;tMX(JtWA$Ny%MpZ)>0QqdRzM5%D$pQHCtVRuj?16Qh3Nb?`Ul{ zg76Bnu#2Ua8QQshp*I6WAkgeXA-!{oYRA8boGJG4($4!u7!?Dz>H1G*4epFbZqb+j zJ%7O!tY9^;RFH28=*$gXq$AXnP4YU&&HEy>@+O1iXwA7m9ZJ~`l=z;}7xcRKy4N$h znJ2FXhKV`?67cBQ2E?pk|IE<5+r3^i0`L9Y;+RVHGJj7?6#&AXdf6`vJ59h*6ulZc zCa+LG)-J+>%_WH7+I4v=|2K1ZzQi-b_jPw3D2mCI`9buP=2}vm=3QjbO(dK#r)RTh zyuKkw0&}sVJMh4d8x8_`LWvb7zTJk7XE%h(HxpmSy+eSo#6;wDtFeaj=iA1}24)7s&Et}VJbi%18s;MKdgDz`+*#=9e1W}Fo6D}U*Ms-f>SiL#1!%m?c z=Y@(@UbGVUn_L}-t^4Qj@aU%v;-53+IR|8^On>D(GTdO(EcOa;ET;KBM-07XMU{4+mu5&EK@|f_z@Wu>}~cl zWVJ6Gri9G3c(K|TcK(5MIu@l^YjW74_;+qu`?#UK^WWekU=qjmVDTKYJFD+Y)mv6t zi53yja94U!}DLrWlIW5n$^RZzg8w|E)^PX9FuhLNt~B_XJe1eutfQ*+ zXbnU?OAhobGb1Ie-lRCPIvIal1dAtrwy4 zsCB*G_X?a+CTBnL>u+J;0Whaii}gNk!BqR-hGR|U4ugn#=S8ME7dj}R_Kxu~x8@IJ zRKcK`FpBe5KgiNxN~JUYi=Q3r=>zJK-!*gsYTDBOQ&|Z@+Ohgl`gucmg+NbDES(Sn z7YJJ$IFC!6Joj4nyq7~p5CLWD6o>S&i&&1bYdt2=R*}ZC-m7|x?L0Gj8LIM+%CMqC(@gOh+dIe6u zgfXcPDK9=YdHLeTgH}Z<$KC`Q983Io?ZKLx`L*oNyE7iAhSZ<}7tQK^*PzJ6^V_pE<5uzh^>*7BKD>HW7UHsj}K&*Q3(a@7vPw zdexAWf@`0~93L1$qEB=V_&!m-hRk{98}>H9u6k*G`(S$9dMP2GcQRYS@_a&}G)&~N zA&63)GL#Gg49Z$o2(dKTR>xhalr#^<19FLZnf~vEFN6$tlU3>Bop1=55>S-p`uBni 
n$Nc~Q1(fptdiX!~5C(dRN7$(o>1{85h(SvYqx$}q&Ex+80Lf&p literal 0 HcmV?d00001 diff --git a/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/icon/index.tsx b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/icon/index.tsx new file mode 100644 index 0000000000000..3451c833d1579 --- /dev/null +++ b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/icon/index.tsx @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +import React from 'react'; + +import { EuiIcon } from '@elastic/eui'; +import type { ConnectorIconProps } from '../../../types'; + +import icon from './google_drive.png'; + +export default (props: ConnectorIconProps) => { + return ; +}; diff --git a/src/platform/packages/shared/kbn-connector-specs/src/specs/notion/notion.ts b/src/platform/packages/shared/kbn-connector-specs/src/specs/notion/notion.ts index bccee2df15f1b..3a25a9d12e031 100644 --- a/src/platform/packages/shared/kbn-connector-specs/src/specs/notion/notion.ts +++ b/src/platform/packages/shared/kbn-connector-specs/src/specs/notion/notion.ts @@ -124,7 +124,7 @@ export const NotionConnector: ConnectorSpec = { }, test: { - description: i18n.translate('ore.kibanaConnectorSpecs.notion.test.description', { + description: i18n.translate('core.kibanaConnectorSpecs.notion.test.description', { defaultMessage: 'Verifies Notion connection by fetching metadata about given data source', }), // TODO: might need to accept some input here in order to pass to the API endpoint to test diff --git a/src/platform/packages/shared/kbn-workflows/scripts/generate_es_connectors/included_operations.ts b/src/platform/packages/shared/kbn-workflows/scripts/generate_es_connectors/included_operations.ts index df1531362c7ec..6389095252f97 100644 --- a/src/platform/packages/shared/kbn-workflows/scripts/generate_es_connectors/included_operations.ts +++ b/src/platform/packages/shared/kbn-workflows/scripts/generate_es_connectors/included_operations.ts @@ -16,4 +16,5 @@ export const INCLUDED_OPERATIONS = [ 'indices.create', 'bulk', 'esql.query', + 'ingest.simulate', ]; diff --git a/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/elasticsearch.ingest_simulate.gen.ts b/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/elasticsearch.ingest_simulate.gen.ts new file mode 100644 index 0000000000000..2325400064b1e --- /dev/null +++ b/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/elasticsearch.ingest_simulate.gen.ts @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +/* + * AUTO-GENERATED FILE - DO NOT EDIT + * + * Source: elasticsearch-specification repository, operations: ingest-simulate, ingest-simulate-1, ingest-simulate-2, ingest-simulate-3 + * + * To regenerate: node scripts/generate_workflow_es_contracts.js + */ + +import { z } from '@kbn/zod/v4'; + +import { + ingest_simulate1_request, + ingest_simulate1_response, + ingest_simulate2_request, + ingest_simulate2_response, + ingest_simulate3_request, + ingest_simulate3_response, + ingest_simulate_request, + ingest_simulate_response, +} from './schemas/es_openapi_zod.gen'; +import { getShapeAt } from '../../../common/utils/zod'; + +// import all needed request and response schemas generated from the OpenAPI spec +import type { InternalConnectorContract } from '../../../types/latest'; + +// export contract +export const INGEST_SIMULATE_CONTRACT: InternalConnectorContract = { + type: 'elasticsearch.ingest.simulate', + summary: `Simulate a pipeline`, + description: `Simulate a pipeline. + +Run an ingest pipeline against a set of provided documents. +You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request. + + Documentation: https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate`, + methods: ['GET', 'POST'], + patterns: ['_ingest/pipeline/_simulate', '_ingest/pipeline/{id}/_simulate'], + documentation: + 'https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate', + parameterTypes: { + headerParams: [], + pathParams: ['id'], + urlParams: ['verbose'], + bodyParams: ['docs', 'pipeline'], + }, + paramsSchema: z.union([ + z.object({ + ...getShapeAt(ingest_simulate_request, 'body'), + ...getShapeAt(ingest_simulate_request, 'path'), + ...getShapeAt(ingest_simulate_request, 'query'), + }), + z.object({ + ...getShapeAt(ingest_simulate1_request, 'body'), + ...getShapeAt(ingest_simulate1_request, 'path'), + ...getShapeAt(ingest_simulate1_request, 'query'), + }), + z.object({ + ...getShapeAt(ingest_simulate2_request, 'body'), + ...getShapeAt(ingest_simulate2_request, 'path'), + ...getShapeAt(ingest_simulate2_request, 'query'), + }), + z.object({ + ...getShapeAt(ingest_simulate3_request, 'body'), + ...getShapeAt(ingest_simulate3_request, 'path'), + ...getShapeAt(ingest_simulate3_request, 'query'), + }), + ]), + outputSchema: z.union([ + ingest_simulate_response, + ingest_simulate1_response, + ingest_simulate2_response, + ingest_simulate3_response, + ]), +}; diff --git a/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/index.ts b/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/index.ts index 47482be5179a1..68f7e3ea0b4f5 100644 --- a/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/index.ts +++ b/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/index.ts @@ -10,9 +10,9 @@ /* * AUTO-GENERATED FILE - DO NOT EDIT * - * This file contains Elasticsearch connector definitions generated from elasticsearch-specification repository (https://github.com/elastic/elasticsearch-specification/commit/868f66c). - * Generated at: 2026-01-21T16:44:56.585Z - * Source: elasticsearch-specification repository (8 APIs) + * This file contains Elasticsearch connector definitions generated from elasticsearch-specification repository (https://github.com/elastic/elasticsearch-specification/commit/b0cefb9). 
+ * Generated at: 2026-01-25T11:50:01.506Z + * Source: elasticsearch-specification repository (9 APIs) * * To regenerate: node scripts/generate_workflow_es_contracts.js */ @@ -24,6 +24,7 @@ import { INDEX_CONTRACT } from './elasticsearch.index.gen'; import { INDICES_CREATE_CONTRACT } from './elasticsearch.indices_create.gen'; import { INDICES_DELETE_CONTRACT } from './elasticsearch.indices_delete.gen'; import { INDICES_EXISTS_CONTRACT } from './elasticsearch.indices_exists.gen'; +import { INGEST_SIMULATE_CONTRACT } from './elasticsearch.ingest_simulate.gen'; import { SEARCH_CONTRACT } from './elasticsearch.search.gen'; import { UPDATE_CONTRACT } from './elasticsearch.update.gen'; import type { InternalConnectorContract } from '../../../types/latest'; @@ -36,6 +37,7 @@ export const GENERATED_ELASTICSEARCH_CONNECTORS: InternalConnectorContract[] = [ INDICES_CREATE_CONTRACT, INDICES_DELETE_CONTRACT, INDICES_EXISTS_CONTRACT, + INGEST_SIMULATE_CONTRACT, SEARCH_CONTRACT, UPDATE_CONTRACT, ]; diff --git a/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/schemas/es_openapi_zod.gen.ts b/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/schemas/es_openapi_zod.gen.ts index a6393344bc7c8..6b947f8574e65 100644 --- a/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/schemas/es_openapi_zod.gen.ts +++ b/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/schemas/es_openapi_zod.gen.ts @@ -2319,6 +2319,79 @@ export const types_indices = z.union([ z.array(types_index_name) ]); +export const ingest_types_user_agent_property = z.unknown(); + +export const types_grok_pattern = z.string(); + +export const ingest_types_json_processor_conflict_strategy = z.enum(['replace', 'merge']); + +export const ingest_types_input_config = z.object({ + input_field: z.string(), + output_field: z.string() +}); + +export const ingest_types_inference_config_classification = z.object({ + num_top_classes: z.optional(z.number().register(z.globalRegistry, { + description: 'Specifies the number of top class predictions to return.' + })), + num_top_feature_importance_values: z.optional(z.number().register(z.globalRegistry, { + description: 'Specifies the maximum number of feature importance values per document.' + })), + results_field: z.optional(types_field), + top_classes_results_field: z.optional(types_field), + prediction_field_type: z.optional(z.string().register(z.globalRegistry, { + description: 'Specifies the type of the predicted field to write.\nValid values are: `string`, `number`, `boolean`.' + })) +}); + +export const ingest_types_inference_config_regression = z.object({ + results_field: z.optional(types_field), + num_top_feature_importance_values: z.optional(z.number().register(z.globalRegistry, { + description: 'Specifies the maximum number of feature importance values per document.' 
+ })) +}); + +export const ingest_types_inference_config = z.object({ + regression: z.optional(ingest_types_inference_config_regression), + classification: z.optional(ingest_types_inference_config_classification) +}); + +export const ingest_types_geo_grid_target_format = z.enum(['geojson', 'wkt']); + +export const ingest_types_geo_grid_tile_type = z.enum([ + 'geotile', + 'geohex', + 'geohash' +]); + +export const ingest_types_fingerprint_digest = z.enum([ + 'MD5', + 'SHA-1', + 'SHA-256', + 'SHA-512', + 'MurmurHash3' +]); + +export const types_geo_shape_relation = z.enum([ + 'intersects', + 'disjoint', + 'within', + 'contains' +]); + +export const ingest_types_convert_type = z.enum([ + 'integer', + 'long', + 'double', + 'float', + 'boolean', + 'ip', + 'string', + 'auto' +]); + +export const ingest_types_shape_type = z.enum(['geo_shape', 'shape']); + export const types_uuid = z.string(); export const types_mapping_data_stream_timestamp = z.object({ @@ -2565,6 +2638,18 @@ export const types_mapping_all_field = z.object({ store_term_vectors: z.boolean() }); +/** + * Some APIs will return values such as numbers also as a string (notably epoch timestamps). This behavior + * is used to capture this behavior while keeping the semantics of the field type. + * + * Depending on the target language, code generators can keep the union or remove it and leniently parse + * strings to the target type. + */ +export const spec_utils_stringified_version_number = z.union([ + types_version_number, + z.string() +]); + /** * Base type for multi-bucket aggregation results that can hold sub-aggregations results. */ @@ -5892,15 +5977,87 @@ export const types_aggregations_hdr_percentile_ranks_aggregate = types_aggregati export const types_aggregations_hdr_percentiles_aggregate = types_aggregations_percentiles_aggregate_base.and(z.record(z.string(), z.unknown())); +export const ingest_types_pipeline_simulation_status_options = z.enum([ + 'success', + 'error', + 'error_ignored', + 'skipped', + 'dropped' +]); + +export const ingest_types_redact = z.object({ + _is_redacted: z.boolean().register(z.globalRegistry, { + description: 'indicates if document has been redacted' + }) +}); + +export const ingest_types_ingest = z.object({ + _redact: z.optional(ingest_types_redact), + timestamp: types_date_time, + pipeline: z.optional(types_name) +}); + +/** + * The simulated document, with optional metadata. + */ +export const ingest_types_document_simulation = z.object({ + _id: types_id, + _index: types_index_name, + _ingest: ingest_types_ingest, + _routing: z.optional(z.string().register(z.globalRegistry, { + description: 'Value used to send the document to a specific primary shard.' + })), + _source: z.record(z.string(), z.record(z.string(), z.unknown())).register(z.globalRegistry, { + description: 'JSON body for the document.' + }), + _version: z.optional(spec_utils_stringified_version_number), + _version_type: z.optional(types_version_type) +}).register(z.globalRegistry, { + description: 'The simulated document, with optional metadata.' 
+}); + +export const ingest_types_pipeline_processor_result = z.object({ + doc: z.optional(ingest_types_document_simulation), + tag: z.optional(z.string()), + processor_type: z.optional(z.string()), + status: z.optional(ingest_types_pipeline_simulation_status_options), + description: z.optional(z.string()), + ignored_error: z.optional(types_error_cause), + error: z.optional(types_error_cause) +}); + +export const ingest_types_simulate_document_result = z.object({ + doc: z.optional(ingest_types_document_simulation), + error: z.optional(types_error_cause), + processor_results: z.optional(z.array(ingest_types_pipeline_processor_result)) +}); + +export const ingest_types_document = z.object({ + _id: z.optional(types_id), + _index: z.optional(types_index_name), + _source: z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'JSON body for the document.' + }) +}); + +export const ingest_types_field_access_pattern = z.enum(['classic', 'flexible']); + export const types_indices_response_base = types_acknowledged_response_base.and(z.object({ _shards: z.optional(types_shard_statistics) })); -export const esql_types_esql_param = z.union([ +export const esql_types_single_or_multi_value = z.union([ types_field_value, z.array(types_field_value) ]); +export const esql_types_named_value = z.record(z.string(), esql_types_single_or_multi_value); + +export const esql_types_esql_params = z.union([ + z.array(esql_types_single_or_multi_value), + z.array(esql_types_named_value) +]); + export const esql_types_esql_shard_failure = z.object({ shard: z.number(), index: z.union([ @@ -7770,6 +7927,753 @@ export const types_aggregations_adjacency_matrix_aggregation = types_aggregation })) })); +export const ingest_types_user_agent_processor = z.lazy((): any => ingest_types_processor_base).and(z.object({ + field: types_field, + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' + })), + regex_file: z.optional(z.string().register(z.globalRegistry, { + description: 'The name of the file in the `config/ingest-user-agent` directory containing the regular expressions for parsing the user agent string. Both the directory and the file have to be created before starting Elasticsearch. If not specified, ingest-user-agent will use the `regexes.yaml` from uap-core it ships with.' + })), + target_field: z.optional(types_field), + properties: z.optional(z.array(ingest_types_user_agent_property).register(z.globalRegistry, { + description: 'Controls what properties are added to `target_field`.' + })), + extract_device_type: z.optional(z.boolean().register(z.globalRegistry, { + description: 'Extracts device type from the user agent string on a best-effort basis.' 
+ })) +})); + +export const ingest_types_processor_container = z.object({ + get append() { + return z.optional(z.lazy((): any => ingest_types_append_processor)); + }, + get attachment() { + return z.optional(z.lazy((): any => ingest_types_attachment_processor)); + }, + get bytes() { + return z.optional(z.lazy((): any => ingest_types_bytes_processor)); + }, + get cef() { + return z.optional(z.lazy((): any => ingest_types_cef_processor)); + }, + get circle() { + return z.optional(z.lazy((): any => ingest_types_circle_processor)); + }, + get community_id() { + return z.optional(z.lazy((): any => ingest_types_community_id_processor)); + }, + get convert() { + return z.optional(z.lazy((): any => ingest_types_convert_processor)); + }, + get csv() { + return z.optional(z.lazy((): any => ingest_types_csv_processor)); + }, + get date() { + return z.optional(z.lazy((): any => ingest_types_date_processor)); + }, + get date_index_name() { + return z.optional(z.lazy((): any => ingest_types_date_index_name_processor)); + }, + get dissect() { + return z.optional(z.lazy((): any => ingest_types_dissect_processor)); + }, + get dot_expander() { + return z.optional(z.lazy((): any => ingest_types_dot_expander_processor)); + }, + get drop() { + return z.optional(z.lazy((): any => ingest_types_drop_processor)); + }, + get enrich() { + return z.optional(z.lazy((): any => ingest_types_enrich_processor)); + }, + get fail() { + return z.optional(z.lazy((): any => ingest_types_fail_processor)); + }, + get fingerprint() { + return z.optional(z.lazy((): any => ingest_types_fingerprint_processor)); + }, + get foreach() { + return z.optional(z.lazy((): any => ingest_types_foreach_processor)); + }, + get ip_location() { + return z.optional(z.lazy((): any => ingest_types_ip_location_processor)); + }, + get geo_grid() { + return z.optional(z.lazy((): any => ingest_types_geo_grid_processor)); + }, + get geoip() { + return z.optional(z.lazy((): any => ingest_types_geo_ip_processor)); + }, + get grok() { + return z.optional(z.lazy((): any => ingest_types_grok_processor)); + }, + get gsub() { + return z.optional(z.lazy((): any => ingest_types_gsub_processor)); + }, + get html_strip() { + return z.optional(z.lazy((): any => ingest_types_html_strip_processor)); + }, + get inference() { + return z.optional(z.lazy((): any => ingest_types_inference_processor)); + }, + get join() { + return z.optional(z.lazy((): any => ingest_types_join_processor)); + }, + get json() { + return z.optional(z.lazy((): any => ingest_types_json_processor)); + }, + get kv() { + return z.optional(z.lazy((): any => ingest_types_key_value_processor)); + }, + get lowercase() { + return z.optional(z.lazy((): any => ingest_types_lowercase_processor)); + }, + get network_direction() { + return z.optional(z.lazy((): any => ingest_types_network_direction_processor)); + }, + get pipeline() { + return z.optional(z.lazy((): any => ingest_types_pipeline_processor)); + }, + get redact() { + return z.optional(z.lazy((): any => ingest_types_redact_processor)); + }, + get registered_domain() { + return z.optional(z.lazy((): any => ingest_types_registered_domain_processor)); + }, + get remove() { + return z.optional(z.lazy((): any => ingest_types_remove_processor)); + }, + get rename() { + return z.optional(z.lazy((): any => ingest_types_rename_processor)); + }, + get reroute() { + return z.optional(z.lazy((): any => ingest_types_reroute_processor)); + }, + get script() { + return z.optional(z.lazy((): any => ingest_types_script_processor)); + }, + get set() { + return 
z.optional(z.lazy((): any => ingest_types_set_processor)); + }, + get set_security_user() { + return z.optional(z.lazy((): any => ingest_types_set_security_user_processor)); + }, + get sort() { + return z.optional(z.lazy((): any => ingest_types_sort_processor)); + }, + get split() { + return z.optional(z.lazy((): any => ingest_types_split_processor)); + }, + get terminate() { + return z.optional(z.lazy((): any => ingest_types_terminate_processor)); + }, + get trim() { + return z.optional(z.lazy((): any => ingest_types_trim_processor)); + }, + get uppercase() { + return z.optional(z.lazy((): any => ingest_types_uppercase_processor)); + }, + get urldecode() { + return z.optional(z.lazy((): any => ingest_types_url_decode_processor)); + }, + get uri_parts() { + return z.optional(z.lazy((): any => ingest_types_uri_parts_processor)); + }, + user_agent: z.optional(ingest_types_user_agent_processor) +}); + +export const ingest_types_uri_parts_processor = z.lazy((): any => ingest_types_processor_base).and(z.object({ + field: types_field, + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' + })), + keep_original: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true`, the processor copies the unparsed URI to `.original`.' + })), + remove_if_successful: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true`, the processor removes the `field` after parsing the URI string.\nIf parsing fails, the processor does not remove the `field`.' + })), + target_field: z.optional(types_field) +})); + +export const ingest_types_processor_base = z.object({ + description: z.optional(z.string().register(z.globalRegistry, { + description: 'Description of the processor.\nUseful for describing the purpose of the processor or its configuration.' + })), + if: z.optional(types_script), + ignore_failure: z.optional(z.boolean().register(z.globalRegistry, { + description: 'Ignore failures for the processor.' + })), + on_failure: z.optional(z.array(ingest_types_processor_container).register(z.globalRegistry, { + description: 'Handle failures for the processor.' + })), + tag: z.optional(z.string().register(z.globalRegistry, { + description: 'Identifier for the processor.\nUseful for debugging and metrics.' + })) +}); + +export const ingest_types_url_decode_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' + })), + target_field: z.optional(types_field) +})); + +export const ingest_types_uppercase_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' + })), + target_field: z.optional(types_field) +})); + +export const ingest_types_trim_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' 
+ })), + target_field: z.optional(types_field) +})); + +export const ingest_types_terminate_processor = ingest_types_processor_base.and(z.record(z.string(), z.unknown())); + +export const ingest_types_split_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' + })), + preserve_trailing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'Preserves empty trailing fields, if any.' + })), + separator: z.string().register(z.globalRegistry, { + description: 'A regex which matches the separator, for example, `,` or `\\s+`.' + }), + target_field: z.optional(types_field) +})); + +export const ingest_types_sort_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + order: z.optional(types_sort_order), + target_field: z.optional(types_field) +})); + +export const ingest_types_set_security_user_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + properties: z.optional(z.array(z.string()).register(z.globalRegistry, { + description: 'Controls what user related properties are added to the field.' + })) +})); + +export const ingest_types_set_processor = ingest_types_processor_base.and(z.object({ + copy_from: z.optional(types_field), + field: types_field, + ignore_empty_value: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `value` is a template snippet that evaluates to `null` or the empty string, the processor quietly exits without modifying the document.' + })), + media_type: z.optional(z.string().register(z.globalRegistry, { + description: 'The media type for encoding `value`.\nApplies only when value is a template snippet.\nMust be one of `application/json`, `text/plain`, or `application/x-www-form-urlencoded`.' + })), + override: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` processor will update fields with pre-existing non-null-valued field.\nWhen set to `false`, such fields will not be touched.' + })), + value: z.optional(z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The value to be set for the field.\nSupports template snippets.\nMay specify only one of `value` or `copy_from`.' + })) +})); + +export const ingest_types_script_processor = ingest_types_processor_base.and(z.object({ + id: z.optional(types_id), + lang: z.optional(types_script_language), + params: z.optional(z.record(z.string(), z.record(z.string(), z.unknown())).register(z.globalRegistry, { + description: 'Object containing parameters for the script.' + })), + source: z.optional(types_script_source) +})); + +export const ingest_types_reroute_processor = ingest_types_processor_base.and(z.object({ + destination: z.optional(z.string().register(z.globalRegistry, { + description: 'A static value for the target. Can’t be set when the dataset or namespace option is set.' + })), + dataset: z.optional(z.union([ + z.string(), + z.array(z.string()) + ])), + namespace: z.optional(z.union([ + z.string(), + z.array(z.string()) + ])) +})); + +export const ingest_types_rename_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' 
+ })), + target_field: types_field +})); + +export const ingest_types_remove_processor = ingest_types_processor_base.and(z.object({ + field: types_fields, + keep: z.optional(types_fields), + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' + })) +})); + +export const ingest_types_registered_domain_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + target_field: z.optional(types_field), + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If true and any required fields are missing, the processor quietly exits\nwithout modifying the document.' + })) +})); + +export const ingest_types_redact_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + patterns: z.array(types_grok_pattern).register(z.globalRegistry, { + description: 'A list of grok expressions to match and redact named captures with' + }), + pattern_definitions: z.optional(z.record(z.string(), z.string())), + prefix: z.optional(z.string().register(z.globalRegistry, { + description: 'Start a redacted section with this token' + })), + suffix: z.optional(z.string().register(z.globalRegistry, { + description: 'End a redacted section with this token' + })), + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' + })), + skip_if_unlicensed: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and the current license does not support running redact processors, then the processor quietly exits without modifying the document' + })), + trace_redact: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` then ingest metadata `_ingest._redact._is_redacted` is set to `true` if the document has been redacted' + })) +})); + +export const ingest_types_pipeline_processor = ingest_types_processor_base.and(z.object({ + name: types_name, + ignore_missing_pipeline: z.optional(z.boolean().register(z.globalRegistry, { + description: 'Whether to ignore missing pipelines instead of failing.' + })) +})); + +export const ingest_types_network_direction_processor = ingest_types_processor_base.and(z.object({ + source_ip: z.optional(types_field), + destination_ip: z.optional(types_field), + target_field: z.optional(types_field), + internal_networks: z.optional(z.array(z.string()).register(z.globalRegistry, { + description: 'List of internal networks. Supports IPv4 and IPv6 addresses and ranges in\nCIDR notation. Also supports the named ranges listed below. These may be\nconstructed with template snippets. Must specify only one of\ninternal_networks or internal_networks_field.' + })), + internal_networks_field: z.optional(types_field), + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If true and any required fields are missing, the processor quietly exits\nwithout modifying the document.' + })) +})); + +export const ingest_types_lowercase_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' 
+ })), + target_field: z.optional(types_field) +})); + +export const ingest_types_key_value_processor = ingest_types_processor_base.and(z.object({ + exclude_keys: z.optional(z.array(z.string()).register(z.globalRegistry, { + description: 'List of keys to exclude from document.' + })), + field: types_field, + field_split: z.string().register(z.globalRegistry, { + description: 'Regex pattern to use for splitting key-value pairs.' + }), + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' + })), + include_keys: z.optional(z.array(z.string()).register(z.globalRegistry, { + description: 'List of keys to filter and insert into document.\nDefaults to including all keys.' + })), + prefix: z.optional(z.string().register(z.globalRegistry, { + description: 'Prefix to be added to extracted keys.' + })), + strip_brackets: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true`. strip brackets `()`, `<>`, `[]` as well as quotes `\'` and `"` from extracted values.' + })), + target_field: z.optional(types_field), + trim_key: z.optional(z.string().register(z.globalRegistry, { + description: 'String of characters to trim from extracted keys.' + })), + trim_value: z.optional(z.string().register(z.globalRegistry, { + description: 'String of characters to trim from extracted values.' + })), + value_split: z.string().register(z.globalRegistry, { + description: 'Regex pattern to use for splitting the key from the value within a key-value pair.' + }) +})); + +export const ingest_types_json_processor = ingest_types_processor_base.and(z.object({ + add_to_root: z.optional(z.boolean().register(z.globalRegistry, { + description: 'Flag that forces the parsed JSON to be added at the top level of the document.\n`target_field` must not be set when this option is chosen.' + })), + add_to_root_conflict_strategy: z.optional(ingest_types_json_processor_conflict_strategy), + allow_duplicate_keys: z.optional(z.boolean().register(z.globalRegistry, { + description: 'When set to `true`, the JSON parser will not fail if the JSON contains duplicate keys.\nInstead, the last encountered value for any duplicate key wins.' + })), + field: types_field, + target_field: z.optional(types_field) +})); + +export const ingest_types_join_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + separator: z.string().register(z.globalRegistry, { + description: 'The separator character.' + }), + target_field: z.optional(types_field) +})); + +export const ingest_types_inference_processor = ingest_types_processor_base.and(z.object({ + model_id: types_id, + target_field: z.optional(types_field), + field_map: z.optional(z.record(z.string(), z.record(z.string(), z.unknown())).register(z.globalRegistry, { + description: 'Maps the document field names to the known field names of the model.\nThis mapping takes precedence over any default mappings provided in the model configuration.' 
+ })), + inference_config: z.optional(ingest_types_inference_config), + input_output: z.optional(z.union([ + ingest_types_input_config, + z.array(ingest_types_input_config) + ])), + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If true and any of the input fields defined in input_ouput are missing\nthen those missing fields are quietly ignored, otherwise a missing field causes a failure.\nOnly applies when using input_output configurations to explicitly list the input fields.' + })) +})); + +export const ingest_types_html_strip_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document,' + })), + target_field: z.optional(types_field) +})); + +export const ingest_types_gsub_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' + })), + pattern: z.string().register(z.globalRegistry, { + description: 'The pattern to be replaced.' + }), + replacement: z.string().register(z.globalRegistry, { + description: 'The string to replace the matching patterns with.' + }), + target_field: z.optional(types_field) +})); + +export const ingest_types_grok_processor = ingest_types_processor_base.and(z.object({ + ecs_compatibility: z.optional(z.string().register(z.globalRegistry, { + description: 'Must be disabled or v1. If v1, the processor uses patterns with Elastic\nCommon Schema (ECS) field names.' + })), + field: types_field, + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' + })), + pattern_definitions: z.optional(z.record(z.string(), z.string()).register(z.globalRegistry, { + description: 'A map of pattern-name and pattern tuples defining custom patterns to be used by the current processor.\nPatterns matching existing names will override the pre-existing definition.' + })), + patterns: z.array(types_grok_pattern).register(z.globalRegistry, { + description: 'An ordered list of grok expression to match and extract named captures with.\nReturns on the first expression in the list that matches.' + }), + trace_match: z.optional(z.boolean().register(z.globalRegistry, { + description: 'When `true`, `_ingest._grok_match_index` will be inserted into your matched document’s metadata with the index into the pattern found in `patterns` that matched.' + })) +})); + +export const ingest_types_geo_ip_processor = ingest_types_processor_base.and(z.object({ + database_file: z.optional(z.string().register(z.globalRegistry, { + description: 'The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory.' + })), + field: types_field, + first_only: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true`, only the first found geoip data will be returned, even if the field contains an array.' 
+ })), + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' + })), + properties: z.optional(z.array(z.string()).register(z.globalRegistry, { + description: 'Controls what properties are added to the `target_field` based on the geoip lookup.' + })), + target_field: z.optional(types_field), + download_database_on_pipeline_creation: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created.\nElse, the download is triggered by when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index.' + })) +})); + +export const ingest_types_geo_grid_processor = ingest_types_processor_base.and(z.object({ + field: z.string().register(z.globalRegistry, { + description: 'The field to interpret as a geo-tile.=\nThe field format is determined by the `tile_type`.' + }), + tile_type: ingest_types_geo_grid_tile_type, + target_field: z.optional(types_field), + parent_field: z.optional(types_field), + children_field: z.optional(types_field), + non_children_field: z.optional(types_field), + precision_field: z.optional(types_field), + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' + })), + target_format: z.optional(ingest_types_geo_grid_target_format) +})); + +export const ingest_types_ip_location_processor = ingest_types_processor_base.and(z.object({ + database_file: z.optional(z.string().register(z.globalRegistry, { + description: 'The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory.' + })), + field: types_field, + first_only: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true`, only the first found IP location data will be returned, even if the field contains an array.' + })), + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' + })), + properties: z.optional(z.array(z.string()).register(z.globalRegistry, { + description: 'Controls what properties are added to the `target_field` based on the IP location lookup.' + })), + target_field: z.optional(types_field), + download_database_on_pipeline_creation: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created.\nElse, the download is triggered by when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index.' + })) +})); + +export const ingest_types_foreach_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true`, the processor silently exits without changing the document if the `field` is `null` or missing.' 
+ })), + processor: ingest_types_processor_container +})); + +export const ingest_types_fingerprint_processor = ingest_types_processor_base.and(z.object({ + fields: types_fields, + target_field: z.optional(types_field), + salt: z.optional(z.string().register(z.globalRegistry, { + description: 'Salt value for the hash function.' + })), + method: z.optional(ingest_types_fingerprint_digest), + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If true, the processor ignores any missing fields. If all fields are\nmissing, the processor silently exits without modifying the document.' + })) +})); + +export const ingest_types_fail_processor = ingest_types_processor_base.and(z.object({ + message: z.string().register(z.globalRegistry, { + description: 'The error message thrown by the processor.\nSupports template snippets.' + }) +})); + +export const ingest_types_enrich_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' + })), + max_matches: z.optional(z.number().register(z.globalRegistry, { + description: 'The maximum number of matched documents to include under the configured target field.\nThe `target_field` will be turned into a json array if `max_matches` is higher than 1, otherwise `target_field` will become a json object.\nIn order to avoid documents getting too large, the maximum allowed value is 128.' + })), + override: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If processor will update fields with pre-existing non-null-valued field.\nWhen set to `false`, such fields will not be touched.' + })), + policy_name: z.string().register(z.globalRegistry, { + description: 'The name of the enrich policy to use.' + }), + shape_relation: z.optional(types_geo_shape_relation), + target_field: types_field +})); + +export const ingest_types_drop_processor = ingest_types_processor_base.and(z.record(z.string(), z.unknown())); + +export const ingest_types_dot_expander_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + override: z.optional(z.boolean().register(z.globalRegistry, { + description: 'Controls the behavior when there is already an existing nested object that conflicts with the expanded field.\nWhen `false`, the processor will merge conflicts by combining the old and the new values into an array.\nWhen `true`, the value from the expanded field will overwrite the existing value.' + })), + path: z.optional(z.string().register(z.globalRegistry, { + description: 'The field that contains the field to expand.\nOnly required if the field to expand is part another object field, because the `field` option can only understand leaf fields.' + })) +})); + +export const ingest_types_dissect_processor = ingest_types_processor_base.and(z.object({ + append_separator: z.optional(z.string().register(z.globalRegistry, { + description: 'The character(s) that separate the appended fields.' + })), + field: types_field, + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' + })), + pattern: z.string().register(z.globalRegistry, { + description: 'The pattern to apply to the field.' 
+ }) +})); + +export const ingest_types_date_index_name_processor = ingest_types_processor_base.and(z.object({ + date_formats: z.optional(z.array(z.string()).register(z.globalRegistry, { + description: 'An array of the expected date formats for parsing dates / timestamps in the document being preprocessed.\nCan be a java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N.' + })), + date_rounding: z.string().register(z.globalRegistry, { + description: 'How to round the date when formatting the date into the index name. Valid values are:\n`y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and `s` (second).\nSupports template snippets.' + }), + field: types_field, + index_name_format: z.optional(z.string().register(z.globalRegistry, { + description: 'The format to be used when printing the parsed date into the index name.\nA valid java time pattern is expected here.\nSupports template snippets.' + })), + index_name_prefix: z.optional(z.string().register(z.globalRegistry, { + description: 'A prefix of the index name to be prepended before the printed date.\nSupports template snippets.' + })), + locale: z.optional(z.string().register(z.globalRegistry, { + description: 'The locale to use when parsing the date from the document being preprocessed, relevant when parsing month names or week days.' + })), + timezone: z.optional(z.string().register(z.globalRegistry, { + description: 'The timezone to use when parsing the date and when date math index supports resolves expressions into concrete index names.' + })) +})); + +export const ingest_types_date_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + formats: z.array(z.string()).register(z.globalRegistry, { + description: 'An array of the expected date formats.\nCan be a java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N.' + }), + locale: z.optional(z.string().register(z.globalRegistry, { + description: 'The locale to use when parsing the date, relevant when parsing month names or week days.\nSupports template snippets.' + })), + target_field: z.optional(types_field), + timezone: z.optional(z.string().register(z.globalRegistry, { + description: 'The timezone to use when parsing the date.\nSupports template snippets.' + })), + output_format: z.optional(z.string().register(z.globalRegistry, { + description: 'The format to use when writing the date to target_field. Must be a valid\njava time pattern.' + })) +})); + +export const ingest_types_csv_processor = ingest_types_processor_base.and(z.object({ + empty_value: z.optional(z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'Value used to fill empty fields.\nEmpty fields are skipped if this is not provided.\nAn empty field is one with no value (2 consecutive separators) or empty quotes (`""`).' + })), + field: types_field, + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' + })), + quote: z.optional(z.string().register(z.globalRegistry, { + description: 'Quote used in CSV, has to be single character string.' + })), + separator: z.optional(z.string().register(z.globalRegistry, { + description: 'Separator used in CSV, has to be single character string.' + })), + target_fields: types_fields, + trim: z.optional(z.boolean().register(z.globalRegistry, { + description: 'Trim whitespaces in unquoted fields.' 
+ })) +})); + +export const ingest_types_convert_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' + })), + target_field: z.optional(types_field), + type: ingest_types_convert_type +})); + +export const ingest_types_community_id_processor = ingest_types_processor_base.and(z.object({ + source_ip: z.optional(types_field), + source_port: z.optional(types_field), + destination_ip: z.optional(types_field), + destination_port: z.optional(types_field), + iana_number: z.optional(types_field), + icmp_type: z.optional(types_field), + icmp_code: z.optional(types_field), + transport: z.optional(types_field), + target_field: z.optional(types_field), + seed: z.optional(z.number().register(z.globalRegistry, { + description: 'Seed for the community ID hash. Must be between 0 and 65535 (inclusive). The\nseed can prevent hash collisions between network domains, such as a staging\nand production network that use the same addressing scheme.' + })), + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If true and any required fields are missing, the processor quietly exits\nwithout modifying the document.' + })) +})); + +export const ingest_types_circle_processor = ingest_types_processor_base.and(z.object({ + error_distance: z.number().register(z.globalRegistry, { + description: 'The difference between the resulting inscribed distance from center to side and the circle’s radius (measured in meters for `geo_shape`, unit-less for `shape`).' + }), + field: types_field, + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' + })), + shape_type: ingest_types_shape_type, + target_field: z.optional(types_field) +})); + +export const ingest_types_cef_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' + })), + target_field: z.optional(types_field), + ignore_empty_values: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and value is anempty string in extensions, the processor quietly exits without modifying the document.' + })), + timezone: z.optional(z.string().register(z.globalRegistry, { + description: 'The timezone to use when parsing the date and when date math index supports resolves expressions into concrete index names.' + })) +})); + +export const ingest_types_bytes_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' + })), + target_field: z.optional(types_field) +})); + +export const ingest_types_attachment_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true` and field does not exist, the processor quietly exits without modifying the document.' 
+ })), + indexed_chars: z.optional(z.number().register(z.globalRegistry, { + description: 'The number of chars being used for extraction to prevent huge fields.\nUse `-1` for no limit.' + })), + indexed_chars_field: z.optional(types_field), + properties: z.optional(z.array(z.string()).register(z.globalRegistry, { + description: 'Array of properties to select to be stored.\nCan be `content`, `title`, `name`, `author`, `keywords`, `date`, `content_type`, `content_length`, `language`.' + })), + target_field: z.optional(types_field), + remove_binary: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If true, the binary field will be removed from the document' + })), + resource_name: z.optional(z.string().register(z.globalRegistry, { + description: 'Field containing the name of the resource to decode.\nIf specified, the processor passes this resource name to the underlying Tika library to enable Resource Name Based Detection.' + })) +})); + +export const ingest_types_append_processor = ingest_types_processor_base.and(z.object({ + field: types_field, + value: z.optional(z.union([ + z.record(z.string(), z.unknown()), + z.array(z.record(z.string(), z.unknown())) + ])), + media_type: z.optional(z.string().register(z.globalRegistry, { + description: 'The media type for encoding `value`.\nApplies only when value is a template snippet.\nMust be one of `application/json`, `text/plain`, or `application/x-www-form-urlencoded`.' + })), + copy_from: z.optional(types_field), + allow_duplicates: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `false`, the processor does not append values already present in the field.' + })), + ignore_empty_values: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true`, the processor will skip empty values from the source (e.g. empty strings, and null values),\nrather than appending them to the field.' + })) +})); + export const types_mapping_icu_collation_property = z.lazy((): any => types_mapping_doc_values_property_base).and(z.object({ type: z.enum(['icu_collation_keyword']), norms: z.optional(z.boolean()), @@ -9025,6 +9929,28 @@ export const global_search_response_body = z.object({ terminated_early: z.optional(z.boolean()) }); +export const ingest_types_pipeline = z.object({ + description: z.optional(z.string().register(z.globalRegistry, { + description: 'Description of the ingest pipeline.' + })), + on_failure: z.optional(z.array(ingest_types_processor_container).register(z.globalRegistry, { + description: 'Processors to run immediately after a processor failure.' + })), + processors: z.optional(z.array(ingest_types_processor_container).register(z.globalRegistry, { + description: 'Processors used to perform transformations on documents before indexing.\nProcessors run sequentially in the order specified.' + })), + version: z.optional(types_version_number), + deprecated: z.optional(z.boolean().register(z.globalRegistry, { + description: 'Marks this ingest pipeline as deprecated.\nWhen a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning.' 
+ })), + _meta: z.optional(types_metadata), + created_date: z.optional(types_date_time), + created_date_millis: z.optional(types_epoch_time_unit_millis), + modified_date: z.optional(types_date_time), + modified_date_millis: z.optional(types_epoch_time_unit_millis), + field_access_pattern: z.optional(ingest_types_field_access_pattern) +}); + export const indices_types_alias = z.object({ filter: z.optional(types_query_dsl_query_container), index_routing: z.optional(types_routing), @@ -9421,6 +10347,19 @@ export const search_allow_no_indices = z.boolean().register(z.globalRegistry, { */ export const search_index = types_indices; +/** + * If `true`, the response includes output data for each processor in the executed pipeline. + */ +export const ingest_simulate_verbose = z.boolean().register(z.globalRegistry, { + description: 'If `true`, the response includes output data for each processor in the executed pipeline.' +}); + +/** + * The pipeline to test. + * If you don't specify a `pipeline` in the request body, this parameter is required. + */ +export const ingest_simulate_id = types_id; + /** * If `true`, the request's actions must target a data stream (existing or to be created). */ @@ -9695,6 +10634,13 @@ export const search = z.object({ })) }); +export const ingest_simulate = z.object({ + docs: z.array(ingest_types_document).register(z.globalRegistry, { + description: 'Sample documents to test in the pipeline.' + }), + pipeline: z.optional(ingest_types_pipeline) +}); + export const bulk = z.array(z.union([ global_bulk_operation_container, global_bulk_update_action, @@ -10029,9 +10975,7 @@ export const esql_query_request = z.object({ })), filter: z.optional(types_query_dsl_query_container), locale: z.optional(z.string()), - params: z.optional(z.array(esql_types_esql_param).register(z.globalRegistry, { - description: 'To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters.' - })), + params: z.optional(esql_types_esql_params), profile: z.optional(z.boolean().register(z.globalRegistry, { description: 'If provided and `true` the response will include an extra `profile` object\nwith information on how the query was executed. This information is for human debugging\nand its format can change at any time but it can give some insight into the performance\nof each part of the query.' })), @@ -10089,6 +11033,66 @@ export const indices_create_response = z.object({ acknowledged: z.boolean() }); +export const ingest_simulate_request = z.object({ + body: ingest_simulate, + path: z.optional(z.never()), + query: z.optional(z.object({ + verbose: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true`, the response includes output data for each processor in the executed pipeline.' + })) + })) +}); + +export const ingest_simulate_response = z.object({ + docs: z.array(ingest_types_simulate_document_result) +}); + +export const ingest_simulate1_request = z.object({ + body: ingest_simulate, + path: z.optional(z.never()), + query: z.optional(z.object({ + verbose: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true`, the response includes output data for each processor in the executed pipeline.' 
+ })) + })) +}); + +export const ingest_simulate1_response = z.object({ + docs: z.array(ingest_types_simulate_document_result) +}); + +export const ingest_simulate2_request = z.object({ + body: ingest_simulate, + path: z.object({ + id: types_id + }), + query: z.optional(z.object({ + verbose: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true`, the response includes output data for each processor in the executed pipeline.' + })) + })) +}); + +export const ingest_simulate2_response = z.object({ + docs: z.array(ingest_types_simulate_document_result) +}); + +export const ingest_simulate3_request = z.object({ + body: ingest_simulate, + path: z.object({ + id: types_id + }), + query: z.optional(z.object({ + verbose: z.optional(z.boolean().register(z.globalRegistry, { + description: 'If `true`, the response includes output data for each processor in the executed pipeline.' + })) + })) +}); + +export const ingest_simulate3_response = z.object({ + docs: z.array(ingest_types_simulate_document_result) +}); + export const search_request = z.object({ body: z.optional(search), path: z.optional(z.never()), diff --git a/x-pack/platform/plugins/shared/data_catalog/common/data_source_spec.ts b/x-pack/platform/plugins/shared/data_catalog/common/data_source_spec.ts index e446790dd2083..ebe885685cc5b 100644 --- a/x-pack/platform/plugins/shared/data_catalog/common/data_source_spec.ts +++ b/x-pack/platform/plugins/shared/data_catalog/common/data_source_spec.ts @@ -54,6 +54,24 @@ export interface CustomOAuthConfiguration { fetchSecretsPath: string; } +/** + * Role of a connector within a data source: + * - 'primary': Main connector shown first in UI configuration flow (one per data source) + * - 'required': Must be configured, shown after primary connector + * - 'optional': User is prompted with a choice before configuration + */ +export type ConnectorRole = 'primary' | 'required' | 'optional'; + +/** + * Reference to a created stack connector, including its type for reliable matching. + */ +export interface ConnectorReference { + /** The connector type (e.g., '.google_drive', '.jina') */ + type: string; + /** The created connector's ID */ + id: string; +} + /** * Configuration for a stack connector associated with a data source type */ @@ -61,6 +79,24 @@ export interface StackConnectorConfig { type: string; config: Record; importedTools?: string[]; + /** + * Role of this connector in the data source configuration flow. + * - 'primary': Main connector, shown first (default for first connector if not specified) + * - 'required': Must be configured, flyout shown directly after primary + * - 'optional': User prompted with y/n before showing configuration flyout + * @default 'required' + */ + role?: ConnectorRole; + /** Display name for this connector (shown in UI) */ + name?: string; + /** Description explaining what this connector does (shown in UI prompts) */ + description?: string; + /** + * Description shown when the user is about to skip an optional connector. + * Explains what will happen if they skip (e.g., fallback behavior). + * Only relevant for connectors with role 'optional'. + */ + skipDescription?: string; } /** @@ -88,14 +124,16 @@ export interface DataSource { /** * Generates workflows for interacting with the third-party data source. * Workflows are the only model for "taking action" against the third party. 
+ * @param connectors - Array of connector references (type + id) for connectors created for this data source */ - generateWorkflows(stackConnectorId?: string): WorkflowInfo[]; + generateWorkflows(connectors: ConnectorReference[]): WorkflowInfo[]; /** - * Stack connector configuration. + * Stack connector configurations. * Stack connectors are the only model for executing workflow actions against the third party. + * This is an array to support composite data sources that use multiple connectors. */ - stackConnector: StackConnectorConfig; + stackConnectors: StackConnectorConfig[]; /** OAuth configuration for authentication */ oauthConfiguration?: EARSOAuthConfiguration | CustomOAuthConfiguration; diff --git a/x-pack/platform/plugins/shared/data_catalog/common/index.ts b/x-pack/platform/plugins/shared/data_catalog/common/index.ts index 3ad169565601b..9a25373e879ed 100644 --- a/x-pack/platform/plugins/shared/data_catalog/common/index.ts +++ b/x-pack/platform/plugins/shared/data_catalog/common/index.ts @@ -15,6 +15,8 @@ export type { EARSOAuthConfiguration, CustomOAuthConfiguration, WorkflowInfo, + ConnectorRole, + ConnectorReference, } from './data_source_spec'; export { EARSSupportedOAuthProvider } from './data_source_spec'; diff --git a/x-pack/platform/plugins/shared/data_catalog/index.ts b/x-pack/platform/plugins/shared/data_catalog/index.ts index e2ea3da2a2afd..a4ae75adea4ee 100644 --- a/x-pack/platform/plugins/shared/data_catalog/index.ts +++ b/x-pack/platform/plugins/shared/data_catalog/index.ts @@ -11,6 +11,8 @@ export type { EARSOAuthConfiguration, CustomOAuthConfiguration, WorkflowInfo, + ConnectorReference, + ConnectorRole, } from './common'; export { PLUGIN_ID, PLUGIN_NAME, API_BASE_PATH, EARSSupportedOAuthProvider } from './common'; diff --git a/x-pack/platform/plugins/shared/data_catalog/server/routes.ts b/x-pack/platform/plugins/shared/data_catalog/server/routes.ts index 6387f6653a786..b05121410fe14 100644 --- a/x-pack/platform/plugins/shared/data_catalog/server/routes.ts +++ b/x-pack/platform/plugins/shared/data_catalog/server/routes.ts @@ -50,7 +50,9 @@ export function registerRoutes(router: IRouter, dataCatalog: DataCatalog) { if (!type) { return response.notFound({ body: `Type ${request.params.id} not found` }); } - const workflowInfos = type.generateWorkflows(''); + const workflowInfos = type.generateWorkflows([ + { type: type.stackConnectors[0]?.type ?? 
'unknown', id: '' }, + ]); return response.ok({ body: { ...type, diff --git a/x-pack/platform/plugins/shared/data_sources/public/application/components/active_sources_view.tsx b/x-pack/platform/plugins/shared/data_sources/public/application/components/active_sources_view.tsx index d09e65d702a1a..2c73b8db61885 100644 --- a/x-pack/platform/plugins/shared/data_sources/public/application/components/active_sources_view.tsx +++ b/x-pack/platform/plugins/shared/data_sources/public/application/components/active_sources_view.tsx @@ -13,16 +13,18 @@ import { ActiveSourcesTable } from './active_sources_table'; import { ConfirmDeleteActiveSourceModal } from './confirm_delete_active_source_modal'; import { useActiveSources } from '../hooks/use_active_sources'; import { useDeleteActiveSource } from '../hooks/use_delete_active_source'; +import { useCloneActiveSource } from '../hooks/use_clone_active_source'; +import { useAddConnectorFlyout } from '../hooks/use_add_connector_flyout'; import { useEditActiveSourceFlyout } from '../hooks/use_edit_active_source_flyout'; -import { useCloneActiveSourceFlyout } from '../hooks/use_clone_active_source_flyout'; +import { useDataSources } from '../hooks/use_connectors'; import type { ActiveSource } from '../../types/connector'; export const ActiveSourcesView: React.FC = () => { const { activeSources, isLoading } = useActiveSources(); + const { dataSources } = useDataSources(); const [selectedSource, setSelectedSource] = useState(null); const [showDeleteModal, setShowDeleteModal] = useState(false); const [sourceToEdit, setSourceToEdit] = useState(null); - const [sourceToClone, setSourceToClone] = useState(null); const handleCancelDelete = useCallback(() => { setSelectedSource(null); @@ -32,12 +34,14 @@ export const ActiveSourcesView: React.FC = () => { const { mutate: deleteActiveSource, isLoading: isDeleting } = useDeleteActiveSource(handleCancelDelete); - const { openFlyout: openCloneFlyout, flyout: cloneFlyout } = useCloneActiveSourceFlyout({ - sourceToClone, - onConnectorCreated: () => { - setSourceToClone(null); - }, - }); + const { getCloneName } = useCloneActiveSource(); + + // Setup clone flyout + const { + openFlyout: openCloneFlyout, + flyout: cloneFlyout, + optionalPrompt: cloneOptionalPrompt, + } = useAddConnectorFlyout({}); const handleCloseEditFlyout = useCallback(() => { setSourceToEdit(null); @@ -58,12 +62,16 @@ export const ActiveSourcesView: React.FC = () => { const handleClone = useCallback( (source: ActiveSource) => { - setSourceToClone(source); - // Open the add connector flyout with pre-selected type - // User will need to select/create credentials (no secrets cloned) - openCloneFlyout(); + // Find the DataSource definition for this type + const ds = dataSources.find((d) => d.id === source.type); + if (ds) { + // Open the add connector flyout with pre-selected type and suggested name + // User will need to select/create credentials (no secrets cloned) + const cloneName = getCloneName(source); + openCloneFlyout(ds, ds.id, cloneName); + } }, - [openCloneFlyout] + [dataSources, openCloneFlyout, getCloneName] ); const handleDelete = useCallback((source: ActiveSource) => { @@ -127,6 +135,7 @@ export const ActiveSourcesView: React.FC = () => { )} {editFlyout} {cloneFlyout} + {cloneOptionalPrompt} ); }; diff --git a/x-pack/platform/plugins/shared/data_sources/public/application/components/connectors_view.tsx b/x-pack/platform/plugins/shared/data_sources/public/application/components/connectors_view.tsx index 9a830dde56fdb..35e11f8f8e869 100644 --- 
a/x-pack/platform/plugins/shared/data_sources/public/application/components/connectors_view.tsx +++ b/x-pack/platform/plugins/shared/data_sources/public/application/components/connectors_view.tsx @@ -5,7 +5,7 @@ * 2.0. */ -import React, { useState, useMemo, useCallback } from 'react'; +import React, { useMemo, useCallback, useState } from 'react'; import { css } from '@emotion/react'; import { EuiFlexGrid, @@ -27,14 +27,11 @@ import { } from '../../../common/constants'; export const DataSourcesView: React.FC = () => { - const { connectors, isLoading } = useDataSources(); - const [selectedConnector, setSelectedConnector] = useState(null); + const { connectors, dataSources, isLoading } = useDataSources(); const [activePage, setActivePage] = useState(0); const [itemsPerPage, setItemsPerPage] = useState(DEFAULT_ITEMS_PER_PAGE); - const { openFlyout, flyout } = useAddConnectorFlyout({ - dataSourceType: selectedConnector?.id, - }); + const { openFlyout, flyout, optionalPrompt } = useAddConnectorFlyout({}); const paginatedConnectors = useMemo(() => { const start = activePage * itemsPerPage; @@ -50,12 +47,13 @@ export const DataSourcesView: React.FC = () => { const handleConnectorClick = useCallback( (connector: Connector) => { - setSelectedConnector(connector); - // Open the flyout with the connector's action type ID - // For connectors from registry, this will be the stackConnector.type (e.g., '.notion') - openFlyout(connector.type); + // Find the full DataSource definition + const ds = dataSources.find((d) => d.id === connector.id); + if (ds) { + openFlyout(ds, ds.id); + } }, - [openFlyout] + [dataSources, openFlyout] ); if (isLoading) { @@ -124,6 +122,9 @@ export const DataSourcesView: React.FC = () => { {/* Connector creation flyout */} {flyout} + + {/* Optional connector prompt */} + {optionalPrompt} ); }; diff --git a/x-pack/platform/plugins/shared/data_sources/public/application/components/optional_connector_prompt.tsx b/x-pack/platform/plugins/shared/data_sources/public/application/components/optional_connector_prompt.tsx new file mode 100644 index 0000000000000..f2056282e5f38 --- /dev/null +++ b/x-pack/platform/plugins/shared/data_sources/public/application/components/optional_connector_prompt.tsx @@ -0,0 +1,136 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import React from 'react'; +import { + EuiModal, + EuiModalHeader, + EuiModalHeaderTitle, + EuiModalBody, + EuiModalFooter, + EuiButton, + EuiButtonEmpty, + EuiText, + EuiSpacer, + EuiCallOut, + useGeneratedHtmlId, +} from '@elastic/eui'; +import { i18n } from '@kbn/i18n'; +import type { StackConnectorConfig, ConnectorRole } from '@kbn/data-catalog-plugin'; + +export interface ConnectorPromptProps { + connectorConfig: StackConnectorConfig; + /** The effective role of this connector */ + role: ConnectorRole; + onSetUp: () => void; + onSkip: () => void; +} + +/** + * Prompt shown before configuring a connector. 
+ * - For 'optional' connectors: Shows description with Skip/Set up buttons
+ * - For 'required' connectors: Shows description with Continue button (no skip)
+ */
+export const ConnectorPrompt: React.FC<ConnectorPromptProps> = ({
+  connectorConfig,
+  role,
+  onSetUp,
+  onSkip,
+}) => {
+  const connectorName = connectorConfig.name || connectorConfig.type;
+  const isOptional = role === 'optional';
+  const modalTitleId = useGeneratedHtmlId();
+
+  return (
+    <EuiModal aria-labelledby={modalTitleId} onClose={onSkip}>
+      <EuiModalHeader>
+        <EuiModalHeaderTitle id={modalTitleId}>
+          {isOptional
+            ? i18n.translate('xpack.dataSources.connectorPrompt.optionalTitle', {
+                defaultMessage: 'Set up {connectorName}?',
+                values: { connectorName },
+              })
+            : i18n.translate('xpack.dataSources.connectorPrompt.requiredTitle', {
+                defaultMessage: 'Configure {connectorName}',
+                values: { connectorName },
+              })}
+        </EuiModalHeaderTitle>
+      </EuiModalHeader>
+
+      <EuiModalBody>
+        {connectorConfig.description && (
+          <>
+            <EuiText size="s">
+              <p>{connectorConfig.description}</p>
+            </EuiText>
+            <EuiSpacer size="m" />
+          </>
+        )}
+
+        {isOptional && connectorConfig.skipDescription && (
+          <>
+            <EuiCallOut size="s">
+              <p>{connectorConfig.skipDescription}</p>
+            </EuiCallOut>
+            <EuiSpacer size="m" />
+          </>
+        )}
+
+        {isOptional && !connectorConfig.skipDescription && (
+          <EuiCallOut size="s">
+            <p>
+              {i18n.translate('xpack.dataSources.connectorPrompt.skipInfo', {
+                defaultMessage: 'You can skip this for now and set it up later.',
+              })}
+            </p>
+          </EuiCallOut>
+        )}
+
+        {!isOptional && (
+          <EuiCallOut size="s">
+            <p>
+              {i18n.translate('xpack.dataSources.connectorPrompt.requiredInfo', {
+                defaultMessage: 'This connector is required for the data source to work properly.',
+              })}
+            </p>
+          </EuiCallOut>
+        )}
+      </EuiModalBody>
+
+      <EuiModalFooter>
+        {isOptional && (
+          <EuiButtonEmpty onClick={onSkip}>
+            {i18n.translate('xpack.dataSources.connectorPrompt.skipButton', {
+              defaultMessage: 'Skip',
+            })}
+          </EuiButtonEmpty>
+        )}
+        <EuiButton onClick={onSetUp} fill>
+          {isOptional
+            ? i18n.translate('xpack.dataSources.connectorPrompt.setUpButton', {
+                defaultMessage: 'Set up {connectorName}',
+                values: { connectorName },
+              })
+            : i18n.translate('xpack.dataSources.connectorPrompt.continueButton', {
+                defaultMessage: 'Continue',
+              })}
+        </EuiButton>
+      </EuiModalFooter>
+    </EuiModal>
+ ); +}; + +// Legacy export for backwards compatibility +export const OptionalConnectorPrompt = ConnectorPrompt; diff --git a/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_add_connector_flyout.ts b/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_add_connector_flyout.ts index 0e9e5f5905b0f..422c5e1e0a676 100644 --- a/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_add_connector_flyout.ts +++ b/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_add_connector_flyout.ts @@ -5,36 +5,89 @@ * 2.0. */ -import { useCallback, useMemo, useState, useRef } from 'react'; +import React, { useCallback, useMemo, useState, useRef } from 'react'; import type { ActionConnector } from '@kbn/triggers-actions-ui-plugin/public'; -import type { IconType } from '@elastic/eui'; import { i18n } from '@kbn/i18n'; import { useMutation, useQueryClient } from '@kbn/react-query'; +import type { DataSource, StackConnectorConfig, ConnectorRole } from '@kbn/data-catalog-plugin'; import { useKibana } from './use_kibana'; import { API_BASE_PATH } from '../../../common/constants'; import { queryKeys } from '../query_keys'; +import { ConnectorPrompt } from '../components/optional_connector_prompt'; + +/** + * Get the effective role of a connector. + * - First connector defaults to 'primary' if no role specified + * - Other connectors default to 'required' if no role specified + */ +function getEffectiveRole(connectorConfig: StackConnectorConfig, index: number): ConnectorRole { + if (connectorConfig.role) { + return connectorConfig.role; + } + // First connector is implicitly primary, others are implicitly required + return index === 0 ? 'primary' : 'required'; +} + +/** + * Build processing order: primary connectors first, then required, then optional. + * Returns array of indices into the original stackConnectors array. + */ +function buildProcessingOrder(connectorsList: StackConnectorConfig[]): number[] { + const primary: number[] = []; + const required: number[] = []; + const optional: number[] = []; + + connectorsList.forEach((sc, idx) => { + const role = getEffectiveRole(sc, idx); + if (role === 'primary') { + primary.push(idx); + } else if (role === 'required') { + required.push(idx); + } else { + optional.push(idx); + } + }); + + return [...primary, ...required, ...optional]; +} export interface UseAddConnectorFlyoutOptions { onConnectorCreated?: (connector: ActionConnector) => void; - dataSourceType?: string; - suggestedName?: string; - icon?: IconType; + onComplete?: () => void; +} + +interface ConnectorCredential { + connector_type: string; + credentials?: string; + existing_connector_id?: string; } interface CreateDataConnectorPayload { name: string; - stack_connector_id: string; type: string; + connector_credentials: ConnectorCredential[]; } +type FlowState = + | { type: 'idle' } + | { type: 'flyout'; connectorIndex: number; dataSource: DataSource; suggestedName?: string } + | { + type: 'connector_prompt'; + connectorIndex: number; + dataSource: DataSource; + suggestedName?: string; + role: ConnectorRole; + } + | { type: 'complete' }; + /** - * Hook to manage the connector creation flyout. + * Hook to manage connector creation flyouts for data sources. + * Supports multi-connector data sources by showing sequential flyouts + * and prompting for optional connectors. 
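+ *
+ * Sketch of intended usage, mirroring how the views in this patch consume the hook
+ * (variable names are illustrative):
+ *   const { openFlyout, flyout, optionalPrompt } = useAddConnectorFlyout({});
+ *   openFlyout(dataSource, dataSource.id, suggestedName);
+ *   // render {flyout} and {optionalPrompt} in the view so the multi-connector flow can progress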
*/ export const useAddConnectorFlyout = ({ onConnectorCreated, - dataSourceType, - suggestedName, - icon, + onComplete, }: UseAddConnectorFlyoutOptions = {}) => { const { services: { @@ -45,29 +98,40 @@ export const useAddConnectorFlyout = ({ } = useKibana(); const queryClient = useQueryClient(); - const [isOpen, setIsOpen] = useState(false); - const [selectedConnectorType, setSelectedConnectorType] = useState(); const loadingToastRef = useRef | undefined>(); - const openFlyout = useCallback((actionTypeId?: string) => { - setSelectedConnectorType(actionTypeId); - setIsOpen(true); - }, []); + // Flow state management + const [flowState, setFlowState] = useState({ type: 'idle' }); + // Track created connectors - state for UI updates, ref for closure-safe access in callbacks + const [createdConnectors, setCreatedConnectors] = useState([]); + // Ref mirrors state for closure-safe access (callbacks capture stale state values) + const createdConnectorsRef = useRef([]); - const closeFlyout = useCallback(() => { - setIsOpen(false); - setSelectedConnectorType(undefined); - }, []); + // Store active session data separately to survive flyout close events + const activeSessionRef = useRef<{ + dataSource: DataSource; + dataSourceType: string; + suggestedName?: string; + processingOrder: number[]; // Indices in priority order: primary → required → optional + } | null>(null); - // Mutation for creating data connector - const createDataConnectorMutation = useMutation({ + // Get stack connectors from session ref (survives flyout close) + // Memoized to prevent useMemo dependency issues + const stackConnectors = useMemo( + () => activeSessionRef.current?.dataSource?.stackConnectors ?? [], + // Re-compute when flow state changes (which updates activeSessionRef) + // eslint-disable-next-line react-hooks/exhaustive-deps + [flowState] + ); + + // Mutation for creating data source + const createDataSourceMutation = useMutation({ mutationFn: async (payload: CreateDataConnectorPayload) => { return http.post(`${API_BASE_PATH}`, { body: JSON.stringify(payload), }); }, onMutate: ({ name }) => { - // Show loading toast const loadingToast = toasts.addInfo( { title: i18n.translate('xpack.dataSources.hooks.useAddConnectorFlyout.creatingTitle', { @@ -75,108 +139,295 @@ export const useAddConnectorFlyout = ({ }), text: i18n.translate('xpack.dataSources.hooks.useAddConnectorFlyout.creatingText', { defaultMessage: 'Setting up {connectorName}...', - values: { - connectorName: name, - }, + values: { connectorName: name }, }), }, - { - toastLifeTimeMs: 30000, - } + { toastLifeTimeMs: 30000 } ); loadingToastRef.current = loadingToast; - return { loadingToast }; }, - onSuccess: (data, variables) => { - // Dismiss loading toast + onSuccess: (_, variables) => { if (loadingToastRef.current) { toasts.remove(loadingToastRef.current); loadingToastRef.current = undefined; } - // Show success toast toasts.addSuccess( i18n.translate('xpack.dataSources.hooks.useAddConnectorFlyout.createSuccessText', { defaultMessage: 'Data source {connectorName} connected successfully', - values: { - connectorName: variables.name, - }, + values: { connectorName: variables.name }, }) ); - // Invalidate queries to refresh Active Sources table queryClient.invalidateQueries(queryKeys.dataSources.list()); }, - onError: (error, variables) => { - // Dismiss loading toast + onError: (error) => { if (loadingToastRef.current) { toasts.remove(loadingToastRef.current); loadingToastRef.current = undefined; } - // Show error toast toasts.addError(error as Error, { 
title: i18n.translate('xpack.dataSources.hooks.useAddConnectorFlyout.createErrorTitle', { - defaultMessage: 'Failed to create data connector', + defaultMessage: 'Failed to create data source', }), }); }, }); + // Find next connector to process using the processing order (primary → required → optional) + const findNextInOrder = useCallback( + ( + orderPosition: number, + processingOrder: number[], + connectorsList: StackConnectorConfig[], + created: ActionConnector[] + ): { orderPosition: number; connectorIndex: number } | null => { + for (let i = orderPosition; i < processingOrder.length; i++) { + const connectorIndex = processingOrder[i]; + // Check if we already have a connector for this type + const alreadyCreated = created.some( + (c) => c.actionTypeId === connectorsList[connectorIndex].type + ); + if (!alreadyCreated) { + return { orderPosition: i, connectorIndex }; + } + } + return null; + }, + [] + ); + + // Process next connector or complete the flow + const processNextConnector = useCallback( + ( + orderPosition: number, + connectors: ActionConnector[], + ds: DataSource, + dsType: string, + processingOrder: number[], + name?: string + ) => { + const connectorsList = ds.stackConnectors ?? []; + const next = findNextInOrder(orderPosition, processingOrder, connectorsList, connectors); + + if (next === null) { + // No more connectors - create data source + setFlowState({ type: 'complete' }); + activeSessionRef.current = null; + createdConnectorsRef.current = []; + setCreatedConnectors([]); + + if (dsType && connectors.length > 0) { + const connectorCredentials: ConnectorCredential[] = connectors.map((c) => ({ + connector_type: c.actionTypeId, + existing_connector_id: c.id, + })); + + // Data source name comes from the primary connector + const primaryConfig = connectorsList.find( + (sc, idx) => getEffectiveRole(sc, idx) === 'primary' + ); + const primaryConnector = primaryConfig + ? connectors.find((c) => c.actionTypeId === primaryConfig.type) + : null; + + createDataSourceMutation.mutate({ + name: name || primaryConnector?.name || connectors[0].name, + type: dsType, + connector_credentials: connectorCredentials, + }); + } + + onComplete?.(); + return; + } + + const { connectorIndex } = next; + const connectorConfig = connectorsList[connectorIndex]; + const role = getEffectiveRole(connectorConfig, connectorIndex); + + if (role === 'primary') { + // Show flyout directly for primary connector + setFlowState({ + type: 'flyout', + connectorIndex, + dataSource: ds, + suggestedName: name, + }); + } else { + // Show prompt for required and optional connectors (after primary) + // - Required: prompt with "Continue" button (no skip) + // - Optional: prompt with "Set up" and "Skip" buttons + setFlowState({ + type: 'connector_prompt', + connectorIndex, + dataSource: ds, + suggestedName: name, + role, + }); + } + }, + [findNextInOrder, createDataSourceMutation, onComplete] + ); + + // Start the flow - accepts dataSource directly to avoid closure issues + const openFlyout = useCallback( + (ds?: DataSource, dsType?: string, name?: string) => { + // Reset both state and ref + createdConnectorsRef.current = []; + setCreatedConnectors([]); + + if (!ds || !dsType || (ds.stackConnectors?.length ?? 
0) === 0) { + // No data source definition - can't proceed + toasts.addError(new Error('No data source definition provided'), { + title: 'Cannot create data source', + }); + return; + } + + // Build processing order: primary → required → optional + const processingOrder = buildProcessingOrder(ds.stackConnectors ?? []); + + // Store session data in ref (survives flyout close events) + activeSessionRef.current = { + dataSource: ds, + dataSourceType: dsType, + suggestedName: name, + processingOrder, + }; + + // Start processing from first position in order + processNextConnector(0, [], ds, dsType, processingOrder, name); + }, + [processNextConnector, toasts] + ); + + // Close and reset - only resets if no connectors were created + // (the flyout calls onClose even after successful creation) + const closeFlyout = useCallback(() => { + // Don't reset if we have created connectors - let handleConnectorCreated manage flow + // Use ref for closure-safe check + if (createdConnectorsRef.current.length === 0) { + setFlowState({ type: 'idle' }); + activeSessionRef.current = null; + } + }, []); + + // Handle connector created from flyout const handleConnectorCreated = useCallback( (connector: ActionConnector) => { - // Call user callback first onConnectorCreated?.(connector); - // Close flyout immediately - closeFlyout(); + // Update both state and ref + const updatedConnectors = [...createdConnectorsRef.current, connector]; + createdConnectorsRef.current = updatedConnectors; + setCreatedConnectors(updatedConnectors); - // If no dataSourceType, skip data connector creation - if (!dataSourceType) { - return; + // Process next connector - get dataSource from session ref (survives flyout close) + const session = activeSessionRef.current; + if (session) { + // Start from position 0 - findNextInOrder will skip already-created connectors + processNextConnector( + 0, + updatedConnectors, + session.dataSource, + session.dataSourceType, + session.processingOrder, + session.suggestedName + ); } - - // Create data connector in the background using mutation - createDataConnectorMutation.mutate({ - name: connector.name, - stack_connector_id: connector.id, - type: dataSourceType, - }); }, - [dataSourceType, onConnectorCreated, closeFlyout, createDataConnectorMutation] + [onConnectorCreated, processNextConnector] ); + // Handle connector prompt - user wants to set up / continue + const handlePromptSetUp = useCallback(() => { + if (flowState.type === 'connector_prompt') { + setFlowState({ + type: 'flyout', + connectorIndex: flowState.connectorIndex, + dataSource: flowState.dataSource, + suggestedName: flowState.suggestedName, + }); + } + }, [flowState]); + + // Handle connector prompt - user wants to skip (only for optional connectors) + const handlePromptSkip = useCallback(() => { + const session = activeSessionRef.current; + if (flowState.type === 'connector_prompt' && session) { + // Find current position in processing order and advance past it + const currentOrderPos = session.processingOrder.indexOf(flowState.connectorIndex); + processNextConnector( + currentOrderPos + 1, + createdConnectorsRef.current, + session.dataSource, + session.dataSourceType, + session.processingOrder, + session.suggestedName + ); + } + }, [flowState, processNextConnector]); + + // Get current connector config + const currentConnectorConfig: StackConnectorConfig | undefined = + flowState.type === 'flyout' || flowState.type === 'connector_prompt' + ? 
stackConnectors[flowState.connectorIndex] + : undefined; + + // Render flyout const flyout = useMemo(() => { - if (!isOpen) { + if (flowState.type !== 'flyout') { return null; } + const connectorType = stackConnectors[flowState.connectorIndex]?.type; + const name = flowState.suggestedName; + return triggersActionsUi.getAddConnectorFlyout({ onClose: closeFlyout, onConnectorCreated: handleConnectorCreated, - ...(icon && { icon }), - ...(selectedConnectorType && { + ...(connectorType && { initialConnector: { - actionTypeId: selectedConnectorType, - ...(suggestedName && { name: suggestedName }), + actionTypeId: connectorType, + ...(name && { name }), }, }), }); - }, [ - isOpen, - selectedConnectorType, - suggestedName, - icon, - closeFlyout, - handleConnectorCreated, - triggersActionsUi, - ]); + }, [flowState, stackConnectors, closeFlyout, handleConnectorCreated, triggersActionsUi]); + + // Render connector prompt (for required and optional connectors after primary) + const connectorPrompt = useMemo(() => { + if (flowState.type !== 'connector_prompt' || !currentConnectorConfig) { + return null; + } + + return React.createElement(ConnectorPrompt, { + connectorConfig: currentConnectorConfig, + role: flowState.role, + onSetUp: handlePromptSetUp, + onSkip: handlePromptSkip, + }); + }, [flowState, currentConnectorConfig, handlePromptSetUp, handlePromptSkip]); return { openFlyout, closeFlyout, - isOpen, - isSaving: createDataConnectorMutation.isLoading, + isOpen: flowState.type !== 'idle' && flowState.type !== 'complete', + isSaving: createDataSourceMutation.isLoading, flyout, + connectorPrompt, + // Legacy alias for backwards compatibility + optionalPrompt: connectorPrompt, + // Progress info + currentConnectorIndex: + flowState.type === 'flyout' || flowState.type === 'connector_prompt' + ? flowState.connectorIndex + : -1, + totalConnectors: stackConnectors.length, + // Connectors created so far in the current flow + createdConnectors, + connectedCount: createdConnectors.length, }; }; diff --git a/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_clone_active_source_flyout.ts b/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_clone_active_source_flyout.ts deleted file mode 100644 index 67dec5df59198..0000000000000 --- a/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_clone_active_source_flyout.ts +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { useCallback, useEffect, useState } from 'react'; -import type { ActionConnector } from '@kbn/triggers-actions-ui-plugin/public'; -import type { ActiveSource } from '../../types/connector'; -import { useCloneActiveSource } from './use_clone_active_source'; -import { useAddConnectorFlyout } from './use_add_connector_flyout'; -import { useStackConnector } from './use_stack_connector'; -import { getConnectorIconType } from '../../utils/get_connector_icon'; - -export interface UseCloneActiveSourceFlyoutOptions { - sourceToClone: ActiveSource | null; - onConnectorCreated?: (connector: ActionConnector) => void; -} - -/** - * Hook to manage the connector clone flyout for active sources. - * Fetches the actual stack connector to get its real actionTypeId, - * ensuring clones use the correct connector type (e.g., .mcp for GitHub). 
- */ -export const useCloneActiveSourceFlyout = ({ - sourceToClone, - onConnectorCreated, -}: UseCloneActiveSourceFlyoutOptions) => { - const { getCloneName } = useCloneActiveSource(); - const [shouldOpenFlyout, setShouldOpenFlyout] = useState(false); - - // Get stack connector ID from the source to clone - const stackConnectorId = - sourceToClone && sourceToClone.stackConnectors.length > 0 - ? sourceToClone.stackConnectors[0] - : null; - - const { stackConnector, isLoading: isLoadingConnector } = useStackConnector({ - stackConnectorId, - enabled: shouldOpenFlyout, - }); - - // Generate suggested clone name - const clonedName = sourceToClone ? getCloneName(sourceToClone) : undefined; - - // Use the add connector flyout with the data source type, suggested name, and icon - const { - openFlyout: openAddFlyout, - flyout, - ...rest - } = useAddConnectorFlyout({ - dataSourceType: sourceToClone?.type, - suggestedName: clonedName, - icon: sourceToClone ? getConnectorIconType(sourceToClone.iconType) : undefined, - onConnectorCreated, - }); - - // When stack connector is loaded and we want to open the flyout, do it automatically - useEffect(() => { - if (shouldOpenFlyout && stackConnector && !isLoadingConnector) { - // Open the flyout with the actual stack connector type (e.g., '.mcp' for GitHub) - openAddFlyout(stackConnector.actionTypeId); - setShouldOpenFlyout(false); // Reset flag - } - }, [shouldOpenFlyout, stackConnector, isLoadingConnector, openAddFlyout]); - - const openFlyout = useCallback(() => { - setShouldOpenFlyout(true); - }, []); - - return { - openFlyout, - flyout, - isLoadingConnector, - ...rest, - }; -}; diff --git a/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_connectors.ts b/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_connectors.ts index bece2922a0a61..3ad04883c8e52 100644 --- a/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_connectors.ts +++ b/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_connectors.ts @@ -20,7 +20,7 @@ const transformDataSourceType = (dataSources: DataSource): Connector => { return { id: dataSources.id, name: dataSources.name, - type: dataSources.stackConnector.type, + type: dataSources.stackConnectors?.[0]?.type, // Already has '.' prefix (e.g., '.notion') iconType: dataSources.iconType, category: 'popular', }; @@ -39,10 +39,12 @@ export const useDataSources = () => { queryKeys.connectorTypes.list(), async () => { const service = new AvailableDataSourcesService({ http }); - const connectorTypes = await service.list(); + const dataSources = await service.list(); - // Transform connector types to our internal Connector interface - return connectorTypes.map(transformDataSourceType); + // Transform to our internal Connector interface while keeping raw data + const connectors = dataSources.map(transformDataSourceType); + + return { dataSources, connectors }; }, { onError: (err: Error) => { @@ -57,7 +59,8 @@ export const useDataSources = () => { ); return { - connectors: data ?? [], + connectors: data?.connectors ?? [], + dataSources: data?.dataSources ?? 
[], isLoading, error, }; diff --git a/x-pack/platform/plugins/shared/data_sources/server/routes/data_sources_helpers.test.ts b/x-pack/platform/plugins/shared/data_sources/server/routes/data_sources_helpers.test.ts index ea8ed1df4f86d..88b141329c63a 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/routes/data_sources_helpers.test.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/routes/data_sources_helpers.test.ts @@ -160,7 +160,7 @@ describe('createConnectorAndRelatedResources', () => { }, }; const mockDataSource = { - stackConnector: { type: actionTypeId, config: {} }, + stackConnectors: [{ type: actionTypeId, config: {}, role: 'primary' as const }], generateWorkflows: jest.fn().mockReturnValue([ { content: 'workflow yaml content', @@ -177,7 +177,7 @@ describe('createConnectorAndRelatedResources', () => { const result = await createDataSourceAndRelatedResources({ name: 'My Test Connector', type: 'test_type', - credentials: 'secret-token-123', + stackConnectorCredentials: [{ credentials: 'secret-token-123' }], savedObjectsClient: mockSavedObjectsClient, request: mockRequest, logger: mockLogger, @@ -235,7 +235,7 @@ describe('createConnectorAndRelatedResources', () => { attributes: { workflowIds: ['workflow-1'], toolIds: [], kscIds: ['ksc-1'] }, }; const mockDataSource = { - stackConnector: { type: actionTypeId, config: {} }, + stackConnectors: [{ type: actionTypeId, config: {}, role: 'primary' as const }], generateWorkflows: jest.fn().mockReturnValue([ { content: 'workflow yaml content', @@ -251,7 +251,7 @@ describe('createConnectorAndRelatedResources', () => { await createDataSourceAndRelatedResources({ name: 'Test', type: 'test', - credentials: 'token', + stackConnectorCredentials: [{ credentials: 'token' }], savedObjectsClient: mockSavedObjectsClient, request: mockRequest, logger: mockLogger, diff --git a/x-pack/platform/plugins/shared/data_sources/server/routes/data_sources_helpers.ts b/x-pack/platform/plugins/shared/data_sources/server/routes/data_sources_helpers.ts index 765106cd90cb3..25df5893aef48 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/routes/data_sources_helpers.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/routes/data_sources_helpers.ts @@ -11,7 +11,7 @@ import type { SavedObjectsClientContract } from '@kbn/core-saved-objects-api-ser import type { KibanaRequest } from '@kbn/core-http-server'; import type { ActionResult } from '@kbn/actions-plugin/server'; import type { Logger } from '@kbn/logging'; -import type { DataSource } from '@kbn/data-catalog-plugin'; +import type { DataSource, ConnectorReference } from '@kbn/data-catalog-plugin'; import { DEFAULT_NAMESPACE_STRING } from '@kbn/core-saved-objects-utils-server'; import { updateYamlField } from '@kbn/workflows-management-plugin/common/lib/yaml'; import { createStackConnector } from '../utils/create_stack_connector'; @@ -21,11 +21,22 @@ import type { } from '../types'; import { DATA_SOURCE_SAVED_OBJECT_TYPE, type DataSourceAttributes } from '../saved_objects'; +/** + * Credentials configuration for creating stack connectors. + * Each entry corresponds to a StackConnectorConfig in the data source. + */ +export interface StackConnectorCredentials { + /** The credentials (token, API key, etc.) 
for this connector */ + credentials: string; + /** Optional: Use an existing stack connector ID instead of creating a new one */ + existingConnectorId?: string; +} + interface CreateDataSourceAndResourcesParams { name: string; type: string; - credentials: string; - stackConnectorId?: string; + /** Array of credentials, one for each stack connector in the data source */ + stackConnectorCredentials: StackConnectorCredentials[]; savedObjectsClient: SavedObjectsClientContract; request: KibanaRequest; logger: Logger; @@ -47,9 +58,10 @@ function slugify(input: string): string { /** * Creates data source Saved Object, as well as all related resources (stack connectors, tools, workflows) * - * Supports two patterns: - * 1. Reuse existing stack connector: Pass stackConnectorId (e.g., from UI flyout) - * 2. Create new stack connector: Omit stackConnectorId, provide name and token + * Supports composite data sources with multiple stack connectors. + * For each connector config in the data source: + * - If existingConnectorId is provided, reuse it + * - Otherwise, create a new stack connector with the provided credentials */ export async function createDataSourceAndRelatedResources( params: CreateDataSourceAndResourcesParams @@ -57,8 +69,7 @@ export async function createDataSourceAndRelatedResources( const { name, type, - credentials, - stackConnectorId, + stackConnectorCredentials, savedObjectsClient, request, logger, @@ -70,36 +81,49 @@ export async function createDataSourceAndRelatedResources( const workflowIds: string[] = []; const toolIds: string[] = []; + const connectorRefs: ConnectorReference[] = []; - let finalStackConnectorId: string; + const toolRegistry = await agentBuilder.tools.getRegistry({ request }); - // Pattern 1: Reuse existing stack connector (from flyout) - if (stackConnectorId) { - logger.info(`Reusing existing stack connector: ${stackConnectorId}`); - finalStackConnectorId = stackConnectorId; - } - // Pattern 2: Create new stack connector (direct API call) - else { - const toolRegistry = await agentBuilder.tools.getRegistry({ request }); - const stackConnectorConfig = dataSource.stackConnector; - const stackConnector: ActionResult = await createStackConnector( - toolRegistry, - actions, - request, - stackConnectorConfig, - name, - toolIds, - credentials, - logger - ); + // Create or reuse stack connectors for each connector config + for (let i = 0; i < dataSource.stackConnectors.length; i++) { + const connectorConfig = dataSource.stackConnectors[i]; + const credentialsConfig = stackConnectorCredentials[i]; - finalStackConnectorId = stackConnector.id; + if (!credentialsConfig) { + logger.warn( + `No credentials provided for connector index ${i} (type: ${connectorConfig.type}), skipping` + ); + continue; + } + + // Reuse existing stack connector if ID is provided + if (credentialsConfig.existingConnectorId) { + logger.info( + `Reusing existing stack connector: ${credentialsConfig.existingConnectorId} for ${connectorConfig.type}` + ); + connectorRefs.push({ type: connectorConfig.type, id: credentialsConfig.existingConnectorId }); + } + // Create new stack connector + else { + logger.info(`Creating new stack connector for type: ${connectorConfig.type}`); + const stackConnector: ActionResult = await createStackConnector( + toolRegistry, + actions, + request, + connectorConfig, + name, + toolIds, + credentialsConfig.credentials, + logger + ); + connectorRefs.push({ type: connectorConfig.type, id: stackConnector.id }); + } } - // Create workflows and tools + // Create workflows and 
tools using connector references (type + id) const spaceId = getSpaceId(savedObjectsClient); - const workflowInfos = dataSource.generateWorkflows(finalStackConnectorId); - const toolRegistry = await agentBuilder.tools.getRegistry({ request }); + const workflowInfos = dataSource.generateWorkflows(connectorRefs); logger.info(`Creating workflows and tools for data source '${name}'`); @@ -119,7 +143,7 @@ export async function createDataSourceAndRelatedResources( workflowIds.push(workflow.id); if (workflowInfo.shouldGenerateABTool) { - // e.g., "sources.github.search_issues" -> "search_issues" + // Extract base workflow name (e.g., "sources.notion.search" -> "search") const workflowBaseName = originalName.split('.').pop() || originalName; // Tool ID structure: type.data_source_name.workflow_base_name @@ -148,7 +172,7 @@ export async function createDataSourceAndRelatedResources( updatedAt: now, workflowIds, toolIds, - kscIds: [finalStackConnectorId], + kscIds: connectorRefs.map((ref) => ref.id), }); return savedObject.id; diff --git a/x-pack/platform/plugins/shared/data_sources/server/routes/index.test.ts b/x-pack/platform/plugins/shared/data_sources/server/routes/index.test.ts index caad3f4661c61..639894c202f4e 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/routes/index.test.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/routes/index.test.ts @@ -48,7 +48,9 @@ describe('registerRoutes', () => { }, }; const mockDataCatalog = { - getCatalog: jest.fn(), + getCatalog: jest.fn().mockReturnValue({ + get: jest.fn().mockReturnValue({ iconType: '.notion' }), + }), }; const mockGetStartServices = jest.fn().mockResolvedValue([ @@ -132,11 +134,6 @@ describe('registerRoutes', () => { page: 1, }); - // Mock catalog to return iconType - mockDataCatalog.getCatalog.mockReturnValue({ - get: jest.fn().mockReturnValue({ iconType: '.notion' }), - }); - registerRoutes(dependencies); const routeHandler = mockRouter.get.mock.calls[0][1]; @@ -202,11 +199,6 @@ describe('registerRoutes', () => { mockSavedObjectsClient.get.mockResolvedValue(mockDataSource); - // Mock catalog to return iconType - mockDataCatalog.getCatalog.mockReturnValue({ - get: jest.fn().mockReturnValue({ iconType: '.notion' }), - }); - registerRoutes(dependencies); const routeHandler = mockRouter.get.mock.calls[1][1]; @@ -254,7 +246,7 @@ describe('registerRoutes', () => { describe('POST /api/data_sources', () => { it('should create a new data source and call the helper with correct params', async () => { const mockDataSource = { - stackConnector: { type: '.bearer_connector' }, + stackConnectors: [{ type: '.notion', required: true }], generateWorkflows: jest.fn(), }; @@ -271,7 +263,7 @@ describe('registerRoutes', () => { body: { name: 'My Notion Data Source', type: 'notion', - credentials: 'secret-token-123', + connector_credentials: [{ connector_type: '.notion', credentials: 'secret-token-123' }], }, }); const mockResponse = httpServerMock.createResponseFactory(); @@ -282,7 +274,9 @@ describe('registerRoutes', () => { expect.objectContaining({ name: 'My Notion Data Source', type: 'notion', - credentials: 'secret-token-123', + stackConnectorCredentials: [ + { credentials: 'secret-token-123', existingConnectorId: undefined }, + ], dataSource: mockDataSource, }) ); @@ -306,7 +300,7 @@ describe('registerRoutes', () => { body: { name: 'Invalid Data Source', type: 'invalid-type', - credentials: 'token', + connector_credentials: [{ connector_type: '.notion', credentials: 'token' }], }, }); const mockResponse = 
httpServerMock.createResponseFactory(); @@ -324,7 +318,7 @@ describe('registerRoutes', () => { it('should handle errors during creation', async () => { const mockDataSource = { - stackConnector: { type: '.bearer_connector' }, + stackConnectors: [{ type: '.notion', required: true }], generateWorkflows: jest.fn(), }; @@ -343,7 +337,7 @@ describe('registerRoutes', () => { body: { name: 'Test Data Source', type: 'notion', - credentials: 'token', + connector_credentials: [{ connector_type: '.notion', credentials: 'token' }], }, }); const mockResponse = httpServerMock.createResponseFactory(); diff --git a/x-pack/platform/plugins/shared/data_sources/server/routes/index.ts b/x-pack/platform/plugins/shared/data_sources/server/routes/index.ts index c38390fd176dd..b77c53ae2748e 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/routes/index.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/routes/index.ts @@ -17,6 +17,7 @@ import { schema } from '@kbn/config-schema'; import { createDataSourceAndRelatedResources, deleteDataSourceAndRelatedResources, + type StackConnectorCredentials, } from './data_sources_helpers'; import type { DataSourceAttributes } from '../saved_objects'; import { DATA_SOURCE_SAVED_OBJECT_TYPE } from '../saved_objects'; @@ -151,7 +152,7 @@ export function registerRoutes(dependencies: RouteDependencies) { const coreContext = await context.core; try { - const { name, type, credentials, stack_connector_id } = request.body; + const { name, type, connector_credentials } = request.body; const [, { actions, dataCatalog, agentBuilder }] = await getStartServices(); const savedObjectsClient = coreContext.savedObjects.client; @@ -167,11 +168,43 @@ export function registerRoutes(dependencies: RouteDependencies) { }); } - // Validate required fields based on pattern - if (!stack_connector_id && (!name || !credentials)) { + // Build the stack connector credentials array by matching provided credentials + // to the stackConnectors defined in the data source + const stackConnectorCredentials: StackConnectorCredentials[] = []; + + for (const connectorConfig of dataSource.stackConnectors) { + // Find matching credentials from the request + const matchingCreds = connector_credentials.find( + (cred) => cred.connector_type === connectorConfig.type + ); + + // Check if this connector is required based on its role + // 'primary' and 'required' roles require credentials, 'optional' does not + const role = connectorConfig.role ?? 
'required'; + const isRequired = role !== 'optional'; + + if (matchingCreds) { + // Credentials provided for this connector + stackConnectorCredentials.push({ + credentials: matchingCreds.credentials || '', + existingConnectorId: matchingCreds.existing_connector_id, + }); + } else if (isRequired) { + // Required connector but no credentials provided + return response.badRequest({ + body: { + message: `Missing credentials for required connector type "${connectorConfig.type}"`, + }, + }); + } + // Optional connector with no credentials - skip it + } + + // Validate we have a name + if (!name && stackConnectorCredentials.length > 0) { return response.badRequest({ body: { - message: 'name and token are required when stack_connector_id is not provided', + message: 'name is required when creating connectors', }, }); } @@ -179,8 +212,7 @@ export function registerRoutes(dependencies: RouteDependencies) { const dataSourceId = await createDataSourceAndRelatedResources({ name: name || `Data source for ${type}`, type, - credentials: credentials || '', - stackConnectorId: stack_connector_id, + stackConnectorCredentials, savedObjectsClient, request, logger, diff --git a/x-pack/platform/plugins/shared/data_sources/server/routes/schema.ts b/x-pack/platform/plugins/shared/data_sources/server/routes/schema.ts index 7edd2b4cbfc06..d59207f0619b6 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/routes/schema.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/routes/schema.ts @@ -43,9 +43,27 @@ export function convertSOtoAPIResponse( }; } +/** + * Schema for credentials for a single connector + */ +const connectorCredentialsSchema = schema.object({ + /** The connector type (e.g., '.google_drive', '.jina') */ + connector_type: schema.string({ minLength: 1 }), + /** The credentials (token, API key, etc.) for this connector */ + credentials: schema.maybe(schema.string({ minLength: 1 })), + /** Optional: Use an existing stack connector ID instead of creating a new one */ + existing_connector_id: schema.maybe(schema.string({ minLength: 1 })), +}); + export const createDataSourceRequestSchema = schema.object({ + /** The data source type (e.g., 'google_drive', 'notion') */ type: schema.string({ minLength: 1 }), + /** Display name for the data source */ name: schema.maybe(schema.string({ minLength: 1 })), - credentials: schema.maybe(schema.string({ minLength: 1 })), - stack_connector_id: schema.maybe(schema.string({ minLength: 1 })), + /** + * Credentials for each connector required by this data source. + * The array should match the stackConnectors defined in the data source definition. + * Optional connectors can be omitted. 
+ */ + connector_credentials: schema.arrayOf(connectorCredentialsSchema), }); diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/github/data_type.ts b/x-pack/platform/plugins/shared/data_sources/server/sources/github/data_type.ts index 36180fb362ab5..aa79a92265ec8 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/sources/github/data_type.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/github/data_type.ts @@ -7,7 +7,7 @@ import { i18n } from '@kbn/i18n'; import { MCPAuthType } from '@kbn/connector-schemas/mcp'; -import type { DataSource } from '@kbn/data-catalog-plugin'; +import type { DataSource, ConnectorReference } from '@kbn/data-catalog-plugin'; import { EARSSupportedOAuthProvider } from '@kbn/data-catalog-plugin'; import { generateGithubSearchIssuesWorkflow, @@ -23,7 +23,6 @@ export const githubDataSource: DataSource = { description: i18n.translate('xpack.dataSources.github.description', { defaultMessage: 'Connect to Github to pull data from your repository.', }), - iconType: '.github', oauthConfiguration: { @@ -33,53 +32,68 @@ export const githubDataSource: DataSource = { oauthBaseUrl: 'https://localhost:8052', // update once EARS deploys to QA }, - stackConnector: { - type: '.mcp', - config: { - serverUrl: 'https://api.githubcopilot.com/mcp/', - hasAuth: true, - authType: MCPAuthType.Bearer, + stackConnectors: [ + { + type: '.mcp', + config: { + serverUrl: 'https://api.githubcopilot.com/mcp/', + hasAuth: true, + authType: MCPAuthType.Bearer, + }, + importedTools: [ + 'get_commit', + 'get_file_contents', + 'get_label', + 'get_latest_release', + 'get_me', + 'get_tag', + 'get_team_members', + 'get_teams', + 'list_branches', + 'list_commits', + 'list_issue_types', + 'list_issues', + 'list_pull_requests', + 'list_releases', + 'list_tags', + 'pull_request_read', + ], + role: 'primary', + name: 'GitHub', + description: i18n.translate('xpack.dataSources.github.connectorDescription', { + defaultMessage: + 'Connect to GitHub to access repositories, issues, pull requests, and more.', + }), }, - importedTools: [ - 'get_commit', - 'get_file_contents', - 'get_label', - 'get_latest_release', - 'get_me', - 'get_tag', - 'get_team_members', - 'get_teams', - 'list_branches', - 'list_commits', - 'list_issue_types', - 'list_issues', - 'list_pull_requests', - 'list_releases', - 'list_tags', - 'pull_request_read', - ], - }, + ], + + generateWorkflows(connectors: ConnectorReference[]) { + // GitHub uses MCP connector type + const github = connectors.find((c) => c.type === '.mcp'); + + if (!github) { + throw new Error('GitHub MCP connector is required for GitHub data source'); + } - generateWorkflows(stackConnectorId: string) { return [ { - content: generateGithubSearchIssuesWorkflow(stackConnectorId), + content: generateGithubSearchIssuesWorkflow(github.id), shouldGenerateABTool: true, }, { - content: generateGithubSearchCodeWorkflow(stackConnectorId), + content: generateGithubSearchCodeWorkflow(github.id), shouldGenerateABTool: true, }, { - content: generateGithubSearchPullRequestsWorkflow(stackConnectorId), + content: generateGithubSearchPullRequestsWorkflow(github.id), shouldGenerateABTool: true, }, { - content: generateGithubSearchRepositoriesWorkflow(stackConnectorId), + content: generateGithubSearchRepositoriesWorkflow(github.id), shouldGenerateABTool: true, }, { - content: generateGithubSearchUsersWorkflow(stackConnectorId), + content: generateGithubSearchUsersWorkflow(github.id), shouldGenerateABTool: true, }, ]; diff --git 
a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/data_type.ts b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/data_type.ts new file mode 100644 index 0000000000000..54ce05c7953d6 --- /dev/null +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/data_type.ts @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { i18n } from '@kbn/i18n'; +import type { DataSource, ConnectorReference } from '@kbn/data-catalog-plugin'; +import { EARSSupportedOAuthProvider } from '@kbn/data-catalog-plugin'; +import { + generateGoogleDriveSearchFilesWorkflow, + generateGoogleDriveListFilesWorkflow, + generateGoogleDriveDownloadFilesWithJinaWorkflow, + generateGoogleDriveDownloadFilesWithIngestSimulateWorkflow, +} from './workflows'; + +export const googleDriveDataSource: DataSource = { + id: 'google_drive', + name: 'Google Drive', + description: i18n.translate('xpack.dataSources.googleDrive.description', { + defaultMessage: 'Connect to Google Drive to access files and folders.', + }), + iconType: '.google_drive', + + oauthConfiguration: { + provider: EARSSupportedOAuthProvider.GOOGLE, + initiatePath: '/oauth/start/google_drive', + fetchSecretsPath: '/oauth/fetch_request_secrets', + oauthBaseUrl: 'https://localhost:8052', // update once EARS deploys to QA + }, + + stackConnectors: [ + { + type: '.google_drive', + config: {}, + role: 'primary', + name: 'Google Drive', + description: i18n.translate('xpack.dataSources.googleDrive.connectorDescription', { + defaultMessage: 'Connect to Google Drive to search and access your files and folders.', + }), + }, + { + type: '.jina', + config: {}, + role: 'optional', + name: 'Jina Reader', + description: i18n.translate('xpack.dataSources.googleDrive.jinaDescription', { + defaultMessage: + 'Enable high-quality extraction of file contents to markdown using Jina Reader. This provides better handling of complex document layouts and semantic chunking. Get a free API key at jina.ai/reader', + }), + skipDescription: i18n.translate('xpack.dataSources.googleDrive.jinaSkipDescription', { + defaultMessage: + "File content extraction will use Elasticsearch's built-in attachment processor instead. 
This provides basic text extraction but may not handle complex layouts as well as Jina Reader.", + }), + }, + ], + + generateWorkflows(connectors: ConnectorReference[]) { + // Find connectors by type (not position) for reliable matching + const googleDrive = connectors.find((c) => c.type === '.google_drive'); + const jina = connectors.find((c) => c.type === '.jina'); + + if (!googleDrive) { + throw new Error('Google Drive connector is required for Google Drive data source'); + } + + const workflows = [ + { + content: generateGoogleDriveSearchFilesWorkflow(googleDrive.id), + shouldGenerateABTool: true, + }, + { + content: generateGoogleDriveListFilesWorkflow(googleDrive.id), + shouldGenerateABTool: true, + }, + ]; + + // Add download workflow - uses Jina Reader if configured for high-quality extraction, + // otherwise falls back to Elasticsearch's ingest.simulate with attachment processor + if (jina) { + // Jina Reader connector was configured - use it for high-quality extraction + workflows.push({ + content: generateGoogleDriveDownloadFilesWithJinaWorkflow(googleDrive.id, jina.id), + shouldGenerateABTool: true, + }); + } else { + // No Jina Reader - use Elasticsearch's built-in attachment processor as fallback + workflows.push({ + content: generateGoogleDriveDownloadFilesWithIngestSimulateWorkflow(googleDrive.id), + shouldGenerateABTool: true, + }); + } + + return workflows; + }, +}; diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/index.ts b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/index.ts new file mode 100644 index 0000000000000..5e44ffd88ccab --- /dev/null +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/index.ts @@ -0,0 +1,8 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +export { googleDriveDataSource } from './data_type'; diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows.ts b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows.ts new file mode 100644 index 0000000000000..6ad569836d721 --- /dev/null +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows.ts @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +export function generateGoogleDriveSearchFilesWorkflow(stackConnectorId: string): string { + return `version: '1' +name: 'sources.google_drive.search' +description: Search for files in Google Drive using Google's query syntax +enabled: true +triggers: + - type: manual +inputs: + - name: query + type: string + description: "Google Drive query. Use fullText contains 'term' for content search, name contains 'term' for filename, mimeType='application/pdf' for type filtering, modifiedTime > '2024-01-01' for date filtering. Combine with 'and'/'or'." + - name: pageSize + type: number + required: false + description: Number of results to return (default 250, max 1000) + - name: pageToken + type: string + required: false + description: "Pagination token. Pass the 'nextPageToken' value from a previous response to get the next page. 
When nextPageToken is absent in the response, there are no more results." +steps: + - name: search_files + type: google_drive.searchFiles + connector-id: ${stackConnectorId} + with: + query: "{{ inputs.query }}" + pageSize: "{{ inputs.pageSize }}" + pageToken: "{{ inputs.pageToken }}" +`; +} + +export function generateGoogleDriveListFilesWorkflow(stackConnectorId: string): string { + return `version: '1' +name: 'sources.google_drive.list' +description: List files and subfolders in a Google Drive folder +enabled: true +triggers: + - type: manual +inputs: + - name: folderId + type: string + default: root + description: "Folder ID to list contents of. Use 'root' for the root folder, or a folder ID from search/list results" + - name: pageSize + type: number + required: false + description: Number of results to return (default 250, max 1000) + - name: pageToken + type: string + required: false + description: "Pagination token. Pass the 'nextPageToken' value from a previous response to get the next page. When nextPageToken is absent in the response, there are no more results." + - name: orderBy + type: string + required: false + description: "Sort order: 'name', 'modifiedTime', or 'createdTime'" +steps: + - name: list_files + type: google_drive.listFiles + connector-id: ${stackConnectorId} + with: + folderId: "{{ inputs.folderId }}" + pageSize: "{{ inputs.pageSize }}" + pageToken: "{{ inputs.pageToken }}" + orderBy: "{{ inputs.orderBy }}" +`; +} + +/** + * Generates a composite workflow that downloads a file from Google Drive + * and extracts its content using Jina Reader for LLM consumption. + */ +export function generateGoogleDriveDownloadFilesWithJinaWorkflow( + googleDriveConnectorId: string, + jinaConnectorId: string +): string { + return `version: '1' +name: 'sources.google_drive.download' +description: Download a file and extract its text content to readable markdown (best for PDFs, Word docs, etc.) +enabled: true +triggers: + - type: manual +inputs: + - name: fileId + type: string + description: File ID from search or list results. Works with PDFs, Office docs, Google Docs, images with text, and more +steps: + - name: download_file + type: google_drive.downloadFile + connector-id: ${googleDriveConnectorId} + with: + fileId: "{{ inputs.fileId }}" + - name: convert_to_markdown + type: jina.fileToMarkdown + connector-id: ${jinaConnectorId} + with: + file: "{{ steps.download_file.output.content }}" + filename: "{{ steps.download_file.output.name }}" +`; +} + +/** + * Generates a composite workflow that downloads a file from Google Drive + * and extracts its content using Elasticsearch's attachment processor + * via the ingest.simulate API. + * + * This is the fallback when no Jina Reader connector is configured. + * Uses Apache Tika under the hood for text extraction. + */ +export function generateGoogleDriveDownloadFilesWithIngestSimulateWorkflow( + googleDriveConnectorId: string +): string { + return `version: '1' +name: 'sources.google_drive.download' +description: Download a file and extract its text content (best for PDFs, Word docs, etc.) +enabled: true +triggers: + - type: manual +inputs: + - name: fileId + type: string + description: File ID from search or list results. 
Works with PDFs, Office docs, Google Docs, and other text-based formats +steps: + - name: download_file + type: google_drive.downloadFile + connector-id: ${googleDriveConnectorId} + with: + fileId: "{{ inputs.fileId }}" + - name: extract_content + type: elasticsearch.ingest.simulate + with: + pipeline: + processors: + - attachment: + field: data + indexed_chars: -1 + remove_binary: true + docs: + - _id: "{{ inputs.fileId }}" + _source: + filename: "{{ steps.download_file.output.name }}" + data: "{{ steps.download_file.output.content }}" +`; +} diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/index.ts b/x-pack/platform/plugins/shared/data_sources/server/sources/index.ts index 7a325e01d8418..b06df9e489c23 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/sources/index.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/index.ts @@ -7,8 +7,10 @@ import type { DataCatalogPluginSetup } from '@kbn/data-catalog-plugin/server'; import { notionDataSource } from './notion'; import { githubDataSource } from './github'; +import { googleDriveDataSource } from './google_drive'; export function registerDataSources(dataCatalog: DataCatalogPluginSetup) { dataCatalog.register(notionDataSource); dataCatalog.register(githubDataSource); + dataCatalog.register(googleDriveDataSource); } diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/notion/data_type.ts b/x-pack/platform/plugins/shared/data_sources/server/sources/notion/data_type.ts index 907cb6f84cf13..306417c1af184 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/sources/notion/data_type.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/notion/data_type.ts @@ -6,7 +6,7 @@ */ import { i18n } from '@kbn/i18n'; -import type { DataSource } from '@kbn/data-catalog-plugin'; +import type { DataSource, ConnectorReference } from '@kbn/data-catalog-plugin'; import { EARSSupportedOAuthProvider } from '@kbn/data-catalog-plugin'; import { generateGetDataSourceWorkflow, @@ -21,7 +21,6 @@ export const notionDataSource: DataSource = { description: i18n.translate('xpack.dataSources.notion.description', { defaultMessage: 'Connect to Notion to pull data from your workspace.', }), - iconType: '.notion', oauthConfiguration: { @@ -31,17 +30,30 @@ export const notionDataSource: DataSource = { oauthBaseUrl: 'https://localhost:8052', }, - stackConnector: { - type: '.notion', - config: {}, - }, + stackConnectors: [ + { + type: '.notion', + config: {}, + role: 'primary', + name: 'Notion', + description: i18n.translate('xpack.dataSources.notion.connectorDescription', { + defaultMessage: 'Connect to Notion to access pages, databases, and workspace content.', + }), + }, + ], + + generateWorkflows(connectors: ConnectorReference[]) { + const notion = connectors.find((c) => c.type === '.notion'); + + if (!notion) { + throw new Error('Notion connector is required for Notion data source'); + } - generateWorkflows(stackConnectorId: string) { return [ - { content: generateQueryWorkflow(stackConnectorId), shouldGenerateABTool: true }, - { content: generateSearchWorkflow(stackConnectorId), shouldGenerateABTool: true }, - { content: generateGetPageWorkflow(stackConnectorId), shouldGenerateABTool: true }, - { content: generateGetDataSourceWorkflow(stackConnectorId), shouldGenerateABTool: true }, + { content: generateQueryWorkflow(notion.id), shouldGenerateABTool: true }, + { content: generateSearchWorkflow(notion.id), shouldGenerateABTool: true }, + { content: 
generateGetPageWorkflow(notion.id), shouldGenerateABTool: true }, + { content: generateGetDataSourceWorkflow(notion.id), shouldGenerateABTool: true }, ]; }, }; From 15017fac2263bdfd0701a008d7fe88c3c9326edd Mon Sep 17 00:00:00 2001 From: Apostolos Matsagkas Date: Thu, 29 Jan 2026 13:06:54 +0200 Subject: [PATCH 02/11] Take care of empty inputs and move away from ingest action --- .../src/specs/google_drive/google_drive.ts | 9 +- .../included_operations.ts | 1 - .../elasticsearch.ingest_simulate.gen.ts | 83 -- .../spec/elasticsearch/generated/index.ts | 8 +- .../generated/schemas/es_openapi_zod.gen.ts | 1012 +---------------- .../server/sources/google_drive/workflows.ts | 51 +- 6 files changed, 39 insertions(+), 1125 deletions(-) delete mode 100644 src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/elasticsearch.ingest_simulate.gen.ts diff --git a/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts index bb1134b3d955d..c269f3afa9c97 100644 --- a/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts +++ b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts @@ -113,8 +113,7 @@ export const GoogleDriveConnector: ConnectorSpec = { isTool: true, input: z.object({ folderId: z - .string() - .optional() + .preprocess((val) => (val === '' ? undefined : val), z.string().optional()) .default(DEFAULT_FOLDER_ID) .describe("Parent folder ID ('root' for root folder)"), pageSize: z @@ -124,8 +123,10 @@ export const GoogleDriveConnector: ConnectorSpec = { .describe('Maximum number of files to return (1-1000)'), pageToken: z.string().optional().describe('Token for pagination'), orderBy: z - .enum(['name', 'modifiedTime', 'createdTime']) - .optional() + .preprocess( + (val) => (val === '' ? undefined : val), + z.enum(['name', 'modifiedTime', 'createdTime']).optional() + ) .describe('Field to order results by'), }), handler: async (ctx, input) => { diff --git a/src/platform/packages/shared/kbn-workflows/scripts/generate_es_connectors/included_operations.ts b/src/platform/packages/shared/kbn-workflows/scripts/generate_es_connectors/included_operations.ts index 6389095252f97..df1531362c7ec 100644 --- a/src/platform/packages/shared/kbn-workflows/scripts/generate_es_connectors/included_operations.ts +++ b/src/platform/packages/shared/kbn-workflows/scripts/generate_es_connectors/included_operations.ts @@ -16,5 +16,4 @@ export const INCLUDED_OPERATIONS = [ 'indices.create', 'bulk', 'esql.query', - 'ingest.simulate', ]; diff --git a/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/elasticsearch.ingest_simulate.gen.ts b/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/elasticsearch.ingest_simulate.gen.ts deleted file mode 100644 index 2325400064b1e..0000000000000 --- a/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/elasticsearch.ingest_simulate.gen.ts +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -/* - * AUTO-GENERATED FILE - DO NOT EDIT - * - * Source: elasticsearch-specification repository, operations: ingest-simulate, ingest-simulate-1, ingest-simulate-2, ingest-simulate-3 - * - * To regenerate: node scripts/generate_workflow_es_contracts.js - */ - -import { z } from '@kbn/zod/v4'; - -import { - ingest_simulate1_request, - ingest_simulate1_response, - ingest_simulate2_request, - ingest_simulate2_response, - ingest_simulate3_request, - ingest_simulate3_response, - ingest_simulate_request, - ingest_simulate_response, -} from './schemas/es_openapi_zod.gen'; -import { getShapeAt } from '../../../common/utils/zod'; - -// import all needed request and response schemas generated from the OpenAPI spec -import type { InternalConnectorContract } from '../../../types/latest'; - -// export contract -export const INGEST_SIMULATE_CONTRACT: InternalConnectorContract = { - type: 'elasticsearch.ingest.simulate', - summary: `Simulate a pipeline`, - description: `Simulate a pipeline. - -Run an ingest pipeline against a set of provided documents. -You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request. - - Documentation: https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate`, - methods: ['GET', 'POST'], - patterns: ['_ingest/pipeline/_simulate', '_ingest/pipeline/{id}/_simulate'], - documentation: - 'https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate', - parameterTypes: { - headerParams: [], - pathParams: ['id'], - urlParams: ['verbose'], - bodyParams: ['docs', 'pipeline'], - }, - paramsSchema: z.union([ - z.object({ - ...getShapeAt(ingest_simulate_request, 'body'), - ...getShapeAt(ingest_simulate_request, 'path'), - ...getShapeAt(ingest_simulate_request, 'query'), - }), - z.object({ - ...getShapeAt(ingest_simulate1_request, 'body'), - ...getShapeAt(ingest_simulate1_request, 'path'), - ...getShapeAt(ingest_simulate1_request, 'query'), - }), - z.object({ - ...getShapeAt(ingest_simulate2_request, 'body'), - ...getShapeAt(ingest_simulate2_request, 'path'), - ...getShapeAt(ingest_simulate2_request, 'query'), - }), - z.object({ - ...getShapeAt(ingest_simulate3_request, 'body'), - ...getShapeAt(ingest_simulate3_request, 'path'), - ...getShapeAt(ingest_simulate3_request, 'query'), - }), - ]), - outputSchema: z.union([ - ingest_simulate_response, - ingest_simulate1_response, - ingest_simulate2_response, - ingest_simulate3_response, - ]), -}; diff --git a/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/index.ts b/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/index.ts index 68f7e3ea0b4f5..47482be5179a1 100644 --- a/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/index.ts +++ b/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/index.ts @@ -10,9 +10,9 @@ /* * AUTO-GENERATED FILE - DO NOT EDIT * - * This file contains Elasticsearch connector definitions generated from elasticsearch-specification repository (https://github.com/elastic/elasticsearch-specification/commit/b0cefb9). 
- * Generated at: 2026-01-25T11:50:01.506Z - * Source: elasticsearch-specification repository (9 APIs) + * This file contains Elasticsearch connector definitions generated from elasticsearch-specification repository (https://github.com/elastic/elasticsearch-specification/commit/868f66c). + * Generated at: 2026-01-21T16:44:56.585Z + * Source: elasticsearch-specification repository (8 APIs) * * To regenerate: node scripts/generate_workflow_es_contracts.js */ @@ -24,7 +24,6 @@ import { INDEX_CONTRACT } from './elasticsearch.index.gen'; import { INDICES_CREATE_CONTRACT } from './elasticsearch.indices_create.gen'; import { INDICES_DELETE_CONTRACT } from './elasticsearch.indices_delete.gen'; import { INDICES_EXISTS_CONTRACT } from './elasticsearch.indices_exists.gen'; -import { INGEST_SIMULATE_CONTRACT } from './elasticsearch.ingest_simulate.gen'; import { SEARCH_CONTRACT } from './elasticsearch.search.gen'; import { UPDATE_CONTRACT } from './elasticsearch.update.gen'; import type { InternalConnectorContract } from '../../../types/latest'; @@ -37,7 +36,6 @@ export const GENERATED_ELASTICSEARCH_CONNECTORS: InternalConnectorContract[] = [ INDICES_CREATE_CONTRACT, INDICES_DELETE_CONTRACT, INDICES_EXISTS_CONTRACT, - INGEST_SIMULATE_CONTRACT, SEARCH_CONTRACT, UPDATE_CONTRACT, ]; diff --git a/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/schemas/es_openapi_zod.gen.ts b/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/schemas/es_openapi_zod.gen.ts index 6b947f8574e65..a6393344bc7c8 100644 --- a/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/schemas/es_openapi_zod.gen.ts +++ b/src/platform/packages/shared/kbn-workflows/spec/elasticsearch/generated/schemas/es_openapi_zod.gen.ts @@ -2319,79 +2319,6 @@ export const types_indices = z.union([ z.array(types_index_name) ]); -export const ingest_types_user_agent_property = z.unknown(); - -export const types_grok_pattern = z.string(); - -export const ingest_types_json_processor_conflict_strategy = z.enum(['replace', 'merge']); - -export const ingest_types_input_config = z.object({ - input_field: z.string(), - output_field: z.string() -}); - -export const ingest_types_inference_config_classification = z.object({ - num_top_classes: z.optional(z.number().register(z.globalRegistry, { - description: 'Specifies the number of top class predictions to return.' - })), - num_top_feature_importance_values: z.optional(z.number().register(z.globalRegistry, { - description: 'Specifies the maximum number of feature importance values per document.' - })), - results_field: z.optional(types_field), - top_classes_results_field: z.optional(types_field), - prediction_field_type: z.optional(z.string().register(z.globalRegistry, { - description: 'Specifies the type of the predicted field to write.\nValid values are: `string`, `number`, `boolean`.' - })) -}); - -export const ingest_types_inference_config_regression = z.object({ - results_field: z.optional(types_field), - num_top_feature_importance_values: z.optional(z.number().register(z.globalRegistry, { - description: 'Specifies the maximum number of feature importance values per document.' 
- })) -}); - -export const ingest_types_inference_config = z.object({ - regression: z.optional(ingest_types_inference_config_regression), - classification: z.optional(ingest_types_inference_config_classification) -}); - -export const ingest_types_geo_grid_target_format = z.enum(['geojson', 'wkt']); - -export const ingest_types_geo_grid_tile_type = z.enum([ - 'geotile', - 'geohex', - 'geohash' -]); - -export const ingest_types_fingerprint_digest = z.enum([ - 'MD5', - 'SHA-1', - 'SHA-256', - 'SHA-512', - 'MurmurHash3' -]); - -export const types_geo_shape_relation = z.enum([ - 'intersects', - 'disjoint', - 'within', - 'contains' -]); - -export const ingest_types_convert_type = z.enum([ - 'integer', - 'long', - 'double', - 'float', - 'boolean', - 'ip', - 'string', - 'auto' -]); - -export const ingest_types_shape_type = z.enum(['geo_shape', 'shape']); - export const types_uuid = z.string(); export const types_mapping_data_stream_timestamp = z.object({ @@ -2638,18 +2565,6 @@ export const types_mapping_all_field = z.object({ store_term_vectors: z.boolean() }); -/** - * Some APIs will return values such as numbers also as a string (notably epoch timestamps). This behavior - * is used to capture this behavior while keeping the semantics of the field type. - * - * Depending on the target language, code generators can keep the union or remove it and leniently parse - * strings to the target type. - */ -export const spec_utils_stringified_version_number = z.union([ - types_version_number, - z.string() -]); - /** * Base type for multi-bucket aggregation results that can hold sub-aggregations results. */ @@ -5977,87 +5892,15 @@ export const types_aggregations_hdr_percentile_ranks_aggregate = types_aggregati export const types_aggregations_hdr_percentiles_aggregate = types_aggregations_percentiles_aggregate_base.and(z.record(z.string(), z.unknown())); -export const ingest_types_pipeline_simulation_status_options = z.enum([ - 'success', - 'error', - 'error_ignored', - 'skipped', - 'dropped' -]); - -export const ingest_types_redact = z.object({ - _is_redacted: z.boolean().register(z.globalRegistry, { - description: 'indicates if document has been redacted' - }) -}); - -export const ingest_types_ingest = z.object({ - _redact: z.optional(ingest_types_redact), - timestamp: types_date_time, - pipeline: z.optional(types_name) -}); - -/** - * The simulated document, with optional metadata. - */ -export const ingest_types_document_simulation = z.object({ - _id: types_id, - _index: types_index_name, - _ingest: ingest_types_ingest, - _routing: z.optional(z.string().register(z.globalRegistry, { - description: 'Value used to send the document to a specific primary shard.' - })), - _source: z.record(z.string(), z.record(z.string(), z.unknown())).register(z.globalRegistry, { - description: 'JSON body for the document.' - }), - _version: z.optional(spec_utils_stringified_version_number), - _version_type: z.optional(types_version_type) -}).register(z.globalRegistry, { - description: 'The simulated document, with optional metadata.' 
-}); - -export const ingest_types_pipeline_processor_result = z.object({ - doc: z.optional(ingest_types_document_simulation), - tag: z.optional(z.string()), - processor_type: z.optional(z.string()), - status: z.optional(ingest_types_pipeline_simulation_status_options), - description: z.optional(z.string()), - ignored_error: z.optional(types_error_cause), - error: z.optional(types_error_cause) -}); - -export const ingest_types_simulate_document_result = z.object({ - doc: z.optional(ingest_types_document_simulation), - error: z.optional(types_error_cause), - processor_results: z.optional(z.array(ingest_types_pipeline_processor_result)) -}); - -export const ingest_types_document = z.object({ - _id: z.optional(types_id), - _index: z.optional(types_index_name), - _source: z.record(z.string(), z.unknown()).register(z.globalRegistry, { - description: 'JSON body for the document.' - }) -}); - -export const ingest_types_field_access_pattern = z.enum(['classic', 'flexible']); - export const types_indices_response_base = types_acknowledged_response_base.and(z.object({ _shards: z.optional(types_shard_statistics) })); -export const esql_types_single_or_multi_value = z.union([ +export const esql_types_esql_param = z.union([ types_field_value, z.array(types_field_value) ]); -export const esql_types_named_value = z.record(z.string(), esql_types_single_or_multi_value); - -export const esql_types_esql_params = z.union([ - z.array(esql_types_single_or_multi_value), - z.array(esql_types_named_value) -]); - export const esql_types_esql_shard_failure = z.object({ shard: z.number(), index: z.union([ @@ -7927,753 +7770,6 @@ export const types_aggregations_adjacency_matrix_aggregation = types_aggregation })) })); -export const ingest_types_user_agent_processor = z.lazy((): any => ingest_types_processor_base).and(z.object({ - field: types_field, - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' - })), - regex_file: z.optional(z.string().register(z.globalRegistry, { - description: 'The name of the file in the `config/ingest-user-agent` directory containing the regular expressions for parsing the user agent string. Both the directory and the file have to be created before starting Elasticsearch. If not specified, ingest-user-agent will use the `regexes.yaml` from uap-core it ships with.' - })), - target_field: z.optional(types_field), - properties: z.optional(z.array(ingest_types_user_agent_property).register(z.globalRegistry, { - description: 'Controls what properties are added to `target_field`.' - })), - extract_device_type: z.optional(z.boolean().register(z.globalRegistry, { - description: 'Extracts device type from the user agent string on a best-effort basis.' 
- })) -})); - -export const ingest_types_processor_container = z.object({ - get append() { - return z.optional(z.lazy((): any => ingest_types_append_processor)); - }, - get attachment() { - return z.optional(z.lazy((): any => ingest_types_attachment_processor)); - }, - get bytes() { - return z.optional(z.lazy((): any => ingest_types_bytes_processor)); - }, - get cef() { - return z.optional(z.lazy((): any => ingest_types_cef_processor)); - }, - get circle() { - return z.optional(z.lazy((): any => ingest_types_circle_processor)); - }, - get community_id() { - return z.optional(z.lazy((): any => ingest_types_community_id_processor)); - }, - get convert() { - return z.optional(z.lazy((): any => ingest_types_convert_processor)); - }, - get csv() { - return z.optional(z.lazy((): any => ingest_types_csv_processor)); - }, - get date() { - return z.optional(z.lazy((): any => ingest_types_date_processor)); - }, - get date_index_name() { - return z.optional(z.lazy((): any => ingest_types_date_index_name_processor)); - }, - get dissect() { - return z.optional(z.lazy((): any => ingest_types_dissect_processor)); - }, - get dot_expander() { - return z.optional(z.lazy((): any => ingest_types_dot_expander_processor)); - }, - get drop() { - return z.optional(z.lazy((): any => ingest_types_drop_processor)); - }, - get enrich() { - return z.optional(z.lazy((): any => ingest_types_enrich_processor)); - }, - get fail() { - return z.optional(z.lazy((): any => ingest_types_fail_processor)); - }, - get fingerprint() { - return z.optional(z.lazy((): any => ingest_types_fingerprint_processor)); - }, - get foreach() { - return z.optional(z.lazy((): any => ingest_types_foreach_processor)); - }, - get ip_location() { - return z.optional(z.lazy((): any => ingest_types_ip_location_processor)); - }, - get geo_grid() { - return z.optional(z.lazy((): any => ingest_types_geo_grid_processor)); - }, - get geoip() { - return z.optional(z.lazy((): any => ingest_types_geo_ip_processor)); - }, - get grok() { - return z.optional(z.lazy((): any => ingest_types_grok_processor)); - }, - get gsub() { - return z.optional(z.lazy((): any => ingest_types_gsub_processor)); - }, - get html_strip() { - return z.optional(z.lazy((): any => ingest_types_html_strip_processor)); - }, - get inference() { - return z.optional(z.lazy((): any => ingest_types_inference_processor)); - }, - get join() { - return z.optional(z.lazy((): any => ingest_types_join_processor)); - }, - get json() { - return z.optional(z.lazy((): any => ingest_types_json_processor)); - }, - get kv() { - return z.optional(z.lazy((): any => ingest_types_key_value_processor)); - }, - get lowercase() { - return z.optional(z.lazy((): any => ingest_types_lowercase_processor)); - }, - get network_direction() { - return z.optional(z.lazy((): any => ingest_types_network_direction_processor)); - }, - get pipeline() { - return z.optional(z.lazy((): any => ingest_types_pipeline_processor)); - }, - get redact() { - return z.optional(z.lazy((): any => ingest_types_redact_processor)); - }, - get registered_domain() { - return z.optional(z.lazy((): any => ingest_types_registered_domain_processor)); - }, - get remove() { - return z.optional(z.lazy((): any => ingest_types_remove_processor)); - }, - get rename() { - return z.optional(z.lazy((): any => ingest_types_rename_processor)); - }, - get reroute() { - return z.optional(z.lazy((): any => ingest_types_reroute_processor)); - }, - get script() { - return z.optional(z.lazy((): any => ingest_types_script_processor)); - }, - get set() { - return 
z.optional(z.lazy((): any => ingest_types_set_processor)); - }, - get set_security_user() { - return z.optional(z.lazy((): any => ingest_types_set_security_user_processor)); - }, - get sort() { - return z.optional(z.lazy((): any => ingest_types_sort_processor)); - }, - get split() { - return z.optional(z.lazy((): any => ingest_types_split_processor)); - }, - get terminate() { - return z.optional(z.lazy((): any => ingest_types_terminate_processor)); - }, - get trim() { - return z.optional(z.lazy((): any => ingest_types_trim_processor)); - }, - get uppercase() { - return z.optional(z.lazy((): any => ingest_types_uppercase_processor)); - }, - get urldecode() { - return z.optional(z.lazy((): any => ingest_types_url_decode_processor)); - }, - get uri_parts() { - return z.optional(z.lazy((): any => ingest_types_uri_parts_processor)); - }, - user_agent: z.optional(ingest_types_user_agent_processor) -}); - -export const ingest_types_uri_parts_processor = z.lazy((): any => ingest_types_processor_base).and(z.object({ - field: types_field, - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' - })), - keep_original: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true`, the processor copies the unparsed URI to `.original`.' - })), - remove_if_successful: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true`, the processor removes the `field` after parsing the URI string.\nIf parsing fails, the processor does not remove the `field`.' - })), - target_field: z.optional(types_field) -})); - -export const ingest_types_processor_base = z.object({ - description: z.optional(z.string().register(z.globalRegistry, { - description: 'Description of the processor.\nUseful for describing the purpose of the processor or its configuration.' - })), - if: z.optional(types_script), - ignore_failure: z.optional(z.boolean().register(z.globalRegistry, { - description: 'Ignore failures for the processor.' - })), - on_failure: z.optional(z.array(ingest_types_processor_container).register(z.globalRegistry, { - description: 'Handle failures for the processor.' - })), - tag: z.optional(z.string().register(z.globalRegistry, { - description: 'Identifier for the processor.\nUseful for debugging and metrics.' - })) -}); - -export const ingest_types_url_decode_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' - })), - target_field: z.optional(types_field) -})); - -export const ingest_types_uppercase_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' - })), - target_field: z.optional(types_field) -})); - -export const ingest_types_trim_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' 
- })), - target_field: z.optional(types_field) -})); - -export const ingest_types_terminate_processor = ingest_types_processor_base.and(z.record(z.string(), z.unknown())); - -export const ingest_types_split_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' - })), - preserve_trailing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'Preserves empty trailing fields, if any.' - })), - separator: z.string().register(z.globalRegistry, { - description: 'A regex which matches the separator, for example, `,` or `\\s+`.' - }), - target_field: z.optional(types_field) -})); - -export const ingest_types_sort_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - order: z.optional(types_sort_order), - target_field: z.optional(types_field) -})); - -export const ingest_types_set_security_user_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - properties: z.optional(z.array(z.string()).register(z.globalRegistry, { - description: 'Controls what user related properties are added to the field.' - })) -})); - -export const ingest_types_set_processor = ingest_types_processor_base.and(z.object({ - copy_from: z.optional(types_field), - field: types_field, - ignore_empty_value: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `value` is a template snippet that evaluates to `null` or the empty string, the processor quietly exits without modifying the document.' - })), - media_type: z.optional(z.string().register(z.globalRegistry, { - description: 'The media type for encoding `value`.\nApplies only when value is a template snippet.\nMust be one of `application/json`, `text/plain`, or `application/x-www-form-urlencoded`.' - })), - override: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` processor will update fields with pre-existing non-null-valued field.\nWhen set to `false`, such fields will not be touched.' - })), - value: z.optional(z.record(z.string(), z.unknown()).register(z.globalRegistry, { - description: 'The value to be set for the field.\nSupports template snippets.\nMay specify only one of `value` or `copy_from`.' - })) -})); - -export const ingest_types_script_processor = ingest_types_processor_base.and(z.object({ - id: z.optional(types_id), - lang: z.optional(types_script_language), - params: z.optional(z.record(z.string(), z.record(z.string(), z.unknown())).register(z.globalRegistry, { - description: 'Object containing parameters for the script.' - })), - source: z.optional(types_script_source) -})); - -export const ingest_types_reroute_processor = ingest_types_processor_base.and(z.object({ - destination: z.optional(z.string().register(z.globalRegistry, { - description: 'A static value for the target. Can’t be set when the dataset or namespace option is set.' - })), - dataset: z.optional(z.union([ - z.string(), - z.array(z.string()) - ])), - namespace: z.optional(z.union([ - z.string(), - z.array(z.string()) - ])) -})); - -export const ingest_types_rename_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' 
- })), - target_field: types_field -})); - -export const ingest_types_remove_processor = ingest_types_processor_base.and(z.object({ - field: types_fields, - keep: z.optional(types_fields), - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' - })) -})); - -export const ingest_types_registered_domain_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - target_field: z.optional(types_field), - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If true and any required fields are missing, the processor quietly exits\nwithout modifying the document.' - })) -})); - -export const ingest_types_redact_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - patterns: z.array(types_grok_pattern).register(z.globalRegistry, { - description: 'A list of grok expressions to match and redact named captures with' - }), - pattern_definitions: z.optional(z.record(z.string(), z.string())), - prefix: z.optional(z.string().register(z.globalRegistry, { - description: 'Start a redacted section with this token' - })), - suffix: z.optional(z.string().register(z.globalRegistry, { - description: 'End a redacted section with this token' - })), - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' - })), - skip_if_unlicensed: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and the current license does not support running redact processors, then the processor quietly exits without modifying the document' - })), - trace_redact: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` then ingest metadata `_ingest._redact._is_redacted` is set to `true` if the document has been redacted' - })) -})); - -export const ingest_types_pipeline_processor = ingest_types_processor_base.and(z.object({ - name: types_name, - ignore_missing_pipeline: z.optional(z.boolean().register(z.globalRegistry, { - description: 'Whether to ignore missing pipelines instead of failing.' - })) -})); - -export const ingest_types_network_direction_processor = ingest_types_processor_base.and(z.object({ - source_ip: z.optional(types_field), - destination_ip: z.optional(types_field), - target_field: z.optional(types_field), - internal_networks: z.optional(z.array(z.string()).register(z.globalRegistry, { - description: 'List of internal networks. Supports IPv4 and IPv6 addresses and ranges in\nCIDR notation. Also supports the named ranges listed below. These may be\nconstructed with template snippets. Must specify only one of\ninternal_networks or internal_networks_field.' - })), - internal_networks_field: z.optional(types_field), - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If true and any required fields are missing, the processor quietly exits\nwithout modifying the document.' - })) -})); - -export const ingest_types_lowercase_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' 
- })), - target_field: z.optional(types_field) -})); - -export const ingest_types_key_value_processor = ingest_types_processor_base.and(z.object({ - exclude_keys: z.optional(z.array(z.string()).register(z.globalRegistry, { - description: 'List of keys to exclude from document.' - })), - field: types_field, - field_split: z.string().register(z.globalRegistry, { - description: 'Regex pattern to use for splitting key-value pairs.' - }), - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' - })), - include_keys: z.optional(z.array(z.string()).register(z.globalRegistry, { - description: 'List of keys to filter and insert into document.\nDefaults to including all keys.' - })), - prefix: z.optional(z.string().register(z.globalRegistry, { - description: 'Prefix to be added to extracted keys.' - })), - strip_brackets: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true`. strip brackets `()`, `<>`, `[]` as well as quotes `\'` and `"` from extracted values.' - })), - target_field: z.optional(types_field), - trim_key: z.optional(z.string().register(z.globalRegistry, { - description: 'String of characters to trim from extracted keys.' - })), - trim_value: z.optional(z.string().register(z.globalRegistry, { - description: 'String of characters to trim from extracted values.' - })), - value_split: z.string().register(z.globalRegistry, { - description: 'Regex pattern to use for splitting the key from the value within a key-value pair.' - }) -})); - -export const ingest_types_json_processor = ingest_types_processor_base.and(z.object({ - add_to_root: z.optional(z.boolean().register(z.globalRegistry, { - description: 'Flag that forces the parsed JSON to be added at the top level of the document.\n`target_field` must not be set when this option is chosen.' - })), - add_to_root_conflict_strategy: z.optional(ingest_types_json_processor_conflict_strategy), - allow_duplicate_keys: z.optional(z.boolean().register(z.globalRegistry, { - description: 'When set to `true`, the JSON parser will not fail if the JSON contains duplicate keys.\nInstead, the last encountered value for any duplicate key wins.' - })), - field: types_field, - target_field: z.optional(types_field) -})); - -export const ingest_types_join_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - separator: z.string().register(z.globalRegistry, { - description: 'The separator character.' - }), - target_field: z.optional(types_field) -})); - -export const ingest_types_inference_processor = ingest_types_processor_base.and(z.object({ - model_id: types_id, - target_field: z.optional(types_field), - field_map: z.optional(z.record(z.string(), z.record(z.string(), z.unknown())).register(z.globalRegistry, { - description: 'Maps the document field names to the known field names of the model.\nThis mapping takes precedence over any default mappings provided in the model configuration.' 
- })), - inference_config: z.optional(ingest_types_inference_config), - input_output: z.optional(z.union([ - ingest_types_input_config, - z.array(ingest_types_input_config) - ])), - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If true and any of the input fields defined in input_ouput are missing\nthen those missing fields are quietly ignored, otherwise a missing field causes a failure.\nOnly applies when using input_output configurations to explicitly list the input fields.' - })) -})); - -export const ingest_types_html_strip_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document,' - })), - target_field: z.optional(types_field) -})); - -export const ingest_types_gsub_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' - })), - pattern: z.string().register(z.globalRegistry, { - description: 'The pattern to be replaced.' - }), - replacement: z.string().register(z.globalRegistry, { - description: 'The string to replace the matching patterns with.' - }), - target_field: z.optional(types_field) -})); - -export const ingest_types_grok_processor = ingest_types_processor_base.and(z.object({ - ecs_compatibility: z.optional(z.string().register(z.globalRegistry, { - description: 'Must be disabled or v1. If v1, the processor uses patterns with Elastic\nCommon Schema (ECS) field names.' - })), - field: types_field, - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' - })), - pattern_definitions: z.optional(z.record(z.string(), z.string()).register(z.globalRegistry, { - description: 'A map of pattern-name and pattern tuples defining custom patterns to be used by the current processor.\nPatterns matching existing names will override the pre-existing definition.' - })), - patterns: z.array(types_grok_pattern).register(z.globalRegistry, { - description: 'An ordered list of grok expression to match and extract named captures with.\nReturns on the first expression in the list that matches.' - }), - trace_match: z.optional(z.boolean().register(z.globalRegistry, { - description: 'When `true`, `_ingest._grok_match_index` will be inserted into your matched document’s metadata with the index into the pattern found in `patterns` that matched.' - })) -})); - -export const ingest_types_geo_ip_processor = ingest_types_processor_base.and(z.object({ - database_file: z.optional(z.string().register(z.globalRegistry, { - description: 'The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory.' - })), - field: types_field, - first_only: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true`, only the first found geoip data will be returned, even if the field contains an array.' 
- })), - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' - })), - properties: z.optional(z.array(z.string()).register(z.globalRegistry, { - description: 'Controls what properties are added to the `target_field` based on the geoip lookup.' - })), - target_field: z.optional(types_field), - download_database_on_pipeline_creation: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created.\nElse, the download is triggered by when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index.' - })) -})); - -export const ingest_types_geo_grid_processor = ingest_types_processor_base.and(z.object({ - field: z.string().register(z.globalRegistry, { - description: 'The field to interpret as a geo-tile.=\nThe field format is determined by the `tile_type`.' - }), - tile_type: ingest_types_geo_grid_tile_type, - target_field: z.optional(types_field), - parent_field: z.optional(types_field), - children_field: z.optional(types_field), - non_children_field: z.optional(types_field), - precision_field: z.optional(types_field), - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' - })), - target_format: z.optional(ingest_types_geo_grid_target_format) -})); - -export const ingest_types_ip_location_processor = ingest_types_processor_base.and(z.object({ - database_file: z.optional(z.string().register(z.globalRegistry, { - description: 'The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory.' - })), - field: types_field, - first_only: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true`, only the first found IP location data will be returned, even if the field contains an array.' - })), - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' - })), - properties: z.optional(z.array(z.string()).register(z.globalRegistry, { - description: 'Controls what properties are added to the `target_field` based on the IP location lookup.' - })), - target_field: z.optional(types_field), - download_database_on_pipeline_creation: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created.\nElse, the download is triggered by when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index.' - })) -})); - -export const ingest_types_foreach_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true`, the processor silently exits without changing the document if the `field` is `null` or missing.' 
- })), - processor: ingest_types_processor_container -})); - -export const ingest_types_fingerprint_processor = ingest_types_processor_base.and(z.object({ - fields: types_fields, - target_field: z.optional(types_field), - salt: z.optional(z.string().register(z.globalRegistry, { - description: 'Salt value for the hash function.' - })), - method: z.optional(ingest_types_fingerprint_digest), - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If true, the processor ignores any missing fields. If all fields are\nmissing, the processor silently exits without modifying the document.' - })) -})); - -export const ingest_types_fail_processor = ingest_types_processor_base.and(z.object({ - message: z.string().register(z.globalRegistry, { - description: 'The error message thrown by the processor.\nSupports template snippets.' - }) -})); - -export const ingest_types_enrich_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' - })), - max_matches: z.optional(z.number().register(z.globalRegistry, { - description: 'The maximum number of matched documents to include under the configured target field.\nThe `target_field` will be turned into a json array if `max_matches` is higher than 1, otherwise `target_field` will become a json object.\nIn order to avoid documents getting too large, the maximum allowed value is 128.' - })), - override: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If processor will update fields with pre-existing non-null-valued field.\nWhen set to `false`, such fields will not be touched.' - })), - policy_name: z.string().register(z.globalRegistry, { - description: 'The name of the enrich policy to use.' - }), - shape_relation: z.optional(types_geo_shape_relation), - target_field: types_field -})); - -export const ingest_types_drop_processor = ingest_types_processor_base.and(z.record(z.string(), z.unknown())); - -export const ingest_types_dot_expander_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - override: z.optional(z.boolean().register(z.globalRegistry, { - description: 'Controls the behavior when there is already an existing nested object that conflicts with the expanded field.\nWhen `false`, the processor will merge conflicts by combining the old and the new values into an array.\nWhen `true`, the value from the expanded field will overwrite the existing value.' - })), - path: z.optional(z.string().register(z.globalRegistry, { - description: 'The field that contains the field to expand.\nOnly required if the field to expand is part another object field, because the `field` option can only understand leaf fields.' - })) -})); - -export const ingest_types_dissect_processor = ingest_types_processor_base.and(z.object({ - append_separator: z.optional(z.string().register(z.globalRegistry, { - description: 'The character(s) that separate the appended fields.' - })), - field: types_field, - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' - })), - pattern: z.string().register(z.globalRegistry, { - description: 'The pattern to apply to the field.' 
- }) -})); - -export const ingest_types_date_index_name_processor = ingest_types_processor_base.and(z.object({ - date_formats: z.optional(z.array(z.string()).register(z.globalRegistry, { - description: 'An array of the expected date formats for parsing dates / timestamps in the document being preprocessed.\nCan be a java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N.' - })), - date_rounding: z.string().register(z.globalRegistry, { - description: 'How to round the date when formatting the date into the index name. Valid values are:\n`y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and `s` (second).\nSupports template snippets.' - }), - field: types_field, - index_name_format: z.optional(z.string().register(z.globalRegistry, { - description: 'The format to be used when printing the parsed date into the index name.\nA valid java time pattern is expected here.\nSupports template snippets.' - })), - index_name_prefix: z.optional(z.string().register(z.globalRegistry, { - description: 'A prefix of the index name to be prepended before the printed date.\nSupports template snippets.' - })), - locale: z.optional(z.string().register(z.globalRegistry, { - description: 'The locale to use when parsing the date from the document being preprocessed, relevant when parsing month names or week days.' - })), - timezone: z.optional(z.string().register(z.globalRegistry, { - description: 'The timezone to use when parsing the date and when date math index supports resolves expressions into concrete index names.' - })) -})); - -export const ingest_types_date_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - formats: z.array(z.string()).register(z.globalRegistry, { - description: 'An array of the expected date formats.\nCan be a java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N.' - }), - locale: z.optional(z.string().register(z.globalRegistry, { - description: 'The locale to use when parsing the date, relevant when parsing month names or week days.\nSupports template snippets.' - })), - target_field: z.optional(types_field), - timezone: z.optional(z.string().register(z.globalRegistry, { - description: 'The timezone to use when parsing the date.\nSupports template snippets.' - })), - output_format: z.optional(z.string().register(z.globalRegistry, { - description: 'The format to use when writing the date to target_field. Must be a valid\njava time pattern.' - })) -})); - -export const ingest_types_csv_processor = ingest_types_processor_base.and(z.object({ - empty_value: z.optional(z.record(z.string(), z.unknown()).register(z.globalRegistry, { - description: 'Value used to fill empty fields.\nEmpty fields are skipped if this is not provided.\nAn empty field is one with no value (2 consecutive separators) or empty quotes (`""`).' - })), - field: types_field, - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' - })), - quote: z.optional(z.string().register(z.globalRegistry, { - description: 'Quote used in CSV, has to be single character string.' - })), - separator: z.optional(z.string().register(z.globalRegistry, { - description: 'Separator used in CSV, has to be single character string.' - })), - target_fields: types_fields, - trim: z.optional(z.boolean().register(z.globalRegistry, { - description: 'Trim whitespaces in unquoted fields.' 
- })) -})); - -export const ingest_types_convert_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' - })), - target_field: z.optional(types_field), - type: ingest_types_convert_type -})); - -export const ingest_types_community_id_processor = ingest_types_processor_base.and(z.object({ - source_ip: z.optional(types_field), - source_port: z.optional(types_field), - destination_ip: z.optional(types_field), - destination_port: z.optional(types_field), - iana_number: z.optional(types_field), - icmp_type: z.optional(types_field), - icmp_code: z.optional(types_field), - transport: z.optional(types_field), - target_field: z.optional(types_field), - seed: z.optional(z.number().register(z.globalRegistry, { - description: 'Seed for the community ID hash. Must be between 0 and 65535 (inclusive). The\nseed can prevent hash collisions between network domains, such as a staging\nand production network that use the same addressing scheme.' - })), - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If true and any required fields are missing, the processor quietly exits\nwithout modifying the document.' - })) -})); - -export const ingest_types_circle_processor = ingest_types_processor_base.and(z.object({ - error_distance: z.number().register(z.globalRegistry, { - description: 'The difference between the resulting inscribed distance from center to side and the circle’s radius (measured in meters for `geo_shape`, unit-less for `shape`).' - }), - field: types_field, - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist, the processor quietly exits without modifying the document.' - })), - shape_type: ingest_types_shape_type, - target_field: z.optional(types_field) -})); - -export const ingest_types_cef_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' - })), - target_field: z.optional(types_field), - ignore_empty_values: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and value is anempty string in extensions, the processor quietly exits without modifying the document.' - })), - timezone: z.optional(z.string().register(z.globalRegistry, { - description: 'The timezone to use when parsing the date and when date math index supports resolves expressions into concrete index names.' - })) -})); - -export const ingest_types_bytes_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document.' - })), - target_field: z.optional(types_field) -})); - -export const ingest_types_attachment_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - ignore_missing: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true` and field does not exist, the processor quietly exits without modifying the document.' 
- })), - indexed_chars: z.optional(z.number().register(z.globalRegistry, { - description: 'The number of chars being used for extraction to prevent huge fields.\nUse `-1` for no limit.' - })), - indexed_chars_field: z.optional(types_field), - properties: z.optional(z.array(z.string()).register(z.globalRegistry, { - description: 'Array of properties to select to be stored.\nCan be `content`, `title`, `name`, `author`, `keywords`, `date`, `content_type`, `content_length`, `language`.' - })), - target_field: z.optional(types_field), - remove_binary: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If true, the binary field will be removed from the document' - })), - resource_name: z.optional(z.string().register(z.globalRegistry, { - description: 'Field containing the name of the resource to decode.\nIf specified, the processor passes this resource name to the underlying Tika library to enable Resource Name Based Detection.' - })) -})); - -export const ingest_types_append_processor = ingest_types_processor_base.and(z.object({ - field: types_field, - value: z.optional(z.union([ - z.record(z.string(), z.unknown()), - z.array(z.record(z.string(), z.unknown())) - ])), - media_type: z.optional(z.string().register(z.globalRegistry, { - description: 'The media type for encoding `value`.\nApplies only when value is a template snippet.\nMust be one of `application/json`, `text/plain`, or `application/x-www-form-urlencoded`.' - })), - copy_from: z.optional(types_field), - allow_duplicates: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `false`, the processor does not append values already present in the field.' - })), - ignore_empty_values: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true`, the processor will skip empty values from the source (e.g. empty strings, and null values),\nrather than appending them to the field.' - })) -})); - export const types_mapping_icu_collation_property = z.lazy((): any => types_mapping_doc_values_property_base).and(z.object({ type: z.enum(['icu_collation_keyword']), norms: z.optional(z.boolean()), @@ -9929,28 +9025,6 @@ export const global_search_response_body = z.object({ terminated_early: z.optional(z.boolean()) }); -export const ingest_types_pipeline = z.object({ - description: z.optional(z.string().register(z.globalRegistry, { - description: 'Description of the ingest pipeline.' - })), - on_failure: z.optional(z.array(ingest_types_processor_container).register(z.globalRegistry, { - description: 'Processors to run immediately after a processor failure.' - })), - processors: z.optional(z.array(ingest_types_processor_container).register(z.globalRegistry, { - description: 'Processors used to perform transformations on documents before indexing.\nProcessors run sequentially in the order specified.' - })), - version: z.optional(types_version_number), - deprecated: z.optional(z.boolean().register(z.globalRegistry, { - description: 'Marks this ingest pipeline as deprecated.\nWhen a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning.' 
- })), - _meta: z.optional(types_metadata), - created_date: z.optional(types_date_time), - created_date_millis: z.optional(types_epoch_time_unit_millis), - modified_date: z.optional(types_date_time), - modified_date_millis: z.optional(types_epoch_time_unit_millis), - field_access_pattern: z.optional(ingest_types_field_access_pattern) -}); - export const indices_types_alias = z.object({ filter: z.optional(types_query_dsl_query_container), index_routing: z.optional(types_routing), @@ -10347,19 +9421,6 @@ export const search_allow_no_indices = z.boolean().register(z.globalRegistry, { */ export const search_index = types_indices; -/** - * If `true`, the response includes output data for each processor in the executed pipeline. - */ -export const ingest_simulate_verbose = z.boolean().register(z.globalRegistry, { - description: 'If `true`, the response includes output data for each processor in the executed pipeline.' -}); - -/** - * The pipeline to test. - * If you don't specify a `pipeline` in the request body, this parameter is required. - */ -export const ingest_simulate_id = types_id; - /** * If `true`, the request's actions must target a data stream (existing or to be created). */ @@ -10634,13 +9695,6 @@ export const search = z.object({ })) }); -export const ingest_simulate = z.object({ - docs: z.array(ingest_types_document).register(z.globalRegistry, { - description: 'Sample documents to test in the pipeline.' - }), - pipeline: z.optional(ingest_types_pipeline) -}); - export const bulk = z.array(z.union([ global_bulk_operation_container, global_bulk_update_action, @@ -10975,7 +10029,9 @@ export const esql_query_request = z.object({ })), filter: z.optional(types_query_dsl_query_container), locale: z.optional(z.string()), - params: z.optional(esql_types_esql_params), + params: z.optional(z.array(esql_types_esql_param).register(z.globalRegistry, { + description: 'To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters.' + })), profile: z.optional(z.boolean().register(z.globalRegistry, { description: 'If provided and `true` the response will include an extra `profile` object\nwith information on how the query was executed. This information is for human debugging\nand its format can change at any time but it can give some insight into the performance\nof each part of the query.' })), @@ -11033,66 +10089,6 @@ export const indices_create_response = z.object({ acknowledged: z.boolean() }); -export const ingest_simulate_request = z.object({ - body: ingest_simulate, - path: z.optional(z.never()), - query: z.optional(z.object({ - verbose: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true`, the response includes output data for each processor in the executed pipeline.' - })) - })) -}); - -export const ingest_simulate_response = z.object({ - docs: z.array(ingest_types_simulate_document_result) -}); - -export const ingest_simulate1_request = z.object({ - body: ingest_simulate, - path: z.optional(z.never()), - query: z.optional(z.object({ - verbose: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true`, the response includes output data for each processor in the executed pipeline.' 
- })) - })) -}); - -export const ingest_simulate1_response = z.object({ - docs: z.array(ingest_types_simulate_document_result) -}); - -export const ingest_simulate2_request = z.object({ - body: ingest_simulate, - path: z.object({ - id: types_id - }), - query: z.optional(z.object({ - verbose: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true`, the response includes output data for each processor in the executed pipeline.' - })) - })) -}); - -export const ingest_simulate2_response = z.object({ - docs: z.array(ingest_types_simulate_document_result) -}); - -export const ingest_simulate3_request = z.object({ - body: ingest_simulate, - path: z.object({ - id: types_id - }), - query: z.optional(z.object({ - verbose: z.optional(z.boolean().register(z.globalRegistry, { - description: 'If `true`, the response includes output data for each processor in the executed pipeline.' - })) - })) -}); - -export const ingest_simulate3_response = z.object({ - docs: z.array(ingest_types_simulate_document_result) -}); - export const search_request = z.object({ body: z.optional(search), path: z.optional(z.never()), diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows.ts b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows.ts index 6ad569836d721..5d0d19c4b372f 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows.ts @@ -29,9 +29,9 @@ steps: type: google_drive.searchFiles connector-id: ${stackConnectorId} with: - query: "{{ inputs.query }}" - pageSize: "{{ inputs.pageSize }}" - pageToken: "{{ inputs.pageToken }}" + query: "\${{inputs.query}}" + pageSize: \${{inputs.pageSize}} + pageToken: "\${{inputs.pageToken}}" `; } @@ -64,10 +64,10 @@ steps: type: google_drive.listFiles connector-id: ${stackConnectorId} with: - folderId: "{{ inputs.folderId }}" - pageSize: "{{ inputs.pageSize }}" - pageToken: "{{ inputs.pageToken }}" - orderBy: "{{ inputs.orderBy }}" + folderId: "\${{inputs.folderId}}" + pageSize: \${{inputs.pageSize}} + pageToken: "\${{inputs.pageToken}}" + orderBy: "\${{inputs.orderBy}}" `; } @@ -94,20 +94,20 @@ steps: type: google_drive.downloadFile connector-id: ${googleDriveConnectorId} with: - fileId: "{{ inputs.fileId }}" + fileId: "\${{inputs.fileId}}" - name: convert_to_markdown type: jina.fileToMarkdown connector-id: ${jinaConnectorId} with: - file: "{{ steps.download_file.output.content }}" - filename: "{{ steps.download_file.output.name }}" + file: "\${{steps.download_file.output.content}}" + filename: "\${{steps.download_file.output.name}}" `; } /** * Generates a composite workflow that downloads a file from Google Drive * and extracts its content using Elasticsearch's attachment processor - * via the ingest.simulate API. + * via the ingest pipeline simulate API. * * This is the fallback when no Jina Reader connector is configured. * Uses Apache Tika under the hood for text extraction. 
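For reference, the fallback step described above drives the standard simulate-pipeline API. A minimal sketch of the exchange, assuming a small base64 payload (the file name and extracted text below are illustrative only):

    POST /_ingest/pipeline/_simulate
    {
      "pipeline": {
        "processors": [
          { "attachment": { "field": "data", "indexed_chars": -1, "remove_binary": true } }
        ]
      },
      "docs": [
        { "_id": "<fileId>", "_source": { "filename": "report.pdf", "data": "<base64-encoded bytes>" } }
      ]
    }

and a trimmed response, where the extracted text surfaces under docs[0].doc._source.attachment (the binary data field is dropped because remove_binary is true):

    {
      "docs": [
        {
          "doc": {
            "_id": "<fileId>",
            "_source": {
              "filename": "report.pdf",
              "attachment": { "content": "Extracted plain text...", "content_type": "application/pdf" }
            }
          }
        }
      ]
    }
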
@@ -130,20 +130,23 @@ steps: type: google_drive.downloadFile connector-id: ${googleDriveConnectorId} with: - fileId: "{{ inputs.fileId }}" + fileId: "\${{inputs.fileId}}" - name: extract_content - type: elasticsearch.ingest.simulate + type: elasticsearch.request with: - pipeline: - processors: - - attachment: - field: data - indexed_chars: -1 - remove_binary: true - docs: - - _id: "{{ inputs.fileId }}" - _source: - filename: "{{ steps.download_file.output.name }}" - data: "{{ steps.download_file.output.content }}" + method: POST + path: /_ingest/pipeline/_simulate + body: + pipeline: + processors: + - attachment: + field: data + indexed_chars: -1 + remove_binary: true + docs: + - _id: "\${{inputs.fileId}}" + _source: + filename: "\${{steps.download_file.output.name}}" + data: "\${{steps.download_file.output.content}}" `; } From da1a4e87300242bb41f78c3d7bcdad58de9f2775 Mon Sep 17 00:00:00 2001 From: Apostolos Matsagkas Date: Tue, 3 Feb 2026 17:27:42 +0200 Subject: [PATCH 03/11] Address review comments --- .../src/specs/google_drive/google_drive.ts | 48 ++--- .../data_sources/server/routes/schema.ts | 2 +- .../server/sources/google_drive/workflows.ts | 199 ++++++++++++++---- 3 files changed, 183 insertions(+), 66 deletions(-) diff --git a/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts index c269f3afa9c97..9de11641a84b2 100644 --- a/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts +++ b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts @@ -21,12 +21,27 @@ const DEFAULT_EXPORT_MIME_TYPE = 'application/pdf'; const SHEETS_EXPORT_MIME_TYPE = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'; /** - * Escapes single quotes in a string for use in Google Drive query syntax. - * Google Drive queries use single quotes for string values, so any single - * quotes in the value must be escaped to avoid syntax errors. + * Escapes special characters in a string for use in Google Drive query syntax. + * Google Drive queries use single quotes for string values, so backslashes + * and single quotes must be escaped to avoid syntax errors and injection. */ function escapeQueryValue(value: string): string { - return value.replace(/'/g, "\\'"); + // Escape backslashes first, then single quotes + return value.replace(/\\/g, '\\\\').replace(/'/g, "\\'"); +} + +/** + * Extracts and throws a meaningful error from Google Drive API responses. + * Returns void if the error doesn't have Google API error details. 
+ */ +function throwGoogleDriveError(error: unknown): void { + const axiosError = error as { + response?: { data?: { error?: { message?: string; code?: number } } }; + }; + const googleError = axiosError.response?.data?.error; + if (googleError) { + throw new Error(`Google Drive API error: ${googleError.message}`); + } } export const GoogleDriveConnector: ConnectorSpec = { @@ -96,14 +111,7 @@ export const GoogleDriveConnector: ConnectorSpec = { nextPageToken: response.data.nextPageToken, }; } catch (error: unknown) { - // Extract detailed error from Google API response - const axiosError = error as { - response?: { data?: { error?: { message?: string; code?: number } } }; - }; - const googleError = axiosError.response?.data?.error; - if (googleError) { - throw new Error(`Google Drive API error: ${googleError.message}`); - } + throwGoogleDriveError(error); throw error; } }, @@ -162,13 +170,7 @@ export const GoogleDriveConnector: ConnectorSpec = { nextPageToken: response.data.nextPageToken, }; } catch (error: unknown) { - const axiosError = error as { - response?: { data?: { error?: { message?: string; code?: number } } }; - }; - const googleError = axiosError.response?.data?.error; - if (googleError) { - throw new Error(`Google Drive API error: ${googleError.message}`); - } + throwGoogleDriveError(error); throw error; } }, @@ -245,13 +247,7 @@ export const GoogleDriveConnector: ConnectorSpec = { encoding: 'base64', }; } catch (error: unknown) { - const axiosError = error as { - response?: { data?: { error?: { message?: string; code?: number } } }; - }; - const googleError = axiosError.response?.data?.error; - if (googleError) { - throw new Error(`Google Drive API error: ${googleError.message}`); - } + throwGoogleDriveError(error); throw error; } }, diff --git a/x-pack/platform/plugins/shared/data_sources/server/routes/schema.ts b/x-pack/platform/plugins/shared/data_sources/server/routes/schema.ts index d59207f0619b6..6f59ea0930bfc 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/routes/schema.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/routes/schema.ts @@ -65,5 +65,5 @@ export const createDataSourceRequestSchema = schema.object({ * The array should match the stackConnectors defined in the data source definition. * Optional connectors can be omitted. */ - connector_credentials: schema.arrayOf(connectorCredentialsSchema), + connector_credentials: schema.arrayOf(connectorCredentialsSchema, { maxSize: 10 }), }); diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows.ts b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows.ts index 5d0d19c4b372f..9eaa57260226c 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows.ts @@ -72,8 +72,10 @@ steps: } /** - * Generates a composite workflow that downloads a file from Google Drive - * and extracts its content using Jina Reader for LLM consumption. + * Generates a composite workflow that downloads files from Google Drive + * and extracts their content using Jina Reader for LLM consumption. + * Processes multiple files and returns an array of results. + * Optionally reranks results by relevance to reduce context window usage. 
*/ export function generateGoogleDriveDownloadFilesWithJinaWorkflow( googleDriveConnectorId: string, @@ -81,72 +83,191 @@ export function generateGoogleDriveDownloadFilesWithJinaWorkflow( ): string { return `version: '1' name: 'sources.google_drive.download' -description: Download a file and extract its text content to readable markdown (best for PDFs, Word docs, etc.) +description: Download files and extract their text content to readable markdown (best for PDFs, Word docs, etc.). You can optionally set rerank to true and specify topK to use semantic reranking - this is useful when downloading many documents and you want to avoid using too much of your context window by only keeping the top K most relevant documents based on the rerankQuery. enabled: true triggers: - type: manual inputs: - - name: fileId + - name: fileIds + type: array + description: Array of file IDs from search or list results. Works with PDFs, Office docs, Google Docs, images with text, and more + - name: rerank + type: boolean + required: false + default: false + description: Set to true to rerank results by relevance to rerankQuery. Useful when downloading many documents to reduce context window usage by keeping only the most relevant ones. + - name: topK + type: number + required: false + default: 5 + description: When rerank is true, return only the top K most relevant documents after reranking. + - name: rerankQuery type: string - description: File ID from search or list results. Works with PDFs, Office docs, Google Docs, images with text, and more + required: false + description: The query to rerank documents against. Required when rerank is true. Documents will be scored by relevance to this query. steps: - - name: download_file - type: google_drive.downloadFile - connector-id: ${googleDriveConnectorId} + - name: init_results + type: data.set with: - fileId: "\${{inputs.fileId}}" - - name: convert_to_markdown - type: jina.fileToMarkdown - connector-id: ${jinaConnectorId} + results: [] + - name: process_files + type: foreach + foreach: "\${{inputs.fileIds}}" + steps: + - name: download_file + type: google_drive.downloadFile + connector-id: ${googleDriveConnectorId} + with: + fileId: "\${{foreach.item}}" + - name: convert_to_markdown + type: jina.fileToMarkdown + connector-id: ${jinaConnectorId} + with: + file: "\${{steps.download_file.output.content}}" + filename: "\${{steps.download_file.output.name}}" + - name: normalize_result + type: data.set + with: + normalized: + fileId: "\${{foreach.item}}" + filename: "\${{steps.download_file.output.name}}" + content: "\${{steps.convert_to_markdown.output.content}}" + - name: accumulate_result + type: data.set + with: + results: '\${{variables.results | push: variables.normalized}}' + - name: conditional_rerank + type: if + condition: "\${{inputs.rerank}}" + steps: + - name: do_rerank + type: search.rerank + with: + rerank_text: "\${{inputs.rerankQuery}}" + data: \${{variables.results}} + fields: + - ["content"] + rank_window_size: \${{inputs.topK}} + - name: store_reranked + type: data.set + with: + final_results: "\${{steps.do_rerank.output}}" + else: + - name: store_all + type: data.set + with: + final_results: "\${{variables.results}}" + - name: output_results + type: data.set with: - file: "\${{steps.download_file.output.content}}" - filename: "\${{steps.download_file.output.name}}" + results: "\${{variables.final_results}}" `; } /** - * Generates a composite workflow that downloads a file from Google Drive - * and extracts its content using Elasticsearch's 
attachment processor + * Generates a composite workflow that downloads files from Google Drive + * and extracts their content using Elasticsearch's attachment processor * via the ingest pipeline simulate API. * * This is the fallback when no Jina Reader connector is configured. * Uses Apache Tika under the hood for text extraction. + * Processes multiple files and returns an array of results. + * Optionally reranks results by relevance to reduce context window usage. */ export function generateGoogleDriveDownloadFilesWithIngestSimulateWorkflow( googleDriveConnectorId: string ): string { return `version: '1' name: 'sources.google_drive.download' -description: Download a file and extract its text content (best for PDFs, Word docs, etc.) +description: Download files and extract their text content (best for PDFs, Word docs, etc.). You can optionally set rerank to true and specify topK to use semantic reranking - this is useful when downloading many documents and you want to avoid using too much of your context window by only keeping the top K most relevant documents based on the rerankQuery. enabled: true triggers: - type: manual inputs: - - name: fileId + - name: fileIds + type: array + description: Array of file IDs from search or list results. Works with PDFs, Office docs, Google Docs, and other text-based formats + - name: rerank + type: boolean + required: false + default: false + description: Set to true to rerank results by relevance to rerankQuery. Useful when downloading many documents to reduce context window usage by keeping only the most relevant ones. + - name: topK + type: number + required: false + default: 5 + description: When rerank is true, return only the top K most relevant documents after reranking. + - name: rerankQuery type: string - description: File ID from search or list results. Works with PDFs, Office docs, Google Docs, and other text-based formats + required: false + description: The query to rerank documents against. Required when rerank is true. Documents will be scored by relevance to this query. 
steps: - - name: download_file - type: google_drive.downloadFile - connector-id: ${googleDriveConnectorId} + - name: init_results + type: data.set with: - fileId: "\${{inputs.fileId}}" - - name: extract_content - type: elasticsearch.request + results: [] + - name: process_files + type: foreach + foreach: "\${{inputs.fileIds}}" + steps: + - name: download_file + type: google_drive.downloadFile + connector-id: ${googleDriveConnectorId} + with: + fileId: "\${{foreach.item}}" + - name: extract_content + type: elasticsearch.request + with: + method: POST + path: /_ingest/pipeline/_simulate + body: + pipeline: + processors: + - attachment: + field: data + indexed_chars: -1 + remove_binary: true + docs: + - _id: "\${{foreach.item}}" + _source: + filename: "\${{steps.download_file.output.name}}" + data: "\${{steps.download_file.output.content}}" + - name: normalize_result + type: data.set + with: + normalized: + fileId: "\${{foreach.item}}" + filename: "\${{steps.download_file.output.name}}" + content: "\${{steps.extract_content.output.docs[0].doc._source.attachment.content}}" + content_type: "\${{steps.extract_content.output.docs[0].doc._source.attachment.content_type}}" + - name: accumulate_result + type: data.set + with: + results: '\${{variables.results | push: variables.normalized}}' + - name: conditional_rerank + type: if + condition: "\${{inputs.rerank}}" + steps: + - name: do_rerank + type: search.rerank + with: + rerank_text: "\${{inputs.rerankQuery}}" + data: \${{variables.results}} + fields: + - ["content"] + rank_window_size: \${{inputs.topK}} + - name: store_reranked + type: data.set + with: + final_results: "\${{steps.do_rerank.output}}" + else: + - name: store_all + type: data.set + with: + final_results: "\${{variables.results}}" + - name: output_results + type: data.set with: - method: POST - path: /_ingest/pipeline/_simulate - body: - pipeline: - processors: - - attachment: - field: data - indexed_chars: -1 - remove_binary: true - docs: - - _id: "\${{inputs.fileId}}" - _source: - filename: "\${{steps.download_file.output.name}}" - data: "\${{steps.download_file.output.content}}" + results: "\${{variables.final_results}}" `; } From 0019dbcbf85945cd952f7efe8269f6f45d433217 Mon Sep 17 00:00:00 2001 From: Apostolos Matsagkas Date: Wed, 4 Feb 2026 11:51:09 +0200 Subject: [PATCH 04/11] Refine trashed items handling --- .../src/specs/google_drive/google_drive.ts | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts index 9de11641a84b2..fcacf9d7c67b3 100644 --- a/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts +++ b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts @@ -71,7 +71,8 @@ export const GoogleDriveConnector: ConnectorSpec = { .describe( "Google Drive query. Use fullText contains 'term' for content search, " + "name contains 'term' for filename search, mimeType='application/pdf' for type filtering, " + - "modifiedTime > '2024-01-01' for date filtering. Combine with 'and'/'or'." + "modifiedTime > '2024-01-01' for date filtering. Combine with 'and'/'or'. " + + "Note: Google Drive includes trashed files by default. Add 'and trashed=false' to exclude them." 
), pageSize: z .number() @@ -136,6 +137,11 @@ export const GoogleDriveConnector: ConnectorSpec = { z.enum(['name', 'modifiedTime', 'createdTime']).optional() ) .describe('Field to order results by'), + includeTrashed: z + .boolean() + .optional() + .default(false) + .describe('Include trashed files in results (default: false)'), }), handler: async (ctx, input) => { const typedInput = input as { @@ -143,11 +149,15 @@ export const GoogleDriveConnector: ConnectorSpec = { pageSize: number; pageToken?: string; orderBy?: string; + includeTrashed: boolean; }; + ctx.log.debug(`[google_drive.listFiles] input: ${JSON.stringify(input)}`); + const folderId = typedInput.folderId || DEFAULT_FOLDER_ID; + const trashedFilter = typedInput.includeTrashed ? '' : ' and trashed=false'; const params: Record = { - q: `'${escapeQueryValue(folderId)}' in parents and trashed=false`, + q: `'${escapeQueryValue(folderId)}' in parents${trashedFilter}`, pageSize: Math.min(typedInput.pageSize || DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE), fields: 'nextPageToken, files(id, name, mimeType, size, modifiedTime, webViewLink)', }; @@ -160,6 +170,8 @@ export const GoogleDriveConnector: ConnectorSpec = { params.orderBy = typedInput.orderBy; } + ctx.log.debug(`[google_drive.listFiles] API params: ${JSON.stringify(params)}`); + try { const response = await ctx.client.get(`${GOOGLE_DRIVE_API_BASE}/files`, { params, @@ -188,6 +200,8 @@ export const GoogleDriveConnector: ConnectorSpec = { mimeType?: string; }; + ctx.log.debug(`[google_drive.downloadFile] input: ${JSON.stringify(input)}`); + try { // First, get file metadata to determine if it's a Google Workspace document const metadataResponse = await ctx.client.get( From 61c59df6b841a9d6bd148786e20c4604cc229fc5 Mon Sep 17 00:00:00 2001 From: Apostolos Matsagkas Date: Thu, 5 Feb 2026 15:27:00 +0200 Subject: [PATCH 05/11] Revert multi-connector data source refactor and Jina integration --- .../data_catalog/common/data_source_spec.ts | 44 +- .../shared/data_catalog/common/index.ts | 2 - .../plugins/shared/data_catalog/index.ts | 2 - .../shared/data_catalog/server/routes.ts | 4 +- .../components/active_sources_view.tsx | 35 +- .../components/connectors_view.tsx | 23 +- .../components/optional_connector_prompt.tsx | 136 ------ .../hooks/use_add_connector_flyout.ts | 393 ++++-------------- .../hooks/use_clone_active_source_flyout.ts | 78 ++++ .../application/hooks/use_connectors.ts | 13 +- .../routes/data_sources_helpers.test.ts | 8 +- .../server/routes/data_sources_helpers.ts | 92 ++-- .../data_sources/server/routes/index.test.ts | 14 +- .../data_sources/server/routes/index.ts | 44 +- .../data_sources/server/routes/schema.ts | 22 +- .../server/sources/github/data_type.ts | 80 ++-- .../server/sources/google_drive/data_type.ts | 72 +--- .../server/sources/google_drive/workflows.ts | 98 +---- .../server/sources/notion/data_type.ts | 34 +- 19 files changed, 293 insertions(+), 901 deletions(-) delete mode 100644 x-pack/platform/plugins/shared/data_sources/public/application/components/optional_connector_prompt.tsx create mode 100644 x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_clone_active_source_flyout.ts diff --git a/x-pack/platform/plugins/shared/data_catalog/common/data_source_spec.ts b/x-pack/platform/plugins/shared/data_catalog/common/data_source_spec.ts index ebe885685cc5b..e446790dd2083 100644 --- a/x-pack/platform/plugins/shared/data_catalog/common/data_source_spec.ts +++ b/x-pack/platform/plugins/shared/data_catalog/common/data_source_spec.ts @@ -54,24 +54,6 @@ 
export interface CustomOAuthConfiguration { fetchSecretsPath: string; } -/** - * Role of a connector within a data source: - * - 'primary': Main connector shown first in UI configuration flow (one per data source) - * - 'required': Must be configured, shown after primary connector - * - 'optional': User is prompted with a choice before configuration - */ -export type ConnectorRole = 'primary' | 'required' | 'optional'; - -/** - * Reference to a created stack connector, including its type for reliable matching. - */ -export interface ConnectorReference { - /** The connector type (e.g., '.google_drive', '.jina') */ - type: string; - /** The created connector's ID */ - id: string; -} - /** * Configuration for a stack connector associated with a data source type */ @@ -79,24 +61,6 @@ export interface StackConnectorConfig { type: string; config: Record; importedTools?: string[]; - /** - * Role of this connector in the data source configuration flow. - * - 'primary': Main connector, shown first (default for first connector if not specified) - * - 'required': Must be configured, flyout shown directly after primary - * - 'optional': User prompted with y/n before showing configuration flyout - * @default 'required' - */ - role?: ConnectorRole; - /** Display name for this connector (shown in UI) */ - name?: string; - /** Description explaining what this connector does (shown in UI prompts) */ - description?: string; - /** - * Description shown when the user is about to skip an optional connector. - * Explains what will happen if they skip (e.g., fallback behavior). - * Only relevant for connectors with role 'optional'. - */ - skipDescription?: string; } /** @@ -124,16 +88,14 @@ export interface DataSource { /** * Generates workflows for interacting with the third-party data source. * Workflows are the only model for "taking action" against the third party. - * @param connectors - Array of connector references (type + id) for connectors created for this data source */ - generateWorkflows(connectors: ConnectorReference[]): WorkflowInfo[]; + generateWorkflows(stackConnectorId?: string): WorkflowInfo[]; /** - * Stack connector configurations. + * Stack connector configuration. * Stack connectors are the only model for executing workflow actions against the third party. - * This is an array to support composite data sources that use multiple connectors. 
*/ - stackConnectors: StackConnectorConfig[]; + stackConnector: StackConnectorConfig; /** OAuth configuration for authentication */ oauthConfiguration?: EARSOAuthConfiguration | CustomOAuthConfiguration; diff --git a/x-pack/platform/plugins/shared/data_catalog/common/index.ts b/x-pack/platform/plugins/shared/data_catalog/common/index.ts index 9a25373e879ed..3ad169565601b 100644 --- a/x-pack/platform/plugins/shared/data_catalog/common/index.ts +++ b/x-pack/platform/plugins/shared/data_catalog/common/index.ts @@ -15,8 +15,6 @@ export type { EARSOAuthConfiguration, CustomOAuthConfiguration, WorkflowInfo, - ConnectorRole, - ConnectorReference, } from './data_source_spec'; export { EARSSupportedOAuthProvider } from './data_source_spec'; diff --git a/x-pack/platform/plugins/shared/data_catalog/index.ts b/x-pack/platform/plugins/shared/data_catalog/index.ts index a4ae75adea4ee..e2ea3da2a2afd 100644 --- a/x-pack/platform/plugins/shared/data_catalog/index.ts +++ b/x-pack/platform/plugins/shared/data_catalog/index.ts @@ -11,8 +11,6 @@ export type { EARSOAuthConfiguration, CustomOAuthConfiguration, WorkflowInfo, - ConnectorReference, - ConnectorRole, } from './common'; export { PLUGIN_ID, PLUGIN_NAME, API_BASE_PATH, EARSSupportedOAuthProvider } from './common'; diff --git a/x-pack/platform/plugins/shared/data_catalog/server/routes.ts b/x-pack/platform/plugins/shared/data_catalog/server/routes.ts index b05121410fe14..6387f6653a786 100644 --- a/x-pack/platform/plugins/shared/data_catalog/server/routes.ts +++ b/x-pack/platform/plugins/shared/data_catalog/server/routes.ts @@ -50,9 +50,7 @@ export function registerRoutes(router: IRouter, dataCatalog: DataCatalog) { if (!type) { return response.notFound({ body: `Type ${request.params.id} not found` }); } - const workflowInfos = type.generateWorkflows([ - { type: type.stackConnectors[0]?.type ?? 
'unknown', id: '' }, - ]); + const workflowInfos = type.generateWorkflows(''); return response.ok({ body: { ...type, diff --git a/x-pack/platform/plugins/shared/data_sources/public/application/components/active_sources_view.tsx b/x-pack/platform/plugins/shared/data_sources/public/application/components/active_sources_view.tsx index 2c73b8db61885..d09e65d702a1a 100644 --- a/x-pack/platform/plugins/shared/data_sources/public/application/components/active_sources_view.tsx +++ b/x-pack/platform/plugins/shared/data_sources/public/application/components/active_sources_view.tsx @@ -13,18 +13,16 @@ import { ActiveSourcesTable } from './active_sources_table'; import { ConfirmDeleteActiveSourceModal } from './confirm_delete_active_source_modal'; import { useActiveSources } from '../hooks/use_active_sources'; import { useDeleteActiveSource } from '../hooks/use_delete_active_source'; -import { useCloneActiveSource } from '../hooks/use_clone_active_source'; -import { useAddConnectorFlyout } from '../hooks/use_add_connector_flyout'; import { useEditActiveSourceFlyout } from '../hooks/use_edit_active_source_flyout'; -import { useDataSources } from '../hooks/use_connectors'; +import { useCloneActiveSourceFlyout } from '../hooks/use_clone_active_source_flyout'; import type { ActiveSource } from '../../types/connector'; export const ActiveSourcesView: React.FC = () => { const { activeSources, isLoading } = useActiveSources(); - const { dataSources } = useDataSources(); const [selectedSource, setSelectedSource] = useState(null); const [showDeleteModal, setShowDeleteModal] = useState(false); const [sourceToEdit, setSourceToEdit] = useState(null); + const [sourceToClone, setSourceToClone] = useState(null); const handleCancelDelete = useCallback(() => { setSelectedSource(null); @@ -34,14 +32,12 @@ export const ActiveSourcesView: React.FC = () => { const { mutate: deleteActiveSource, isLoading: isDeleting } = useDeleteActiveSource(handleCancelDelete); - const { getCloneName } = useCloneActiveSource(); - - // Setup clone flyout - const { - openFlyout: openCloneFlyout, - flyout: cloneFlyout, - optionalPrompt: cloneOptionalPrompt, - } = useAddConnectorFlyout({}); + const { openFlyout: openCloneFlyout, flyout: cloneFlyout } = useCloneActiveSourceFlyout({ + sourceToClone, + onConnectorCreated: () => { + setSourceToClone(null); + }, + }); const handleCloseEditFlyout = useCallback(() => { setSourceToEdit(null); @@ -62,16 +58,12 @@ export const ActiveSourcesView: React.FC = () => { const handleClone = useCallback( (source: ActiveSource) => { - // Find the DataSource definition for this type - const ds = dataSources.find((d) => d.id === source.type); - if (ds) { - // Open the add connector flyout with pre-selected type and suggested name - // User will need to select/create credentials (no secrets cloned) - const cloneName = getCloneName(source); - openCloneFlyout(ds, ds.id, cloneName); - } + setSourceToClone(source); + // Open the add connector flyout with pre-selected type + // User will need to select/create credentials (no secrets cloned) + openCloneFlyout(); }, - [dataSources, openCloneFlyout, getCloneName] + [openCloneFlyout] ); const handleDelete = useCallback((source: ActiveSource) => { @@ -135,7 +127,6 @@ export const ActiveSourcesView: React.FC = () => { )} {editFlyout} {cloneFlyout} - {cloneOptionalPrompt} ); }; diff --git a/x-pack/platform/plugins/shared/data_sources/public/application/components/connectors_view.tsx 
b/x-pack/platform/plugins/shared/data_sources/public/application/components/connectors_view.tsx index 35e11f8f8e869..9a830dde56fdb 100644 --- a/x-pack/platform/plugins/shared/data_sources/public/application/components/connectors_view.tsx +++ b/x-pack/platform/plugins/shared/data_sources/public/application/components/connectors_view.tsx @@ -5,7 +5,7 @@ * 2.0. */ -import React, { useMemo, useCallback, useState } from 'react'; +import React, { useState, useMemo, useCallback } from 'react'; import { css } from '@emotion/react'; import { EuiFlexGrid, @@ -27,11 +27,14 @@ import { } from '../../../common/constants'; export const DataSourcesView: React.FC = () => { - const { connectors, dataSources, isLoading } = useDataSources(); + const { connectors, isLoading } = useDataSources(); + const [selectedConnector, setSelectedConnector] = useState(null); const [activePage, setActivePage] = useState(0); const [itemsPerPage, setItemsPerPage] = useState(DEFAULT_ITEMS_PER_PAGE); - const { openFlyout, flyout, optionalPrompt } = useAddConnectorFlyout({}); + const { openFlyout, flyout } = useAddConnectorFlyout({ + dataSourceType: selectedConnector?.id, + }); const paginatedConnectors = useMemo(() => { const start = activePage * itemsPerPage; @@ -47,13 +50,12 @@ export const DataSourcesView: React.FC = () => { const handleConnectorClick = useCallback( (connector: Connector) => { - // Find the full DataSource definition - const ds = dataSources.find((d) => d.id === connector.id); - if (ds) { - openFlyout(ds, ds.id); - } + setSelectedConnector(connector); + // Open the flyout with the connector's action type ID + // For connectors from registry, this will be the stackConnector.type (e.g., '.notion') + openFlyout(connector.type); }, - [dataSources, openFlyout] + [openFlyout] ); if (isLoading) { @@ -122,9 +124,6 @@ export const DataSourcesView: React.FC = () => { {/* Connector creation flyout */} {flyout} - - {/* Optional connector prompt */} - {optionalPrompt} ); }; diff --git a/x-pack/platform/plugins/shared/data_sources/public/application/components/optional_connector_prompt.tsx b/x-pack/platform/plugins/shared/data_sources/public/application/components/optional_connector_prompt.tsx deleted file mode 100644 index f2056282e5f38..0000000000000 --- a/x-pack/platform/plugins/shared/data_sources/public/application/components/optional_connector_prompt.tsx +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import React from 'react'; -import { - EuiModal, - EuiModalHeader, - EuiModalHeaderTitle, - EuiModalBody, - EuiModalFooter, - EuiButton, - EuiButtonEmpty, - EuiText, - EuiSpacer, - EuiCallOut, - useGeneratedHtmlId, -} from '@elastic/eui'; -import { i18n } from '@kbn/i18n'; -import type { StackConnectorConfig, ConnectorRole } from '@kbn/data-catalog-plugin'; - -export interface ConnectorPromptProps { - connectorConfig: StackConnectorConfig; - /** The effective role of this connector */ - role: ConnectorRole; - onSetUp: () => void; - onSkip: () => void; -} - -/** - * Prompt shown before configuring a connector. 
- * - For 'optional' connectors: Shows description with Skip/Set up buttons - * - For 'required' connectors: Shows description with Continue button (no skip) - */ -export const ConnectorPrompt: React.FC = ({ - connectorConfig, - role, - onSetUp, - onSkip, -}) => { - const connectorName = connectorConfig.name || connectorConfig.type; - const isOptional = role === 'optional'; - const modalTitleId = useGeneratedHtmlId(); - - return ( - - - - {isOptional - ? i18n.translate('xpack.dataSources.connectorPrompt.optionalTitle', { - defaultMessage: 'Set up {connectorName}?', - values: { connectorName }, - }) - : i18n.translate('xpack.dataSources.connectorPrompt.requiredTitle', { - defaultMessage: 'Configure {connectorName}', - values: { connectorName }, - })} - - - - - {connectorConfig.description && ( - <> - -

{connectorConfig.description}

-
- - - )} - - {isOptional && connectorConfig.skipDescription && ( - <> - -

{connectorConfig.skipDescription}

-
- - - )} - - {isOptional && !connectorConfig.skipDescription && ( - -

-            {i18n.translate('xpack.dataSources.connectorPrompt.skipInfo', {
-              defaultMessage: 'You can skip this for now and set it up later.',
-            })}
-

-
-        )}
-
-      {!isOptional && (
-

-            {i18n.translate('xpack.dataSources.connectorPrompt.requiredInfo', {
-              defaultMessage: 'This connector is required for the data source to work properly.',
-            })}
-

-
-        )}
-
-
-
-        {isOptional && (
-
-            {i18n.translate('xpack.dataSources.connectorPrompt.skipButton', {
-              defaultMessage: 'Skip',
-            })}
-
-        )}
-
-          {isOptional
-            ? i18n.translate('xpack.dataSources.connectorPrompt.setUpButton', {
-                defaultMessage: 'Set up {connectorName}',
-                values: { connectorName },
-              })
-            : i18n.translate('xpack.dataSources.connectorPrompt.continueButton', {
-                defaultMessage: 'Continue',
-              })}
-
-
- ); -}; - -// Legacy export for backwards compatibility -export const OptionalConnectorPrompt = ConnectorPrompt; diff --git a/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_add_connector_flyout.ts b/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_add_connector_flyout.ts index 422c5e1e0a676..0e9e5f5905b0f 100644 --- a/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_add_connector_flyout.ts +++ b/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_add_connector_flyout.ts @@ -5,89 +5,36 @@ * 2.0. */ -import React, { useCallback, useMemo, useState, useRef } from 'react'; +import { useCallback, useMemo, useState, useRef } from 'react'; import type { ActionConnector } from '@kbn/triggers-actions-ui-plugin/public'; +import type { IconType } from '@elastic/eui'; import { i18n } from '@kbn/i18n'; import { useMutation, useQueryClient } from '@kbn/react-query'; -import type { DataSource, StackConnectorConfig, ConnectorRole } from '@kbn/data-catalog-plugin'; import { useKibana } from './use_kibana'; import { API_BASE_PATH } from '../../../common/constants'; import { queryKeys } from '../query_keys'; -import { ConnectorPrompt } from '../components/optional_connector_prompt'; - -/** - * Get the effective role of a connector. - * - First connector defaults to 'primary' if no role specified - * - Other connectors default to 'required' if no role specified - */ -function getEffectiveRole(connectorConfig: StackConnectorConfig, index: number): ConnectorRole { - if (connectorConfig.role) { - return connectorConfig.role; - } - // First connector is implicitly primary, others are implicitly required - return index === 0 ? 'primary' : 'required'; -} - -/** - * Build processing order: primary connectors first, then required, then optional. - * Returns array of indices into the original stackConnectors array. - */ -function buildProcessingOrder(connectorsList: StackConnectorConfig[]): number[] { - const primary: number[] = []; - const required: number[] = []; - const optional: number[] = []; - - connectorsList.forEach((sc, idx) => { - const role = getEffectiveRole(sc, idx); - if (role === 'primary') { - primary.push(idx); - } else if (role === 'required') { - required.push(idx); - } else { - optional.push(idx); - } - }); - - return [...primary, ...required, ...optional]; -} export interface UseAddConnectorFlyoutOptions { onConnectorCreated?: (connector: ActionConnector) => void; - onComplete?: () => void; -} - -interface ConnectorCredential { - connector_type: string; - credentials?: string; - existing_connector_id?: string; + dataSourceType?: string; + suggestedName?: string; + icon?: IconType; } interface CreateDataConnectorPayload { name: string; + stack_connector_id: string; type: string; - connector_credentials: ConnectorCredential[]; } -type FlowState = - | { type: 'idle' } - | { type: 'flyout'; connectorIndex: number; dataSource: DataSource; suggestedName?: string } - | { - type: 'connector_prompt'; - connectorIndex: number; - dataSource: DataSource; - suggestedName?: string; - role: ConnectorRole; - } - | { type: 'complete' }; - /** - * Hook to manage connector creation flyouts for data sources. - * Supports multi-connector data sources by showing sequential flyouts - * and prompting for optional connectors. + * Hook to manage the connector creation flyout. 
*/ export const useAddConnectorFlyout = ({ onConnectorCreated, - onComplete, + dataSourceType, + suggestedName, + icon, }: UseAddConnectorFlyoutOptions = {}) => { const { services: { @@ -98,40 +45,29 @@ export const useAddConnectorFlyout = ({ } = useKibana(); const queryClient = useQueryClient(); + const [isOpen, setIsOpen] = useState(false); + const [selectedConnectorType, setSelectedConnectorType] = useState(); const loadingToastRef = useRef | undefined>(); - // Flow state management - const [flowState, setFlowState] = useState({ type: 'idle' }); - // Track created connectors - state for UI updates, ref for closure-safe access in callbacks - const [createdConnectors, setCreatedConnectors] = useState([]); - // Ref mirrors state for closure-safe access (callbacks capture stale state values) - const createdConnectorsRef = useRef([]); - - // Store active session data separately to survive flyout close events - const activeSessionRef = useRef<{ - dataSource: DataSource; - dataSourceType: string; - suggestedName?: string; - processingOrder: number[]; // Indices in priority order: primary → required → optional - } | null>(null); + const openFlyout = useCallback((actionTypeId?: string) => { + setSelectedConnectorType(actionTypeId); + setIsOpen(true); + }, []); - // Get stack connectors from session ref (survives flyout close) - // Memoized to prevent useMemo dependency issues - const stackConnectors = useMemo( - () => activeSessionRef.current?.dataSource?.stackConnectors ?? [], - // Re-compute when flow state changes (which updates activeSessionRef) - // eslint-disable-next-line react-hooks/exhaustive-deps - [flowState] - ); + const closeFlyout = useCallback(() => { + setIsOpen(false); + setSelectedConnectorType(undefined); + }, []); - // Mutation for creating data source - const createDataSourceMutation = useMutation({ + // Mutation for creating data connector + const createDataConnectorMutation = useMutation({ mutationFn: async (payload: CreateDataConnectorPayload) => { return http.post(`${API_BASE_PATH}`, { body: JSON.stringify(payload), }); }, onMutate: ({ name }) => { + // Show loading toast const loadingToast = toasts.addInfo( { title: i18n.translate('xpack.dataSources.hooks.useAddConnectorFlyout.creatingTitle', { @@ -139,295 +75,108 @@ export const useAddConnectorFlyout = ({ }), text: i18n.translate('xpack.dataSources.hooks.useAddConnectorFlyout.creatingText', { defaultMessage: 'Setting up {connectorName}...', - values: { connectorName: name }, + values: { + connectorName: name, + }, }), }, - { toastLifeTimeMs: 30000 } + { + toastLifeTimeMs: 30000, + } ); loadingToastRef.current = loadingToast; + return { loadingToast }; }, - onSuccess: (_, variables) => { + onSuccess: (data, variables) => { + // Dismiss loading toast if (loadingToastRef.current) { toasts.remove(loadingToastRef.current); loadingToastRef.current = undefined; } + // Show success toast toasts.addSuccess( i18n.translate('xpack.dataSources.hooks.useAddConnectorFlyout.createSuccessText', { defaultMessage: 'Data source {connectorName} connected successfully', - values: { connectorName: variables.name }, + values: { + connectorName: variables.name, + }, }) ); + // Invalidate queries to refresh Active Sources table queryClient.invalidateQueries(queryKeys.dataSources.list()); }, - onError: (error) => { + onError: (error, variables) => { + // Dismiss loading toast if (loadingToastRef.current) { toasts.remove(loadingToastRef.current); loadingToastRef.current = undefined; } + // Show error toast toasts.addError(error as Error, { 
title: i18n.translate('xpack.dataSources.hooks.useAddConnectorFlyout.createErrorTitle', { - defaultMessage: 'Failed to create data source', + defaultMessage: 'Failed to create data connector', }), }); }, }); - // Find next connector to process using the processing order (primary → required → optional) - const findNextInOrder = useCallback( - ( - orderPosition: number, - processingOrder: number[], - connectorsList: StackConnectorConfig[], - created: ActionConnector[] - ): { orderPosition: number; connectorIndex: number } | null => { - for (let i = orderPosition; i < processingOrder.length; i++) { - const connectorIndex = processingOrder[i]; - // Check if we already have a connector for this type - const alreadyCreated = created.some( - (c) => c.actionTypeId === connectorsList[connectorIndex].type - ); - if (!alreadyCreated) { - return { orderPosition: i, connectorIndex }; - } - } - return null; - }, - [] - ); - - // Process next connector or complete the flow - const processNextConnector = useCallback( - ( - orderPosition: number, - connectors: ActionConnector[], - ds: DataSource, - dsType: string, - processingOrder: number[], - name?: string - ) => { - const connectorsList = ds.stackConnectors ?? []; - const next = findNextInOrder(orderPosition, processingOrder, connectorsList, connectors); - - if (next === null) { - // No more connectors - create data source - setFlowState({ type: 'complete' }); - activeSessionRef.current = null; - createdConnectorsRef.current = []; - setCreatedConnectors([]); - - if (dsType && connectors.length > 0) { - const connectorCredentials: ConnectorCredential[] = connectors.map((c) => ({ - connector_type: c.actionTypeId, - existing_connector_id: c.id, - })); - - // Data source name comes from the primary connector - const primaryConfig = connectorsList.find( - (sc, idx) => getEffectiveRole(sc, idx) === 'primary' - ); - const primaryConnector = primaryConfig - ? connectors.find((c) => c.actionTypeId === primaryConfig.type) - : null; - - createDataSourceMutation.mutate({ - name: name || primaryConnector?.name || connectors[0].name, - type: dsType, - connector_credentials: connectorCredentials, - }); - } - - onComplete?.(); - return; - } - - const { connectorIndex } = next; - const connectorConfig = connectorsList[connectorIndex]; - const role = getEffectiveRole(connectorConfig, connectorIndex); - - if (role === 'primary') { - // Show flyout directly for primary connector - setFlowState({ - type: 'flyout', - connectorIndex, - dataSource: ds, - suggestedName: name, - }); - } else { - // Show prompt for required and optional connectors (after primary) - // - Required: prompt with "Continue" button (no skip) - // - Optional: prompt with "Set up" and "Skip" buttons - setFlowState({ - type: 'connector_prompt', - connectorIndex, - dataSource: ds, - suggestedName: name, - role, - }); - } - }, - [findNextInOrder, createDataSourceMutation, onComplete] - ); - - // Start the flow - accepts dataSource directly to avoid closure issues - const openFlyout = useCallback( - (ds?: DataSource, dsType?: string, name?: string) => { - // Reset both state and ref - createdConnectorsRef.current = []; - setCreatedConnectors([]); - - if (!ds || !dsType || (ds.stackConnectors?.length ?? 
0) === 0) { - // No data source definition - can't proceed - toasts.addError(new Error('No data source definition provided'), { - title: 'Cannot create data source', - }); - return; - } - - // Build processing order: primary → required → optional - const processingOrder = buildProcessingOrder(ds.stackConnectors ?? []); - - // Store session data in ref (survives flyout close events) - activeSessionRef.current = { - dataSource: ds, - dataSourceType: dsType, - suggestedName: name, - processingOrder, - }; - - // Start processing from first position in order - processNextConnector(0, [], ds, dsType, processingOrder, name); - }, - [processNextConnector, toasts] - ); - - // Close and reset - only resets if no connectors were created - // (the flyout calls onClose even after successful creation) - const closeFlyout = useCallback(() => { - // Don't reset if we have created connectors - let handleConnectorCreated manage flow - // Use ref for closure-safe check - if (createdConnectorsRef.current.length === 0) { - setFlowState({ type: 'idle' }); - activeSessionRef.current = null; - } - }, []); - - // Handle connector created from flyout const handleConnectorCreated = useCallback( (connector: ActionConnector) => { + // Call user callback first onConnectorCreated?.(connector); - // Update both state and ref - const updatedConnectors = [...createdConnectorsRef.current, connector]; - createdConnectorsRef.current = updatedConnectors; - setCreatedConnectors(updatedConnectors); + // Close flyout immediately + closeFlyout(); - // Process next connector - get dataSource from session ref (survives flyout close) - const session = activeSessionRef.current; - if (session) { - // Start from position 0 - findNextInOrder will skip already-created connectors - processNextConnector( - 0, - updatedConnectors, - session.dataSource, - session.dataSourceType, - session.processingOrder, - session.suggestedName - ); + // If no dataSourceType, skip data connector creation + if (!dataSourceType) { + return; } - }, - [onConnectorCreated, processNextConnector] - ); - // Handle connector prompt - user wants to set up / continue - const handlePromptSetUp = useCallback(() => { - if (flowState.type === 'connector_prompt') { - setFlowState({ - type: 'flyout', - connectorIndex: flowState.connectorIndex, - dataSource: flowState.dataSource, - suggestedName: flowState.suggestedName, + // Create data connector in the background using mutation + createDataConnectorMutation.mutate({ + name: connector.name, + stack_connector_id: connector.id, + type: dataSourceType, }); - } - }, [flowState]); - - // Handle connector prompt - user wants to skip (only for optional connectors) - const handlePromptSkip = useCallback(() => { - const session = activeSessionRef.current; - if (flowState.type === 'connector_prompt' && session) { - // Find current position in processing order and advance past it - const currentOrderPos = session.processingOrder.indexOf(flowState.connectorIndex); - processNextConnector( - currentOrderPos + 1, - createdConnectorsRef.current, - session.dataSource, - session.dataSourceType, - session.processingOrder, - session.suggestedName - ); - } - }, [flowState, processNextConnector]); - - // Get current connector config - const currentConnectorConfig: StackConnectorConfig | undefined = - flowState.type === 'flyout' || flowState.type === 'connector_prompt' - ? 
stackConnectors[flowState.connectorIndex] - : undefined; + }, + [dataSourceType, onConnectorCreated, closeFlyout, createDataConnectorMutation] + ); - // Render flyout const flyout = useMemo(() => { - if (flowState.type !== 'flyout') { + if (!isOpen) { return null; } - const connectorType = stackConnectors[flowState.connectorIndex]?.type; - const name = flowState.suggestedName; - return triggersActionsUi.getAddConnectorFlyout({ onClose: closeFlyout, onConnectorCreated: handleConnectorCreated, - ...(connectorType && { + ...(icon && { icon }), + ...(selectedConnectorType && { initialConnector: { - actionTypeId: connectorType, - ...(name && { name }), + actionTypeId: selectedConnectorType, + ...(suggestedName && { name: suggestedName }), }, }), }); - }, [flowState, stackConnectors, closeFlyout, handleConnectorCreated, triggersActionsUi]); - - // Render connector prompt (for required and optional connectors after primary) - const connectorPrompt = useMemo(() => { - if (flowState.type !== 'connector_prompt' || !currentConnectorConfig) { - return null; - } - - return React.createElement(ConnectorPrompt, { - connectorConfig: currentConnectorConfig, - role: flowState.role, - onSetUp: handlePromptSetUp, - onSkip: handlePromptSkip, - }); - }, [flowState, currentConnectorConfig, handlePromptSetUp, handlePromptSkip]); + }, [ + isOpen, + selectedConnectorType, + suggestedName, + icon, + closeFlyout, + handleConnectorCreated, + triggersActionsUi, + ]); return { openFlyout, closeFlyout, - isOpen: flowState.type !== 'idle' && flowState.type !== 'complete', - isSaving: createDataSourceMutation.isLoading, + isOpen, + isSaving: createDataConnectorMutation.isLoading, flyout, - connectorPrompt, - // Legacy alias for backwards compatibility - optionalPrompt: connectorPrompt, - // Progress info - currentConnectorIndex: - flowState.type === 'flyout' || flowState.type === 'connector_prompt' - ? flowState.connectorIndex - : -1, - totalConnectors: stackConnectors.length, - // Connectors created so far in the current flow - createdConnectors, - connectedCount: createdConnectors.length, }; }; diff --git a/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_clone_active_source_flyout.ts b/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_clone_active_source_flyout.ts new file mode 100644 index 0000000000000..67dec5df59198 --- /dev/null +++ b/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_clone_active_source_flyout.ts @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { useCallback, useEffect, useState } from 'react'; +import type { ActionConnector } from '@kbn/triggers-actions-ui-plugin/public'; +import type { ActiveSource } from '../../types/connector'; +import { useCloneActiveSource } from './use_clone_active_source'; +import { useAddConnectorFlyout } from './use_add_connector_flyout'; +import { useStackConnector } from './use_stack_connector'; +import { getConnectorIconType } from '../../utils/get_connector_icon'; + +export interface UseCloneActiveSourceFlyoutOptions { + sourceToClone: ActiveSource | null; + onConnectorCreated?: (connector: ActionConnector) => void; +} + +/** + * Hook to manage the connector clone flyout for active sources. 
+ * Fetches the actual stack connector to get its real actionTypeId, + * ensuring clones use the correct connector type (e.g., .mcp for GitHub). + */ +export const useCloneActiveSourceFlyout = ({ + sourceToClone, + onConnectorCreated, +}: UseCloneActiveSourceFlyoutOptions) => { + const { getCloneName } = useCloneActiveSource(); + const [shouldOpenFlyout, setShouldOpenFlyout] = useState(false); + + // Get stack connector ID from the source to clone + const stackConnectorId = + sourceToClone && sourceToClone.stackConnectors.length > 0 + ? sourceToClone.stackConnectors[0] + : null; + + const { stackConnector, isLoading: isLoadingConnector } = useStackConnector({ + stackConnectorId, + enabled: shouldOpenFlyout, + }); + + // Generate suggested clone name + const clonedName = sourceToClone ? getCloneName(sourceToClone) : undefined; + + // Use the add connector flyout with the data source type, suggested name, and icon + const { + openFlyout: openAddFlyout, + flyout, + ...rest + } = useAddConnectorFlyout({ + dataSourceType: sourceToClone?.type, + suggestedName: clonedName, + icon: sourceToClone ? getConnectorIconType(sourceToClone.iconType) : undefined, + onConnectorCreated, + }); + + // When stack connector is loaded and we want to open the flyout, do it automatically + useEffect(() => { + if (shouldOpenFlyout && stackConnector && !isLoadingConnector) { + // Open the flyout with the actual stack connector type (e.g., '.mcp' for GitHub) + openAddFlyout(stackConnector.actionTypeId); + setShouldOpenFlyout(false); // Reset flag + } + }, [shouldOpenFlyout, stackConnector, isLoadingConnector, openAddFlyout]); + + const openFlyout = useCallback(() => { + setShouldOpenFlyout(true); + }, []); + + return { + openFlyout, + flyout, + isLoadingConnector, + ...rest, + }; +}; diff --git a/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_connectors.ts b/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_connectors.ts index 3ad04883c8e52..bece2922a0a61 100644 --- a/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_connectors.ts +++ b/x-pack/platform/plugins/shared/data_sources/public/application/hooks/use_connectors.ts @@ -20,7 +20,7 @@ const transformDataSourceType = (dataSources: DataSource): Connector => { return { id: dataSources.id, name: dataSources.name, - type: dataSources.stackConnectors?.[0]?.type, // Already has '.' prefix (e.g., '.notion') + type: dataSources.stackConnector.type, iconType: dataSources.iconType, category: 'popular', }; @@ -39,12 +39,10 @@ export const useDataSources = () => { queryKeys.connectorTypes.list(), async () => { const service = new AvailableDataSourcesService({ http }); - const dataSources = await service.list(); + const connectorTypes = await service.list(); - // Transform to our internal Connector interface while keeping raw data - const connectors = dataSources.map(transformDataSourceType); - - return { dataSources, connectors }; + // Transform connector types to our internal Connector interface + return connectorTypes.map(transformDataSourceType); }, { onError: (err: Error) => { @@ -59,8 +57,7 @@ export const useDataSources = () => { ); return { - connectors: data?.connectors ?? [], - dataSources: data?.dataSources ?? [], + connectors: data ?? 
[], isLoading, error, }; diff --git a/x-pack/platform/plugins/shared/data_sources/server/routes/data_sources_helpers.test.ts b/x-pack/platform/plugins/shared/data_sources/server/routes/data_sources_helpers.test.ts index 88b141329c63a..ea8ed1df4f86d 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/routes/data_sources_helpers.test.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/routes/data_sources_helpers.test.ts @@ -160,7 +160,7 @@ describe('createConnectorAndRelatedResources', () => { }, }; const mockDataSource = { - stackConnectors: [{ type: actionTypeId, config: {}, role: 'primary' as const }], + stackConnector: { type: actionTypeId, config: {} }, generateWorkflows: jest.fn().mockReturnValue([ { content: 'workflow yaml content', @@ -177,7 +177,7 @@ describe('createConnectorAndRelatedResources', () => { const result = await createDataSourceAndRelatedResources({ name: 'My Test Connector', type: 'test_type', - stackConnectorCredentials: [{ credentials: 'secret-token-123' }], + credentials: 'secret-token-123', savedObjectsClient: mockSavedObjectsClient, request: mockRequest, logger: mockLogger, @@ -235,7 +235,7 @@ describe('createConnectorAndRelatedResources', () => { attributes: { workflowIds: ['workflow-1'], toolIds: [], kscIds: ['ksc-1'] }, }; const mockDataSource = { - stackConnectors: [{ type: actionTypeId, config: {}, role: 'primary' as const }], + stackConnector: { type: actionTypeId, config: {} }, generateWorkflows: jest.fn().mockReturnValue([ { content: 'workflow yaml content', @@ -251,7 +251,7 @@ describe('createConnectorAndRelatedResources', () => { await createDataSourceAndRelatedResources({ name: 'Test', type: 'test', - stackConnectorCredentials: [{ credentials: 'token' }], + credentials: 'token', savedObjectsClient: mockSavedObjectsClient, request: mockRequest, logger: mockLogger, diff --git a/x-pack/platform/plugins/shared/data_sources/server/routes/data_sources_helpers.ts b/x-pack/platform/plugins/shared/data_sources/server/routes/data_sources_helpers.ts index 25df5893aef48..765106cd90cb3 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/routes/data_sources_helpers.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/routes/data_sources_helpers.ts @@ -11,7 +11,7 @@ import type { SavedObjectsClientContract } from '@kbn/core-saved-objects-api-ser import type { KibanaRequest } from '@kbn/core-http-server'; import type { ActionResult } from '@kbn/actions-plugin/server'; import type { Logger } from '@kbn/logging'; -import type { DataSource, ConnectorReference } from '@kbn/data-catalog-plugin'; +import type { DataSource } from '@kbn/data-catalog-plugin'; import { DEFAULT_NAMESPACE_STRING } from '@kbn/core-saved-objects-utils-server'; import { updateYamlField } from '@kbn/workflows-management-plugin/common/lib/yaml'; import { createStackConnector } from '../utils/create_stack_connector'; @@ -21,22 +21,11 @@ import type { } from '../types'; import { DATA_SOURCE_SAVED_OBJECT_TYPE, type DataSourceAttributes } from '../saved_objects'; -/** - * Credentials configuration for creating stack connectors. - * Each entry corresponds to a StackConnectorConfig in the data source. - */ -export interface StackConnectorCredentials { - /** The credentials (token, API key, etc.) 
for this connector */ - credentials: string; - /** Optional: Use an existing stack connector ID instead of creating a new one */ - existingConnectorId?: string; -} - interface CreateDataSourceAndResourcesParams { name: string; type: string; - /** Array of credentials, one for each stack connector in the data source */ - stackConnectorCredentials: StackConnectorCredentials[]; + credentials: string; + stackConnectorId?: string; savedObjectsClient: SavedObjectsClientContract; request: KibanaRequest; logger: Logger; @@ -58,10 +47,9 @@ function slugify(input: string): string { /** * Creates data source Saved Object, as well as all related resources (stack connectors, tools, workflows) * - * Supports composite data sources with multiple stack connectors. - * For each connector config in the data source: - * - If existingConnectorId is provided, reuse it - * - Otherwise, create a new stack connector with the provided credentials + * Supports two patterns: + * 1. Reuse existing stack connector: Pass stackConnectorId (e.g., from UI flyout) + * 2. Create new stack connector: Omit stackConnectorId, provide name and token */ export async function createDataSourceAndRelatedResources( params: CreateDataSourceAndResourcesParams @@ -69,7 +57,8 @@ export async function createDataSourceAndRelatedResources( const { name, type, - stackConnectorCredentials, + credentials, + stackConnectorId, savedObjectsClient, request, logger, @@ -81,49 +70,36 @@ export async function createDataSourceAndRelatedResources( const workflowIds: string[] = []; const toolIds: string[] = []; - const connectorRefs: ConnectorReference[] = []; - - const toolRegistry = await agentBuilder.tools.getRegistry({ request }); - // Create or reuse stack connectors for each connector config - for (let i = 0; i < dataSource.stackConnectors.length; i++) { - const connectorConfig = dataSource.stackConnectors[i]; - const credentialsConfig = stackConnectorCredentials[i]; + let finalStackConnectorId: string; - if (!credentialsConfig) { - logger.warn( - `No credentials provided for connector index ${i} (type: ${connectorConfig.type}), skipping` - ); - continue; - } + // Pattern 1: Reuse existing stack connector (from flyout) + if (stackConnectorId) { + logger.info(`Reusing existing stack connector: ${stackConnectorId}`); + finalStackConnectorId = stackConnectorId; + } + // Pattern 2: Create new stack connector (direct API call) + else { + const toolRegistry = await agentBuilder.tools.getRegistry({ request }); + const stackConnectorConfig = dataSource.stackConnector; + const stackConnector: ActionResult = await createStackConnector( + toolRegistry, + actions, + request, + stackConnectorConfig, + name, + toolIds, + credentials, + logger + ); - // Reuse existing stack connector if ID is provided - if (credentialsConfig.existingConnectorId) { - logger.info( - `Reusing existing stack connector: ${credentialsConfig.existingConnectorId} for ${connectorConfig.type}` - ); - connectorRefs.push({ type: connectorConfig.type, id: credentialsConfig.existingConnectorId }); - } - // Create new stack connector - else { - logger.info(`Creating new stack connector for type: ${connectorConfig.type}`); - const stackConnector: ActionResult = await createStackConnector( - toolRegistry, - actions, - request, - connectorConfig, - name, - toolIds, - credentialsConfig.credentials, - logger - ); - connectorRefs.push({ type: connectorConfig.type, id: stackConnector.id }); - } + finalStackConnectorId = stackConnector.id; } - // Create workflows and tools using connector references 
(type + id) + // Create workflows and tools const spaceId = getSpaceId(savedObjectsClient); - const workflowInfos = dataSource.generateWorkflows(connectorRefs); + const workflowInfos = dataSource.generateWorkflows(finalStackConnectorId); + const toolRegistry = await agentBuilder.tools.getRegistry({ request }); logger.info(`Creating workflows and tools for data source '${name}'`); @@ -143,7 +119,7 @@ export async function createDataSourceAndRelatedResources( workflowIds.push(workflow.id); if (workflowInfo.shouldGenerateABTool) { - // Extract base workflow name (e.g., "sources.notion.search" -> "search") + // e.g., "sources.github.search_issues" -> "search_issues" const workflowBaseName = originalName.split('.').pop() || originalName; // Tool ID structure: type.data_source_name.workflow_base_name @@ -172,7 +148,7 @@ export async function createDataSourceAndRelatedResources( updatedAt: now, workflowIds, toolIds, - kscIds: connectorRefs.map((ref) => ref.id), + kscIds: [finalStackConnectorId], }); return savedObject.id; diff --git a/x-pack/platform/plugins/shared/data_sources/server/routes/index.test.ts b/x-pack/platform/plugins/shared/data_sources/server/routes/index.test.ts index 639894c202f4e..e9795d5bd12a2 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/routes/index.test.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/routes/index.test.ts @@ -246,7 +246,7 @@ describe('registerRoutes', () => { describe('POST /api/data_sources', () => { it('should create a new data source and call the helper with correct params', async () => { const mockDataSource = { - stackConnectors: [{ type: '.notion', required: true }], + stackConnector: { type: '.bearer_connector' }, generateWorkflows: jest.fn(), }; @@ -263,7 +263,7 @@ describe('registerRoutes', () => { body: { name: 'My Notion Data Source', type: 'notion', - connector_credentials: [{ connector_type: '.notion', credentials: 'secret-token-123' }], + credentials: 'secret-token-123', }, }); const mockResponse = httpServerMock.createResponseFactory(); @@ -274,9 +274,7 @@ describe('registerRoutes', () => { expect.objectContaining({ name: 'My Notion Data Source', type: 'notion', - stackConnectorCredentials: [ - { credentials: 'secret-token-123', existingConnectorId: undefined }, - ], + credentials: 'secret-token-123', dataSource: mockDataSource, }) ); @@ -300,7 +298,7 @@ describe('registerRoutes', () => { body: { name: 'Invalid Data Source', type: 'invalid-type', - connector_credentials: [{ connector_type: '.notion', credentials: 'token' }], + credentials: 'token', }, }); const mockResponse = httpServerMock.createResponseFactory(); @@ -318,7 +316,7 @@ describe('registerRoutes', () => { it('should handle errors during creation', async () => { const mockDataSource = { - stackConnectors: [{ type: '.notion', required: true }], + stackConnector: { type: '.bearer_connector' }, generateWorkflows: jest.fn(), }; @@ -337,7 +335,7 @@ describe('registerRoutes', () => { body: { name: 'Test Data Source', type: 'notion', - connector_credentials: [{ connector_type: '.notion', credentials: 'token' }], + credentials: 'token', }, }); const mockResponse = httpServerMock.createResponseFactory(); diff --git a/x-pack/platform/plugins/shared/data_sources/server/routes/index.ts b/x-pack/platform/plugins/shared/data_sources/server/routes/index.ts index b77c53ae2748e..c38390fd176dd 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/routes/index.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/routes/index.ts @@ -17,7 +17,6 @@ import { 
schema } from '@kbn/config-schema'; import { createDataSourceAndRelatedResources, deleteDataSourceAndRelatedResources, - type StackConnectorCredentials, } from './data_sources_helpers'; import type { DataSourceAttributes } from '../saved_objects'; import { DATA_SOURCE_SAVED_OBJECT_TYPE } from '../saved_objects'; @@ -152,7 +151,7 @@ export function registerRoutes(dependencies: RouteDependencies) { const coreContext = await context.core; try { - const { name, type, connector_credentials } = request.body; + const { name, type, credentials, stack_connector_id } = request.body; const [, { actions, dataCatalog, agentBuilder }] = await getStartServices(); const savedObjectsClient = coreContext.savedObjects.client; @@ -168,43 +167,11 @@ export function registerRoutes(dependencies: RouteDependencies) { }); } - // Build the stack connector credentials array by matching provided credentials - // to the stackConnectors defined in the data source - const stackConnectorCredentials: StackConnectorCredentials[] = []; - - for (const connectorConfig of dataSource.stackConnectors) { - // Find matching credentials from the request - const matchingCreds = connector_credentials.find( - (cred) => cred.connector_type === connectorConfig.type - ); - - // Check if this connector is required based on its role - // 'primary' and 'required' roles require credentials, 'optional' does not - const role = connectorConfig.role ?? 'required'; - const isRequired = role !== 'optional'; - - if (matchingCreds) { - // Credentials provided for this connector - stackConnectorCredentials.push({ - credentials: matchingCreds.credentials || '', - existingConnectorId: matchingCreds.existing_connector_id, - }); - } else if (isRequired) { - // Required connector but no credentials provided - return response.badRequest({ - body: { - message: `Missing credentials for required connector type "${connectorConfig.type}"`, - }, - }); - } - // Optional connector with no credentials - skip it - } - - // Validate we have a name - if (!name && stackConnectorCredentials.length > 0) { + // Validate required fields based on pattern + if (!stack_connector_id && (!name || !credentials)) { return response.badRequest({ body: { - message: 'name is required when creating connectors', + message: 'name and token are required when stack_connector_id is not provided', }, }); } @@ -212,7 +179,8 @@ export function registerRoutes(dependencies: RouteDependencies) { const dataSourceId = await createDataSourceAndRelatedResources({ name: name || `Data source for ${type}`, type, - stackConnectorCredentials, + credentials: credentials || '', + stackConnectorId: stack_connector_id, savedObjectsClient, request, logger, diff --git a/x-pack/platform/plugins/shared/data_sources/server/routes/schema.ts b/x-pack/platform/plugins/shared/data_sources/server/routes/schema.ts index 6f59ea0930bfc..7edd2b4cbfc06 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/routes/schema.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/routes/schema.ts @@ -43,27 +43,9 @@ export function convertSOtoAPIResponse( }; } -/** - * Schema for credentials for a single connector - */ -const connectorCredentialsSchema = schema.object({ - /** The connector type (e.g., '.google_drive', '.jina') */ - connector_type: schema.string({ minLength: 1 }), - /** The credentials (token, API key, etc.) 
for this connector */ - credentials: schema.maybe(schema.string({ minLength: 1 })), - /** Optional: Use an existing stack connector ID instead of creating a new one */ - existing_connector_id: schema.maybe(schema.string({ minLength: 1 })), -}); - export const createDataSourceRequestSchema = schema.object({ - /** The data source type (e.g., 'google_drive', 'notion') */ type: schema.string({ minLength: 1 }), - /** Display name for the data source */ name: schema.maybe(schema.string({ minLength: 1 })), - /** - * Credentials for each connector required by this data source. - * The array should match the stackConnectors defined in the data source definition. - * Optional connectors can be omitted. - */ - connector_credentials: schema.arrayOf(connectorCredentialsSchema, { maxSize: 10 }), + credentials: schema.maybe(schema.string({ minLength: 1 })), + stack_connector_id: schema.maybe(schema.string({ minLength: 1 })), }); diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/github/data_type.ts b/x-pack/platform/plugins/shared/data_sources/server/sources/github/data_type.ts index aa79a92265ec8..36180fb362ab5 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/sources/github/data_type.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/github/data_type.ts @@ -7,7 +7,7 @@ import { i18n } from '@kbn/i18n'; import { MCPAuthType } from '@kbn/connector-schemas/mcp'; -import type { DataSource, ConnectorReference } from '@kbn/data-catalog-plugin'; +import type { DataSource } from '@kbn/data-catalog-plugin'; import { EARSSupportedOAuthProvider } from '@kbn/data-catalog-plugin'; import { generateGithubSearchIssuesWorkflow, @@ -23,6 +23,7 @@ export const githubDataSource: DataSource = { description: i18n.translate('xpack.dataSources.github.description', { defaultMessage: 'Connect to Github to pull data from your repository.', }), + iconType: '.github', oauthConfiguration: { @@ -32,68 +33,53 @@ export const githubDataSource: DataSource = { oauthBaseUrl: 'https://localhost:8052', // update once EARS deploys to QA }, - stackConnectors: [ - { - type: '.mcp', - config: { - serverUrl: 'https://api.githubcopilot.com/mcp/', - hasAuth: true, - authType: MCPAuthType.Bearer, - }, - importedTools: [ - 'get_commit', - 'get_file_contents', - 'get_label', - 'get_latest_release', - 'get_me', - 'get_tag', - 'get_team_members', - 'get_teams', - 'list_branches', - 'list_commits', - 'list_issue_types', - 'list_issues', - 'list_pull_requests', - 'list_releases', - 'list_tags', - 'pull_request_read', - ], - role: 'primary', - name: 'GitHub', - description: i18n.translate('xpack.dataSources.github.connectorDescription', { - defaultMessage: - 'Connect to GitHub to access repositories, issues, pull requests, and more.', - }), + stackConnector: { + type: '.mcp', + config: { + serverUrl: 'https://api.githubcopilot.com/mcp/', + hasAuth: true, + authType: MCPAuthType.Bearer, }, - ], - - generateWorkflows(connectors: ConnectorReference[]) { - // GitHub uses MCP connector type - const github = connectors.find((c) => c.type === '.mcp'); - - if (!github) { - throw new Error('GitHub MCP connector is required for GitHub data source'); - } + importedTools: [ + 'get_commit', + 'get_file_contents', + 'get_label', + 'get_latest_release', + 'get_me', + 'get_tag', + 'get_team_members', + 'get_teams', + 'list_branches', + 'list_commits', + 'list_issue_types', + 'list_issues', + 'list_pull_requests', + 'list_releases', + 'list_tags', + 'pull_request_read', + ], + }, + generateWorkflows(stackConnectorId: 
string) { return [ { - content: generateGithubSearchIssuesWorkflow(github.id), + content: generateGithubSearchIssuesWorkflow(stackConnectorId), shouldGenerateABTool: true, }, { - content: generateGithubSearchCodeWorkflow(github.id), + content: generateGithubSearchCodeWorkflow(stackConnectorId), shouldGenerateABTool: true, }, { - content: generateGithubSearchPullRequestsWorkflow(github.id), + content: generateGithubSearchPullRequestsWorkflow(stackConnectorId), shouldGenerateABTool: true, }, { - content: generateGithubSearchRepositoriesWorkflow(github.id), + content: generateGithubSearchRepositoriesWorkflow(stackConnectorId), shouldGenerateABTool: true, }, { - content: generateGithubSearchUsersWorkflow(github.id), + content: generateGithubSearchUsersWorkflow(stackConnectorId), shouldGenerateABTool: true, }, ]; diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/data_type.ts b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/data_type.ts index 54ce05c7953d6..473b1767323c4 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/data_type.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/data_type.ts @@ -6,13 +6,12 @@ */ import { i18n } from '@kbn/i18n'; -import type { DataSource, ConnectorReference } from '@kbn/data-catalog-plugin'; +import type { DataSource } from '@kbn/data-catalog-plugin'; import { EARSSupportedOAuthProvider } from '@kbn/data-catalog-plugin'; import { generateGoogleDriveSearchFilesWorkflow, generateGoogleDriveListFilesWorkflow, - generateGoogleDriveDownloadFilesWithJinaWorkflow, - generateGoogleDriveDownloadFilesWithIngestSimulateWorkflow, + generateGoogleDriveDownloadFilesWorkflow, } from './workflows'; export const googleDriveDataSource: DataSource = { @@ -30,68 +29,25 @@ export const googleDriveDataSource: DataSource = { oauthBaseUrl: 'https://localhost:8052', // update once EARS deploys to QA }, - stackConnectors: [ - { - type: '.google_drive', - config: {}, - role: 'primary', - name: 'Google Drive', - description: i18n.translate('xpack.dataSources.googleDrive.connectorDescription', { - defaultMessage: 'Connect to Google Drive to search and access your files and folders.', - }), - }, - { - type: '.jina', - config: {}, - role: 'optional', - name: 'Jina Reader', - description: i18n.translate('xpack.dataSources.googleDrive.jinaDescription', { - defaultMessage: - 'Enable high-quality extraction of file contents to markdown using Jina Reader. This provides better handling of complex document layouts and semantic chunking. Get a free API key at jina.ai/reader', - }), - skipDescription: i18n.translate('xpack.dataSources.googleDrive.jinaSkipDescription', { - defaultMessage: - "File content extraction will use Elasticsearch's built-in attachment processor instead. 
This provides basic text extraction but may not handle complex layouts as well as Jina Reader.", - }), - }, - ], - - generateWorkflows(connectors: ConnectorReference[]) { - // Find connectors by type (not position) for reliable matching - const googleDrive = connectors.find((c) => c.type === '.google_drive'); - const jina = connectors.find((c) => c.type === '.jina'); - - if (!googleDrive) { - throw new Error('Google Drive connector is required for Google Drive data source'); - } + stackConnector: { + type: '.google_drive', + config: {}, + }, - const workflows = [ + generateWorkflows(stackConnectorId: string) { + return [ { - content: generateGoogleDriveSearchFilesWorkflow(googleDrive.id), + content: generateGoogleDriveSearchFilesWorkflow(stackConnectorId), shouldGenerateABTool: true, }, { - content: generateGoogleDriveListFilesWorkflow(googleDrive.id), + content: generateGoogleDriveListFilesWorkflow(stackConnectorId), shouldGenerateABTool: true, }, - ]; - - // Add download workflow - uses Jina Reader if configured for high-quality extraction, - // otherwise falls back to Elasticsearch's ingest.simulate with attachment processor - if (jina) { - // Jina Reader connector was configured - use it for high-quality extraction - workflows.push({ - content: generateGoogleDriveDownloadFilesWithJinaWorkflow(googleDrive.id, jina.id), - shouldGenerateABTool: true, - }); - } else { - // No Jina Reader - use Elasticsearch's built-in attachment processor as fallback - workflows.push({ - content: generateGoogleDriveDownloadFilesWithIngestSimulateWorkflow(googleDrive.id), + { + content: generateGoogleDriveDownloadFilesWorkflow(stackConnectorId), shouldGenerateABTool: true, - }); - } - - return workflows; + }, + ]; }, }; diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows.ts b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows.ts index 9eaa57260226c..697c8a954fa95 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows.ts @@ -71,112 +71,16 @@ steps: `; } -/** - * Generates a composite workflow that downloads files from Google Drive - * and extracts their content using Jina Reader for LLM consumption. - * Processes multiple files and returns an array of results. - * Optionally reranks results by relevance to reduce context window usage. - */ -export function generateGoogleDriveDownloadFilesWithJinaWorkflow( - googleDriveConnectorId: string, - jinaConnectorId: string -): string { - return `version: '1' -name: 'sources.google_drive.download' -description: Download files and extract their text content to readable markdown (best for PDFs, Word docs, etc.). You can optionally set rerank to true and specify topK to use semantic reranking - this is useful when downloading many documents and you want to avoid using too much of your context window by only keeping the top K most relevant documents based on the rerankQuery. -enabled: true -triggers: - - type: manual -inputs: - - name: fileIds - type: array - description: Array of file IDs from search or list results. Works with PDFs, Office docs, Google Docs, images with text, and more - - name: rerank - type: boolean - required: false - default: false - description: Set to true to rerank results by relevance to rerankQuery. Useful when downloading many documents to reduce context window usage by keeping only the most relevant ones. 
- - name: topK - type: number - required: false - default: 5 - description: When rerank is true, return only the top K most relevant documents after reranking. - - name: rerankQuery - type: string - required: false - description: The query to rerank documents against. Required when rerank is true. Documents will be scored by relevance to this query. -steps: - - name: init_results - type: data.set - with: - results: [] - - name: process_files - type: foreach - foreach: "\${{inputs.fileIds}}" - steps: - - name: download_file - type: google_drive.downloadFile - connector-id: ${googleDriveConnectorId} - with: - fileId: "\${{foreach.item}}" - - name: convert_to_markdown - type: jina.fileToMarkdown - connector-id: ${jinaConnectorId} - with: - file: "\${{steps.download_file.output.content}}" - filename: "\${{steps.download_file.output.name}}" - - name: normalize_result - type: data.set - with: - normalized: - fileId: "\${{foreach.item}}" - filename: "\${{steps.download_file.output.name}}" - content: "\${{steps.convert_to_markdown.output.content}}" - - name: accumulate_result - type: data.set - with: - results: '\${{variables.results | push: variables.normalized}}' - - name: conditional_rerank - type: if - condition: "\${{inputs.rerank}}" - steps: - - name: do_rerank - type: search.rerank - with: - rerank_text: "\${{inputs.rerankQuery}}" - data: \${{variables.results}} - fields: - - ["content"] - rank_window_size: \${{inputs.topK}} - - name: store_reranked - type: data.set - with: - final_results: "\${{steps.do_rerank.output}}" - else: - - name: store_all - type: data.set - with: - final_results: "\${{variables.results}}" - - name: output_results - type: data.set - with: - results: "\${{variables.final_results}}" -`; -} - /** * Generates a composite workflow that downloads files from Google Drive * and extracts their content using Elasticsearch's attachment processor * via the ingest pipeline simulate API. * - * This is the fallback when no Jina Reader connector is configured. * Uses Apache Tika under the hood for text extraction. * Processes multiple files and returns an array of results. * Optionally reranks results by relevance to reduce context window usage. */ -export function generateGoogleDriveDownloadFilesWithIngestSimulateWorkflow( - googleDriveConnectorId: string -): string { +export function generateGoogleDriveDownloadFilesWorkflow(googleDriveConnectorId: string): string { return `version: '1' name: 'sources.google_drive.download' description: Download files and extract their text content (best for PDFs, Word docs, etc.). You can optionally set rerank to true and specify topK to use semantic reranking - this is useful when downloading many documents and you want to avoid using too much of your context window by only keeping the top K most relevant documents based on the rerankQuery. 
diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/notion/data_type.ts b/x-pack/platform/plugins/shared/data_sources/server/sources/notion/data_type.ts index 306417c1af184..907cb6f84cf13 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/sources/notion/data_type.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/notion/data_type.ts @@ -6,7 +6,7 @@ */ import { i18n } from '@kbn/i18n'; -import type { DataSource, ConnectorReference } from '@kbn/data-catalog-plugin'; +import type { DataSource } from '@kbn/data-catalog-plugin'; import { EARSSupportedOAuthProvider } from '@kbn/data-catalog-plugin'; import { generateGetDataSourceWorkflow, @@ -21,6 +21,7 @@ export const notionDataSource: DataSource = { description: i18n.translate('xpack.dataSources.notion.description', { defaultMessage: 'Connect to Notion to pull data from your workspace.', }), + iconType: '.notion', oauthConfiguration: { @@ -30,30 +31,17 @@ export const notionDataSource: DataSource = { oauthBaseUrl: 'https://localhost:8052', }, - stackConnectors: [ - { - type: '.notion', - config: {}, - role: 'primary', - name: 'Notion', - description: i18n.translate('xpack.dataSources.notion.connectorDescription', { - defaultMessage: 'Connect to Notion to access pages, databases, and workspace content.', - }), - }, - ], - - generateWorkflows(connectors: ConnectorReference[]) { - const notion = connectors.find((c) => c.type === '.notion'); - - if (!notion) { - throw new Error('Notion connector is required for Notion data source'); - } + stackConnector: { + type: '.notion', + config: {}, + }, + generateWorkflows(stackConnectorId: string) { return [ - { content: generateQueryWorkflow(notion.id), shouldGenerateABTool: true }, - { content: generateSearchWorkflow(notion.id), shouldGenerateABTool: true }, - { content: generateGetPageWorkflow(notion.id), shouldGenerateABTool: true }, - { content: generateGetDataSourceWorkflow(notion.id), shouldGenerateABTool: true }, + { content: generateQueryWorkflow(stackConnectorId), shouldGenerateABTool: true }, + { content: generateSearchWorkflow(stackConnectorId), shouldGenerateABTool: true }, + { content: generateGetPageWorkflow(stackConnectorId), shouldGenerateABTool: true }, + { content: generateGetDataSourceWorkflow(stackConnectorId), shouldGenerateABTool: true }, ]; }, }; From e325024eae99bc8fa253780b7617e7900c9ea793 Mon Sep 17 00:00:00 2001 From: Apostolos Matsagkas Date: Fri, 6 Feb 2026 05:13:00 +0200 Subject: [PATCH 06/11] migrate google drive workflows to yaml --- .../data_sources/server/routes/index.test.ts | 4 +- .../server/sources/google_drive/data_type.ts | 22 +-- .../server/sources/google_drive/workflows.ts | 177 ------------------ .../google_drive/workflows/download.yaml | 93 +++++++++ .../sources/google_drive/workflows/list.yaml | 33 ++++ .../google_drive/workflows/search.yaml | 27 +++ 6 files changed, 157 insertions(+), 199 deletions(-) delete mode 100644 x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows.ts create mode 100644 x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows/download.yaml create mode 100644 x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows/list.yaml create mode 100644 x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows/search.yaml diff --git a/x-pack/platform/plugins/shared/data_sources/server/routes/index.test.ts b/x-pack/platform/plugins/shared/data_sources/server/routes/index.test.ts index 
7fcb9dcd25584..d8043a645889b 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/routes/index.test.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/routes/index.test.ts @@ -515,7 +515,7 @@ describe('registerRoutes', () => { it('should create a new data source and call the helper with correct params', async () => { const mockDataSource = { stackConnector: { type: '.bearer_connector' }, - generateWorkflows: jest.fn(), + workflows: { directory: '/mock/workflows' }, }; mockDataCatalog.getCatalog.mockReturnValue({ @@ -585,7 +585,7 @@ describe('registerRoutes', () => { it('should handle errors during creation', async () => { const mockDataSource = { stackConnector: { type: '.bearer_connector' }, - generateWorkflows: jest.fn(), + workflows: { directory: '/mock/workflows' }, }; mockDataCatalog.getCatalog.mockReturnValue({ diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/data_type.ts b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/data_type.ts index 473b1767323c4..99ad26b43af74 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/data_type.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/data_type.ts @@ -8,11 +8,6 @@ import { i18n } from '@kbn/i18n'; import type { DataSource } from '@kbn/data-catalog-plugin'; import { EARSSupportedOAuthProvider } from '@kbn/data-catalog-plugin'; -import { - generateGoogleDriveSearchFilesWorkflow, - generateGoogleDriveListFilesWorkflow, - generateGoogleDriveDownloadFilesWorkflow, -} from './workflows'; export const googleDriveDataSource: DataSource = { id: 'google_drive', @@ -34,20 +29,7 @@ export const googleDriveDataSource: DataSource = { config: {}, }, - generateWorkflows(stackConnectorId: string) { - return [ - { - content: generateGoogleDriveSearchFilesWorkflow(stackConnectorId), - shouldGenerateABTool: true, - }, - { - content: generateGoogleDriveListFilesWorkflow(stackConnectorId), - shouldGenerateABTool: true, - }, - { - content: generateGoogleDriveDownloadFilesWorkflow(stackConnectorId), - shouldGenerateABTool: true, - }, - ]; + workflows: { + directory: __dirname + '/workflows', }, }; diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows.ts b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows.ts deleted file mode 100644 index 697c8a954fa95..0000000000000 --- a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows.ts +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -export function generateGoogleDriveSearchFilesWorkflow(stackConnectorId: string): string { - return `version: '1' -name: 'sources.google_drive.search' -description: Search for files in Google Drive using Google's query syntax -enabled: true -triggers: - - type: manual -inputs: - - name: query - type: string - description: "Google Drive query. Use fullText contains 'term' for content search, name contains 'term' for filename, mimeType='application/pdf' for type filtering, modifiedTime > '2024-01-01' for date filtering. Combine with 'and'/'or'." 
- - name: pageSize - type: number - required: false - description: Number of results to return (default 250, max 1000) - - name: pageToken - type: string - required: false - description: "Pagination token. Pass the 'nextPageToken' value from a previous response to get the next page. When nextPageToken is absent in the response, there are no more results." -steps: - - name: search_files - type: google_drive.searchFiles - connector-id: ${stackConnectorId} - with: - query: "\${{inputs.query}}" - pageSize: \${{inputs.pageSize}} - pageToken: "\${{inputs.pageToken}}" -`; -} - -export function generateGoogleDriveListFilesWorkflow(stackConnectorId: string): string { - return `version: '1' -name: 'sources.google_drive.list' -description: List files and subfolders in a Google Drive folder -enabled: true -triggers: - - type: manual -inputs: - - name: folderId - type: string - default: root - description: "Folder ID to list contents of. Use 'root' for the root folder, or a folder ID from search/list results" - - name: pageSize - type: number - required: false - description: Number of results to return (default 250, max 1000) - - name: pageToken - type: string - required: false - description: "Pagination token. Pass the 'nextPageToken' value from a previous response to get the next page. When nextPageToken is absent in the response, there are no more results." - - name: orderBy - type: string - required: false - description: "Sort order: 'name', 'modifiedTime', or 'createdTime'" -steps: - - name: list_files - type: google_drive.listFiles - connector-id: ${stackConnectorId} - with: - folderId: "\${{inputs.folderId}}" - pageSize: \${{inputs.pageSize}} - pageToken: "\${{inputs.pageToken}}" - orderBy: "\${{inputs.orderBy}}" -`; -} - -/** - * Generates a composite workflow that downloads files from Google Drive - * and extracts their content using Elasticsearch's attachment processor - * via the ingest pipeline simulate API. - * - * Uses Apache Tika under the hood for text extraction. - * Processes multiple files and returns an array of results. - * Optionally reranks results by relevance to reduce context window usage. - */ -export function generateGoogleDriveDownloadFilesWorkflow(googleDriveConnectorId: string): string { - return `version: '1' -name: 'sources.google_drive.download' -description: Download files and extract their text content (best for PDFs, Word docs, etc.). You can optionally set rerank to true and specify topK to use semantic reranking - this is useful when downloading many documents and you want to avoid using too much of your context window by only keeping the top K most relevant documents based on the rerankQuery. -enabled: true -triggers: - - type: manual -inputs: - - name: fileIds - type: array - description: Array of file IDs from search or list results. Works with PDFs, Office docs, Google Docs, and other text-based formats - - name: rerank - type: boolean - required: false - default: false - description: Set to true to rerank results by relevance to rerankQuery. Useful when downloading many documents to reduce context window usage by keeping only the most relevant ones. - - name: topK - type: number - required: false - default: 5 - description: When rerank is true, return only the top K most relevant documents after reranking. - - name: rerankQuery - type: string - required: false - description: The query to rerank documents against. Required when rerank is true. Documents will be scored by relevance to this query. 
-steps: - - name: init_results - type: data.set - with: - results: [] - - name: process_files - type: foreach - foreach: "\${{inputs.fileIds}}" - steps: - - name: download_file - type: google_drive.downloadFile - connector-id: ${googleDriveConnectorId} - with: - fileId: "\${{foreach.item}}" - - name: extract_content - type: elasticsearch.request - with: - method: POST - path: /_ingest/pipeline/_simulate - body: - pipeline: - processors: - - attachment: - field: data - indexed_chars: -1 - remove_binary: true - docs: - - _id: "\${{foreach.item}}" - _source: - filename: "\${{steps.download_file.output.name}}" - data: "\${{steps.download_file.output.content}}" - - name: normalize_result - type: data.set - with: - normalized: - fileId: "\${{foreach.item}}" - filename: "\${{steps.download_file.output.name}}" - content: "\${{steps.extract_content.output.docs[0].doc._source.attachment.content}}" - content_type: "\${{steps.extract_content.output.docs[0].doc._source.attachment.content_type}}" - - name: accumulate_result - type: data.set - with: - results: '\${{variables.results | push: variables.normalized}}' - - name: conditional_rerank - type: if - condition: "\${{inputs.rerank}}" - steps: - - name: do_rerank - type: search.rerank - with: - rerank_text: "\${{inputs.rerankQuery}}" - data: \${{variables.results}} - fields: - - ["content"] - rank_window_size: \${{inputs.topK}} - - name: store_reranked - type: data.set - with: - final_results: "\${{steps.do_rerank.output}}" - else: - - name: store_all - type: data.set - with: - final_results: "\${{variables.results}}" - - name: output_results - type: data.set - with: - results: "\${{variables.final_results}}" -`; -} diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows/download.yaml b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows/download.yaml new file mode 100644 index 0000000000000..bd7171762672a --- /dev/null +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows/download.yaml @@ -0,0 +1,93 @@ +version: '1' +name: 'sources.google_drive.download' +description: Download files and extract their text content (best for PDFs, Word docs, etc.). You can optionally set rerank to true and specify topK to use semantic reranking - this is useful when downloading many documents and you want to avoid using too much of your context window by only keeping the top K most relevant documents based on the rerankQuery. +tags: ['agent-builder-tool'] +enabled: true +triggers: + - type: manual +inputs: + - name: fileIds + type: array + description: Array of file IDs from search or list results. Works with PDFs, Office docs, Google Docs, and other text-based formats + - name: rerank + type: boolean + required: false + default: false + description: Set to true to rerank results by relevance to rerankQuery. Useful when downloading many documents to reduce context window usage by keeping only the most relevant ones. + - name: topK + type: number + required: false + default: 5 + description: When rerank is true, return only the top K most relevant documents after reranking. + - name: rerankQuery + type: string + required: false + description: The query to rerank documents against. Required when rerank is true. Documents will be scored by relevance to this query. 
+steps: + - name: init_results + type: data.set + with: + results: [] + - name: process_files + type: foreach + foreach: "${{inputs.fileIds}}" + steps: + - name: download_file + type: google_drive.downloadFile + connector-id: <%= stackConnectorId %> + with: + fileId: "${{foreach.item}}" + - name: extract_content + type: elasticsearch.request + with: + method: POST + path: /_ingest/pipeline/_simulate + body: + pipeline: + processors: + - attachment: + field: data + indexed_chars: -1 + remove_binary: true + docs: + - _id: "${{foreach.item}}" + _source: + filename: "${{steps.download_file.output.name}}" + data: "${{steps.download_file.output.content}}" + - name: normalize_result + type: data.set + with: + normalized: + fileId: "${{foreach.item}}" + filename: "${{steps.download_file.output.name}}" + content: "${{steps.extract_content.output.docs[0].doc._source.attachment.content}}" + content_type: "${{steps.extract_content.output.docs[0].doc._source.attachment.content_type}}" + - name: accumulate_result + type: data.set + with: + results: '${{variables.results | push: variables.normalized}}' + - name: conditional_rerank + type: if + condition: "${{inputs.rerank}}" + steps: + - name: do_rerank + type: search.rerank + with: + rerank_text: "${{inputs.rerankQuery}}" + data: ${{variables.results}} + fields: + - ["content"] + rank_window_size: ${{inputs.topK}} + - name: store_reranked + type: data.set + with: + final_results: "${{steps.do_rerank.output}}" + else: + - name: store_all + type: data.set + with: + final_results: "${{variables.results}}" + - name: output_results + type: data.set + with: + results: "${{variables.final_results}}" diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows/list.yaml b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows/list.yaml new file mode 100644 index 0000000000000..26a449ae06e96 --- /dev/null +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows/list.yaml @@ -0,0 +1,33 @@ +version: '1' +name: 'sources.google_drive.list' +description: List files and subfolders in a Google Drive folder +tags: ['agent-builder-tool'] +enabled: true +triggers: + - type: manual +inputs: + - name: folderId + type: string + default: root + description: "Folder ID to list contents of. Use 'root' for the root folder, or a folder ID from search/list results" + - name: pageSize + type: number + required: false + description: Number of results to return (default 250, max 1000) + - name: pageToken + type: string + required: false + description: "Pagination token. Pass the 'nextPageToken' value from a previous response to get the next page. When nextPageToken is absent in the response, there are no more results." 
+ - name: orderBy + type: string + required: false + description: "Sort order: 'name', 'modifiedTime', or 'createdTime'" +steps: + - name: list_files + type: google_drive.listFiles + connector-id: <%= stackConnectorId %> + with: + folderId: "${{inputs.folderId}}" + pageSize: ${{inputs.pageSize}} + pageToken: "${{inputs.pageToken}}" + orderBy: "${{inputs.orderBy}}" diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows/search.yaml b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows/search.yaml new file mode 100644 index 0000000000000..75717fce2a13d --- /dev/null +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows/search.yaml @@ -0,0 +1,27 @@ +version: '1' +name: 'sources.google_drive.search' +description: Search for files in Google Drive using Google's query syntax +tags: ['agent-builder-tool'] +enabled: true +triggers: + - type: manual +inputs: + - name: query + type: string + description: "Google Drive query. Use fullText contains 'term' for content search, name contains 'term' for filename, mimeType='application/pdf' for type filtering, modifiedTime > '2024-01-01' for date filtering. Combine with 'and'/'or'." + - name: pageSize + type: number + required: false + description: Number of results to return (default 250, max 1000) + - name: pageToken + type: string + required: false + description: "Pagination token. Pass the 'nextPageToken' value from a previous response to get the next page. When nextPageToken is absent in the response, there are no more results." +steps: + - name: search_files + type: google_drive.searchFiles + connector-id: <%= stackConnectorId %> + with: + query: "${{inputs.query}}" + pageSize: ${{inputs.pageSize}} + pageToken: "${{inputs.pageToken}}" From 0e7c276f13b7a530d03187986302f8f501f83b75 Mon Sep 17 00:00:00 2001 From: Apostolos Matsagkas Date: Fri, 6 Feb 2026 06:06:38 +0200 Subject: [PATCH 07/11] Revert changes to tests --- .../data_sources/server/routes/index.test.ts | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/x-pack/platform/plugins/shared/data_sources/server/routes/index.test.ts b/x-pack/platform/plugins/shared/data_sources/server/routes/index.test.ts index d8043a645889b..9de3df681097e 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/routes/index.test.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/routes/index.test.ts @@ -60,9 +60,7 @@ describe('registerRoutes', () => { }, }; const mockDataCatalog = { - getCatalog: jest.fn().mockReturnValue({ - get: jest.fn().mockReturnValue({ iconType: '.notion' }), - }), + getCatalog: jest.fn(), }; const mockGetStartServices = jest.fn().mockResolvedValue([ @@ -146,6 +144,11 @@ describe('registerRoutes', () => { page: 1, }); + // Mock catalog to return iconType + mockDataCatalog.getCatalog.mockReturnValue({ + get: jest.fn().mockReturnValue({ iconType: '.notion' }), + }); + registerRoutes(dependencies); const routeHandler = mockRouter.get.mock.calls[0][1]; @@ -467,6 +470,11 @@ describe('registerRoutes', () => { mockSavedObjectsClient.get.mockResolvedValue(mockDataSource); + // Mock catalog to return iconType + mockDataCatalog.getCatalog.mockReturnValue({ + get: jest.fn().mockReturnValue({ iconType: '.notion' }), + }); + registerRoutes(dependencies); const routeHandler = mockRouter.get.mock.calls[1][1]; @@ -515,7 +523,7 @@ describe('registerRoutes', () => { it('should create a new data source and call the helper with correct params', async () => { const 
mockDataSource = { stackConnector: { type: '.bearer_connector' }, - workflows: { directory: '/mock/workflows' }, + generateWorkflows: jest.fn(), }; mockDataCatalog.getCatalog.mockReturnValue({ @@ -585,7 +593,7 @@ describe('registerRoutes', () => { it('should handle errors during creation', async () => { const mockDataSource = { stackConnector: { type: '.bearer_connector' }, - workflows: { directory: '/mock/workflows' }, + generateWorkflows: jest.fn(), }; mockDataCatalog.getCatalog.mockReturnValue({ From 0541dad6b18ad670f470d5ad81acd29d26522995 Mon Sep 17 00:00:00 2001 From: Apostolos Matsagkas Date: Mon, 9 Feb 2026 13:16:57 +0200 Subject: [PATCH 08/11] Address review comments --- .github/CODEOWNERS | 1 + .../data-context-sources-connectors-list.md | 1 + .../google-drive-action-type.md | 67 ++ docs/reference/toc.yml | 1 + .../specs/google_drive/google_drive.test.ts | 665 ++++++++++++++++++ .../src/specs/google_drive/google_drive.ts | 119 +++- .../sharepoint_online/sharepoint_online.ts | 1 - .../google_drive/workflows/metadata.yaml | 17 + .../google_drive/workflows/search.yaml | 7 +- 9 files changed, 855 insertions(+), 24 deletions(-) create mode 100644 docs/reference/connectors-kibana/google-drive-action-type.md create mode 100644 src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.test.ts create mode 100644 x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows/metadata.yaml diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 16d8e44a425bf..13590bb05ad0a 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -2410,6 +2410,7 @@ src/platform/packages/shared/kbn-connector-specs/src/specs/** src/platform/packages/shared/kbn-connector-specs/src/specs/abuseipdb/** @elastic/workflows-eng src/platform/packages/shared/kbn-connector-specs/src/specs/alienvault_otx/** @elastic/workflows-eng src/platform/packages/shared/kbn-connector-specs/src/specs/brave_search/** @elastic/workchat-eng +src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/** @elastic/workchat-eng src/platform/packages/shared/kbn-connector-specs/src/specs/greynoise/** @elastic/workflows-eng src/platform/packages/shared/kbn-connector-specs/src/specs/jina/** @elastic/jinastic src/platform/packages/shared/kbn-connector-specs/src/specs/notion/** @elastic/workchat-eng diff --git a/docs/reference/connectors-kibana/_snippets/data-context-sources-connectors-list.md b/docs/reference/connectors-kibana/_snippets/data-context-sources-connectors-list.md index 5a09a5d2dad04..5690f7a21507b 100644 --- a/docs/reference/connectors-kibana/_snippets/data-context-sources-connectors-list.md +++ b/docs/reference/connectors-kibana/_snippets/data-context-sources-connectors-list.md @@ -1,6 +1,7 @@ **Third-party search** * [Brave Search](/reference/connectors-kibana/brave-search-action-type.md): Search the web using the Brave Search API. * [Jina Reader](/reference/connectors-kibana/jina-action-type.md): Convert web pages into markdown from their URL and search the web for better LLM grounding. +* [Google Drive](/reference/connectors-kibana/google-drive-action-type.md): Search and access files and folders in Google Drive. * [Notion](/reference/connectors-kibana/notion-action-type.md): Explore content and databases in Notion. * [Sharepoint online](/reference/connectors-kibana/sharepoint-online-action-type.md): Search across SharePoint sites, pages, and content using the Microsoft Graph API. 
diff --git a/docs/reference/connectors-kibana/google-drive-action-type.md b/docs/reference/connectors-kibana/google-drive-action-type.md new file mode 100644 index 0000000000000..26570a9ba9ab0 --- /dev/null +++ b/docs/reference/connectors-kibana/google-drive-action-type.md @@ -0,0 +1,67 @@ +--- +navigation_title: "Google Drive" +mapped_pages: + - https://www.elastic.co/guide/en/kibana/current/google-drive-action-type.html +applies_to: + stack: preview 9.4 + serverless: preview +--- + +# Google Drive connector [google-drive-action-type] + +The Google Drive connector enables searching and accessing files and folders in Google Drive. + +## Create connectors in {{kib}} [define-google-drive-ui] + +You can create connectors in **{{stack-manage-app}} > {{connectors-ui}}**. + +### Connector configuration [google-drive-connector-configuration] + +Google Drive connectors have the following configuration properties: + +Bearer Token +: A Google OAuth 2.0 access token with Google Drive API scopes. See [Get API credentials](#google-drive-api-credentials) for instructions. + +## Test connectors [google-drive-action-configuration] + +You can test connectors as you're creating or editing the connector in {{kib}}. The test verifies connectivity by fetching the authenticated user's information from the Google Drive API. + +The Google Drive connector has the following actions: + +Search files +: Search for files in Google Drive using Google's query syntax. + - **query** (required): Google Drive query string. Use `fullText contains 'term'` for content search, `name contains 'term'` for filename search, `mimeType='application/pdf'` for type filtering, `modifiedTime > '2024-01-01'` for date filtering. Combine with `and`/`or`. + - **pageSize** (optional): Maximum number of files to return (1–1000). Defaults to 250. + - **pageToken** (optional): Token for pagination from a previous response. + - **orderBy** (optional): Sort order. Valid values: `createdTime`, `createdTime desc`, `modifiedTime`, `modifiedTime desc`, `name`, `name desc`. + +List files +: List files and subfolders in a Google Drive folder. + - **folderId** (optional): Parent folder ID. Use `root` for the root folder, or a folder ID from search/list results. Defaults to `root`. + - **pageSize** (optional): Maximum number of files to return (1–1000). Defaults to 250. + - **pageToken** (optional): Token for pagination from a previous response. + - **orderBy** (optional): Sort order: `name`, `modifiedTime`, or `createdTime`. + - **includeTrashed** (optional): Include trashed files in results. Defaults to `false`. + +Download file +: Download a file's content. For native files (PDF, DOCX, etc.), downloads the file directly. For Google Workspace documents (Docs, Sheets, Slides), exports to a standard format (PDF for documents, XLSX for spreadsheets). + - **fileId** (required): The ID of the file to download. + +Get file metadata +: Get detailed metadata for specific files, including ownership, sharing status, permissions, and descriptions. Use after search or list to inspect specific files. + - **fileIds** (required): Array of file IDs to fetch metadata for. Returns: `id`, `name`, `mimeType`, `size`, `createdTime`, `modifiedTime`, `owners`, `lastModifyingUser`, `sharingUser`, `shared`, `starred`, `trashed`, `permissions`, `description`, `parents`, `labelInfo`, `webViewLink`. 
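+For example, a workflow step that calls the **Search files** action might look like the following sketch; the `connector-id` value here is only a placeholder for your own Google Drive connector ID, and the query combines the operators described above to find non-trashed PDFs mentioning "quarterly report":
+
+```yaml
+steps:
+  - name: search_files
+    type: google_drive.searchFiles
+    connector-id: my-google-drive-connector # placeholder: replace with your connector's ID
+    with:
+      # Google Drive query syntax as documented for the Search files action
+      query: "fullText contains 'quarterly report' and mimeType='application/pdf' and trashed=false"
+      pageSize: 50
+```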
+ +## Connector networking configuration [google-drive-connector-networking-configuration] + +Use the [Action configuration settings](/reference/configuration-reference/alerting-settings.md#action-settings) to customize connector networking configurations, such as proxies, certificates, or TLS settings. You can set configurations that apply to all your connectors or use `xpack.actions.customHostSettings` to set per-host configurations. + +## Get API credentials [google-drive-api-credentials] + +To use the Google Drive connector, you need a Google OAuth 2.0 access token with Drive API scopes. You can obtain one using the [Google OAuth 2.0 Playground](https://developers.google.com/oauthplayground/): + +1. Open the [OAuth 2.0 Playground](https://developers.google.com/oauthplayground/). +2. In the list of APIs, select **Drive API v3** and choose the `https://www.googleapis.com/auth/drive.readonly` scope (or `https://www.googleapis.com/auth/drive` for full access). +3. Click **Authorize APIs** and sign in with your Google account. +4. Click **Exchange authorization code for tokens**. +5. Copy the **Access token** and use it as the **Bearer Token** in the connector configuration. + diff --git a/docs/reference/toc.yml b/docs/reference/toc.yml index 5cbd54e35cdf7..8dd24c0fd8263 100644 --- a/docs/reference/toc.yml +++ b/docs/reference/toc.yml @@ -75,6 +75,7 @@ toc: - file: connectors-kibana/abuseipdb-action-type.md - file: connectors-kibana/alienvault-otx-action-type.md - file: connectors-kibana/brave-search-action-type.md + - file: connectors-kibana/google-drive-action-type.md - file: connectors-kibana/greynoise-action-type.md - file: connectors-kibana/jina-action-type.md - file: connectors-kibana/notion-action-type.md diff --git a/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.test.ts b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.test.ts new file mode 100644 index 0000000000000..21186b592b7d6 --- /dev/null +++ b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.test.ts @@ -0,0 +1,665 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +import type { ActionContext } from '../../connector_spec'; +import { GoogleDriveConnector } from './google_drive'; + +describe('GoogleDriveConnector', () => { + const mockClient = { + get: jest.fn(), + post: jest.fn(), + }; + + const mockContext = { + client: mockClient, + log: { debug: jest.fn() }, + } as unknown as ActionContext; + + beforeEach(() => { + jest.clearAllMocks(); + }); + + describe('searchFiles action', () => { + it('should search files with a query', async () => { + const mockResponse = { + data: { + files: [ + { + id: 'file-1', + name: 'Report.pdf', + mimeType: 'application/pdf', + size: '1024', + modifiedTime: '2025-01-01T00:00:00.000Z', + webViewLink: 'https://drive.google.com/file/d/file-1/view', + }, + ], + nextPageToken: undefined, + }, + }; + mockClient.get.mockResolvedValue(mockResponse); + + const result = await GoogleDriveConnector.actions.searchFiles.handler(mockContext, { + query: "fullText contains 'report'", + pageSize: 250, + }); + + expect(mockClient.get).toHaveBeenCalledWith( + 'https://www.googleapis.com/drive/v3/files', + { + params: { + q: "fullText contains 'report'", + pageSize: 250, + fields: + 'nextPageToken, files(id, name, mimeType, size, createdTime, modifiedTime, webViewLink)', + }, + } + ); + expect(result).toEqual({ + files: mockResponse.data.files, + nextPageToken: undefined, + }); + }); + + it('should include pageToken when provided', async () => { + const mockResponse = { + data: { + files: [], + nextPageToken: undefined, + }, + }; + mockClient.get.mockResolvedValue(mockResponse); + + await GoogleDriveConnector.actions.searchFiles.handler(mockContext, { + query: "name contains 'test'", + pageSize: 10, + pageToken: 'next-page-token', + }); + + expect(mockClient.get).toHaveBeenCalledWith( + 'https://www.googleapis.com/drive/v3/files', + { + params: { + q: "name contains 'test'", + pageSize: 10, + pageToken: 'next-page-token', + fields: + 'nextPageToken, files(id, name, mimeType, size, createdTime, modifiedTime, webViewLink)', + }, + } + ); + }); + + it('should cap pageSize at 1000', async () => { + const mockResponse = { data: { files: [] } }; + mockClient.get.mockResolvedValue(mockResponse); + + await GoogleDriveConnector.actions.searchFiles.handler(mockContext, { + query: 'test', + pageSize: 5000, + }); + + expect(mockClient.get).toHaveBeenCalledWith( + 'https://www.googleapis.com/drive/v3/files', + expect.objectContaining({ + params: expect.objectContaining({ pageSize: 1000 }), + }) + ); + }); + + it('should include orderBy when provided', async () => { + const mockResponse = { data: { files: [], nextPageToken: undefined } }; + mockClient.get.mockResolvedValue(mockResponse); + + await GoogleDriveConnector.actions.searchFiles.handler(mockContext, { + query: "'me' in owners and trashed=false", + pageSize: 1, + orderBy: 'createdTime desc', + }); + + expect(mockClient.get).toHaveBeenCalledWith( + 'https://www.googleapis.com/drive/v3/files', + expect.objectContaining({ + params: expect.objectContaining({ + q: "'me' in owners and trashed=false", + pageSize: 1, + orderBy: 'createdTime desc', + }), + }) + ); + }); + + it('should throw Google Drive API error when present', async () => { + const error = { + response: { + data: { + error: { message: 'Invalid query', code: 400 }, + }, + }, + }; + mockClient.get.mockRejectedValue(error); + + await expect( + GoogleDriveConnector.actions.searchFiles.handler(mockContext, { + query: 'bad query', + pageSize: 250, + }) + ).rejects.toThrow('Google Drive API error: Invalid query'); + }); + + it('should 
rethrow non-Google errors', async () => { + mockClient.get.mockRejectedValue(new Error('Network error')); + + await expect( + GoogleDriveConnector.actions.searchFiles.handler(mockContext, { + query: 'test', + pageSize: 250, + }) + ).rejects.toThrow('Network error'); + }); + }); + + describe('listFiles action', () => { + it('should list files in root folder by default', async () => { + const mockResponse = { + data: { + files: [ + { + id: 'file-1', + name: 'Document.docx', + mimeType: 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', + size: '2048', + modifiedTime: '2025-01-01T00:00:00.000Z', + webViewLink: 'https://drive.google.com/file/d/file-1/view', + }, + ], + nextPageToken: undefined, + }, + }; + mockClient.get.mockResolvedValue(mockResponse); + + const result = await GoogleDriveConnector.actions.listFiles.handler(mockContext, { + folderId: 'root', + pageSize: 250, + includeTrashed: false, + }); + + expect(mockClient.get).toHaveBeenCalledWith( + 'https://www.googleapis.com/drive/v3/files', + { + params: { + q: "'root' in parents and trashed=false", + pageSize: 250, + fields: + 'nextPageToken, files(id, name, mimeType, size, createdTime, modifiedTime, webViewLink)', + }, + } + ); + expect(result).toEqual({ + files: mockResponse.data.files, + nextPageToken: undefined, + }); + }); + + it('should list files in a specific folder', async () => { + const mockResponse = { data: { files: [], nextPageToken: undefined } }; + mockClient.get.mockResolvedValue(mockResponse); + + await GoogleDriveConnector.actions.listFiles.handler(mockContext, { + folderId: 'folder-abc123', + pageSize: 250, + includeTrashed: false, + }); + + expect(mockClient.get).toHaveBeenCalledWith( + 'https://www.googleapis.com/drive/v3/files', + expect.objectContaining({ + params: expect.objectContaining({ + q: "'folder-abc123' in parents and trashed=false", + }), + }) + ); + }); + + it('should include trashed files when requested', async () => { + const mockResponse = { data: { files: [], nextPageToken: undefined } }; + mockClient.get.mockResolvedValue(mockResponse); + + await GoogleDriveConnector.actions.listFiles.handler(mockContext, { + folderId: 'root', + pageSize: 250, + includeTrashed: true, + }); + + expect(mockClient.get).toHaveBeenCalledWith( + 'https://www.googleapis.com/drive/v3/files', + expect.objectContaining({ + params: expect.objectContaining({ + q: "'root' in parents", + }), + }) + ); + }); + + it('should include orderBy when provided', async () => { + const mockResponse = { data: { files: [], nextPageToken: undefined } }; + mockClient.get.mockResolvedValue(mockResponse); + + await GoogleDriveConnector.actions.listFiles.handler(mockContext, { + folderId: 'root', + pageSize: 250, + orderBy: 'modifiedTime', + includeTrashed: false, + }); + + expect(mockClient.get).toHaveBeenCalledWith( + 'https://www.googleapis.com/drive/v3/files', + expect.objectContaining({ + params: expect.objectContaining({ + orderBy: 'modifiedTime', + }), + }) + ); + }); + + it('should include pageToken when provided', async () => { + const mockResponse = { data: { files: [], nextPageToken: undefined } }; + mockClient.get.mockResolvedValue(mockResponse); + + await GoogleDriveConnector.actions.listFiles.handler(mockContext, { + folderId: 'root', + pageSize: 250, + pageToken: 'page-token-xyz', + includeTrashed: false, + }); + + expect(mockClient.get).toHaveBeenCalledWith( + 'https://www.googleapis.com/drive/v3/files', + expect.objectContaining({ + params: expect.objectContaining({ + pageToken: 'page-token-xyz', + }), 
+ }) + ); + }); + + it('should escape special characters in folder IDs', async () => { + const mockResponse = { data: { files: [], nextPageToken: undefined } }; + mockClient.get.mockResolvedValue(mockResponse); + + await GoogleDriveConnector.actions.listFiles.handler(mockContext, { + folderId: "folder's\\id", + pageSize: 250, + includeTrashed: false, + }); + + expect(mockClient.get).toHaveBeenCalledWith( + 'https://www.googleapis.com/drive/v3/files', + expect.objectContaining({ + params: expect.objectContaining({ + q: "'folder\\'s\\\\id' in parents and trashed=false", + }), + }) + ); + }); + + it('should throw Google Drive API error when present', async () => { + const error = { + response: { + data: { + error: { message: 'Folder not found', code: 404 }, + }, + }, + }; + mockClient.get.mockRejectedValue(error); + + await expect( + GoogleDriveConnector.actions.listFiles.handler(mockContext, { + folderId: 'nonexistent', + pageSize: 250, + includeTrashed: false, + }) + ).rejects.toThrow('Google Drive API error: Folder not found'); + }); + }); + + describe('downloadFile action', () => { + it('should download a native file', async () => { + const metadataResponse = { + data: { + id: 'file-1', + name: 'report.pdf', + mimeType: 'application/pdf', + size: '1024', + }, + }; + const contentResponse = { + data: Buffer.from('pdf content'), + }; + + mockClient.get.mockResolvedValueOnce(metadataResponse).mockResolvedValueOnce(contentResponse); + + const result = await GoogleDriveConnector.actions.downloadFile.handler(mockContext, { + fileId: 'file-1', + }); + + // First call: metadata + expect(mockClient.get).toHaveBeenCalledWith( + 'https://www.googleapis.com/drive/v3/files/file-1', + { + params: { fields: 'id, name, mimeType, size' }, + } + ); + + // Second call: content download + expect(mockClient.get).toHaveBeenCalledWith( + 'https://www.googleapis.com/drive/v3/files/file-1', + { + params: { alt: 'media' }, + responseType: 'arraybuffer', + } + ); + + expect(result).toEqual({ + id: 'file-1', + name: 'report.pdf', + mimeType: 'application/pdf', + size: '1024', + content: Buffer.from('pdf content').toString('base64'), + encoding: 'base64', + }); + }); + + it('should export a Google Doc as PDF', async () => { + const metadataResponse = { + data: { + id: 'doc-1', + name: 'My Document', + mimeType: 'application/vnd.google-apps.document', + size: undefined, + }, + }; + const contentResponse = { + data: Buffer.from('exported pdf'), + }; + + mockClient.get.mockResolvedValueOnce(metadataResponse).mockResolvedValueOnce(contentResponse); + + const result = await GoogleDriveConnector.actions.downloadFile.handler(mockContext, { + fileId: 'doc-1', + }); + + // Second call: export + expect(mockClient.get).toHaveBeenCalledWith( + 'https://www.googleapis.com/drive/v3/files/doc-1/export', + { + params: { mimeType: 'application/pdf' }, + responseType: 'arraybuffer', + } + ); + + expect(result).toEqual({ + id: 'doc-1', + name: 'My Document', + mimeType: 'application/pdf', + size: undefined, + content: Buffer.from('exported pdf').toString('base64'), + encoding: 'base64', + }); + }); + + it('should export a Google Spreadsheet as XLSX', async () => { + const metadataResponse = { + data: { + id: 'sheet-1', + name: 'My Spreadsheet', + mimeType: 'application/vnd.google-apps.spreadsheet', + size: undefined, + }, + }; + const contentResponse = { + data: Buffer.from('exported xlsx'), + }; + + mockClient.get.mockResolvedValueOnce(metadataResponse).mockResolvedValueOnce(contentResponse); + + const result = await 
GoogleDriveConnector.actions.downloadFile.handler(mockContext, { + fileId: 'sheet-1', + }); + + // The export API call should use XLSX mime type for spreadsheets + expect(mockClient.get).toHaveBeenCalledWith( + 'https://www.googleapis.com/drive/v3/files/sheet-1/export', + { + params: { + mimeType: + 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', + }, + responseType: 'arraybuffer', + } + ); + + expect(result).toEqual( + expect.objectContaining({ + mimeType: 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', + }) + ); + }); + + it('should throw Google Drive API error when present', async () => { + const error = { + response: { + data: { + error: { message: 'File not found', code: 404 }, + }, + }, + }; + mockClient.get.mockRejectedValue(error); + + await expect( + GoogleDriveConnector.actions.downloadFile.handler(mockContext, { + fileId: 'nonexistent', + }) + ).rejects.toThrow('Google Drive API error: File not found'); + }); + }); + + describe('getFileMetadata action', () => { + const metadataFields = + 'id,name,mimeType,size,createdTime,modifiedTime,owners,lastModifyingUser,sharingUser,shared,starred,trashed,permissions,description,parents,labelInfo,webViewLink'; + + it('should fetch metadata for a single file', async () => { + const mockResponse = { + data: { + id: 'file-1', + name: 'Report.pdf', + mimeType: 'application/pdf', + size: '1024', + createdTime: '2025-01-01T00:00:00.000Z', + modifiedTime: '2025-06-01T00:00:00.000Z', + owners: [{ displayName: 'Alice', emailAddress: 'alice@example.com' }], + lastModifyingUser: { displayName: 'Bob', emailAddress: 'bob@example.com' }, + shared: true, + permissions: [ + { id: '1', role: 'owner', type: 'user', emailAddress: 'alice@example.com' }, + ], + webViewLink: 'https://drive.google.com/file/d/file-1/view', + }, + }; + mockClient.get.mockResolvedValue(mockResponse); + + const result = await GoogleDriveConnector.actions.getFileMetadata.handler(mockContext, { + fileIds: ['file-1'], + }); + + expect(mockClient.get).toHaveBeenCalledWith( + 'https://www.googleapis.com/drive/v3/files/file-1', + { params: { fields: metadataFields } } + ); + expect(result).toEqual({ files: [mockResponse.data] }); + }); + + it('should fetch metadata for multiple files in parallel', async () => { + const mockResponse1 = { + data: { id: 'file-1', name: 'Doc 1', mimeType: 'application/pdf' }, + }; + const mockResponse2 = { + data: { id: 'file-2', name: 'Doc 2', mimeType: 'application/pdf' }, + }; + const mockResponse3 = { + data: { id: 'file-3', name: 'Doc 3', mimeType: 'application/pdf' }, + }; + + mockClient.get + .mockResolvedValueOnce(mockResponse1) + .mockResolvedValueOnce(mockResponse2) + .mockResolvedValueOnce(mockResponse3); + + const result = await GoogleDriveConnector.actions.getFileMetadata.handler(mockContext, { + fileIds: ['file-1', 'file-2', 'file-3'], + }); + + expect(mockClient.get).toHaveBeenCalledTimes(3); + expect(mockClient.get).toHaveBeenCalledWith( + 'https://www.googleapis.com/drive/v3/files/file-1', + { params: { fields: metadataFields } } + ); + expect(mockClient.get).toHaveBeenCalledWith( + 'https://www.googleapis.com/drive/v3/files/file-2', + { params: { fields: metadataFields } } + ); + expect(mockClient.get).toHaveBeenCalledWith( + 'https://www.googleapis.com/drive/v3/files/file-3', + { params: { fields: metadataFields } } + ); + expect(result).toEqual({ + files: [mockResponse1.data, mockResponse2.data, mockResponse3.data], + }); + }); + + it('should throw Google Drive API error when present', async () => { 
+ const error = { + response: { + data: { + error: { message: 'File not found', code: 404 }, + }, + }, + }; + mockClient.get.mockRejectedValue(error); + + await expect( + GoogleDriveConnector.actions.getFileMetadata.handler(mockContext, { + fileIds: ['nonexistent'], + }) + ).rejects.toThrow('Google Drive API error: File not found'); + }); + + it('should fail if any file in the batch fails', async () => { + const mockResponse = { + data: { id: 'file-1', name: 'Doc 1' }, + }; + const error = { + response: { + data: { + error: { message: 'Permission denied', code: 403 }, + }, + }, + }; + + mockClient.get.mockResolvedValueOnce(mockResponse).mockRejectedValueOnce(error); + + await expect( + GoogleDriveConnector.actions.getFileMetadata.handler(mockContext, { + fileIds: ['file-1', 'file-2'], + }) + ).rejects.toThrow('Google Drive API error: Permission denied'); + }); + }); + + describe('test handler', () => { + it('should return success when API is accessible', async () => { + const mockResponse = { + status: 200, + data: { + user: { + emailAddress: 'user@example.com', + }, + }, + }; + mockClient.get.mockResolvedValue(mockResponse); + + if (!GoogleDriveConnector.test) { + throw new Error('Test handler not defined'); + } + const result = await GoogleDriveConnector.test.handler(mockContext); + + expect(mockClient.get).toHaveBeenCalledWith( + 'https://www.googleapis.com/drive/v3/about', + { + params: { fields: 'user' }, + } + ); + expect(result).toEqual({ + ok: true, + message: 'Successfully connected to Google Drive API as user@example.com', + }); + }); + + it('should fall back to generic user label when email is missing', async () => { + const mockResponse = { + status: 200, + data: { + user: {}, + }, + }; + mockClient.get.mockResolvedValue(mockResponse); + + if (!GoogleDriveConnector.test) { + throw new Error('Test handler not defined'); + } + const result = await GoogleDriveConnector.test.handler(mockContext); + + expect(result).toEqual({ + ok: true, + message: 'Successfully connected to Google Drive API as user', + }); + }); + + it('should return failure when API returns non-200 status', async () => { + const mockResponse = { + status: 401, + data: {}, + }; + mockClient.get.mockResolvedValue(mockResponse); + + if (!GoogleDriveConnector.test) { + throw new Error('Test handler not defined'); + } + const result = await GoogleDriveConnector.test.handler(mockContext); + + expect(result).toEqual({ + ok: false, + message: 'Failed to connect to Google Drive API', + }); + }); + + it('should return failure when API throws an error', async () => { + mockClient.get.mockRejectedValue(new Error('Invalid credentials')); + + if (!GoogleDriveConnector.test) { + throw new Error('Test handler not defined'); + } + const result = await GoogleDriveConnector.test.handler(mockContext); + + expect(result).toEqual({ + ok: false, + message: 'Failed to connect to Google Drive API: Invalid credentials', + }); + }); + }); +}); diff --git a/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts index fcacf9d7c67b3..af8cef7b65e16 100644 --- a/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts +++ b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts @@ -69,10 +69,13 @@ export const GoogleDriveConnector: ConnectorSpec = { .string() .min(1) .describe( - "Google Drive query. 
Use fullText contains 'term' for content search, " + - "name contains 'term' for filename search, mimeType='application/pdf' for type filtering, " + - "modifiedTime > '2024-01-01' for date filtering. Combine with 'and'/'or'. " + - "Note: Google Drive includes trashed files by default. Add 'and trashed=false' to exclude them." + 'Google Drive search query. ' + + "Examples: name contains 'budget' and trashed=false | " + + "fullText contains 'quarterly report' and mimeType='application/pdf' | " + + "'me' in owners and modifiedTime > '2024-01-01' | " + + "mimeType='application/vnd.google-apps.folder' and trashed=false. " + + "Operators: contains, =, !=, <, >, <=, >=. Combine with 'and'/'or'. " + + "String values use single quotes. Add 'and trashed=false' to exclude trashed files." ), pageSize: z .number() @@ -80,27 +83,44 @@ export const GoogleDriveConnector: ConnectorSpec = { .default(DEFAULT_PAGE_SIZE) .describe('Maximum number of files to return (1-1000)'), pageToken: z.string().optional().describe('Token for pagination'), + orderBy: z + .preprocess( + (val) => (val === '' ? undefined : val), + z + .enum([ + 'createdTime', + 'createdTime desc', + 'modifiedTime', + 'modifiedTime desc', + 'name', + 'name desc', + ]) + .optional() + ) + .describe('Field and direction to order results by'), }), handler: async (ctx, input) => { const typedInput = input as { query: string; pageSize: number; pageToken?: string; + orderBy?: string; }; - ctx.log.debug(`[google_drive.searchFiles] input: ${JSON.stringify(input)}`); - const params: Record = { q: typedInput.query, pageSize: Math.min(typedInput.pageSize || DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE), - fields: 'nextPageToken, files(id, name, mimeType, size, modifiedTime, webViewLink)', + fields: + 'nextPageToken, files(id, name, mimeType, size, createdTime, modifiedTime, webViewLink)', }; if (typedInput.pageToken) { params.pageToken = typedInput.pageToken; } - ctx.log.debug(`[google_drive.searchFiles] API params: ${JSON.stringify(params)}`); + if (typedInput.orderBy) { + params.orderBy = typedInput.orderBy; + } try { const response = await ctx.client.get(`${GOOGLE_DRIVE_API_BASE}/files`, { @@ -152,14 +172,13 @@ export const GoogleDriveConnector: ConnectorSpec = { includeTrashed: boolean; }; - ctx.log.debug(`[google_drive.listFiles] input: ${JSON.stringify(input)}`); - const folderId = typedInput.folderId || DEFAULT_FOLDER_ID; const trashedFilter = typedInput.includeTrashed ? 
'' : ' and trashed=false'; const params: Record = { q: `'${escapeQueryValue(folderId)}' in parents${trashedFilter}`, pageSize: Math.min(typedInput.pageSize || DEFAULT_PAGE_SIZE, MAX_PAGE_SIZE), - fields: 'nextPageToken, files(id, name, mimeType, size, modifiedTime, webViewLink)', + fields: + 'nextPageToken, files(id, name, mimeType, size, createdTime, modifiedTime, webViewLink)', }; if (typedInput.pageToken) { @@ -170,8 +189,6 @@ export const GoogleDriveConnector: ConnectorSpec = { params.orderBy = typedInput.orderBy; } - ctx.log.debug(`[google_drive.listFiles] API params: ${JSON.stringify(params)}`); - try { const response = await ctx.client.get(`${GOOGLE_DRIVE_API_BASE}/files`, { params, @@ -192,16 +209,12 @@ export const GoogleDriveConnector: ConnectorSpec = { isTool: true, input: z.object({ fileId: z.string().min(1).describe('The ID of the file to download'), - mimeType: z.string().optional().describe('Export MIME type for Google Workspace documents'), }), handler: async (ctx, input) => { const typedInput = input as { fileId: string; - mimeType?: string; }; - ctx.log.debug(`[google_drive.downloadFile] input: ${JSON.stringify(input)}`); - try { // First, get file metadata to determine if it's a Google Workspace document const metadataResponse = await ctx.client.get( @@ -217,6 +230,8 @@ export const GoogleDriveConnector: ConnectorSpec = { const isGoogleDoc = fileMetadata.mimeType?.startsWith(GOOGLE_WORKSPACE_MIME_PREFIX); let contentResponse; + let resolvedMimeType: string = fileMetadata.mimeType; + if (isGoogleDoc) { // Export Google Workspace documents // Use XLSX for Sheets (preserves tabular structure), PDF for everything else @@ -224,12 +239,12 @@ export const GoogleDriveConnector: ConnectorSpec = { fileMetadata.mimeType === 'application/vnd.google-apps.spreadsheet' ? SHEETS_EXPORT_MIME_TYPE : DEFAULT_EXPORT_MIME_TYPE; - const exportMimeType = typedInput.mimeType || defaultExport; + resolvedMimeType = defaultExport; contentResponse = await ctx.client.get( `${GOOGLE_DRIVE_API_BASE}/files/${typedInput.fileId}/export`, { params: { - mimeType: exportMimeType, + mimeType: resolvedMimeType, }, responseType: 'arraybuffer', } @@ -253,9 +268,7 @@ export const GoogleDriveConnector: ConnectorSpec = { return { id: fileMetadata.id, name: fileMetadata.name, - mimeType: isGoogleDoc - ? typedInput.mimeType || DEFAULT_EXPORT_MIME_TYPE - : fileMetadata.mimeType, + mimeType: resolvedMimeType, size: fileMetadata.size, content: base64Content, encoding: 'base64', @@ -266,6 +279,68 @@ export const GoogleDriveConnector: ConnectorSpec = { } }, }, + + getFileMetadata: { + isTool: true, + input: z.object({ + fileIds: z + .array(z.string().min(1)) + .min(1) + .describe( + 'Array of file IDs to fetch metadata for. Use after search/list to get ownership, ' + + 'sharing, permissions, and other details for specific files.' 
+ ), + }), + handler: async (ctx, input) => { + const typedInput = input as { + fileIds: string[]; + }; + + const metadataFields = [ + 'id', + 'name', + 'mimeType', + 'size', + 'createdTime', + 'modifiedTime', + 'owners', + 'lastModifyingUser', + 'sharingUser', + 'shared', + 'starred', + 'trashed', + 'permissions', + 'description', + 'parents', + 'labelInfo', + 'webViewLink', + ].join(','); + + try { + const results = await Promise.all( + typedInput.fileIds.map(async (fileId) => { + try { + const response = await ctx.client.get( + `${GOOGLE_DRIVE_API_BASE}/files/${fileId}`, + { + params: { fields: metadataFields }, + } + ); + return response.data; + } catch (error: unknown) { + throwGoogleDriveError(error); + throw error; + } + }) + ); + + return { files: results }; + } catch (error: unknown) { + throwGoogleDriveError(error); + throw error; + } + }, + }, }, test: { diff --git a/src/platform/packages/shared/kbn-connector-specs/src/specs/sharepoint_online/sharepoint_online.ts b/src/platform/packages/shared/kbn-connector-specs/src/specs/sharepoint_online/sharepoint_online.ts index ae6ceb66c6a1a..3179b2c27b7b7 100644 --- a/src/platform/packages/shared/kbn-connector-specs/src/specs/sharepoint_online/sharepoint_online.ts +++ b/src/platform/packages/shared/kbn-connector-specs/src/specs/sharepoint_online/sharepoint_online.ts @@ -405,7 +405,6 @@ export const SharepointOnline: ConnectorSpec = { ], }; - ctx.log.debug(`SharePoint search: ${JSON.stringify(typedInput.query)}`); const response = await ctx.client.post( 'https://graph.microsoft.com/v1.0/search/query', searchRequest diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows/metadata.yaml b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows/metadata.yaml new file mode 100644 index 0000000000000..521ad1be36fb9 --- /dev/null +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows/metadata.yaml @@ -0,0 +1,17 @@ +version: '1' +name: 'sources.google_drive.metadata' +description: Get detailed metadata for specific files including ownership, sharing, permissions, and descriptions. Use after search or list to inspect specific files. +tags: ['agent-builder-tool'] +enabled: true +triggers: + - type: manual +inputs: + - name: fileIds + type: array + description: Array of file IDs to fetch metadata for. Use IDs from search or list results. +steps: + - name: get_metadata + type: google_drive.getFileMetadata + connector-id: <%= stackConnectorId %> + with: + fileIds: ${{inputs.fileIds}} diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows/search.yaml b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows/search.yaml index 75717fce2a13d..4beb323bb39d3 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows/search.yaml +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/workflows/search.yaml @@ -8,7 +8,7 @@ triggers: inputs: - name: query type: string - description: "Google Drive query. Use fullText contains 'term' for content search, name contains 'term' for filename, mimeType='application/pdf' for type filtering, modifiedTime > '2024-01-01' for date filtering. Combine with 'and'/'or'." + description: "Google Drive search query. 
Examples: name contains 'budget' and trashed=false | fullText contains 'quarterly report' and mimeType='application/pdf' | 'me' in owners and modifiedTime > '2024-01-01' | mimeType='application/vnd.google-apps.folder' and trashed=false. Operators: contains, =, !=, <, >, <=, >=. Combine with 'and'/'or'. String values use single quotes. Add 'and trashed=false' to exclude trashed files." - name: pageSize type: number required: false @@ -17,6 +17,10 @@ inputs: type: string required: false description: "Pagination token. Pass the 'nextPageToken' value from a previous response to get the next page. When nextPageToken is absent in the response, there are no more results." + - name: orderBy + type: string + required: false + description: "Sort order: 'createdTime', 'createdTime desc', 'modifiedTime', 'modifiedTime desc', 'name', or 'name desc'" steps: - name: search_files type: google_drive.searchFiles @@ -25,3 +29,4 @@ steps: query: "${{inputs.query}}" pageSize: ${{inputs.pageSize}} pageToken: "${{inputs.pageToken}}" + orderBy: "${{inputs.orderBy}}" From ace423f2f71f42c22268f4987220d9e5613fcb92 Mon Sep 17 00:00:00 2001 From: kibanamachine <42973632+kibanamachine@users.noreply.github.com> Date: Mon, 9 Feb 2026 11:50:19 +0000 Subject: [PATCH 09/11] Changes from node scripts/eslint_all_files --no-cache --fix --- .../specs/google_drive/google_drive.test.ts | 71 ++++++++----------- .../src/specs/google_drive/google_drive.ts | 9 +-- 2 files changed, 32 insertions(+), 48 deletions(-) diff --git a/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.test.ts b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.test.ts index 21186b592b7d6..f94e805acb125 100644 --- a/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.test.ts +++ b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.test.ts @@ -49,17 +49,14 @@ describe('GoogleDriveConnector', () => { pageSize: 250, }); - expect(mockClient.get).toHaveBeenCalledWith( - 'https://www.googleapis.com/drive/v3/files', - { - params: { - q: "fullText contains 'report'", - pageSize: 250, - fields: - 'nextPageToken, files(id, name, mimeType, size, createdTime, modifiedTime, webViewLink)', - }, - } - ); + expect(mockClient.get).toHaveBeenCalledWith('https://www.googleapis.com/drive/v3/files', { + params: { + q: "fullText contains 'report'", + pageSize: 250, + fields: + 'nextPageToken, files(id, name, mimeType, size, createdTime, modifiedTime, webViewLink)', + }, + }); expect(result).toEqual({ files: mockResponse.data.files, nextPageToken: undefined, @@ -81,18 +78,15 @@ describe('GoogleDriveConnector', () => { pageToken: 'next-page-token', }); - expect(mockClient.get).toHaveBeenCalledWith( - 'https://www.googleapis.com/drive/v3/files', - { - params: { - q: "name contains 'test'", - pageSize: 10, - pageToken: 'next-page-token', - fields: - 'nextPageToken, files(id, name, mimeType, size, createdTime, modifiedTime, webViewLink)', - }, - } - ); + expect(mockClient.get).toHaveBeenCalledWith('https://www.googleapis.com/drive/v3/files', { + params: { + q: "name contains 'test'", + pageSize: 10, + pageToken: 'next-page-token', + fields: + 'nextPageToken, files(id, name, mimeType, size, createdTime, modifiedTime, webViewLink)', + }, + }); }); it('should cap pageSize at 1000', async () => { @@ -189,17 +183,14 @@ describe('GoogleDriveConnector', () => { includeTrashed: false, }); - expect(mockClient.get).toHaveBeenCalledWith( - 
'https://www.googleapis.com/drive/v3/files', - { - params: { - q: "'root' in parents and trashed=false", - pageSize: 250, - fields: - 'nextPageToken, files(id, name, mimeType, size, createdTime, modifiedTime, webViewLink)', - }, - } - ); + expect(mockClient.get).toHaveBeenCalledWith('https://www.googleapis.com/drive/v3/files', { + params: { + q: "'root' in parents and trashed=false", + pageSize: 250, + fields: + 'nextPageToken, files(id, name, mimeType, size, createdTime, modifiedTime, webViewLink)', + }, + }); expect(result).toEqual({ files: mockResponse.data.files, nextPageToken: undefined, @@ -437,8 +428,7 @@ describe('GoogleDriveConnector', () => { 'https://www.googleapis.com/drive/v3/files/sheet-1/export', { params: { - mimeType: - 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', + mimeType: 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', }, responseType: 'arraybuffer', } @@ -598,12 +588,9 @@ describe('GoogleDriveConnector', () => { } const result = await GoogleDriveConnector.test.handler(mockContext); - expect(mockClient.get).toHaveBeenCalledWith( - 'https://www.googleapis.com/drive/v3/about', - { - params: { fields: 'user' }, - } - ); + expect(mockClient.get).toHaveBeenCalledWith('https://www.googleapis.com/drive/v3/about', { + params: { fields: 'user' }, + }); expect(result).toEqual({ ok: true, message: 'Successfully connected to Google Drive API as user@example.com', diff --git a/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts index af8cef7b65e16..0b8fcbc3a153c 100644 --- a/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts +++ b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts @@ -320,12 +320,9 @@ export const GoogleDriveConnector: ConnectorSpec = { const results = await Promise.all( typedInput.fileIds.map(async (fileId) => { try { - const response = await ctx.client.get( - `${GOOGLE_DRIVE_API_BASE}/files/${fileId}`, - { - params: { fields: metadataFields }, - } - ); + const response = await ctx.client.get(`${GOOGLE_DRIVE_API_BASE}/files/${fileId}`, { + params: { fields: metadataFields }, + }); return response.data; } catch (error: unknown) { throwGoogleDriveError(error); From f10d0b8a942d12309c1f041bac574d7b5b5aab90 Mon Sep 17 00:00:00 2001 From: Apostolos Matsagkas Date: Mon, 9 Feb 2026 16:08:24 +0200 Subject: [PATCH 10/11] Address review comments --- .../src/specs/google_drive/google_drive.test.ts | 10 +++++----- .../src/specs/google_drive/google_drive.ts | 2 +- .../data_sources/server/sources/github/data_type.ts | 8 -------- .../server/sources/google_drive/data_type.ts | 8 -------- .../data_sources/server/sources/notion/data_type.ts | 8 -------- 5 files changed, 6 insertions(+), 30 deletions(-) diff --git a/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.test.ts b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.test.ts index f94e805acb125..a09abe1f25cbd 100644 --- a/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.test.ts +++ b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.test.ts @@ -143,7 +143,7 @@ describe('GoogleDriveConnector', () => { query: 'bad query', pageSize: 250, }) - ).rejects.toThrow('Google Drive API error: Invalid query'); + ).rejects.toThrow('Google Drive API error 
(400)'); }); it('should rethrow non-Google errors', async () => { @@ -315,7 +315,7 @@ describe('GoogleDriveConnector', () => { pageSize: 250, includeTrashed: false, }) - ).rejects.toThrow('Google Drive API error: Folder not found'); + ).rejects.toThrow('Google Drive API error (404)'); }); }); @@ -455,7 +455,7 @@ describe('GoogleDriveConnector', () => { GoogleDriveConnector.actions.downloadFile.handler(mockContext, { fileId: 'nonexistent', }) - ).rejects.toThrow('Google Drive API error: File not found'); + ).rejects.toThrow('Google Drive API error (404)'); }); }); @@ -546,7 +546,7 @@ describe('GoogleDriveConnector', () => { GoogleDriveConnector.actions.getFileMetadata.handler(mockContext, { fileIds: ['nonexistent'], }) - ).rejects.toThrow('Google Drive API error: File not found'); + ).rejects.toThrow('Google Drive API error (404)'); }); it('should fail if any file in the batch fails', async () => { @@ -567,7 +567,7 @@ describe('GoogleDriveConnector', () => { GoogleDriveConnector.actions.getFileMetadata.handler(mockContext, { fileIds: ['file-1', 'file-2'], }) - ).rejects.toThrow('Google Drive API error: Permission denied'); + ).rejects.toThrow('Google Drive API error (403)'); }); }); diff --git a/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts index 0b8fcbc3a153c..00e3e6d2e661f 100644 --- a/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts +++ b/src/platform/packages/shared/kbn-connector-specs/src/specs/google_drive/google_drive.ts @@ -40,7 +40,7 @@ function throwGoogleDriveError(error: unknown): void { }; const googleError = axiosError.response?.data?.error; if (googleError) { - throw new Error(`Google Drive API error: ${googleError.message}`); + throw new Error(`Google Drive API error (${googleError.code})`); } } diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/github/data_type.ts b/x-pack/platform/plugins/shared/data_sources/server/sources/github/data_type.ts index 1cd95da50d093..a28fab1e1e4df 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/sources/github/data_type.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/github/data_type.ts @@ -8,7 +8,6 @@ import { i18n } from '@kbn/i18n'; import { MCPAuthType } from '@kbn/connector-schemas/mcp'; import type { DataSource } from '@kbn/data-catalog-plugin'; -import { EARSSupportedOAuthProvider } from '@kbn/data-catalog-plugin'; export const githubDataSource: DataSource = { id: 'github', @@ -19,13 +18,6 @@ export const githubDataSource: DataSource = { iconType: '.github', - oauthConfiguration: { - provider: EARSSupportedOAuthProvider.GITHUB, - initiatePath: '/oauth/start/github', - fetchSecretsPath: '/oauth/fetch_request_secrets', - oauthBaseUrl: 'https://localhost:8052', // update once EARS deploys to QA - }, - stackConnector: { type: '.mcp', config: { diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/data_type.ts b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/data_type.ts index 99ad26b43af74..9a65c8476d200 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/data_type.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/google_drive/data_type.ts @@ -7,7 +7,6 @@ import { i18n } from '@kbn/i18n'; import type { DataSource } from '@kbn/data-catalog-plugin'; -import { EARSSupportedOAuthProvider } from 
'@kbn/data-catalog-plugin'; export const googleDriveDataSource: DataSource = { id: 'google_drive', @@ -17,13 +16,6 @@ export const googleDriveDataSource: DataSource = { }), iconType: '.google_drive', - oauthConfiguration: { - provider: EARSSupportedOAuthProvider.GOOGLE, - initiatePath: '/oauth/start/google_drive', - fetchSecretsPath: '/oauth/fetch_request_secrets', - oauthBaseUrl: 'https://localhost:8052', // update once EARS deploys to QA - }, - stackConnector: { type: '.google_drive', config: {}, diff --git a/x-pack/platform/plugins/shared/data_sources/server/sources/notion/data_type.ts b/x-pack/platform/plugins/shared/data_sources/server/sources/notion/data_type.ts index 76d0b585c5bd2..2f47ba63ad01e 100644 --- a/x-pack/platform/plugins/shared/data_sources/server/sources/notion/data_type.ts +++ b/x-pack/platform/plugins/shared/data_sources/server/sources/notion/data_type.ts @@ -7,7 +7,6 @@ import { i18n } from '@kbn/i18n'; import type { DataSource } from '@kbn/data-catalog-plugin'; -import { EARSSupportedOAuthProvider } from '@kbn/data-catalog-plugin'; export const notionDataSource: DataSource = { id: 'notion', @@ -18,13 +17,6 @@ export const notionDataSource: DataSource = { iconType: '.notion', - oauthConfiguration: { - provider: EARSSupportedOAuthProvider.NOTION, - initiatePath: '/oauth/start/notion', - fetchSecretsPath: '/oauth/fetch_request_secrets', - oauthBaseUrl: 'https://localhost:8052', - }, - stackConnector: { type: '.notion', config: {}, From e4288592c44e4c2fa4b16430870d5532b53fc828 Mon Sep 17 00:00:00 2001 From: Apostolos Matsagkas Date: Tue, 10 Feb 2026 15:11:19 +0200 Subject: [PATCH 11/11] Apply review comment --- docs/reference/connectors-kibana/google-drive-action-type.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/connectors-kibana/google-drive-action-type.md b/docs/reference/connectors-kibana/google-drive-action-type.md index 26570a9ba9ab0..160737d9d5d6f 100644 --- a/docs/reference/connectors-kibana/google-drive-action-type.md +++ b/docs/reference/connectors-kibana/google-drive-action-type.md @@ -20,7 +20,7 @@ You can create connectors in **{{stack-manage-app}} > {{connectors-ui}}**. Google Drive connectors have the following configuration properties: Bearer Token -: A Google OAuth 2.0 access token with Google Drive API scopes. See [Get API credentials](#google-drive-api-credentials) for instructions. +: A Google OAuth 2.0 access token with Google Drive API scopes. Check the [Get API credentials](#google-drive-api-credentials) for instructions. ## Test connectors [google-drive-action-configuration]
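
For reference, the query grammar described in search.yaml and the Bearer Token property documented above come together in a single Drive v3 files.list request, which is what the searchFiles action issues under the hood. The sketch below is illustration only — axios, the searchDriveFiles helper name, and the default page size of 250 are assumptions mirroring the tests in this series, not code from the patch:

import axios from 'axios';

const GOOGLE_DRIVE_API_BASE = 'https://www.googleapis.com/drive/v3';

interface DriveSearchParams {
  query: string; // e.g. "fullText contains 'quarterly report' and trashed=false"
  accessToken: string; // OAuth 2.0 bearer token with Drive scopes (the connector's Bearer Token)
  pageSize?: number; // Drive caps this at 1000
  pageToken?: string; // pass nextPageToken from the previous response
  orderBy?: string; // e.g. 'modifiedTime desc'
}

// Hypothetical standalone helper; the connector performs the equivalent call via ctx.client.
async function searchDriveFiles(params: DriveSearchParams) {
  const { query, accessToken, pageSize, pageToken, orderBy } = params;
  const response = await axios.get(`${GOOGLE_DRIVE_API_BASE}/files`, {
    headers: { Authorization: `Bearer ${accessToken}` },
    params: {
      q: query,
      pageSize: Math.min(pageSize ?? 250, 1000),
      pageToken,
      orderBy,
      fields:
        'nextPageToken, files(id, name, mimeType, size, createdTime, modifiedTime, webViewLink)',
    },
  });
  // nextPageToken is absent on the last page, matching the pagination contract in search.yaml.
  return { files: response.data.files, nextPageToken: response.data.nextPageToken };
}

A query such as "name contains 'budget' and mimeType='application/vnd.google-apps.spreadsheet' and trashed=false" can be passed through unchanged as the workflow's query input.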
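
Relatedly, patch 10 changes the error surfaced by throwGoogleDriveError so that only the Google error code is reported, which is why the test expectations now assert messages like 'Google Drive API error (404)'. Reassembled from the hunks above — a reconstruction for context, not a verbatim copy of the file — the helper reads roughly as follows:

function throwGoogleDriveError(error: unknown): void {
  // Google API failures arrive as Axios errors with the error envelope under response.data.error.
  const axiosError = error as {
    response?: { data?: { error?: { code: number; message: string } } };
  };
  const googleError = axiosError.response?.data?.error;
  if (googleError) {
    // After patch 10 only the HTTP status code is surfaced, e.g. "Google Drive API error (404)",
    // instead of echoing the upstream error message.
    throw new Error(`Google Drive API error (${googleError.code})`);
  }
  // Anything else falls through so callers can rethrow the original error unchanged.
}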