@@ -407,14 +407,71 @@ ur_result_t ur_context_handle_t_::getFreeSlotInExistingOrNewPool(
     bool ProfilingEnabled, ur_device_handle_t Device,
     bool CounterBasedEventEnabled, bool UsingImmCmdList,
     bool InterruptBasedEventEnabled) {
-  // Lock while updating event pool machinery.
-  std::scoped_lock<ur_mutex> Lock(ZeEventPoolCacheMutex);
 
   ze_device_handle_t ZeDevice = nullptr;
-
   if (Device) {
     ZeDevice = Device->ZeDevice;
   }
+
+  if (DisableEventsCaching) {
+    // Skip all cache handling, always create a new pool
+    ze_event_pool_counter_based_exp_desc_t counterBasedExt = {
+        ZE_STRUCTURE_TYPE_COUNTER_BASED_EVENT_POOL_EXP_DESC, nullptr, 0};
+
+    ze_intel_event_sync_mode_exp_desc_t eventSyncMode = {
+        ZE_INTEL_STRUCTURE_TYPE_EVENT_SYNC_MODE_EXP_DESC, nullptr, 0};
+    eventSyncMode.syncModeFlags =
+        ZE_INTEL_EVENT_SYNC_MODE_EXP_FLAG_LOW_POWER_WAIT |
+        ZE_INTEL_EVENT_SYNC_MODE_EXP_FLAG_SIGNAL_INTERRUPT;
+
+    ZeStruct<ze_event_pool_desc_t> ZeEventPoolDesc;
+    ZeEventPoolDesc.count = MaxNumEventsPerPool;
+    ZeEventPoolDesc.flags = 0;
+    ZeEventPoolDesc.pNext = nullptr;
+    if (HostVisible)
+      ZeEventPoolDesc.flags |= ZE_EVENT_POOL_FLAG_HOST_VISIBLE;
+    if (ProfilingEnabled)
+      ZeEventPoolDesc.flags |= ZE_EVENT_POOL_FLAG_KERNEL_TIMESTAMP;
+    UR_LOG(DEBUG, "ze_event_pool_desc_t flags set to: {}",
+           ZeEventPoolDesc.flags);
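+    // Chain the optional counter-based and interrupt/sync-mode extension
+    // descriptors through pNext, depending on which features are enabled.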
+    if (CounterBasedEventEnabled) {
+      if (UsingImmCmdList) {
+        counterBasedExt.flags = ZE_EVENT_POOL_COUNTER_BASED_EXP_FLAG_IMMEDIATE;
+      } else {
+        counterBasedExt.flags =
+            ZE_EVENT_POOL_COUNTER_BASED_EXP_FLAG_NON_IMMEDIATE;
+      }
+      UR_LOG(DEBUG, "ze_event_pool_desc_t counter based flags set to: {}",
+             counterBasedExt.flags);
+      if (InterruptBasedEventEnabled) {
+        counterBasedExt.pNext = &eventSyncMode;
+      }
+      ZeEventPoolDesc.pNext = &counterBasedExt;
+    } else if (InterruptBasedEventEnabled) {
+      ZeEventPoolDesc.pNext = &eventSyncMode;
+    }
+
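+    // Create the pool on the requested device, or on every device in the
+    // context when no specific device was given.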
+    std::vector<ze_device_handle_t> ZeDevices;
+    if (ZeDevice) {
+      ZeDevices.push_back(ZeDevice);
+    } else {
+      std::for_each(Devices.begin(), Devices.end(),
+                    [&](const ur_device_handle_t &D) {
+                      ZeDevices.push_back(D->ZeDevice);
+                    });
+    }
+
+    ZE2UR_CALL(zeEventPoolCreate, (ZeContext, &ZeEventPoolDesc,
+                                   ZeDevices.size(), &ZeDevices[0], &Pool));
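+    // Slot 0 of the fresh pool is handed to the caller, so the pool starts
+    // with MaxNumEventsPerPool - 1 free events and one unreleased event.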
+    Index = 0;
+    NumEventsAvailableInEventPool[Pool] = MaxNumEventsPerPool - 1;
+    NumEventsUnreleasedInEventPool[Pool] = 1;
+    return UR_RESULT_SUCCESS;
+  }
+
+  // --- Normal cache-based logic below ---
+  std::scoped_lock<ur_mutex> Lock(ZeEventPoolCacheMutex);
+
   std::list<ze_event_pool_handle_t> *ZePoolCache = getZeEventPoolCache(
       HostVisible, ProfilingEnabled, CounterBasedEventEnabled, UsingImmCmdList,
       InterruptBasedEventEnabled, ZeDevice);
@@ -423,6 +480,7 @@ ur_result_t ur_context_handle_t_::getFreeSlotInExistingOrNewPool(
     if (NumEventsAvailableInEventPool[ZePoolCache->front()] == 0) {
       if (DisableEventsCaching) {
         // Remove full pool from the cache if events caching is disabled.
+        ZE_CALL_NOCHECK(zeEventPoolDestroy, (ZePoolCache->front()));
         ZePoolCache->erase(ZePoolCache->begin());
       } else {
         // If event caching is enabled then we don't destroy events so there is