-
Notifications
You must be signed in to change notification settings - Fork 349
zephyr: lib: alloc: Use cached memory for L3 Heap #8632
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -116,8 +116,7 @@ static inline uintptr_t get_l3_heap_start(void) | |
| * - main_fw_load_offset | ||
| * - main fw size in manifest | ||
| */ | ||
| return (uintptr_t)z_soc_uncached_ptr((__sparse_force void __sparse_cache *) | ||
| ROUND_UP(IMR_L3_HEAP_BASE, L3_MEM_PAGE_SIZE)); | ||
| return (uintptr_t)(ROUND_UP(IMR_L3_HEAP_BASE, L3_MEM_PAGE_SIZE)); | ||
| } | ||
|
|
||
| /** | ||
|
|
@@ -145,14 +144,50 @@ static bool is_l3_heap_pointer(void *ptr) | |
| uintptr_t l3_heap_start = get_l3_heap_start(); | ||
| uintptr_t l3_heap_end = l3_heap_start + get_l3_heap_size(); | ||
|
|
||
| if (is_cached(ptr)) | ||
| ptr = z_soc_uncached_ptr((__sparse_force void __sparse_cache *)ptr); | ||
|
|
||
| if ((POINTER_TO_UINT(ptr) >= l3_heap_start) && (POINTER_TO_UINT(ptr) < l3_heap_end)) | ||
| return true; | ||
|
|
||
| return false; | ||
| } | ||
|
|
||
| static void *l3_heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes) | ||
| { | ||
| k_spinlock_key_t key; | ||
| void *ret; | ||
| #if CONFIG_SYS_HEAP_RUNTIME_STATS && CONFIG_IPC_MAJOR_4 | ||
| struct sys_memory_stats stats; | ||
| #endif | ||
| if (!cpu_is_primary(arch_proc_id())) { | ||
| tr_err(&zephyr_tr, "L3_HEAP available only for primary core!"); | ||
| return NULL; | ||
| } | ||
|
|
||
| key = k_spin_lock(&h->lock); | ||
| ret = sys_heap_aligned_alloc(&h->heap, min_align, bytes); | ||
| k_spin_unlock(&h->lock, key); | ||
|
|
||
| #if CONFIG_SYS_HEAP_RUNTIME_STATS && CONFIG_IPC_MAJOR_4 | ||
| sys_heap_runtime_stats_get(&h->heap, &stats); | ||
| tr_info(&zephyr_tr, "heap allocated: %u free: %u max allocated: %u", | ||
| stats.allocated_bytes, stats.free_bytes, stats.max_allocated_bytes); | ||
| #endif | ||
|
|
||
| return ret; | ||
| } | ||
|
|
||
| static void l3_heap_free(struct k_heap *h, void *mem) | ||
| { | ||
| if (!cpu_is_primary(arch_proc_id())) { | ||
| tr_err(&zephyr_tr, "L3_HEAP available only for primary core!"); | ||
| return; | ||
| } | ||
|
|
||
| k_spinlock_key_t key = k_spin_lock(&h->lock); | ||
|
|
||
| sys_heap_free(&h->heap, mem); | ||
| k_spin_unlock(&h->lock, key); | ||
| } | ||
|
|
||
| #endif | ||
|
|
||
| static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes) | ||
|
|
@@ -251,6 +286,17 @@ void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes) | |
| if (caps & SOF_MEM_CAPS_L3) { | ||
| #if CONFIG_L3_HEAP | ||
| heap = &l3_heap; | ||
| /* Uncached L3_HEAP should be not used */ | ||
| if (!zone_is_cached(zone)) { | ||
| tr_err(&zephyr_tr, "L3_HEAP available for cached zones only!"); | ||
| return NULL; | ||
| } | ||
| ptr = (__sparse_force void *)l3_heap_alloc_aligned(heap, 0, bytes); | ||
|
|
||
| if (!ptr && zone == SOF_MEM_ZONE_SYS) | ||
| k_panic(); | ||
|
|
||
| return ptr; | ||
| #else | ||
| k_panic(); | ||
| #endif | ||
|
|
@@ -335,10 +381,24 @@ EXPORT_SYMBOL(rzalloc); | |
| void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes, | ||
| uint32_t align) | ||
| { | ||
| struct k_heap *heap; | ||
|
|
||
| /* choose a heap */ | ||
| if (caps & SOF_MEM_CAPS_L3) { | ||
| #if CONFIG_L3_HEAP | ||
| heap = &l3_heap; | ||
| return (__sparse_force void *)l3_heap_alloc_aligned(heap, align, bytes); | ||
| #else | ||
| k_panic(); | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. this is kind of theoretical, but maybe a small follow-up PR to convert this and line 301 above to
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. |
||
| #endif | ||
| } else { | ||
| heap = &sof_heap; | ||
| } | ||
|
|
||
| if (flags & SOF_MEM_FLAG_COHERENT) | ||
| return heap_alloc_aligned(&sof_heap, align, bytes); | ||
| return heap_alloc_aligned(heap, align, bytes); | ||
|
|
||
| return (__sparse_force void *)heap_alloc_aligned_cached(&sof_heap, align, bytes); | ||
| return (__sparse_force void *)heap_alloc_aligned_cached(heap, align, bytes); | ||
| } | ||
| EXPORT_SYMBOL(rballoc_align); | ||
|
|
||
|
|
@@ -352,7 +412,7 @@ void rfree(void *ptr) | |
|
|
||
| #if CONFIG_L3_HEAP | ||
| if (is_l3_heap_pointer(ptr)) { | ||
| heap_free(&l3_heap, ptr); | ||
| l3_heap_free(&l3_heap, ptr); | ||
| return; | ||
| } | ||
| #endif | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -96,10 +96,16 @@ void cpu_notify_state_entry(enum pm_state state) | |
| storage_buffer_size += LP_SRAM_SIZE; | ||
|
|
||
| /* allocate IMR buffer and store it in the global pointer */ | ||
| global_imr_ram_storage = rmalloc(SOF_MEM_ZONE_SYS_RUNTIME, | ||
| 0, | ||
| SOF_MEM_CAPS_L3, | ||
| storage_buffer_size); | ||
| global_imr_ram_storage = rballoc_align(0, SOF_MEM_CAPS_L3, | ||
| storage_buffer_size, | ||
| PLATFORM_DCACHE_ALIGN); | ||
|
|
||
| /* If no IMR buffer we can not recover */ | ||
| if (!global_imr_ram_storage) { | ||
| tr_err(&zephyr_tr, "failed to allocate global_imr_ram_storage"); | ||
| k_panic(); | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. this means: the DSP is being powered down and we try to allocate an IMR buffer to save state, and if we fail, we panic. Would it be possible to just abort saving state and go for a clean power off? Also rather theoretical, but would probably be better not to bother the host with a DSP panic but just to make this a clean shut down? @ujfalusi Can be an incremental PR too |
||
| } | ||
|
|
||
| #endif /* CONFIG_ADSP_IMR_CONTEXT_SAVE */ | ||
| } | ||
| } | ||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.