Merged
src/library_manager/lib_manager.c (3 additions & 5 deletions)

@@ -539,16 +539,14 @@ static void __sparse_cache *lib_manager_allocate_store_mem(uint32_t size,
 	void __sparse_cache *local_add;
 #if CONFIG_L3_HEAP
 	uint32_t caps = SOF_MEM_CAPS_L3 | SOF_MEM_CAPS_DMA;
-
-	/* allocate new buffer: cached alias */
-	local_add = (__sparse_force void __sparse_cache *)rmalloc(SOF_MEM_ZONE_SYS, 0, caps, size);
 #else
-	uint32_t addr_align = PAGE_SZ;
 	uint32_t caps = SOF_MEM_CAPS_DMA;
+#endif
+
+	uint32_t addr_align = PAGE_SZ;
 	/* allocate new buffer: cached alias */
 	local_add = (__sparse_force void __sparse_cache *)rballoc_align(0, caps, size, addr_align);
-#endif
 
 	if (!local_add) {
 		tr_err(&lib_manager_tr, "lib_manager_allocate_store_mem(): alloc failed");
 		return NULL;
zephyr/lib/alloc.c (68 additions & 8 deletions)

@@ -116,8 +116,7 @@ static inline uintptr_t get_l3_heap_start(void)
 	 * - main_fw_load_offset
 	 * - main fw size in manifest
 	 */
-	return (uintptr_t)z_soc_uncached_ptr((__sparse_force void __sparse_cache *)
-			ROUND_UP(IMR_L3_HEAP_BASE, L3_MEM_PAGE_SIZE));
+	return (uintptr_t)(ROUND_UP(IMR_L3_HEAP_BASE, L3_MEM_PAGE_SIZE));
 }
 
 /**

@@ -145,14 +144,50 @@ static bool is_l3_heap_pointer(void *ptr)
 	uintptr_t l3_heap_start = get_l3_heap_start();
 	uintptr_t l3_heap_end = l3_heap_start + get_l3_heap_size();
 
+	if (is_cached(ptr))
+		ptr = z_soc_uncached_ptr((__sparse_force void __sparse_cache *)ptr);
+
 	if ((POINTER_TO_UINT(ptr) >= l3_heap_start) && (POINTER_TO_UINT(ptr) < l3_heap_end))
 		return true;
 
 	return false;
 }
 
+static void *l3_heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
+{
+	k_spinlock_key_t key;
+	void *ret;
+#if CONFIG_SYS_HEAP_RUNTIME_STATS && CONFIG_IPC_MAJOR_4
+	struct sys_memory_stats stats;
+#endif
+	if (!cpu_is_primary(arch_proc_id())) {
+		tr_err(&zephyr_tr, "L3_HEAP available only for primary core!");
+		return NULL;
+	}
+
+	key = k_spin_lock(&h->lock);
+	ret = sys_heap_aligned_alloc(&h->heap, min_align, bytes);
+	k_spin_unlock(&h->lock, key);
+
+#if CONFIG_SYS_HEAP_RUNTIME_STATS && CONFIG_IPC_MAJOR_4
+	sys_heap_runtime_stats_get(&h->heap, &stats);
+	tr_info(&zephyr_tr, "heap allocated: %u free: %u max allocated: %u",
+		stats.allocated_bytes, stats.free_bytes, stats.max_allocated_bytes);
+#endif
+
+	return ret;
+}
+
+static void l3_heap_free(struct k_heap *h, void *mem)
+{
+	if (!cpu_is_primary(arch_proc_id())) {
+		tr_err(&zephyr_tr, "L3_HEAP available only for primary core!");
+		return;
+	}
+
+	k_spinlock_key_t key = k_spin_lock(&h->lock);
+
+	sys_heap_free(&h->heap, mem);
+	k_spin_unlock(&h->lock, key);
+}
 
 #endif
 
 static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
@@ -251,6 +286,17 @@ void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
 	if (caps & SOF_MEM_CAPS_L3) {
 #if CONFIG_L3_HEAP
 		heap = &l3_heap;
+		/* Uncached L3_HEAP should not be used */
+		if (!zone_is_cached(zone)) {
+			tr_err(&zephyr_tr, "L3_HEAP available for cached zones only!");
+			return NULL;
+		}
+		ptr = (__sparse_force void *)l3_heap_alloc_aligned(heap, 0, bytes);
+
+		if (!ptr && zone == SOF_MEM_ZONE_SYS)
+			k_panic();
+
+		return ptr;
 #else
 		k_panic();
 #endif
@@ -335,10 +381,24 @@ EXPORT_SYMBOL(rzalloc);
 void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes,
 		    uint32_t align)
 {
+	struct k_heap *heap;
+
+	/* choose a heap */
+	if (caps & SOF_MEM_CAPS_L3) {
+#if CONFIG_L3_HEAP
+		heap = &l3_heap;
+		return (__sparse_force void *)l3_heap_alloc_aligned(heap, align, bytes);
+#else
+		k_panic();

[Review — Collaborator] this is kind of theoretical, but maybe a small follow-up PR to convert this and line 301 above to return NULL (see the sketch after this hunk)

[Review — Contributor] @lyakh this isn't theoretical based on the find by @marc-hb in the fuzzer logs

+#endif
+	} else {
+		heap = &sof_heap;
+	}
+
 	if (flags & SOF_MEM_FLAG_COHERENT)
-		return heap_alloc_aligned(&sof_heap, align, bytes);
+		return heap_alloc_aligned(heap, align, bytes);
 
-	return (__sparse_force void *)heap_alloc_aligned_cached(&sof_heap, align, bytes);
+	return (__sparse_force void *)heap_alloc_aligned_cached(heap, align, bytes);
}
EXPORT_SYMBOL(rballoc_align);
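
A minimal sketch of the follow-up suggested in the review thread above, assuming the heap-selection logic stays as merged; the error message is illustrative, and this is the reviewers' proposed direction, not part of this PR:

	if (caps & SOF_MEM_CAPS_L3) {
#if CONFIG_L3_HEAP
		heap = &l3_heap;
		return (__sparse_force void *)l3_heap_alloc_aligned(heap, align, bytes);
#else
		/* No L3 heap on this platform: fail the request instead of
		 * panicking, so a bad (e.g. fuzzer-generated) capability mask
		 * cannot take the whole DSP down.
		 */
		tr_err(&zephyr_tr, "rballoc_align(): no L3 heap support");
		return NULL;
#endif
	} else {
		heap = &sof_heap;
	}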

@@ -352,7 +412,7 @@ void rfree(void *ptr)

 #if CONFIG_L3_HEAP
 	if (is_l3_heap_pointer(ptr)) {
-		heap_free(&l3_heap, ptr);
+		l3_heap_free(&l3_heap, ptr);
 		return;
 	}
 #endif
zephyr/lib/cpu.c (10 additions & 4 deletions)

@@ -96,10 +96,16 @@ void cpu_notify_state_entry(enum pm_state state)
 		storage_buffer_size += LP_SRAM_SIZE;
 
 		/* allocate IMR buffer and store it in the global pointer */
-		global_imr_ram_storage = rmalloc(SOF_MEM_ZONE_SYS_RUNTIME,
-						 0,
-						 SOF_MEM_CAPS_L3,
-						 storage_buffer_size);
+		global_imr_ram_storage = rballoc_align(0, SOF_MEM_CAPS_L3,
+						       storage_buffer_size,
+						       PLATFORM_DCACHE_ALIGN);
+
+		/* If there is no IMR buffer, we cannot recover */
+		if (!global_imr_ram_storage) {
+			tr_err(&zephyr_tr, "failed to allocate global_imr_ram_storage");
+			k_panic();

[Review — Collaborator] this means: the DSP is being powered down and we try to allocate an IMR buffer to save state, and if we fail, we panic. Would it be possible to just abort saving state and go for a clean power off? Also rather theoretical, but it would probably be better not to bother the host with a DSP panic and just make this a clean shutdown. @ujfalusi Can be an incremental PR too (see the sketch after this hunk)

+		}
 
 #endif /* CONFIG_ADSP_IMR_CONTEXT_SAVE */
 	}
 }
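
A hedged sketch of the clean-shutdown alternative raised in the review thread above, assuming the cpu_notify_state_entry() context as merged; aborting the save instead of panicking is the reviewer's suggestion, not the merged behavior, and the comment wording is illustrative:

		global_imr_ram_storage = rballoc_align(0, SOF_MEM_CAPS_L3,
						       storage_buffer_size,
						       PLATFORM_DCACHE_ALIGN);

		/* Suggested alternative: skip the IMR context save and let the
		 * power-off proceed cleanly; the DSP simply cannot resume from
		 * IMR on the next boot.
		 */
		if (!global_imr_ram_storage) {
			tr_err(&zephyr_tr, "failed to allocate global_imr_ram_storage");
			return;
		}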