WIP: lk: Address memory aliasing issue #265

Open · wants to merge 1 commit into base: master
5 changes: 3 additions & 2 deletions arch/arm64/mmu.c
@@ -194,14 +194,15 @@ static int alloc_page_table(paddr_t *paddrp, uint page_size_shift) {
LTRACEF("page_size_shift %u\n", page_size_shift);

if (size == PAGE_SIZE) {
vm_page_t *p = pmm_alloc_page();
vm_page_t *p = pmm_alloc_page(PMM_ARENA_FLAG_KMAP);
if (!p) {
return ERR_NO_MEMORY;
}
*paddrp = vm_page_to_paddr(p);
} else if (size > PAGE_SIZE) {
size_t count = size / PAGE_SIZE;
size_t ret = pmm_alloc_contiguous(count, page_size_shift, paddrp, NULL);
size_t ret = pmm_alloc_contiguous(PMM_ARENA_FLAG_KMAP,
count, page_size_shift, paddrp, NULL);
if (ret != count)
return ERR_NO_MEMORY;
} else {
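
Note on this hunk: the arm64 MMU code reads and writes page-table entries through the kernel's existing linear mapping, so the pages backing the tables must come from memory that is guaranteed to already be mapped. That is why both allocation sites above now pass PMM_ARENA_FLAG_KMAP. A minimal sketch of the resulting pattern, against the API introduced in this PR (the helper name is illustrative, not part of the change):

#include <kernel/vm.h>
#include <err.h>

/* illustrative helper: grab one page the kernel can dereference
 * immediately through its linear mapping */
static status_t alloc_kmap_page(paddr_t *paddrp) {
    vm_page_t *p = pmm_alloc_page(PMM_ARENA_FLAG_KMAP);
    if (!p)
        return ERR_NO_MEMORY;
    *paddrp = vm_page_to_paddr(p);
    return NO_ERROR;
}
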
20 changes: 15 additions & 5 deletions kernel/include/kernel/vm.h
@@ -118,19 +118,28 @@ typedef struct pmm_arena {
struct list_node free_list;
} pmm_arena_t;

#define PMM_ARENA_FLAG_KMAP (0x1) /* this arena is already mapped and useful for kallocs */
/* PMM Arena Flags */
#define PMM_ARENA_FLAG_UNMAPPED (1U << 0) /* This arena is not yet mapped and is useful for memory
                                             allocation using the vmm_* APIs */
#define PMM_ARENA_FLAG_KMAP (1U << 1) /* This arena is already mapped and useful for kallocs */

#define PMM_ARENA_FLAG_ANY (PMM_ARENA_FLAG_UNMAPPED | PMM_ARENA_FLAG_KMAP)

/* Add a pre-filled memory arena to the physical allocator. */
status_t pmm_add_arena(pmm_arena_t *arena) __NONNULL((1));

/* Allocate count pages of physical memory, adding to the tail of the passed list.
/* Allocate count pages of physical memory from arenas matching the provided
 * arena flags, adding them to the tail of the passed list.
* The list must be initialized.
* Returns the number of pages allocated.
*/
size_t pmm_alloc_pages(uint count, struct list_node *list) __NONNULL((2));
size_t pmm_alloc_pages(uint aflags, uint count, struct list_node *list) __NONNULL((3));

/* Allocate a single page */
vm_page_t *pmm_alloc_page(void);
vm_page_t *pmm_alloc_page(uint aflags);

/* Check whether a given physical address already belongs to a registered arena */
bool pmm_address_in_arena(paddr_t address);

/* Allocate a specific range of physical pages, adding to the tail of the passed list.
* The list must be initialized.
@@ -150,7 +159,8 @@ size_t pmm_free_page(vm_page_t *page) __NONNULL((1));
* If the optional physical address pointer is passed, return the address.
* If the optional list is passed, append the allocated page structures to the tail of the list.
*/
size_t pmm_alloc_contiguous(uint count, uint8_t align_log2, paddr_t *pa, struct list_node *list);
size_t pmm_alloc_contiguous(uint aflags,
uint count, uint8_t align_log2, paddr_t *pa, struct list_node *list);

/* Allocate a run of pages out of the kernel area and return the pointer in kernel space.
* If the optional list is passed, append the allocated page structures to the tail of the list.
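
How the two arena classes divide the physical map, per the comments above: KMAP arenas back memory the kernel dereferences directly (kallocs, page tables), UNMAPPED arenas back memory that is only ever mapped explicitly through the vmm_* APIs, and PMM_ARENA_FLAG_ANY matches either. The allocators test a request against an arena with an exact-subset check; a sketch of that rule (the helper is illustrative only):

#include <kernel/vm.h>
#include <stdbool.h>

/* illustrative: an arena satisfies a request either when the caller asks
 * for ANY, or when the arena carries every requested flag bit */
static bool arena_satisfies(uint arena_flags, uint aflags) {
    return (aflags == PMM_ARENA_FLAG_ANY) ||
           ((arena_flags & aflags) == aflags);
}
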
61 changes: 43 additions & 18 deletions kernel/vm/pmm.c
@@ -103,7 +103,8 @@ status_t pmm_add_arena(pmm_arena_t *arena) {
return NO_ERROR;
}

size_t pmm_alloc_pages(uint count, struct list_node *list) {
size_t pmm_alloc_pages(uint aflags,
uint count, struct list_node *list) {
LTRACEF("count %u\n", count);

/* list must be initialized prior to calling this */
@@ -118,17 +119,20 @@ size_t pmm_alloc_pages(uint count, struct list_node *list) {
/* walk the arenas in order, allocating as many pages as we can from each */
pmm_arena_t *a;
list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
while (allocated < count) {
vm_page_t *page = list_remove_head_type(&a->free_list, vm_page_t, node);
if (!page)
goto done;
if ((aflags == PMM_ARENA_FLAG_ANY) ||
((a->flags & aflags) == aflags)) {
while (allocated < count) {
vm_page_t *page = list_remove_head_type(&a->free_list, vm_page_t, node);
if (!page)
goto done;

a->free_count--;
a->free_count--;

page->flags |= VM_PAGE_FLAG_NONFREE;
list_add_tail(list, &page->node);
page->flags |= VM_PAGE_FLAG_NONFREE;
list_add_tail(list, &page->node);

allocated++;
allocated++;
}
}
}

@@ -137,10 +141,10 @@ size_t pmm_alloc_pages(uint count, struct list_node *list) {
return allocated;
}

vm_page_t *pmm_alloc_page(void) {
vm_page_t *pmm_alloc_page(uint aflags) {
struct list_node list = LIST_INITIAL_VALUE(list);

size_t ret = pmm_alloc_pages(1, &list);
size_t ret = pmm_alloc_pages(aflags, 1, &list);
if (ret == 0) {
return NULL;
}
@@ -150,6 +154,22 @@ vm_page_t *pmm_alloc_page(uint aflags) {
return list_peek_head_type(&list, vm_page_t, node);
}

bool pmm_address_in_arena(paddr_t address) {
bool found = false;
mutex_acquire(&lock);

pmm_arena_t *a;
list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
if (ADDRESS_IN_ARENA(address, a)) {
found = true;
break;
}
}
mutex_release(&lock);
return found;
}

size_t pmm_alloc_range(paddr_t address, uint count, struct list_node *list) {
LTRACEF("address 0x%lx, count %u\n", address, count);

@@ -243,7 +263,7 @@ void *pmm_alloc_kpages(uint count, struct list_node *list) {

/* fast path for single page */
if (count == 1) {
vm_page_t *p = pmm_alloc_page();
vm_page_t *p = pmm_alloc_page(PMM_ARENA_FLAG_KMAP);
if (!p) {
return NULL;
}
@@ -252,7 +272,8 @@ }
}

paddr_t pa;
size_t alloc_count = pmm_alloc_contiguous(count, PAGE_SIZE_SHIFT, &pa, list);
size_t alloc_count = pmm_alloc_contiguous(PMM_ARENA_FLAG_KMAP,
count, PAGE_SIZE_SHIFT, &pa, list);
if (alloc_count == 0)
return NULL;

@@ -280,7 +301,8 @@ size_t pmm_free_kpages(void *_ptr, uint count) {
return pmm_free(&list);
}

size_t pmm_alloc_contiguous(uint count, uint8_t alignment_log2, paddr_t *pa, struct list_node *list) {
size_t pmm_alloc_contiguous(uint aflags, uint count, uint8_t alignment_log2,
paddr_t *pa, struct list_node *list) {
LTRACEF("count %u, align %u\n", count, alignment_log2);

if (count == 0)
@@ -292,8 +314,9 @@ size_t pmm_alloc_contiguous(uint count, uint8_t alignment_log2, paddr_t *pa, str

pmm_arena_t *a;
list_for_every_entry(&arena_list, a, pmm_arena_t, node) {
// XXX make this a flag to only search kmap?
if (a->flags & PMM_ARENA_FLAG_KMAP) {
// only search arenas that match the requested flags
if ((aflags == PMM_ARENA_FLAG_ANY) ||
((a->flags & aflags) == aflags)) {
/* walk the list starting at alignment boundaries.
* calculate the starting offset into this arena, based on the
* base address of the arena to handle the case where the arena
@@ -425,7 +448,8 @@ static int cmd_pmm(int argc, const cmd_args *argv) {
struct list_node list;
list_initialize(&list);

uint count = pmm_alloc_pages(argv[2].u, &list);
uint count = pmm_alloc_pages(PMM_ARENA_FLAG_KMAP,
argv[2].u, &list);
printf("alloc returns %u\n", count);

vm_page_t *p;
@@ -475,7 +499,8 @@ static int cmd_pmm(int argc, const cmd_args *argv) {
list_initialize(&list);

paddr_t pa;
size_t ret = pmm_alloc_contiguous(argv[2].u, argv[3].u, &pa, &list);
size_t ret = pmm_alloc_contiguous(PMM_ARENA_FLAG_KMAP,
argv[2].u, argv[3].u, &pa, &list);
printf("pmm_alloc_contiguous returns %zu, address 0x%lx\n", ret, pa);
printf("address %% align = 0x%lx\n", pa % argv[3].u);

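
With the reworked entry points, callers pick an allocation class explicitly. A usage sketch against the signatures in this file (counts are arbitrary; the function is illustrative):

#include <kernel/vm.h>

static void pmm_usage_example(void) {
    struct list_node pages = LIST_INITIAL_VALUE(pages);

    /* pages the kernel will touch through its linear map */
    size_t n = pmm_alloc_pages(PMM_ARENA_FLAG_KMAP, 4, &pages);

    /* a physically contiguous run that will only be mapped via vmm_* */
    paddr_t pa;
    size_t run = pmm_alloc_contiguous(PMM_ARENA_FLAG_UNMAPPED,
                                      16, PAGE_SIZE_SHIFT, &pa, &pages);

    /* every allocated page was appended to the list, so one call frees them */
    pmm_free(&pages);
    (void)n; (void)run;
}
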
22 changes: 20 additions & 2 deletions kernel/vm/vmm.c
@@ -354,6 +354,14 @@ status_t vmm_alloc_physical(vmm_aspace_t *aspace, const char *name, size_t size,
vaddr = (vaddr_t)*ptr;
}

/* Check if the physical address is part of a registered arena;
 * if so, the caller should not be using this API, since that
 * physical memory is already managed by the PMM */
if (pmm_address_in_arena(paddr)) {
return ERR_NOT_VALID;
}

mutex_acquire(&vmm_lock);

/* allocate a region and put it in the aspace list */
@@ -413,7 +421,8 @@ status_t vmm_alloc_contiguous(vmm_aspace_t *aspace, const char *name, size_t siz

paddr_t pa = 0;
/* allocate a run of physical pages */
size_t count = pmm_alloc_contiguous(size / PAGE_SIZE, align_pow2, &pa, &page_list);
size_t count = pmm_alloc_contiguous(PMM_ARENA_FLAG_UNMAPPED,
size / PAGE_SIZE, align_pow2, &pa, &page_list);
if (count < size / PAGE_SIZE) {
DEBUG_ASSERT(count == 0); /* check that the pmm didn't allocate a partial run */
err = ERR_NO_MEMORY;
@@ -487,7 +496,8 @@ status_t vmm_alloc(vmm_aspace_t *aspace, const char *name, size_t size, void **p
struct list_node page_list;
list_initialize(&page_list);

size_t count = pmm_alloc_pages(size / PAGE_SIZE, &page_list);
size_t count = pmm_alloc_pages(PMM_ARENA_FLAG_UNMAPPED,
size / PAGE_SIZE, &page_list);
DEBUG_ASSERT(count <= size);
if (count < size / PAGE_SIZE) {
LTRACEF("failed to allocate enough pages (asked for %zu, got %zu)\n", size / PAGE_SIZE, count);
@@ -568,6 +578,14 @@ status_t vmm_free_region(vmm_aspace_t *aspace, vaddr_t vaddr) {
/* remove it from aspace */
list_delete(&r->node);

/* flush the caches if this mapping was cacheable so that
 * any future mapping of the same physical address starts
 * out with clean cache contents */
if ((r->arch_mmu_flags & ARCH_MMU_FLAG_CACHE_MASK)
        == ARCH_MMU_FLAG_CACHED) {
    arch_clean_invalidate_cache_range(r->base, r->size);
}
}

/* unmap it */
arch_mmu_unmap(&aspace->arch_aspace, r->base, r->size / PAGE_SIZE);

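
These two changes attack the aliasing problem from both ends: vmm_alloc_physical() now refuses physical ranges the PMM already owns, so a second mapping with different attributes cannot be created behind the allocator's back, and vmm_free_region() cleans cacheable mappings before tearing them down, so a later mapping of the same physical pages (possibly uncached) does not observe stale dirty lines. A sketch of the guard as a caller would hit it, using pmm_address_in_arena() from this PR (the wrapper is illustrative):

#include <kernel/vm.h>
#include <err.h>

/* illustrative: device mappings must target memory the PMM does not manage */
static status_t check_device_paddr(paddr_t paddr) {
    if (pmm_address_in_arena(paddr))
        return ERR_NOT_VALID; /* would alias PMM-managed RAM */
    return NO_ERROR;
}
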
3 changes: 3 additions & 0 deletions platform/qemu-virt-arm/include/platform/qemu-virt.h
@@ -9,6 +9,9 @@

/* up to 30 GB of ram */
#define MEMORY_BASE_PHYS (0x40000000)

#define UNMAPPED_MEMSIZE (0x02000000)

#if ARCH_ARM64
#define MEMORY_APERTURE_SIZE (30ULL * 1024 * 1024 * 1024)
#else
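
For scale: 0x02000000 is 32 MB, carved off the top of the platform's RAM for the unmapped arena set up in platform.c below.
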
27 changes: 22 additions & 5 deletions platform/qemu-virt-arm/platform.c
@@ -34,15 +34,14 @@

/* initial memory mappings. parsed by start.S */
struct mmu_initial_mapping mmu_initial_mappings[] = {
/* all of memory */
/* All of the memory */
{
.phys = MEMORY_BASE_PHYS,
.virt = KERNEL_BASE,
.size = MEMORY_APERTURE_SIZE,
.flags = 0,
.name = "memory"
},

/* 1GB of peripherals */
{
.phys = PERIPHERAL_BASE_PHYS,
@@ -56,13 +55,20 @@ struct mmu_initial_mapping mmu_initial_mappings[] = {
{ 0 }
};

static pmm_arena_t arena = {
static pmm_arena_t mapped_arena = {
.name = "ram",
.base = MEMORY_BASE_PHYS,
.size = DEFAULT_MEMORY_SIZE,
.flags = PMM_ARENA_FLAG_KMAP,
};

static pmm_arena_t unmapped_arena = {
.name = "unmapped_ram",
.base = MEMORY_BASE_PHYS + DEFAULT_MEMORY_SIZE,
.size = UNMAPPED_MEMSIZE,
.flags = PMM_ARENA_FLAG_UNMAPPED,
};

extern void psci_call(ulong arg0, ulong arg1, ulong arg2, ulong arg3);

// callbacks to the fdt_walk routine
@@ -84,7 +90,10 @@ static void memcallback(uint64_t base, uint64_t len, void *cookie) {
#endif

/* set the size in the pmm arena */
arena.size = len;
/* TODO: check that UNMAPPED_MEMSIZE actually fits in the
 * memory found; a new fdt entry could probably be added */
mapped_arena.size = (len - UNMAPPED_MEMSIZE);
unmapped_arena.base = (MEMORY_BASE_PHYS + mapped_arena.size);

*found_mem = true; // stop searching after the first one
}
@@ -124,7 +133,8 @@ void platform_early_init(void) {
}

/* add the main memory arena */
pmm_add_arena(&arena);
pmm_add_arena(&mapped_arena);
pmm_add_arena(&unmapped_arena);

/* reserve the first 64k of ram, which should be holding the fdt */
struct list_node list = LIST_INITIAL_VALUE(list);
@@ -152,6 +162,13 @@ void platform_early_init(void) {
}

void platform_init(void) {
/* TODO: take care of any vmm_alloc* invocations made before this point */
/* Unmap the initial mapping that still covers the unmapped ram arena */
arch_clean_invalidate_cache_range((addr_t)paddr_to_kvaddr(unmapped_arena.base),
                                  unmapped_arena.size);
arch_mmu_unmap(&vmm_get_kernel_aspace()->arch_aspace,
               (vaddr_t)paddr_to_kvaddr(unmapped_arena.base),
               unmapped_arena.size / PAGE_SIZE);

uart_init();

/* detect any virtio devices */
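
Worked numbers for the default configuration, assuming the fdt reports the same 128 MB that rules.mk configures (MEMSIZE 0x08000000):

/* len                 = 0x08000000                           (128 MB)
 * mapped_arena.size   = 0x08000000 - 0x02000000 = 0x06000000 ( 96 MB)
 * unmapped_arena.base = 0x40000000 + 0x06000000 = 0x46000000
 * unmapped_arena.size = 0x02000000                           ( 32 MB)
 */
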
2 changes: 1 addition & 1 deletion platform/qemu-virt-arm/rules.mk
@@ -22,7 +22,7 @@ MODULE_SRCS += \
$(LOCAL_DIR)/uart.c

MEMBASE := 0x40000000
MEMSIZE ?= 0x08000000 # 512MB
MEMSIZE ?= 0x08000000 # 128MB
KERNEL_LOAD_OFFSET := 0x100000 # 1MB

MODULE_DEPS += \
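
For the record, 0x08000000 bytes is 128 MB; the old comment claiming 512 MB was simply wrong, so this hunk corrects documentation rather than shrinking the configured memory size.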