Skip to content

Commit

Permalink
[arch][arm] improve arm chainload
Browse files Browse the repository at this point in the history
arch_mmu_map was failing hard because the identity mapping does not fall within the region covered by `vmm_get_kernel_aspace`

this creates a new address space (aspace) covering the loader, so that the loader can be identity mapped

Linux is also unable to use the FPU if lazy FPU context switching had turned it off prior to the chainload; `arm_fpu_set_enable()` is used to turn it back on
  • Loading branch information
cleverca22 authored and travisg committed Sep 19, 2021
1 parent cf14bb4 commit d178704
Show file tree
Hide file tree
Showing 3 changed files with 28 additions and 11 deletions.
19 changes: 18 additions & 1 deletion arch/arm/arm/arch.c
Original file line number Diff line number Diff line change
Expand Up @@ -341,8 +341,22 @@ void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3
LTRACEF("loader address %p, phys 0x%lx, surrounding large page 0x%lx\n",
&arm_chain_load, loader_pa, loader_pa_section);

arch_aspace_t *aspace;
bool need_context_switch;
    // if loader_pa is within the kernel aspace, we can simply use arch_mmu_map to identity map it
    // if it's outside, we need to create a new aspace and context switch to it
if (arch_mmu_is_valid_vaddr(&vmm_get_kernel_aspace()->arch_aspace, loader_pa)) {
aspace = &vmm_get_kernel_aspace()->arch_aspace;
need_context_switch = false;
} else {
aspace = malloc(sizeof(*aspace));
arch_mmu_init_aspace(aspace, loader_pa_section, SECTION_SIZE, 0);
need_context_switch = true;
}

/* using large pages, map around the target location */
arch_mmu_map(&vmm_get_kernel_aspace()->arch_aspace, loader_pa_section, loader_pa_section, (2 * SECTION_SIZE / PAGE_SIZE), 0);
arch_mmu_map(aspace, loader_pa_section, loader_pa_section, (2 * SECTION_SIZE / PAGE_SIZE), 0);
if (need_context_switch) arch_mmu_context_switch(aspace);
#else
/* for non vm case, just branch directly into it */
entry_pa = (paddr_t)entry;
Expand All @@ -358,6 +372,9 @@ void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3
/* put the booting cpu back into close to a default state */
arch_quiesce();

    // Linux won't re-enable the FPU during boot, so it must be enabled when chainloading
arm_fpu_set_enable(true);

LTRACEF("branching to physical address of loader\n");

/* branch to the physical address version of the chain loader routine */
Expand Down
4 changes: 4 additions & 0 deletions arch/arm/arm/include/arch/aspace.h
Original file line number Diff line number Diff line change
Expand Up @@ -25,4 +25,8 @@ struct arch_aspace {
struct list_node pt_page_list;
};

/* Returns true iff vaddr lies inside this aspace's range [base, base + size - 1]. */
static inline bool arch_mmu_is_valid_vaddr(struct arch_aspace *aspace, vaddr_t vaddr) {
    const vaddr_t first = aspace->base;
    const vaddr_t last = aspace->base + aspace->size - 1;

    if (vaddr < first)
        return false;
    return vaddr <= last;
}

__END_CDECLS
16 changes: 6 additions & 10 deletions arch/arm/arm/mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -132,10 +132,6 @@ static uint32_t mmu_flags_to_l2_arch_flags_small_page(uint flags) {
return arch_flags;
}

static inline bool is_valid_vaddr(arch_aspace_t *aspace, vaddr_t vaddr) {
return (vaddr >= aspace->base && vaddr <= aspace->base + aspace->size - 1);
}

static void arm_mmu_map_section(arch_aspace_t *aspace, addr_t paddr, addr_t vaddr, uint flags) {
int index;

Expand Down Expand Up @@ -242,8 +238,8 @@ status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, ui
DEBUG_ASSERT(aspace);
DEBUG_ASSERT(aspace->tt_virt);

DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr));
if (!is_valid_vaddr(aspace, vaddr))
DEBUG_ASSERT(arch_mmu_is_valid_vaddr(aspace, vaddr));
if (!arch_mmu_is_valid_vaddr(aspace, vaddr))
return ERR_OUT_OF_RANGE;

/* Get the index into the translation table */
Expand Down Expand Up @@ -487,8 +483,8 @@ int arch_mmu_map(arch_aspace_t *aspace, addr_t vaddr, paddr_t paddr, uint count,
DEBUG_ASSERT(aspace);
DEBUG_ASSERT(aspace->tt_virt);

DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr));
if (!is_valid_vaddr(aspace, vaddr))
DEBUG_ASSERT(arch_mmu_is_valid_vaddr(aspace, vaddr));
if (!arch_mmu_is_valid_vaddr(aspace, vaddr))
return ERR_OUT_OF_RANGE;

#if !WITH_ARCH_MMU_PICK_SPOT
Expand Down Expand Up @@ -583,9 +579,9 @@ int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, uint count) {
DEBUG_ASSERT(aspace);
DEBUG_ASSERT(aspace->tt_virt);

DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr));
DEBUG_ASSERT(arch_mmu_is_valid_vaddr(aspace, vaddr));

if (!is_valid_vaddr(aspace, vaddr))
if (!arch_mmu_is_valid_vaddr(aspace, vaddr))
return ERR_OUT_OF_RANGE;

DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
Expand Down

0 comments on commit d178704

Please sign in to comment.