arch/arm: Separate out architecture-specifics
This commit is contained in:
21
arch/arm/include/arch/types.h
Normal file
21
arch/arm/include/arch/types.h
Normal file
@ -0,0 +1,21 @@
|
||||
/*
 * Copyright (C) 2012, Aaron Lindsay <aaron@aclindsay.com>
 *
 * This file is part of Aedrix.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/* Include guard added: this header previously had none, so a double
 * inclusion would re-include arch-generic/types.h redundantly and rely
 * on that header guarding itself. */
#ifndef ARCH_ARM_ARCH_TYPES_H
#define ARCH_ARM_ARCH_TYPES_H

/* ARM currently has no architecture-specific type overrides; it uses
 * the generic type definitions verbatim. */
#include <arch-generic/types.h>

#endif /* ARCH_ARM_ARCH_TYPES_H */
|
19
arch/arm/kernel.ld
Normal file
19
arch/arm/kernel.ld
Normal file
@ -0,0 +1,19 @@
|
||||
/* Kernel linker script for ARM. Entry point is `start` (start.S).
 * The kernel is linked to run at virtual address 0x80100000; start.S is
 * responsible for relocating/mapping the image so this holds at runtime. */
ENTRY (start)

SECTIONS
{
	. = 0x80100000;		/* link (virtual) base address of the kernel */
	.text : { *(.text*) *(.rodata*) }	/* code plus read-only data */
	.init : {
		/* Tables of initcall function pointers, bracketed by symbols so
		 * C code can iterate them — presumably walked during boot;
		 * confirm against the kernel's init code. */
		early_initcalls_start = .;
		*(.earlyinitcalls*)
		early_initcalls_end = .;
		initcalls_start = .;
		*(.driversubsysinitcalls*)
		*(.deviceinitcalls*)
		initcalls_end = .;
	}
	.data : { *(.data*) }
	.bss : { *(.bss*) *(COMMON*) }	/* zero-initialized data, incl. .comm symbols */
	kernel_end = .;		/* first address past the kernel image; used by start.S and mmu.c */
}
|
10
arch/arm/kernel.mk
Normal file
10
arch/arm/kernel.mk
Normal file
@ -0,0 +1,10 @@
|
||||
# Non-recursive make fragment for arch/arm.
# header.mk/footer.mk maintain the directory stack and define $(d) as the
# current directory — TODO confirm against BASEDIR/header.mk.
DIRNAME := arch/arm
SUBDIRS := kernel

include $(BASEDIR)/header.mk

# Objects contributed by this directory
OBJS_$(d) := $(d)/start.o

# Add them to the kernel-wide object list
KOBJS += $(OBJS_$(d))

include $(BASEDIR)/footer.mk
|
10
arch/arm/kernel/kernel.mk
Normal file
10
arch/arm/kernel/kernel.mk
Normal file
@ -0,0 +1,10 @@
|
||||
# Non-recursive make fragment for arch/arm/kernel.
# Mirrors the structure of the parent arch/arm fragment; $(d) is supplied
# by BASEDIR/header.mk — TODO confirm.
DIRNAME := kernel
SUBDIRS :=

include $(BASEDIR)/header.mk

# Objects contributed by this directory
OBJS_$(d) := $(d)/mmu.o

# Add them to the kernel-wide object list
KOBJS += $(OBJS_$(d))

include $(BASEDIR)/footer.mk
|
116
arch/arm/kernel/mmu.c
Normal file
116
arch/arm/kernel/mmu.c
Normal file
@ -0,0 +1,116 @@
|
||||
/*
|
||||
Copyright (C) 2012, Aaron Lindsay <aaron@aclindsay.com>
|
||||
|
||||
This file is part of Aedrix.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*/
|
||||
|
||||
#include <print.h>
|
||||
#include <types.h>
|
||||
#include <mm.h>
|
||||
|
||||
/* Coprocessor-15 register encodings as (cp, opc1, CRn, CRm, opc2) tuples,
 * for use with cp_read()/cp_write() below. */
#define SCTLR 15,0,1,0,0	/* System Control Register */
#define TTBR0 15,0,2,0,0	/* Translation Table Base Register 0 */
#define TTBR1 15,0,2,0,1	/* Translation Table Base Register 1 */
#define TTBCR 15,0,2,0,2	/* Translation Table Base Control Register */

/* Inline-asm MRC/MCR accessors; the encoding components are stringized
 * directly into the instruction text. */
#define _cp_read(var, cp, opc1, CRn, CRm, opc2) asm("mrc p" #cp ", " #opc1 ", %0, c" #CRn ", c" #CRm ", " #opc2 ";" : "=r"(var) : )
#define _cp_write(var, cp, opc1, CRn, CRm, opc2) asm("mcr p" #cp ", " #opc1 ", %0, c" #CRn ", c" #CRm ", " #opc2 ";" : : "r"(var) )
/* Extra expansion step so the register macros above expand into an
 * argument list before stringization. */
#define cp_read(var, ...) _cp_read(var, __VA_ARGS__)
#define cp_write(var, ...) _cp_write(var, __VA_ARGS__)

#define TT_BASE_SIZE (1<<14) /* 16k */
#define TT_SECTION_SIZE (1<<20) /* 1mb */

/* Kernel image bounds in both address spaces; filled in by mmu_reinit()
 * and consumed by declare_memory_region(). */
uint32 *kernel_start_phys, *kernel_start_virt, *kernel_end_phys, *kernel_end_virt;
|
||||
|
||||
void print_mapping(void *addr) {
|
||||
extern uint32 tt_base_virtual;
|
||||
print("%x: %x\n", addr, *(uint32 *)(tt_base_virtual + (((uint32)addr)>>18)));
|
||||
}
|
||||
|
||||
/* Rewrite the boot-time translation table (built by setup_mmu in start.S)
 * so that every 1MB section of the 4GB address space is mapped, while
 * preserving the kernel's relocated physical<->virtual mapping. Also
 * records the kernel's bounds in the kernel_{start,end}_{phys,virt}
 * globals for later use by declare_memory_region(). */
void mmu_reinit() {
	extern uint32 tt_base_virtual, tt_base_physical, start;
	uint32 curr_addr;
	uint32 *curr_tt_entry;
	int virt_phys_offset;	/* byte offset: virtual address - physical address */

	virt_phys_offset = tt_base_virtual - tt_base_physical;
	kernel_start_virt = &start;
	/* /4 converts the byte offset to uint32 elements for pointer arithmetic */
	kernel_start_phys = kernel_start_virt - virt_phys_offset/4;
	/* The translation table sits just past the kernel image (see setup_mmu),
	 * so the image-plus-table region ends TT_BASE_SIZE past the table base. */
	kernel_end_virt = (uint32 *)(tt_base_virtual + TT_BASE_SIZE);
	kernel_end_phys = (uint32 *)(tt_base_physical + TT_BASE_SIZE);

	//get the current translation table base address
	curr_tt_entry = (uint32 *)tt_base_virtual;

	//do first loop iteration outside the loop, because we have to check against wrapping back around to know we're done
	*curr_tt_entry = 0xc02; /* 0xc02 means read/write at any privilege level, and that it's a section w/o PXN bit set */
	curr_tt_entry++;

	//create identity mapping for entire address space using sections.
	//BUT, if we've relocated the kernel from where it is in physical
	//memory, make sure we keep those mappings correct, and we'll actually
	//swap the two mappings so all of memory is addressable.
	for (curr_addr = 0x00100000; curr_addr != 0; curr_addr += 0x00100000) {
		if ((uint32 *)curr_addr >= kernel_start_phys && (uint32 *)curr_addr < kernel_end_phys) {
			/* section holds the kernel physically: map it to the kernel's virtual frame */
			*curr_tt_entry = (curr_addr + virt_phys_offset) | 0xc02;
		} else if ((uint32 *)curr_addr >= kernel_start_virt && (uint32 *)curr_addr < kernel_end_virt) {
			/* section is the kernel's virtual home: map it to the physical frame */
			*curr_tt_entry = (curr_addr - virt_phys_offset) | 0xc02;
		} else {
			/* everything else: identity (1:1) mapping */
			*curr_tt_entry = curr_addr | 0xc02;
		}
		curr_tt_entry++;
	}
}
|
||||
|
||||
/* Return nonzero iff region B ([lower_b, upper_b]) lies entirely inside
 * region A ([lower_a, upper_a]). Bounds are treated as inclusive. */
int mmu_region_contains(void *lower_a, void *upper_a, void *lower_b, void *upper_b) {
	if (lower_b < lower_a)
		return 0;
	if (upper_b > upper_a)
		return 0;
	return 1;
}
|
||||
|
||||
/* Round down/up to a 1MB section boundary. In section_round_up, the `& ~1`
 * clears bit 0 before rounding — presumably to tolerate inclusive (odd)
 * end pointers like 0x...fffff; confirm intent. */
#define section_round_down(ptr) (((uint32)ptr) & ~(TT_SECTION_SIZE-1))
#define section_round_up(ptr) (((((uint32)ptr) & ~1) + (TT_SECTION_SIZE-1) ) & ~(TT_SECTION_SIZE-1))

/* Called one per physical memory region by bootup code. This function is
 * responsible for only adding (via mm_add_free_region()) those parts of the
 * memory region which are still available (i.e. aren't in the kernel and
 * haven't been remapped anywhere else. */
void declare_memory_region(void *lower, void *upper) {
	/* Kernel bounds rounded out to whole 1MB sections; the *_end values are
	 * inclusive (hence the -1). Relies on mmu_reinit() having filled in the
	 * kernel_* globals first. */
	void *k_section_start_phys = (void *)section_round_down(kernel_start_phys);
	void *k_section_end_phys = (void *)(section_round_up(kernel_end_phys) - 1);
	void *k_section_start_virt = (void *)section_round_down(kernel_start_virt);
	void *k_section_end_virt = (void *)(section_round_up(kernel_end_virt) - 1);

	if (upper - lower < 1) {
		print("Warning: declare_memory_region() called with lower=%x, upper=%x. Ignoring.\n", lower, upper);
		return;
	}

	//TODO It's possible (though highly unlikely) that the kernel (virtual)
	//is split across two different memory regions. We should probably
	//handle this.
	if (mmu_region_contains(lower, upper, k_section_start_phys, k_section_end_phys)) {
		//Don't map any of the physical kernel's memory
		/* Recurse on the pieces below and above the kernel's physical sections */
		declare_memory_region(lower, (void *) ((char *)k_section_start_phys - 1));
		declare_memory_region((void *) ((char *)k_section_end_phys + 1), upper);
		/* Free the tail of the kernel's last virtual section, past the image.
		 * NOTE(review): if the kernel image ends exactly on a section boundary,
		 * k_section_end_virt == kernel_end_virt - 1 and this passes an inverted
		 * (empty) range to mm_add_free_region() — confirm it tolerates that. */
		mm_add_free_region(kernel_end_virt, k_section_end_virt);
	} else if (mmu_region_contains(lower, upper, k_section_start_virt, k_section_end_virt)) {
		/* Region contains the kernel's virtual home: carve it out, keep the rest */
		declare_memory_region(lower, (void *) ((char *)k_section_start_virt - 1));
		declare_memory_region((void *) ((char *)k_section_end_virt + 1), upper);
	} else {
		/* Region doesn't overlap the kernel at all: free it wholesale */
		mm_add_free_region(lower, upper);
	}
}
|
190
arch/arm/start.S
Normal file
190
arch/arm/start.S
Normal file
@ -0,0 +1,190 @@
|
||||
/*
|
||||
Copyright (C) 2012, Aaron Lindsay <aaron@aclindsay.com>
|
||||
|
||||
This file is part of Aedrix.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Kernel entry in assembly. This handles relocating the kernel so that it is
|
||||
* in both physical and virtual memory where we want it to be. We copy the
|
||||
* kernel to a different physical location if necessary, turn on the MMU,
|
||||
* setting up a dual-mapping if the kernel is not in physical memory at the
|
||||
* same place it was linked against. Finally, we jump into the kernel's main()
|
||||
* function in C using the address it is linked against. When the MMU gets
|
||||
* initialized fully later, it will remove the initial 1:1 mapping.
|
||||
*/
|
||||
.globl start
start:
	str r1, machine_type	/* Backup atags/machine type registers so we can access them later from C */
	str r2, atags_ptr

	bl copy_kernel		/* returns r0 = kernel's (possibly new) physical base */
copy_kernel_lr: /* Used to calculate address at which kernel is currently loaded by copy_kernel */

	bl setup_mmu		/* takes r0 from copy_kernel; enables the MMU */

	ldr sp, =stack+0x10000	/* Set up the stack (top of the 64k .comm region below) */
	bl main			/* enter C; not expected to return */

1:
	b 1b /* Halt */
|
||||
|
||||
copy_kernel:
	/*
	 * Because we're not necessarily loaded at an address that's aligned the same
	 * as where we're linked, copy the kernel over to fix that up.
	 *
	 * clobbers:
	 *   r0-r10
	 * returns:
	 *   r0 = new kernel base address
	 */
	sub r0, lr, $(copy_kernel_lr - start) /* r0 <- current address of start */
	ldr r1, tt_section_align
	ands r2, r0, r1 /* If we're already aligned to 1mb, early out */
	bxeq lr

	mov r2, r0 /* r2 <- r0 <- current address of start */
	mov r3, #1
	lsl r3, r3, #20 /* r3 <- 1mb */
	add r0, r0, r3
	bic r0, r0, r1 /* r0 <- new address of start (next 1mb boundary above current) */
	sub r1, r0, r2 /* r1 <- offset between current and new start */

	/* TODO only copy kernel image sections that aren't zeroed (leave out .bss) */
	ldr r5, =start
	ldr r6, =kernel_end
	sub r6, r6, r5 /* r6 <- kernel image size in bytes */
	add r6, r6, r2 /* r6 <- old kernel_end */
	add r6, r6, #16
	bic r6, r6, #0xf /* r6 <- old kernel_end (aligned to 16 bytes) */
	add r5, r6, r1 /* r5 <- new kernel_end */

copy_kernel_loop:
	/* Copy the kernel to its new location, 16 bytes at a time. We do this
	 * from the end to the beginning so we don't overwrite the old kernel if the
	 * destination and source overlap. */
	sub r6, r6, #16
	sub r5, r5, #16
	ldm r6, {r7, r8, r9, r10}
	stm r5, {r7, r8, r9, r10}
	subs r4, r5, r0 /* done when the destination pointer reaches the new start */
	bne copy_kernel_loop

	add lr, lr, r1 /* Fixup link register for new kernel location */
	bx lr
|
||||
|
||||
setup_mmu:
	/*
	 * Calculate the address at which we will store our translation table.
	 * Currently, we store it just past the end of the kernel. Getting the physical
	 * address of the end of the kernel is tricky, since kernel_end is the address
	 * the end of the kernel is linked at, so we have to do a little math.
	 *
	 * arguments:
	 *   r0 = current kernel base address (physical), aligned to 1mb boundary
	 * clobbers:
	 *   r0-r10
	 *
	 * NOTE(review): the translation table memory is never zeroed here; only the
	 * entries covering the kernel/table are written before the MMU is enabled.
	 * Entries for other addresses hold junk until mmu_reinit() rewrites them —
	 * confirm nothing touches unmapped addresses in between.
	 */
	/* Find future virtual address of the translation table
	 * (kernel_end rounded up to the table's alignment) */
	ldr r1, =kernel_end
	ldr r2, tt_base_align
	ands r3, r1, r2 /* Z set iff kernel_end is already aligned */
	mov r3, r1
	addne r3, r1, r2 /* if unaligned, bump past the next boundary */
	bic r2, r3, r2 /* r2 <- future virtual address of translation table */
	str r2, tt_base_virtual

	/* Find physical address of the translation table */
	ldr r1, =start
	sub r1, r2, r1 /* r1 <- table's byte offset from the kernel's start */
	add r1, r0, r1 /* r1 <- physical address of translation table */
	str r1, tt_base_physical

	/* How many sections do we need to map to make sure we have the kernel
	 * and translation table covered? */
	ldr r3, tt_base_align
	add r3, r3, r1
	sub r3, r3, r0
	lsr r3, r3, #20
	add r3, r3, #1 /* r3 <- number of sections to map */

	ldr r4, =start /* r4 <- kernel virtual start address */
	lsr r5, r4, #18 /* 18 = 20 (1mb) - 2 (4 bytes per entry) */
	add r5, r5, r1 /* r5 <- address of translation page entry for first kernel section (final mapping) */

	mov r6, r0 /* r6 <- kernel physical start address */
	lsr r7, r6, #18 /* 18 = 20 (1mb) - 2 (4 bytes per entry) */
	add r7, r7, r1 /* r7 <- address of translation page entry for first kernel section (initial, 1:1 mapping) */

	mov r8, #1
	lsl r8, r8, #20 /* r8 <- 1mb */
	mov r9, #0xc
	lsl r9, r9, #8
	orr r9, r9, #2 /* r9 <- 0xc02, which means read/write at any privilege level, and that it's a section w/o PXN bit set */

initial_tt_loop:
	/* Setup translation table entries for the translation table and kernel (domain 0).
	 * Each iteration writes the same section descriptor into both the 1:1
	 * (physical) slot and the final (virtual) slot. */
	ldr r10, tt_section_align
	bic r10, r6, r10 /* section base of the current physical 1mb chunk */
	orr r10, r10, r9 /* r9=0xc02, which means read/write at any privilege level */
	str r10, [r7]
	str r10, [r5]

	add r6, r6, r8
	add r7, r7, #4
	add r5, r5, #4

	subs r3, r3, #1
	bne initial_tt_loop

	mcr p15, 0, r1, c2, c0, 0 /* TTBR0 <- physical address of translation table */

	/* Set access permissions for domain 0 to "Manager" */
	mov r1, #0x3
	mcr p15, 0, r1, c3, c0, 0 /* DACR */

	/* Enable the MMU */
	mrc p15, 0, r1, c1, c0, 0 /* SCTLR */
	orr r1, r1, #0x1
	mcr p15, 0, r1, c1, c0, 0 /* SCTLR */

	/* Update lr for new memory mapping */
	ldr r1, =start
	sub r0, r1, r0 /* r0 <- virtual-minus-physical offset */
	add lr, lr, r0

	bx lr /* Finally, we jump into the new memory mapping, which matches where we were linked */
|
||||
|
||||
tt_base_align:
	/* NOTE(review): this is 15 one-bits = 0x7fff = 32k - 1, but the original
	 * comment said "16k - 1" (which would be 14 one-bits, 0x3fff). TTBR0 only
	 * requires 16k alignment; the stricter 32k alignment still works but can
	 * waste up to 16k — confirm which was intended. */
	.word 0b111111111111111 /* 16k - 1 */
tt_section_align:
	.word 0b11111111111111111111 /* 1mb - 1 */
.globl tt_base_virtual
tt_base_virtual:
	.word 0 /* virtual address of the translation table; written by setup_mmu, read from C */
.globl tt_base_physical
tt_base_physical:
	.word 0 /* physical address of the translation table; written by setup_mmu, read from C */
.globl atags_ptr
atags_ptr:
	.word 0 /* ATAGs pointer saved from r2 at entry (see start) */
.globl machine_type
machine_type:
	.word 0 /* machine type saved from r1 at entry (see start) */

.comm stack, 0x10000 /* Reserve 64k for the stack in .bss */
|
Reference in New Issue
Block a user