aedrix-kernel/arch/arm/start.S

/*
 * Copyright (C) 2012, Aaron Lindsay <aaron@aclindsay.com>
 *
 * This file is part of Aedrix.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#define STACK_SIZE 0x4000 /* 16k */
/*
 * Kernel entry point in assembly. This handles relocating the kernel so that
 * it ends up where we want it in both physical and virtual memory. We copy
 * the kernel to a different physical location if necessary, then turn on the
 * MMU, setting up a dual mapping if the kernel is not located in physical
 * memory at the same address it was linked against. Finally, we jump into the
 * kernel's main() function in C using the address it is linked against. When
 * the MMU is fully initialized later, it will remove the initial 1:1 mapping.
 */
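/*
 * Note: this assumes the bootloader hands off control using the ARM Linux
 * boot convention: r0 = 0, r1 = machine type number, r2 = physical address
 * of the ATAG list (or device tree blob). Only r1 and r2 are saved below.
 */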
.global start
start:
	str r1, machine_type /* Back up the machine type and atags registers so we can access them later from C */
	str r2, atags_ptr
	bl copy_kernel
copy_kernel_lr: /* Used by copy_kernel to calculate the address at which the kernel is currently loaded */
	bl setup_mmu
	ldr sp, =stack+STACK_SIZE /* Set up the stack */
	bl main
1:
	b 1b /* Halt */
copy_kernel:
/*
 * Because we're not necessarily loaded at an address with the same 1 MB
 * alignment as the address we were linked at, copy the kernel over to fix
 * that up.
 *
 * clobbers:
 *   r0-r10
 * returns:
 *   r0 = new kernel base address
 */
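/*
 * Worked example (hypothetical load address): if start is currently at
 * 0x60010000, the low 20 bits are non-zero, so the kernel is copied up to
 * the next 1 MB boundary, 0x60100000, and r1 holds the offset 0x000f0000.
 */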
	sub r0, lr, $(copy_kernel_lr - start) /* r0 <- current address of start */
	ldr r1, tt_section_align
	ands r2, r0, r1 /* If we're already aligned to 1mb, early out */
	bxeq lr
	mov r2, r0 /* r2 <- r0 <- current address of start */
	mov r3, #1
	lsl r3, r3, #20 /* r3 <- 1mb */
	add r0, r0, r3
	bic r0, r0, r1 /* r0 <- new address of start */
	sub r1, r0, r2 /* r1 <- offset between current and new start */
	/* TODO only copy kernel image sections that aren't zeroed (leave out .bss) */
	ldr r5, =start
	ldr r6, =kernel_end
	sub r6, r6, r5
	add r6, r6, r2 /* r6 <- old kernel_end */
	add r6, r6, #16
	bic r6, r6, #0xf /* r6 <- old kernel_end (aligned to 16 bytes) */
	add r5, r6, r1 /* r5 <- new kernel_end */
copy_kernel_loop:
	/* Copy the kernel to its new location, 16 bytes at a time. We do this
	 * from the end to the beginning so we don't overwrite the old kernel if
	 * the destination and source overlap. */
	sub r6, r6, #16
	sub r5, r5, #16
	ldm r6, {r7, r8, r9, r10}
	stm r5, {r7, r8, r9, r10}
	subs r4, r5, r0
	bne copy_kernel_loop
	add lr, lr, r1 /* Fix up the link register for the new kernel location */
	bx lr
setup_mmu:
/*
 * Calculate the address at which we will store our translation table.
 * Currently, we store it just past the end of the kernel. Getting the
 * physical address of the end of the kernel is tricky, since kernel_end is
 * the address the end of the kernel is linked at, so we have to do a little
 * math.
 *
 * arguments:
 *   r0 = current kernel base address (physical), aligned to a 1 MB boundary
 * clobbers:
 *   r0-r10
 */
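/*
 * Note: assuming TTBCR.N is left at its reset value of 0, the first-level
 * translation table is 16 KiB (4096 word-sized entries) and TTBR0 must be
 * 16 KiB aligned, which is why the table is placed on a tt_base_align
 * boundary.
 */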
	/* Find future virtual address of the translation table */
	ldr r1, =kernel_end
	ldr r2, tt_base_align
	ands r3, r1, r2
	mov r3, r1
	addne r3, r1, r2
	bic r2, r3, r2 /* r2 <- future virtual address of translation table */
	str r2, tt_base_virtual
	/* Find physical address of the translation table */
	ldr r1, =start
	sub r1, r2, r1
	add r1, r0, r1 /* r1 <- physical address of translation table */
	str r1, tt_base_physical
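	/* i.e. tt_base_physical = tt_base_virtual - (linked address of start) + (physical kernel base in r0) */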
	/* How many sections do we need to map to make sure we have the kernel
	 * and translation table covered? */
	ldr r3, tt_base_align
	add r3, r3, r1
	sub r3, r3, r0
	lsr r3, r3, #20
	add r3, r3, #1 /* r3 <- number of sections to map */
	ldr r4, =start /* r4 <- kernel virtual start address */
	lsr r5, r4, #18 /* 18 = 20 (1mb) - 2 (4 bytes per entry) */
	add r5, r5, r1 /* r5 <- address of translation table entry for first kernel section (final mapping) */
	mov r6, r0 /* r6 <- kernel physical start address */
	lsr r7, r6, #18 /* 18 = 20 (1mb) - 2 (4 bytes per entry) */
	add r7, r7, r1 /* r7 <- address of translation table entry for first kernel section (initial, 1:1 mapping) */
	mov r8, #1
	lsl r8, r8, #20 /* r8 <- 1mb */
	mov r9, #0xc
	lsl r9, r9, #8
	orr r9, r9, #2 /* r9 <- 0xc02, which means read/write at any privilege level, and that it's a section w/o the PXN bit set */
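	/*
	 * Breakdown of the 0xc02 short-descriptor section entry used above:
	 *   bits [1:0]   = 0b10 -> section descriptor
	 *   bits [11:10] = 0b11 -> AP[1:0] = 0b11: read/write at any privilege level
	 * All other attribute bits (domain, C, B, TEX, XN, etc.) are left at zero.
	 */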
initial_tt_loop:
	/* Set up translation table entries for the translation table and kernel (domain 0) */
	ldr r10, tt_section_align
	bic r10, r6, r10
	orr r10, r10, r9 /* r9=0xc02, which means read/write at any privilege level */
	str r10, [r7]
	str r10, [r5]
	add r6, r6, r8
	add r7, r7, #4
	add r5, r5, #4
	subs r3, r3, #1
	bne initial_tt_loop
	mcr p15, 0, r1, c2, c0, 0 /* TTBR0 <- physical address of translation table */
	/* Set access permissions for domain 0 to "Manager" */
	mov r1, #0x3
	mcr p15, 0, r1, c3, c0, 0 /* DACR */
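	/*
	 * "Manager" access (0b11) for domain 0 means accesses through these
	 * entries are not checked against the AP permission bits at all.
	 */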
	/* Enable the MMU */
	mrc p15, 0, r1, c1, c0, 0 /* SCTLR */
	orr r1, r1, #0x1
	mcr p15, 0, r1, c1, c0, 0 /* SCTLR */
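	/*
	 * From here until the bx below, execution continues through the initial
	 * 1:1 mapping; the adjusted lr then lands in the final mapping at the
	 * address the kernel was linked against.
	 */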
	/* Update lr for the new memory mapping */
	ldr r1, =start
	sub r0, r1, r0
	add lr, lr, r0
	bx lr /* Finally, we jump into the new memory mapping, which matches where we were linked */
tt_base_align:
	.word 0b11111111111111 /* 16k - 1 */
tt_section_align:
	.word 0b11111111111111111111 /* 1mb - 1 */
.global tt_base_virtual
tt_base_virtual:
	.word 0
.global tt_base_physical
tt_base_physical:
	.word 0
.global atags_ptr
atags_ptr:
	.word 0
.global machine_type
machine_type:
	.word 0
.lcomm stack, STACK_SIZE /* Reserve space for the stack in .bss */