/*
 Copyright (C) 2012, Aaron Lindsay <aaron@aclindsay.com>

 This file is part of Aedrix.

 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
 the Free Software Foundation; either version 2 of the License, or
 (at your option) any later version.

 This program is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 GNU General Public License for more details.

 You should have received a copy of the GNU General Public License along
 with this program; if not, write to the Free Software Foundation, Inc.,
 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/

#include <print.h>
#include <types.h>
#include <frames.h>
#define SCTLR 15,0,1,0,0
#define TTBR0 15,0,2,0,0
#define TTBR1 15,0,2,0,1
#define TTBCR 15,0,2,0,2
#define _cp_read(var, cp, opc1, CRn, CRm, opc2) asm("mrc p" #cp ", " #opc1 ", %0, c" #CRn ", c" #CRm ", " #opc2 ";" : "=r"(var) : )
#define _cp_write(var, cp, opc1, CRn, CRm, opc2) asm("mcr p" #cp ", " #opc1 ", %0, c" #CRn ", c" #CRm ", " #opc2 ";" : : "r"(var) )
#define cp_read(var, ...) _cp_read(var, __VA_ARGS__)
#define cp_write(var, ...) _cp_write(var, __VA_ARGS__)
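
/*
 * Usage sketch (illustrative; nothing in this file uses these macros yet):
 * read a register with cp_read(), modify it, and write it back with
 * cp_write(). For example, setting bit 0 of SCTLR (the M bit, MMU enable)
 * would look like:
 *
 *	uint32 sctlr;
 *	cp_read(sctlr, SCTLR);
 *	sctlr |= 1;
 *	cp_write(sctlr, SCTLR);
 *
 * The SCTLR/TTBRn/TTBCR defines above supply the (coprocessor, opc1, CRn,
 * CRm, opc2) operands that get stringized into the mrc/mcr instructions;
 * the cp_read/cp_write wrappers exist so those defines are expanded before
 * stringization.
 */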
#define TT_BASE_SIZE (1<<14) /* 16k */
#define TT_SECTION_SIZE (1<<20) /* 1mb */
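
/* TT_BASE_SIZE/4 = 4096 first-level entries, each mapping one 1MB section,
 * so a single table covers the full 4GB address space. */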
uint32 *kernel_start_phys, *kernel_start_virt, *kernel_end_phys, *kernel_end_virt;
void print_mapping(void *addr) {
	extern uint32 tt_base_virtual;
	/* Print the first-level translation table entry covering addr (one
	 * 4-byte entry per 1MB section). */
	print("%x: %x\n", addr, *(uint32 *)(tt_base_virtual + (((uint32)addr)>>18)));
}
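/*
 * Worked example of the remapping below (illustrative addresses, not taken
 * from this codebase): if the kernel was loaded at physical 0x00100000 but
 * relocated to run at virtual 0xC0100000, virt_phys_offset is 0xC0000000.
 * The loop then points the entries for 0xC01xxxxx at physical 0x001xxxxx
 * (the kernel itself), points the entries for 0x001xxxxx at physical
 * 0xC01xxxxx (the "swap" mentioned in the comments), and identity-maps
 * every other 1MB section.
 */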
void mmu_reinit() {
	extern uint32 tt_base_virtual, tt_base_physical, start;
	uint32 curr_addr;
	uint32 *curr_tt_entry;
	int virt_phys_offset;

	virt_phys_offset = tt_base_virtual - tt_base_physical;
	kernel_start_virt = &start;
	kernel_start_phys = kernel_start_virt - virt_phys_offset/4; /* offset is in bytes; /4 because this is uint32* arithmetic */
	kernel_end_virt = (uint32 *)(tt_base_virtual + TT_BASE_SIZE);
	kernel_end_phys = (uint32 *)(tt_base_physical + TT_BASE_SIZE);

	//get the current translation table base address
	curr_tt_entry = (uint32 *)tt_base_virtual;

	//do the first loop iteration outside the loop, because we have to check against wrapping back around to know we're done
	*curr_tt_entry = 0xc02; /* 0xc02 means read/write at any privilege level, and that it's a section w/o the PXN bit set */
	curr_tt_entry++;

	//create an identity mapping for the entire address space using sections.
	//BUT, if we've relocated the kernel from where it is in physical
	//memory, make sure we keep those mappings correct, and we'll actually
	//swap the two mappings so all of memory is addressable.
	for (curr_addr = 0x00100000; curr_addr != 0; curr_addr += 0x00100000) {
		if ((uint32 *)curr_addr >= kernel_start_phys && (uint32 *)curr_addr < kernel_end_phys) {
			*curr_tt_entry = (curr_addr + virt_phys_offset) | 0xc02;
		} else if ((uint32 *)curr_addr >= kernel_start_virt && (uint32 *)curr_addr < kernel_end_virt) {
			*curr_tt_entry = (curr_addr - virt_phys_offset) | 0xc02;
		} else {
			*curr_tt_entry = curr_addr | 0xc02;
		}
		curr_tt_entry++;
	}
}
int mmu_region_contains(void *lower_a, void *upper_a, void *lower_b, void *upper_b) {
	return lower_b >= lower_a && upper_b <= upper_a;
}
#define section_round_down(ptr) (((uint32)ptr) & ~(TT_SECTION_SIZE-1))
#define section_round_up(ptr) (((((uint32)ptr) & ~1) + (TT_SECTION_SIZE-1) ) & ~(TT_SECTION_SIZE-1))
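
/*
 * Example: with TT_SECTION_SIZE = 0x100000, section_round_down(0x00123456)
 * yields 0x00100000 and section_round_up(0x00123456) yields 0x00200000;
 * values already on a section boundary are left unchanged by both.
 */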
/* Called once per physical memory region by bootup code. This function is
 * responsible for only adding (via mm_add_free_region()) those parts of the
 * memory region which are still available (i.e. aren't in the kernel and
 * haven't been remapped anywhere else). */
void declare_memory_region(void *lower, void *upper) {
	void *k_section_start_phys = (void *)section_round_down(kernel_start_phys);
	void *k_section_end_phys = (void *)(section_round_up(kernel_end_phys) - 1);
	void *k_section_start_virt = (void *)section_round_down(kernel_start_virt);
	void *k_section_end_virt = (void *)(section_round_up(kernel_end_virt) - 1);

	if (upper - lower < 1) {
		print("Warning: declare_memory_region() called with lower=%x, upper=%x. Ignoring.\n", lower, upper);
		return;
	}

	//TODO It's possible (though highly unlikely) that the kernel (virtual)
	//is split across two different memory regions. We should probably
	//handle this.
	if (mmu_region_contains(lower, upper, k_section_start_phys, k_section_end_phys)) {
		//Don't add the kernel's physical memory to the free pool
		declare_memory_region(lower, (void *) ((char *)k_section_start_phys - 1));
		declare_memory_region((void *) ((char *)k_section_end_phys + 1), upper);
		mm_add_free_region(kernel_end_virt, k_section_end_virt);
	} else if (mmu_region_contains(lower, upper, k_section_start_virt, k_section_end_virt)) {
		declare_memory_region(lower, (void *) ((char *)k_section_start_virt - 1));
		declare_memory_region((void *) ((char *)k_section_end_virt + 1), upper);
	} else {
		mm_add_free_region(lower, upper);
	}
}
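
/*
 * Illustrative call sequence (addresses are made up, not taken from this
 * codebase): if boot code calls
 * declare_memory_region((void *)0x00000000, (void *)0x07ffffff) and the
 * kernel's physical image occupies the sections 0x00100000-0x004fffff, the
 * recursion above ends up calling mm_add_free_region() for
 * 0x00000000-0x000fffff and 0x00500000-0x07ffffff, plus the unused tail of
 * the kernel's last virtual section.
 */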