/* The original header names were lost (likely eaten by HTML escaping of the
 * angle brackets); these are assumed project headers providing uint32,
 * print(), and mm_add_free_region(). */
#include "types.h"
#include "print.h"
#include "mm.h"

/* Coprocessor register coordinates: cp, opc1, CRn, CRm, opc2 */
#define SCTLR 15,0,1,0,0
#define TTBR0 15,0,2,0,0
#define TTBR1 15,0,2,0,1
#define TTBCR 15,0,2,0,2

#define _cp_read(var, cp, opc1, CRn, CRm, opc2) \
    asm volatile("mrc p" #cp ", " #opc1 ", %0, c" #CRn ", c" #CRm ", " #opc2 ";" : "=r"(var) : )
#define _cp_write(var, cp, opc1, CRn, CRm, opc2) \
    asm volatile("mcr p" #cp ", " #opc1 ", %0, c" #CRn ", c" #CRm ", " #opc2 ";" : : "r"(var) )
#define cp_read(var, ...) _cp_read(var, __VA_ARGS__)
#define cp_write(var, ...) _cp_write(var, __VA_ARGS__)

#define TT_BASE_SIZE (1<<14)     /* 16KB */
#define TT_SECTION_SIZE (1<<20)  /* 1MB */

uint32 *kernel_start_phys, *kernel_start_virt, *kernel_end_phys, *kernel_end_virt;

void print_mapping(void *addr)
{
    extern uint32 tt_base_virtual;
    /* Each section entry is 4 bytes and covers 1MB, so the entry for addr
     * lives at base + (addr >> 20) * 4. (The original used addr >> 18, which
     * only lands on the right entry for MB-aligned addresses.) */
    print("%x: %x\n", addr, *(uint32 *)(tt_base_virtual + (((uint32)addr >> 20) << 2)));
}

void mmu_reinit()
{
    extern uint32 tt_base_virtual, tt_base_physical, start;
    uint32 curr_addr;
    uint32 *curr_tt_entry;
    int virt_phys_offset;

    virt_phys_offset = tt_base_virtual - tt_base_physical;
    kernel_start_virt = &start;
    /* Pointer arithmetic here is in uint32 units, so divide the byte offset by 4. */
    kernel_start_phys = kernel_start_virt - virt_phys_offset/4;
    kernel_end_virt = (uint32 *)(tt_base_virtual + TT_BASE_SIZE);
    kernel_end_phys = (uint32 *)(tt_base_physical + TT_BASE_SIZE);

    //point at the first entry of the translation table
    curr_tt_entry = (uint32 *)tt_base_virtual;

    //do the first iteration outside the loop, because we have to check
    //against wrapping back around to zero to know we're done
    *curr_tt_entry = 0xc02; /* 0xc02 means read/write at any privilege level,
                             * and that it's a section w/o the PXN bit set */
    curr_tt_entry++;

    //create an identity mapping for the entire address space using sections.
    //BUT, if we've relocated the kernel from where it sits in physical
    //memory, make sure we keep those mappings correct; we actually swap
    //the two mappings so that all of memory remains addressable.
    for (curr_addr = 0x00100000; curr_addr != 0; curr_addr += 0x00100000) {
        if ((uint32 *)curr_addr >= kernel_start_phys &&
            (uint32 *)curr_addr < kernel_end_phys) {
            *curr_tt_entry = (curr_addr + virt_phys_offset) | 0xc02;
        } else if ((uint32 *)curr_addr >= kernel_start_virt &&
                   (uint32 *)curr_addr < kernel_end_virt) {
            *curr_tt_entry = (curr_addr - virt_phys_offset) | 0xc02;
        } else {
            *curr_tt_entry = curr_addr | 0xc02;
        }
        curr_tt_entry++;
    }
}

int mmu_region_contains(void *lower_a, void *upper_a, void *lower_b, void *upper_b)
{
    return lower_b >= lower_a && upper_b <= upper_a;
}

#define section_round_down(ptr) (((uint32)ptr) & ~(TT_SECTION_SIZE-1))
#define section_round_up(ptr) (((((uint32)ptr) & ~1) + (TT_SECTION_SIZE-1)) & ~(TT_SECTION_SIZE-1))
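/* Illustrative sketch, not part of the original: how the cp_read/cp_write
 * macros and the section-rounding helpers might be exercised. mmu_debug_dump
 * is a hypothetical helper invented for this example; the register names
 * expand to the coordinate lists defined above. */
static void mmu_debug_dump(void)
{
    uint32 sctlr, ttbr0;

    cp_read(sctlr, SCTLR);  /* expands to: mrc p15, 0, %0, c1, c0, 0 */
    cp_read(ttbr0, TTBR0);  /* expands to: mrc p15, 0, %0, c2, c0, 0 */
    print("SCTLR=%x TTBR0=%x\n", sctlr, ttbr0);

    /* section_round_down/up snap an address to its 1MB section boundary:
     * 0x00234567 rounds down to 0x00200000 and up to 0x00300000. */
    print("down=%x up=%x\n",
          section_round_down((void *)0x00234567),
          section_round_up((void *)0x00234567));
}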
/* Called once per physical memory region by bootup code. This function is
 * responsible for adding (via mm_add_free_region()) only those parts of the
 * memory region which are still available, i.e. are not occupied by the
 * kernel and haven't been remapped anywhere else. */
void declare_memory_region(void *lower, void *upper)
{
    void *k_section_start_phys = (void *)section_round_down(kernel_start_phys);
    void *k_section_end_phys = (void *)(section_round_up(kernel_end_phys) - 1);
    void *k_section_start_virt = (void *)section_round_down(kernel_start_virt);
    void *k_section_end_virt = (void *)(section_round_up(kernel_end_virt) - 1);

    if ((char *)upper - (char *)lower < 1) {
        print("Warning: declare_memory_region() called with lower=%x, upper=%x. Ignoring.\n", lower, upper);
        return;
    }

    //TODO It's possible (though highly unlikely) that the kernel (virtual)
    //is split across two different memory regions. We should probably
    //handle this.
    if (mmu_region_contains(lower, upper, k_section_start_phys, k_section_end_phys)) {
        //Don't hand out any of the kernel's physical memory; recurse on the
        //pieces on either side of it, and free the tail of the kernel's last
        //section via its virtual address.
        declare_memory_region(lower, (void *)((char *)k_section_start_phys - 1));
        declare_memory_region((void *)((char *)k_section_end_phys + 1), upper);
        mm_add_free_region(kernel_end_virt, k_section_end_virt);
    } else if (mmu_region_contains(lower, upper, k_section_start_virt, k_section_end_virt)) {
        declare_memory_region(lower, (void *)((char *)k_section_start_virt - 1));
        declare_memory_region((void *)((char *)k_section_end_virt + 1), upper);
    } else {
        mm_add_free_region(lower, upper);
    }
}
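/* Illustrative sketch, not part of the original: how bootup code might drive
 * these routines. example_boot_mm_init and the memory range below (256MB of
 * RAM at physical address 0) are made-up assumptions; real code would take
 * the range from the bootloader or firmware. */
static void example_boot_mm_init(void)
{
    /* Rebuild the translation table with the identity map plus the
     * physical/virtual kernel swap. */
    mmu_reinit();

    /* Hand all of RAM to the allocator; declare_memory_region() carves out
     * the kernel's own sections and recurses on what's left. Note that
     * upper is the inclusive last byte of the region. */
    declare_memory_region((void *)0x00000000, (void *)0x0fffffff);
}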