/* Copyright (C) 2012, Aaron Lindsay

	This file is part of Aedrix.

	This program is free software; you can redistribute it and/or modify it
	under the terms of the GNU General Public License as published by the
	Free Software Foundation; either version 2 of the License, or (at your
	option) any later version.

	This program is distributed in the hope that it will be useful, but
	WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
	Public License for more details.

	You should have received a copy of the GNU General Public License along
	with this program; if not, write to the Free Software Foundation, Inc.,
	51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/* Headers providing uint32, mm_add_free_region(), and print(); the exact
 * header names here are assumed. */
#include <types.h>
#include <mm.h>
#include <print.h>

extern uint32 kernel_start_phys, kernel_end_phys;
extern uint32 kernel_start, kernel_end;

/* 4MB pages (x86 PSE "sections") */
#define PAGE_SIZE 0x00400000
#define ROUND_DOWN_PAGE_SIZE(addr) ((typeof(addr))((uint32)(addr) & ~(PAGE_SIZE-1)))

/* Does the page starting at page_start overlap the range [lower, upper]?
 * Pointer arithmetic is on uint32*, so one page spans PAGE_SIZE>>2 entries. */
static inline int page_intersects(uint32 *page_start, uint32 *lower, uint32 *upper)
{
	uint32 *page_end = page_start + (PAGE_SIZE>>2);

	return (lower >= page_start && lower < page_end)
		|| (upper >= page_start && upper < page_end)
		|| (lower < page_start && upper >= page_end);
}

void mmu_reinit()
{
	int virt_phys_offset; /* CAUTION: 1 = 4 bytes (difference of uint32 pointers) */
	uint32 *curr_tbl_entry;
	uint32 *curr_addr;
	uint32 *page_dir;

	asm("movl %%cr3, %0" : "=r"(page_dir) : : );

	virt_phys_offset = &kernel_start - &kernel_start_phys;
	curr_tbl_entry = page_dir + virt_phys_offset;

	//do the first loop iteration outside the loop, because we have to check
	//against wrapping back around to know we're done.
	//0x83 = present | writable | 4MB page (PSE); base address 0.
	*curr_tbl_entry = 0x83;
	curr_tbl_entry++;

	//create identity mapping for the entire address space using sections.
	//BUT, if we've relocated the kernel from where it is in physical
	//memory, make sure we keep those mappings correct, and actually swap
	//the two mappings so all of memory stays addressable.
	for (curr_addr = (uint32 *)PAGE_SIZE; curr_addr != 0; curr_addr += (PAGE_SIZE>>2)) {
		if (page_intersects(curr_addr, &kernel_start_phys, &kernel_end_phys)) {
			*curr_tbl_entry = (uint32)ROUND_DOWN_PAGE_SIZE(curr_addr + virt_phys_offset) | 0x83;
		} else if (page_intersects(curr_addr, &kernel_start, &kernel_end)) {
			*curr_tbl_entry = (uint32)ROUND_DOWN_PAGE_SIZE(curr_addr - virt_phys_offset) | 0x83;
		} else {
			*curr_tbl_entry = (uint32)curr_addr | 0x83;
		}

		/* Force the entry to reload in the TLB */
		asm("invlpg (%0)" : : "r"(curr_addr) : );

		curr_tbl_entry++;
	}
}

//TODO merge the rest of this file with the similar section in arch/arm. This
//is clearly mostly duplicated code.

int mmu_region_contains(void *lower_a, void *upper_a, void *lower_b, void *upper_b)
{
	return lower_b >= lower_a && upper_b <= upper_a;
}

int mmu_region_contains_single(void *lower_a, void *upper_a, void *ptr)
{
	return lower_a <= ptr && ptr <= upper_a;
}

#define section_round_down(ptr) (((uint32)ptr) & ~(PAGE_SIZE-1))
#define section_round_up(ptr) (((((uint32)ptr) & ~1) + (PAGE_SIZE-1)) & ~(PAGE_SIZE-1))

/* Called once per physical memory region by bootup code. This function is
 * responsible for only adding (via mm_add_free_region()) those parts of the
 * memory region which are still available (i.e. aren't in the kernel and
 * haven't been remapped anywhere else).
 */
void declare_memory_region(void *lower, void *upper)
{
	void *k_section_start_phys = (void *)section_round_down(&kernel_start_phys);
	void *k_section_end_phys = (void *)(section_round_up(&kernel_end_phys) - 1);
	void *k_section_start_virt = (void *)section_round_down(&kernel_start);
	void *k_section_end_virt = (void *)(section_round_up(&kernel_end) - 1);

	if (upper - lower < 1) {
		print("Warning: declare_memory_region() called with lower=%x, upper=%x. Ignoring.\n", lower, upper);
		return;
	}

	//TODO It's possible (though highly unlikely) that the kernel (virtual)
	//is split across two different memory regions. We should probably
	//handle this.
	if (mmu_region_contains(lower, upper, k_section_start_phys, k_section_end_phys)) {
		//Don't add any of the kernel's physical memory as free; recurse on
		//the regions on either side of it
		declare_memory_region(lower, (void *) ((char *)k_section_start_phys - 1));
		declare_memory_region((void *) ((char *)k_section_end_phys + 1), upper);
		//The tail of the kernel's last section is still usable; add it by
		//its virtual address
		mm_add_free_region(&kernel_end, k_section_end_virt);
	} else if (mmu_region_contains(lower, upper, k_section_start_virt, k_section_end_virt)) {
		declare_memory_region(lower, (void *) ((char *)k_section_start_virt - 1));
		declare_memory_region((void *) ((char *)k_section_end_virt + 1), upper);
	} else if (mmu_region_contains_single(lower, upper, k_section_start_phys)) {
		if ((void *)((char *)lower + 1) < k_section_start_phys)
			declare_memory_region(lower, (void *) ((char *)k_section_start_phys - 1));
	} else if (mmu_region_contains_single(lower, upper, k_section_end_phys)) {
		if (k_section_end_phys < (void *)((char *)upper - 1))
			declare_memory_region((void *) ((char *)k_section_end_phys + 1), upper);
	} else if (mmu_region_contains_single(lower, upper, k_section_start_virt)) {
		if ((void *)((char *)lower + 1) < k_section_start_virt)
			declare_memory_region(lower, (void *) ((char *)k_section_start_virt - 1));
	} else if (mmu_region_contains_single(lower, upper, k_section_end_virt)) {
		if (k_section_end_virt < (void *)((char *)upper - 1))
			declare_memory_region((void *) ((char *)k_section_end_virt + 1), upper);
	} else {
		//Region doesn't touch the kernel at all; it's entirely free
		mm_add_free_region(lower, upper);
	}
}
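
/*
 * Usage sketch (illustrative only, not part of the actual boot code): per the
 * comment above declare_memory_region(), bootup code is expected to call
 * mmu_reinit() once and then declare_memory_region() once per physical RAM
 * range it discovers. The function name and address ranges below are made-up
 * examples, kept in an #if 0 block so they never compile into the kernel.
 */
#if 0
static void example_declare_ram(void)
{
	mmu_reinit();

	/* e.g. a machine reporting 640KB of low memory and 127MB above 1MB */
	declare_memory_region((void *)0x00000000, (void *)0x0009ffff);
	declare_memory_region((void *)0x00100000, (void *)0x07ffffff);
}
#endif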