@@ -20,28 +20,26 @@
 #include <print.h>
 #include <types.h>
 #include <frames.h>
 
-extern uint32 kernel_start_phys, kernel_end_phys;
-extern uint32 kernel_start, kernel_end;
+#include <arch/properties.h>
 
 #define PAGE_SIZE 0x00400000
 #define ROUND_DOWN_PAGE_SIZE(addr) ((typeof(addr))((uint32)(addr) & 0xff800000))
 
 static inline int page_intersects(uint32 *page_start, uint32 *lower, uint32 *upper) {
-	return (lower >= page_start && lower < (page_start + PAGE_SIZE)) ||
-		(upper >= page_start && upper < (page_start + PAGE_SIZE)) ||
-		(lower < page_start && upper >= (page_start + PAGE_SIZE));
+	return (lower >= page_start && lower < (page_start + (PAGE_SIZE>>2))) ||
+		(upper >= page_start && upper < (page_start + (PAGE_SIZE>>2))) ||
+		(lower < page_start && upper >= (page_start + (PAGE_SIZE>>2)));
 }
 
 void mmu_reinit() {
-	int virt_phys_offset; /* CAUTION: 1 = 4 bytes */
+	uint32 virt_phys_offset; /* CAUTION: 1 = 4 bytes */
 	uint32 *curr_tbl_entry;
 	uint32 *curr_addr;
 	uint32 *page_dir;
 	asm("movl %%cr3, %0" : "=r"(page_dir) : : );
 
-	virt_phys_offset = &kernel_start - &kernel_start_phys;
+	virt_phys_offset = kernel_start_virtual() - kernel_start_physical();
 	curr_tbl_entry = page_dir + virt_phys_offset;
 
 	//do first loop iteration outside the loop, because we have to check against wrapping back around to know we're done
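Note (not part of the patch): the kernel_start_physical()/kernel_start_virtual() accessors used above come from the newly included <arch/properties.h>, which this diff does not show. A minimal sketch of what they presumably look like, assuming they simply wrap the linker symbols the patch stops referencing directly and return uint32 * so that pointer differences stay in 4-byte units (hence the "1 = 4 bytes" caution):

/* Hypothetical sketch of <arch/properties.h>; the extern names are taken
 * from the declarations this patch removes. */
extern uint32 kernel_start_phys, kernel_end_phys; /* physical load range */
extern uint32 kernel_start, kernel_end;           /* virtual link range  */

static inline uint32 *kernel_start_physical(void) { return &kernel_start_phys; }
static inline uint32 *kernel_end_physical(void)   { return &kernel_end_phys; }
static inline uint32 *kernel_start_virtual(void)  { return &kernel_start; }
static inline uint32 *kernel_end_virtual(void)    { return &kernel_end; }
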
@@ -53,9 +51,9 @@ void mmu_reinit() {
 	//memory, make sure we keep those mappings correct, and we'll actually
 	//swap the two mappings so all of memory is addressable.
 	for (curr_addr = (uint32 *)PAGE_SIZE; curr_addr != 0; curr_addr += (PAGE_SIZE>>2)) {
-		if (page_intersects(curr_addr, &kernel_start_phys, &kernel_end_phys)) {
+		if (page_intersects(curr_addr, kernel_start_physical(), kernel_end_physical())) {
 			*curr_tbl_entry = (uint32)ROUND_DOWN_PAGE_SIZE(curr_addr + virt_phys_offset) | 0x83;
-		} else if (page_intersects(curr_addr, &kernel_start, &kernel_end)) {
+		} else if (page_intersects(curr_addr, kernel_start_virtual(), kernel_end_virtual())) {
 			*curr_tbl_entry = (uint32)ROUND_DOWN_PAGE_SIZE(curr_addr - virt_phys_offset) | 0x83;
 		} else {
 			*curr_tbl_entry = (uint32)curr_addr | 0x83;
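Note (not part of the patch): two literals in this hunk deserve a gloss. curr_addr is a uint32 *, so stepping it by PAGE_SIZE>>2 elements advances the address by PAGE_SIZE bytes per iteration; the same scaling is why page_intersects() now compares against page_start + (PAGE_SIZE>>2). The 0x83 OR'd into every entry is the usual x86 page-directory flag combination for a large page, spelled out below with illustrative names (the file itself just uses the literal):

/* Illustrative names only; not defined anywhere in this codebase. */
#define PDE_PRESENT  0x01 /* bit 0: entry is valid */
#define PDE_WRITABLE 0x02 /* bit 1: read/write */
#define PDE_LARGE    0x80 /* bit 7 (PS): 4 MiB page, needs CR4.PSE enabled */
/* PDE_PRESENT | PDE_WRITABLE | PDE_LARGE == 0x83 */
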
@@ -65,58 +63,3 @@ void mmu_reinit() {
 		curr_tbl_entry++;
 	}
 }
-
-//TODO merge the rest of this file with the similar section in arch/arm. This is clearly mostly duplicated code.
-
-int mmu_region_contains(void *lower_a, void *upper_a, void *lower_b, void *upper_b) {
-	return lower_b >= lower_a && upper_b <= upper_a;
-}
-int mmu_region_contains_single(void *lower_a, void *upper_a, void *ptr) {
-	return lower_a <= ptr && ptr <= upper_a;
-}
-
-#define section_round_down(ptr) (((uint32)ptr) & ~(PAGE_SIZE-1))
-#define section_round_up(ptr) (((((uint32)ptr) & ~1) + (PAGE_SIZE-1) ) & ~(PAGE_SIZE-1))
-
-/* Called once per physical memory region by bootup code. This function is
- * responsible for only adding (via mm_add_free_region()) those parts of the
- * memory region which are still available (i.e. aren't in the kernel and
- * haven't been remapped anywhere else. */
-void declare_memory_region(void *lower, void *upper) {
-	void *k_section_start_phys = (void *)section_round_down(&kernel_start_phys);
-	void *k_section_end_phys = (void *)(section_round_up(&kernel_end_phys) - 1);
-	void *k_section_start_virt = (void *)section_round_down(&kernel_start);
-	void *k_section_end_virt = (void *)(section_round_up(&kernel_end) - 1);
-
-	if (upper - lower < 1) {
-		print("Warning: declare_memory_region() called with lower=%x, upper=%x. Ignoring.\n", lower, upper);
-		return;
-	}
-
-	//TODO It's possible (though highly unlikely) that the kernel (virtual)
-	//is split across two different memory regions. We should probably
-	//handle this.
-	if (mmu_region_contains(lower, upper, k_section_start_phys, k_section_end_phys)) {
-		//Don't map any of the physical kernel's memory
-		declare_memory_region(lower, (void *) ((char *)k_section_start_phys - 1));
-		declare_memory_region((void *) ((char *)k_section_end_phys + 1), upper);
-		mm_add_free_region(&kernel_end, k_section_end_virt);
-	} else if (mmu_region_contains(lower, upper, k_section_start_virt, k_section_end_virt)) {
-		declare_memory_region(lower, (void *) ((char *)k_section_start_virt - 1));
-		declare_memory_region((void *) ((char *)k_section_end_virt + 1), upper);
-	} else if (mmu_region_contains_single(lower, upper, k_section_start_phys)) {
-		if ((void*)((char*)lower + 1) < k_section_start_phys)
-			declare_memory_region(lower, (void *) ((char *)k_section_start_phys - 1));
-	} else if (mmu_region_contains_single(lower, upper, k_section_end_phys)) {
-		if (k_section_end_phys < (void*)((char*)upper - 1))
-			declare_memory_region((void *) ((char *)k_section_end_phys + 1), upper);
-	} else if (mmu_region_contains_single(lower, upper, k_section_start_virt)) {
-		if ((void*)((char*)lower + 1) < k_section_start_virt)
-			declare_memory_region(lower, (void *) ((char *)k_section_start_virt - 1));
-	} else if (mmu_region_contains_single(lower, upper, k_section_end_virt)) {
-		if (k_section_end_virt < (void*)((char*)upper - 1))
-			declare_memory_region((void *) ((char *)k_section_end_virt + 1), upper);
-	} else {
-		mm_add_free_region(lower, upper);
-	}
-}
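
Note (not part of the patch): the deleted declare_memory_region() was, per its own comment, invoked once per physical memory region by bootup code; it recursively carves each region around the kernel's physical and virtual sections and hands everything left over to mm_add_free_region(). A hypothetical usage sketch, where memory_map, memory_map_count and the .base/.length fields merely stand in for whatever memory-map structure the boot code really uses:

/* Hypothetical caller; the memory_map structure is illustrative only. */
for (uint32 i = 0; i < memory_map_count; i++) {
	void *lower = (void *)memory_map[i].base;
	void *upper = (void *)(memory_map[i].base + memory_map[i].length - 1);
	declare_memory_region(lower, upper); /* adds only the still-free parts */
}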