/*
Copyright (C) 2012, Aaron Lindsay <aaron@aclindsay.com>

This file is part of Aedrix.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/

#include <print.h>
#include <types.h>

#include <arch/properties.h>
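//arch/properties.h is assumed to provide kernel_start_physical(),
//kernel_end_physical(), kernel_start_virtual() and kernel_end_virtual(),
//the uint32 * bounds of the kernel image that are used below.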
#define PAGE_SIZE 0x00400000
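/* Round an address down to the start of the 4MB page containing it. */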
#define ROUND_DOWN_PAGE_SIZE(addr) ((typeof(addr))((uint32)(addr) & ~(PAGE_SIZE - 1)))
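
//Nonzero if any part of the range [lower, upper] falls within the 4MB page
//starting at page_start. Pointer arithmetic here is in uint32 units, so
//PAGE_SIZE>>2 is one page expressed in 4-byte words.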
static inline int page_intersects(uint32 *page_start, uint32 *lower, uint32 *upper) {
	return (lower >= page_start && lower < (page_start + (PAGE_SIZE>>2))) ||
		(upper >= page_start && upper < (page_start + (PAGE_SIZE>>2))) ||
		(lower < page_start && upper >= (page_start + (PAGE_SIZE>>2)));
}
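
//Rewrite the existing page directory in place: identity-map the address
//space with 4MB pages, except that the kernel's physical and virtual
//regions are swapped (see the comments below) so that every page of
//physical memory remains addressable somewhere.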
void mmu_reinit() {
	uint32 virt_phys_offset; /* CAUTION: 1 = 4 bytes */
	uint32 *curr_tbl_entry;
	uint32 *curr_addr;
	uint32 *page_dir;
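	/* CR3 holds the physical address of the current page directory. */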
	asm("movl %%cr3, %0" : "=r"(page_dir) : : );

	virt_phys_offset = kernel_start_virtual() - kernel_start_physical();
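	//page_dir was read from CR3 as a physical address; adding the offset
	//(counted in 4-byte units, see the CAUTION above) converts it to the
	//virtual address the directory is mapped at, assuming the directory is
	//mapped at the same virtual-to-physical offset as the rest of the kernel.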
	curr_tbl_entry = page_dir + virt_phys_offset;

	//do the first loop iteration outside the loop, because we have to check
	//against curr_addr wrapping back around to zero to know we're done
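	//0x83 = present | writable | 4MB page (PS bit), so this entry
	//identity-maps the first 4MB of the address space.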
	*curr_tbl_entry = 0x83;
	curr_tbl_entry++;

	//create identity mapping for the entire address space using 4MB pages.
	//BUT, if we've relocated the kernel from where it sits in physical
	//memory, make sure we keep those mappings correct; we'll actually
	//swap the two mappings so that all of physical memory stays addressable.
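	//curr_addr is a uint32 *, so each step of PAGE_SIZE>>2 advances it by one
	//full 4MB page; the loop ends when it wraps back around to zero.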
	for (curr_addr = (uint32 *)PAGE_SIZE; curr_addr != 0; curr_addr += (PAGE_SIZE>>2)) {
		if (page_intersects(curr_addr, kernel_start_physical(), kernel_end_physical())) {
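			//this page covers the kernel's physical location, so map it up by
			//the offset instead, to the memory behind the kernel's virtual mapping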
			*curr_tbl_entry = (uint32)ROUND_DOWN_PAGE_SIZE(curr_addr + virt_phys_offset) | 0x83;
		} else if (page_intersects(curr_addr, kernel_start_virtual(), kernel_end_virtual())) {
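			//this page is part of the kernel's virtual range, so map it down by
			//the offset to the kernel's actual physical pages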
			*curr_tbl_entry = (uint32)ROUND_DOWN_PAGE_SIZE(curr_addr - virt_phys_offset) | 0x83;
		} else {
			*curr_tbl_entry = (uint32)curr_addr | 0x83;
		}
		/* Force the entries to reload */
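		/* invlpg invalidates the cached TLB entry for the page containing curr_addr */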
		asm("invlpg (%0)" : : "r"(curr_addr) : );
		curr_tbl_entry++;
	}
}