
Consolidate physical page frame declaration from all archs

Aaron Lindsay, 7 years ago
commit b51f8f8422 (branch: master)
3 changed files with 76 additions and 125 deletions:

  1. arch/arm/kernel/mmu.c (+17, -60)
  2. arch/i386/kernel/mmu.c (+8, -65)
  3. kernel/frames.c (+51, -0)
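The consolidated code below leans on a small accessor interface from <arch/properties.h>. That header is not part of this diff, so the following is only a sketch of what it presumably declares; the names come from the call sites, and the uint32 * return type is inferred from the pointer arithmetic performed on the results:

/* Inferred interface -- not shown in this commit; signatures are assumptions
 * based on usage such as kernel_start_virtual() - kernel_start_physical(). */
uint32 *kernel_start_physical(void);  /* start of the kernel image in RAM */
uint32 *kernel_end_physical(void);    /* end of the kernel image in RAM */
uint32 *kernel_start_virtual(void);   /* address the image is linked/relocated to */
uint32 *kernel_end_virtual(void);     /* end of the image's virtual window */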

arch/arm/kernel/mmu.c (+17, -60)

@@ -20,7 +20,8 @@

#include <print.h>
#include <types.h>
#include <frames.h>

#include <arch/properties.h>

#define SCTLR 15,0,1,0,0
#define TTBR0 15,0,2,0,0
@@ -32,27 +33,21 @@
#define cp_read(var, ...) _cp_read(var, __VA_ARGS__)
#define cp_write(var, ...) _cp_write(var, __VA_ARGS__)

#define TT_BASE_SIZE (1<<14) /* 16k */
#define TT_SECTION_SIZE (1<<20) /* 1mb */

uint32 *kernel_start_phys, *kernel_start_virt, *kernel_end_phys, *kernel_end_virt;

void print_mapping(void *addr) {
extern uint32 tt_base_virtual;
print("%x: %x\n", addr, *(uint32 *)(tt_base_virtual + (((uint32)addr)>>18)));
}

void mmu_reinit() {
extern uint32 tt_base_virtual, tt_base_physical, start;
uint32 curr_addr;
uint32 *curr_tt_entry;
int virt_phys_offset;
static inline int page_intersects(uint32 *page_start, uint32 *lower, uint32 *upper) {
return (lower >= page_start && lower < (page_start + (CONFIG_INIT_PAGE_SIZE>>2))) ||
(upper >= page_start && upper < (page_start + (CONFIG_INIT_PAGE_SIZE>>2))) ||
(lower < page_start && upper >= (page_start + (CONFIG_INIT_PAGE_SIZE>>2)));
}

virt_phys_offset = tt_base_virtual - tt_base_physical;
kernel_start_virt = &start;
kernel_start_phys = kernel_start_virt - virt_phys_offset/4;
kernel_end_virt = (uint32 *)(tt_base_virtual + TT_BASE_SIZE);
kernel_end_phys = (uint32 *)(tt_base_physical + TT_BASE_SIZE);
void mmu_reinit() {
extern uint32 tt_base_virtual;
uint32 *curr_addr, *curr_tt_entry;
uint32 virt_phys_offset = kernel_start_virtual() - kernel_start_physical(); /* CAUTION: 1 = 4 bytes */

//get the current translation table base address
curr_tt_entry = (uint32 *)tt_base_virtual;
@@ -65,52 +60,14 @@ void mmu_reinit() {
//BUT, if we've relocated the kernel from where it is in physical
//memory, make sure we keep those mappings correct, and we'll actually
//swap the two mappings so all of memory is addressable.
for (curr_addr = 0x00100000; curr_addr != 0; curr_addr += 0x00100000) {
if ((uint32 *)curr_addr >= kernel_start_phys && (uint32 *)curr_addr < kernel_end_phys) {
*curr_tt_entry = (curr_addr + virt_phys_offset) | 0xc02;
} else if ((uint32 *)curr_addr >= kernel_start_virt && (uint32 *)curr_addr < kernel_end_virt) {
*curr_tt_entry = (curr_addr - virt_phys_offset) | 0xc02;
for (curr_addr = (uint32 *)CONFIG_INIT_PAGE_SIZE; curr_addr != 0; curr_addr += (CONFIG_INIT_PAGE_SIZE>>2)) {
if (page_intersects(curr_addr, kernel_start_physical(), kernel_end_physical())) {
*curr_tt_entry = (uint32)(curr_addr + virt_phys_offset) | 0xc02;
} else if (page_intersects(curr_addr, kernel_start_virtual(), kernel_end_virtual())) {
*curr_tt_entry = (uint32)(curr_addr - virt_phys_offset) | 0xc02;
} else {
*curr_tt_entry = curr_addr | 0xc02;
*curr_tt_entry = (uint32)curr_addr | 0xc02;
}
curr_tt_entry++;
}
}

int mmu_region_contains(void *lower_a, void *upper_a, void *lower_b, void *upper_b) {
return lower_b >= lower_a && upper_b <= upper_a;
}

#define section_round_down(ptr) (((uint32)ptr) & ~(TT_SECTION_SIZE-1))
#define section_round_up(ptr) (((((uint32)ptr) & ~1) + (TT_SECTION_SIZE-1) ) & ~(TT_SECTION_SIZE-1))

/* Called once per physical memory region by bootup code. This function is
* responsible for only adding (via mm_add_free_region()) those parts of the
* memory region which are still available (i.e. aren't in the kernel and
* haven't been remapped anywhere else). */
void declare_memory_region(void *lower, void *upper) {
void *k_section_start_phys = (void *)section_round_down(kernel_start_phys);
void *k_section_end_phys = (void *)(section_round_up(kernel_end_phys) - 1);
void *k_section_start_virt = (void *)section_round_down(kernel_start_virt);
void *k_section_end_virt = (void *)(section_round_up(kernel_end_virt) - 1);

if (upper - lower < 1) {
print("Warning: declare_memory_region() called with lower=%x, upper=%x. Ignoring.\n", lower, upper);
return;
}

//TODO It's possible (though highly unlikely) that the kernel (virtual)
//is split across two different memory regions. We should probably
//handle this.
if (mmu_region_contains(lower, upper, k_section_start_phys, k_section_end_phys)) {
//Don't map any of the physical kernel's memory
declare_memory_region(lower, (void *) ((char *)k_section_start_phys - 1));
declare_memory_region((void *) ((char *)k_section_end_phys + 1), upper);
mm_add_free_region(kernel_end_virt, k_section_end_virt);
} else if (mmu_region_contains(lower, upper, k_section_start_virt, k_section_end_virt)) {
declare_memory_region(lower, (void *) ((char *)k_section_start_virt - 1));
declare_memory_region((void *) ((char *)k_section_end_virt + 1), upper);
} else {
mm_add_free_region(lower, upper);
}
}
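The /* CAUTION: 1 = 4 bytes */ comments in both mmu_reinit() implementations flag that virt_phys_offset is a difference of uint32 pointers, so it counts 32-bit words rather than bytes; that is also why the loops stride by CONFIG_INIT_PAGE_SIZE>>2 (or PAGE_SIZE>>2) elements to advance one page's worth of bytes. A standalone, host-side sketch with hypothetical addresses:

#include <stdint.h>
#include <stdio.h>
typedef uint32_t uint32;

int main(void) {
    uint32 *virt = (uint32 *)0xc0100000; /* hypothetical linked address */
    uint32 *phys = (uint32 *)0x00100000; /* hypothetical load address */

    /* Pointer subtraction yields elements, not bytes: 1 = 4 bytes. */
    uint32 offset = virt - phys; /* 0xc0000000 bytes / 4 = 0x30000000 words */
    printf("offset in words: %#x\n", offset);

    /* Advancing by (1 MiB >> 2) words moves exactly 1 MiB of bytes. */
    uint32 *next = phys + ((1 << 20) >> 2);
    printf("next section:    %p\n", (void *)next); /* 0x00200000 */
    return 0;
}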

arch/i386/kernel/mmu.c (+8, -65)

@@ -20,28 +20,26 @@

#include <print.h>
#include <types.h>
#include <frames.h>

extern uint32 kernel_start_phys, kernel_end_phys;
extern uint32 kernel_start, kernel_end;
#include <arch/properties.h>

#define PAGE_SIZE 0x00400000
#define ROUND_DOWN_PAGE_SIZE(addr) ((typeof(addr))((uint32)(addr) & ~(PAGE_SIZE-1)))

static inline int page_intersects(uint32 *page_start, uint32 *lower, uint32 *upper) {
return (lower >= page_start && lower < (page_start + PAGE_SIZE)) ||
(upper >= page_start && upper < (page_start + PAGE_SIZE)) ||
(lower < page_start && upper >= (page_start + PAGE_SIZE));
return (lower >= page_start && lower < (page_start + (PAGE_SIZE>>2))) ||
(upper >= page_start && upper < (page_start + (PAGE_SIZE>>2))) ||
(lower < page_start && upper >= (page_start + (PAGE_SIZE>>2)));
}

void mmu_reinit() {
int virt_phys_offset; /* CAUTION: 1 = 4 bytes */
uint32 virt_phys_offset; /* CAUTION: 1 = 4 bytes */
uint32 *curr_tbl_entry;
uint32 *curr_addr;
uint32 *page_dir;
asm("movl %%cr3, %0" : "=r"(page_dir) : : );

virt_phys_offset = &kernel_start - &kernel_start_phys;
virt_phys_offset = kernel_start_virtual() - kernel_start_physical();
curr_tbl_entry = page_dir + virt_phys_offset;

//do the first loop iteration outside the loop, because we have to check against wrapping back around to know we're done
@@ -53,9 +51,9 @@ void mmu_reinit() {
//memory, make sure we keep those mappings correct, and we'll actually
//swap the two mappings so all of memory is addressable.
for (curr_addr = (uint32 *)PAGE_SIZE; curr_addr != 0; curr_addr += (PAGE_SIZE>>2)) {
if (page_intersects(curr_addr, &kernel_start_phys, &kernel_end_phys)) {
if (page_intersects(curr_addr, kernel_start_physical(), kernel_end_physical())) {
*curr_tbl_entry = (uint32)ROUND_DOWN_PAGE_SIZE(curr_addr + virt_phys_offset) | 0x83;
} else if (page_intersects(curr_addr, &kernel_start, &kernel_end)) {
} else if (page_intersects(curr_addr, kernel_start_virtual(), kernel_end_virtual())) {
*curr_tbl_entry = (uint32)ROUND_DOWN_PAGE_SIZE(curr_addr - virt_phys_offset) | 0x83;
} else {
*curr_tbl_entry = (uint32)curr_addr | 0x83;
@@ -65,58 +63,3 @@ void mmu_reinit() {
curr_tbl_entry++;
}
}

//TODO merge the rest of this file with the similar section in arch/arm. This is clearly mostly duplicated code.

int mmu_region_contains(void *lower_a, void *upper_a, void *lower_b, void *upper_b) {
return lower_b >= lower_a && upper_b <= upper_a;
}
int mmu_region_contains_single(void *lower_a, void *upper_a, void *ptr) {
return lower_a <= ptr && ptr <= upper_a;
}

#define section_round_down(ptr) (((uint32)ptr) & ~(PAGE_SIZE-1))
#define section_round_up(ptr) (((((uint32)ptr) & ~1) + (PAGE_SIZE-1) ) & ~(PAGE_SIZE-1))

/* Called once per physical memory region by bootup code. This function is
* responsible for only adding (via mm_add_free_region()) those parts of the
* memory region which are still available (i.e. aren't in the kernel and
* haven't been remapped anywhere else). */
void declare_memory_region(void *lower, void *upper) {
void *k_section_start_phys = (void *)section_round_down(&kernel_start_phys);
void *k_section_end_phys = (void *)(section_round_up(&kernel_end_phys) - 1);
void *k_section_start_virt = (void *)section_round_down(&kernel_start);
void *k_section_end_virt = (void *)(section_round_up(&kernel_end) - 1);

if (upper - lower < 1) {
print("Warning: declare_memory_region() called with lower=%x, upper=%x. Ignoring.\n", lower, upper);
return;
}

//TODO It's possible (though highly unlikely) that the kernel (virtual)
//is split across two different memory regions. We should probably
//handle this.
if (mmu_region_contains(lower, upper, k_section_start_phys, k_section_end_phys)) {
//Don't map any of the physical kernel's memory
declare_memory_region(lower, (void *) ((char *)k_section_start_phys - 1));
declare_memory_region((void *) ((char *)k_section_end_phys + 1), upper);
mm_add_free_region(&kernel_end, k_section_end_virt);
} else if (mmu_region_contains(lower, upper, k_section_start_virt, k_section_end_virt)) {
declare_memory_region(lower, (void *) ((char *)k_section_start_virt - 1));
declare_memory_region((void *) ((char *)k_section_end_virt + 1), upper);
} else if (mmu_region_contains_single(lower, upper, k_section_start_phys)) {
if ((void*)((char*)lower + 1) < k_section_start_phys)
declare_memory_region(lower, (void *) ((char *)k_section_start_phys - 1));
} else if (mmu_region_contains_single(lower, upper, k_section_end_phys)) {
if (k_section_end_phys < (void*)((char*)upper - 1))
declare_memory_region((void *) ((char *)k_section_end_phys + 1), upper);
} else if (mmu_region_contains_single(lower, upper, k_section_start_virt)) {
if ((void*)((char*)lower + 1) < k_section_start_virt)
declare_memory_region(lower, (void *) ((char *)k_section_start_virt - 1));
} else if (mmu_region_contains_single(lower, upper, k_section_end_virt)) {
if (k_section_end_virt < (void*)((char*)upper - 1))
declare_memory_region((void *) ((char *)k_section_end_virt + 1), upper);
} else {
mm_add_free_region(lower, upper);
}
}
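The three-way branch in mmu_reinit() swaps the kernel's physical and virtual windows so that, after relocation, every physical page remains addressable somewhere. A worked example under a hypothetical layout (4 MiB pages, kernel loaded at 0x00400000 physical, linked at 0xc0400000 virtual):

/* virt_phys_offset = (0xc0400000 - 0x00400000) / 4 = 0x30000000 words
 *                   (= 0xc0000000 bytes once pointer arithmetic rescales it)
 *
 * While filling the directory:
 *   curr_addr = 0x00400000 -> intersects the kernel's physical image, so
 *       entry = (0x00400000 + 0xc0000000) | 0x83   (shows the memory the
 *       virtual window would otherwise hide)
 *   curr_addr = 0xc0400000 -> intersects the kernel's virtual window, so
 *       entry = (0xc0400000 - 0xc0000000) | 0x83   (maps back to the image)
 *   any other curr_addr    -> identity-mapped: entry = curr_addr | 0x83
 *
 * Net effect: the two windows trade places and no physical page is lost. */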

kernel/frames.c (+51, -0)

@@ -23,6 +23,8 @@
#include <print.h>
#include <types.h>

#include <arch/properties.h>

struct dlist_node free_frames_list;

void frames_init() {
@@ -143,3 +145,52 @@ int put_free_frames(struct frame *f) {
}
return 1;
}

static inline int mmu_region_contains(void *lower_a, void *upper_a, void *lower_b, void *upper_b) {
return lower_b >= lower_a && upper_b <= upper_a;
}
static inline int mmu_region_contains_single(void *lower_a, void *upper_a, void *ptr) {
return lower_a <= ptr && ptr <= upper_a;
}
#define page_round_down(ptr) (((uint32)ptr) & ~(CONFIG_INIT_PAGE_SIZE-1))
#define page_round_up(ptr) (((((uint32)ptr) & ~1) + (CONFIG_INIT_PAGE_SIZE-1) ) & ~(CONFIG_INIT_PAGE_SIZE-1))

/* Called once per physical memory region by bootup code. This function is
* responsible for only adding (via add_physical_memory()) those parts of the
* memory region which are still available (i.e. aren't in the kernel and
* haven't been remapped anywhere else). */
void declare_memory_region(void *lower, void *upper) {
void *k_section_start_phys = (void *)page_round_down(kernel_start_physical());
void *k_section_end_phys = (void *)(page_round_up(kernel_end_physical()) - 1);
void *k_section_start_virt = (void *)page_round_down(kernel_start_virtual());
void *k_section_end_virt = (void *)(page_round_up(kernel_end_virtual()) - 1);

if (upper - lower < 1) {
print("Warning: declare_memory_region() called with lower=%x, upper=%x. Ignoring.\n", lower, upper);
return;
}

if (mmu_region_contains(lower, upper, k_section_start_phys, k_section_end_phys)) {
//Don't map any of the physical kernel's memory
declare_memory_region(lower, (void *) ((char *)k_section_start_phys - 1));
declare_memory_region((void *) ((char *)k_section_end_phys + 1), upper);
add_physical_memory(kernel_end_virtual(), k_section_end_virt);
} else if (mmu_region_contains(lower, upper, k_section_start_virt, k_section_end_virt)) {
declare_memory_region(lower, (void *) ((char *)k_section_start_virt - 1));
declare_memory_region((void *) ((char *)k_section_end_virt + 1), upper);
} else if (mmu_region_contains_single(lower, upper, k_section_start_phys)) {
if ((void*)((char*)lower + 1) < k_section_start_phys)
declare_memory_region(lower, (void *) ((char *)k_section_start_phys - 1));
} else if (mmu_region_contains_single(lower, upper, k_section_end_phys)) {
if (k_section_end_phys < (void*)((char*)upper - 1))
declare_memory_region((void *) ((char *)k_section_end_phys + 1), upper);
} else if (mmu_region_contains_single(lower, upper, k_section_start_virt)) {
if ((void*)((char*)lower + 1) < k_section_start_virt)
declare_memory_region(lower, (void *) ((char *)k_section_start_virt - 1));
} else if (mmu_region_contains_single(lower, upper, k_section_end_virt)) {
if (k_section_end_virt < (void*)((char*)upper - 1))
declare_memory_region((void *) ((char *)k_section_end_virt + 1), upper);
} else {
add_physical_memory(lower, upper);
}
}
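For concreteness, a hedged walk-through of the recursion with a hypothetical layout (CONFIG_INIT_PAGE_SIZE assumed 1 MiB, kernel physically at 0x00100000..0x002bffff, virtually at 0xc0100000..0xc02bffff):

/* Boot code declares one 128 MiB bank:
 *   declare_memory_region((void *)0x00000000, (void *)0x07ffffff);
 *
 * The page-rounded physical span 0x00100000..0x002fffff lies inside the
 * bank, so the first branch fires:
 *   - recurse on 0x00000000..0x000fffff -> no overlap, add_physical_memory()
 *   - recurse on 0x00300000..0x07ffffff -> no overlap, add_physical_memory()
 *   - reclaim the slack between kernel_end_virtual() and the rounded
 *     section end 0xc02fffff, the last partial page of the virtual window
 * The mmu_region_contains_single() branches handle regions that merely clip
 * one edge of the kernel's footprint instead of enclosing it. */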
