Browse Source

Consolidate physical page frame declaration from all archs

Aaron Lindsay 6 years ago
parent
commit
b51f8f8422
3 changed files with 76 additions and 125 deletions
  1. 17 60
      arch/arm/kernel/mmu.c
  2. 8 65
      arch/i386/kernel/mmu.c
  3. 51 0
      kernel/frames.c

+ 17 - 60
arch/arm/kernel/mmu.c

@@ -20,7 +20,8 @@
20 20
 
21 21
 #include <print.h>
22 22
 #include <types.h>
23
-#include <frames.h>
23
+
24
+#include <arch/properties.h>
24 25
 
25 26
 #define SCTLR 15,0,1,0,0
26 27
 #define TTBR0 15,0,2,0,0
@@ -32,27 +33,21 @@
32 33
 #define cp_read(var, ...)  _cp_read(var, __VA_ARGS__)
33 34
 #define cp_write(var, ...)  _cp_write(var, __VA_ARGS__)
34 35
 
35
-#define TT_BASE_SIZE (1<<14) /* 16k */
36
-#define TT_SECTION_SIZE (1<<20) /* 1mb */
37
-
38
-uint32 *kernel_start_phys, *kernel_start_virt, *kernel_end_phys, *kernel_end_virt;
39
-
40 36
 void print_mapping(void *addr) {
41 37
 	extern uint32 tt_base_virtual;
42 38
 	print("%x: %x\n", addr, *(uint32 *)(tt_base_virtual + (((uint32)addr)>>18)));
43 39
 }
44 40
 
45
-void mmu_reinit() {
46
-	extern uint32 tt_base_virtual, tt_base_physical, start;
47
-	uint32 curr_addr;
48
-	uint32 *curr_tt_entry;
49
-	int virt_phys_offset;
41
+static inline int page_intersects(uint32 *page_start, uint32 *lower, uint32 *upper) {
42
+	return (lower >= page_start && lower < (page_start + (CONFIG_INIT_PAGE_SIZE>>2))) ||
43
+		(upper >= page_start && upper < (page_start + (CONFIG_INIT_PAGE_SIZE>>2))) ||
44
+		(lower < page_start && upper >= (page_start + (CONFIG_INIT_PAGE_SIZE>>2)));
45
+}
50 46
 
51
-	virt_phys_offset = tt_base_virtual - tt_base_physical;
52
-	kernel_start_virt = &start;
53
-	kernel_start_phys = kernel_start_virt - virt_phys_offset/4;
54
-	kernel_end_virt = (uint32 *)(tt_base_virtual + TT_BASE_SIZE);
55
-	kernel_end_phys = (uint32 *)(tt_base_physical + TT_BASE_SIZE);
47
+void mmu_reinit() {
48
+	extern uint32 tt_base_virtual;
49
+	uint32 *curr_addr, *curr_tt_entry;
50
+	uint32 virt_phys_offset = kernel_start_virtual() - kernel_start_physical(); /* CAUTION: 1 = 4 bytes */
56 51
 
57 52
 	//get the current translation table base address
58 53
 	curr_tt_entry = (uint32 *)tt_base_virtual;
@@ -65,52 +60,14 @@ void mmu_reinit() {
65 60
 	//BUT, if we've relocated the kernel from where it is in physical
66 61
 	//memory, make sure we keep those mappings correct, and we'll actually
67 62
 	//swap the two mappings so all of memory is addressable.
68
-	for (curr_addr = 0x00100000; curr_addr != 0; curr_addr += 0x00100000) {
69
-		if ((uint32 *)curr_addr >= kernel_start_phys && (uint32 *)curr_addr < kernel_end_phys) {
70
-			*curr_tt_entry = (curr_addr + virt_phys_offset) | 0xc02;
71
-		} else if ((uint32 *)curr_addr >= kernel_start_virt && (uint32 *)curr_addr < kernel_end_virt) {
72
-			*curr_tt_entry = (curr_addr - virt_phys_offset) | 0xc02;
63
+	for (curr_addr = (uint32 *)CONFIG_INIT_PAGE_SIZE; curr_addr != 0; curr_addr += (CONFIG_INIT_PAGE_SIZE>>2)) {
64
+		if (page_intersects(curr_addr, kernel_start_physical(), kernel_end_physical())) {
65
+			*curr_tt_entry = (uint32)(curr_addr + virt_phys_offset) | 0xc02;
66
+		} else if (page_intersects(curr_addr, kernel_start_virtual(), kernel_end_virtual())) {
67
+			*curr_tt_entry = (uint32)(curr_addr - virt_phys_offset) | 0xc02;
73 68
 		} else {
74
-			*curr_tt_entry = curr_addr | 0xc02;
69
+			*curr_tt_entry = (uint32)curr_addr | 0xc02;
75 70
 		}
76 71
 		curr_tt_entry++;
77 72
 	}
78 73
 }
79
-
80
-int mmu_region_contains(void *lower_a, void *upper_a, void *lower_b, void *upper_b) {
81
-	return lower_b >= lower_a && upper_b <= upper_a;
82
-}
83
-
84
-#define section_round_down(ptr) (((uint32)ptr) & ~(TT_SECTION_SIZE-1))
85
-#define section_round_up(ptr) (((((uint32)ptr) & ~1) + (TT_SECTION_SIZE-1) ) & ~(TT_SECTION_SIZE-1))
86
-
87
-/* Called once per physical memory region by bootup code. This function is
88
- * responsible for only adding (via mm_add_free_region()) those parts of the
89
- * memory region which are still available (i.e. aren't in the kernel and
90
- * haven't been remapped anywhere else. */
91
-void declare_memory_region(void *lower, void *upper) {
92
-	void *k_section_start_phys = (void *)section_round_down(kernel_start_phys);
93
-	void *k_section_end_phys = (void *)(section_round_up(kernel_end_phys) - 1);
94
-	void *k_section_start_virt = (void *)section_round_down(kernel_start_virt);
95
-	void *k_section_end_virt = (void *)(section_round_up(kernel_end_virt) - 1);
96
-
97
-	if (upper - lower < 1) {
98
-		print("Warning: declare_memory_region() called with lower=%x, upper=%x. Ignoring.\n", lower, upper);
99
-		return;
100
-	}
101
-
102
-	//TODO It's possible (though highly unlikely) that the kernel (virtual)
103
-	//is split across two different memory regions. We should probably
104
-	//handle this.
105
-	if (mmu_region_contains(lower, upper, k_section_start_phys, k_section_end_phys)) {
106
-		//Don't map any of the physical kernel's memory
107
-		declare_memory_region(lower, (void *) ((char *)k_section_start_phys - 1));
108
-		declare_memory_region((void *) ((char *)k_section_end_phys + 1), upper);
109
-		mm_add_free_region(kernel_end_virt, k_section_end_virt);
110
-	} else if (mmu_region_contains(lower, upper, k_section_start_virt, k_section_end_virt)) {
111
-		declare_memory_region(lower, (void *) ((char *)k_section_start_virt - 1));
112
-		declare_memory_region((void *) ((char *)k_section_end_virt + 1), upper);
113
-	} else {
114
-		mm_add_free_region(lower, upper);
115
-	}
116
-}

+ 8 - 65
arch/i386/kernel/mmu.c

@@ -20,28 +20,26 @@
20 20
 
21 21
 #include <print.h>
22 22
 #include <types.h>
23
-#include <frames.h>
24 23
 
25
-extern uint32 kernel_start_phys, kernel_end_phys;
26
-extern uint32 kernel_start, kernel_end;
24
+#include <arch/properties.h>
27 25
 
28 26
 #define PAGE_SIZE 0x00400000
29 27
 #define ROUND_DOWN_PAGE_SIZE(addr) ((typeof(addr))((uint32)(addr) & 0xff800000))
30 28
 
31 29
 static inline int page_intersects(uint32 *page_start, uint32 *lower, uint32 *upper) {
32
-	return (lower >= page_start && lower < (page_start + PAGE_SIZE)) ||
33
-		(upper >= page_start && upper < (page_start + PAGE_SIZE)) ||
34
-		(lower < page_start && upper >= (page_start + PAGE_SIZE));
30
+	return (lower >= page_start && lower < (page_start + (PAGE_SIZE>>2))) ||
31
+		(upper >= page_start && upper < (page_start + (PAGE_SIZE>>2))) ||
32
+		(lower < page_start && upper >= (page_start + (PAGE_SIZE>>2)));
35 33
 }
36 34
 
37 35
 void mmu_reinit() {
38
-	int virt_phys_offset; /* CAUTION: 1 = 4 bytes */
36
+	uint32 virt_phys_offset; /* CAUTION: 1 = 4 bytes */
39 37
 	uint32 *curr_tbl_entry;
40 38
 	uint32 *curr_addr;
41 39
 	uint32 *page_dir;
42 40
 	asm("movl %%cr3, %0" : "=r"(page_dir) : : );
43 41
 
44
-	virt_phys_offset = &kernel_start - &kernel_start_phys;
42
+	virt_phys_offset = kernel_start_virtual() - kernel_start_physical();
45 43
 	curr_tbl_entry = page_dir + virt_phys_offset;
46 44
 
47 45
 //do first loop iteration outside the loop, because we have to check against wrapping back around to know we're done
@@ -53,9 +51,9 @@ void mmu_reinit() {
53 51
 	//memory, make sure we keep those mappings correct, and we'll actually
54 52
 	//swap the two mappings so all of memory is addressable.
55 53
 	for (curr_addr = (uint32 *)PAGE_SIZE; curr_addr != 0; curr_addr += (PAGE_SIZE>>2)) {
56
-		if (page_intersects(curr_addr, &kernel_start_phys, &kernel_end_phys)) {
54
+		if (page_intersects(curr_addr, kernel_start_physical(), kernel_end_physical())) {
57 55
 			*curr_tbl_entry = (uint32)ROUND_DOWN_PAGE_SIZE(curr_addr + virt_phys_offset) | 0x83;
58
-		} else if (page_intersects(curr_addr, &kernel_start, &kernel_end)) {
56
+		} else if (page_intersects(curr_addr, kernel_start_virtual(), kernel_end_virtual())) {
59 57
 			*curr_tbl_entry = (uint32)ROUND_DOWN_PAGE_SIZE(curr_addr - virt_phys_offset) | 0x83;
60 58
 		} else {
61 59
 			*curr_tbl_entry = (uint32)curr_addr | 0x83;
@@ -65,58 +63,3 @@ void mmu_reinit() {
65 63
 		curr_tbl_entry++;
66 64
 	}
67 65
 }
68
-
69
-//TODO merge the rest of this file with the similar section in arch/arm. This is clearly mostly duplicated code.
70
-
71
-int mmu_region_contains(void *lower_a, void *upper_a, void *lower_b, void *upper_b) {
72
-	return lower_b >= lower_a && upper_b <= upper_a;
73
-}
74
-int mmu_region_contains_single(void *lower_a, void *upper_a, void *ptr) {
75
-	return lower_a <= ptr && ptr <= upper_a;
76
-}
77
-
78
-#define section_round_down(ptr) (((uint32)ptr) & ~(PAGE_SIZE-1))
79
-#define section_round_up(ptr) (((((uint32)ptr) & ~1) + (PAGE_SIZE-1) ) & ~(PAGE_SIZE-1))
80
-
81
-/* Called once per physical memory region by bootup code. This function is
82
- * responsible for only adding (via mm_add_free_region()) those parts of the
83
- * memory region which are still available (i.e. aren't in the kernel and
84
- * haven't been remapped anywhere else. */
85
-void declare_memory_region(void *lower, void *upper) {
86
-	void *k_section_start_phys = (void *)section_round_down(&kernel_start_phys);
87
-	void *k_section_end_phys = (void *)(section_round_up(&kernel_end_phys) - 1);
88
-	void *k_section_start_virt = (void *)section_round_down(&kernel_start);
89
-	void *k_section_end_virt = (void *)(section_round_up(&kernel_end) - 1);
90
-
91
-	if (upper - lower < 1) {
92
-		print("Warning: declare_memory_region() called with lower=%x, upper=%x. Ignoring.\n", lower, upper);
93
-		return;
94
-	}
95
-
96
-	//TODO It's possible (though highly unlikely) that the kernel (virtual)
97
-	//is split across two different memory regions. We should probably
98
-	//handle this.
99
-	if (mmu_region_contains(lower, upper, k_section_start_phys, k_section_end_phys)) {
100
-		//Don't map any of the physical kernel's memory
101
-		declare_memory_region(lower, (void *) ((char *)k_section_start_phys - 1));
102
-		declare_memory_region((void *) ((char *)k_section_end_phys + 1), upper);
103
-		mm_add_free_region(&kernel_end, k_section_end_virt);
104
-	} else if (mmu_region_contains(lower, upper, k_section_start_virt, k_section_end_virt)) {
105
-		declare_memory_region(lower, (void *) ((char *)k_section_start_virt - 1));
106
-		declare_memory_region((void *) ((char *)k_section_end_virt + 1), upper);
107
-	} else if (mmu_region_contains_single(lower, upper, k_section_start_phys)) {
108
-		if ((void*)((char*)lower + 1) < k_section_start_phys)
109
-			declare_memory_region(lower, (void *) ((char *)k_section_start_phys - 1));
110
-	} else if (mmu_region_contains_single(lower, upper, k_section_end_phys)) {
111
-		if (k_section_end_phys < (void*)((char*)upper - 1))
112
-			declare_memory_region((void *) ((char *)k_section_end_phys + 1), upper);
113
-	} else if (mmu_region_contains_single(lower, upper, k_section_start_virt)) {
114
-		if ((void*)((char*)lower + 1) < k_section_start_virt)
115
-			declare_memory_region(lower, (void *) ((char *)k_section_start_virt - 1));
116
-	} else if (mmu_region_contains_single(lower, upper, k_section_end_virt)) {
117
-		if (k_section_end_virt < (void*)((char*)upper - 1))
118
-			declare_memory_region((void *) ((char *)k_section_end_virt + 1), upper);
119
-	} else {
120
-		mm_add_free_region(lower, upper);
121
-	}
122
-}

+ 51 - 0
kernel/frames.c

@@ -23,6 +23,8 @@
23 23
 #include <print.h>
24 24
 #include <types.h>
25 25
 
26
+#include <arch/properties.h>
27
+
26 28
 struct dlist_node free_frames_list;
27 29
 
28 30
 void frames_init() {
@@ -143,3 +145,52 @@ int put_free_frames(struct frame *f) {
143 145
 	}
144 146
 	return 1;
145 147
 }
148
+
149
+static inline int mmu_region_contains(void *lower_a, void *upper_a, void *lower_b, void *upper_b) {
150
+	return lower_b >= lower_a && upper_b <= upper_a;
151
+}
152
+static inline int mmu_region_contains_single(void *lower_a, void *upper_a, void *ptr) {
153
+	return lower_a <= ptr && ptr <= upper_a;
154
+}
155
+#define page_round_down(ptr) (((uint32)ptr) & ~(CONFIG_INIT_PAGE_SIZE-1))
156
+#define page_round_up(ptr) (((((uint32)ptr) & ~1) + (CONFIG_INIT_PAGE_SIZE-1) ) & ~(CONFIG_INIT_PAGE_SIZE-1))
157
+
158
+/* Called once per physical memory region by bootup code. This function is
159
+ * responsible for only adding (via add_physical_memory()) those parts of the
160
+ * memory region which are still available (i.e. aren't in the kernel and
161
+ * haven't been remapped anywhere else. */
162
+void declare_memory_region(void *lower, void *upper) {
163
+	void *k_section_start_phys = (void *)page_round_down(kernel_start_physical());
164
+	void *k_section_end_phys = (void *)(page_round_up(kernel_end_physical()) - 1);
165
+	void *k_section_start_virt = (void *)page_round_down(kernel_start_virtual());
166
+	void *k_section_end_virt = (void *)(page_round_up(kernel_end_virtual()) - 1);
167
+
168
+	if (upper - lower < 1) {
169
+		print("Warning: declare_memory_region() called with lower=%x, upper=%x. Ignoring.\n", lower, upper);
170
+		return;
171
+	}
172
+
173
+	if (mmu_region_contains(lower, upper, k_section_start_phys, k_section_end_phys)) {
174
+		//Don't map any of the physical kernel's memory
175
+		declare_memory_region(lower, (void *) ((char *)k_section_start_phys - 1));
176
+		declare_memory_region((void *) ((char *)k_section_end_phys + 1), upper);
177
+		add_physical_memory(kernel_end_virtual(), k_section_end_virt);
178
+	} else if (mmu_region_contains(lower, upper, k_section_start_virt, k_section_end_virt)) {
179
+		declare_memory_region(lower, (void *) ((char *)k_section_start_virt - 1));
180
+		declare_memory_region((void *) ((char *)k_section_end_virt + 1), upper);
181
+	} else if (mmu_region_contains_single(lower, upper, k_section_start_phys)) {
182
+		if ((void*)((char*)lower + 1) < k_section_start_phys)
183
+			declare_memory_region(lower, (void *) ((char *)k_section_start_phys - 1));
184
+	} else if (mmu_region_contains_single(lower, upper, k_section_end_phys)) {
185
+		if (k_section_end_phys < (void*)((char*)upper - 1))
186
+			declare_memory_region((void *) ((char *)k_section_end_phys + 1), upper);
187
+	} else if (mmu_region_contains_single(lower, upper, k_section_start_virt)) {
188
+		if ((void*)((char*)lower + 1) < k_section_start_virt)
189
+			declare_memory_region(lower, (void *) ((char *)k_section_start_virt - 1));
190
+	} else if (mmu_region_contains_single(lower, upper, k_section_end_virt)) {
191
+		if (k_section_end_virt < (void*)((char*)upper - 1))
192
+			declare_memory_region((void *) ((char *)k_section_end_virt + 1), upper);
193
+	} else {
194
+		add_physical_memory(lower, upper);
195
+	}
196
+}