path: root/kernel/arch/i386
author     Anton Kling <anton@kling.gg>  2023-11-08 15:33:11 +0100
committer  Anton Kling <anton@kling.gg>  2023-11-08 15:33:11 +0100
commit     d88e7c2124ccb655945bcfd9c2b1cb875a42637d (patch)
tree       ccea3c2efa7664e5c7de040daea3e07e52908fe4  /kernel/arch/i386
parent     2753031a63117fd9de2d189d6731b4de8dd94769 (diff)
Kernel/MMU: Create a separate table for heap allocation
Diffstat (limited to 'kernel/arch/i386')
-rw-r--r--  kernel/arch/i386/boot.s  28
-rw-r--r--  kernel/arch/i386/mmu.c    6
2 files changed, 29 insertions, 5 deletions
diff --git a/kernel/arch/i386/boot.s b/kernel/arch/i386/boot.s
index 61bbfbb..3286a57 100644
--- a/kernel/arch/i386/boot.s
+++ b/kernel/arch/i386/boot.s
@@ -34,6 +34,8 @@ boot_page_directory:
.skip 4096
boot_page_table1:
.skip 4096
+heap_table:
+ .skip 4096
.section .multiboot.text, "a"
.global _start
@@ -55,8 +57,8 @@ _start:
# If we are past the kernel jump to the final stage
# "label 3"
- cmpl $(_kernel_end - 0xC0000000), %esi
- jge 3f
+ cmpl $(_kernel_end- 0xC0000000), %esi
+ jge heap_start
2:
# Add permission to the pages
@@ -71,10 +73,30 @@ _start:
# Loop to the next entry if we haven't finished.
loop 1b
-3:
+heap_start:
+ # edi contains the buffer we wish to modify
+ movl $(heap_table - 0xC0000000), %edi
+
+ # for loop
+ movl $1024, %ecx
+ addl $4096, %esi
+heap:
+ movl %esi, %edx
+ orl $0x003, %edx
+ movl %edx, (%edi)
+
+ # Size of page is 4096 bytes.
+ addl $4096, %esi
+ # Size of entries is 4 bytes.
+ addl $4, %edi
+
+ loop heap
+
+final:
# Map the page table to both virtual addresses 0x00000000 and 0xC0000000.
movl $(boot_page_table1 - 0xC0000000 + 0x003), boot_page_directory - 0xC0000000 + 0
movl $(boot_page_table1 - 0xC0000000 + 0x003), boot_page_directory - 0xC0000000 + 768 * 4
+ movl $(heap_table - 0xC0000000 + 0x003), boot_page_directory - 0xC0000000 + 769 * 4
# Set cr3 to the address of the boot_page_directory.
movl $(boot_page_directory - 0xC0000000), %ecx
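
Note on the assembly above: the new heap_start/heap loop fills heap_table with 1024 entries, each mapping one 4 KiB physical page with the present and read/write bits set (the 0x003 that gets OR'd in), starting from the first physical page after the ones already mapped for the kernel image. A rough C sketch of the same idea (fill_heap_table, phys and the PTE_* constants are illustrative names, not identifiers from this tree):

#include <stdint.h>

#define PAGE_SIZE    4096u
#define PTE_PRESENT  0x001u
#define PTE_WRITABLE 0x002u

/* One page table: 1024 32-bit entries, page aligned (matches .skip 4096). */
static uint32_t heap_table[1024] __attribute__((aligned(4096)));

/* Map 1024 consecutive 4 KiB physical pages, starting at 'phys'. */
static void fill_heap_table(uint32_t phys) {
  for (int i = 0; i < 1024; i++) {
    heap_table[i] = phys | PTE_PRESENT | PTE_WRITABLE; /* orl $0x003, %edx */
    phys += PAGE_SIZE;                                 /* addl $4096, %esi */
  }
}

Each page-directory slot covers 4 MiB, and slot 768 is what places the kernel at 0xC0000000; installing heap_table at slot 769 therefore exposes this extra 4 MiB window at 0xC0400000, directly after the existing kernel mapping.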
diff --git a/kernel/arch/i386/mmu.c b/kernel/arch/i386/mmu.c
index 6b2f704..4561675 100644
--- a/kernel/arch/i386/mmu.c
+++ b/kernel/arch/i386/mmu.c
@@ -549,12 +549,13 @@ void paging_init(void) {
}
switch_page_directory(kernel_directory);
+ get_page(NULL, kernel_directory, PAGE_ALLOCATE, 0)->present = 0;
kernel_directory = clone_directory(kernel_directory);
// Make null dereferences crash.
switch_page_directory(kernel_directory);
- get_page(NULL, kernel_directory, PAGE_ALLOCATE, 0)->present = 0;
switch_page_directory(clone_directory(kernel_directory));
+ /*
// FIXME: Really hacky solution. Since page table creation needs to
// allocate memory but memory allocation requires page table creation
// they depend on eachother. The bad/current solution is just to
@@ -563,5 +564,6 @@ void paging_init(void) {
allocate_frame(
get_page((void *)((0x302 + i) * 0x1000 * 1024), NULL, PAGE_ALLOCATE, 0),
1, 1);
- move_stack(0xA0000000, 0xC00000);
+ */
+ move_stack(0xA0000000, 0x80000);
}
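
On the mmu.c side, the get_page(NULL, kernel_directory, PAGE_ALLOCATE, 0)->present = 0 line now runs before clone_directory(), so the cloned directories inherit a non-present page at virtual address 0 and NULL dereferences fault in every address space, not just the original one. A minimal sketch of that trick, assuming a typical i386 page-table-entry layout (the struct and function below are hypothetical, not the project's real types):

#include <stdint.h>

/* Hypothetical i386 page-table entry layout, for illustration only. */
struct pte {
  uint32_t present : 1;  /* 0: any access to this page faults */
  uint32_t rw      : 1;
  uint32_t user    : 1;
  uint32_t flags   : 9;
  uint32_t frame   : 20; /* physical frame number */
};

/* Clearing 'present' on the entry covering virtual address 0 turns a
 * NULL dereference into a page fault instead of a silent read/write. */
static inline void unmap_null_page(struct pte *zero_page) {
  zero_page->present = 0;
}

The commented-out FIXME block (and the smaller move_stack() argument) line up with the commit message: with heap_table already mapped at boot, paging_init() appears not to need the old hand-rolled frame pre-allocation any more.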