Diffstat (limited to 'kernel')
-rw-r--r--  kernel/arch/i386/mmu.c    103
-rw-r--r--  kernel/drivers/pit.c       12
-rw-r--r--  kernel/includes/mmu.h       4
-rw-r--r--  kernel/init/kernel.c        4
-rw-r--r--  kernel/network/tcp.c        9
-rw-r--r--  kernel/process.s           10
6 files changed, 83 insertions, 59 deletions
diff --git a/kernel/arch/i386/mmu.c b/kernel/arch/i386/mmu.c
index ad415a6..8761dd8 100644
--- a/kernel/arch/i386/mmu.c
+++ b/kernel/arch/i386/mmu.c
@@ -17,16 +17,20 @@ PageDirectory *kernel_directory;
PageDirectory real_kernel_directory;
PageDirectory *active_directory = 0;
+u32 num_allocated_frames = 0;
+
#define END_OF_MEMORY 0x8000000 * 15
-u64 num_of_frames;
-u32 *frames;
u64 available_memory_kb;
-u32 num_allocated_frames = 0;
+
+u32 num_array_frames = 1024;
+u32 tmp_array[1024];
+u32 *tmp_small_frames = tmp_array;
#define KERNEL_START 0xc0000000
extern uintptr_t data_end;
-void write_to_frame(u32 frame_address, u8 on);
+void change_frame(u32 frame, int on);
+u32 get_free_frame(void);
void *ksbrk(size_t s) {
uintptr_t rc = (uintptr_t)align_page((void *)data_end);
@@ -37,7 +41,6 @@ void *ksbrk(size_t s) {
// If there is no active pagedirectory we
// just assume that the memory is
// already mapped.
- get_fast_insecure_random((void *)rc, data_end - rc);
return (void *)rc;
}
// Determine whether we are approaching an unallocated table
@@ -59,7 +62,6 @@ void *ksbrk(size_t s) {
mmu_allocate_shared_kernel_region((void *)rc, (data_end - (uintptr_t)rc));
assert(((uintptr_t)rc % PAGE_SIZE) == 0);
- get_fast_insecure_random((void *)rc, data_end - rc);
return (void *)rc;
}
@@ -132,20 +134,18 @@ void *align_page(void *a) {
}
u32 first_free_frame(void) {
- for (u32 i = 1; i < INDEX_FROM_BIT(num_of_frames); i++) {
- if (frames[i] == 0xFFFFFFFF) {
+ u32 i = 1;
+ for (; i < INDEX_FROM_BIT(num_array_frames * 32); i++) {
+ if (tmp_small_frames[i] == 0xFFFFFFFF) {
continue;
}
for (u32 c = 0; c < 32; c++) {
- if (!(frames[i] & ((u32)1 << c))) {
+ if (!(tmp_small_frames[i] & ((u32)1 << c))) {
return i * 32 + c;
}
}
}
-
- kprintf("ERROR Num frames: %x\n", mmu_get_number_of_allocated_frames());
- klog("No free frames, uh oh.", LOG_ERROR);
assert(0);
return 0;
}
@@ -154,11 +154,13 @@ void write_to_frame(u32 frame_address, u8 on) {
u32 frame = frame_address / 0x1000;
if (on) {
num_allocated_frames++;
- frames[INDEX_FROM_BIT(frame)] |= ((u32)0x1 << OFFSET_FROM_BIT(frame));
+ tmp_small_frames[INDEX_FROM_BIT(frame)] |=
+ ((u32)0x1 << OFFSET_FROM_BIT(frame));
return;
}
num_allocated_frames--;
- frames[INDEX_FROM_BIT(frame)] &= ~((u32)0x1 << OFFSET_FROM_BIT(frame));
+ tmp_small_frames[INDEX_FROM_BIT(frame)] &=
+ ~((u32)0x1 << OFFSET_FROM_BIT(frame));
}
PageDirectory *get_active_pagedirectory(void) {
@@ -441,7 +443,6 @@ void mmu_map_physical(void *dst, PageDirectory *d, void *physical,
p->frame = (uintptr_t)physical / PAGE_SIZE;
write_to_frame((uintptr_t)physical, 1);
}
- flush_tlb();
}
struct PhysVirtMap {
@@ -591,12 +592,33 @@ void create_table(int table_index) {
kernel_directory->physical_tables[table_index] = (u32)physical | 0x3;
}
-void paging_init(u64 memsize) {
+void paging_init(u64 memsize, multiboot_info_t *mb) {
u32 *cr3 = (void *)get_cr3();
u32 *virtual = (u32 *)((u32)cr3 + 0xC0000000);
- frames = ksbrk(1024 * sizeof(u32));
- memset(frames, 0, 1024 * sizeof(u32));
- num_of_frames = 1024 * 32;
+
+ u32 num_of_frames = 0;
+
+ memset(tmp_small_frames, 0xFF, num_array_frames * sizeof(u32));
+ {
+ multiboot_memory_map_t *map =
+ (multiboot_memory_map_t *)(mb->mmap_addr + 0xc0000000);
+ for (int length = 0; length < mb->mmap_length;) {
+ if (MULTIBOOT_MEMORY_AVAILABLE == map->type) {
+ num_of_frames = max(num_of_frames, map->addr + map->len);
+ for (size_t i = 0; i < map->len; i += 0x20000) {
+ u32 frame = (map->addr + i) / 0x1000;
+ if (frame < (num_array_frames * 32)) {
+ tmp_small_frames[INDEX_FROM_BIT(frame)] = 0;
+ }
+ }
+ }
+ u32 delta = (uintptr_t)map->size + sizeof(map->size);
+ map = (multiboot_memory_map_t *)((uintptr_t)map + delta);
+ length += delta;
+ }
+ }
+ num_of_frames /= 0x1000;
+ num_of_frames /= 32;
kernel_directory = &real_kernel_directory;
kernel_directory->physical_address = (u32)cr3;
@@ -636,24 +658,29 @@ void paging_init(u64 memsize) {
switch_page_directory(clone_directory(kernel_directory));
move_stack(0xA0000000, 0x80000);
- u64 buffer_size = (memsize / 32) * sizeof(u32);
- // TODO: Very hacky solution since we have to memcpy the old allocation. This
- // places a strict requierment on how much RAM the system can have(altough it
- // is very small). Ideally the number of frames required would be dynamically
- // calculated.
- assert(buffer_size >= 1024 * sizeof(u32));
-
- // TODO Do this better
- // NOTE:
- // There are some addresses that point to devices rather than RAM.
- // Therefore we need frames for these to exist
- u64 min_buffer_required = 0xFD000 + 0x100000;
- buffer_size = max(min_buffer_required, buffer_size);
-
available_memory_kb = memsize;
- num_of_frames = available_memory_kb / 4;
- u32 *new_frames = ksbrk(buffer_size);
- memset(new_frames, 0, buffer_size);
- memcpy(new_frames, frames, 1024 * sizeof(u32));
- frames = new_frames;
+
+ void *new = kmalloc(num_of_frames * sizeof(u32));
+ memset(new, 0xFF, num_of_frames * sizeof(u32));
+ memcpy(new, tmp_small_frames, num_array_frames * sizeof(u32));
+ tmp_small_frames = new;
+ {
+ multiboot_memory_map_t *map =
+ (multiboot_memory_map_t *)(mb->mmap_addr + 0xc0000000);
+ for (int length = 0; length < mb->mmap_length;) {
+ if (MULTIBOOT_MEMORY_AVAILABLE == map->type) {
+ for (size_t i = 0; i < map->len - 0x1000; i += 0x20000) {
+ u32 frame = (map->addr + i) / 0x1000;
+ if (frame > (num_array_frames * 32)) {
+ assert(INDEX_FROM_BIT(frame) <= num_of_frames);
+ tmp_small_frames[INDEX_FROM_BIT(frame)] = 0;
+ }
+ }
+ }
+ u32 delta = (uintptr_t)map->size + sizeof(map->size);
+ map = (multiboot_memory_map_t *)((uintptr_t)map + delta);
+ length += delta;
+ }
+ }
+ num_array_frames = num_of_frames;
}
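
The reworked paging_init no longer trusts a single memsize figure: it walks the multiboot memory map, first into a small static bitmap before the heap exists and then into a kmalloc'd bitmap sized from the highest available address (the 0x20000 stride clears one 32-frame bitmap word per step). A minimal sketch of that walk, assuming the standard multiboot v1 entry layout; the helper name and the fixed 0xc0000000 higher-half offset are illustrative, not this kernel's exact API.

    #include <stdint.h>

    /* Layout of one multiboot (v1) memory map entry, per the multiboot spec. */
    struct mmap_entry {
      uint32_t size;   /* size of the rest of the entry, NOT counting this field */
      uint64_t addr;   /* physical start of the region */
      uint64_t len;    /* length of the region in bytes */
      uint32_t type;   /* 1 == MULTIBOOT_MEMORY_AVAILABLE */
    } __attribute__((packed));

    #define MULTIBOOT_MEMORY_AVAILABLE 1

    /* Walk the map and report the highest usable physical address.
     * mmap_addr/mmap_length come from multiboot_info_t; adding 0xc0000000
     * mirrors the kernel's higher-half mapping of low physical memory. */
    static uint64_t highest_available(uint32_t mmap_addr, uint32_t mmap_length) {
      uint64_t top = 0;
      struct mmap_entry *map = (struct mmap_entry *)(mmap_addr + 0xc0000000);
      for (uint32_t consumed = 0; consumed < mmap_length;) {
        if (MULTIBOOT_MEMORY_AVAILABLE == map->type && map->addr + map->len > top) {
          top = map->addr + map->len;
        }
        /* Entries are variable length: 'size' counts everything after itself. */
        uint32_t delta = map->size + sizeof(map->size);
        map = (struct mmap_entry *)((uintptr_t)map + delta);
        consumed += delta;
      }
      return top;
    }
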
diff --git a/kernel/drivers/pit.c b/kernel/drivers/pit.c
index 30fd3ed..41c0d2a 100644
--- a/kernel/drivers/pit.c
+++ b/kernel/drivers/pit.c
@@ -44,16 +44,14 @@ void set_pit_count(u16 _hertz) {
}
void int_clock(reg_t *regs) {
- EOI(0x20);
- pit_counter++;
- if (pit_counter * 1000 >= hertz) {
- pit_counter = 0;
- clock_num_ms_ticks += 1000 / hertz;
- }
+ clock_num_ms_ticks++;
switch_counter++;
- if (switch_counter * 500 >= hertz) {
+ if (switch_counter >= hertz) {
+ EOI(0x20);
switch_counter = 0;
switch_task();
+ } else {
+ EOI(0x20);
}
}
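
The simplified handler counts one millisecond per interrupt and defers to the scheduler every hertz ticks, which only holds if the PIT is actually programmed to fire hertz times per second. A rough sketch of that programming step, assuming an outb port-I/O helper and the standard 1,193,182 Hz PIT input clock; set_pit_count in this kernel may do it differently.

    #include <stdint.h>

    #define PIT_BASE_HZ 1193182u   /* input clock of the 8253/8254 PIT */
    #define PIT_CMD     0x43       /* mode/command register */
    #define PIT_CH0     0x40       /* channel 0 data port (wired to IRQ0) */

    extern void outb(uint16_t port, uint8_t value);  /* assumed port-I/O helper */

    /* Program channel 0 so IRQ0 fires roughly `hertz` times per second. */
    static void pit_set_frequency(uint32_t hertz) {
      uint32_t divisor = PIT_BASE_HZ / hertz;        /* 16-bit reload value */
      outb(PIT_CMD, 0x36);                           /* channel 0, lo/hi byte, mode 3 */
      outb(PIT_CH0, divisor & 0xFF);                 /* low byte first */
      outb(PIT_CH0, (divisor >> 8) & 0xFF);          /* then high byte */
    }
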
diff --git a/kernel/includes/mmu.h b/kernel/includes/mmu.h
index 68fd134..1a3f7c9 100644
--- a/kernel/includes/mmu.h
+++ b/kernel/includes/mmu.h
@@ -1,6 +1,7 @@
#ifndef PAGING_H
#define PAGING_H
#include "kmalloc.h"
+#include <multiboot.h>
#include <typedefs.h>
typedef u8 mmu_flags;
@@ -49,7 +50,7 @@ void *mmu_is_valid_userpointer(const void *ptr, size_t s);
void *mmu_is_valid_user_c_string(const char *ptr, size_t *size);
void flush_tlb(void);
-void paging_init(u64 memsize);
+void paging_init(u64 memsize, multiboot_info_t *mb);
PageDirectory *get_active_pagedirectory(void);
void move_stack(u32 new_stack_address, u32 size);
void switch_page_directory(PageDirectory *directory);
@@ -58,6 +59,7 @@ PageDirectory *clone_directory(PageDirectory *original);
void *virtual_to_physical(void *address, PageDirectory *directory);
void *ksbrk(size_t s);
void *ksbrk_physical(size_t s, void **physical);
+void write_to_frame(u32 frame_address, u8 on);
Page *get_page(void *ptr, PageDirectory *directory, int create_new_page,
int set_user);
diff --git a/kernel/init/kernel.c b/kernel/init/kernel.c
index ef7cdae..180aaa8 100644
--- a/kernel/init/kernel.c
+++ b/kernel/init/kernel.c
@@ -43,7 +43,6 @@ uintptr_t data_end;
void kernel_main(u32 kernel_end, unsigned long magic, unsigned long addr,
u32 inital_stack) {
- (void)kernel_end;
data_end = 0xc0400000;
inital_esp = inital_stack;
@@ -55,7 +54,8 @@ void kernel_main(u32 kernel_end, unsigned long magic, unsigned long addr,
u32 mem_kb = mb->mem_lower;
u32 mem_mb = (mb->mem_upper - 1000) / 1000;
u64 memsize_kb = mem_mb * 1000 + mem_kb;
- paging_init(memsize_kb);
+
+ paging_init(memsize_kb, mb);
klog("Paging Initalized", LOG_SUCCESS);
mb = mmu_map_frames((multiboot_info_t *)addr, sizeof(multiboot_info_t));
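
kernel_main still derives memsize_kb from mem_lower/mem_upper but now also forwards the raw multiboot_info_t pointer so paging_init can read the memory map itself. For reference, a minimal sketch of what those multiboot fields mean, assuming the standard multiboot v1 header; the kernel's own arithmetic above differs slightly.

    #include <stdint.h>
    #include <multiboot.h>   /* multiboot_info_t, MULTIBOOT_BOOTLOADER_MAGIC */

    /* mem_lower is KiB of RAM below 1 MiB, mem_upper is KiB above 1 MiB.
     * Flags bit 0 says whether those fields are valid; bit 6 says whether
     * mmap_addr/mmap_length (used by paging_init) are valid. */
    static uint64_t memsize_kb_from_multiboot(unsigned long magic,
                                              const multiboot_info_t *mb) {
      if (magic != MULTIBOOT_BOOTLOADER_MAGIC) {
        return 0;                         /* not booted by a multiboot loader */
      }
      if (!(mb->flags & (1 << 0))) {
        return 0;                         /* mem_lower/mem_upper not provided */
      }
      return (uint64_t)mb->mem_lower + mb->mem_upper;
    }
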
diff --git a/kernel/network/tcp.c b/kernel/network/tcp.c
index b929005..dd6e6a8 100644
--- a/kernel/network/tcp.c
+++ b/kernel/network/tcp.c
@@ -162,12 +162,11 @@ void send_tcp_packet(struct TcpConnection *con, const u8 *payload,
void handle_tcp(ipv4_t src_ip, const u8 *payload, u32 payload_length) {
const struct TCP_HEADER *header = (const struct TCP_HEADER *)payload;
(void)header;
- u16 n_src_port = *(u16 *)(payload);
- u16 n_dst_port = *(u16 *)(payload + 2);
- u32 n_seq_num = *(u32 *)(payload + 4);
- u32 n_ack_num = *(u32 *)(payload + 8);
+ u16 n_src_port = header->src_port;
+ u16 n_dst_port = header->dst_port;
+ u32 n_seq_num = header->seq_num;
+ u32 n_ack_num = header->ack_num;
- // u8 flags = *(payload + 13);
u8 flags = header->flags;
u16 src_port = htons(n_src_port);
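
Reading the fields through struct TCP_HEADER instead of raw pointer offsets keeps the values in network byte order until the htons conversions that follow. A sketch of what such a packed header looks like on the wire, with the byte-order handling spelled out; the field names mirror the diff, but this kernel's actual definition may differ.

    #include <stdint.h>

    /* Packed TCP header matching the byte offsets the old code used
     * (src at 0, dst at 2, seq at 4, ack at 8, flags at 13). */
    struct tcp_header {
      uint16_t src_port;     /* network byte order */
      uint16_t dst_port;     /* network byte order */
      uint32_t seq_num;      /* network byte order */
      uint32_t ack_num;      /* network byte order */
      uint8_t  data_offset;  /* upper 4 bits: header length in 32-bit words */
      uint8_t  flags;        /* FIN/SYN/RST/PSH/ACK/URG bits */
      uint16_t window_size;
      uint16_t checksum;
      uint16_t urgent_pointer;
    } __attribute__((packed));

    /* Host-order view of the source port; a 16-bit swap is its own inverse,
     * so htons and ntohs do the same thing here. */
    static uint16_t tcp_src_port(const uint8_t *payload) {
      const struct tcp_header *h = (const struct tcp_header *)payload;
      return (uint16_t)((h->src_port >> 8) | (h->src_port << 8));
    }
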
diff --git a/kernel/process.s b/kernel/process.s
index caef941..934d627 100644
--- a/kernel/process.s
+++ b/kernel/process.s
@@ -128,15 +128,13 @@ switch_to_task:
mov eax,[esi+TCB.CR3] # eax = address of page directory for next task
mov ebx,[esi+TCB.ESP0] # ebx = address for the top of the next task's kernel stack
# mov [TSS.ESP0],ebx # Adjust the ESP0 field in the TSS (used by CPU for CPL=3 -> CPL=0 privilege level changes)
-# mov ecx,cr3 # ecx = previous task's virtual address space
+ mov ecx,cr3 # ecx = previous task's virtual address space
-# FIXME: This branch gets a from the assembler, something about "relaxed branches".
-# this branch would probably not be used anyway but should be checked on later anyway.
-# cmp eax,ecx # Does the virtual address space need to being changed?
+ cmp eax,ecx # Does the virtual address space need to be changed?
-# je .doneVAS # no, virtual address space is the same, so don't reload it and cause TLB flushes
+ je .doneVAS # no, virtual address space is the same, so don't reload it and cause TLB flushes
mov cr3,eax # yes, load the next task's virtual address space
-#.doneVAS:
+.doneVAS:
pop ebp
pop edi
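
Re-enabling the cr3 comparison means the context switch only reloads the page directory, and therefore only flushes the TLB, when the next task lives in a different address space. The same guard expressed in C with inline assembly, as a rough illustration of what the assembly above does:

    #include <stdint.h>

    static inline uint32_t read_cr3(void) {
      uint32_t value;
      asm volatile("mov %%cr3, %0" : "=r"(value));
      return value;
    }

    static inline void write_cr3(uint32_t value) {
      asm volatile("mov %0, %%cr3" : : "r"(value) : "memory");
    }

    /* Only touch cr3 when the address space really changes; writing cr3
     * flushes the non-global TLB entries, which is the expensive part. */
    static void switch_address_space(uint32_t next_cr3) {
      if (read_cr3() != next_cr3) {
        write_cr3(next_cr3);
      }
    }
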