author    | Anton Kling <anton@kling.gg> | 2024-06-23 23:55:03 +0200
committer | Anton Kling <anton@kling.gg> | 2024-06-23 23:55:03 +0200
commit    | f37e21114750c066a1b9f9d8e789185746fd1c45 (patch)
tree      | c49f57d40f78df286c24c6b96aa6d94d301db989
parent    | eb606d798b18be08e4a403132350b6dc350b522b (diff)
Kernel: Fix small out of memory issues
-rw-r--r-- | kernel/arch/i386/mmu.c   | 20
-rw-r--r-- | kernel/drivers/vbe.c     |  6
-rw-r--r-- | kernel/kmalloc.c         | 11
-rw-r--r-- | kernel/sched/scheduler.c |  4
4 files changed, 32 insertions, 9 deletions
diff --git a/kernel/arch/i386/mmu.c b/kernel/arch/i386/mmu.c
index 3043816..6948a73 100644
--- a/kernel/arch/i386/mmu.c
+++ b/kernel/arch/i386/mmu.c
@@ -29,24 +29,28 @@ void change_frame(u32 frame, int on);
 int get_free_frame(u32 *frame);
 int allocate_frame(Page *page, int rw, int is_kernel);
 
-static void create_kernel_table(int table_index) {
+static int create_kernel_table(int table_index) {
   u32 physical;
   active_directory->tables[table_index] = (PageTable *)0xDEADBEEF;
   PageTable *new_table =
       (PageTable *)ksbrk_physical(sizeof(PageTable), (void **)&physical);
+  if (!new_table) {
+    return 0;
+  }
   memset(new_table, 0, sizeof(PageTable));
   kernel_directory->tables[table_index] = new_table;
   kernel_directory->physical_tables[table_index] = physical | 0x3;
   if (!current_task) {
     active_directory->tables[table_index] = new_table;
     active_directory->physical_tables[table_index] = physical | 0x3;
-    return;
+    return 1;
   }
   for (process_t *p = ready_queue; p; p = p->next) {
     PageDirectory *pd = p->cr3;
     pd->tables[table_index] = new_table;
     pd->physical_tables[table_index] = physical | 0x3;
   }
+  return 1;
 }
 
 void *ksbrk(size_t s) {
@@ -63,12 +67,14 @@ void *ksbrk(size_t s) {
   // Determine whether we are approaching a unallocated table
   int table_index = 1 + (rc / (1024 * 0x1000));
   if (!kernel_directory->tables[table_index]) {
-    create_kernel_table(table_index);
+    if (!create_kernel_table(table_index)) {
+      return NULL;
+    }
     return ksbrk(s);
   }
   if (!mmu_allocate_shared_kernel_region((void *)rc,
                                          (data_end - (uintptr_t)rc))) {
-    return (void *)-1;
+    return NULL;
   }
   get_fast_insecure_random(rc, s);
   assert(((uintptr_t)rc % PAGE_SIZE) == 0);
@@ -77,6 +83,9 @@ void *ksbrk(size_t s) {
 
 void *ksbrk_physical(size_t s, void **physical) {
   void *r = ksbrk(s);
+  if (!r) {
+    return NULL;
+  }
   if (physical) {
     *physical = (void *)virtual_to_physical(r, 0);
   }
@@ -256,6 +265,9 @@ PageDirectory *clone_directory(PageDirectory *original) {
   u32 physical_address;
   PageDirectory *new_directory =
       kmalloc_align(sizeof(PageDirectory), (void **)&physical_address);
+  if (!new_directory) {
+    return NULL;
+  }
   memset(new_directory, 0, sizeof(PageDirectory));
   if (!new_directory) {
     return NULL;
diff --git a/kernel/drivers/vbe.c b/kernel/drivers/vbe.c
index c0603d6..5621b63 100644
--- a/kernel/drivers/vbe.c
+++ b/kernel/drivers/vbe.c
@@ -51,9 +51,13 @@ vfs_vm_object_t *vbe_get_vm_object(u64 length, u64 offset, vfs_fd_t *fd) {
   (void)fd;
   (void)length;
   (void)offset;
-  vbe_vm_object.size = framebuffer_size;
   int n = (uintptr_t)align_page((void *)(u32)framebuffer_size) / 0x1000;
   vbe_vm_object.object = kmalloc(sizeof(void *) * n);
+  if (!vbe_vm_object.object) {
+    return NULL;
+  }
+  vbe_vm_object.size = framebuffer_size;
+
   for (int i = 0; i < n; i++) {
     vbe_vm_object.object[i] = (void *)framebuffer_physical + (i * 0x1000);
   }
diff --git a/kernel/kmalloc.c b/kernel/kmalloc.c
index f9513d7..b7c5dec 100644
--- a/kernel/kmalloc.c
+++ b/kernel/kmalloc.c
@@ -18,7 +18,7 @@ void *kmalloc_align(size_t s, void **physical) {
   // TODO: It should reuse virtual regions so that it does not run out
   // of address space.
   void *rc;
-  if ((void *)-1 == (rc = ksbrk_physical(s, physical))) {
+  if (!(rc = ksbrk_physical(s, physical))) {
     return NULL;
   }
   return rc;
@@ -59,6 +59,9 @@ u32 total_heap_size = 0;
 
 int init_heap(void) {
   head = (MallocHeader *)ksbrk(NEW_ALLOC_SIZE);
+  if (!head) {
+    return 0;
+  }
   total_heap_size += NEW_ALLOC_SIZE - sizeof(MallocHeader);
   head->magic = 0xdde51ab9410268b1;
   head->size = NEW_ALLOC_SIZE - sizeof(MallocHeader);
@@ -73,7 +76,7 @@ int add_heap_memory(size_t min_desired) {
   size_t allocation_size = max(min_desired, NEW_ALLOC_SIZE);
   allocation_size += delta_page(allocation_size);
   void *p;
-  if ((void *)(-1) == (p = (void *)ksbrk(allocation_size))) {
+  if (!(p = (void *)ksbrk(allocation_size))) {
     return 0;
   }
   total_heap_size += allocation_size - sizeof(MallocHeader);
@@ -122,7 +125,9 @@ MallocHeader *find_free_entry(u32 s) {
   // A new header is required as well as the newly allocated chunk
   s += sizeof(MallocHeader);
   if (!head) {
-    init_heap();
+    if (!init_heap()) {
+      return NULL;
+    }
   }
   MallocHeader *p = head;
   for (; p; p = next_header(p)) {
diff --git a/kernel/sched/scheduler.c b/kernel/sched/scheduler.c
index a2b6091..b249e3d 100644
--- a/kernel/sched/scheduler.c
+++ b/kernel/sched/scheduler.c
@@ -608,12 +608,14 @@ void *mmap(void *addr, size_t length, int prot, int flags, int fd,
     return (void *)-1;
   }
   *ptr = kmalloc(sizeof(MemoryMap));
+  if (!*ptr) {
+    return (void *)-ENOMEM;
+  }
   MemoryMap *free_map = *ptr;
   if (-1 == fd) {
     void *rc = allocate_virtual_user_memory(length, prot, flags);
     if (!rc) {
-      kprintf("ENOMEM\n");
       return (void *)-ENOMEM;
     }
     free_map->u_address = rc;
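The change applies one convention throughout: check every allocation, and hand the failure back to the caller (NULL or 0 inside the kernel, (void *)-ENOMEM on the mmap path) instead of continuing with an invalid pointer. Below is a minimal, standalone sketch of that caller-side pattern. It is not part of the commit; fake_kmalloc(), make_node() and FAKE_ENOMEM are hypothetical stand-ins for the kernel's kmalloc(), its callers and ENOMEM.

```c
/* Illustrative sketch only, not part of the commit: shows the
 * check-and-propagate pattern the patch applies to allocation failures. */
#include <stdio.h>
#include <stdlib.h>

#define FAKE_ENOMEM 12 /* hypothetical stand-in for the kernel's ENOMEM */

/* Stand-in for kmalloc(): after the patch, allocation failure is
 * reported as NULL rather than the old (void *)-1 sentinel. */
static void *fake_kmalloc(size_t s) {
  if (s > 1024) {
    return NULL; /* pretend anything over 1 KiB is "out of memory" */
  }
  return malloc(s);
}

/* Hypothetical caller: the failure is returned as an error code, the way
 * mmap() in the patch returns (void *)-ENOMEM, instead of being ignored. */
static int make_node(size_t s, void **out) {
  void *p = fake_kmalloc(s);
  if (!p) {
    return -FAKE_ENOMEM;
  }
  *out = p;
  return 0;
}

int main(void) {
  void *node;
  if (-FAKE_ENOMEM == make_node(4096, &node)) {
    printf("allocation failed, error propagated to the caller\n");
  }
  if (0 == make_node(64, &node)) {
    printf("allocation succeeded\n");
    free(node);
  }
  return 0;
}
```

Returning NULL instead of the old (void *)-1 sentinel is also what lets callers collapse checks like `if ((void *)-1 == rc)` into a plain `if (!rc)`, which is exactly the rewrite applied to kmalloc_align() and add_heap_memory() above.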