diff options
author | Anton Kling <anton@kling.gg> | 2024-04-13 21:07:02 +0200 |
---|---|---|
committer | Anton Kling <anton@kling.gg> | 2024-04-13 21:07:02 +0200 |
commit | 92f848244796881994c1f147633123c45da219b6 (patch) | |
tree | 4ad209b6ede26b8475b7f0c6afb2382336dbbac7 | |
parent | 008b84bf5308d2f180905130653a656bfedccf8c (diff) |
Kernel: Don't hard fail if the kernel can't allocate memory.
Currently this is just an improvement of error handling, but it should
also try to free up memory where possible.
-rw-r--r-- | kernel/arch/i386/mmu.c | 72 | ||||
-rw-r--r-- | kernel/elf.c | 9 | ||||
-rw-r--r-- | kernel/fs/shm.c | 4 | ||||
-rw-r--r-- | kernel/includes/mmu.h | 4 | ||||
-rw-r--r-- | kernel/process.s | 5 | ||||
-rw-r--r-- | kernel/sched/scheduler.c | 19 |
6 files changed, 79 insertions, 34 deletions
diff --git a/kernel/arch/i386/mmu.c b/kernel/arch/i386/mmu.c index b9501ff..ce20208 100644 --- a/kernel/arch/i386/mmu.c +++ b/kernel/arch/i386/mmu.c @@ -26,7 +26,8 @@ u32 *tmp_small_frames = tmp_array; extern uintptr_t data_end; void change_frame(u32 frame, int on); -u32 get_free_frame(void); +int get_free_frame(u32 *frame); +int allocate_frame(Page *page, int rw, int is_kernel); void *ksbrk(size_t s) { uintptr_t rc = (uintptr_t)align_page((void *)data_end); @@ -55,9 +56,11 @@ void *ksbrk(size_t s) { active_directory->physical_tables[table_index]; return ksbrk(s); } - mmu_allocate_shared_kernel_region((void *)rc, (data_end - (uintptr_t)rc)); + if (!mmu_allocate_shared_kernel_region((void *)rc, + (data_end - (uintptr_t)rc))) { + return NULL; + } assert(((uintptr_t)rc % PAGE_SIZE) == 0); - return (void *)rc; } @@ -118,7 +121,7 @@ void mmu_free_pages(void *a, u32 n) { } u32 start_frame_search = 1; -u32 first_free_frame(void) { +int get_free_frame(u32 *frame) { u32 i = start_frame_search; for (; i < INDEX_FROM_BIT(num_array_frames * 32); i++) { if (tmp_small_frames[i] == 0xFFFFFFFF) { @@ -128,11 +131,13 @@ u32 first_free_frame(void) { for (u32 c = 0; c < 32; c++) { if (!(tmp_small_frames[i] & ((u32)1 << c))) { start_frame_search = i; - return i * 32 + c; + *frame = i * 32 + c; + return 1; } } } - assert(0); + klog("MMU: Ran out of free frames. 
TODO: free up memory", LOG_WARN); + *frame = 0; return 0; } @@ -166,7 +171,11 @@ PageTable *clone_table(u32 src_index, PageDirectory *src_directory, new_table->pages[i].present = 0; continue; } - u32 frame_address = first_free_frame(); + u32 frame_address; + if (!get_free_frame(&frame_address)) { + kmalloc_align_free(new_table, sizeof(PageTable)); + return NULL; + } write_to_frame(frame_address * 0x1000, 1); new_table->pages[i].frame = frame_address; @@ -254,6 +263,10 @@ PageDirectory *clone_directory(PageDirectory *original) { if (i >= 635 && i <= 641) { u32 physical; new_directory->tables[i] = clone_table(i, original, &physical); + if (!new_directory->tables[i]) { + mmu_free_pagedirectory(new_directory); + return NULL; + } new_directory->physical_tables[i] = physical | (original->physical_tables[i] & 0xFFF); continue; @@ -270,6 +283,10 @@ PageDirectory *clone_directory(PageDirectory *original) { u32 physical; new_directory->tables[i] = clone_table(i, original, &physical); + if (!new_directory->tables[i]) { + mmu_free_pagedirectory(new_directory); + return NULL; + } new_directory->physical_tables[i] = physical | (original->physical_tables[i] & 0xFFF); } @@ -277,14 +294,18 @@ PageDirectory *clone_directory(PageDirectory *original) { return new_directory; } -void mmu_allocate_shared_kernel_region(void *rc, size_t n) { +int mmu_allocate_shared_kernel_region(void *rc, size_t n) { size_t num_pages = n / PAGE_SIZE; for (size_t i = 0; i <= num_pages; i++) { Page *p = get_page((void *)(rc + i * 0x1000), NULL, PAGE_ALLOCATE, 0); if (!p->present || !p->frame) { - allocate_frame(p, 0, 1); + if (!allocate_frame(p, 0, 1)) { + mmu_free_address_range(rc, n, NULL); + return 0; + } } } + return 1; } void mmu_remove_virtual_physical_address_mapping(void *ptr, size_t length) { @@ -327,7 +348,7 @@ int mmu_allocate_region(void *ptr, size_t n, mmu_flags flags, int rw = (flags & MMU_FLAG_RW); int kernel = (flags & MMU_FLAG_KERNEL); if (!allocate_frame(p, rw, kernel)) { - klog("MMU: 
Frame allocation failed", LOG_WARN); + mmu_free_address_range(ptr, n, pd); return 0; } } @@ -370,22 +391,23 @@ void *mmu_map_frames(void *const ptr, size_t s) { return r; } -void *allocate_frame(Page *page, int rw, int is_kernel) { +int allocate_frame(Page *page, int rw, int is_kernel) { if (page->present) { - dump_backtrace(5); klog("Page is already set", 1); - for (;;) - ; + assert(0); + return 0; + } + u32 frame_address; + if (!get_free_frame(&frame_address)) { return 0; } - u32 frame_address = first_free_frame(); write_to_frame(frame_address * 0x1000, 1); page->present = 1; page->rw = rw; page->user = !is_kernel; page->frame = frame_address; - return (void *)(frame_address * 0x1000); + return 1; } void mmu_free_pagedirectory(PageDirectory *pd) { @@ -518,9 +540,11 @@ void *virtual_to_physical(void *address, PageDirectory *directory) { } extern u32 inital_esp; -void move_stack(u32 new_stack_address, u32 size) { - mmu_allocate_region((void *)(new_stack_address - size), size, MMU_FLAG_KERNEL, - NULL); +int move_stack(u32 new_stack_address, u32 size) { + if (!mmu_allocate_region((void *)(new_stack_address - size), size, + MMU_FLAG_KERNEL, NULL)) { + return 0; + } u32 old_stack_pointer, old_base_pointer; @@ -548,6 +572,7 @@ void move_stack(u32 new_stack_address, u32 size) { // Actually change the stack set_sp(new_stack_pointer + 8); set_sbp(new_base_pointer); + return 1; } // C strings have a unknown length so it does not makes sense to check @@ -675,10 +700,15 @@ void paging_init(u64 memsize, multiboot_info_t *mb) { create_table(770 + i); } kernel_directory = clone_directory(kernel_directory); + assert(kernel_directory); switch_page_directory(kernel_directory); - switch_page_directory(clone_directory(kernel_directory)); - move_stack(0xA0000000, 0x80000); + { + PageDirectory *tmp = clone_directory(kernel_directory); + assert(tmp); + switch_page_directory(tmp); + } + assert(move_stack(0xA0000000, 0x80000)); available_memory_kb = memsize; diff --git a/kernel/elf.c 
b/kernel/elf.c index 3d03335..01f1e53 100644 --- a/kernel/elf.c +++ b/kernel/elf.c @@ -6,7 +6,6 @@ #include <typedefs.h> void *load_elf_file(const char *f, u32 *ds) { - // ELFHeader *header = kmalloc(sizeof(ELFHeader)); ELFHeader header; int fd = vfs_open(f, O_RDONLY, 0); if (fd < 0) { @@ -46,10 +45,10 @@ void *load_elf_file(const char *f, u32 *ds) { pages_to_allocate -= p_vaddr - (p_vaddr % 0x1000); pages_to_allocate /= 0x1000; - mmu_allocate_region((void *)p_vaddr, pages_to_allocate * 0x1000, - MMU_FLAG_RW, NULL); - - flush_tlb(); + if(!mmu_allocate_region((void *)p_vaddr, pages_to_allocate * 0x1000, + MMU_FLAG_RW, NULL)) { + return NULL; + } uintptr_t e = program_header.p_vaddr + program_header.p_memsz; if (e > end_of_code) { diff --git a/kernel/fs/shm.c b/kernel/fs/shm.c index 2a2995b..0735efc 100644 --- a/kernel/fs/shm.c +++ b/kernel/fs/shm.c @@ -60,7 +60,9 @@ int shm_ftruncate(vfs_fd_t *fd, size_t length) { } p->real_pointer = mmu_find_unallocated_virtual_range(NULL, length); - mmu_allocate_region(p->real_pointer, length, MMU_FLAG_RW, NULL); + if (!mmu_allocate_region(p->real_pointer, length, MMU_FLAG_RW, NULL)) { + return -ENOMEM; + } p->size = length; p->virtual_object = p->real_pointer; diff --git a/kernel/includes/mmu.h b/kernel/includes/mmu.h index 211a950..83ea6e0 100644 --- a/kernel/includes/mmu.h +++ b/kernel/includes/mmu.h @@ -41,7 +41,7 @@ typedef struct PageDirectory { int mmu_allocate_region(void *ptr, size_t n, mmu_flags flags, PageDirectory *pd); void mmu_free_pagedirectory(PageDirectory *pd); -void mmu_allocate_shared_kernel_region(void *rc, size_t n); +int mmu_allocate_shared_kernel_region(void *rc, size_t n); void *mmu_find_unallocated_virtual_range(void *addr, size_t length); void mmu_remove_virtual_physical_address_mapping(void *ptr, size_t length); void mmu_free_address_range(void *ptr, size_t length, PageDirectory *pd); @@ -59,9 +59,7 @@ void *mmu_is_valid_user_c_string(const char *ptr, size_t *size); void flush_tlb(void); void 
paging_init(u64 memsize, multiboot_info_t *mb); PageDirectory *get_active_pagedirectory(void); -void move_stack(u32 new_stack_address, u32 size); void switch_page_directory(PageDirectory *directory); -void *allocate_frame(Page *page, int rw, int is_kernel); PageDirectory *clone_directory(PageDirectory *original); void *virtual_to_physical(void *address, PageDirectory *directory); void *ksbrk(size_t s); diff --git a/kernel/process.s b/kernel/process.s index ed87959..fd0462f 100644 --- a/kernel/process.s +++ b/kernel/process.s @@ -67,7 +67,10 @@ internal_fork: push eax call create_process add esp, 0xC - + cmp eax, 0 + jnz internal_fork_ret + mov eax, 1 +internal_fork_ret: pop ebp ret after_internal_fork: diff --git a/kernel/sched/scheduler.c b/kernel/sched/scheduler.c index 45969b0..6f36a76 100644 --- a/kernel/sched/scheduler.c +++ b/kernel/sched/scheduler.c @@ -111,8 +111,15 @@ process_t *create_process(process_t *p, u32 esp, u32 eip) { sizeof(current_task->program_name)); } - r->cr3 = (p) ? clone_directory(get_active_pagedirectory()) - : get_active_pagedirectory(); + if (p) { + r->cr3 = clone_directory(p->cr3); + if (!r->cr3) { + kfree(r); + return NULL; + } + } else { + r->cr3 = get_active_pagedirectory(); + } r->parent = p; r->tcb = kcalloc(1, sizeof(struct TCB)); @@ -168,6 +175,7 @@ process_t *create_process(process_t *p, u32 esp, u32 eip) { void tasking_init(void) { current_task = ready_queue = create_process(NULL, 0, 0); + assert(current_task); current_task_TCB = current_task->tcb; current_task->tcb->CR3 = current_task->cr3->physical_address; } @@ -321,9 +329,14 @@ process_t *internal_fork(process_t *parent); int fork(void) { process_t *new_task; new_task = internal_fork(current_task); - if (NULL == new_task) { + if (0 == new_task) { return 0; } + if ((process_t *)1 == new_task) { + return -ENOMEM; // FIXME: This is probably the reason it failed now but is + // not the only reason it could fail(at least in the + // future). 
+ } process_t *tmp_task = (process_t *)ready_queue; for (; tmp_task->next;) { |