#include "debug.h"
#include "kernel/calls.h"
#include "kernel/errno.h"
#include "kernel/task.h"
#include "fs/fd.h"
#include "emu/memory.h"
#include "kernel/mm.h"

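// Allocate a fresh struct mm with an empty, initialized page table and a
// reference count of one. start_brk and brk are left at 0 and are expected to
// be filled in by exec.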
struct mm *mm_new() {
    struct mm *mm = malloc(sizeof(struct mm));
    if (mm == NULL)
        return NULL;
    mem_init(&mm->mem);
    mm->start_brk = mm->brk = 0; // should get overwritten by exec
    mm->exefile = NULL;
    mm->refcount = 1;
    return mm;
}

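// Duplicate an address space for fork: the struct is copied wholesale, the new
// page table starts out empty, and the parent's pages are shared into the
// child copy-on-write while the parent's mem lock is held for reading.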
struct mm *mm_copy(struct mm *mm) {
    struct mm *new_mm = malloc(sizeof(struct mm));
    if (new_mm == NULL)
        return NULL;
    *new_mm = *mm;
    mem_init(&new_mm->mem);
    fd_retain(new_mm->exefile);
    read_wrlock(&mm->mem.lock);
    pt_copy_on_write(&mm->mem, &new_mm->mem, 0, MEM_PAGES);
    read_wrunlock(&mm->mem.lock);
    return new_mm;
}

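// Reference counting for struct mm: tasks that share an address space call
// mm_retain, and the last mm_release tears down the page table and closes the
// exe file. A typical pairing (sketch, not code from this file):
//     mm_retain(task->mm);    // another task starts sharing this mm
//     ...
//     mm_release(task->mm);   // done with it; the final release frees it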
void mm_retain(struct mm *mm) {
    mm->refcount++;
}

void mm_release(struct mm *mm) {
    if (--mm->refcount == 0) {
        if (mm->exefile != NULL)
            fd_close(mm->exefile);
        mem_destroy(&mm->mem);
        free(mm);
    }
}

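// Shared worker for the mmap family. The caller must already hold the mem
// write lock. With addr == 0 a free range of pages is picked; otherwise addr
// must be page aligned. Anonymous private mappings go through pt_map_nothing;
// file-backed mappings are delegated to the fd's mmap op. Returns the mapped
// address or a negative error code.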
static addr_t do_mmap(addr_t addr, dword_t len, dword_t prot, dword_t flags, fd_t fd_no, dword_t offset) {
    int err;
    pages_t pages = PAGE_ROUND_UP(len);
    page_t page;
    if (addr == 0) {
        page = pt_find_hole(current->mem, pages);
        if (page == BAD_PAGE)
            return _ENOMEM;
    } else {
        if (PGOFFSET(addr) != 0)
            return _EINVAL;
        page = PAGE(addr);
    }
    if (flags & MMAP_ANONYMOUS) {
        if (!(flags & MMAP_PRIVATE)) {
            TODO("MMAP_SHARED");
            return _EINVAL;
        }
        if ((err = pt_map_nothing(current->mem, page, pages, prot)) < 0)
            return err;
    } else {
        // fd must be valid
        struct fd *fd = f_get(fd_no);
        if (fd == NULL)
            return _EBADF;
        if (fd->ops->mmap == NULL)
            return _ENODEV;
        if ((err = fd->ops->mmap(fd, current->mem, page, pages, offset, prot, flags)) < 0)
            return err;
    }
    return page << PAGE_BITS;
}

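// Common validation and locking for sys_mmap/sys_mmap2: rejects zero lengths
// and unknown protection bits, then runs do_mmap under the mem write lock.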
static addr_t mmap_common(addr_t addr, dword_t len, dword_t prot, dword_t flags, fd_t fd_no, dword_t offset) {
    STRACE("mmap(0x%x, 0x%x, 0x%x, 0x%x, %d, %d)", addr, len, prot, flags, fd_no, offset);
    if (len == 0)
        return _EINVAL;
    if (prot & ~(P_READ | P_WRITE | P_EXEC))
        return _EINVAL;

    write_wrlock(&current->mem->lock);
    addr_t res = do_mmap(addr, len, prot, flags, fd_no, offset);
    write_wrunlock(&current->mem->lock);
    return res;
}

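// mmap2 receives its file offset in page-size units; it is scaled to a byte
// offset here before the common path runs.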
addr_t sys_mmap2(addr_t addr, dword_t len, dword_t prot, dword_t flags, fd_t fd_no, dword_t offset) {
    return mmap_common(addr, len, prot, flags, fd_no, offset << PAGE_BITS);
}

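// Old-style mmap: the six arguments arrive bundled in a single
// mmap_arg_struct, with the offset already in bytes.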
addr_t sys_mmap(struct mmap_arg_struct *args) {
    return mmap_common(args->addr, args->len, args->prot, args->flags, args->fd, args->offset);
}

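// munmap: addr must be page aligned and len nonzero; the length is rounded up
// to whole pages and the range is unmapped under the mem write lock.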
int_t sys_munmap(addr_t addr, uint_t len) {
    STRACE("munmap(0x%x, 0x%x)", addr, len);
    if (PGOFFSET(addr) != 0)
        return _EINVAL;
    if (len == 0)
        return _EINVAL;
    write_wrlock(&current->mem->lock);
    int err = pt_unmap(current->mem, PAGE(addr), PAGE_ROUND_UP(len), 0);
    write_wrunlock(&current->mem->lock);
    if (err < 0)
        return _EINVAL;
    return 0;
}

#define MREMAP_MAYMOVE_ 1
#define MREMAP_FIXED_ 2

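// mremap: only in-place resizing is supported. Shrinking unmaps the tail of
// the mapping; growing is allowed only for anonymous mappings whose pages all
// share the same flags, and only while the pages just past the old end are
// still a hole. MREMAP_FIXED is not implemented. Note: unlike the other
// handlers in this file, this path does not take the mem write lock.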
int_t sys_mremap(addr_t addr, dword_t old_len, dword_t new_len, dword_t flags) {
    STRACE("mremap(%#x, %#x, %#x, %d)", addr, old_len, new_len, flags);
    if (PGOFFSET(addr) != 0)
        return _EINVAL;
    if (flags & ~(MREMAP_MAYMOVE_ | MREMAP_FIXED_))
        return _EINVAL;
    if (flags & MREMAP_FIXED_) {
        FIXME("missing MREMAP_FIXED");
        return _EINVAL;
    }
    // lengths are rounded up to whole pages
    pages_t old_pages = PAGE_ROUND_UP(old_len);
    pages_t new_pages = PAGE_ROUND_UP(new_len);

    // shrinking always works
    if (new_pages <= old_pages) {
        int err = pt_unmap(current->mem, PAGE(addr) + new_pages, old_pages - new_pages, 0);
        if (err < 0)
            return _EFAULT;
        return addr;
    }

    struct pt_entry *entry = mem_pt(current->mem, PAGE(addr));
    if (entry == NULL)
        return _EFAULT;
    dword_t pt_flags = entry->flags;
    for (page_t page = PAGE(addr); page < PAGE(addr) + old_pages; page++) {
        entry = mem_pt(current->mem, page);
        if (entry == NULL || entry->flags != pt_flags)
            return _EFAULT;
    }
    if (!(pt_flags & P_ANON)) {
        FIXME("mremap grow on file mappings");
        return _EFAULT;
    }
    page_t extra_start = PAGE(addr) + old_pages;
    pages_t extra_pages = new_pages - old_pages;
    if (!pt_is_hole(current->mem, extra_start, extra_pages))
        return _ENOMEM;
    int err = pt_map_nothing(current->mem, extra_start, extra_pages, pt_flags);
    if (err < 0)
        return err;
    return addr;
}

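// mprotect: addr must be page aligned and prot limited to read/write/exec;
// the flags on every page in the range are updated under the mem write lock.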
int_t sys_mprotect(addr_t addr, uint_t len, int_t prot) {
    STRACE("mprotect(0x%x, 0x%x, 0x%x)", addr, len, prot);
    if (PGOFFSET(addr) != 0)
        return _EINVAL;
    if (prot & ~(P_READ | P_WRITE | P_EXEC))
        return _EINVAL;
    pages_t pages = PAGE_ROUND_UP(len);
    write_wrlock(&current->mem->lock);
    int err = pt_set_flags(current->mem, PAGE(addr), pages, prot);
    write_wrunlock(&current->mem->lock);
    return err;
}

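// madvise: the advice is accepted and ignored; every call reports success.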
dword_t sys_madvise(addr_t UNUSED(addr), dword_t UNUSED(len), dword_t UNUSED(advice)) {
    // portable applications should not rely on linux's destructive semantics for MADV_DONTNEED.
    return 0;
}

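// mbind sets NUMA memory policy, which does not apply here; the call is
// accepted and ignored.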
dword_t sys_mbind(addr_t UNUSED(addr), dword_t UNUSED(len), int_t UNUSED(mode),
        addr_t UNUSED(nodemask), dword_t UNUSED(maxnode), uint_t UNUSED(flags)) {
    return 0;
}

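// mlock is likewise accepted and ignored, reporting success unconditionally.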
int_t sys_mlock(addr_t UNUSED(addr), dword_t UNUSED(len)) {
    return 0;
}

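// brk: with an argument of 0 the current break is returned. Growing the heap
// maps writable pages between the old and new break; shrinking unmaps the
// pages past the new break. The break may not be moved below start_brk.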
addr_t sys_brk(addr_t new_brk) {
    STRACE("brk(0x%x)", new_brk);
    struct mm *mm = current->mm;

    if (new_brk != 0 && new_brk < mm->start_brk)
        return _EINVAL;
    write_wrlock(&mm->mem.lock);
    addr_t old_brk = mm->brk;
    if (new_brk == 0) {
        write_wrunlock(&mm->mem.lock);
        return old_brk;
    }
    // TODO check for not going too high

    if (new_brk > old_brk) {
        // expand heap: map region from old_brk to new_brk
        int err = pt_map_nothing(&mm->mem, PAGE_ROUND_UP(old_brk),
                PAGE_ROUND_UP(new_brk) - PAGE_ROUND_UP(old_brk), P_WRITE);
        if (err < 0) {
            write_wrunlock(&mm->mem.lock);
            return err;
        }
    } else if (new_brk < old_brk) {
        // shrink heap: unmap region from new_brk to old_brk
        // first page to unmap is PAGE(new_brk)
        // last page to unmap is PAGE(old_brk)
        pt_unmap(&mm->mem, PAGE(new_brk), PAGE(old_brk) - PAGE(new_brk) + 1, PT_FORCE);
    }

    mm->brk = new_brk;
    write_wrunlock(&mm->mem.lock);
    return new_brk;
}