[PATCH] uml: skas0 stubs now check system call return values

Change the syscall stub's data to include an expected return value for each
syscall.  The stub now checks each syscall's return value and aborts execution
of the syscall list if the return value does not match the expected one.
run_syscall_stub() prints the data of the failed syscall, using the data
pointer and return value that the stub writes to the beginning of its stack.
one_syscall_stub() is removed to simplify the code, since it only saved a few
instructions, not a host syscall.  Using the stub with additional data
(modify_ldt via stub) is also prepared.

Signed-off-by: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 07bf731e4b
parent 8b51304ed3
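
For orientation, the data layout that the reworked stub and run_syscall_stub() agree on can be sketched in plain C. The snippet below is illustrative only and not part of the patch; the struct name is made up, but the field order mirrors the *stack++ writes in run_syscall_stub() and the syscall[0]..syscall[7] read-back in do_syscall_stub() in the diff that follows.

    #include <stdio.h>

    /*
     * Illustrative sketch, not UML code: one batched operation as laid out on
     * the stub's data page.  Each record is preceded by a length word that
     * covers the word itself plus any additional data; a zero length word
     * terminates the list.  The first two longs of the page are reserved for
     * the stub's result header: [0] = return value, [1] = offset of the
     * failed record (0 if everything succeeded).
     */
    struct stub_syscall_record {
            unsigned long nr;       /* syscall number */
            unsigned long args[6];  /* syscall arguments */
            unsigned long expected; /* expected return value; a mismatch aborts the list */
    };

    int main(void)
    {
            printf("one record occupies %zu longs plus its length word\n",
                   sizeof(struct stub_syscall_record) / sizeof(unsigned long));
            return 0;
    }
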
@@ -38,9 +38,9 @@ extern void mprotect_kernel_vm(int w);
 extern void force_flush_all(void);
 extern void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                              unsigned long end_addr, int force,
-                             void *(*do_ops)(union mm_context *,
+                             int (*do_ops)(union mm_context *,
                                              struct host_vm_op *, int, int,
-                                             void *));
+                                             void **));
 extern int flush_tlb_kernel_range_common(unsigned long start,
                                          unsigned long end);
 
@@ -24,14 +24,14 @@ extern void new_thread_proc(void *stack, void (*handler)(int sig));
 extern void remove_sigstack(void);
 extern void new_thread_handler(int sig);
 extern void handle_syscall(union uml_pt_regs *regs);
-extern void *map(struct mm_id * mm_idp, unsigned long virt,
+extern int map(struct mm_id * mm_idp, unsigned long virt,
                  unsigned long len, int r, int w, int x, int phys_fd,
-                 unsigned long long offset, int done, void *data);
+                 unsigned long long offset, int done, void **data);
-extern void *unmap(struct mm_id * mm_idp, void *addr,
-                   unsigned long len, int done, void *data);
+extern int unmap(struct mm_id * mm_idp, void *addr, unsigned long len,
+                 int done, void **data);
-extern void *protect(struct mm_id * mm_idp, unsigned long addr,
+extern int protect(struct mm_id * mm_idp, unsigned long addr,
                    unsigned long len, int r, int w, int x, int done,
-                   void *data);
+                   void **data);
 extern void user_signal(int sig, union uml_pt_regs *regs, int pid);
 extern int new_mm(int from, unsigned long stack);
 extern int start_userspace(unsigned long stub_stack);
@@ -39,16 +39,11 @@ extern int copy_context_skas0(unsigned long stack, int pid);
 extern void get_skas_faultinfo(int pid, struct faultinfo * fi);
 extern long execute_syscall_skas(void *r);
 extern unsigned long current_stub_stack(void);
+extern long run_syscall_stub(struct mm_id * mm_idp,
+                             int syscall, unsigned long *args, long expected,
+                             void **addr, int done);
+extern long syscall_stub_data(struct mm_id * mm_idp,
+                              unsigned long *data, int data_count,
+                              void **addr, void **stub_addr);
 
 #endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
@@ -5,13 +5,14 @@
 
 #include <signal.h>
 #include <errno.h>
+#include <string.h>
 #include <sys/mman.h>
 #include <sys/wait.h>
 #include <asm/page.h>
 #include <asm/unistd.h>
 #include "mem_user.h"
 #include "mem.h"
-#include "mm_id.h"
+#include "skas.h"
 #include "user.h"
 #include "os.h"
 #include "proc_mm.h"
@@ -23,56 +24,99 @@
 #include "uml-config.h"
 #include "sysdep/ptrace.h"
 #include "sysdep/stub.h"
-#include "skas.h"
 
-extern unsigned long syscall_stub, batch_syscall_stub, __syscall_stub_start;
+extern unsigned long batch_syscall_stub, __syscall_stub_start;
 
 extern void wait_stub_done(int pid, int sig, char * fname);
 
-int single_count = 0;
-
-static long one_syscall_stub(struct mm_id * mm_idp, int syscall,
-                             unsigned long *args)
+static inline unsigned long *check_init_stack(struct mm_id * mm_idp,
+                                              unsigned long *stack)
 {
-        int n, pid = mm_idp->u.pid;
-        unsigned long regs[MAX_REG_NR];
-
-        get_safe_registers(regs);
-        regs[REGS_IP_INDEX] = UML_CONFIG_STUB_CODE +
-                ((unsigned long) &syscall_stub -
-                 (unsigned long) &__syscall_stub_start);
-        /* XXX Don't have a define for starting a syscall */
-        regs[REGS_SYSCALL_NR] = syscall;
-        regs[REGS_SYSCALL_ARG1] = args[0];
-        regs[REGS_SYSCALL_ARG2] = args[1];
-        regs[REGS_SYSCALL_ARG3] = args[2];
-        regs[REGS_SYSCALL_ARG4] = args[3];
-        regs[REGS_SYSCALL_ARG5] = args[4];
-        regs[REGS_SYSCALL_ARG6] = args[5];
-        n = ptrace_setregs(pid, regs);
-        if(n < 0){
-                printk("one_syscall_stub : PTRACE_SETREGS failed, "
-                       "errno = %d\n", n);
-                return(n);
+        if(stack == NULL){
+                stack = (unsigned long *) mm_idp->stack + 2;
+                *stack = 0;
         }
+        return stack;
+}
 
-        wait_stub_done(pid, 0, "one_syscall_stub");
+extern int proc_mm;
 
-        return(*((unsigned long *) mm_idp->stack));
-}
-
+int single_count = 0;
 int multi_count = 0;
 int multi_op_count = 0;
 
-static long many_syscall_stub(struct mm_id * mm_idp, int syscall,
-                              unsigned long *args, int done, void **addr_out)
+static long do_syscall_stub(struct mm_id *mm_idp, void **addr)
 {
-        unsigned long regs[MAX_REG_NR], *stack;
+        unsigned long regs[MAX_REG_NR];
+        unsigned long *data;
+        unsigned long *syscall;
+        long ret, offset;
         int n, pid = mm_idp->u.pid;
 
-        stack = *addr_out;
-        if(stack == NULL)
-                stack = (unsigned long *) current_stub_stack();
+        if(proc_mm)
+#warning Need to look up userspace_pid by cpu
+                pid = userspace_pid[0];
+
+        multi_count++;
+
+        get_safe_registers(regs);
+        regs[REGS_IP_INDEX] = UML_CONFIG_STUB_CODE +
+                ((unsigned long) &batch_syscall_stub -
+                 (unsigned long) &__syscall_stub_start);
+        n = ptrace_setregs(pid, regs);
+        if(n < 0)
+                panic("do_syscall_stub : PTRACE_SETREGS failed, errno = %d\n",
+                      n);
+
+        wait_stub_done(pid, 0, "do_syscall_stub");
+
+        /* When the stub stops, we find the following values on the
+         * beginning of the stack:
+         * (long )return_value
+         * (long )offset to failed sycall-data (0, if no error)
+         */
+        ret = *((unsigned long *) mm_idp->stack);
+        offset = *((unsigned long *) mm_idp->stack + 1);
+        if (offset) {
+                data = (unsigned long *)(mm_idp->stack +
+                                         offset - UML_CONFIG_STUB_DATA);
+                syscall = (unsigned long *)((unsigned long)data + data[0]);
+                printk("do_syscall_stub: syscall %ld failed, return value = "
+                       "0x%lx, expected return value = 0x%lx\n",
+                       syscall[0], ret, syscall[7]);
+                printk("    syscall parameters: "
+                       "0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
+                       syscall[1], syscall[2], syscall[3],
+                       syscall[4], syscall[5], syscall[6]);
+                for(n = 1; n < data[0]/sizeof(long); n++) {
+                        if(n == 1)
+                                printk("    additional syscall data:");
+                        if(n % 4 == 1)
+                                printk("\n      ");
+                        printk("  0x%lx", data[n]);
+                }
+                if(n > 1)
+                        printk("\n");
+        }
+        else ret = 0;
+
+        *addr = check_init_stack(mm_idp, NULL);
+
+        return ret;
+}
+
+long run_syscall_stub(struct mm_id * mm_idp, int syscall,
+                      unsigned long *args, long expected, void **addr,
+                      int done)
+{
+        unsigned long *stack = check_init_stack(mm_idp, *addr);
+
+        if(done && *addr == NULL)
+                single_count++;
+
+        *stack += sizeof(long);
+        stack += *stack / sizeof(long);
 
         *stack++ = syscall;
         *stack++ = args[0];
         *stack++ = args[1];
@@ -80,53 +124,55 @@ static long many_syscall_stub(struct mm_id * mm_idp, int syscall,
         *stack++ = args[3];
         *stack++ = args[4];
         *stack++ = args[5];
+        *stack++ = expected;
         *stack = 0;
         multi_op_count++;
 
         if(!done && ((((unsigned long) stack) & ~PAGE_MASK) <
-                     PAGE_SIZE - 8 * sizeof(long))){
-                *addr_out = stack;
+                     PAGE_SIZE - 10 * sizeof(long))){
+                *addr = stack;
                 return 0;
         }
 
-        multi_count++;
-        get_safe_registers(regs);
-        regs[REGS_IP_INDEX] = UML_CONFIG_STUB_CODE +
-                ((unsigned long) &batch_syscall_stub -
-                 (unsigned long) &__syscall_stub_start);
-        regs[REGS_SP_INDEX] = UML_CONFIG_STUB_DATA;
-
-        n = ptrace_setregs(pid, regs);
-        if(n < 0){
-                printk("many_syscall_stub : PTRACE_SETREGS failed, "
-                       "errno = %d\n", n);
-                return(n);
-        }
-
-        wait_stub_done(pid, 0, "many_syscall_stub");
-        stack = (unsigned long *) mm_idp->stack;
-
-        *addr_out = stack;
-        return(*stack);
+        return do_syscall_stub(mm_idp, addr);
 }
 
-static long run_syscall_stub(struct mm_id * mm_idp, int syscall,
-                             unsigned long *args, void **addr, int done)
+long syscall_stub_data(struct mm_id * mm_idp,
+                       unsigned long *data, int data_count,
+                       void **addr, void **stub_addr)
 {
-        long res;
+        unsigned long *stack;
+        int ret = 0;
 
-        if((*addr == NULL) && done)
-                res = one_syscall_stub(mm_idp, syscall, args);
-        else res = many_syscall_stub(mm_idp, syscall, args, done, addr);
-
-        return res;
+        /* If *addr still is uninitialized, it *must* contain NULL.
+         * Thus in this case do_syscall_stub correctly won't be called.
+         */
+        if((((unsigned long) *addr) & ~PAGE_MASK) >=
+           PAGE_SIZE - (10 + data_count) * sizeof(long)) {
+                ret = do_syscall_stub(mm_idp, addr);
+                /* in case of error, don't overwrite data on stack */
+                if(ret)
+                        return ret;
+        }
+
+        stack = check_init_stack(mm_idp, *addr);
+        *addr = stack;
+
+        *stack = data_count * sizeof(long);
+
+        memcpy(stack + 1, data, data_count * sizeof(long));
+
+        *stub_addr = (void *)(((unsigned long)(stack + 1) & ~PAGE_MASK) +
+                              UML_CONFIG_STUB_DATA);
+
+        return 0;
 }
 
-void *map(struct mm_id * mm_idp, unsigned long virt, unsigned long len,
+int map(struct mm_id * mm_idp, unsigned long virt, unsigned long len,
         int r, int w, int x, int phys_fd, unsigned long long offset,
-        int done, void *data)
+        int done, void **data)
 {
-        int prot, n;
+        int prot, ret;
 
         prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) |
                 (x ? PROT_EXEC : 0);
@@ -146,29 +192,27 @@ void *map(struct mm_id * mm_idp, unsigned long virt, unsigned long len,
                                        .fd = phys_fd,
                                        .offset= offset
                                      } } } );
-                n = os_write_file(fd, &map, sizeof(map));
-                if(n != sizeof(map))
-                        printk("map : /proc/mm map failed, err = %d\n", -n);
+                ret = os_write_file(fd, &map, sizeof(map));
+                if(ret != sizeof(map))
+                        printk("map : /proc/mm map failed, err = %d\n", -ret);
+                else ret = 0;
         }
         else {
-                long res;
                 unsigned long args[] = { virt, len, prot,
                                          MAP_SHARED | MAP_FIXED, phys_fd,
                                          MMAP_OFFSET(offset) };
 
-                res = run_syscall_stub(mm_idp, STUB_MMAP_NR, args,
-                                       &data, done);
-                if((void *) res == MAP_FAILED)
-                        printk("mmap stub failed, errno = %d\n", res);
+                ret = run_syscall_stub(mm_idp, STUB_MMAP_NR, args, virt,
+                                       data, done);
         }
 
-        return data;
+        return ret;
 }
 
-void *unmap(struct mm_id * mm_idp, void *addr, unsigned long len, int done,
-            void *data)
+int unmap(struct mm_id * mm_idp, void *addr, unsigned long len, int done,
+          void **data)
 {
-        int n;
+        int ret;
 
         if(proc_mm){
                 struct proc_mm_op unmap;
@@ -180,29 +224,29 @@ void *unmap(struct mm_id * mm_idp, void *addr, unsigned long len, int done,
                                        { .addr =
                                          (unsigned long) addr,
                                          .len = len } } } );
-                n = os_write_file(fd, &unmap, sizeof(unmap));
-                if(n != sizeof(unmap))
-                        printk("unmap - proc_mm write returned %d\n", n);
+                ret = os_write_file(fd, &unmap, sizeof(unmap));
+                if(ret != sizeof(unmap))
+                        printk("unmap - proc_mm write returned %d\n", ret);
+                else ret = 0;
         }
         else {
-                int res;
                 unsigned long args[] = { (unsigned long) addr, len, 0, 0, 0,
                                          0 };
 
-                res = run_syscall_stub(mm_idp, __NR_munmap, args,
-                                       &data, done);
-                if(res < 0)
-                        printk("munmap stub failed, errno = %d\n", res);
+                ret = run_syscall_stub(mm_idp, __NR_munmap, args, 0,
+                                       data, done);
+                if(ret < 0)
+                        printk("munmap stub failed, errno = %d\n", ret);
         }
 
-        return data;
+        return ret;
 }
 
-void *protect(struct mm_id * mm_idp, unsigned long addr, unsigned long len,
-              int r, int w, int x, int done, void *data)
+int protect(struct mm_id * mm_idp, unsigned long addr, unsigned long len,
+            int r, int w, int x, int done, void **data)
 {
         struct proc_mm_op protect;
-        int prot, n;
+        int prot, ret;
 
         prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) |
                 (x ? PROT_EXEC : 0);
@@ -217,21 +261,19 @@ void *protect(struct mm_id * mm_idp, unsigned long addr, unsigned long len,
                                          .len = len,
                                          .prot = prot } } } );
 
-                n = os_write_file(fd, &protect, sizeof(protect));
-                if(n != sizeof(protect))
-                        panic("protect failed, err = %d", -n);
+                ret = os_write_file(fd, &protect, sizeof(protect));
+                if(ret != sizeof(protect))
+                        printk("protect failed, err = %d", -ret);
+                else ret = 0;
         }
         else {
-                int res;
                 unsigned long args[] = { addr, len, prot, 0, 0, 0 };
 
-                res = run_syscall_stub(mm_idp, __NR_mprotect, args,
-                                       &data, done);
-                if(res < 0)
-                        panic("mprotect stub failed, errno = %d\n", res);
+                ret = run_syscall_stub(mm_idp, __NR_mprotect, args, 0,
+                                       data, done);
         }
 
-        return data;
+        return ret;
 }
 
 void before_mem_skas(unsigned long unused)
@@ -18,27 +18,28 @@
 #include "os.h"
 #include "tlb.h"
 
-static void *do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
-                    int finished, void *flush)
+static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
+                  int finished, void **flush)
 {
         struct host_vm_op *op;
-        int i;
+        int i, ret = 0;
 
-        for(i = 0; i <= last; i++){
+        for(i = 0; i <= last && !ret; i++){
                 op = &ops[i];
                 switch(op->type){
                 case MMAP:
-                        flush = map(&mmu->skas.id, op->u.mmap.addr,
+                        ret = map(&mmu->skas.id, op->u.mmap.addr,
                                     op->u.mmap.len, op->u.mmap.r, op->u.mmap.w,
                                     op->u.mmap.x, op->u.mmap.fd,
                                     op->u.mmap.offset, finished, flush);
                         break;
                 case MUNMAP:
-                        flush = unmap(&mmu->skas.id, (void *) op->u.munmap.addr,
+                        ret = unmap(&mmu->skas.id,
+                                    (void *) op->u.munmap.addr,
                                       op->u.munmap.len, finished, flush);
                         break;
                 case MPROTECT:
-                        flush = protect(&mmu->skas.id, op->u.mprotect.addr,
+                        ret = protect(&mmu->skas.id, op->u.mprotect.addr,
                                         op->u.mprotect.len, op->u.mprotect.r,
                                         op->u.mprotect.w, op->u.mprotect.x,
                                         finished, flush);
@@ -49,7 +50,7 @@ static void *do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
                 }
         }
 
-        return flush;
+        return ret;
 }
 
 extern int proc_mm;
@@ -16,34 +16,34 @@
 #include "os.h"
 
 static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
-                    int r, int w, int x, struct host_vm_op *ops, int index,
+                    int r, int w, int x, struct host_vm_op *ops, int *index,
                     int last_filled, union mm_context *mmu, void **flush,
-                    void *(*do_ops)(union mm_context *, struct host_vm_op *,
-                                    int, int, void *))
+                    int (*do_ops)(union mm_context *, struct host_vm_op *,
+                                  int, int, void **))
 {
         __u64 offset;
         struct host_vm_op *last;
-        int fd;
+        int fd, ret = 0;
 
         fd = phys_mapping(phys, &offset);
-        if(index != -1){
-                last = &ops[index];
+        if(*index != -1){
+                last = &ops[*index];
                 if((last->type == MMAP) &&
                    (last->u.mmap.addr + last->u.mmap.len == virt) &&
                    (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
                    (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
                    (last->u.mmap.offset + last->u.mmap.len == offset)){
                         last->u.mmap.len += len;
-                        return index;
+                        return 0;
                 }
         }
 
-        if(index == last_filled){
-                *flush = (*do_ops)(mmu, ops, last_filled, 0, *flush);
-                index = -1;
+        if(*index == last_filled){
+                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
+                *index = -1;
         }
 
-        ops[++index] = ((struct host_vm_op) { .type = MMAP,
+        ops[++*index] = ((struct host_vm_op) { .type = MMAP,
                                               .u = { .mmap = {
                                                      .addr = virt,
                                                      .len = len,
@@ -53,78 +53,80 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
                                                      .fd = fd,
                                                      .offset = offset }
                                             } });
-        return index;
+        return ret;
 }
 
 static int add_munmap(unsigned long addr, unsigned long len,
-                      struct host_vm_op *ops, int index, int last_filled,
+                      struct host_vm_op *ops, int *index, int last_filled,
                       union mm_context *mmu, void **flush,
-                      void *(*do_ops)(union mm_context *, struct host_vm_op *,
-                                      int, int, void *))
+                      int (*do_ops)(union mm_context *, struct host_vm_op *,
+                                    int, int, void **))
 {
         struct host_vm_op *last;
+        int ret = 0;
 
-        if(index != -1){
-                last = &ops[index];
+        if(*index != -1){
+                last = &ops[*index];
                 if((last->type == MUNMAP) &&
                    (last->u.munmap.addr + last->u.mmap.len == addr)){
                         last->u.munmap.len += len;
-                        return index;
+                        return 0;
                 }
         }
 
-        if(index == last_filled){
-                *flush = (*do_ops)(mmu, ops, last_filled, 0, *flush);
-                index = -1;
+        if(*index == last_filled){
+                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
+                *index = -1;
         }
 
-        ops[++index] = ((struct host_vm_op) { .type = MUNMAP,
+        ops[++*index] = ((struct host_vm_op) { .type = MUNMAP,
                                               .u = { .munmap = {
                                                      .addr = addr,
                                                      .len = len } } });
-        return index;
+        return ret;
 }
 
 static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
-                        int x, struct host_vm_op *ops, int index,
+                        int x, struct host_vm_op *ops, int *index,
                         int last_filled, union mm_context *mmu, void **flush,
-                        void *(*do_ops)(union mm_context *,
-                                        struct host_vm_op *, int, int, void *))
+                        int (*do_ops)(union mm_context *, struct host_vm_op *,
+                                      int, int, void **))
 {
         struct host_vm_op *last;
+        int ret = 0;
 
-        if(index != -1){
-                last = &ops[index];
+        if(*index != -1){
+                last = &ops[*index];
                 if((last->type == MPROTECT) &&
                    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
                    (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
                    (last->u.mprotect.x == x)){
                         last->u.mprotect.len += len;
-                        return index;
+                        return 0;
                 }
         }
 
-        if(index == last_filled){
-                *flush = (*do_ops)(mmu, ops, last_filled, 0, *flush);
-                index = -1;
+        if(*index == last_filled){
+                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
+                *index = -1;
         }
 
-        ops[++index] = ((struct host_vm_op) { .type = MPROTECT,
+        ops[++*index] = ((struct host_vm_op) { .type = MPROTECT,
                                               .u = { .mprotect = {
                                                      .addr = addr,
                                                      .len = len,
                                                      .r = r,
                                                      .w = w,
                                                      .x = x } } });
-        return index;
+        return ret;
 }
 
 #define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
 
 void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                       unsigned long end_addr, int force,
-                      void *(*do_ops)(union mm_context *, struct host_vm_op *,
-                                      int, int, void *))
+                      int (*do_ops)(union mm_context *, struct host_vm_op *,
+                                    int, int, void **))
 {
         pgd_t *npgd;
         pud_t *npud;
@@ -136,19 +138,20 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
         struct host_vm_op ops[1];
         void *flush = NULL;
         int op_index = -1, last_op = sizeof(ops) / sizeof(ops[0]) - 1;
+        int ret = 0;
 
         if(mm == NULL) return;
 
         ops[0].type = NONE;
-        for(addr = start_addr; addr < end_addr;){
+        for(addr = start_addr; addr < end_addr && !ret;){
                 npgd = pgd_offset(mm, addr);
                 if(!pgd_present(*npgd)){
                         end = ADD_ROUND(addr, PGDIR_SIZE);
                         if(end > end_addr)
                                 end = end_addr;
                         if(force || pgd_newpage(*npgd)){
-                                op_index = add_munmap(addr, end - addr, ops,
-                                                      op_index, last_op, mmu,
+                                ret = add_munmap(addr, end - addr, ops,
+                                                 &op_index, last_op, mmu,
                                                       &flush, do_ops);
                                 pgd_mkuptodate(*npgd);
                         }
@@ -162,8 +165,8 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                         if(end > end_addr)
                                 end = end_addr;
                         if(force || pud_newpage(*npud)){
-                                op_index = add_munmap(addr, end - addr, ops,
-                                                      op_index, last_op, mmu,
+                                ret = add_munmap(addr, end - addr, ops,
+                                                 &op_index, last_op, mmu,
                                                       &flush, do_ops);
                                 pud_mkuptodate(*npud);
                         }
@@ -177,8 +180,8 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                         if(end > end_addr)
                                 end = end_addr;
                         if(force || pmd_newpage(*npmd)){
-                                op_index = add_munmap(addr, end - addr, ops,
-                                                      op_index, last_op, mmu,
+                                ret = add_munmap(addr, end - addr, ops,
+                                                 &op_index, last_op, mmu,
                                                       &flush, do_ops);
                                 pmd_mkuptodate(*npmd);
                         }
@@ -198,24 +201,32 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                         }
                         if(force || pte_newpage(*npte)){
                                 if(pte_present(*npte))
-                                        op_index = add_mmap(addr,
+                                        ret = add_mmap(addr,
                                                 pte_val(*npte) & PAGE_MASK,
                                                 PAGE_SIZE, r, w, x, ops,
-                                                op_index, last_op, mmu,
+                                                &op_index, last_op, mmu,
                                                 &flush, do_ops);
-                                else op_index = add_munmap(addr, PAGE_SIZE, ops,
-                                                op_index, last_op, mmu,
+                                else ret = add_munmap(addr, PAGE_SIZE, ops,
+                                                &op_index, last_op, mmu,
                                                 &flush, do_ops);
                         }
                         else if(pte_newprot(*npte))
-                                op_index = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
-                                                op_index, last_op, mmu,
+                                ret = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
+                                                &op_index, last_op, mmu,
                                                 &flush, do_ops);
 
                         *npte = pte_mkuptodate(*npte);
                         addr += PAGE_SIZE;
                 }
-        flush = (*do_ops)(mmu, ops, op_index, 1, flush);
+
+        if(!ret)
+                ret = (*do_ops)(mmu, ops, op_index, 1, &flush);
+
+        /* This is not an else because ret is modified above */
+        if(ret) {
+                printk("fix_range_common: failed, killing current process\n");
+                force_sig(SIGKILL, current);
+        }
 }
 
 int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
@@ -17,26 +17,31 @@
 #include "os.h"
 #include "tlb.h"
 
-static void *do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
-                    int finished, void *flush)
+static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
+                  int finished, void **flush)
 {
         struct host_vm_op *op;
-        int i;
+        int i, ret=0;
 
-        for(i = 0; i <= last; i++){
+        for(i = 0; i <= last && !ret; i++){
                 op = &ops[i];
                 switch(op->type){
                 case MMAP:
-                        os_map_memory((void *) op->u.mmap.addr, op->u.mmap.fd,
-                                      op->u.mmap.offset, op->u.mmap.len,
-                                      op->u.mmap.r, op->u.mmap.w,
-                                      op->u.mmap.x);
+                        ret = os_map_memory((void *) op->u.mmap.addr,
+                                            op->u.mmap.fd, op->u.mmap.offset,
+                                            op->u.mmap.len, op->u.mmap.r,
+                                            op->u.mmap.w, op->u.mmap.x);
                         break;
                 case MUNMAP:
-                        os_unmap_memory((void *) op->u.munmap.addr,
+                        ret = os_unmap_memory((void *) op->u.munmap.addr,
                                         op->u.munmap.len);
                         break;
                 case MPROTECT:
+                        ret = protect_memory(op->u.mprotect.addr,
+                                             op->u.munmap.len,
+                                             op->u.mprotect.r,
+                                             op->u.mprotect.w,
+                                             op->u.mprotect.x, 1);
                         protect_memory(op->u.mprotect.addr, op->u.munmap.len,
                                        op->u.mprotect.r, op->u.mprotect.w,
                                        op->u.mprotect.x, 1);
@@ -47,7 +52,7 @@ static void *do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
                 }
         }
 
-        return NULL;
+        return ret;
 }
 
 static void fix_range(struct mm_struct *mm, unsigned long start_addr,
@@ -2,24 +2,50 @@
 
         .globl syscall_stub
         .section .__syscall_stub, "x"
-syscall_stub:
-        int $0x80
-        mov %eax, UML_CONFIG_STUB_DATA
-        int3
 
         .globl batch_syscall_stub
 batch_syscall_stub:
-        mov $UML_CONFIG_STUB_DATA, %esp
-again:  pop %eax
+        /* load pointer to first operation */
+        mov     $(UML_CONFIG_STUB_DATA+8), %esp
+
+again:
+        /* load length of additional data */
+        mov     0x0(%esp), %eax
+
+        /* if(length == 0) : end of list */
+        /* write possible 0 to header */
+        mov     %eax, UML_CONFIG_STUB_DATA+4
         cmpl    $0, %eax
         jz      done
+
+        /* save current pointer */
+        mov     %esp, UML_CONFIG_STUB_DATA+4
+
+        /* skip additional data */
+        add     %eax, %esp
+
+        /* load syscall-# */
+        pop     %eax
+
+        /* load syscall params */
         pop     %ebx
         pop     %ecx
         pop     %edx
        pop     %esi
        pop     %edi
        pop     %ebp
+
+        /* execute syscall */
         int     $0x80
+
+        /* check return value */
+        pop     %ebx
+        cmp     %ebx, %eax
+        je      again
+
+done:
+        /* save return value */
         mov     %eax, UML_CONFIG_STUB_DATA
-        jmp again
-done:   int3
+
+        /* stop */
+        int3
@@ -16,21 +16,51 @@ syscall_stub:
 
         .globl batch_syscall_stub
 batch_syscall_stub:
-        movq $(UML_CONFIG_STUB_DATA >> 32), %rbx
-        salq $32, %rbx
-        movq $(UML_CONFIG_STUB_DATA & 0xffffffff), %rcx
-        or %rcx, %rbx
-        movq %rbx, %rsp
-again:  pop %rax
-        cmpq $0, %rax
+        mov     $(UML_CONFIG_STUB_DATA >> 32), %rbx
+        sal     $32, %rbx
+        mov     $(UML_CONFIG_STUB_DATA & 0xffffffff), %rax
+        or      %rax, %rbx
+        /* load pointer to first operation */
+        mov     %rbx, %rsp
+        add     $0x10, %rsp
+again:
+        /* load length of additional data */
+        mov     0x0(%rsp), %rax
+
+        /* if(length == 0) : end of list */
+        /* write possible 0 to header */
+        mov     %rax, 8(%rbx)
+        cmp     $0, %rax
         jz      done
+
+        /* save current pointer */
+        mov     %rsp, 8(%rbx)
+
+        /* skip additional data */
+        add     %rax, %rsp
+
+        /* load syscall-# */
+        pop     %rax
+
+        /* load syscall params */
         pop     %rdi
         pop     %rsi
         pop     %rdx
         pop     %r10
         pop     %r8
         pop     %r9
+
+        /* execute syscall */
         syscall
+
+        /* check return value */
+        pop     %rcx
+        cmp     %rcx, %rax
+        je      again
+
+done:
+        /* save return value */
         mov     %rax, (%rbx)
-        jmp again
-done:   int3
+
+        /* stop */
+        int3
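
As a closing, standalone illustration (again not part of the patch), the checking loop that the new batch_syscall_stub implements in assembly can be mimicked in ordinary user space: walk a list of records, issue each syscall, and stop at the first return value that differs from the expected one. Here syscall(2) stands in for the stub's int $0x80/syscall instruction, and the hypothetical record type mirrors the sketch near the top of this page.

    /* Stand-alone illustration, not UML code. */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    struct record {
            long nr;        /* syscall number */
            long args[6];   /* arguments */
            long expected;  /* expected return value */
    };

    int main(void)
    {
            struct record batch[] = {
                    { SYS_getpid, {0}, getpid() },  /* will match */
                    { SYS_getuid, {0}, -1 },        /* deliberately wrong */
                    { SYS_getpid, {0}, getpid() },  /* never reached */
            };
            int n = sizeof(batch) / sizeof(batch[0]);

            for (int i = 0; i < n; i++) {
                    struct record *r = &batch[i];
                    long ret = syscall(r->nr, r->args[0], r->args[1],
                                       r->args[2], r->args[3], r->args[4],
                                       r->args[5]);
                    /* abort the list on the first mismatch, as the stub does */
                    if (ret != r->expected) {
                            printf("record %d: syscall %ld failed, return value"
                                   " = %ld, expected = %ld\n",
                                   i, r->nr, ret, r->expected);
                            return 1;
                    }
            }
            printf("all %d records matched their expected return values\n", n);
            return 0;
    }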