--- sample.c.good	2010-02-12 08:25:18.000000000 -0800
+++ sample.c.p3-good	2010-03-15 09:51:53.000000000 -0700
@@ -54,6 +54,24 @@
 #include
 #include
 
+/* trace start -- headers needed by the tracing code below */
+
+#include <linux/stacktrace.h>
+#include <linux/relay.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/ptrace.h>
+
+/* trace end */
+
+
 struct task_security_struct {
 	u32 sid;
 };
@@ -75,6 +93,8 @@
 #define SAMPLE_READ_DIR		5
 #define SAMPLE_RW_DIR		6
 #define SAMPLE_TARGET_SID	7
+#define SAMPLE_UNTRUSTED_READ	8
+#define SAMPLE_UNTRUSTED_RW	9
 
 /* Mask definitions */
 #define MAY_EXEC	1
@@ -122,13 +147,334 @@
 }
 
+
+/************** from include/linux/stacktrace.h in 2.6.23.4 -
+*************** turn on CONFIG_STACKTRACE */
+
+#if 0
+struct stack_trace {
+	unsigned int nr_entries, max_entries;
+	unsigned long *entries;
+	int skip;	/* input argument: How many entries to skip */
+};
+#endif
+
+
+/************** from arch/x86/kernel/stacktrace.c in 2.6.32.2 */
+
+struct stack_frame {
+	const void __user *next_fp;	/* caller's saved frame pointer */
+	unsigned long ret_addr;		/* return address into the caller */
+};
+
+static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+{
+	int ret;
+
+	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
+		return 0;
+
+	ret = 1;
+	pagefault_disable();
+	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
+		ret = 0;
+	pagefault_enable();
+
+	return ret;
+}
+
+static inline void __save_stack_trace_user(struct stack_trace *trace)
+{
+	const struct pt_regs *regs = task_pt_regs(current);
+	const void __user *fp = (const void __user *)regs->ebp; /* TJ: bp in 2.6.32 */
+
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = regs->eip; /* TJ: ip in 2.6.32 */
+
+	while (trace->nr_entries < trace->max_entries) {
+		struct stack_frame frame;
+
+		frame.next_fp = NULL;
+		frame.ret_addr = 0;
+		if (!copy_stack_frame(fp, &frame))
+			break;
+		if ((unsigned long)fp < regs->esp) /* TJ: sp in 2.6.32 */
+			break;
+		if (frame.ret_addr)
+			trace->entries[trace->nr_entries++] = frame.ret_addr;
+		if (fp == frame.next_fp)
+			break;
+		fp = frame.next_fp;
+	}
+}
+
+void save_stack_trace_user(struct stack_trace *trace)
+{
+	/*
+	 * Trace the user stack if we are not a kernel thread.
+	 */
+	if (current->mm)
+		__save_stack_trace_user(trace);
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+
+/************** end from arch/x86/kernel/stacktrace.c in 2.6.32.2 */
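+/*
+ * Illustrative sketch (not part of the original patch): the walk above
+ * relies on the classic i386 frame layout that GCC emits when code is
+ * built without -fomit-frame-pointer.  Each function prologue does:
+ *
+ *	push %ebp		; saved caller EBP  -> frame.next_fp
+ *	mov  %esp, %ebp		; return addr now at 4(%ebp) -> frame.ret_addr
+ *
+ * so struct stack_frame overlays exactly the two words at a callee's EBP,
+ * and following next_fp hops frame-by-frame toward the stack base.  User
+ * code compiled with frame pointers omitted will terminate the walk early.
+ */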
+/************** trace start ************************/
+
+/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
+
+#define MAX_NUM_FRAMES	32
+
+struct proc_info {
+	pid_t	pid;
+	char	proc_name[TASK_COMM_LEN];
+};
+
+struct log_entry {
+	struct stack_trace trace;
+	struct proc_info info;
+	/* index into trace of the first IP inside the program itself;
+	 * -1 if invalid */
+	unsigned long trace_first_program_ip;
+	/* 1 if a program IP exists; 0 if not.  When 0,
+	 * trace_first_program_ip points at the last library IP before the
+	 * ULONG_MAX terminator, or is -1 if invalid */
+	int program_ip_exists;
+	unsigned long vma_start;
+	struct list_head list;
+};
+
+/*
+ * Callback to create a wall_interfaces buffer file
+ */
+static struct dentry *create_wall_interfaces_file_callback(const char *filename,
+					struct dentry *parent,
+					int mode,
+					struct rchan_buf *buf,
+					int *is_global)
+{
+	return debugfs_create_file(filename, mode, parent, buf,
+				   &relay_file_operations);
+}
+
+/*
+ * Callback to remove a wall_interfaces buffer file
+ */
+static int remove_wall_interfaces_file_callback(struct dentry *dentry)
+{
+	debugfs_remove(dentry);
+	return 0;
+}
+
+/*
+ * Callback when one subbuffer is full
+ */
+static int subbuf_wall_interfaces_start_callback(struct rchan_buf *buf,
+					void *subbuf, void *prev_subbuf,
+					size_t prev_padding)
+{
+	atomic_t *dropped;
+
+	/* if (prev_subbuf)
+	 *	*((size_t *)prev_subbuf) = prev_padding; */
+	if (!relay_buf_full(buf))
+		return 1;
+	dropped = buf->chan->private_data;
+	atomic_inc(dropped);
+	printk(KERN_INFO "Sample: dropped %d due to overflow of log buffer\n",
+	       atomic_read(dropped));
+	return 0;
+}
+
+char proc_name_data[TASK_COMM_LEN];	/* TASK_COMM_LEN is 16 bytes */
+
+static struct rchan_callbacks wall_interfaces_relay_callbacks = {
+	.subbuf_start	 = subbuf_wall_interfaces_start_callback,
+	.create_buf_file = create_wall_interfaces_file_callback,
+	.remove_buf_file = remove_wall_interfaces_file_callback,
+};
+
+struct rchan *wall_rchan;
+
+int copy_last_frame_addr(void *to)
+{
+	int ret;
+	const struct pt_regs *regs = task_pt_regs(current);
+	const void __user *fr = (const void __user *)regs->esp;
+
+	/* we read the fifth word on the user stack, so validate all five */
+	if (!access_ok(VERIFY_READ, fr, sizeof(long) * 5))
+		return 0;
+
+	ret = 1;
+	pagefault_disable();
+	if (__copy_from_user_inatomic(to, fr + 4 * sizeof(long), sizeof(long)))
+		ret = 0;	/* fall through so pagefault_enable() runs */
+	pagefault_enable();
+
+	return ret;
+}
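+/*
+ * Illustrative note (not part of the original patch): the 4*sizeof(long)
+ * offset above is an assumption about the caller -- it presumes the word
+ * of interest sits five longs up from the user ESP at the time of the
+ * hook (e.g. values pushed by the user-side call sequence).  Any change
+ * to that call sequence changes this offset.
+ */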
+int record_trace(void)
+{
+	struct log_entry *p;
+	int size, err = 0, i = 0;
+	char *ptemp = NULL;
+	char *path = NULL;
+	struct vm_area_struct *vma = NULL;
+
+	if (current->mm) {
+		/* Record the trace */
+		p = kmalloc(sizeof(struct log_entry), GFP_ATOMIC);
+		if (!p) {
+			err = -ENOMEM;
+			goto end;
+		}
+		strncpy(p->info.proc_name, current->comm, TASK_COMM_LEN);
+		p->info.pid = current->pid;
+
+		p->trace.nr_entries = 0;
+		p->trace.max_entries = MAX_NUM_FRAMES;
+		p->trace.skip = 0;
+		size = sizeof(unsigned long) * MAX_NUM_FRAMES;
+
+		p->trace.entries = kmalloc(size, GFP_ATOMIC);
+		if (!p->trace.entries) {
+			err = -ENOMEM;
+			goto free_entry;
+		}
+		/* PAGE_SIZE is too large for the kernel stack, so allocate
+		 * the d_path() buffer instead of declaring it in the loop */
+		path = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+		if (!path) {
+			err = -ENOMEM;
+			goto free_trace;
+		}
+		/* ftrace-style routine which walks the user stack */
+		save_stack_trace_user(&p->trace);
+
+		printk(KERN_WARNING "record_trace: save new trace\n");
+		p->trace_first_program_ip = -1;
+		p->program_ip_exists = 0;
+		p->vma_start = 0;
+
+		/* Find and save the first program IP that is not in a library */
+		i = 0;
+		while (i < MAX_NUM_FRAMES - 1) {
+			if (p->trace.entries[i] == ULONG_MAX) {
+				printk(KERN_WARNING "record_trace: hit terminator\n");
+				i--;
+				p->trace_first_program_ip = i;
+				p->program_ip_exists = 0;
+				p->vma_start = 0;
+				break;
+			}
+			/* Check whether this memory region holds the program
+			 * itself by comparing its path with current->comm */
+			vma = find_vma(current->mm, p->trace.entries[i]);
+			if (vma == NULL || vma->vm_file == NULL) {
+				printk(KERN_WARNING "record_trace: vma error\n");
+				p->program_ip_exists = 0;
+				p->trace_first_program_ip = -1;
+				p->vma_start = 0;
+				break;
+			}
+			ptemp = d_path(vma->vm_file->f_dentry,
+				       vma->vm_file->f_vfsmnt,
+				       path, PAGE_SIZE);
+			if (IS_ERR(ptemp)) {
+				printk(KERN_WARNING "record_trace: d_path error\n");
+				p->program_ip_exists = 0;
+				p->trace_first_program_ip = -1;
+				p->vma_start = 0;
+				break;
+			}
+			if (strlen(ptemp) >= strlen(current->comm) &&
+			    !strcmp(ptemp + strlen(ptemp) - strlen(current->comm),
+				    current->comm)) {
+				printk(KERN_WARNING "record_trace: strcmp ok\n");
+				p->program_ip_exists = 1;
+				p->trace_first_program_ip = i;
+				p->vma_start = vma->vm_start;
+				break;
+			}
+			i++;
+		}
+
+		/* Do not log library-only traces or traces whose
+		 * top-of-stack address is 0 */
+		if (p->trace_first_program_ip != -1 && p->program_ip_exists != 0 &&
+		    p->trace.entries[p->trace_first_program_ip] != 0) {
+			static int counter = 0;
+			int num_frames;
+			char *log_str = NULL, *stack_str = NULL;
+
+			i = p->trace_first_program_ip;
+			while (i < MAX_NUM_FRAMES && p->trace.entries[i] != ULONG_MAX)
+				i++;
+			num_frames = i - p->trace_first_program_ip;
+
+			/* at most 11 logged frames of "%lx," (9 bytes each on
+			 * 32-bit), a 2-digit count plus comma, and a NUL:
+			 * about 103 bytes, so 300 is ample */
+			stack_str = kzalloc(300, GFP_ATOMIC);
+			if (!stack_str) {
+				printk(KERN_INFO "Sample: No memory left - stack_str allocation\n");
+				err = -ENOMEM;
+				goto free_path;
+			}
+			sprintf(stack_str, "%u,", num_frames);
+
+			/* Print the stack trace */
+			i = p->trace_first_program_ip;
+			while (i < MAX_NUM_FRAMES && p->trace.entries[i] != ULONG_MAX) {
+				char *tmp_stack_str = kasprintf(GFP_ATOMIC, "%lx,",
+							p->trace.entries[i]);
+				if (!tmp_stack_str) {
+					printk(KERN_INFO "Sample: No memory left - tmp_stack_str allocation\n");
+					err = -ENOMEM;
+					kfree(stack_str);
+					goto free_path;
+				}
+				strcat(stack_str, tmp_stack_str);
+				kfree(tmp_stack_str);
+				i++;
+				/* Cap entries whose stack trace goes on
+				 * forever.  Why does this happen? */
+				if (i - p->trace_first_program_ip > 10)
+					break;
+			}
+
+			/* Write to the relay channel.  We convert log_entry
+			 * to a string because the scontext, tcontext, and
+			 * filename lengths are not known beforehand, and
+			 * allocating the maximum for each would waste space. */
+			log_str = kasprintf(GFP_ATOMIC, "TR,%d),%d,%s,%s\n",
+					    counter++, p->info.pid,
+					    p->info.proc_name, stack_str);
+			if (!log_str) {
+				printk(KERN_INFO "Sample: No memory left - log_str allocation\n");
+				err = -ENOMEM;
+				kfree(stack_str);
+				goto free_path;
+			}
+			relay_write(wall_rchan, log_str, strlen(log_str) + 1);
+
+			kfree(stack_str);
+			kfree(log_str);
+		}
+free_path:
+		kfree(path);
+free_trace:
+		kfree(p->trace.entries);
+free_entry:
+		kfree(p);
+	}
+
+end:
+	return err;
+}
+/************************** trace end **************************/
+
+
+
+
 static int has_perm(u32 ssid, u32 osid, u32 ops)
 {
-#if 0
-	if (ssid || osid)
+	if (osid == SAMPLE_UNTRUSTED_READ)
 		printk(KERN_WARNING "%s: 0x%x:0x%x:0x%x\n",
 		       __FUNCTION__, ssid, osid, ops);
-#endif
+
 	/* this process is under our control */
 	if (ssid) {
 		switch (osid) {
@@ -606,21 +970,25 @@
 
 static __init int sample_init(void)
 {
+	static atomic_t dropped = ATOMIC_INIT(0);
 	if (register_security(&sample_ops)) {
 		printk("Sample: Unable to register with kernel.\n");
 		return 0;
 	}
-
 	printk(KERN_INFO "Sample: Initializing.\n");
-
+	/* Allocate the rchan */
+	wall_rchan = relay_open("wall_interfaces", NULL, 1024 * 1024, 2,
+				&wall_interfaces_relay_callbacks, &dropped);
+	if (!wall_rchan)
+		printk(KERN_INFO "Sample: rchan allocation failed.\n");
 	return 0;
 }
 
 static __exit void sample_exit(void)
 {
 	printk(KERN_INFO "Sample: Exiting.\n");
 	unregister_security(&sample_ops);
+	relay_close(wall_rchan);
 }
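
For reference, here is a minimal sketch of a userspace consumer for the relay
channel the patch creates.  It is not part of the patch; it assumes debugfs is
mounted at /sys/kernel/debug and reads the CPU-0 buffer file (relay names each
per-CPU buffer file by appending the CPU number to the base filename, so
"wall_interfaces0" here).  The record layout matches the kasprintf() format in
record_trace().

/* reader.c - drain one per-CPU buffer of the wall_interfaces relay channel */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* per-CPU buffer file created by create_wall_interfaces_file_callback();
	 * the path assumes debugfs is mounted at /sys/kernel/debug */
	const char *path = "/sys/kernel/debug/wall_interfaces0";
	char buf[4096];
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* each record is "TR,<counter>),<pid>,<comm>,<nframes>,<ip>,<ip>,...\n"
	 * plus its terminating NUL, exactly as passed to relay_write() */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}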