/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);

/* One buffer per recursion context: NMI, IRQ, softirq and task. */
static char *perf_trace_buf[4];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

static int perf_trace_event_enable(struct ftrace_event_call *event, void *data)
{
	int ret = -ENOMEM;

	if (event->perf_refcount++ > 0) {
		event->perf_data = NULL;
		return 0;
	}

	/* First user overall: allocate the per-cpu buffers. */
	if (!total_ref_count) {
		char *buf;
		int i;

		for (i = 0; i < 4; i++) {
			buf = (char *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail_buf;

			rcu_assign_pointer(perf_trace_buf[i], buf);
		}
	}

	ret = event->perf_event_enable(event);
	if (!ret) {
		event->perf_data = data;
		total_ref_count++;
		return 0;
	}

fail_buf:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
	event->perf_refcount--;

	return ret;
}

int perf_trace_enable(int event_id, void *data)
{
	struct ftrace_event_call *event;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id && event->perf_event_enable &&
		    try_module_get(event->mod)) {
			ret = perf_trace_event_enable(event, data);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static void perf_trace_event_disable(struct ftrace_event_call *event)
{
	if (--event->perf_refcount > 0)
		return;

	event->perf_event_disable(event);

	/* Last user overall: tear down the per-cpu buffers. */
	if (!--total_ref_count) {
		char *buf[4];
		int i;

		for (i = 0; i < 4; i++) {
			buf[i] = perf_trace_buf[i];
			rcu_assign_pointer(perf_trace_buf[i], NULL);
		}

		/*
		 * Ensure all events in flight have finished before
		 * releasing the buffers.
		 */
		synchronize_sched();

		for (i = 0; i < 4; i++)
			free_percpu(buf[i]);
	}
}

void perf_trace_disable(int event_id)
{
	struct ftrace_event_call *event;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id) {
			perf_trace_event_disable(event);
			module_put(event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);
}

__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	char *trace_buf, *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		goto err_recursion;

	trace_buf = rcu_dereference_sched(perf_trace_buf[*rctxp]);
	if (!trace_buf)
		goto err;

	raw_data = per_cpu_ptr(trace_buf, smp_processor_id());

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	tracing_generic_entry_update(entry, regs->flags, pc);
	entry->type = type;

	return raw_data;
err:
	perf_swevent_put_recursion_context(*rctxp);
err_recursion:
	return NULL;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
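
/*
 * Usage sketch (illustrative only, not part of the original file):
 * roughly how a perf tracepoint handler is expected to pair
 * perf_trace_buf_prepare() with the recursion-context helpers above.
 * The handler name, the my_entry layout and the "submit to perf"
 * step are assumptions for illustration; in this kernel tree the
 * real callers are generated by the TRACE_EVENT machinery.
 */
#if 0	/* sketch, never compiled */
static void my_event_perf_handler(struct pt_regs *regs, u16 my_event_id)
{
	struct my_entry {
		struct trace_entry	ent;
		unsigned long		my_field;	/* hypothetical payload */
	} *entry;
	int size, rctx;

	/* Records must be u64-aligned; see the memset() in _prepare(). */
	size = ALIGN(sizeof(*entry), sizeof(u64));

	entry = perf_trace_buf_prepare(size, my_event_id, regs, &rctx);
	if (!entry)
		return;	/* recursion detected or buffers not allocated */

	entry->my_field = 42;

	/*
	 * ... hand the record off to perf here (perf_tp_event() in
	 * this kernel tree), then release the recursion context
	 * acquired by perf_trace_buf_prepare():
	 */
	perf_swevent_put_recursion_context(rctx);
}
#endif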