summaryrefslogtreecommitdiffstats
path: root/kernel/trace/trace_sysprof.c
blob: a7974a552ca90689f3ff358f96b6eb2622c5748c (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
0063f043f11aba7b89d63808e6bc67bbb0051c4c mutt-1-1-4-rel
0366855cb4ce645e8547eb371a635a84e2c453d7 mutt-1-3-25-rel
08a17d4cdcbd3fb24bd2ebe83828008f325ae8d8 mutt-1-3-rel
0cb4e2f662903f5ee2972596fb8ebcafe33e733d mutt-1-5-13-rel
0e88fc30c91332187463ee85768805e454140c05 mutt-0-94-13-rel
0e9b6a89555a5ee8bca9fa1f67091618597ec29c mutt-1-3-27-rel
0f24cff519c8501727bc6010ad74556ee0efd198 mutt-1-5-7-rel
0f707f13b76347df80d4d1adbf8dfbb1339dbced mutt-1-3-19-rel
10ae194ea7325c563bc52d803502eff439d369ce mutt-1-3-15-rel
135c43b8d6be6795f7218148e7c142d1b9288105 mutt-1-3-1-rel
15f26edcfd19941bb88e62b034b9fb25f51bb3e7 mutt-1-3-2-rel
178d20efae6a4df9fb12575d30a471c65f64d0d9 mutt-1-3-20-rel
19732d3a1566d2faf5b354af86534cd131852b76 mutt-1-1-11-rel
1b4bbd36bc50f6d482dc800866ed8b0c01b3e71d mutt-1-1-3-rel
1ce6e68d8685ddd721b0698b9692bbad17b22889 mutt-1-1-1-2-rel
232d922da63bcac37f998e8902c27718bdf42aa0 mutt-0-94-9i-rel
23adf861cac46e447b56d34a744c13e7059aa129 mutt-1-3-22-1-rel
23d559ebe55476cdcba916b762ad90fa81912b21 mutt-0-94-18-rel
255a71b1c8a9ed099bdb4b0cf3c8b3a6551bdc80 mutt-1-5-8-rel
2b1e65821a2e4b4d8219e0ff78e87c12fe589d6f mutt-0-94-9i-p1
2b76fdbd3b6803a61cab9438751b9e7aef599370 mutt-1-3-10-rel
30ce625d20ce34aeaac8774955eedd6bdf706fe7 mutt-1-5-1-rel
376a352c9a21c9439c6dd183a6f2f32c0a8f32f2 mutt-1-3-11-rel
3837061be530b4a6f11dfaa1d9a0f539bc9c1aed mutt-1-5-5-rel
3a4ef6600c0afc731e87caeb45ca1d8ef068e96d mutt-0-94-10i-rel
3bef047dbad46fff9ef6abf17dad2decae2bf53f mutt-0-96-7-rel
3d107f9ee4cb8a8d9bbfaba86ccbb0e53c1dffb6 mutt-0-96-8-rel
412f762bc7b8dbce9daa80d544115ffb07cc6f29 mutt-1-3-5-rel
41a18a584dce7c7884da210a277527fb67278093 mutt-1-3-3-rel
42564cca29c857148c7658531eabcd8bc3bedaae mutt-0-96-2-slightly-post-release
42b65d47a6e204f3fb83a9ff57375a3ad2150627 mutt-1-3-23-1-rel
4d0fb523422de134302bcac198a08ecbd8643a9b mutt-1-5-9-rel
4e510c27c6a70589dfe9eacabb3c447ea631d837 mutt-1-3-14-rel
512d53e287cf5fb305f0505e5adaaba1d131d12e mutt-1-3-23-rel
526b2eeec601de50702a882e0893be50b9eb7307 mutt-1-3-21-rel
52f9e92eb4a467024c8f462852037ab2f0280672 mutt-1-1-9-rel
553abc0b5e26ff58fe7130676382ed6da861f61d mutt-1-5-11-rel
559940f0b3d09d49803f654df317f2abaa74310e mutt-1-1-8-rel
5a932e25364f4ef011e0b853916b0b357c77f19c mutt-1-1-1-1-rel
5b22bc9a8a42f6431189f6cda5292f5a9eb726b7 mutt-1-1-rel
5e1d3e482d95b0312ec42efb24b4378cd5e12d00 mutt-0-92-10i
5e807fff2594f1b158534d71bbc0b27f8f864046 mutt-0-96-3-rel
61276da736e2ed1ec31179b0572278be0c341620 mutt-0-94-16i-rel
63f2be9404d6525b8ae23285ad0a0b7a3a748fef mutt-1-5-12-rel
68579cbc7ed88cfafd9a2d6dac5ab3a603f41dce mutt-0-94-7i-rel
6942ab68847a59051565dcab63d7a09900aeb6cd mutt-0-94-6i-rel
699400638f26483ab1c3579c1b237623a58bedc9 mutt-1-3-8-rel
6dbb1acec0cd8be743af82af5809e3df07a4d5f2 mutt-1-3-26-rel
735e30fb79fcbb7fa4e68b86901b7a99523a
/*
 * trace stack traces
 *
 * Copyright (C) 2004-2008, Soeren Sandmann
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/fs.h>

#include <asm/stacktrace.h>

#include "trace.h"

/* The trace_array this tracer writes into; set in stack_trace_init(). */
static struct trace_array	*sysprof_trace;
/* Non-zero while the per-CPU sampling timers are armed. */
static int __read_mostly	tracer_enabled;

/*
 * 1 msec sample interval by default:
 */
static unsigned long sample_period = 1000000;
/* Hard cap on the number of trace entries recorded per sample. */
static const unsigned int sample_max_depth = 512;

/* Serializes starting/stopping the per-CPU timers and period updates. */
static DEFINE_MUTEX(sample_timer_lock);
/*
 * Per CPU hrtimers that do the profiling:
 */
static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);

/*
 * Layout of a frame-pointer-based userspace stack frame:
 * the caller's saved frame pointer followed by the return address.
 */
struct stack_frame {
	const void __user	*next_fp;	/* caller's saved frame pointer */
	unsigned long		return_address;
};

/*
 * Safely fetch one stack_frame from user memory.
 *
 * Called from hard-irq context, so page faults must not be serviced:
 * faults are disabled around the copy and a faulting copy is simply
 * reported as failure.  Returns 1 on success, 0 on failure.
 */
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ok;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	pagefault_disable();
	ok = __copy_from_user_inatomic(frame, fp, sizeof(*frame)) ? 0 : 1;
	pagefault_enable();

	return ok;
}

/* Cursor passed through the stacktrace_ops callbacks during a kernel walk. */
struct backtrace_info {
	struct trace_array_cpu	*data;
	struct trace_array	*tr;
	int			pos;	/* entries emitted so far */
};

/* stacktrace_ops callback: symbol-related warnings are deliberately dropped. */
static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

/* stacktrace_ops callback: generic warnings are deliberately dropped. */
static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

/* stacktrace_ops callback: returning -1 declines to walk secondary stacks. */
static int backtrace_stack(void *data, char *name)
{
	/* Don't bother with IRQ stacks for now */
	return -1;
}

/*
 * stacktrace_ops callback: record one kernel return address.
 *
 * Only reliable addresses are traced, and only while the per-sample
 * depth budget (sample_max_depth) has not been exhausted.
 */
static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct backtrace_info *info = data;

	if (!reliable || info->pos >= sample_max_depth)
		return;

	__trace_special(info->tr, info->data, 1, addr, 0);
	info->pos++;
}

/* Callback table handed to dump_trace() for the kernel stack walk. */
static const struct stacktrace_ops backtrace_ops = {
	.warning		= backtrace_warning,
	.warning_symbol		= backtrace_warning_symbol,
	.stack			= backtrace_stack,
	.address		= backtrace_address,
	.walk_stack		= print_context_stack,
};

/*
 * Record a kernel-mode stack trace into the ring buffer.
 *
 * Emits the interrupted instruction pointer first, then walks the
 * kernel stack via dump_trace(), which emits one entry per reliable
 * return address through backtrace_ops.
 *
 * Returns the number of entries written (counting the initial
 * regs->ip entry, hence pos starts at 1).
 */
static int
trace_kernel(struct pt_regs *regs, struct trace_array *tr,
	     struct trace_array_cpu *data)
{
	struct backtrace_info info;
	unsigned long bp;
	char *stack;

	info.tr = tr;
	info.data = data;
	info.pos = 1;

	__trace_special(info.tr, info.data, 1, regs->ip, 0);

	/* The stack region to walk begins just past the saved registers. */
	stack = ((char *)regs + sizeof(struct pt_regs));
#ifdef CONFIG_FRAME_POINTER
	bp = regs->bp;
#else
	/* No frame pointers: let dump_trace() find its own anchor. */
	bp = 0;
#endif

	dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, &info);

	return info.pos;
}

/*
 * Take one profiling sample for @cpu, called from the per-CPU hrtimer.
 *
 * Emits a sequence of "special" trace entries per sample:
 *   type 0:  sample start, tagged with current->pid
 *   type 1:  kernel addresses (via trace_kernel), if interrupted in-kernel
 *   type 2:  userspace ip/return addresses, walked via frame pointers
 *   type -1: overflow marker when sample_max_depth was reached
 *   type 3:  sample end, pid plus total entry count
 */
static void timer_notify(struct pt_regs *regs, int cpu)
{
	struct trace_array_cpu *data;
	struct stack_frame frame;
	struct trace_array *tr;
	const void __user *fp;
	int is_user;
	int i;

	if (!regs)
		return;

	tr = sysprof_trace;
	data = tr->data[cpu];
	is_user = user_mode(regs);

	/* Skip the idle task (pid 0). */
	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	__trace_special(tr, data, 0, 0, current->pid);

	if (!is_user)
		i = trace_kernel(regs, tr, data);
	else
		i = 0;

	/*
	 * Trace user stack if we are not a kernel thread
	 */
	if (current->mm && i < sample_max_depth) {
		/* Userspace register state saved at the top of the kernel stack. */
		regs = (struct pt_regs *)current->thread.sp0 - 1;

		fp = (void __user *)regs->bp;

		__trace_special(tr, data, 2, regs->ip, 0);

		/* Follow the frame-pointer chain up the user stack. */
		while (i < sample_max_depth) {
			frame.next_fp = NULL;
			frame.return_address = 0;
			if (!copy_stack_frame(fp, &frame))
				break;
			/* Frame pointers must grow upward; bail on corruption. */
			if ((unsigned long)fp < regs->sp)
				break;

			__trace_special(tr, data, 2, frame.return_address,
					(unsigned long)fp);
			fp = frame.next_fp;

			i++;
		}

	}

	/*
	 * Special trace entry if we overflow the max depth:
	 */
	if (i == sample_max_depth)
		__trace_special(tr, data, -1, -1, -1);

	__trace_special(tr, data, 3, current->pid, i);
}

/*
 * Per-CPU hrtimer callback: take one sample on this CPU, then re-arm
 * the timer for the next sampling interval.
 */
static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
{
	struct pt_regs *regs = get_irq_regs();

	/* Record a sample for the CPU this timer fired on. */
	timer_notify(regs, smp_processor_id());

	/* Advance the expiry by one sample period. */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	return HRTIMER_RESTART;
}

/*
 * Initialize and start this CPU's sampling hrtimer.  Runs on each CPU
 * via on_each_cpu(); the pinned mode keeps the timer on this CPU.
 */
static void start_stack_timer(void *unused)
{
	struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer);

	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = stack_trace_timer_fn;

	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);
}

/* Arm the sampling hrtimer on every online CPU (waits for completion). */
static void start_stack_timers(void)
{
	on_each_cpu(start_stack_timer, NULL, 1);
}

/*
 * Cancel the sampling hrtimer of the given CPU, waiting for a
 * concurrently running callback to finish.
 */
static void stop_stack_timer(int cpu)
{
	hrtimer_cancel(&per_cpu(stack_trace_hrtimer, cpu));
}

/* Cancel the sampling hrtimer on every online CPU. */
static void stop_stack_timers(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		stop_stack_timer(cpu);
}

/* Disable sampling: tear down the timers under the shared timer lock. */
static void stop_stack_trace(struct trace_array *tr)
{
	mutex_lock(&sample_timer_lock);
	stop_stack_timers();
	tracer_enabled = 0;
	mutex_unlock(&sample_timer_lock);
}

/*
 * Tracer .init hook: remember the trace_array, enable cmdline
 * recording, and start the per-CPU sampling timers.  Always
 * succeeds (returns 0).
 */
static int stack_trace_init(struct trace_array *tr)
{
	sysprof_trace = tr;

	tracing_start_cmdline_record();

	mutex_lock(&sample_timer_lock);
	start_stack_timers();
	tracer_enabled = 1;
	mutex_unlock(&sample_timer_lock);
	return 0;
}

/* Tracer .reset hook: undo stack_trace_init() in reverse order. */
static void stack_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	stop_stack_trace(tr);
}

/* The "sysprof" tracer registered with the ftrace core. */
static struct tracer stack_trace __read_mostly =
{
	.name		= "sysprof",
	.init		= stack_trace_init,
	.reset		= stack_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_sysprof,
#endif
};

__init static int init_stack_trace(void)
{
	return register_tracer(&stack_trace);
}
device_initcall(init_stack_trace);

/*
 * Buffer size for the sample period rendered in decimal.
 * NOTE(review): 22 bytes does not cover the absolute worst case of a
 * signed 64-bit value ("-9223372036854775808" + '\n' + NUL = 23) —
 * fine for realistic periods, but verify if the range ever widens.
 */
#define MAX_LONG_DIGITS 22

/*
 * debugfs read handler: report the current sample period, converted
 * to microseconds and newline-terminated.
 *
 * Uses scnprintf() rather than sprintf(): for extreme period values
 * "%ld\n" can need up to 23 bytes (20 digits + sign + '\n' + NUL),
 * one more than MAX_LONG_DIGITS, so the unbounded sprintf could
 * overflow buf by a byte.  scnprintf() bounds the write and returns
 * the number of characters actually stored.
 */
static ssize_t
sysprof_sample_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[MAX_LONG_DIGITS];
	int r;

	r = scnprintf(buf, sizeof(buf), "%ld\n", nsecs_to_usecs(sample_period));

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

/*
 * debugfs write handler: parse a decimal sample period in
 * microseconds and re-arm all sampling timers with the new period.
 *
 * Input is truncated to what fits in the local buffer; a minimum of
 * 100 usecs is enforced.  Returns the number of bytes consumed, or
 * -EFAULT if the user buffer could not be read.
 */
static ssize_t
sysprof_sample_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	char buf[MAX_LONG_DIGITS];
	unsigned long val;

	if (cnt > MAX_LONG_DIGITS-1)
		cnt = MAX_LONG_DIGITS-1;

	/* Pass the array (decays to char *), not its address (char (*)[]). */
	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);
	/*
	 * Enforce a minimum sample period of 100 usecs:
	 */
	if (val < 100)
		val = 100;

	/* Timers must be fully stopped before the period changes. */
	mutex_lock(&sample_timer_lock);
	stop_stack_timers();
	sample_period = val * 1000;
	start_stack_timers();
	mutex_unlock(&sample_timer_lock);

	return cnt;
}

static const </