-rw-r--r--  Documentation/trace/kprobetrace.rst | 3
-rw-r--r--  Documentation/trace/uprobetracer.rst | 4
-rw-r--r--  arch/arm64/kernel/ftrace.c | 1
-rw-r--r--  arch/arm64/kernel/perf_callchain.c | 2
-rw-r--r--  arch/arm64/kernel/process.c | 2
-rw-r--r--  arch/arm64/kernel/return_address.c | 2
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 15
-rw-r--r--  arch/arm64/kernel/time.c | 2
-rw-r--r--  arch/arm64/kernel/traps.c | 2
-rw-r--r--  arch/powerpc/kernel/process.c | 13
-rw-r--r--  arch/sh/kernel/dumpstack.c | 11
-rw-r--r--  arch/sh/kernel/dwarf.c | 9
-rw-r--r--  arch/sparc/kernel/perf_event.c | 8
-rw-r--r--  arch/sparc/kernel/stacktrace.c | 8
-rw-r--r--  arch/sparc/kernel/traps_64.c | 7
-rw-r--r--  arch/x86/kernel/ftrace.c | 41
-rw-r--r--  arch/x86/kernel/ftrace_64.S | 8
-rw-r--r--  include/linux/ftrace.h | 36
-rw-r--r--  include/linux/ring_buffer.h | 4
-rw-r--r--  include/linux/string.h | 20
-rw-r--r--  kernel/trace/Kconfig | 6
-rw-r--r--  kernel/trace/Makefile | 2
-rw-r--r--  kernel/trace/fgraph.c | 626
-rw-r--r--  kernel/trace/ftrace.c | 490
-rw-r--r--  kernel/trace/ftrace_internal.h | 75
-rw-r--r--  kernel/trace/ring_buffer.c | 94
-rw-r--r--  kernel/trace/trace.c | 82
-rw-r--r--  kernel/trace/trace.h | 13
-rw-r--r--  kernel/trace/trace_dynevent.c | 217
-rw-r--r--  kernel/trace/trace_dynevent.h | 119
-rw-r--r--  kernel/trace/trace_events.c | 10
-rw-r--r--  kernel/trace/trace_events_hist.c | 597
-rw-r--r--  kernel/trace/trace_functions_graph.c | 334
-rw-r--r--  kernel/trace/trace_irqsoff.c | 18
-rw-r--r--  kernel/trace/trace_kprobe.c | 353
-rw-r--r--  kernel/trace/trace_output.c | 38
-rw-r--r--  kernel/trace/trace_probe.c | 91
-rw-r--r--  kernel/trace/trace_probe.h | 9
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 270
-rw-r--r--  kernel/trace/trace_selftest.c | 8
-rw-r--r--  kernel/trace/trace_stack.c | 8
-rw-r--r--  kernel/trace/trace_uprobe.c | 301
-rw-r--r--  lib/seq_buf.c | 8
-rw-r--r--  scripts/recordmcount.c | 2
-rwxr-xr-x  scripts/recordmcount.pl | 13
-rw-r--r--  tools/testing/selftests/ftrace/test.d/dynevent/add_remove_kprobe.tc | 30
-rw-r--r--  tools/testing/selftests/ftrace/test.d/dynevent/add_remove_synth.tc | 27
-rw-r--r--  tools/testing/selftests/ftrace/test.d/dynevent/clear_select_events.tc | 50
-rw-r--r--  tools/testing/selftests/ftrace/test.d/dynevent/generic_clear_event.tc | 49
49 files changed, 2494 insertions(+), 1644 deletions(-)
diff --git a/Documentation/trace/kprobetrace.rst b/Documentation/trace/kprobetrace.rst
index 47e765c2f2c3..235ce2ab131a 100644
--- a/Documentation/trace/kprobetrace.rst
+++ b/Documentation/trace/kprobetrace.rst
@@ -20,6 +20,9 @@ current_tracer. Instead of that, add probe points via
/sys/kernel/debug/tracing/kprobe_events, and enable it via
/sys/kernel/debug/tracing/events/kprobes/<EVENT>/enable.
+You can also use /sys/kernel/debug/tracing/dynamic_events instead of
+kprobe_events. That interface will provide unified access to other
+dynamic events too.
Synopsis of kprobe_events
-------------------------
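As a rough illustration of the unified interface described in the added paragraph, a probe can be created by writing the same definition string to dynamic_events that kprobe_events accepts. A minimal user-space sketch, assuming the usual tracefs mount point; the probe name "myprobe" and target "do_sys_open" are hypothetical:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *path = "/sys/kernel/debug/tracing/dynamic_events";
		/* Hypothetical kprobe definition; same syntax as kprobe_events. */
		const char *def = "p:myprobe do_sys_open\n";
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, def, strlen(def)) < 0)
			perror("write");
		close(fd);
		return 0;
	}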
diff --git a/Documentation/trace/uprobetracer.rst b/Documentation/trace/uprobetracer.rst
index d0822811527a..4c3bfde2ba47 100644
--- a/Documentation/trace/uprobetracer.rst
+++ b/Documentation/trace/uprobetracer.rst
@@ -18,6 +18,10 @@ current_tracer. Instead of that, add probe points via
However unlike kprobe-event tracer, the uprobe event interface expects the
user to calculate the offset of the probepoint in the object.
+You can also use /sys/kernel/debug/tracing/dynamic_events instead of
+uprobe_events. That interface will provide unified access to other
+dynamic events too.
+
Synopsis of uprobe_tracer
-------------------------
::
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index c1f30f854fb3..8e4431a8821f 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -193,6 +193,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
void arch_ftrace_update_code(int command)
{
+ command |= FTRACE_MAY_SLEEP;
ftrace_modify_all_code(command);
}
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 94754f07f67a..a34c26afacb0 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -168,7 +168,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
frame.fp = regs->regs[29];
frame.pc = regs->pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = current->curr_ret_stack;
+ frame.graph = 0;
#endif
walk_stackframe(current, &frame, callchain_trace, entry);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index e0a443730e04..a0f985a6ac50 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -461,7 +461,7 @@ unsigned long get_wchan(struct task_struct *p)
frame.fp = thread_saved_fp(p);
frame.pc = thread_saved_pc(p);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = p->curr_ret_stack;
+ frame.graph = 0;
#endif
do {
if (unwind_frame(p, &frame))
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 933adbc0f654..53c40196b607 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -44,7 +44,7 @@ void *return_address(unsigned int level)
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.pc = (unsigned long)return_address; /* dummy */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = current->curr_ret_stack;
+ frame.graph = 0;
#endif
walk_stackframe(current, &frame, save_return_addr, &data);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 4989f7ea1e59..1a29f2695ff2 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -59,18 +59,17 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk->ret_stack &&
(frame->pc == (unsigned long)return_to_handler)) {
- if (WARN_ON_ONCE(frame->graph == -1))
- return -EINVAL;
- if (frame->graph < -1)
- frame->graph += FTRACE_NOTRACE_DEPTH;
-
+ struct ftrace_ret_stack *ret_stack;
/*
* This is a case where function graph tracer has
* modified a return address (LR) in a stack frame
* to hook a function return.
* So replace it to an original value.
*/
- frame->pc = tsk->ret_stack[frame->graph--].ret;
+ ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
+ if (WARN_ON_ONCE(!ret_stack))
+ return -EINVAL;
+ frame->pc = ret_stack->ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -137,7 +136,7 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
frame.fp = regs->regs[29];
frame.pc = regs->pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = current->curr_ret_stack;
+ frame.graph = 0;
#endif
walk_stackframe(current, &frame, save_trace, &data);
@@ -168,7 +167,7 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
frame.pc = (unsigned long)__save_stack_trace;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = tsk->curr_ret_stack;
+ frame.graph = 0;
#endif
walk_stackframe(tsk, &frame, save_trace, &data);
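All of the arch conversions in this series follow the same pattern: instead of indexing tsk->ret_stack by curr_ret_stack, an unwinder now starts at index 0 (the most recently hooked return) and asks the fgraph core for each saved entry until it returns NULL. A condensed sketch of that loop, with a hypothetical helper name:

	#include <linux/ftrace.h>
	#include <linux/printk.h>
	#include <linux/sched.h>

	/* Sketch only: print each return address the graph tracer has hooked. */
	static void dump_graph_ret_addrs(struct task_struct *tsk)
	{
		struct ftrace_ret_stack *ret_stack;
		int graph = 0;

		/* Index 0 is the most recent hooked return; NULL ends the walk. */
		while ((ret_stack = ftrace_graph_get_ret_stack(tsk, graph++)))
			pr_info("original ret: %pS\n", (void *)ret_stack->ret);
	}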
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index f258636273c9..a777ae90044d 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -52,7 +52,7 @@ unsigned long profile_pc(struct pt_regs *regs)
frame.fp = regs->regs[29];
frame.pc = regs->pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = current->curr_ret_stack;
+ frame.graph = 0;
#endif
do {
int ret = unwind_frame(NULL, &frame);
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index cdc71cf70aad..4e2fb877f8d5 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -123,7 +123,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
frame.pc = thread_saved_pc(tsk);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = tsk->curr_ret_stack;
+ frame.graph = 0;
#endif
skip = !!regs;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 96f34730010f..ce393df243aa 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -2061,9 +2061,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
int count = 0;
int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- int curr_frame = current->curr_ret_stack;
+ struct ftrace_ret_stack *ret_stack;
extern void return_to_handler(void);
unsigned long rth = (unsigned long)return_to_handler;
+ int curr_frame = 0;
#endif
sp = (unsigned long) stack;
@@ -2089,9 +2090,13 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if ((ip == rth) && curr_frame >= 0) {
- pr_cont(" (%pS)",
- (void *)current->ret_stack[curr_frame].ret);
- curr_frame--;
+ ret_stack = ftrace_graph_get_ret_stack(current,
+ curr_frame++);
+ if (ret_stack)
+ pr_cont(" (%pS)",
+ (void *)ret_stack->ret);
+ else
+ curr_frame = -1;
}
#endif
if (firstframe)
diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c
index 93c6c0e691ee..9f1c9c11d62d 100644
--- a/arch/sh/kernel/dumpstack.c
+++ b/arch/sh/kernel/dumpstack.c
@@ -56,17 +56,20 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
struct thread_info *tinfo, int *graph)
{
struct task_struct *task = tinfo->task;
+ struct ftrace_ret_stack *ret_stack;
unsigned long ret_addr;
- int index = task->curr_ret_stack;
if (addr != (unsigned long)return_to_handler)
return;
- if (!task->ret_stack || index < *graph)
+ if (!task->ret_stack)
return;
- index -= *graph;
- ret_addr = task->ret_stack[index].ret;
+ ret_stack = ftrace_graph_get_ret_stack(task, *graph);
+ if (!ret_stack)
+ return;
+
+ ret_addr = ret_stack->ret;
ops->address(data, ret_addr, 1);
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index 9e1d26c8a0c4..c5b426506d16 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -605,17 +605,18 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
* expected to find the real return address.
*/
if (pc == (unsigned long)&return_to_handler) {
- int index = current->curr_ret_stack;
+ struct ftrace_ret_stack *ret_stack;
+ ret_stack = ftrace_graph_get_ret_stack(current, 0);
+ if (ret_stack)
+ pc = ret_stack->ret;
/*
* We currently have no way of tracking how many
* return_to_handler()'s we've seen. If there is more
* than one patched return address on our stack,
* complain loudly.
*/
- WARN_ON(index > 0);
-
- pc = current->ret_stack[index].ret;
+ WARN_ON(ftrace_graph_get_ret_stack(current, 1));
}
#endif
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 47c871394ccb..6de7c684c29f 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1767,9 +1767,11 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
perf_callchain_store(entry, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if ((pc + 8UL) == (unsigned long) &return_to_handler) {
- int index = current->curr_ret_stack;
- if (current->ret_stack && index >= graph) {
- pc = current->ret_stack[index - graph].ret;
+ struct ftrace_ret_stack *ret_stack;
+ ret_stack = ftrace_graph_get_ret_stack(current,
+ graph);
+ if (ret_stack) {
+ pc = ret_stack->ret;
perf_callchain_store(entry, pc);
graph++;
}
diff --git a/arch/sparc/kernel/stacktrace.c b/arch/sparc/kernel/stacktrace.c
index be4c14cccc05..dd654e651500 100644
--- a/arch/sparc/kernel/stacktrace.c
+++ b/arch/sparc/kernel/stacktrace.c
@@ -57,9 +57,11 @@ static void __save_stack_trace(struct thread_info *tp,
trace->entries[trace->nr_entries++] = pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if ((pc + 8UL) == (unsigned long) &return_to_handler) {
- int index = t->curr_ret_stack;
- if (t->ret_stack && index >= graph) {
- pc = t->ret_stack[index - graph].ret;
+ struct ftrace_ret_stack *ret_stack;
+ ret_stack = ftrace_graph_get_ret_stack(t,
+ graph);
+ if (ret_stack) {
+ pc = ret_stack->ret;
if (trace->nr_entries <
trace->max_entries)
trace->entries[trace->nr_entries++] = pc;
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index aa624ed79db1..0cd02a64a451 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2502,9 +2502,10 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
printk(" [%016lx] %pS\n", pc, (void *) pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if ((pc + 8UL) == (unsigned long) &return_to_handler) {
- int index = tsk->curr_ret_stack;
- if (tsk->ret_stack && index >= graph) {
- pc = tsk->ret_stack[index - graph].ret;
+ struct ftrace_ret_stack *ret_stack;
+ ret_stack = ftrace_graph_get_ret_stack(tsk, graph);
+ if (ret_stack) {
+ pc = ret_stack->ret;
printk(" [%016lx] %pS\n", pc, (void *) pc);
graph++;
}
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 7ee8067cbf45..8257a59704ae 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -733,18 +733,20 @@ union ftrace_op_code_union {
} __attribute__((packed));
};
+#define RET_SIZE 1
+
static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
- unsigned const char *jmp;
unsigned long start_offset;
unsigned long end_offset;
unsigned long op_offset;
unsigned long offset;
unsigned long size;
- unsigned long ip;
+ unsigned long retq;
unsigned long *ptr;
void *trampoline;
+ void *ip;
/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
union ftrace_op_code_union op_ptr;
@@ -764,27 +766,27 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
/*
* Allocate enough size to store the ftrace_caller code,
- * the jmp to ftrace_epilogue, as well as the address of
- * the ftrace_ops this trampoline is used for.
+ * the retq, as well as the address of the ftrace_ops this
+ * trampoline is used for.
*/
- trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *));
+ trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
if (!trampoline)
return 0;
- *tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *);
+ *tramp_size = size + RET_SIZE + sizeof(void *);
/* Copy ftrace_caller onto the trampoline memory */
ret = probe_kernel_read(trampoline, (void *)start_offset, size);
- if (WARN_ON(ret < 0)) {
- tramp_free(trampoline, *tramp_size);
- return 0;
- }
+ if (WARN_ON(ret < 0))
+ goto fail;
- ip = (unsigned long)trampoline + size;
+ ip = trampoline + size;
- /* The trampoline ends with a jmp to ftrace_epilogue */
- jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_epilogue);
- memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE);
+ /* The trampoline ends with ret(q) */
+ retq = (unsigned long)ftrace_stub;
+ ret = probe_kernel_read(ip, (void *)retq, RET_SIZE);
+ if (WARN_ON(ret < 0))
+ goto fail;
/*
* The address of the ftrace_ops that is used for this trampoline
@@ -794,17 +796,15 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
* the global function_trace_op variable.
*/
- ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE);
+ ptr = (unsigned long *)(trampoline + size + RET_SIZE);
*ptr = (unsigned long)ops;
op_offset -= start_offset;
memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);
/* Are we pointing to the reference? */
- if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) {
- tramp_free(trampoline, *tramp_size);
- return 0;
- }
+ if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
+ goto fail;
/* Load the contents of ptr into the callback parameter */
offset = (unsigned long)ptr;
@@ -819,6 +819,9 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
return (unsigned long)trampoline;
+fail:
+ tramp_free(trampoline, *tramp_size);
+ return 0;
}
static unsigned long calc_trampoline_call_offset(bool save_regs)
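The net effect on the trampoline image is that the jmp back to ftrace_epilogue is replaced by a single-byte retq copied from ftrace_stub. A sketch of the resulting buffer layout (offsets relative to the allocation, names as in the code above):

	/*
	 *   [0, size)                copy of ftrace_caller up to the
	 *                            "think twice" label
	 *   [size, size + RET_SIZE)  one-byte retq taken from ftrace_stub
	 *   [size + RET_SIZE, +8)    pointer to this trampoline's ftrace_ops,
	 *                            read by the patched movq in place of
	 *                            function_trace_op
	 */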
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 91b2cff4b79a..75f2b36b41a6 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -171,9 +171,6 @@ GLOBAL(ftrace_call)
restore_mcount_regs
/*
- * The copied trampoline must call ftrace_epilogue as it
- * still may need to call the function graph tracer.
- *
* The code up to this label is copied into trampolines so
* think twice before adding any new code or changing the
* layout here.
@@ -185,7 +182,10 @@ GLOBAL(ftrace_graph_call)
jmp ftrace_stub
#endif
-/* This is weak to keep gas from relaxing the jumps */
+/*
+ * This is weak to keep gas from relaxing the jumps.
+ * It is also used to copy the retq for trampolines.
+ */
WEAK(ftrace_stub)
retq
ENDPROC(ftrace_caller)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 5c990e891d6a..730876187344 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -389,6 +389,7 @@ enum {
FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
FTRACE_START_FUNC_RET = (1 << 3),
FTRACE_STOP_FUNC_RET = (1 << 4),
+ FTRACE_MAY_SLEEP = (1 << 5),
};
/*
@@ -752,6 +753,11 @@ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+struct fgraph_ops {
+ trace_func_graph_ent_t entryfunc;
+ trace_func_graph_ret_t retfunc;
+};
+
/*
* Stack of return addresses for functions
* of a thread.
@@ -783,6 +789,9 @@ extern int
function_graph_enter(unsigned long ret, unsigned long func,
unsigned long frame_pointer, unsigned long *retp);
+struct ftrace_ret_stack *
+ftrace_graph_get_ret_stack(struct task_struct *task, int idx);
+
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
unsigned long ret, unsigned long *retp);
@@ -793,11 +802,11 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
*/
#define __notrace_funcgraph notrace
-#define FTRACE_NOTRACE_DEPTH 65536
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
-extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
- trace_func_graph_ent_t entryfunc);
+
+extern int register_ftrace_graph(struct fgraph_ops *ops);
+extern void unregister_ftrace_graph(struct fgraph_ops *ops);
extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);
@@ -806,17 +815,10 @@ extern void ftrace_graph_stop(void);
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;
-extern void unregister_ftrace_graph(void);
-
extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
-static inline int task_curr_ret_stack(struct task_struct *t)
-{
- return t->curr_ret_stack;
-}
-
static inline void pause_graph_tracing(void)
{
atomic_inc(&current->tracing_graph_pause);
@@ -834,17 +836,9 @@ static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
-static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
- trace_func_graph_ent_t entryfunc)
-{
- return -1;
-}
-static inline void unregister_ftrace_graph(void) { }
-
-static inline int task_curr_ret_stack(struct task_struct *tsk)
-{
- return -1;
-}
+/* Define as macros as fgraph_ops may not be defined */
+#define register_ftrace_graph(ops) ({ -1; })
+#define unregister_ftrace_graph(ops) do { } while (0)
static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
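With this change a function graph client bundles both callbacks into one fgraph_ops and passes that single object to register/unregister, instead of two bare function pointers. A minimal sketch; the handler and variable names are hypothetical:

	#include <linux/ftrace.h>

	static int my_entry(struct ftrace_graph_ent *trace)
	{
		return 1;	/* nonzero: trace this function */
	}

	static void my_return(struct ftrace_graph_ret *trace)
	{
		/* e.g. look at trace->func, trace->rettime - trace->calltime */
	}

	static struct fgraph_ops my_fgraph_ops = {
		.entryfunc	= my_entry,
		.retfunc	= my_return,
	};

	/* in init/exit code:
	 *	register_ftrace_graph(&my_fgraph_ops);
	 *	unregister_ftrace_graph(&my_fgraph_ops);
	 */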
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 0940fda59872..5b9ae62272bb 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
__ring_buffer_alloc((size), (flags), &__key); \
})
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full);
__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table);
@@ -189,6 +189,8 @@ bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer);
size_t ring_buffer_page_len(void *page);
+size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu);
+size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu);
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data);
diff --git a/include/linux/string.h b/include/linux/string.h
index 27d0482e5e05..7927b875f80c 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -456,4 +456,24 @@ static inline void memcpy_and_pad(void *dest, size_t dest_len,
memcpy(dest, src, dest_len);
}
+/**
+ * str_has_prefix - Test if a string has a given prefix
+ * @str: The string to test
+ * @prefix: The string to see if @str starts with
+ *
+ * A common way to test a prefix of a string is to do:
+ * strncmp(str, prefix, sizeof(prefix) - 1)
+ *
+ * But this can lead to bugs due to typos, or if prefix is a pointer
+ * and not a constant. Instead use str_has_prefix().
+ *
+ * Returns: 0 if @str does not start with @prefix
+ *  strlen(@prefix) if @str does start with @prefix
+ */
+static __always_inline size_t str_has_prefix(const char *str, const char *prefix)
+{
+ size_t len = strlen(prefix);
+ return strncmp(str, prefix, len) == 0 ? len : 0;
+}
+
#endif /* _LINUX_STRING_H_ */
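A short usage sketch: because the helper returns strlen(@prefix) on a match, the result can be used directly to skip past the prefix when parsing. The input strings here are hypothetical:

	const char *arg = "set_ftrace_filter=foo";	/* hypothetical input */
	size_t len;

	len = str_has_prefix(arg, "set_ftrace_filter=");
	if (len)
		pr_info("filter value: %s\n", arg + len);	/* prints "foo" */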
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 5e3de28c7677..fa8b1fe824f3 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -461,6 +461,7 @@ config KPROBE_EVENTS
bool "Enable kprobes-based dynamic events"
select TRACING
select PROBE_EVENTS
+ select DYNAMIC_EVENTS
default y
help
This allows the user to add tracing events (similar to tracepoints)
@@ -500,6 +501,7 @@ config UPROBE_EVENTS
depends on PERF_EVENTS
select UPROBES
select PROBE_EVENTS
+ select DYNAMIC_EVENTS
select TRACING
default y
help
@@ -518,6 +520,9 @@ config BPF_EVENTS
help
This allows the user to attach BPF programs to kprobe events.
+config DYNAMIC_EVENTS
+ def_bool n
+
config PROBE_EVENTS
def_bool n
@@ -630,6 +635,7 @@ config HIST_TRIGGERS
depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
select TRACING_MAP
select TRACING
+ select DYNAMIC_EVENTS
default n
help
Hist triggers allow one or more arbitrary trace event fields
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index f81dadbc7c4a..c2b2148bb1d2 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += fgraph.o
ifeq ($(CONFIG_BLOCK),y)
obj-$(CONFIG_EVENT_TRACING) += blktrace.o
endif
@@ -78,6 +79,7 @@ endif
ifeq ($(CONFIG_TRACING),y)
obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
endif
+obj-$(CONFIG_DYNAMIC_EVENTS) += trace_dynevent.o
obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
new file mode 100644
index 000000000000..8dfd5021b933
--- /dev/null
+++ b/kernel/trace/fgraph.c
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Infrastructure to hook into function calls and returns.
+ * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
+ * Mostly borrowed from function tracer which
+ * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
+ *
+ * Highly modified by Steven Rostedt (VMware).
+ */
+#include <linux/suspend.h>
+#include <linux/ftrace.h>
+#include <linux/slab.h>
+
+#include <trace/events/sched.h>
+
+#include "ftrace_internal.h"
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define ASSIGN_OPS_HASH(opsname, val) \
+ .func_hash = val, \
+ .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
+#else
+#define ASSIGN_OPS_HASH(opsname, val)
+#endif
+
+static bool kill_ftrace_graph;
+int ftrace_graph_active;
+
+/* Both enabled by default (can be cleared by function_graph tracer flags) */
+static bool fgraph_sleep_time = true;
+
+/**
+ * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
+ *
+ * ftrace_graph_stop() is called when a severe error is detected in
+ * the function graph tracing. This function is called by the critical
+ * paths of function graph to keep those paths from doing any more harm.
+ */
+bool ftrace_graph_is_dead(void)
+{
+	return kill_ftrace_graph;
+}
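So callers on hot paths are expected to bail out early once the graph infrastructure has been declared dead; a typical guard, sketched with a hypothetical caller:

	if (unlikely(ftrace_graph_is_dead()))
		return;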