Diffstat (limited to 'arch/microblaze/kernel')
-rw-r--r--  arch/microblaze/kernel/ftrace.c   58
-rw-r--r--  arch/microblaze/kernel/mcount.S   41
2 files changed, 99 insertions, 0 deletions
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c
index c1889b1..0952a8b 100644
--- a/arch/microblaze/kernel/ftrace.c
+++ b/arch/microblaze/kernel/ftrace.c
@@ -14,6 +14,64 @@
#include <asm/cacheflush.h>
#include <linux/ftrace.h>
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * Hook the return address and push it on the stack of return
+ * addresses in the current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+ unsigned long old;
+ int faulted, err;
+ struct ftrace_graph_ent trace;
+ unsigned long return_hooker = (unsigned long)
+ &return_to_handler;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ /*
+ * Protect against a fault, even if it shouldn't
+ * happen. This tool is too intrusive to forgo
+ * such a protection.
+ */
+ asm volatile(" 1: lwi %0, %2, 0; \
+ 2: swi %3, %2, 0; \
+ addik %1, r0, 0; \
+ 3: \
+ .section .fixup, \"ax\"; \
+ 4: brid 3b; \
+ addik %1, r0, 1; \
+ .previous; \
+ .section __ex_table,\"a\"; \
+ .word 1b,4b; \
+ .word 2b,4b; \
+ .previous;" \
+ : "=&r" (old), "=r" (faulted)
+ : "r" (parent), "r" (return_hooker)
+ );
+
+ if (unlikely(faulted)) {
+ ftrace_graph_stop();
+ WARN_ON(1);
+ return;
+ }
+
+ err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
+ if (err == -EBUSY) {
+ *parent = old;
+ return;
+ }
+
+ trace.func = self_addr;
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ current->curr_ret_stack--;
+ *parent = old;
+ }
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
#ifdef CONFIG_DYNAMIC_FTRACE
/* save value to addr - it is safe to do it in asm */
static int ftrace_modify_code(unsigned long addr, unsigned int value)
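
Stripped of its fault protection, the asm in prepare_ftrace_return() above just swaps the caller's saved return address for return_to_handler. A minimal C sketch of that intent (the helper name is hypothetical and not part of the patch; plain C cannot express the __ex_table fixup, which is exactly why the real code is asm):

/* Hypothetical helper: what the protected asm in prepare_ftrace_return()
 * does on the success path. A fault here would simply crash. */
static int swap_return_address(unsigned long *parent,
			       unsigned long return_hooker,
			       unsigned long *old)
{
	*old = *parent;			/* "1: lwi" - read the saved return address */
	*parent = return_hooker;	/* "2: swi" - redirect it to return_to_handler */
	return 0;			/* the "4:" fixup path reports 1 (faulted) */
}

If either memory access faults, the __ex_table entries steer execution to label 4, which sets the faulted flag instead of oopsing; prepare_ftrace_return() then shuts the tracer down via ftrace_graph_stop().
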
diff --git a/arch/microblaze/kernel/mcount.S b/arch/microblaze/kernel/mcount.S
index 30aaf8f..84a1945 100644
--- a/arch/microblaze/kernel/mcount.S
+++ b/arch/microblaze/kernel/mcount.S
@@ -96,6 +96,27 @@ ENTRY(ftrace_caller)
bneid r5, end;
nop;
/* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST end of checking */
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ lwi r5, r0, ftrace_graph_return;
+ addik r6, r0, ftrace_stub; /* asm implementation */
+ cmpu r5, r5, r6; /* ftrace_graph_return != ftrace_stub */
+ beqid r5, end_graph_tracer;
+ nop;
+
+ lwi r6, r0, ftrace_graph_entry;
+ addik r5, r0, ftrace_graph_entry_stub; /* implemented in C */
+ cmpu r5, r5, r6; /* ftrace_graph_entry != ftrace_graph_entry_stub */
+ beqid r5, end_graph_tracer;
+ nop;
+ addik r5, r1, 120; /* MS: load parent addr */
+ addik r6, r15, 0; /* MS: load current function addr */
+ bralid r15, prepare_ftrace_return;
+ nop;
+ /* MS: the graph hook was taken, so we can skip the function tracer */
+ brid end;
+ nop;
+end_graph_tracer:
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifndef CONFIG_DYNAMIC_FTRACE
/* MS: test whether the function tracer is taken or not */
lwi r20, r0, ftrace_trace_function;
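
Note the branch sense in the graph-tracer block above: beqid skips to end_graph_tracer on equality, so prepare_ftrace_return is reached only when ftrace_graph_return has been moved off ftrace_stub and ftrace_graph_entry off ftrace_graph_entry_stub. A self-contained C sketch of that gate (the function-pointer type and the stub/hook definitions are simplified stand-ins, not the kernel's real declarations):

typedef void (*ftrace_func_t)(void);	/* stand-in type, not the kernel's */

static void ftrace_stub(void) { }
static void ftrace_graph_entry_stub(void) { }

static ftrace_func_t ftrace_graph_return = ftrace_stub;
static ftrace_func_t ftrace_graph_entry = ftrace_graph_entry_stub;

extern void prepare_ftrace_return(unsigned long *parent,
				  unsigned long self_addr);

/* Mirrors the asm: both hooks must differ from their stubs;
 * otherwise control falls through to the function tracer. */
static void graph_gate(unsigned long *parent, unsigned long self_addr)
{
	if (ftrace_graph_return != ftrace_stub &&
	    ftrace_graph_entry != ftrace_graph_entry_stub)
		prepare_ftrace_return(parent, self_addr);
}

The addik r5, r1, 120 before the call passes the stack slot holding the parent's return address, so prepare_ftrace_return() can rewrite it in place.
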
@@ -121,3 +142,23 @@ end:
rtsd r15, 8; /* MS: jump back */
nop;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(return_to_handler)
+ nop; /* MS: just a barrier for rtsd r15, 8 */
+ nop;
+ SAVE_REGS
+ swi r15, r1, 0;
+
+ /* MS: find out the original return address */
+ bralid r15, ftrace_return_to_handler;
+ nop;
+
+ /* MS: the return value from ftrace_return_to_handler is the return
+ * address; grab it before RESTORE_REGS brings back the r3 content */
+ addik r15, r3, 0;
+ RESTORE_REGS
+
+ rtsd r15, 8; /* MS: jump back */
+ nop;
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
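
return_to_handler is the trampoline that prepare_ftrace_return() patched in as the fake return address; when the traced function returns, it lands here instead of in its caller. Its shape in C-like terms, as a rough sketch (the stand-in for ftrace_return_to_handler is hypothetical, since its exact prototype has varied across kernel versions, and the real trampoline must stay in asm because it runs without a compiler-managed frame):

/* Hypothetical stand-in for the tracing core's helper, which pops the
 * real return address that prepare_ftrace_return() pushed earlier. */
extern unsigned long ftrace_return_to_handler_stub(void);

static unsigned long return_to_handler_sketch(void)
{
	unsigned long orig;

	/* SAVE_REGS: the traced function's return value (r3/r4) must
	 * survive the call into the tracing core. */
	orig = ftrace_return_to_handler_stub();
	/* addik r15, r3, 0: capture the address before RESTORE_REGS
	 * brings back the caller's r3. */
	/* RESTORE_REGS; rtsd r15, 8: resume in the real caller. */
	return orig;
}
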