perf trace: Exclude the kernel part of the callchain leading to a syscall

The kernel parts are not that useful:

  # trace -m 512 -e nanosleep --call dwarf  usleep 1
     0.065 ( 0.065 ms): usleep/18732 nanosleep(rqtp: 0x7ffc4ee4e200) = 0
                                       syscall_slow_exit_work ([kernel.kallsyms])
                                       do_syscall_64 ([kernel.kallsyms])
                                       return_from_SYSCALL_64 ([kernel.kallsyms])
                                       __nanosleep (/usr/lib64/libc-2.22.so)
                                       usleep (/usr/lib64/libc-2.22.so)
                                       main (/usr/bin/usleep)
                                       __libc_start_main (/usr/lib64/libc-2.22.so)
                                       _start (/usr/bin/usleep)
  #

So let's just use perf_event_attr.exclude_callchain_kernel to avoid
collecting it in the ring buffer:

  # trace -m 512 -e nanosleep --call dwarf  usleep 1
     0.063 ( 0.063 ms): usleep/19212 nanosleep(rqtp: 0x7ffc3df10fb0) = 0
                                       __nanosleep (/usr/lib64/libc-2.22.so)
                                       usleep (/usr/lib64/libc-2.22.so)
                                       main (/usr/bin/usleep)
                                       __libc_start_main (/usr/lib64/libc-2.22.so)
                                       _start (/usr/bin/usleep)
  #
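
For reference, a minimal, self-contained sketch (not perf code) of how
perf_event_attr.exclude_callchain_kernel combines with PERF_SAMPLE_CALLCHAIN
when opening a tracepoint event; the helper name open_sys_exit_event and the
tracepoint_id parameter are illustrative assumptions, and error handling is
omitted:

  #include <linux/perf_event.h>
  #include <sys/syscall.h>
  #include <sys/types.h>
  #include <string.h>
  #include <unistd.h>

  /* Open a tracepoint event, sampling only the user space callchain. */
  static int open_sys_exit_event(pid_t pid, unsigned long long tracepoint_id)
  {
  	struct perf_event_attr attr;

  	memset(&attr, 0, sizeof(attr));
  	attr.size	 = sizeof(attr);
  	attr.type	 = PERF_TYPE_TRACEPOINT;
  	attr.config	 = tracepoint_id; /* e.g. the raw_syscalls:sys_exit id */
  	attr.sample_type = PERF_SAMPLE_CALLCHAIN;
  	/* Drop the kernel side frames, keep only the user space callchain: */
  	attr.exclude_callchain_kernel = 1;

  	return syscall(SYS_perf_event_open, &attr, pid, -1, -1, 0);
  }

With '--call dwarf' perf additionally asks for PERF_SAMPLE_REGS_USER and
PERF_SAMPLE_STACK_USER to unwind the user stack; that is left out of the
sketch for brevity.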

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Milian Wolff <milian.wolff@kdab.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/n/tip-qctu3gqhpim0dfbcp9d86c91@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 63a3cc9..cfa5ce8 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -159,6 +159,7 @@
 	bool			show_comm;
 	bool			show_tool_stats;
 	bool			trace_syscalls;
+	bool			kernel_syscallchains;
 	bool			force;
 	bool			vfs_getname;
 	int			trace_pgfaults;
@@ -2661,6 +2662,15 @@
 	perf_evlist__add(evlist, sys_enter);
 	perf_evlist__add(evlist, sys_exit);
 
+	if (trace->opts.callgraph_set && !trace->kernel_syscallchains) {
+		/*
+		 * We're interested only in the user space callchain
+		 * leading to the syscall, allow overriding that for
+	 * debugging reasons using --kernel-syscall-graph.
+		 */
+		sys_exit->attr.exclude_callchain_kernel = 1;
+	}
+
 	trace->syscalls.events.sys_enter = sys_enter;
 	trace->syscalls.events.sys_exit  = sys_exit;
 
@@ -3221,6 +3231,7 @@
 		.output = stderr,
 		.show_comm = true,
 		.trace_syscalls = true,
+		.kernel_syscallchains = false,
 	};
 	const char *output_name = NULL;
 	const char *ev_qualifier_str = NULL;
@@ -3269,6 +3280,8 @@
 	OPT_CALLBACK(0, "call-graph", &trace.opts,
 		     "record_mode[,record_size]", record_callchain_help,
 		     &record_parse_callchain_opt),
+	OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
+		    "Show the kernel callchains on the syscall exit path"),
 	OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
 			"per thread proc mmap processing timeout in ms"),
 	OPT_END()