Merge branch 'perf/urgent' into perf/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
diff --git a/Documentation/trace/events-msr.txt b/Documentation/trace/events-msr.txt
new file mode 100644
index 0000000..78c383b
--- /dev/null
+++ b/Documentation/trace/events-msr.txt
@@ -0,0 +1,37 @@
+
+The x86 kernel supports tracing most MSR (Model Specific Register) accesses.
+For the definitions of the MSRs on Intel systems, please see the SDM
+at http://www.intel.com/sdm (Volume 3).
+
+Available trace points:
+
+/sys/kernel/debug/tracing/events/msr/
+
+Trace MSR reads
+
+read_msr
+
+msr: MSR number
+val: Value read
+failed: 1 if the access failed, otherwise 0
+
+
+Trace MSR writes
+
+write_msr
+
+msr: MSR number
+val: Value written
+failed: 1 if the access failed, otherwise 0
+
+
+Trace RDPMC in the kernel
+
+rdpmc
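+
+msr: performance counter number (the RDPMC argument)
+val: Value read
+failed: always 0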
+
+The trace data can be post-processed with the postprocess/decode_msr.py script
+
+cat /sys/kernel/debug/tracing/trace | decode_msr.py /usr/src/linux/include/asm/msr-index.h
+
+to add symbolic MSR names.
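+
+For example, to capture a short trace of MSR writes (a sketch assuming
+debugfs is mounted at /sys/kernel/debug; adjust paths for your system):
+
+echo 1 > /sys/kernel/debug/tracing/events/msr/write_msr/enable
+sleep 1
+echo 0 > /sys/kernel/debug/tracing/events/msr/write_msr/enable
+cat /sys/kernel/debug/tracing/trace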
+
diff --git a/Documentation/trace/postprocess/decode_msr.py b/Documentation/trace/postprocess/decode_msr.py
new file mode 100644
index 0000000..0ab40e0
--- /dev/null
+++ b/Documentation/trace/postprocess/decode_msr.py
@@ -0,0 +1,37 @@
+#!/usr/bin/python
+# add symbolic names to read_msr / write_msr in trace
+# decode_msr msr-index.h < trace
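+#
+# Example (hypothetical) trace line before and after decoding:
+#   write_msr: 6e0, value 1f40ca9
+#   write_msr: MSR_IA32_TSC_DEADLINE(6e0), value 1f40ca9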
+import sys
+import re
+
+msrs = dict()
+
+with open(sys.argv[1] if len(sys.argv) > 1 else "msr-index.h", "r") as f:
+	for j in f:
+		m = re.match(r'#define (MSR_\w+)\s+(0x[0-9a-fA-F]+)', j)
+		if m:
+			msrs[int(m.group(2), 16)] = m.group(1)
+
+extra_ranges = (
+	( "MSR_LASTBRANCH_%d_FROM_IP", 0x680, 0x69F ),
+	( "MSR_LASTBRANCH_%d_TO_IP", 0x6C0, 0x6DF ),
+	( "LBR_INFO_%d", 0xdc0, 0xddf ),
+)
+
+for j in sys.stdin:
+	m = re.search(r'(read|write)_msr:\s+([0-9a-f]+)', j)
+	if m:
+		r = None
+		num = int(m.group(2), 16)
+		if num in msrs:
+			r = msrs[num]
+		else:
+			for er in extra_ranges:
+				if er[1] <= num <= er[2]:
+					r = er[0] % (num - er[1],)
+					break
+		if r:
+			j = j.replace(" " + m.group(2), " " + r + "(" + m.group(2) + ")")
+	print j,
+
+
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index ae5fb83..3e86742 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -3,7 +3,6 @@
 
 #include <linux/compiler.h>
 #include <linux/types.h>
-#include <asm/processor.h>
 #include <asm/alternative.h>
 #include <asm/cmpxchg.h>
 #include <asm/rmwcc.h>
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index a11c30b..a984111 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -3,7 +3,6 @@
 
 #include <linux/compiler.h>
 #include <linux/types.h>
-#include <asm/processor.h>
 //#include <asm/cmpxchg.h>
 
 /* An 64bit atomic type */
diff --git a/arch/x86/include/asm/intel_pt.h b/arch/x86/include/asm/intel_pt.h
new file mode 100644
index 0000000..e1a4117
--- /dev/null
+++ b/arch/x86/include/asm/intel_pt.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_X86_INTEL_PT_H
+#define _ASM_X86_INTEL_PT_H
+
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
+void cpu_emergency_stop_pt(void);
+#else
+static inline void cpu_emergency_stop_pt(void) {}
+#endif
+
+#endif /* _ASM_X86_INTEL_PT_H */
diff --git a/arch/x86/include/asm/msr-trace.h b/arch/x86/include/asm/msr-trace.h
new file mode 100644
index 0000000..7567225
--- /dev/null
+++ b/arch/x86/include/asm/msr-trace.h
@@ -0,0 +1,57 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM msr
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE msr-trace
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH asm/
+
+#if !defined(_TRACE_MSR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MSR_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * Tracing for x86 model specific registers. Directly maps to the
+ * RDMSR/WRMSR instructions.
+ */
+
+DECLARE_EVENT_CLASS(msr_trace_class,
+	    TP_PROTO(unsigned msr, u64 val, int failed),
+	    TP_ARGS(msr, val, failed),
+	    TP_STRUCT__entry(
+		    __field(	unsigned,	msr )
+		    __field(    u64,		val )
+		    __field(    int,		failed )
+	    ),
+	    TP_fast_assign(
+		    __entry->msr = msr;
+		    __entry->val = val;
+		    __entry->failed = failed;
+	    ),
+	    TP_printk("%x, value %llx%s",
+		      __entry->msr,
+		      __entry->val,
+		      __entry->failed ? " #GP" : "")
+);
+
+DEFINE_EVENT(msr_trace_class, read_msr,
+	     TP_PROTO(unsigned msr, u64 val, int failed),
+	     TP_ARGS(msr, val, failed)
+);
+
+DEFINE_EVENT(msr_trace_class, write_msr,
+	     TP_PROTO(unsigned msr, u64 val, int failed),
+	     TP_ARGS(msr, val, failed)
+);
+
+DEFINE_EVENT(msr_trace_class, rdpmc,
+	     TP_PROTO(unsigned msr, u64 val, int failed),
+	     TP_ARGS(msr, val, failed)
+);
+
+#endif /* _TRACE_MSR_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 77d8b28..fedd6e6 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -57,11 +57,34 @@
 #define EAX_EDX_RET(val, low, high)	"=A" (val)
 #endif
 
+#ifdef CONFIG_TRACEPOINTS
+/*
+ * Be very careful with includes. This header is prone to include loops.
+ */
+#include <asm/atomic.h>
+#include <linux/tracepoint-defs.h>
+
+extern struct tracepoint __tracepoint_read_msr;
+extern struct tracepoint __tracepoint_write_msr;
+extern struct tracepoint __tracepoint_rdpmc;
+#define msr_tracepoint_active(t) static_key_false(&(t).key)
+extern void do_trace_write_msr(unsigned msr, u64 val, int failed);
+extern void do_trace_read_msr(unsigned msr, u64 val, int failed);
+extern void do_trace_rdpmc(unsigned msr, u64 val, int failed);
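+
+/*
+ * Note: the do_trace_*() helpers are deliberately out of line (they are
+ * defined in arch/x86/lib/msr.c) so that this header only needs the
+ * lightweight linux/tracepoint-defs.h. msr_tracepoint_active() reduces
+ * the disabled-tracepoint fast path to a single static-key branch.
+ */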
+#else
+#define msr_tracepoint_active(t) false
+static inline void do_trace_write_msr(unsigned msr, u64 val, int failed) {}
+static inline void do_trace_read_msr(unsigned msr, u64 val, int failed) {}
+static inline void do_trace_rdpmc(unsigned msr, u64 val, int failed) {}
+#endif
+
 static inline unsigned long long native_read_msr(unsigned int msr)
 {
 	DECLARE_ARGS(val, low, high);
 
 	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
+	if (msr_tracepoint_active(__tracepoint_read_msr))
+		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
 	return EAX_EDX_VAL(val, low, high);
 }
 
@@ -78,6 +101,8 @@
 		     _ASM_EXTABLE(2b, 3b)
 		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
 		     : "c" (msr), [fault] "i" (-EIO));
+	if (msr_tracepoint_active(__tracepoint_read_msr))
+		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
 	return EAX_EDX_VAL(val, low, high);
 }
 
@@ -85,6 +110,8 @@
 				    unsigned low, unsigned high)
 {
 	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
+	if (msr_tracepoint_active(__tracepoint_write_msr))
+		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
 }
 
 /* Can be uninlined because referenced by paravirt */
@@ -102,6 +129,8 @@
 		     : "c" (msr), "0" (low), "d" (high),
 		       [fault] "i" (-EIO)
 		     : "memory");
+	if (msr_tracepoint_active(__tracepoint_write_msr))
+		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
 	return err;
 }
 
@@ -160,6 +189,8 @@
 	DECLARE_ARGS(val, low, high);
 
 	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
+	if (msr_tracepoint_active(__tracepoint_rdpmc))
+		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
 	return EAX_EDX_VAL(val, low, high);
 }
 
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 09b1b0a..660458a 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -745,5 +745,14 @@
 #undef __copy_from_user_overflow
 #undef __copy_to_user_overflow
 
+/*
+ * We rely on the nested NMI work to allow atomic faults from the NMI path; the
+ * nested NMI paths are careful to preserve CR2.
+ *
+ * Caller must use pagefault_enable/disable, or run in interrupt context,
+ * and also do an access_ok() check.
+ */
+#define __copy_from_user_nmi __copy_from_user_inatomic
+
 #endif /* _ASM_X86_UACCESS_H */
 
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 2bf79d7..9dfbba5 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -2250,12 +2250,19 @@
 	ss_base = get_segment_base(regs->ss);
 
 	fp = compat_ptr(ss_base + regs->bp);
+	pagefault_disable();
 	while (entry->nr < PERF_MAX_STACK_DEPTH) {
 		unsigned long bytes;
 		frame.next_frame     = 0;
 		frame.return_address = 0;
 
-		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+		if (!access_ok(VERIFY_READ, fp, 8))
+			break;
+
+		bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
+		if (bytes != 0)
+			break;
+		bytes = __copy_from_user_nmi(&frame.return_address, fp+4, 4);
 		if (bytes != 0)
 			break;
 
@@ -2265,6 +2272,7 @@
 		perf_callchain_store(entry, cs_base + frame.return_address);
 		fp = compat_ptr(ss_base + frame.next_frame);
 	}
+	pagefault_enable();
 	return 1;
 }
 #else
@@ -2302,12 +2310,19 @@
 	if (perf_callchain_user32(regs, entry))
 		return;
 
+	pagefault_disable();
 	while (entry->nr < PERF_MAX_STACK_DEPTH) {
 		unsigned long bytes;
 		frame.next_frame	     = NULL;
 		frame.return_address = 0;
 
-		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+		if (!access_ok(VERIFY_READ, fp, 16))
+			break;
+
+		bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8);
+		if (bytes != 0)
+			break;
+		bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8);
 		if (bytes != 0)
 			break;
 
@@ -2315,8 +2330,9 @@
 			break;
 
 		perf_callchain_store(entry, frame.return_address);
-		fp = frame.next_frame;
+		fp = (void __user *)frame.next_frame;
 	}
+	pagefault_enable();
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index d0e35eb..799e6bd 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -14,17 +14,7 @@
 
 #include <linux/perf_event.h>
 
-#if 0
-#undef wrmsrl
-#define wrmsrl(msr, val) 						\
-do {									\
-	unsigned int _msr = (msr);					\
-	u64 _val = (val);						\
-	trace_printk("wrmsrl(%x, %Lx)\n", (unsigned int)(_msr),		\
-			(unsigned long long)(_val));			\
-	native_write_msr((_msr), (u32)(_val), (u32)(_val >> 32));	\
-} while (0)
-#endif
+/* To enable MSR tracing please use the generic trace points. */
 
 /*
  *          |   NHM/WSM    |      SNB     |
@@ -318,6 +308,10 @@
 #define INTEL_UEVENT_CONSTRAINT(c, n)	\
 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
 
+/* Constraint on specific umask bit only + event */
+#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
+	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))
+
 /* Like UEVENT_CONSTRAINT, but match flags too */
 #define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index e2a4300..33b4b67 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -255,7 +255,7 @@
 	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
 	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
 	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
-	INTEL_UEVENT_CONSTRAINT(0x8a3, 0x4),	/* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
+	INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4),	/* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
 	EVENT_CONSTRAINT_END
 };
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 659f01e..e2fad0c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -42,6 +42,13 @@
 #define LBR_FAR_BIT		8 /* do not capture far branches */
 #define LBR_CALL_STACK_BIT	9 /* enable call stack */
 
+/*
+ * The following bit only exists in Linux; we mask it out before writing
+ * it to the actual MSR, but it helps the perf constraint code understand
+ * that this is a separate configuration.
+ */
+#define LBR_NO_INFO_BIT	       63 /* don't read LBR_INFO. */
+
 #define LBR_KERNEL	(1 << LBR_KERNEL_BIT)
 #define LBR_USER	(1 << LBR_USER_BIT)
 #define LBR_JCC		(1 << LBR_JCC_BIT)
@@ -52,6 +59,7 @@
 #define LBR_IND_JMP	(1 << LBR_IND_JMP_BIT)
 #define LBR_FAR		(1 << LBR_FAR_BIT)
 #define LBR_CALL_STACK	(1 << LBR_CALL_STACK_BIT)
+#define LBR_NO_INFO	(1ULL << LBR_NO_INFO_BIT)
 
 #define LBR_PLM (LBR_KERNEL | LBR_USER)
 
@@ -152,7 +160,7 @@
 	 * did not change.
 	 */
 	if (cpuc->lbr_sel)
-		lbr_select = cpuc->lbr_sel->config;
+		lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
 	if (!pmi)
 		wrmsrl(MSR_LBR_SELECT, lbr_select);
 
@@ -422,6 +430,7 @@
  */
 static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
 {
+	bool need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
 	unsigned long mask = x86_pmu.lbr_nr - 1;
 	int lbr_format = x86_pmu.intel_cap.lbr_format;
 	u64 tos = intel_pmu_lbr_tos();
@@ -442,7 +451,7 @@
 		rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
 		rdmsrl(x86_pmu.lbr_to   + lbr_idx, to);
 
-		if (lbr_format == LBR_FORMAT_INFO) {
+		if (lbr_format == LBR_FORMAT_INFO && need_info) {
 			u64 info;
 
 			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, info);
@@ -590,6 +599,7 @@
 		if (v != LBR_IGN)
 			mask |= v;
 	}
+
 	reg = &event->hw.branch_reg;
 	reg->idx = EXTRA_REG_LBR;
 
@@ -600,6 +610,11 @@
 	 */
 	reg->config = mask ^ x86_pmu.lbr_sel_mask;
 
+	if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
+	    (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
+	    (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
+		reg->config |= LBR_NO_INFO;
+
 	return 0;
 }
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c
index 868e119..c0bbd10 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_pt.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c
@@ -27,6 +27,7 @@
 #include <asm/perf_event.h>
 #include <asm/insn.h>
 #include <asm/io.h>
+#include <asm/intel_pt.h>
 
 #include "perf_event.h"
 #include "intel_pt.h"
@@ -1122,6 +1123,14 @@
 	return 0;
 }
 
+void cpu_emergency_stop_pt(void)
+{
+	struct pt *pt = this_cpu_ptr(&pt_ctx);
+
+	if (pt->handle.event)
+		pt_event_stop(pt->handle.event, PERF_EF_UPDATE);
+}
+
 static __init int pt_init(void)
 {
 	int ret, cpu, prior_warn = 0;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index ed446bd..fb5843d 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -63,7 +63,7 @@
 #define INTEL_RAPL_PP1		0x4	/* pseudo-encoding */
 
 #define NR_RAPL_DOMAINS         0x4
-static const char *rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
+static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
 	"pp0-core",
 	"package",
 	"dram",
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 2c1910f..58f3431 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -35,6 +35,7 @@
 #include <asm/cpu.h>
 #include <asm/reboot.h>
 #include <asm/virtext.h>
+#include <asm/intel_pt.h>
 
 /* Alignment required for elf header segment */
 #define ELF_CORE_HEADER_ALIGN   4096
@@ -125,6 +126,11 @@
 	cpu_emergency_vmxoff();
 	cpu_emergency_svm_disable();
 
+	/*
+	 * Disable Intel PT to stop its logging
+	 */
+	cpu_emergency_stop_pt();
+
 	disable_local_APIC();
 }
 
@@ -169,6 +175,11 @@
 	cpu_emergency_vmxoff();
 	cpu_emergency_svm_disable();
 
+	/*
+	 * Disable Intel PT to stop its logging
+	 */
+	cpu_emergency_stop_pt();
+
 #ifdef CONFIG_X86_IO_APIC
 	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
 	ioapic_zap_locks();
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index 4362373..004c861 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -1,6 +1,8 @@
 #include <linux/module.h>
 #include <linux/preempt.h>
 #include <asm/msr.h>
+#define CREATE_TRACE_POINTS
+#include <asm/msr-trace.h>
 
 struct msr *msrs_alloc(void)
 {
@@ -108,3 +110,27 @@
 {
 	return __flip_bit(msr, bit, false);
 }
+
+#ifdef CONFIG_TRACEPOINTS
+void do_trace_write_msr(unsigned msr, u64 val, int failed)
+{
+	trace_write_msr(msr, val, failed);
+}
+EXPORT_SYMBOL(do_trace_write_msr);
+EXPORT_TRACEPOINT_SYMBOL(write_msr);
+
+void do_trace_read_msr(unsigned msr, u64 val, int failed)
+{
+	trace_read_msr(msr, val, failed);
+}
+EXPORT_SYMBOL(do_trace_read_msr);
+EXPORT_TRACEPOINT_SYMBOL(read_msr);
+
+void do_trace_rdpmc(unsigned counter, u64 val, int failed)
+{
+	trace_rdpmc(counter, val, failed);
+}
+EXPORT_SYMBOL(do_trace_rdpmc);
+EXPORT_TRACEPOINT_SYMBOL(rdpmc);
+
+#endif
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h
new file mode 100644
index 0000000..e1ee97c
--- /dev/null
+++ b/include/linux/tracepoint-defs.h
@@ -0,0 +1,27 @@
+#ifndef TRACEPOINT_DEFS_H
+#define TRACEPOINT_DEFS_H 1
+
+/*
+ * This file can be included directly by headers that only want to access
+ * tracepoint->key to guard out-of-line trace calls. Otherwise
+ * linux/tracepoint.h should be used.
+ */
+
+#include <linux/atomic.h>
+#include <linux/static_key.h>
+
+struct tracepoint_func {
+	void *func;
+	void *data;
+	int prio;
+};
+
+struct tracepoint {
+	const char *name;		/* Tracepoint name */
+	struct static_key key;
+	void (*regfunc)(void);
+	void (*unregfunc)(void);
+	struct tracepoint_func __rcu *funcs;
+};
+
+#endif
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 696a339c..f7c732b 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -17,26 +17,12 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/rcupdate.h>
-#include <linux/static_key.h>
+#include <linux/tracepoint-defs.h>
 
 struct module;
 struct tracepoint;
 struct notifier_block;
 
-struct tracepoint_func {
-	void *func;
-	void *data;
-	int prio;
-};
-
-struct tracepoint {
-	const char *name;		/* Tracepoint name */
-	struct static_key key;
-	void (*regfunc)(void);
-	void (*unregfunc)(void);
-	struct tracepoint_func __rcu *funcs;
-};
-
 struct trace_enum_map {
 	const char		*system;
 	const char		*enum_string;
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index d801bb0..1afe962 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -171,6 +171,9 @@
 	PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT	= 12, /* indirect jumps */
 	PERF_SAMPLE_BRANCH_CALL_SHIFT		= 13, /* direct call */
 
+	PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT	= 14, /* no flags */
+	PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT	= 15, /* no cycles */
+
 	PERF_SAMPLE_BRANCH_MAX_SHIFT		/* non-ABI */
 };
 
@@ -192,6 +195,9 @@
 	PERF_SAMPLE_BRANCH_IND_JUMP	= 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
 	PERF_SAMPLE_BRANCH_CALL		= 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,
 
+	PERF_SAMPLE_BRANCH_NO_FLAGS	= 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
+	PERF_SAMPLE_BRANCH_NO_CYCLES	= 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,
+
 	PERF_SAMPLE_BRANCH_MAX		= 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
 };
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 39cf4a4..c3d61b9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -126,6 +126,37 @@
 	return data.ret;
 }
 
+static void event_function_call(struct perf_event *event,
+				int (*active)(void *),
+				void (*inactive)(void *),
+				void *data)
+{
+	struct perf_event_context *ctx = event->ctx;
+	struct task_struct *task = ctx->task;
+
+	if (!task) {
+		cpu_function_call(event->cpu, active, data);
+		return;
+	}
+
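+	/*
+	 * The remote call fails if the task is no longer running on that
+	 * CPU; in that case take ctx->lock and re-check: if the context is
+	 * still active the task was scheduled back in, so retry the remote
+	 * call, otherwise it is safe to run the inactive variant under the
+	 * lock.
+	 */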
+again:
+	if (!task_function_call(task, active, data))
+		return;
+
+	raw_spin_lock_irq(&ctx->lock);
+	if (ctx->is_active) {
+		/*
+		 * Reload the task pointer, it might have been changed by
+		 * a concurrent perf_event_context_sched_out().
+		 */
+		task = ctx->task;
+		raw_spin_unlock_irq(&ctx->lock);
+		goto again;
+	}
+	inactive(data);
+	raw_spin_unlock_irq(&ctx->lock);
+}
+
 #define EVENT_OWNER_KERNEL ((void *) -1)
 
 static bool is_kernel_event(struct perf_event *event)
@@ -1629,6 +1660,17 @@
 	bool detach_group;
 };
 
+static void ___perf_remove_from_context(void *info)
+{
+	struct remove_event *re = info;
+	struct perf_event *event = re->event;
+	struct perf_event_context *ctx = event->ctx;
+
+	if (re->detach_group)
+		perf_group_detach(event);
+	list_del_event(event, ctx);
+}
+
 /*
  * Cross CPU call to remove a performance event
  *
@@ -1656,7 +1698,6 @@
 	return 0;
 }
 
-
 /*
  * Remove the event from a task's (or a CPU's) list of events.
  *
@@ -1673,7 +1714,6 @@
 static void perf_remove_from_context(struct perf_event *event, bool detach_group)
 {
 	struct perf_event_context *ctx = event->ctx;
-	struct task_struct *task = ctx->task;
 	struct remove_event re = {
 		.event = event,
 		.detach_group = detach_group,
@@ -1681,44 +1721,8 @@
 
 	lockdep_assert_held(&ctx->mutex);
 
-	if (!task) {
-		/*
-		 * Per cpu events are removed via an smp call. The removal can
-		 * fail if the CPU is currently offline, but in that case we
-		 * already called __perf_remove_from_context from
-		 * perf_event_exit_cpu.
-		 */
-		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
-		return;
-	}
-
-retry:
-	if (!task_function_call(task, __perf_remove_from_context, &re))
-		return;
-
-	raw_spin_lock_irq(&ctx->lock);
-	/*
-	 * If we failed to find a running task, but find the context active now
-	 * that we've acquired the ctx->lock, retry.
-	 */
-	if (ctx->is_active) {
-		raw_spin_unlock_irq(&ctx->lock);
-		/*
-		 * Reload the task pointer, it might have been changed by
-		 * a concurrent perf_event_context_sched_out().
-		 */
-		task = ctx->task;
-		goto retry;
-	}
-
-	/*
-	 * Since the task isn't running, its safe to remove the event, us
-	 * holding the ctx->lock ensures the task won't get scheduled in.
-	 */
-	if (detach_group)
-		perf_group_detach(event);
-	list_del_event(event, ctx);
-	raw_spin_unlock_irq(&ctx->lock);
+	event_function_call(event, __perf_remove_from_context,
+			    ___perf_remove_from_context, &re);
 }
 
 /*
@@ -2067,6 +2071,18 @@
 		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
 }
 
+static void ___perf_install_in_context(void *info)
+{
+	struct perf_event *event = info;
+	struct perf_event_context *ctx = event->ctx;
+
+	/*
+	 * Since the task isn't running, it's safe to add the event; holding
+	 * ctx->lock ensures the task won't get scheduled in.
+	 */
+	add_event_to_ctx(event, ctx);
+}
+
 /*
  * Cross CPU call to install and enable a performance event
  *
@@ -2143,48 +2159,14 @@
 			struct perf_event *event,
 			int cpu)
 {
-	struct task_struct *task = ctx->task;
-
 	lockdep_assert_held(&ctx->mutex);
 
 	event->ctx = ctx;
 	if (event->cpu != -1)
 		event->cpu = cpu;
 
-	if (!task) {
-		/*
-		 * Per cpu events are installed via an smp call and
-		 * the install is always successful.
-		 */
-		cpu_function_call(cpu, __perf_install_in_context, event);
-		return;
-	}
-
-retry:
-	if (!task_function_call(task, __perf_install_in_context, event))
-		return;
-
-	raw_spin_lock_irq(&ctx->lock);
-	/*
-	 * If we failed to find a running task, but find the context active now
-	 * that we've acquired the ctx->lock, retry.
-	 */
-	if (ctx->is_active) {
-		raw_spin_unlock_irq(&ctx->lock);
-		/*
-		 * Reload the task pointer, it might have been changed by
-		 * a concurrent perf_event_context_sched_out().
-		 */
-		task = ctx->task;
-		goto retry;
-	}
-
-	/*
-	 * Since the task isn't running, its safe to add the event, us holding
-	 * the ctx->lock ensures the task won't get scheduled in.
-	 */
-	add_event_to_ctx(event, ctx);
-	raw_spin_unlock_irq(&ctx->lock);
+	event_function_call(event, __perf_install_in_context,
+			    ___perf_install_in_context, event);
 }
 
 /*
@@ -4154,6 +4136,22 @@
 	u64 value;
 };
 
+static void ___perf_event_period(void *info)
+{
+	struct period_event *pe = info;
+	struct perf_event *event = pe->event;
+	u64 value = pe->value;
+
+	if (event->attr.freq) {
+		event->attr.sample_freq = value;
+	} else {
+		event->attr.sample_period = value;
+		event->hw.sample_period = value;
+	}
+
+	local64_set(&event->hw.period_left, 0);
+}
+
 static int __perf_event_period(void *info)
 {
 	struct period_event *pe = info;
@@ -4190,8 +4188,6 @@
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
 	struct period_event pe = { .event = event, };
-	struct perf_event_context *ctx = event->ctx;
-	struct task_struct *task;
 	u64 value;
 
 	if (!is_sampling_event(event))
@@ -4206,34 +4202,10 @@
 	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
 		return -EINVAL;
 
-	task = ctx->task;
 	pe.value = value;
 
-	if (!task) {
-		cpu_function_call(event->cpu, __perf_event_period, &pe);
-		return 0;
-	}
-
-retry:
-	if (!task_function_call(task, __perf_event_period, &pe))
-		return 0;
-
-	raw_spin_lock_irq(&ctx->lock);
-	if (ctx->is_active) {
-		raw_spin_unlock_irq(&ctx->lock);
-		task = ctx->task;
-		goto retry;
-	}
-
-	if (event->attr.freq) {
-		event->attr.sample_freq = value;
-	} else {
-		event->attr.sample_period = value;
-		event->hw.sample_period = value;
-	}
-
-	local64_set(&event->hw.period_left, 0);
-	raw_spin_unlock_irq(&ctx->lock);
+	event_function_call(event, __perf_event_period,
+			    ___perf_event_period, &pe);
 
 	return 0;
 }
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 83c33a5b..d51e25a 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -16,6 +16,10 @@
 #include <linux/kernel.h>
 #include <linux/atomic.h>
 
+#ifdef CONFIG_X86
+#include <asm/processor.h>	/* for boot_cpu_has below */
+#endif
+
 #define TEST(bit, op, c_op, val)				\
 do {								\
 	atomic##bit##_set(&v, v0);				\
diff --git a/tools/Makefile b/tools/Makefile
index 7dc820a..0ba0df3 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -96,7 +96,7 @@
 	$(call descend,$(@:_install=),install)
 
 selftests_install:
-	$(call descend,testing/$(@:_clean=),install)
+	$(call descend,testing/$(@:_install=),install)
 
 turbostat_install x86_energy_perf_policy_install:
 	$(call descend,power/x86/$(@:_install=),install)
diff --git a/tools/build/Makefile b/tools/build/Makefile
index a930362..0d5a0e3 100644
--- a/tools/build/Makefile
+++ b/tools/build/Makefile
@@ -25,7 +25,7 @@
 MAKEFLAGS := --no-print-directory
 build     := -f $(srctree)/tools/build/Makefile.build dir=. obj
 
-all: fixdep
+all: $(OUTPUT)fixdep
 
 clean:
 	$(call QUIET_CLEAN, fixdep)
diff --git a/tools/build/Makefile.include b/tools/build/Makefile.include
index 4e09ad6..be630be 100644
--- a/tools/build/Makefile.include
+++ b/tools/build/Makefile.include
@@ -4,7 +4,7 @@
 fixdep:
 else
 fixdep:
-	$(Q)$(MAKE) -C $(srctree)/tools/build fixdep
+	$(Q)$(MAKE) -C $(srctree)/tools/build CFLAGS= LDFLAGS= $(OUTPUT)fixdep
 endif
 
 .PHONY: fixdep
diff --git a/tools/include/linux/string.h b/tools/include/linux/string.h
new file mode 100644
index 0000000..2e2f736c
--- /dev/null
+++ b/tools/include/linux/string.h
@@ -0,0 +1,11 @@
+#ifndef _TOOLS_LINUX_STRING_H_
+#define _TOOLS_LINUX_STRING_H_
+
+#include <linux/types.h>	/* for size_t */
+
+void *memdup(const void *src, size_t len);
+
+int strtobool(const char *s, bool *res);
+
+#endif /* _TOOLS_LINUX_STRING_H_ */
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index a3caaf3..636e3dd 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -71,7 +71,17 @@
 INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/arch/$(ARCH)/include/uapi -I$(srctree)/include/uapi
 FEATURE_CHECK_CFLAGS-bpf = $(INCLUDES)
 
+check_feat := 1
+NON_CHECK_FEAT_TARGETS := clean TAGS tags cscope help
+ifdef MAKECMDGOALS
+ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),)
+  check_feat := 0
+endif
+endif
+
+ifeq ($(check_feat),1)
 include $(srctree)/tools/build/Makefile.feature
+endif
 
 export prefix libdir src obj
 
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index a633105..5bdc6ea 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -83,3 +83,17 @@
 	log_buf[0] = 0;
 	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
 }
+
+int bpf_map_update_elem(int fd, void *key, void *value,
+			u64 flags)
+{
+	union bpf_attr attr;
+
+	bzero(&attr, sizeof(attr));
+	attr.map_fd = fd;
+	attr.key = ptr_to_u64(key);
+	attr.value = ptr_to_u64(value);
+	attr.flags = flags;
+
+	return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
+}
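+
+/*
+ * Usage sketch (map_fd, key and value types are hypothetical):
+ *
+ *	int key = 0;
+ *	long value = 42;
+ *
+ *	err = bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
+ */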
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 854b736..a764655 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -20,4 +20,6 @@
 		     u32 kern_version, char *log_buf,
 		     size_t log_buf_sz);
 
+int bpf_map_update_elem(int fd, void *key, void *value,
+			u64 flags);
 #endif
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index e176bad..a298614 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -152,29 +152,36 @@
 	} *reloc_desc;
 	int nr_reloc;
 
-	int fd;
+	struct {
+		int nr;
+		int *fds;
+	} instances;
+	bpf_program_prep_t preprocessor;
 
 	struct bpf_object *obj;
 	void *priv;
 	bpf_program_clear_priv_t clear_priv;
 };
 
+struct bpf_map {
+	int fd;
+	char *name;
+	struct bpf_map_def def;
+	void *priv;
+	bpf_map_clear_priv_t clear_priv;
+};
+
 static LIST_HEAD(bpf_objects_list);
 
 struct bpf_object {
 	char license[64];
 	u32 kern_version;
-	void *maps_buf;
-	size_t maps_buf_sz;
 
 	struct bpf_program *programs;
 	size_t nr_programs;
-	int *map_fds;
-	/*
-	 * This field is required because maps_buf will be freed and
-	 * maps_buf_sz will be set to 0 after loaded.
-	 */
-	size_t nr_map_fds;
+	struct bpf_map *maps;
+	size_t nr_maps;
+
 	bool loaded;
 
 	/*
@@ -206,10 +213,25 @@
 
 static void bpf_program__unload(struct bpf_program *prog)
 {
+	int i;
+
 	if (!prog)
 		return;
 
-	zclose(prog->fd);
+	/*
+	 * If the object is opened but the program was never loaded,
+	 * it is possible that prog->instances.nr == -1.
+	 */
+	if (prog->instances.nr > 0) {
+		for (i = 0; i < prog->instances.nr; i++)
+			zclose(prog->instances.fds[i]);
+	} else if (prog->instances.nr != -1) {
+		pr_warning("Internal error: instances.nr is %d\n",
+			   prog->instances.nr);
+	}
+
+	prog->instances.nr = -1;
+	zfree(&prog->instances.fds);
 }
 
 static void bpf_program__exit(struct bpf_program *prog)
@@ -260,7 +282,8 @@
 	memcpy(prog->insns, data,
 	       prog->insns_cnt * sizeof(struct bpf_insn));
 	prog->idx = idx;
-	prog->fd = -1;
+	prog->instances.fds = NULL;
+	prog->instances.nr = -1;
 
 	return 0;
 errout:
@@ -469,30 +492,81 @@
 bpf_object__init_maps(struct bpf_object *obj, void *data,
 		      size_t size)
 {
-	if (size == 0) {
+	size_t nr_maps;
+	int i;
+
+	nr_maps = size / sizeof(struct bpf_map_def);
+	if (!data || !nr_maps) {
 		pr_debug("%s doesn't need map definition\n",
 			 obj->path);
 		return 0;
 	}
 
-	obj->maps_buf = malloc(size);
-	if (!obj->maps_buf) {
-		pr_warning("malloc maps failed: %s\n", obj->path);
+	pr_debug("maps in %s: %zd bytes\n", obj->path, size);
+
+	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
+	if (!obj->maps) {
+		pr_warning("alloc maps for object failed\n");
 		return -ENOMEM;
 	}
+	obj->nr_maps = nr_maps;
 
-	obj->maps_buf_sz = size;
-	memcpy(obj->maps_buf, data, size);
-	pr_debug("maps in %s: %ld bytes\n", obj->path, (long)size);
+	for (i = 0; i < nr_maps; i++) {
+		struct bpf_map_def *def = &obj->maps[i].def;
+
+		/*
+		 * Fill all fds with -1 so we won't close an incorrect
+		 * fd (fd=0 is stdin) on failure (zclose won't close
+		 * negative fds).
+		 */
+		obj->maps[i].fd = -1;
+
+		/* Save map definition into obj->maps */
+		*def = ((struct bpf_map_def *)data)[i];
+	}
 	return 0;
 }
 
+static void
+bpf_object__init_maps_name(struct bpf_object *obj, int maps_shndx)
+{
+	int i;
+	Elf_Data *symbols = obj->efile.symbols;
+
+	if (!symbols || maps_shndx < 0)
+		return;
+
+	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
+		GElf_Sym sym;
+		size_t map_idx;
+		const char *map_name;
+
+		if (!gelf_getsym(symbols, i, &sym))
+			continue;
+		if (sym.st_shndx != maps_shndx)
+			continue;
+
+		map_name = elf_strptr(obj->efile.elf,
+				      obj->efile.ehdr.e_shstrndx,
+				      sym.st_name);
+		map_idx = sym.st_value / sizeof(struct bpf_map_def);
+		if (map_idx >= obj->nr_maps) {
+			pr_warning("index of map \"%s\" is buggy: %zu > %zu\n",
+				   map_name, map_idx, obj->nr_maps);
+			continue;
+		}
+		obj->maps[map_idx].name = strdup(map_name);
+		pr_debug("map %zu is \"%s\"\n", map_idx,
+			 obj->maps[map_idx].name);
+	}
+}
+
 static int bpf_object__elf_collect(struct bpf_object *obj)
 {
 	Elf *elf = obj->efile.elf;
 	GElf_Ehdr *ep = &obj->efile.ehdr;
 	Elf_Scn *scn = NULL;
-	int idx = 0, err = 0;
+	int idx = 0, err = 0, maps_shndx = -1;
 
 	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
 	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
@@ -542,10 +616,11 @@
 			err = bpf_object__init_kversion(obj,
 							data->d_buf,
 							data->d_size);
-		else if (strcmp(name, "maps") == 0)
+		else if (strcmp(name, "maps") == 0) {
 			err = bpf_object__init_maps(obj, data->d_buf,
 						    data->d_size);
-		else if (sh.sh_type == SHT_SYMTAB) {
+			maps_shndx = idx;
+		} else if (sh.sh_type == SHT_SYMTAB) {
 			if (obj->efile.symbols) {
 				pr_warning("bpf: multiple SYMTAB in %s\n",
 					   obj->path);
@@ -586,6 +661,9 @@
 		if (err)
 			goto out;
 	}
+
+	if (maps_shndx >= 0)
+		bpf_object__init_maps_name(obj, maps_shndx);
 out:
 	return err;
 }
@@ -668,37 +746,15 @@
 bpf_object__create_maps(struct bpf_object *obj)
 {
 	unsigned int i;
-	size_t nr_maps;
-	int *pfd;
 
-	nr_maps = obj->maps_buf_sz / sizeof(struct bpf_map_def);
-	if (!obj->maps_buf || !nr_maps) {
-		pr_debug("don't need create maps for %s\n",
-			 obj->path);
-		return 0;
-	}
+	for (i = 0; i < obj->nr_maps; i++) {
+		struct bpf_map_def *def = &obj->maps[i].def;
+		int *pfd = &obj->maps[i].fd;
 
-	obj->map_fds = malloc(sizeof(int) * nr_maps);
-	if (!obj->map_fds) {
-		pr_warning("realloc perf_bpf_map_fds failed\n");
-		return -ENOMEM;
-	}
-	obj->nr_map_fds = nr_maps;
-
-	/* fill all fd with -1 */
-	memset(obj->map_fds, -1, sizeof(int) * nr_maps);
-
-	pfd = obj->map_fds;
-	for (i = 0; i < nr_maps; i++) {
-		struct bpf_map_def def;
-
-		def = *(struct bpf_map_def *)(obj->maps_buf +
-				i * sizeof(struct bpf_map_def));
-
-		*pfd = bpf_create_map(def.type,
-				      def.key_size,
-				      def.value_size,
-				      def.max_entries);
+		*pfd = bpf_create_map(def->type,
+				      def->key_size,
+				      def->value_size,
+				      def->max_entries);
 		if (*pfd < 0) {
 			size_t j;
 			int err = *pfd;
@@ -706,22 +762,17 @@
 			pr_warning("failed to create map: %s\n",
 				   strerror(errno));
 			for (j = 0; j < i; j++)
-				zclose(obj->map_fds[j]);
-			obj->nr_map_fds = 0;
-			zfree(&obj->map_fds);
+				zclose(obj->maps[j].fd);
 			return err;
 		}
 		pr_debug("create map: fd=%d\n", *pfd);
-		pfd++;
 	}
 
-	zfree(&obj->maps_buf);
-	obj->maps_buf_sz = 0;
 	return 0;
 }
 
 static int
-bpf_program__relocate(struct bpf_program *prog, int *map_fds)
+bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
 {
 	int i;
 
@@ -741,7 +792,7 @@
 			return -LIBBPF_ERRNO__RELOC;
 		}
 		insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
-		insns[insn_idx].imm = map_fds[map_idx];
+		insns[insn_idx].imm = obj->maps[map_idx].fd;
 	}
 
 	zfree(&prog->reloc_desc);
@@ -760,7 +811,7 @@
 	for (i = 0; i < obj->nr_programs; i++) {
 		prog = &obj->programs[i];
 
-		err = bpf_program__relocate(prog, obj->map_fds);
+		err = bpf_program__relocate(prog, obj);
 		if (err) {
 			pr_warning("failed to relocate '%s'\n",
 				   prog->section_name);
@@ -784,8 +835,7 @@
 		Elf_Data *data = obj->efile.reloc[i].data;
 		int idx = shdr->sh_info;
 		struct bpf_program *prog;
-		size_t nr_maps = obj->maps_buf_sz /
-				 sizeof(struct bpf_map_def);
+		size_t nr_maps = obj->nr_maps;
 
 		if (shdr->sh_type != SHT_REL) {
 			pr_warning("internal error at %d\n", __LINE__);
@@ -860,13 +910,73 @@
 bpf_program__load(struct bpf_program *prog,
 		  char *license, u32 kern_version)
 {
-	int err, fd;
+	int err = 0, fd, i;
 
-	err = load_program(prog->insns, prog->insns_cnt,
-			   license, kern_version, &fd);
-	if (!err)
-		prog->fd = fd;
+	if (prog->instances.nr < 0 || !prog->instances.fds) {
+		if (prog->preprocessor) {
+			pr_warning("Internal error: can't load program '%s'\n",
+				   prog->section_name);
+			return -LIBBPF_ERRNO__INTERNAL;
+		}
 
+		prog->instances.fds = malloc(sizeof(int));
+		if (!prog->instances.fds) {
+			pr_warning("Not enough memory for BPF fds\n");
+			return -ENOMEM;
+		}
+		prog->instances.nr = 1;
+		prog->instances.fds[0] = -1;
+	}
+
+	if (!prog->preprocessor) {
+		if (prog->instances.nr != 1) {
+			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
+				   prog->section_name, prog->instances.nr);
+		}
+		err = load_program(prog->insns, prog->insns_cnt,
+				   license, kern_version, &fd);
+		if (!err)
+			prog->instances.fds[0] = fd;
+		goto out;
+	}
+
+	for (i = 0; i < prog->instances.nr; i++) {
+		struct bpf_prog_prep_result result;
+		bpf_program_prep_t preprocessor = prog->preprocessor;
+
+		bzero(&result, sizeof(result));
+		err = preprocessor(prog, i, prog->insns,
+				   prog->insns_cnt, &result);
+		if (err) {
+			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
+				   i, prog->section_name);
+			goto out;
+		}
+
+		if (!result.new_insn_ptr || !result.new_insn_cnt) {
+			pr_debug("Skip loading the %dth instance of program '%s'\n",
+				 i, prog->section_name);
+			prog->instances.fds[i] = -1;
+			if (result.pfd)
+				*result.pfd = -1;
+			continue;
+		}
+
+		err = load_program(result.new_insn_ptr,
+				   result.new_insn_cnt,
+				   license, kern_version, &fd);
+
+		if (err) {
+			pr_warning("Loading the %dth instance of program '%s' failed\n",
+					i, prog->section_name);
+			goto out;
+		}
+
+		if (result.pfd)
+			*result.pfd = fd;
+		prog->instances.fds[i] = fd;
+	}
+out:
 	if (err)
 		pr_warning("failed to load program '%s'\n",
 			   prog->section_name);
@@ -970,10 +1080,8 @@
 	if (!obj)
 		return -EINVAL;
 
-	for (i = 0; i < obj->nr_map_fds; i++)
-		zclose(obj->map_fds[i]);
-	zfree(&obj->map_fds);
-	obj->nr_map_fds = 0;
+	for (i = 0; i < obj->nr_maps; i++)
+		zclose(obj->maps[i].fd);
 
 	for (i = 0; i < obj->nr_programs; i++)
 		bpf_program__unload(&obj->programs[i]);
@@ -1016,7 +1124,16 @@
 	bpf_object__elf_finish(obj);
 	bpf_object__unload(obj);
 
-	zfree(&obj->maps_buf);
+	for (i = 0; i < obj->nr_maps; i++) {
+		zfree(&obj->maps[i].name);
+		if (obj->maps[i].clear_priv)
+			obj->maps[i].clear_priv(&obj->maps[i],
+						obj->maps[i].priv);
+		obj->maps[i].priv = NULL;
+		obj->maps[i].clear_priv = NULL;
+	}
+	zfree(&obj->maps);
+	obj->nr_maps = 0;
 
 	if (obj->programs && obj->nr_programs) {
 		for (i = 0; i < obj->nr_programs; i++)
@@ -1121,5 +1238,142 @@
 
 int bpf_program__fd(struct bpf_program *prog)
 {
-	return prog->fd;
+	return bpf_program__nth_fd(prog, 0);
+}
+
+int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
+			  bpf_program_prep_t prep)
+{
+	int *instances_fds;
+
+	if (nr_instances <= 0 || !prep)
+		return -EINVAL;
+
+	if (prog->instances.nr > 0 || prog->instances.fds) {
+		pr_warning("Can't set pre-processor after loading\n");
+		return -EINVAL;
+	}
+
+	instances_fds = malloc(sizeof(int) * nr_instances);
+	if (!instances_fds) {
+		pr_warning("alloc memory failed for fds\n");
+		return -ENOMEM;
+	}
+
+	/* fill all fd with -1 */
+	memset(instances_fds, -1, sizeof(int) * nr_instances);
+
+	prog->instances.nr = nr_instances;
+	prog->instances.fds = instances_fds;
+	prog->preprocessor = prep;
+	return 0;
+}
+
+int bpf_program__nth_fd(struct bpf_program *prog, int n)
+{
+	int fd;
+
+	if (n >= prog->instances.nr || n < 0) {
+		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
+			   n, prog->section_name, prog->instances.nr);
+		return -EINVAL;
+	}
+
+	fd = prog->instances.fds[n];
+	if (fd < 0) {
+		pr_warning("%dth instance of program '%s' is invalid\n",
+			   n, prog->section_name);
+		return -ENOENT;
+	}
+
+	return fd;
+}
+
+int bpf_map__get_fd(struct bpf_map *map)
+{
+	if (!map)
+		return -EINVAL;
+
+	return map->fd;
+}
+
+int bpf_map__get_def(struct bpf_map *map, struct bpf_map_def *pdef)
+{
+	if (!map || !pdef)
+		return -EINVAL;
+
+	*pdef = map->def;
+	return 0;
+}
+
+const char *bpf_map__get_name(struct bpf_map *map)
+{
+	if (!map)
+		return NULL;
+	return map->name;
+}
+
+int bpf_map__set_private(struct bpf_map *map, void *priv,
+			 bpf_map_clear_priv_t clear_priv)
+{
+	if (!map)
+		return -EINVAL;
+
+	if (map->priv) {
+		if (map->clear_priv)
+			map->clear_priv(map, map->priv);
+	}
+
+	map->priv = priv;
+	map->clear_priv = clear_priv;
+	return 0;
+}
+
+int bpf_map__get_private(struct bpf_map *map, void **ppriv)
+{
+	if (!map)
+		return -EINVAL;
+
+	if (ppriv)
+		*ppriv = map->priv;
+	return 0;
+}
+
+struct bpf_map *
+bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
+{
+	size_t idx;
+	struct bpf_map *s, *e;
+
+	if (!obj || !obj->maps)
+		return NULL;
+
+	s = obj->maps;
+	e = obj->maps + obj->nr_maps;
+
+	if (prev == NULL)
+		return s;
+
+	if ((prev < s) || (prev >= e)) {
+		pr_warning("error in %s: map handler doesn't belong to object\n",
+			   __func__);
+		return NULL;
+	}
+
+	idx = (prev - obj->maps) + 1;
+	if (idx >= obj->nr_maps)
+		return NULL;
+	return &obj->maps[idx];
+}
+
+struct bpf_map *
+bpf_object__get_map_by_name(struct bpf_object *obj, const char *name)
+{
+	struct bpf_map *pos;
+
+	bpf_map__for_each(pos, obj) {
+		if (strcmp(pos->name, name) == 0)
+			return pos;
+	}
+	return NULL;
 }
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index c9a9aef..a51594c 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -88,6 +88,70 @@
 
 int bpf_program__fd(struct bpf_program *prog);
 
+struct bpf_insn;
+
+/*
+ * Libbpf allows callers to adjust BPF programs before they are loaded
+ * into the kernel. One program in an object file can be transformed
+ * into multiple variants to be attached to different code.
+ *
+ * bpf_program_prep_t, bpf_program__set_prep and bpf_program__nth_fd
+ * are APIs for this purpose.
+ *
+ * - bpf_program_prep_t:
+ *   It defines a 'preprocessor', which is a caller defined function
+ *   passed to libbpf through bpf_program__set_prep(), and will be
+ *   called before the program is loaded. The preprocessor should
+ *   adjust the program once for each instance, according to the
+ *   instance index passed to it.
+ *
+ * - bpf_program__set_prep:
+ *   Attaches a preprocessor to a BPF program. The number of instances
+ *   that should be created is also passed through this function.
+ *
+ * - bpf_program__nth_fd:
+ *   After the program is loaded, get the resulting fd from the BPF
+ *   program for each instance.
+ *
+ * If bpf_program__set_prep() is not used, the program will be loaded
+ * without adjustment during bpf_object__load(). The program then has
+ * only one instance, and bpf_program__fd(prog) is equal to
+ * bpf_program__nth_fd(prog, 0).
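+ *
+ * A minimal usage sketch (my_prep, the instance count and the obj/prog
+ * variables are hypothetical; error handling omitted):
+ *
+ *	static int my_prep(struct bpf_program *prog, int n,
+ *			   struct bpf_insn *insns, int insns_cnt,
+ *			   struct bpf_prog_prep_result *res)
+ *	{
+ *		.. adjust insns for instance n ..
+ *		res->new_insn_ptr = insns;
+ *		res->new_insn_cnt = insns_cnt;
+ *		res->pfd = NULL;
+ *		return 0;
+ *	}
+ *
+ *	bpf_program__set_prep(prog, 4, my_prep);
+ *	bpf_object__load(obj);
+ *	fd = bpf_program__nth_fd(prog, 2);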
+ */
+
+struct bpf_prog_prep_result {
+	/*
+	 * If not NULL, load new instruction array.
+	 * If set to NULL, don't load this instance.
+	 */
+	struct bpf_insn *new_insn_ptr;
+	int new_insn_cnt;
+
+	/* If not NULL, result fd is set to it */
+	int *pfd;
+};
+
+/*
+ * Parameters of bpf_program_prep_t:
+ *  - prog:	The bpf_program being loaded.
+ *  - n:	Index of instance being generated.
+ *  - insns:	BPF instructions array.
+ *  - insns_cnt: Number of instructions in insns.
+ *  - res:	Output parameter, result of transformation.
+ *
+ * Return value:
+ *  - Zero: pre-processing success.
+ *  - Non-zero: pre-processing failed, stop loading.
+ */
+typedef int (*bpf_program_prep_t)(struct bpf_program *prog, int n,
+				  struct bpf_insn *insns, int insns_cnt,
+				  struct bpf_prog_prep_result *res);
+
+int bpf_program__set_prep(struct bpf_program *prog, int nr_instance,
+			  bpf_program_prep_t prep);
+
+int bpf_program__nth_fd(struct bpf_program *prog, int n);
+
 /*
  * We don't need __attribute__((packed)) now since it is
  * unnecessary for 'bpf_map_def' because they are all aligned.
@@ -101,4 +165,28 @@
 	unsigned int max_entries;
 };
 
+/*
+ * There is another 'struct bpf_map' in include/linux/map.h. However,
+ * that is not a uapi header, so there is no need to worry about a
+ * name clash.
+ */
+struct bpf_map;
+struct bpf_map *
+bpf_object__get_map_by_name(struct bpf_object *obj, const char *name);
+
+struct bpf_map *
+bpf_map__next(struct bpf_map *map, struct bpf_object *obj);
+#define bpf_map__for_each(pos, obj)		\
+	for ((pos) = bpf_map__next(NULL, (obj));	\
+	     (pos) != NULL;				\
+	     (pos) = bpf_map__next((pos), (obj)))
+
+int bpf_map__get_fd(struct bpf_map *map);
+int bpf_map__get_def(struct bpf_map *map, struct bpf_map_def *pdef);
+const char *bpf_map__get_name(struct bpf_map *map);
+
+typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
+int bpf_map__set_private(struct bpf_map *map, void *priv,
+			 bpf_map_clear_priv_t clear_priv);
+int bpf_map__get_private(struct bpf_map *map, void **ppriv);
+
 #endif
diff --git a/tools/lib/string.c b/tools/lib/string.c
new file mode 100644
index 0000000..065e54f
--- /dev/null
+++ b/tools/lib/string.c
@@ -0,0 +1,62 @@
+/*
+ *  linux/tools/lib/string.c
+ *
+ *  Copied from linux/lib/string.c, where it is:
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  More specifically, the first copied function was strtobool, which
+ *  was introduced by:
+ *
+ *  d0f1fed29e6e ("Add a strtobool function matching semantics of existing in kernel equivalents")
+ *  Author: Jonathan Cameron <jic23@cam.ac.uk>
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <linux/string.h>
+
+/**
+ * memdup - duplicate region of memory
+ *
+ * @src: memory region to duplicate
+ * @len: memory region length
+ */
+void *memdup(const void *src, size_t len)
+{
+	void *p = malloc(len);
+
+	if (p)
+		memcpy(p, src, len);
+
+	return p;
+}
+
+/**
+ * strtobool - convert common user inputs into boolean values
+ * @s: input string
+ * @res: result
+ *
+ * This routine returns 0 iff the first character is one of 'Yy1Nn0'.
+ * Otherwise it will return -EINVAL.  Value pointed to by res is
+ * updated upon finding a match.
+ */
+int strtobool(const char *s, bool *res)
+{
+	switch (s[0]) {
+	case 'y':
+	case 'Y':
+	case '1':
+		*res = true;
+		break;
+	case 'n':
+	case 'N':
+	case '0':
+		*res = false;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
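+
+/*
+ * Usage sketch (str is a hypothetical user-supplied string):
+ *
+ *	bool enabled;
+ *
+ *	if (strtobool(str, &enabled) < 0)
+ *		return -EINVAL;
+ */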
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 2a912df..68276f3 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -4968,13 +4968,12 @@
 				    sizeof(long) != 8) {
 					char *p;
 
-					ls = 2;
 					/* make %l into %ll */
-					p = strchr(format, 'l');
-					if (p)
+					if (ls == 1 && (p = strchr(format, 'l')))
 						memmove(p+1, p, strlen(p)+1);
 					else if (strcmp(format, "%p") == 0)
 						strcpy(format, "0x%llx");
+					ls = 2;
 				}
 				switch (ls) {
 				case -2:
diff --git a/tools/perf/Build b/tools/perf/Build
index 7223745..2c7aaf2 100644
--- a/tools/perf/Build
+++ b/tools/perf/Build
@@ -1,5 +1,6 @@
 perf-y += builtin-bench.o
 perf-y += builtin-annotate.o
+perf-y += builtin-config.o
 perf-y += builtin-diff.o
 perf-y += builtin-evlist.o
 perf-y += builtin-help.o
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
new file mode 100644
index 0000000..b9ca1e3
--- /dev/null
+++ b/tools/perf/Documentation/perf-config.txt
@@ -0,0 +1,103 @@
+perf-config(1)
+==============
+
+NAME
+----
+perf-config - Get and set variables in a configuration file.
+
+SYNOPSIS
+--------
+[verse]
+'perf config' -l | --list
+
+DESCRIPTION
+-----------
+You can manage variables in a configuration file with this command.
+
+OPTIONS
+-------
+
+-l::
+--list::
+	Show current config variables, name and value, for all sections.
+
+CONFIGURATION FILE
+------------------
+
+The perf configuration file contains many variables to change various
+aspects of each of its tools, including output, disk usage, etc.
+The '$HOME/.perfconfig' file is used to store a per-user configuration.
+The file '$(sysconfdir)/perfconfig' can be used to
+store a system-wide default configuration.
+
+Syntax
+~~~~~~
+
+The file consists of sections. A section starts with its name
+surrounded by square brackets and continues until the next section
+begins. Each variable must be in a section and have the form
+'name = value', for example:
+
+	[section]
+		name1 = value1
+		name2 = value2
+
+Section names are case sensitive and can contain any characters except
+newline (double quote `"` and backslash have to be escaped as `\"` and `\\`,
+respectively). Section headers can't span multiple lines.
+
+Example
+~~~~~~~
+
+Given a $HOME/.perfconfig like this:
+
+#
+# This is the config file, and
+# a '#' or ';' character indicates a comment
+#
+
+	[colors]
+		# Color variables
+		top = red, default
+		medium = green, default
+		normal = lightgray, default
+		selected = white, lightgray
+		code = blue, default
+		addr = magenta, default
+		root = white, blue
+
+	[tui]
+		# Defaults if linked with libslang
+		report = on
+		annotate = on
+		top = on
+
+	[buildid]
+		# Default, disable using /dev/null
+		dir = ~/.debug
+
+	[annotate]
+		# Defaults
+		hide_src_code = false
+		use_offset = true
+		jump_arrows = true
+		show_nr_jumps = false
+
+	[help]
+		# Format can be man, info, web or html
+		format = man
+		autocorrect = 0
+
+	[ui]
+		show-headers = true
+
+	[call-graph]
+		# fp (framepointer), dwarf
+		record-mode = fp
+		print-type = graph
+		order = caller
+		sort-key = function
+
+SEE ALSO
+--------
+linkperf:perf[1]
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 5ce8da1..dab99ed 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -170,17 +170,18 @@
         Dump raw trace in ASCII.
 
 -g::
---call-graph=<print_type,threshold[,print_limit],order,sort_key,branch>::
+--call-graph=<print_type,threshold[,print_limit],order,sort_key[,branch],value>::
         Display call chains using type, min percent threshold, print limit,
-	call order, sort key and branch.  Note that ordering of parameters is not
-	fixed so any parement can be given in an arbitraty order.  One exception
-	is the print_limit which should be preceded by threshold.
+	call order, sort key, optional branch and value.  Note that the ordering
+	of parameters is not fixed, so any parameter can be given in an arbitrary
+	order.  One exception is print_limit, which should be preceded by threshold.
 
 	print_type can be either:
 	- flat: single column, linear exposure of call chains.
 	- graph: use a graph tree, displaying absolute overhead rates. (default)
 	- fractal: like graph, but displays relative rates. Each branch of
 		 the tree is considered as a new profiled object.
+	- folded: call chains are displayed in a line, separated by semicolons
 	- none: disable call chain display.
 
 	threshold is a percentage value which specifies a minimum percent to be
@@ -204,6 +205,11 @@
 	- branch: include last branch information in callgraph when available.
 	          Usually more convenient to use --branch-history for this.
 
+	value can be:
+	- percent: display overhead percent (default)
+	- period: display event period
+	- count: display event count
+
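+	For example, to show folded call chains annotated with event counts
+	(combining the folded print_type with the count value described above):
+
+	  perf report -g folded,count
+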
 --children::
 	Accumulate callchain of children to parent entry so that they can
 	show up in the output.  The output will have a new "Children" column
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 39c38cb..2562eac 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -22,6 +22,7 @@
 tools/lib/bpf
 tools/lib/hweight.c
 tools/lib/rbtree.c
+tools/lib/string.c
 tools/lib/symbol/kallsyms.c
 tools/lib/symbol/kallsyms.h
 tools/lib/util/find_next_bit.c
@@ -50,6 +51,7 @@
 tools/include/linux/poison.h
 tools/include/linux/rbtree.h
 tools/include/linux/rbtree_augmented.h
+tools/include/linux/string.h
 tools/include/linux/types.h
 tools/include/linux/err.h
 include/asm-generic/bitops/arch_hweight.h
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 0d19d54..929a32b 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -420,7 +420,7 @@
 	$(call QUIET_CLEAN, libtraceevent)
 	$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) O=$(OUTPUT) clean >/dev/null
 
-install-traceevent-plugins: $(LIBTRACEEVENT)
+install-traceevent-plugins: libtraceevent_plugins
 	$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) install_plugins
 
 $(LIBAPI): fixdep FORCE
diff --git a/tools/perf/arch/x86/include/arch-tests.h b/tools/perf/arch/x86/include/arch-tests.h
index 7ed00f4..b48de2f 100644
--- a/tools/perf/arch/x86/include/arch-tests.h
+++ b/tools/perf/arch/x86/include/arch-tests.h
@@ -2,10 +2,10 @@
 #define ARCH_TESTS_H
 
 /* Tests */
-int test__rdpmc(void);
-int test__perf_time_to_tsc(void);
-int test__insn_x86(void);
-int test__intel_cqm_count_nmi_context(void);
+int test__rdpmc(int subtest);
+int test__perf_time_to_tsc(int subtest);
+int test__insn_x86(int subtest);
+int test__intel_cqm_count_nmi_context(int subtest);
 
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
 struct thread;
diff --git a/tools/perf/arch/x86/tests/insn-x86.c b/tools/perf/arch/x86/tests/insn-x86.c
index b6115df..08d9b2bc 100644
--- a/tools/perf/arch/x86/tests/insn-x86.c
+++ b/tools/perf/arch/x86/tests/insn-x86.c
@@ -171,7 +171,7 @@
  * verbose (-v) option to see all the instructions and whether or not they
  * decoded successfully.
  */
-int test__insn_x86(void)
+int test__insn_x86(int subtest __maybe_unused)
 {
 	int ret = 0;
 
diff --git a/tools/perf/arch/x86/tests/intel-cqm.c b/tools/perf/arch/x86/tests/intel-cqm.c
index d28c1b6..94e0cb7 100644
--- a/tools/perf/arch/x86/tests/intel-cqm.c
+++ b/tools/perf/arch/x86/tests/intel-cqm.c
@@ -33,7 +33,7 @@
  * the last read counter value to avoid triggering a WARN_ON_ONCE() in
  * smp_call_function_many() caused by sending IPIs from NMI context.
  */
-int test__intel_cqm_count_nmi_context(void)
+int test__intel_cqm_count_nmi_context(int subtest __maybe_unused)
 {
 	struct perf_evlist *evlist = NULL;
 	struct perf_evsel *evsel = NULL;
diff --git a/tools/perf/arch/x86/tests/perf-time-to-tsc.c b/tools/perf/arch/x86/tests/perf-time-to-tsc.c
index 658cd20..a289aa8 100644
--- a/tools/perf/arch/x86/tests/perf-time-to-tsc.c
+++ b/tools/perf/arch/x86/tests/perf-time-to-tsc.c
@@ -35,7 +35,7 @@
  * %0 is returned, otherwise %-1 is returned.  If TSC conversion is not
  * supported then the test passes but " (not supported)" is printed.
  */
-int test__perf_time_to_tsc(void)
+int test__perf_time_to_tsc(int subtest __maybe_unused)
 {
 	struct record_opts opts = {
 		.mmap_pages	     = UINT_MAX,
diff --git a/tools/perf/arch/x86/tests/rdpmc.c b/tools/perf/arch/x86/tests/rdpmc.c
index e768821..7bb0d13 100644
--- a/tools/perf/arch/x86/tests/rdpmc.c
+++ b/tools/perf/arch/x86/tests/rdpmc.c
@@ -149,7 +149,7 @@
 	return 0;
 }
 
-int test__rdpmc(void)
+int test__rdpmc(int subtest __maybe_unused)
 {
 	int status = 0;
 	int wret = 0;
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
index ff63649f..4659703 100644
--- a/tools/perf/arch/x86/util/Build
+++ b/tools/perf/arch/x86/util/Build
@@ -5,6 +5,7 @@
 libperf-y += perf_regs.o
 
 libperf-$(CONFIG_DWARF) += dwarf-regs.o
+libperf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
 
 libperf-$(CONFIG_LIBUNWIND)          += unwind-libunwind.o
 libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
diff --git a/tools/perf/builtin-config.c b/tools/perf/builtin-config.c
new file mode 100644
index 0000000..427ea7a
--- /dev/null
+++ b/tools/perf/builtin-config.c
@@ -0,0 +1,66 @@
+/*
+ * builtin-config.c
+ *
+ * Copyright (C) 2015, Taeung Song <treeze.taeung@gmail.com>
+ *
+ */
+#include "builtin.h"
+
+#include "perf.h"
+
+#include "util/cache.h"
+#include "util/parse-options.h"
+#include "util/util.h"
+#include "util/debug.h"
+
+static const char * const config_usage[] = {
+	"perf config [options]",
+	NULL
+};
+
+enum actions {
+	ACTION_LIST = 1
+} actions;
+
+static struct option config_options[] = {
+	OPT_SET_UINT('l', "list", &actions,
+		     "show current config variables", ACTION_LIST),
+	OPT_END()
+};
+
+static int show_config(const char *key, const char *value,
+		       void *cb __maybe_unused)
+{
+	if (value)
+		printf("%s=%s\n", key, value);
+	else
+		printf("%s\n", key);
+
+	return 0;
+}
+
+int cmd_config(int argc, const char **argv, const char *prefix __maybe_unused)
+{
+	int ret = 0;
+
+	argc = parse_options(argc, argv, config_options, config_usage,
+			     PARSE_OPT_STOP_AT_NON_OPTION);
+
+	switch (actions) {
+	case ACTION_LIST:
+		if (argc) {
+			pr_err("Error: takes no arguments\n");
+			parse_options_usage(config_usage, config_options, "l", 1);
+		} else {
+			ret = perf_config(show_config, NULL);
+			if (ret < 0)
+				pr_err("Nothing configured, "
+				       "please check your ~/.perfconfig file\n");
+		}
+		break;
+	default:
+		usage_with_options(config_usage, config_options);
+	}
+
+	return ret;
+}
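
The new builtin leans on perf's config-callback pattern: perf_config() walks every
configured variable and invokes the supplied function once per key/value pair, so
show_config() above only has to print. Below is a minimal standalone sketch of that
pattern with a deliberately simplified parser (real perf config handles sections,
comments and quoting; the function name sketch_config_walk is illustrative only):

#include <stdio.h>
#include <string.h>

typedef int (*config_fn_t)(const char *key, const char *value, void *cb);

/* Visit each "key=value" line and hand it to 'fn'; a bare key is
 * passed with a NULL value, which is exactly the case show_config()
 * above handles.  Sections, quoting etc. are omitted here. */
static int sketch_config_walk(const char *path, config_fn_t fn, void *cb)
{
	char line[1024];
	FILE *fp = fopen(path, "r");

	if (!fp)
		return -1;

	while (fgets(line, sizeof(line), fp)) {
		char *eq;

		line[strcspn(line, "\n")] = '\0';
		eq = strchr(line, '=');
		if (eq)
			*eq++ = '\0';
		if (fn(line, eq, cb) < 0)
			break;
	}

	fclose(fp);
	return 0;
}

Fed a file containing report.children=true, this would call the callback with key
"report.children" and value "true", matching one line of 'perf config -l' output.
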
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index f256fac..af5db88 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -45,7 +45,6 @@
 	struct perf_tool	tool;
 	struct perf_session	*session;
 	bool			use_tui, use_gtk, use_stdio;
-	bool			hide_unresolved;
 	bool			dont_use_callchains;
 	bool			show_full_info;
 	bool			show_threads;
@@ -146,7 +145,7 @@
 	struct hist_entry_iter iter = {
 		.evsel 			= evsel,
 		.sample 		= sample,
-		.hide_unresolved 	= rep->hide_unresolved,
+		.hide_unresolved 	= symbol_conf.hide_unresolved,
 		.add_entry_cb 		= hist_iter__report_callback,
 	};
 	int ret = 0;
@@ -157,7 +156,7 @@
 		return -1;
 	}
 
-	if (rep->hide_unresolved && al.sym == NULL)
+	if (symbol_conf.hide_unresolved && al.sym == NULL)
 		goto out_put;
 
 	if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
@@ -514,20 +513,26 @@
 	if (rep->cpu_list) {
 		ret = perf_session__cpu_bitmap(session, rep->cpu_list,
 					       rep->cpu_bitmap);
-		if (ret)
+		if (ret) {
+			ui__error("failed to set cpu bitmap\n");
 			return ret;
+		}
 	}
 
 	if (rep->show_threads)
 		perf_read_values_init(&rep->show_threads_values);
 
 	ret = report__setup_sample_type(rep);
-	if (ret)
+	if (ret) {
+		/* report__setup_sample_type() already showed error message */
 		return ret;
+	}
 
 	ret = perf_session__process_events(session);
-	if (ret)
+	if (ret) {
+		ui__error("failed to process sample\n");
 		return ret;
+	}
 
 	report__warn_kptr_restrict(rep);
 
@@ -625,7 +630,7 @@
 	return 0;
 }
 
-#define CALLCHAIN_DEFAULT_OPT  "graph,0.5,caller,function"
+#define CALLCHAIN_DEFAULT_OPT  "graph,0.5,caller,function,percent"
 
 const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
 				     CALLCHAIN_REPORT_HELP
@@ -708,7 +713,7 @@
 	OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
 		    "Only display entries with parent-match"),
 	OPT_CALLBACK_DEFAULT('g', "call-graph", &report,
-			     "print_type,threshold[,print_limit],order,sort_key[,branch]",
+			     "print_type,threshold[,print_limit],order,sort_key[,branch],value",
 			     report_callchain_help, &report_parse_callchain_opt,
 			     callchain_default_opt),
 	OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
@@ -740,7 +745,7 @@
 	OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator",
 		   "separator for columns, no spaces will be added between "
 		   "columns '.' is reserved."),
-	OPT_BOOLEAN('U', "hide-unresolved", &report.hide_unresolved,
+	OPT_BOOLEAN('U', "hide-unresolved", &symbol_conf.hide_unresolved,
 		    "Only display entries resolved to a symbol"),
 	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
 		    "Look for files with symbols relative to this directory"),
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 72b5deb..3c3f8d0 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -588,8 +588,17 @@
 	printf("  %-4s ", str);
 }
 
-static void process_event(union perf_event *event, struct perf_sample *sample,
-			  struct perf_evsel *evsel, struct addr_location *al)
+struct perf_script {
+	struct perf_tool	tool;
+	struct perf_session	*session;
+	bool			show_task_events;
+	bool			show_mmap_events;
+	bool			show_switch_events;
+};
+
+static void process_event(struct perf_script *script __maybe_unused, union perf_event *event,
+			  struct perf_sample *sample, struct perf_evsel *evsel,
+			  struct addr_location *al)
 {
 	struct thread *thread = al->thread;
 	struct perf_event_attr *attr = &evsel->attr;
@@ -643,65 +652,33 @@
 	printf("\n");
 }
 
-static int default_start_script(const char *script __maybe_unused,
-				int argc __maybe_unused,
-				const char **argv __maybe_unused)
-{
-	return 0;
-}
-
-static int default_flush_script(void)
-{
-	return 0;
-}
-
-static int default_stop_script(void)
-{
-	return 0;
-}
-
-static int default_generate_script(struct pevent *pevent __maybe_unused,
-				   const char *outfile __maybe_unused)
-{
-	return 0;
-}
-
-static struct scripting_ops default_scripting_ops = {
-	.start_script		= default_start_script,
-	.flush_script		= default_flush_script,
-	.stop_script		= default_stop_script,
-	.process_event		= process_event,
-	.generate_script	= default_generate_script,
-};
-
 static struct scripting_ops	*scripting_ops;
 
 static void setup_scripting(void)
 {
 	setup_perl_scripting();
 	setup_python_scripting();
-
-	scripting_ops = &default_scripting_ops;
 }
 
 static int flush_scripting(void)
 {
-	return scripting_ops->flush_script();
+	return scripting_ops ? scripting_ops->flush_script() : 0;
 }
 
 static int cleanup_scripting(void)
 {
 	pr_debug("\nperf script stopped\n");
 
-	return scripting_ops->stop_script();
+	return scripting_ops ? scripting_ops->stop_script() : 0;
 }
 
-static int process_sample_event(struct perf_tool *tool __maybe_unused,
+static int process_sample_event(struct perf_tool *tool,
 				union perf_event *event,
 				struct perf_sample *sample,
 				struct perf_evsel *evsel,
 				struct machine *machine)
 {
+	struct perf_script *scr = container_of(tool, struct perf_script, tool);
 	struct addr_location al;
 
 	if (debug_mode) {
@@ -727,20 +704,16 @@
 	if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
 		goto out_put;
 
-	scripting_ops->process_event(event, sample, evsel, &al);
+	if (scripting_ops)
+		scripting_ops->process_event(event, sample, evsel, &al);
+	else
+		process_event(scr, event, sample, evsel, &al);
+
 out_put:
 	addr_location__put(&al);
 	return 0;
 }
 
-struct perf_script {
-	struct perf_tool	tool;
-	struct perf_session	*session;
-	bool			show_task_events;
-	bool			show_mmap_events;
-	bool			show_switch_events;
-};
-
 static int process_attr(struct perf_tool *tool, union perf_event *event,
 			struct perf_evlist **pevlist)
 {
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index e77880b..e74712d 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -161,15 +161,32 @@
 
 	attr->inherit = !no_inherit;
 
-	if (target__has_cpu(&target))
-		return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
+	/*
+	 * Some events get initialized with sample_(period/type) set,
+	 * like tracepoints. Clear it up for counting.
+	 */
+	attr->sample_period = 0;
+	attr->sample_type   = 0;
 
-	if (!target__has_task(&target) && perf_evsel__is_group_leader(evsel)) {
+	/*
+	 * Disable all counters initially; they will be enabled
+	 * either manually by us or by the kernel via the
+	 * enable_on_exec flag set later.
+	 */
+	if (perf_evsel__is_group_leader(evsel)) {
 		attr->disabled = 1;
-		if (!initial_delay)
+
+		/*
+		 * In case of initial_delay we enable tracee
+		 * events manually.
+		 */
+		if (target__none(&target) && !initial_delay)
 			attr->enable_on_exec = 1;
 	}
 
+	if (target__has_cpu(&target))
+		return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
+
 	return perf_evsel__open_per_thread(evsel, evsel_list->threads);
 }
 
@@ -244,18 +261,18 @@
 	print_counters(&rs, 0, NULL);
 }
 
-static void handle_initial_delay(void)
+static void enable_counters(void)
 {
-	struct perf_evsel *counter;
-
-	if (initial_delay) {
-		const int ncpus = cpu_map__nr(evsel_list->cpus),
-			nthreads = thread_map__nr(evsel_list->threads);
-
+	if (initial_delay)
 		usleep(initial_delay * 1000);
-		evlist__for_each(evsel_list, counter)
-			perf_evsel__enable(counter, ncpus, nthreads);
-	}
+
+	/*
+	 * We need to enable counters only if:
+	 * - we don't have a tracee (i.e. we're attaching to an existing
+	 *   task or cpu), or
+	 * - we have an initial delay configured
+	 */
+	if (!target__none(&target) || initial_delay)
+		perf_evlist__enable(evsel_list);
 }
 
 static volatile int workload_exec_errno;
@@ -352,7 +369,7 @@
 
 	if (forks) {
 		perf_evlist__start_workload(evsel_list);
-		handle_initial_delay();
+		enable_counters();
 
 		if (interval) {
 			while (!waitpid(child_pid, &status, WNOHANG)) {
@@ -371,7 +388,7 @@
 		if (WIFSIGNALED(status))
 			psignal(WTERMSIG(status), argv[0]);
 	} else {
-		handle_initial_delay();
+		enable_counters();
 		while (!done) {
 			nanosleep(&ts, NULL);
 			if (interval)
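
The reworked attr setup above relies on the kernel's enable_on_exec behaviour:
counters are created disabled and armed atomically when the forked workload calls
exec(), so no startup cycles are miscounted. Here is a standalone sketch of that
scheme using the raw perf_event_open() syscall; it is illustrative, not perf code,
and the pipe handshake mirrors how perf stat holds the child back until the counter
exists:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count;
	int go[2], fd;
	pid_t child;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;		/* start the counter stopped ... */
	attr.enable_on_exec = 1;	/* ... and let exec() arm it */

	if (pipe(go) || (child = fork()) < 0)
		return 1;

	if (child == 0) {
		char c;

		read(go[0], &c, 1);	/* block until the counter exists */
		execlp("true", "true", (char *)NULL);
		_exit(127);
	}

	fd = syscall(__NR_perf_event_open, &attr, child, -1, -1, 0);
	if (fd < 0)
		return 1;

	write(go[1], "x", 1);		/* release the child to exec() */
	waitpid(child, NULL, 0);

	read(fd, &count, sizeof(count));
	printf("instructions: %llu\n", count);
	close(fd);
	return 0;
}
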
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index 3688ad2..3f871b5 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -17,6 +17,7 @@
 extern int cmd_bench(int argc, const char **argv, const char *prefix);
 extern int cmd_buildid_cache(int argc, const char **argv, const char *prefix);
 extern int cmd_buildid_list(int argc, const char **argv, const char *prefix);
+extern int cmd_config(int argc, const char **argv, const char *prefix);
 extern int cmd_diff(int argc, const char **argv, const char *prefix);
 extern int cmd_evlist(int argc, const char **argv, const char *prefix);
 extern int cmd_help(int argc, const char **argv, const char *prefix);
diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt
index 00fcaf8..acc3ea7 100644
--- a/tools/perf/command-list.txt
+++ b/tools/perf/command-list.txt
@@ -9,6 +9,7 @@
 perf-buildid-list		mainporcelain common
 perf-data			mainporcelain common
 perf-diff			mainporcelain common
+perf-config			mainporcelain common
 perf-evlist			mainporcelain common
 perf-inject			mainporcelain common
 perf-kmem			mainporcelain common
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index de89ec5..6eb9a95 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -318,6 +318,18 @@
       CFLAGS += -DHAVE_LIBBPF_SUPPORT
       $(call detected,CONFIG_LIBBPF)
     endif
+
+    ifndef NO_DWARF
+      ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
+        CFLAGS += -DHAVE_BPF_PROLOGUE
+        $(call detected,CONFIG_BPF_PROLOGUE)
+      else
+        msg := $(warning BPF prologue is not supported by architecture $(ARCH), missing regs_query_register_offset());
+      endif
+    else
+      msg := $(warning DWARF support is off, BPF prologue is disabled);
+    endif
+
   endif # NO_LIBBPF
 endif # NO_LIBELF
 
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 3d4c7c0..4bee53c 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -39,6 +39,7 @@
 static struct cmd_struct commands[] = {
 	{ "buildid-cache", cmd_buildid_cache, 0 },
 	{ "buildid-list", cmd_buildid_list, 0 },
+	{ "config",	cmd_config,	0 },
 	{ "diff",	cmd_diff,	0 },
 	{ "evlist",	cmd_evlist,	0 },
 	{ "help",	cmd_help,	0 },
diff --git a/tools/perf/tests/.gitignore b/tools/perf/tests/.gitignore
index 489fc9f..bf016c4 100644
--- a/tools/perf/tests/.gitignore
+++ b/tools/perf/tests/.gitignore
@@ -1,2 +1,3 @@
 llvm-src-base.c
 llvm-src-kbuild.c
+llvm-src-prologue.c
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index f41ebf8..0ff8a97 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -31,7 +31,7 @@
 perf-y += parse-no-sample-id-all.o
 perf-y += kmod-path.o
 perf-y += thread-map.o
-perf-y += llvm.o llvm-src-base.o llvm-src-kbuild.o
+perf-y += llvm.o llvm-src-base.o llvm-src-kbuild.o llvm-src-prologue.o
 perf-y += bpf.o
 perf-y += topology.o
 
@@ -49,6 +49,13 @@
 	$(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
 	$(Q)echo ';' >> $@
 
+$(OUTPUT)tests/llvm-src-prologue.c: tests/bpf-script-test-prologue.c
+	$(call rule_mkdir)
+	$(Q)echo '#include <tests/llvm.h>' > $@
+	$(Q)echo 'const char test_llvm__bpf_test_prologue_prog[] =' >> $@
+	$(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
+	$(Q)echo ';' >> $@
+
 ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64))
 perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
 endif
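
The new Build rule embeds tests/bpf-script-test-prologue.c into the perf binary as
a C string: each source line is quote-escaped by sed and wrapped in a string literal
ending in \n. For a hypothetical one-line input 'int x;', the generated
llvm-src-prologue.c would look like this:

#include <tests/llvm.h>
const char test_llvm__bpf_test_prologue_prog[] =
"int x;\n"
;
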
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
index 638875a..b66730eb9 100644
--- a/tools/perf/tests/attr.c
+++ b/tools/perf/tests/attr.c
@@ -153,7 +153,7 @@
 	return system(cmd);
 }
 
-int test__attr(void)
+int test__attr(int subtest __maybe_unused)
 {
 	struct stat st;
 	char path_perf[PATH_MAX];
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index a02b035..fb80c9e 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -111,7 +111,7 @@
 	return count;
 }
 
-int test__bp_signal(void)
+int test__bp_signal(int subtest __maybe_unused)
 {
 	struct sigaction sa;
 	long long count1, count2;
diff --git a/tools/perf/tests/bp_signal_overflow.c b/tools/perf/tests/bp_signal_overflow.c
index e765377..89f92fa 100644
--- a/tools/perf/tests/bp_signal_overflow.c
+++ b/tools/perf/tests/bp_signal_overflow.c
@@ -58,7 +58,7 @@
 #define EXECUTIONS 10000
 #define THRESHOLD  100
 
-int test__bp_signal_overflow(void)
+int test__bp_signal_overflow(int subtest __maybe_unused)
 {
 	struct perf_event_attr pe;
 	struct sigaction sa;
diff --git a/tools/perf/tests/bpf-script-test-prologue.c b/tools/perf/tests/bpf-script-test-prologue.c
new file mode 100644
index 0000000..7230e62
--- /dev/null
+++ b/tools/perf/tests/bpf-script-test-prologue.c
@@ -0,0 +1,35 @@
+/*
+ * bpf-script-test-prologue.c
+ * Test BPF prologue
+ */
+#ifndef LINUX_VERSION_CODE
+# error Need LINUX_VERSION_CODE
+# error Example: for a 4.2 kernel, put 'clang-opt="-DLINUX_VERSION_CODE=0x40200"' into the llvm section of ~/.perfconfig
+#endif
+#define SEC(NAME) __attribute__((section(NAME), used))
+
+#include <uapi/linux/fs.h>
+
+#define FMODE_READ		0x1
+#define FMODE_WRITE		0x2
+
+static void (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
+	(void *) 6;
+
+SEC("func=null_lseek file->f_mode offset orig")
+int bpf_func__null_lseek(void *ctx, int err, unsigned long f_mode,
+			 unsigned long offset, unsigned long orig)
+{
+	if (err)
+		return 0;
+	if (f_mode & FMODE_WRITE)
+		return 0;
+	if (offset & 1)
+		return 0;
+	if (orig == SEEK_CUR)
+		return 0;
+	return 1;
+}
+
+char _license[] SEC("license") = "GPL";
+int _version SEC("version") = LINUX_VERSION_CODE;
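
The SEC() string above doubles as a probe spec: 'func=null_lseek file->f_mode
offset orig' attaches to null_lseek() and asks the generated prologue to fetch
three values, which the handler receives after the ctx pointer and an error flag.
A hypothetical spec in the same format is shown below; the function and field
names are illustrative only and are not part of the test:

/* Hypothetical probe spec, same "func=<name> <args...>" shape as above. */
#define FMODE_READ_EX	0x1	/* local stand-in, as in the test above */

SEC("func=generic_file_read_iter iocb->ki_filp->f_mode")
int bpf_func__generic_file_read_iter(void *ctx, int err,
				     unsigned long f_mode)
{
	if (err)
		return 0;
	return !!(f_mode & FMODE_READ_EX);
}
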
diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
index ec16f78..33689a0 100644
--- a/tools/perf/tests/bpf.c
+++ b/tools/perf/tests/bpf.c
@@ -19,6 +19,29 @@
 	return 0;
 }
 
+#ifdef HAVE_BPF_PROLOGUE
+
+static int llseek_loop(void)
+{
+	int fds[2], i;
+
+	fds[0] = open("/dev/null", O_RDONLY);
+	fds[1] = open("/dev/null", O_RDWR);
+
+	if (fds[0] < 0 || fds[1] < 0)
+		return -1;
+
+	for (i = 0; i < NR_ITERS; i++) {
+		lseek(fds[i % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
+		lseek(fds[(i + 1) % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
+	}
+	close(fds[0]);
+	close(fds[1]);
+	return 0;
+}
+
+#endif
+
 static struct {
 	enum test_llvm__testcase prog_id;
 	const char *desc;
@@ -37,6 +60,17 @@
 		&epoll_pwait_loop,
 		(NR_ITERS + 1) / 2,
 	},
+#ifdef HAVE_BPF_PROLOGUE
+	{
+		LLVM_TESTCASE_BPF_PROLOGUE,
+		"Test BPF prologue generation",
+		"[bpf_prologue_test]",
+		"fix kbuild first",
+		"check your vmlinux setting?",
+		&llseek_loop,
+		(NR_ITERS + 1) / 4,
+	},
+#endif
 };
 
 static int do_test(struct bpf_object *obj, int (*func)(void),
@@ -68,8 +102,7 @@
 	err = parse_events_load_bpf_obj(&parse_evlist, &parse_evlist.list, obj);
 	if (err || list_empty(&parse_evlist.list)) {
 		pr_debug("Failed to add events selected by BPF\n");
-		if (!err)
-			return TEST_FAIL;
+		return TEST_FAIL;
 	}
 
 	snprintf(pid, sizeof(pid), "%d", getpid());
@@ -123,8 +156,10 @@
 		}
 	}
 
-	if (count != expect)
+	if (count != expect) {
 		pr_debug("BPF filter result incorrect\n");
+		goto out_delete_evlist;
+	}
 
 	ret = TEST_OK;
 
@@ -146,7 +181,7 @@
 	return obj;
 }
 
-static int __test__bpf(int index)
+static int __test__bpf(int idx)
 {
 	int ret;
 	void *obj_buf;
@@ -154,54 +189,72 @@
 	struct bpf_object *obj;
 
 	ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
-				       bpf_testcase_table[index].prog_id,
+				       bpf_testcase_table[idx].prog_id,
 				       true);
 	if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
 		pr_debug("Unable to get BPF object, %s\n",
-			 bpf_testcase_table[index].msg_compile_fail);
-		if (index == 0)
+			 bpf_testcase_table[idx].msg_compile_fail);
+		if (idx == 0)
 			return TEST_SKIP;
 		else
 			return TEST_FAIL;
 	}
 
 	obj = prepare_bpf(obj_buf, obj_buf_sz,
-			  bpf_testcase_table[index].name);
+			  bpf_testcase_table[idx].name);
 	if (!obj) {
 		ret = TEST_FAIL;
 		goto out;
 	}
 
 	ret = do_test(obj,
-		      bpf_testcase_table[index].target_func,
-		      bpf_testcase_table[index].expect_result);
+		      bpf_testcase_table[idx].target_func,
+		      bpf_testcase_table[idx].expect_result);
 out:
 	bpf__clear();
 	return ret;
 }
 
-int test__bpf(void)
+int test__bpf_subtest_get_nr(void)
 {
-	unsigned int i;
+	return (int)ARRAY_SIZE(bpf_testcase_table);
+}
+
+const char *test__bpf_subtest_get_desc(int i)
+{
+	if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
+		return NULL;
+	return bpf_testcase_table[i].desc;
+}
+
+int test__bpf(int i)
+{
 	int err;
 
+	if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
+		return TEST_FAIL;
+
 	if (geteuid() != 0) {
 		pr_debug("Only root can run BPF test\n");
 		return TEST_SKIP;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(bpf_testcase_table); i++) {
-		err = __test__bpf(i);
-
-		if (err != TEST_OK)
-			return err;
-	}
-
-	return TEST_OK;
+	err = __test__bpf(i);
+	return err;
 }
 
 #else
-int test__bpf(void)
+int test__bpf_subtest_get_nr(void)
+{
+	return 0;
+}
+
+const char *test__bpf_subtest_get_desc(int i __maybe_unused)
+{
+	return NULL;
+}
+
+int test__bpf(int i __maybe_unused)
 {
 	pr_debug("Skip BPF test because BPF support is not compiled in\n");
 	return TEST_SKIP;
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 80c442e..2b1ade1 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -160,6 +160,11 @@
 	{
 		.desc = "Test LLVM searching and compiling",
 		.func = test__llvm,
+		.subtest = {
+			.skip_if_fail	= true,
+			.get_nr		= test__llvm_subtest_get_nr,
+			.get_desc	= test__llvm_subtest_get_desc,
+		},
 	},
 	{
 		.desc = "Test topology in session",
@@ -168,6 +173,11 @@
 	{
 		.desc = "Test BPF filter",
 		.func = test__bpf,
+		.subtest = {
+			.skip_if_fail	= true,
+			.get_nr		= test__bpf_subtest_get_nr,
+			.get_desc	= test__bpf_subtest_get_desc,
+		},
 	},
 	{
 		.func = NULL,
@@ -203,7 +213,7 @@
 	return false;
 }
 
-static int run_test(struct test *test)
+static int run_test(struct test *test, int subtest)
 {
 	int status, err = -1, child = fork();
 	char sbuf[STRERR_BUFSIZE];
@@ -216,7 +226,19 @@
 
 	if (!child) {
 		pr_debug("test child forked, pid %d\n", getpid());
-		err = test->func();
+		if (!verbose) {
+			int nullfd = open("/dev/null", O_WRONLY);
+			if (nullfd >= 0) {
+				close(STDERR_FILENO);
+				close(STDOUT_FILENO);
+
+				dup2(nullfd, STDOUT_FILENO);
+				dup2(STDOUT_FILENO, STDERR_FILENO);
+				close(nullfd);
+			}
+		}
+
+		err = test->func(subtest);
 		exit(err);
 	}
 
@@ -237,6 +259,40 @@
 	for (j = 0; j < ARRAY_SIZE(tests); j++)	\
 		for (t = &tests[j][0]; t->func; t++)
 
+static int test_and_print(struct test *t, bool force_skip, int subtest)
+{
+	int err;
+
+	if (!force_skip) {
+		pr_debug("\n--- start ---\n");
+		err = run_test(t, subtest);
+		pr_debug("---- end ----\n");
+	} else {
+		pr_debug("\n--- force skipped ---\n");
+		err = TEST_SKIP;
+	}
+
+	if (!t->subtest.get_nr)
+		pr_debug("%s:", t->desc);
+	else
+		pr_debug("%s subtest %d:", t->desc, subtest);
+
+	switch (err) {
+	case TEST_OK:
+		pr_info(" Ok\n");
+		break;
+	case TEST_SKIP:
+		color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
+		break;
+	case TEST_FAIL:
+	default:
+		color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
+		break;
+	}
+
+	return err;
+}
+
 static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
 {
 	struct test *t;
@@ -264,21 +320,43 @@
 			continue;
 		}
 
-		pr_debug("\n--- start ---\n");
-		err = run_test(t);
-		pr_debug("---- end ----\n%s:", t->desc);
+		if (!t->subtest.get_nr) {
+			test_and_print(t, false, -1);
+		} else {
+			int subn = t->subtest.get_nr();
+			/*
+			 * Minus 2 to align with normal test cases:
+			 * for subtests we print an additional '.x' in the
+			 * number, for example:
+			 *
+			 * 35: Test LLVM searching and compiling                        :
+			 * 35.1: Basic BPF llvm compiling test                          : Ok
+			 */
+			int subw = width > 2 ? width - 2 : width;
+			bool skip = false;
+			int subi;
 
-		switch (err) {
-		case TEST_OK:
-			pr_info(" Ok\n");
-			break;
-		case TEST_SKIP:
-			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
-			break;
-		case TEST_FAIL:
-		default:
-			color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
-			break;
+			if (subn <= 0) {
+				color_fprintf(stderr, PERF_COLOR_YELLOW,
+					      " Skip (not compiled in)\n");
+				continue;
+			}
+			pr_info("\n");
+
+			for (subi = 0; subi < subn; subi++) {
+				int len = strlen(t->subtest.get_desc(subi));
+
+				if (subw < len)
+					subw = len;
+			}
+
+			for (subi = 0; subi < subn; subi++) {
+				pr_info("%2d.%1d: %-*s:", i, subi + 1, subw,
+					t->subtest.get_desc(subi));
+				err = test_and_print(t, skip, subi);
+				if (err != TEST_OK && t->subtest.skip_if_fail)
+					skip = true;
+			}
 		}
 	}
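
run_test() now forks each (sub)test and, unless -v is given, points the child's
stdout at /dev/null and stderr at stdout, so a noisy or crashing test cannot
disturb the harness output. A standalone sketch of that fork-and-silence pattern
(illustrative; the helper name run_quietly is not perf's):

#include <fcntl.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int run_quietly(int (*fn)(void), int verbose)
{
	int status;
	pid_t child = fork();

	if (child < 0)
		return -1;

	if (child == 0) {
		if (!verbose) {
			int nullfd = open("/dev/null", O_WRONLY);

			if (nullfd >= 0) {
				dup2(nullfd, STDOUT_FILENO);	    /* stdout -> /dev/null */
				dup2(STDOUT_FILENO, STDERR_FILENO); /* stderr -> stdout   */
				close(nullfd);
			}
		}
		exit(fn());	/* the test's return value becomes the exit code */
	}

	if (waitpid(child, &status, 0) < 0)
		return -1;

	return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
}
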
 
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index a767a64..313a48c 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -433,7 +433,6 @@
 
 static int do_test_code_reading(bool try_kcore)
 {
-	struct machines machines;
 	struct machine *machine;
 	struct thread *thread;
 	struct record_opts opts = {
@@ -459,8 +458,7 @@
 
 	pid = getpid();
 
-	machines__init(&machines);
-	machine = &machines.host;
+	machine = machine__new_host();
 
 	ret = machine__create_kernel_maps(machine);
 	if (ret < 0) {
@@ -549,6 +547,13 @@
 		if (ret < 0) {
 			if (!excl_kernel) {
 				excl_kernel = true;
+				/*
+				 * Both cpus and threads are now owned by evlist
+				 * and will be freed by the following perf_evlist__set_maps
+				 * call. Grab a reference to keep them alive.
+				 */
+				cpu_map__get(cpus);
+				thread_map__get(threads);
 				perf_evlist__set_maps(evlist, NULL, NULL);
 				perf_evlist__delete(evlist);
 				evlist = NULL;
@@ -594,14 +599,13 @@
 		cpu_map__put(cpus);
 		thread_map__put(threads);
 	}
-	machines__destroy_kernel_maps(&machines);
 	machine__delete_threads(machine);
-	machines__exit(&machines);
+	machine__delete(machine);
 
 	return err;
 }
 
-int test__code_reading(void)
+int test__code_reading(int subtest __maybe_unused)
 {
 	int ret;
 
diff --git a/tools/perf/tests/dso-data.c b/tools/perf/tests/dso-data.c
index a218aea..dc673ff 100644
--- a/tools/perf/tests/dso-data.c
+++ b/tools/perf/tests/dso-data.c
@@ -110,7 +110,7 @@
 	return fd;
 }
 
-int test__dso_data(void)
+int test__dso_data(int subtest __maybe_unused)
 {
 	struct machine machine;
 	struct dso *dso;
@@ -245,7 +245,7 @@
 	return setrlimit(RLIMIT_NOFILE, &rlim);
 }
 
-int test__dso_data_cache(void)
+int test__dso_data_cache(int subtest __maybe_unused)
 {
 	struct machine machine;
 	long nr_end, nr = open_files_cnt();
@@ -302,7 +302,7 @@
 	return 0;
 }
 
-int test__dso_data_reopen(void)
+int test__dso_data_reopen(int subtest __maybe_unused)
 {
 	struct machine machine;
 	long nr_end, nr = open_files_cnt();
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index 0722179..1c5c022 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -51,6 +51,12 @@
 		"krava_1",
 		"test__dwarf_unwind"
 	};
+	/*
+	 * The index into the funcs[] array, based on the
+	 * configured callchain order.
+	 */
+	int idx = callchain_param.order == ORDER_CALLER ?
+		  MAX_STACK - *cnt - 1 : *cnt;
 
 	if (*cnt >= MAX_STACK) {
 		pr_debug("failed: crossed the max stack value %d\n", MAX_STACK);
@@ -63,8 +69,10 @@
 		return -1;
 	}
 
-	pr_debug("got: %s 0x%" PRIx64 "\n", symbol, entry->ip);
-	return strcmp((const char *) symbol, funcs[(*cnt)++]);
+	(*cnt)++;
+	pr_debug("got: %s 0x%" PRIx64 ", expecting %s\n",
+		 symbol, entry->ip, funcs[idx]);
+	return strcmp((const char *) symbol, funcs[idx]);
 }
 
 __attribute__ ((noinline))
@@ -105,8 +113,16 @@
 	/* Any possible value should be 'thread' */
 	struct thread *thread = *(struct thread **)p1;
 
-	if (global_unwind_retval == -INT_MAX)
+	if (global_unwind_retval == -INT_MAX) {
+		/* Call unwinder twice for both callchain orders. */
+		callchain_param.order = ORDER_CALLER;
+
 		global_unwind_retval = unwind_thread(thread);
+		if (!global_unwind_retval) {
+			callchain_param.order = ORDER_CALLEE;
+			global_unwind_retval = unwind_thread(thread);
+		}
+	}
 
 	return p1 - p2;
 }
@@ -142,21 +158,23 @@
 	return krava_2(thread);
 }
 
-int test__dwarf_unwind(void)
+int test__dwarf_unwind(int subtest __maybe_unused)
 {
-	struct machines machines;
 	struct machine *machine;
 	struct thread *thread;
 	int err = -1;
 
-	machines__init(&machines);
-
-	machine = machines__find(&machines, HOST_KERNEL_ID);
+	machine = machine__new_host();
 	if (!machine) {
 		pr_err("Could not get machine\n");
 		return -1;
 	}
 
+	if (machine__create_kernel_maps(machine)) {
+		pr_err("Failed to create kernel maps\n");
+		return -1;
+	}
+
 	callchain_param.record_mode = CALLCHAIN_DWARF;
 
 	if (init_live_machine(machine)) {
@@ -178,7 +196,6 @@
 
  out:
 	machine__delete_threads(machine);
-	machine__exit(machine);
-	machines__exit(&machines);
+	machine__delete(machine);
 	return err;
 }
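
Since the test now unwinds once per callchain order, the expected symbol has to be
read from opposite ends of the funcs[] table depending on the order. A small sketch
of that index mapping, assuming MAX_STACK is 8 and using local stand-ins for the
ORDER_* values (perf's real ones live in util/callchain.h):

#include <stdio.h>

enum chain_order { ORDER_CALLER, ORDER_CALLEE };	/* local stand-ins */
#define MAX_STACK 8

static int expected_idx(enum chain_order order, int cnt)
{
	/* ORDER_CALLER walks the table from the far end. */
	return order == ORDER_CALLER ? MAX_STACK - cnt - 1 : cnt;
}

int main(void)
{
	for (int cnt = 0; cnt < MAX_STACK; cnt++)
		printf("frame %d -> callee idx %d, caller idx %d\n", cnt,
		       expected_idx(ORDER_CALLEE, cnt),
		       expected_idx(ORDER_CALLER, cnt));
	return 0;
}
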
diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c
index 3fa7159..2de4a4f 100644
--- a/tools/perf/tests/evsel-roundtrip-name.c
+++ b/tools/perf/tests/evsel-roundtrip-name.c
@@ -95,7 +95,7 @@
 #define perf_evsel__name_array_test(names) \
 	__perf_evsel__name_array_test(names, ARRAY_SIZE(names))
 
-int test__perf_evsel__roundtrip_name_test(void)
+int test__perf_evsel__roundtrip_name_test(int subtest __maybe_unused)
 {
 	int err = 0, ret = 0;
 
@@ -103,7 +103,8 @@
 	if (err)
 		ret = err;
 
-	err = perf_evsel__name_array_test(perf_evsel__sw_names);
+	err = __perf_evsel__name_array_test(perf_evsel__sw_names,
+					    PERF_COUNT_SW_DUMMY + 1);
 	if (err)
 		ret = err;
 
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index 790e413..1984b3b 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -32,7 +32,7 @@
 	return ret;
 }
 
-int test__perf_evsel__tp_sched_test(void)
+int test__perf_evsel__tp_sched_test(int subtest __maybe_unused)
 {
 	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
 	int ret = 0;
diff --git a/tools/perf/tests/fdarray.c b/tools/perf/tests/fdarray.c
index d24b837..c809463 100644
--- a/tools/perf/tests/fdarray.c
+++ b/tools/perf/tests/fdarray.c
@@ -25,7 +25,7 @@
 	return printed + fdarray__fprintf(fda, fp);
 }
 
-int test__fdarray__filter(void)
+int test__fdarray__filter(int subtest __maybe_unused)
 {
 	int nr_fds, expected_fd[2], fd, err = TEST_FAIL;
 	struct fdarray *fda = fdarray__new(5, 5);
@@ -103,7 +103,7 @@
 	return err;
 }
 
-int test__fdarray__add(void)
+int test__fdarray__add(int subtest __maybe_unused)
 {
 	int err = TEST_FAIL;
 	struct fdarray *fda = fdarray__new(2, 2);
diff --git a/tools/perf/tests/hists_common.c b/tools/perf/tests/hists_common.c
index ce80b27..46f453b 100644
--- a/tools/perf/tests/hists_common.c
+++ b/tools/perf/tests/hists_common.c
@@ -87,6 +87,11 @@
 		return NULL;
 	}
 
+	if (machine__create_kernel_maps(machine)) {
+		pr_debug("Not enough memory for machine setup\n");
+		goto out;
+	}
+
 	for (i = 0; i < ARRAY_SIZE(fake_threads); i++) {
 		struct thread *thread;
 
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 7ed73701..8292948 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -686,7 +686,7 @@
 	return err;
 }
 
-int test__hists_cumulate(void)
+int test__hists_cumulate(int subtest __maybe_unused)
 {
 	int err = TEST_FAIL;
 	struct machines machines;
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index 818acf8..ccb5b49 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -104,7 +104,7 @@
 	return TEST_FAIL;
 }
 
-int test__hists_filter(void)
+int test__hists_filter(int subtest __maybe_unused)
 {
 	int err = TEST_FAIL;
 	struct machines machines;
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
index 8c102b0..6243e2b 100644
--- a/tools/perf/tests/hists_link.c
+++ b/tools/perf/tests/hists_link.c
@@ -274,7 +274,7 @@
 	return __validate_link(leader, 0) || __validate_link(other, 1);
 }
 
-int test__hists_link(void)
+int test__hists_link(int subtest __maybe_unused)
 {
 	int err = -1;
 	struct hists *hists, *first_hists;
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index adbebc8..248beec 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -576,7 +576,7 @@
 	return err;
 }
 
-int test__hists_output(void)
+int test__hists_output(int subtest __maybe_unused)
 {
 	int err = TEST_FAIL;
 	struct machines machines;
diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c
index a2e2269..a337a6d 100644
--- a/tools/perf/tests/keep-tracking.c
+++ b/tools/perf/tests/keep-tracking.c
@@ -49,7 +49,7 @@
  * when an event is disabled but a dummy software event is not disabled.  If the
  * test passes %0 is returned, otherwise %-1 is returned.
  */
-int test__keep_tracking(void)
+int test__keep_tracking(int subtest __maybe_unused)
 {
 	struct record_opts opts = {
 		.mmap_pages	     = UINT_MAX,
diff --git a/tools/perf/tests/kmod-path.c b/tools/perf/tests/kmod-path.c
index 08c433b..d2af781 100644
--- a/tools/perf/tests/kmod-path.c
+++ b/tools/perf/tests/kmod-path.c
@@ -49,7 +49,7 @@
 #define M(path, c, e) \
 	TEST_ASSERT_VAL("failed", !test_is_kernel_module(path, c, e))
 
-int test__kmod_path__parse(void)
+int test__kmod_path__parse(int subtest __maybe_unused)
 {
 	/* path                alloc_name  alloc_ext   kmod  comp   name     ext */
 	T("/xxxx/xxxx/x-x.ko", true      , true      , true, false, "[x_x]", NULL);
diff --git a/tools/perf/tests/llvm.c b/tools/perf/tests/llvm.c
index bc4cf50..06f45c1 100644
--- a/tools/perf/tests/llvm.c
+++ b/tools/perf/tests/llvm.c
@@ -44,13 +44,17 @@
 		.source = test_llvm__bpf_test_kbuild_prog,
 		.desc = "Test kbuild searching",
 	},
+	[LLVM_TESTCASE_BPF_PROLOGUE] = {
+		.source = test_llvm__bpf_test_prologue_prog,
+		.desc = "Compile source for BPF prologue generation test",
+	},
 };
 
 
 int
 test_llvm__fetch_bpf_obj(void **p_obj_buf,
 			 size_t *p_obj_buf_sz,
-			 enum test_llvm__testcase index,
+			 enum test_llvm__testcase idx,
 			 bool force)
 {
 	const char *source;
@@ -59,11 +63,11 @@
 	char *tmpl_new = NULL, *clang_opt_new = NULL;
 	int err, old_verbose, ret = TEST_FAIL;
 
-	if (index >= __LLVM_TESTCASE_MAX)
+	if (idx >= __LLVM_TESTCASE_MAX)
 		return TEST_FAIL;
 
-	source = bpf_source_table[index].source;
-	desc = bpf_source_table[index].desc;
+	source = bpf_source_table[idx].source;
+	desc = bpf_source_table[idx].desc;
 
 	perf_config(perf_config_cb, NULL);
 
@@ -127,44 +131,39 @@
 	return ret;
 }
 
-int test__llvm(void)
+int test__llvm(int subtest)
 {
-	enum test_llvm__testcase i;
+	int ret;
+	void *obj_buf = NULL;
+	size_t obj_buf_sz = 0;
 
-	for (i = 0; i < __LLVM_TESTCASE_MAX; i++) {
-		int ret;
-		void *obj_buf = NULL;
-		size_t obj_buf_sz = 0;
+	if ((subtest < 0) || (subtest >= __LLVM_TESTCASE_MAX))
+		return TEST_FAIL;
 
-		ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
-					       i, false);
+	ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
+				       subtest, false);
 
-		if (ret == TEST_OK) {
-			ret = test__bpf_parsing(obj_buf, obj_buf_sz);
-			if (ret != TEST_OK)
-				pr_debug("Failed to parse test case '%s'\n",
-					 bpf_source_table[i].desc);
-		}
-		free(obj_buf);
-
-		switch (ret) {
-		case TEST_SKIP:
-			return TEST_SKIP;
-		case TEST_OK:
-			break;
-		default:
-			/*
-			 * Test 0 is the basic LLVM test. If test 0
-			 * fail, the basic LLVM support not functional
-			 * so the whole test should fail. If other test
-			 * case fail, it can be fixed by adjusting
-			 * config so don't report error.
-			 */
-			if (i == 0)
-				return TEST_FAIL;
-			else
-				return TEST_SKIP;
+	if (ret == TEST_OK) {
+		ret = test__bpf_parsing(obj_buf, obj_buf_sz);
+		if (ret != TEST_OK) {
+			pr_debug("Failed to parse test case '%s'\n",
+				 bpf_source_table[subtest].desc);
 		}
 	}
-	return TEST_OK;
+	free(obj_buf);
+
+	return ret;
+}
+
+int test__llvm_subtest_get_nr(void)
+{
+	return __LLVM_TESTCASE_MAX;
+}
+
+const char *test__llvm_subtest_get_desc(int subtest)
+{
+	if ((subtest < 0) || (subtest >= __LLVM_TESTCASE_MAX))
+		return NULL;
+
+	return bpf_source_table[subtest].desc;
 }
diff --git a/tools/perf/tests/llvm.h b/tools/perf/tests/llvm.h
index d91d8f4..5150b4d 100644
--- a/tools/perf/tests/llvm.h
+++ b/tools/perf/tests/llvm.h
@@ -6,10 +6,12 @@
 
 extern const char test_llvm__bpf_base_prog[];
 extern const char test_llvm__bpf_test_kbuild_prog[];
+extern const char test_llvm__bpf_test_prologue_prog[];
 
 enum test_llvm__testcase {
 	LLVM_TESTCASE_BASE,
 	LLVM_TESTCASE_KBUILD,
+	LLVM_TESTCASE_BPF_PROLOGUE,
 	__LLVM_TESTCASE_MAX,
 };
 
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index 4495493..359e98f 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -16,7 +16,7 @@
  * Then it checks if the number of syscalls reported as perf events by
  * the kernel corresponds to the number of syscalls made.
  */
-int test__basic_mmap(void)
+int test__basic_mmap(int subtest __maybe_unused)
 {
 	int err = -1;
 	union perf_event *event;
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
index 145050e..0c5ce44 100644
--- a/tools/perf/tests/mmap-thread-lookup.c
+++ b/tools/perf/tests/mmap-thread-lookup.c
@@ -149,7 +149,6 @@
 
 static int mmap_events(synth_cb synth)
 {
-	struct machines machines;
 	struct machine *machine;
 	int err, i;
 
@@ -162,8 +161,7 @@
 	 */
 	TEST_ASSERT_VAL("failed to create threads", !threads_create());
 
-	machines__init(&machines);
-	machine = &machines.host;
+	machine = machine__new_host();
 
 	dump_trace = verbose > 1 ? 1 : 0;
 
@@ -203,7 +201,7 @@
 	}
 
 	machine__delete_threads(machine);
-	machines__exit(&machines);
+	machine__delete(machine);
 	return err;
 }
 
@@ -221,7 +219,7 @@
  *
  * by using all thread objects.
  */
-int test__mmap_thread_lookup(void)
+int test__mmap_thread_lookup(int subtest __maybe_unused)
 {
 	/* perf_event__synthesize_threads synthesize */
 	TEST_ASSERT_VAL("failed with synthesizing all",
diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
index 2006485..53c2273 100644
--- a/tools/perf/tests/openat-syscall-all-cpus.c
+++ b/tools/perf/tests/openat-syscall-all-cpus.c
@@ -7,7 +7,7 @@
 #include "debug.h"
 #include "stat.h"
 
-int test__openat_syscall_event_on_all_cpus(void)
+int test__openat_syscall_event_on_all_cpus(int subtest __maybe_unused)
 {
 	int err = -1, fd, cpu;
 	struct cpu_map *cpus;
diff --git a/tools/perf/tests/openat-syscall-tp-fields.c b/tools/perf/tests/openat-syscall-tp-fields.c
index 5e811cd..eb99a105 100644
--- a/tools/perf/tests/openat-syscall-tp-fields.c
+++ b/tools/perf/tests/openat-syscall-tp-fields.c
@@ -6,7 +6,7 @@
 #include "tests.h"
 #include "debug.h"
 
-int test__syscall_openat_tp_fields(void)
+int test__syscall_openat_tp_fields(int subtest __maybe_unused)
 {
 	struct record_opts opts = {
 		.target = {
diff --git a/tools/perf/tests/openat-syscall.c b/tools/perf/tests/openat-syscall.c
index 033b547..1184f9b 100644
--- a/tools/perf/tests/openat-syscall.c
+++ b/tools/perf/tests/openat-syscall.c
@@ -5,7 +5,7 @@
 #include "debug.h"
 #include "tests.h"
 
-int test__openat_syscall_event(void)
+int test__openat_syscall_event(int subtest __maybe_unused)
 {
 	int err = -1, fd;
 	struct perf_evsel *evsel;
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 636d7b4..abe8849 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1765,7 +1765,7 @@
 	fprintf(stderr, " Warning: %s\n", msg);
 }
 
-int test__parse_events(void)
+int test__parse_events(int subtest __maybe_unused)
 {
 	int ret1, ret2 = 0;
 
diff --git a/tools/perf/tests/parse-no-sample-id-all.c b/tools/perf/tests/parse-no-sample-id-all.c
index 2c63ea6..294c76b 100644
--- a/tools/perf/tests/parse-no-sample-id-all.c
+++ b/tools/perf/tests/parse-no-sample-id-all.c
@@ -67,7 +67,7 @@
  *
  * Return: %0 on success, %-1 if the test fails.
  */
-int test__parse_no_sample_id_all(void)
+int test__parse_no_sample_id_all(int subtest __maybe_unused)
 {
 	int err;
 
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index 7a228a2..9d5f0b5 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -32,7 +32,7 @@
 	return cpu;
 }
 
-int test__PERF_RECORD(void)
+int test__PERF_RECORD(int subtest __maybe_unused)
 {
 	struct record_opts opts = {
 		.target = {
diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
index faa04e9..1e2ba26 100644
--- a/tools/perf/tests/pmu.c
+++ b/tools/perf/tests/pmu.c
@@ -133,7 +133,7 @@
 	return &terms;
 }
 
-int test__pmu(void)
+int test__pmu(int subtest __maybe_unused)
 {
 	char *format = test_format_dir_get();
 	LIST_HEAD(formats);
diff --git a/tools/perf/tests/python-use.c b/tools/perf/tests/python-use.c
index 7760277..7a52834 100644
--- a/tools/perf/tests/python-use.c
+++ b/tools/perf/tests/python-use.c
@@ -4,11 +4,12 @@
 
 #include <stdio.h>
 #include <stdlib.h>
+#include <linux/compiler.h>
 #include "tests.h"
 
 extern int verbose;
 
-int test__python_use(void)
+int test__python_use(int subtest __maybe_unused)
 {
 	char *cmd;
 	int ret;
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 30c0218..5f23710 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -290,7 +290,7 @@
  * checks sample format bits separately and together.  If the test passes %0 is
  * returned, otherwise %-1 is returned.
  */
-int test__sample_parsing(void)
+int test__sample_parsing(int subtest __maybe_unused)
 {
 	const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
 	u64 sample_type;
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
index 5b83f56..36e8ce1 100644
--- a/tools/perf/tests/sw-clock.c
+++ b/tools/perf/tests/sw-clock.c
@@ -122,7 +122,7 @@
 	return err;
 }
 
-int test__sw_clock_freq(void)
+int test__sw_clock_freq(int subtest __maybe_unused)
 {
 	int ret;
 
diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
index a02af50..dfbd8d6 100644
--- a/tools/perf/tests/switch-tracking.c
+++ b/tools/perf/tests/switch-tracking.c
@@ -305,7 +305,7 @@
  * evsel->system_wide and evsel->tracking flags (respectively) with other events
  * sometimes enabled or disabled.
  */
-int test__switch_tracking(void)
+int test__switch_tracking(int subtest __maybe_unused)
 {
 	const char *sched_switch = "sched:sched_switch";
 	struct switch_tracking switch_tracking = { .tids = NULL, };
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index add1638..2dfff7a 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -31,7 +31,7 @@
  * whether the number of exit events reported by the kernel is 1,
  * in order to check that the kernel returns the correct number of events.
  */
-int test__task_exit(void)
+int test__task_exit(int subtest __maybe_unused)
 {
 	int err = -1;
 	union perf_event *event;
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index 3c8734a..a0733aa 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -1,6 +1,8 @@
 #ifndef TESTS_H
 #define TESTS_H
 
+#include <stdbool.h>
+
 #define TEST_ASSERT_VAL(text, cond)					 \
 do {									 \
 	if (!(cond)) {							 \
@@ -26,48 +28,57 @@
 
 struct test {
 	const char *desc;
-	int (*func)(void);
+	int (*func)(int subtest);
+	struct {
+		bool skip_if_fail;
+		int (*get_nr)(void);
+		const char *(*get_desc)(int subtest);
+	} subtest;
 };
 
 /* Tests */
-int test__vmlinux_matches_kallsyms(void);
-int test__openat_syscall_event(void);
-int test__openat_syscall_event_on_all_cpus(void);
-int test__basic_mmap(void);
-int test__PERF_RECORD(void);
-int test__perf_evsel__roundtrip_name_test(void);
-int test__perf_evsel__tp_sched_test(void);
-int test__syscall_openat_tp_fields(void);
-int test__pmu(void);
-int test__attr(void);
-int test__dso_data(void);
-int test__dso_data_cache(void);
-int test__dso_data_reopen(void);
-int test__parse_events(void);
-int test__hists_link(void);
-int test__python_use(void);
-int test__bp_signal(void);
-int test__bp_signal_overflow(void);
-int test__task_exit(void);
-int test__sw_clock_freq(void);
-int test__code_reading(void);
-int test__sample_parsing(void);
-int test__keep_tracking(void);
-int test__parse_no_sample_id_all(void);
-int test__dwarf_unwind(void);
-int test__hists_filter(void);
-int test__mmap_thread_lookup(void);
-int test__thread_mg_share(void);
-int test__hists_output(void);
-int test__hists_cumulate(void);
-int test__switch_tracking(void);
-int test__fdarray__filter(void);
-int test__fdarray__add(void);
-int test__kmod_path__parse(void);
-int test__thread_map(void);
-int test__llvm(void);
-int test__bpf(void);
-int test_session_topology(void);
+int test__vmlinux_matches_kallsyms(int subtest);
+int test__openat_syscall_event(int subtest);
+int test__openat_syscall_event_on_all_cpus(int subtest);
+int test__basic_mmap(int subtest);
+int test__PERF_RECORD(int subtest);
+int test__perf_evsel__roundtrip_name_test(int subtest);
+int test__perf_evsel__tp_sched_test(int subtest);
+int test__syscall_openat_tp_fields(int subtest);
+int test__pmu(int subtest);
+int test__attr(int subtest);
+int test__dso_data(int subtest);
+int test__dso_data_cache(int subtest);
+int test__dso_data_reopen(int subtest);
+int test__parse_events(int subtest);
+int test__hists_link(int subtest);
+int test__python_use(int subtest);
+int test__bp_signal(int subtest);
+int test__bp_signal_overflow(int subtest);
+int test__task_exit(int subtest);
+int test__sw_clock_freq(int subtest);
+int test__code_reading(int subtest);
+int test__sample_parsing(int subtest);
+int test__keep_tracking(int subtest);
+int test__parse_no_sample_id_all(int subtest);
+int test__dwarf_unwind(int subtest);
+int test__hists_filter(int subtest);
+int test__mmap_thread_lookup(int subtest);
+int test__thread_mg_share(int subtest);
+int test__hists_output(int subtest);
+int test__hists_cumulate(int subtest);
+int test__switch_tracking(int subtest);
+int test__fdarray__filter(int subtest);
+int test__fdarray__add(int subtest);
+int test__kmod_path__parse(int subtest);
+int test__thread_map(int subtest);
+int test__llvm(int subtest);
+const char *test__llvm_subtest_get_desc(int subtest);
+int test__llvm_subtest_get_nr(void);
+int test__bpf(int subtest);
+const char *test__bpf_subtest_get_desc(int subtest);
+int test__bpf_subtest_get_nr(void);
+int test_session_topology(int subtest);
 
 #if defined(__arm__) || defined(__aarch64__)
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
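
With the struct test extension above, a driver can ask a test for its subtest count
and descriptions and invoke func() once per index, or once with -1 when there are
no subtests, which is what __cmd_test() does. A condensed sketch of that dispatch,
assuming the tests.h definitions above (the helper name run_one is illustrative):

/* Condensed sketch of the subtest dispatch in __cmd_test();
 * TEST_OK comes from tests.h, everything else is local. */
static int run_one(struct test *t)
{
	int nr = t->subtest.get_nr ? t->subtest.get_nr() : 0;

	if (nr <= 0)
		return t->func(-1);	/* no subtests: single run */

	for (int i = 0; i < nr; i++) {
		int err = t->func(i);

		/* e.g. skip remaining LLVM subtests once one fails */
		if (err != TEST_OK && t->subtest.skip_if_fail)
			return err;
	}
	return TEST_OK;
}
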
diff --git a/tools/perf/tests/thread-map.c b/tools/perf/tests/thread-map.c
index 138a0e3..2be02d3 100644
--- a/tools/perf/tests/thread-map.c
+++ b/tools/perf/tests/thread-map.c
@@ -4,7 +4,7 @@
 #include "thread_map.h"
 #include "debug.h"
 
-int test__thread_map(void)
+int test__thread_map(int subtest __maybe_unused)
 {
 	struct thread_map *map;
 
diff --git a/tools/perf/tests/thread-mg-share.c b/tools/perf/tests/thread-mg-share.c
index 01fabb1..188b631 100644
--- a/tools/perf/tests/thread-mg-share.c
+++ b/tools/perf/tests/thread-mg-share.c
@@ -4,7 +4,7 @@
 #include "map.h"
 #include "debug.h"
 
-int test__thread_mg_share(void)
+int test__thread_mg_share(int subtest __maybe_unused)
 {
 	struct machines machines;
 	struct machine *machine;
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
index f5bb096..98fe69a 100644
--- a/tools/perf/tests/topology.c
+++ b/tools/perf/tests/topology.c
@@ -84,7 +84,7 @@
 	return 0;
 }
 
-int test_session_topology(void)
+int test_session_topology(int subtest __maybe_unused)
 {
 	char path[PATH_MAX];
 	struct cpu_map *map;
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index d677e01..f0bfc9e 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -18,7 +18,7 @@
 
 #define UM(x) kallsyms_map->unmap_ip(kallsyms_map, (x))
 
-int test__vmlinux_matches_kallsyms(void)
+int test__vmlinux_matches_kallsyms(int subtest __maybe_unused)
 {
 	int err = -1;
 	struct rb_node *nd;
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 81def6c3..ec33196 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -178,12 +178,51 @@
 	return n;
 }
 
+static int callchain_node__count_flat_rows(struct callchain_node *node)
+{
+	struct callchain_list *chain;
+	char folded_sign = 0;
+	int n = 0;
+
+	list_for_each_entry(chain, &node->parent_val, list) {
+		if (!folded_sign) {
+			/* only check first chain list entry */
+			folded_sign = callchain_list__folded(chain);
+			if (folded_sign == '+')
+				return 1;
+		}
+		n++;
+	}
+
+	list_for_each_entry(chain, &node->val, list) {
+		if (!folded_sign) {
+			/* node->parent_val list might be empty */
+			folded_sign = callchain_list__folded(chain);
+			if (folded_sign == '+')
+				return 1;
+		}
+		n++;
+	}
+
+	return n;
+}
+
+static int callchain_node__count_folded_rows(struct callchain_node *node __maybe_unused)
+{
+	return 1;
+}
+
 static int callchain_node__count_rows(struct callchain_node *node)
 {
 	struct callchain_list *chain;
 	bool unfolded = false;
 	int n = 0;
 
+	if (callchain_param.mode == CHAIN_FLAT)
+		return callchain_node__count_flat_rows(node);
+	else if (callchain_param.mode == CHAIN_FOLDED)
+		return callchain_node__count_folded_rows(node);
+
 	list_for_each_entry(chain, &node->val, list) {
 		++n;
 		unfolded = chain->unfolded;
@@ -263,7 +302,7 @@
 	chain = list_entry(node->val.next, struct callchain_list, list);
 	chain->has_children = has_sibling;
 
-	if (!list_empty(&node->val)) {
+	if (node->val.next != node->val.prev) {
 		chain = list_entry(node->val.prev, struct callchain_list, list);
 		chain->has_children = !RB_EMPTY_ROOT(&node->rb_root);
 	}
@@ -279,6 +318,9 @@
 	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
 		struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
 		callchain_node__init_have_children(node, has_sibling);
+		if (callchain_param.mode == CHAIN_FLAT ||
+		    callchain_param.mode == CHAIN_FOLDED)
+			callchain_node__make_parent_list(node);
 	}
 }
 
@@ -577,6 +619,231 @@
 
 #define LEVEL_OFFSET_STEP 3
 
+static int hist_browser__show_callchain_list(struct hist_browser *browser,
+					     struct callchain_node *node,
+					     struct callchain_list *chain,
+					     unsigned short row, u64 total,
+					     bool need_percent, int offset,
+					     print_callchain_entry_fn print,
+					     struct callchain_print_arg *arg)
+{
+	char bf[1024], *alloc_str;
+	const char *str;
+
+	if (arg->row_offset != 0) {
+		arg->row_offset--;
+		return 0;
+	}
+
+	alloc_str = NULL;
+	str = callchain_list__sym_name(chain, bf, sizeof(bf),
+				       browser->show_dso);
+
+	if (need_percent) {
+		char buf[64];
+
+		callchain_node__scnprintf_value(node, buf, sizeof(buf),
+						total);
+
+		if (asprintf(&alloc_str, "%s %s", buf, str) < 0)
+			str = "Not enough memory!";
+		else
+			str = alloc_str;
+	}
+
+	print(browser, chain, str, offset, row, arg);
+
+	free(alloc_str);
+	return 1;
+}
+
+static int hist_browser__show_callchain_flat(struct hist_browser *browser,
+					     struct rb_root *root,
+					     unsigned short row, u64 total,
+					     print_callchain_entry_fn print,
+					     struct callchain_print_arg *arg,
+					     check_output_full_fn is_output_full)
+{
+	struct rb_node *node;
+	int first_row = row, offset = LEVEL_OFFSET_STEP;
+	bool need_percent;
+
+	node = rb_first(root);
+	need_percent = node && rb_next(node);
+
+	while (node) {
+		struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
+		struct rb_node *next = rb_next(node);
+		struct callchain_list *chain;
+		char folded_sign = ' ';
+		int first = true;
+		int extra_offset = 0;
+
+		list_for_each_entry(chain, &child->parent_val, list) {
+			bool was_first = first;
+
+			if (first)
+				first = false;
+			else if (need_percent)
+				extra_offset = LEVEL_OFFSET_STEP;
+
+			folded_sign = callchain_list__folded(chain);
+
+			row += hist_browser__show_callchain_list(browser, child,
+							chain, row, total,
+							was_first && need_percent,
+							offset + extra_offset,
+							print, arg);
+
+			if (is_output_full(browser, row))
+				goto out;
+
+			if (folded_sign == '+')
+				goto next;
+		}
+
+		list_for_each_entry(chain, &child->val, list) {
+			bool was_first = first;
+
+			if (first)
+				first = false;
+			else if (need_percent)
+				extra_offset = LEVEL_OFFSET_STEP;
+
+			folded_sign = callchain_list__folded(chain);
+
+			row += hist_browser__show_callchain_list(browser, child,
+							chain, row, total,
+							was_first && need_percent,
+							offset + extra_offset,
+							print, arg);
+
+			if (is_output_full(browser, row))
+				goto out;
+
+			if (folded_sign == '+')
+				break;
+		}
+
+next:
+		if (is_output_full(browser, row))
+			break;
+		node = next;
+	}
+out:
+	return row - first_row;
+}
+
+static char *hist_browser__folded_callchain_str(struct hist_browser *browser,
+						struct callchain_list *chain,
+						char *value_str, char *old_str)
+{
+	char bf[1024];
+	const char *str;
+	char *new;
+
+	str = callchain_list__sym_name(chain, bf, sizeof(bf),
+				       browser->show_dso);
+	if (old_str) {
+		if (asprintf(&new, "%s%s%s", old_str,
+			     symbol_conf.field_sep ?: ";", str) < 0)
+			new = NULL;
+	} else {
+		if (value_str) {
+			if (asprintf(&new, "%s %s", value_str, str) < 0)
+				new = NULL;
+		} else {
+			if (asprintf(&new, "%s", str) < 0)
+				new = NULL;
+		}
+	}
+	return new;
+}
+
+static int hist_browser__show_callchain_folded(struct hist_browser *browser,
+					       struct rb_root *root,
+					       unsigned short row, u64 total,
+					       print_callchain_entry_fn print,
+					       struct callchain_print_arg *arg,
+					       check_output_full_fn is_output_full)
+{
+	struct rb_node *node;
+	int first_row = row, offset = LEVEL_OFFSET_STEP;
+	bool need_percent;
+
+	node = rb_first(root);
+	need_percent = node && rb_next(node);
+
+	while (node) {
+		struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
+		struct rb_node *next = rb_next(node);
+		struct callchain_list *chain, *first_chain = NULL;
+		int first = true;
+		char *value_str = NULL, *value_str_alloc = NULL;
+		char *chain_str = NULL, *chain_str_alloc = NULL;
+
+		if (arg->row_offset != 0) {
+			arg->row_offset--;
+			goto next;
+		}
+
+		if (need_percent) {
+			char buf[64];
+
+			callchain_node__scnprintf_value(child, buf, sizeof(buf), total);
+			if (asprintf(&value_str, "%s", buf) < 0) {
+				value_str = (char *)"<...>";
+				goto do_print;
+			}
+			value_str_alloc = value_str;
+		}
+
+		list_for_each_entry(chain, &child->parent_val, list) {
+			chain_str = hist_browser__folded_callchain_str(browser,
+						chain, value_str, chain_str);
+			if (first) {
+				first = false;
+				first_chain = chain;
+			}
+
+			if (chain_str == NULL) {
+				chain_str = (char *)"Not enough memory!";
+				goto do_print;
+			}
+
+			chain_str_alloc = chain_str;
+		}
+
+		list_for_each_entry(chain, &child->val, list) {
+			chain_str = hist_browser__folded_callchain_str(browser,
+						chain, value_str, chain_str);
+			if (first) {
+				first = false;
+				first_chain = chain;
+			}
+
+			if (chain_str == NULL) {
+				chain_str = (char *)"Not enough memory!";
+				goto do_print;
+			}
+
+			chain_str_alloc = chain_str;
+		}
+
+do_print:
+		print(browser, first_chain, chain_str, offset, row++, arg);
+		free(value_str_alloc);
+		free(chain_str_alloc);
+
+next:
+		if (is_output_full(browser, row))
+			break;
+		node = next;
+	}
+
+	return row - first_row;
+}
+
 static int hist_browser__show_callchain(struct hist_browser *browser,
 					struct rb_root *root, int level,
 					unsigned short row, u64 total,
@@ -595,15 +862,12 @@
 	while (node) {
 		struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
 		struct rb_node *next = rb_next(node);
-		u64 cumul = callchain_cumul_hits(child);
 		struct callchain_list *chain;
 		char folded_sign = ' ';
 		int first = true;
 		int extra_offset = 0;
 
 		list_for_each_entry(chain, &child->val, list) {
-			char bf[1024], *alloc_str;
-			const char *str;
 			bool was_first = first;
 
 			if (first)
@@ -612,31 +876,16 @@
 				extra_offset = LEVEL_OFFSET_STEP;
 
 			folded_sign = callchain_list__folded(chain);
-			if (arg->row_offset != 0) {
-				arg->row_offset--;
-				goto do_next;
-			}
 
-			alloc_str = NULL;
-			str = callchain_list__sym_name(chain, bf, sizeof(bf),
-						       browser->show_dso);
+			row += hist_browser__show_callchain_list(browser, child,
+							chain, row, total,
+							was_first && need_percent,
+							offset + extra_offset,
+							print, arg);
 
-			if (was_first && need_percent) {
-				double percent = cumul * 100.0 / total;
-
-				if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0)
-					str = "Not enough memory!";
-				else
-					str = alloc_str;
-			}
-
-			print(browser, chain, str, offset + extra_offset, row, arg);
-
-			free(alloc_str);
-
-			if (is_output_full(browser, ++row))
+			if (is_output_full(browser, row))
 				goto out;
-do_next:
+
 			if (folded_sign == '+')
 				break;
 		}
@@ -847,10 +1096,22 @@
 				total = entry->stat.period;
 		}
 
-		printed += hist_browser__show_callchain(browser,
+		if (callchain_param.mode == CHAIN_FLAT) {
+			printed += hist_browser__show_callchain_flat(browser,
+					&entry->sorted_chain, row, total,
+					hist_browser__show_callchain_entry, &arg,
+					hist_browser__check_output_full);
+		} else if (callchain_param.mode == CHAIN_FOLDED) {
+			printed += hist_browser__show_callchain_folded(browser,
+					&entry->sorted_chain, row, total,
+					hist_browser__show_callchain_entry, &arg,
+					hist_browser__check_output_full);
+		} else {
+			printed += hist_browser__show_callchain(browser,
 					&entry->sorted_chain, 1, row, total,
 					hist_browser__show_callchain_entry, &arg,
 					hist_browser__check_output_full);
+		}
 
 		if (arg.is_current_entry)
 			browser->he_selection = entry;
@@ -1802,10 +2063,9 @@
 	SLang_reset_tty();
 	SLang_init_tty(0, 0, 0);
 
-	if (min_pcnt) {
+	if (min_pcnt)
 		browser->min_pcnt = min_pcnt;
-		hist_browser__update_nr_entries(browser);
-	}
+	hist_browser__update_nr_entries(browser);
 
 	browser->pstack = pstack__new(3);
 	if (browser->pstack == NULL)
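
hist_browser__folded_callchain_str() above grows one line per callchain by
appending symbol names separated by symbol_conf.field_sep (';' when unset),
producing output like "30.00% main;foo;bar". A standalone sketch of that join
(illustrative; fold_append is not a perf helper, and perf additionally keeps the
old allocation around to cope with asprintf failure):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Append 'sym' to the folded line built so far, freeing the old
 * allocation; returns NULL when asprintf fails. */
static char *fold_append(char *old, const char *sep, const char *sym)
{
	char *new;

	if (!old)
		return strdup(sym);

	if (asprintf(&new, "%s%s%s", old, sep, sym) < 0)
		new = NULL;
	free(old);
	return new;
}

int main(void)
{
	const char *syms[] = { "main", "foo", "bar" };
	char *line = NULL;

	for (unsigned int i = 0; i < sizeof(syms) / sizeof(syms[0]); i++)
		line = fold_append(line, ";", syms[i]);

	if (line)
		printf("30.00%% %s\n", line);	/* 30.00% main;foo;bar */
	free(line);
	return 0;
}
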
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
index 4b3585e..4677172 100644
--- a/tools/perf/ui/gtk/hists.c
+++ b/tools/perf/ui/gtk/hists.c
@@ -89,8 +89,8 @@
 				perf_gtk__hpp_color_overhead_acc;
 }
 
-static void perf_gtk__add_callchain(struct rb_root *root, GtkTreeStore *store,
-				    GtkTreeIter *parent, int col, u64 total)
+static void perf_gtk__add_callchain_flat(struct rb_root *root, GtkTreeStore *store,
+					 GtkTreeIter *parent, int col, u64 total)
 {
 	struct rb_node *nd;
 	bool has_single_node = (rb_first(root) == rb_last(root));
@@ -100,13 +100,132 @@
 		struct callchain_list *chain;
 		GtkTreeIter iter, new_parent;
 		bool need_new_parent;
-		double percent;
-		u64 hits, child_total;
 
 		node = rb_entry(nd, struct callchain_node, rb_node);
 
-		hits = callchain_cumul_hits(node);
-		percent = 100.0 * hits / total;
+		new_parent = *parent;
+		need_new_parent = !has_single_node;
+
+		callchain_node__make_parent_list(node);
+
+		list_for_each_entry(chain, &node->parent_val, list) {
+			char buf[128];
+
+			gtk_tree_store_append(store, &iter, &new_parent);
+
+			callchain_node__scnprintf_value(node, buf, sizeof(buf), total);
+			gtk_tree_store_set(store, &iter, 0, buf, -1);
+
+			callchain_list__sym_name(chain, buf, sizeof(buf), false);
+			gtk_tree_store_set(store, &iter, col, buf, -1);
+
+			if (need_new_parent) {
+				/*
+				 * Only show the top-most symbol in a callchain
+				 * if it's not the only callchain.
+				 */
+				new_parent = iter;
+				need_new_parent = false;
+			}
+		}
+
+		list_for_each_entry(chain, &node->val, list) {
+			char buf[128];
+
+			gtk_tree_store_append(store, &iter, &new_parent);
+
+			callchain_node__scnprintf_value(node, buf, sizeof(buf), total);
+			gtk_tree_store_set(store, &iter, 0, buf, -1);
+
+			callchain_list__sym_name(chain, buf, sizeof(buf), false);
+			gtk_tree_store_set(store, &iter, col, buf, -1);
+
+			if (need_new_parent) {
+				/*
+				 * Only show the top-most symbol in a callchain
+				 * if it's not the only callchain.
+				 */
+				new_parent = iter;
+				need_new_parent = false;
+			}
+		}
+	}
+}
+
+static void perf_gtk__add_callchain_folded(struct rb_root *root, GtkTreeStore *store,
+					   GtkTreeIter *parent, int col, u64 total)
+{
+	struct rb_node *nd;
+
+	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+		struct callchain_node *node;
+		struct callchain_list *chain;
+		GtkTreeIter iter;
+		char buf[64];
+		char *str, *str_alloc = NULL;
+		bool first = true;
+
+		node = rb_entry(nd, struct callchain_node, rb_node);
+
+		callchain_node__make_parent_list(node);
+
+		list_for_each_entry(chain, &node->parent_val, list) {
+			char name[1024];
+
+			callchain_list__sym_name(chain, name, sizeof(name), false);
+
+			if (asprintf(&str, "%s%s%s",
+				     first ? "" : str_alloc,
+				     first ? "" : symbol_conf.field_sep ?: "; ",
+				     name) < 0)
+				return;
+
+			first = false;
+			free(str_alloc);
+			str_alloc = str;
+		}
+
+		list_for_each_entry(chain, &node->val, list) {
+			char name[1024];
+
+			callchain_list__sym_name(chain, name, sizeof(name), false);
+
+			if (asprintf(&str, "%s%s%s",
+				     first ? "" : str_alloc,
+				     first ? "" : symbol_conf.field_sep ?: "; ",
+				     name) < 0)
+				return;
+
+			first = false;
+			free(str_alloc);
+			str_alloc = str;
+		}
+
+		gtk_tree_store_append(store, &iter, parent);
+
+		callchain_node__scnprintf_value(node, buf, sizeof(buf), total);
+		gtk_tree_store_set(store, &iter, 0, buf, -1);
+
+		gtk_tree_store_set(store, &iter, col, str, -1);
+
+		free(str_alloc);
+	}
+}
+
+static void perf_gtk__add_callchain_graph(struct rb_root *root, GtkTreeStore *store,
+					  GtkTreeIter *parent, int col, u64 total)
+{
+	struct rb_node *nd;
+	bool has_single_node = (rb_first(root) == rb_last(root));
+
+	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+		struct callchain_node *node;
+		struct callchain_list *chain;
+		GtkTreeIter iter, new_parent;
+		bool need_new_parent;
+		u64 child_total;
+
+		node = rb_entry(nd, struct callchain_node, rb_node);
 
 		new_parent = *parent;
 		need_new_parent = !has_single_node && (node->val_nr > 1);
@@ -116,7 +235,7 @@
 
 			gtk_tree_store_append(store, &iter, &new_parent);
 
-			scnprintf(buf, sizeof(buf), "%5.2f%%", percent);
+			callchain_node__scnprintf_value(node, buf, sizeof(buf), total);
 			gtk_tree_store_set(store, &iter, 0, buf, -1);
 
 			callchain_list__sym_name(chain, buf, sizeof(buf), false);
@@ -138,11 +257,22 @@
 			child_total = total;
 
 		/* Now 'iter' contains info of the last callchain_list */
-		perf_gtk__add_callchain(&node->rb_root, store, &iter, col,
-					child_total);
+		perf_gtk__add_callchain_graph(&node->rb_root, store, &iter, col,
+					      child_total);
 	}
 }
 
+static void perf_gtk__add_callchain(struct rb_root *root, GtkTreeStore *store,
+				    GtkTreeIter *parent, int col, u64 total)
+{
+	if (callchain_param.mode == CHAIN_FLAT)
+		perf_gtk__add_callchain_flat(root, store, parent, col, total);
+	else if (callchain_param.mode == CHAIN_FOLDED)
+		perf_gtk__add_callchain_folded(root, store, parent, col, total);
+	else
+		perf_gtk__add_callchain_graph(root, store, parent, col, total);
+}
+
 static void on_row_activated(GtkTreeView *view, GtkTreePath *path,
 			     GtkTreeViewColumn *col __maybe_unused,
 			     gpointer user_data __maybe_unused)
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index dfcbc90..7ebc661 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -34,10 +34,10 @@
 	return ret;
 }
 
-static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
+static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
+				     struct callchain_list *chain,
 				     int depth, int depth_mask, int period,
-				     u64 total_samples, u64 hits,
-				     int left_margin)
+				     u64 total_samples, int left_margin)
 {
 	int i;
 	size_t ret = 0;
@@ -50,10 +50,9 @@
 		else
 			ret += fprintf(fp, " ");
 		if (!period && i == depth - 1) {
-			double percent;
-
-			percent = hits * 100.0 / total_samples;
-			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
+			ret += fprintf(fp, "--");
+			ret += callchain_node__fprintf_value(node, fp, total_samples);
+			ret += fprintf(fp, "--");
 		} else
 			ret += fprintf(fp, "%s", "          ");
 	}
@@ -82,13 +81,14 @@
 					 int depth_mask, int left_margin)
 {
 	struct rb_node *node, *next;
-	struct callchain_node *child;
+	struct callchain_node *child = NULL;
 	struct callchain_list *chain;
 	int new_depth_mask = depth_mask;
 	u64 remaining;
 	size_t ret = 0;
 	int i;
 	uint entries_printed = 0;
+	int cumul_count = 0;
 
 	remaining = total_samples;
 
@@ -100,6 +100,7 @@
 		child = rb_entry(node, struct callchain_node, rb_node);
 		cumul = callchain_cumul_hits(child);
 		remaining -= cumul;
+		cumul_count += callchain_cumul_counts(child);
 
 		/*
 		 * The depth mask manages the output of pipes that show
@@ -120,10 +121,9 @@
 						   left_margin);
 		i = 0;
 		list_for_each_entry(chain, &child->val, list) {
-			ret += ipchain__fprintf_graph(fp, chain, depth,
+			ret += ipchain__fprintf_graph(fp, child, chain, depth,
 						      new_depth_mask, i++,
 						      total_samples,
-						      cumul,
 						      left_margin);
 		}
 
@@ -143,14 +143,23 @@
 
 	if (callchain_param.mode == CHAIN_GRAPH_REL &&
 		remaining && remaining != total_samples) {
+		struct callchain_node rem_node = {
+			.hit = remaining,
+		};
 
 		if (!rem_sq_bracket)
 			return ret;
 
+		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
+			rem_node.count = child->parent->children_count - cumul_count;
+			if (rem_node.count <= 0)
+				return ret;
+		}
+
 		new_depth_mask &= ~(1 << (depth - 1));
-		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
+		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
 					      new_depth_mask, 0, total_samples,
-					      remaining, left_margin);
+					      left_margin);
 	}
 
 	return ret;
@@ -243,13 +252,63 @@
 	struct rb_node *rb_node = rb_first(tree);
 
 	while (rb_node) {
-		double percent;
+		chain = rb_entry(rb_node, struct callchain_node, rb_node);
+
+		ret += fprintf(fp, "           ");
+		ret += callchain_node__fprintf_value(chain, fp, total_samples);
+		ret += fprintf(fp, "\n");
+		ret += __callchain__fprintf_flat(fp, chain, total_samples);
+		ret += fprintf(fp, "\n");
+		if (++entries_printed == callchain_param.print_limit)
+			break;
+
+		rb_node = rb_next(rb_node);
+	}
+
+	return ret;
+}
+
+static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
+{
+	const char *sep = symbol_conf.field_sep ?: ";";
+	struct callchain_list *chain;
+	size_t ret = 0;
+	char bf[1024];
+	bool first;
+
+	if (!node)
+		return 0;
+
+	ret += __callchain__fprintf_folded(fp, node->parent);
+
+	first = (ret == 0);
+	list_for_each_entry(chain, &node->val, list) {
+		if (chain->ip >= PERF_CONTEXT_MAX)
+			continue;
+		ret += fprintf(fp, "%s%s", first ? "" : sep,
+			       callchain_list__sym_name(chain,
+						bf, sizeof(bf), false));
+		first = false;
+	}
+
+	return ret;
+}
+
+static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
+					u64 total_samples)
+{
+	size_t ret = 0;
+	u32 entries_printed = 0;
+	struct callchain_node *chain;
+	struct rb_node *rb_node = rb_first(tree);
+
+	while (rb_node) {
 
 		chain = rb_entry(rb_node, struct callchain_node, rb_node);
-		percent = chain->hit * 100.0 / total_samples;
 
-		ret = percent_color_fprintf(fp, "           %6.2f%%\n", percent);
-		ret += __callchain__fprintf_flat(fp, chain, total_samples);
+		ret += callchain_node__fprintf_value(chain, fp, total_samples);
+		ret += fprintf(fp, " ");
+		ret += __callchain__fprintf_folded(fp, chain);
 		ret += fprintf(fp, "\n");
 		if (++entries_printed == callchain_param.print_limit)
 			break;
@@ -278,6 +337,9 @@
 	case CHAIN_FLAT:
 		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
 		break;
+	case CHAIN_FOLDED:
+		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
+		break;
 	case CHAIN_NONE:
 		break;
 	default:
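
The CHAIN_FOLDED printer above emits each call chain as one line: the value first, then every symbol from root to leaf joined by symbol_conf.field_sep (";" by default), the format flame graph tooling consumes. A minimal standalone sketch of the join step, using a hypothetical frame array in place of the struct callchain_list walk:

#include <stdio.h>

/*
 * Sketch of the folded join: print frames leaf-ward, separated by
 * 'sep', the way callchain__fprintf_folded() emits one chain per line.
 * The frames array is hypothetical; perf walks callchain_list nodes.
 */
static size_t fprintf_folded(FILE *fp, const char **frames, int nr,
                             const char *sep)
{
	size_t ret = 0;

	for (int i = 0; i < nr; i++)
		ret += fprintf(fp, "%s%s", i ? sep : "", frames[i]);
	return ret;
}

int main(void)
{
	const char *frames[] = { "main", "cmd_record", "write" };

	printf("120 ");                        /* the chain's value */
	fprintf_folded(stdout, frames, 3, ";");
	putchar('\n');                         /* 120 main;cmd_record;write */
	return 0;
}
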
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 591b3fe..0513dd5 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -21,6 +21,7 @@
 libperf-y += perf_regs.o
 libperf-y += path.o
 libperf-y += rbtree.o
+libperf-y += libstring.o
 libperf-y += bitmap.o
 libperf-y += hweight.o
 libperf-y += run-command.o
@@ -88,6 +89,7 @@
 libperf-y += parse-regs-options.o
 
 libperf-$(CONFIG_LIBBPF) += bpf-loader.o
+libperf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
 libperf-$(CONFIG_LIBELF) += symbol-elf.o
 libperf-$(CONFIG_LIBELF) += probe-file.o
 libperf-$(CONFIG_LIBELF) += probe-event.o
@@ -138,6 +140,7 @@
 
 CFLAGS_find_next_bit.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
 CFLAGS_rbtree.o        += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
+CFLAGS_libstring.o     += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
 CFLAGS_hweight.o       += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
 CFLAGS_parse-events.o  += -Wno-redundant-decls
 
@@ -153,6 +156,10 @@
 	$(call rule_mkdir)
 	$(call if_changed_dep,cc_o_c)
 
+$(OUTPUT)util/libstring.o: ../lib/string.c FORCE
+	$(call rule_mkdir)
+	$(call if_changed_dep,cc_o_c)
+
 $(OUTPUT)util/hweight.o: ../lib/hweight.c FORCE
 	$(call rule_mkdir)
 	$(call if_changed_dep,cc_o_c)
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 1dd1949..b795b69 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -65,6 +65,11 @@
 
 	name++;
 
+#ifdef __arm__
+	if (strchr(name, '+'))
+		return -1;
+#endif
+
 	tok = strchr(name, '>');
 	if (tok == NULL)
 		return -1;
@@ -246,7 +251,11 @@
 		return -1;
 
 	target = ++s;
+#ifdef __arm__
+	comment = strchr(s, ';');
+#else
 	comment = strchr(s, '#');
+#endif
 
 	if (comment != NULL)
 		s = comment - 1;
@@ -354,6 +363,20 @@
 	{ .name = "addq",  .ops  = &mov_ops, },
 	{ .name = "addw",  .ops  = &mov_ops, },
 	{ .name = "and",   .ops  = &mov_ops, },
+#ifdef __arm__
+	{ .name = "b",     .ops  = &jump_ops, }, // might also be a call
+	{ .name = "bcc",   .ops  = &jump_ops, },
+	{ .name = "bcs",   .ops  = &jump_ops, },
+	{ .name = "beq",   .ops  = &jump_ops, },
+	{ .name = "bge",   .ops  = &jump_ops, },
+	{ .name = "bgt",   .ops  = &jump_ops, },
+	{ .name = "bhi",   .ops  = &jump_ops, },
+	{ .name = "bl",    .ops  = &call_ops, },
+	{ .name = "blt",   .ops  = &jump_ops, },
+	{ .name = "bls",   .ops  = &jump_ops, },
+	{ .name = "blx",   .ops  = &call_ops, },
+	{ .name = "bne",   .ops  = &jump_ops, },
+#endif
 	{ .name = "bts",   .ops  = &mov_ops, },
 	{ .name = "call",  .ops  = &call_ops, },
 	{ .name = "callq", .ops  = &call_ops, },
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 4c50411..540a7ef 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -5,11 +5,15 @@
  * Copyright (C) 2015 Huawei Inc.
  */
 
+#include <linux/bpf.h>
 #include <bpf/libbpf.h>
 #include <linux/err.h>
+#include <linux/string.h>
 #include "perf.h"
 #include "debug.h"
 #include "bpf-loader.h"
+#include "bpf-prologue.h"
+#include "llvm-utils.h"
 #include "probe-event.h"
 #include "probe-finder.h" // for MAX_PROBES
 #include "llvm-utils.h"
@@ -32,6 +36,10 @@
 
 struct bpf_prog_priv {
 	struct perf_probe_event pev;
+	bool need_prologue;
+	struct bpf_insn *insns_buf;
+	int nr_types;
+	int *type_mapping;
 };
 
 static bool libbpf_initialized;
@@ -106,10 +114,178 @@
 	struct bpf_prog_priv *priv = _priv;
 
 	cleanup_perf_probe_events(&priv->pev, 1);
+	zfree(&priv->insns_buf);
+	zfree(&priv->type_mapping);
 	free(priv);
 }
 
 static int
+prog_config__exec(const char *value, struct perf_probe_event *pev)
+{
+	pev->uprobes = true;
+	pev->target = strdup(value);
+	if (!pev->target)
+		return -ENOMEM;
+	return 0;
+}
+
+static int
+prog_config__module(const char *value, struct perf_probe_event *pev)
+{
+	pev->uprobes = false;
+	pev->target = strdup(value);
+	if (!pev->target)
+		return -ENOMEM;
+	return 0;
+}
+
+static int
+prog_config__bool(const char *value, bool *pbool, bool invert)
+{
+	int err;
+	bool bool_value;
+
+	if (!pbool)
+		return -EINVAL;
+
+	err = strtobool(value, &bool_value);
+	if (err)
+		return err;
+
+	*pbool = invert ? !bool_value : bool_value;
+	return 0;
+}
+
+static int
+prog_config__inlines(const char *value,
+		     struct perf_probe_event *pev __maybe_unused)
+{
+	return prog_config__bool(value, &probe_conf.no_inlines, true);
+}
+
+static int
+prog_config__force(const char *value,
+		   struct perf_probe_event *pev __maybe_unused)
+{
+	return prog_config__bool(value, &probe_conf.force_add, false);
+}
+
+static struct {
+	const char *key;
+	const char *usage;
+	const char *desc;
+	int (*func)(const char *, struct perf_probe_event *);
+} bpf_prog_config_terms[] = {
+	{
+		.key	= "exec",
+		.usage	= "exec=<full path of file>",
+		.desc	= "Set uprobe target",
+		.func	= prog_config__exec,
+	},
+	{
+		.key	= "module",
+		.usage	= "module=<module name>    ",
+		.desc	= "Set kprobe module",
+		.func	= prog_config__module,
+	},
+	{
+		.key	= "inlines",
+		.usage	= "inlines=[yes|no]        ",
+		.desc	= "Probe at inline symbol",
+		.func	= prog_config__inlines,
+	},
+	{
+		.key	= "force",
+		.usage	= "force=[yes|no]          ",
+		.desc	= "Forcibly add events with existing name",
+		.func	= prog_config__force,
+	},
+};
+
+static int
+do_prog_config(const char *key, const char *value,
+	       struct perf_probe_event *pev)
+{
+	unsigned int i;
+
+	pr_debug("config bpf program: %s=%s\n", key, value);
+	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
+		if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
+			return bpf_prog_config_terms[i].func(value, pev);
+
+	pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
+		 key, value);
+
+	pr_debug("\nHint: Valid options are:\n");
+	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
+		pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
+			 bpf_prog_config_terms[i].desc);
+	pr_debug("\n");
+
+	return -BPF_LOADER_ERRNO__PROGCONF_TERM;
+}
+
+static const char *
+parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
+{
+	char *text = strdup(config_str);
+	char *sep, *line;
+	const char *main_str = NULL;
+	int err = 0;
+
+	if (!text) {
+		pr_debug("No enough memory: dup config_str failed\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	line = text;
+	while ((sep = strchr(line, ';'))) {
+		char *equ;
+
+		*sep = '\0';
+		equ = strchr(line, '=');
+		if (!equ) {
+			pr_warning("WARNING: invalid config in BPF object: %s\n",
+				   line);
+			pr_warning("\tShould be 'key=value'.\n");
+			goto nextline;
+		}
+		*equ = '\0';
+
+		err = do_prog_config(line, equ + 1, pev);
+		if (err)
+			break;
+nextline:
+		line = sep + 1;
+	}
+
+	if (!err)
+		main_str = config_str + (line - text);
+	free(text);
+
+	return err ? ERR_PTR(err) : main_str;
+}
+
+static int
+parse_prog_config(const char *config_str, struct perf_probe_event *pev)
+{
+	int err;
+	const char *main_str = parse_prog_config_kvpair(config_str, pev);
+
+	if (IS_ERR(main_str))
+		return PTR_ERR(main_str);
+
+	err = parse_perf_probe_command(main_str, pev);
+	if (err < 0) {
+		pr_debug("bpf: '%s' is not a valid config string\n",
+			 config_str);
+		/* parse failed, no need to clear pev. */
+		return -BPF_LOADER_ERRNO__CONFIG;
+	}
+	return 0;
+}
+
+static int
 config_bpf_program(struct bpf_program *prog)
 {
 	struct perf_probe_event *pev = NULL;
@@ -117,6 +293,10 @@
 	const char *config_str;
 	int err;
 
+	/* Initialize per-program probing setting */
+	probe_conf.no_inlines = false;
+	probe_conf.force_add = false;
+
 	config_str = bpf_program__title(prog, false);
 	if (IS_ERR(config_str)) {
 		pr_debug("bpf: unable to get title for program\n");
@@ -131,13 +311,9 @@
 	pev = &priv->pev;
 
 	pr_debug("bpf: config program '%s'\n", config_str);
-	err = parse_perf_probe_command(config_str, pev);
-	if (err < 0) {
-		pr_debug("bpf: '%s' is not a valid config string\n",
-			 config_str);
-		err = -BPF_LOADER_ERRNO__CONFIG;
+	err = parse_prog_config(config_str, pev);
+	if (err)
 		goto errout;
-	}
 
 	if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
 		pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
@@ -197,6 +373,220 @@
 	return err;
 }
 
+static int
+preproc_gen_prologue(struct bpf_program *prog, int n,
+		     struct bpf_insn *orig_insns, int orig_insns_cnt,
+		     struct bpf_prog_prep_result *res)
+{
+	struct probe_trace_event *tev;
+	struct perf_probe_event *pev;
+	struct bpf_prog_priv *priv;
+	struct bpf_insn *buf;
+	size_t prologue_cnt = 0;
+	int i, err;
+
+	err = bpf_program__get_private(prog, (void **)&priv);
+	if (err || !priv)
+		goto errout;
+
+	pev = &priv->pev;
+
+	if (n < 0 || n >= priv->nr_types)
+		goto errout;
+
+	/* Find a tev that belongs to that type */
+	for (i = 0; i < pev->ntevs; i++) {
+		if (priv->type_mapping[i] == n)
+			break;
+	}
+
+	if (i >= pev->ntevs) {
+		pr_debug("Internal error: prologue type %d not found\n", n);
+		return -BPF_LOADER_ERRNO__PROLOGUE;
+	}
+
+	tev = &pev->tevs[i];
+
+	buf = priv->insns_buf;
+	err = bpf__gen_prologue(tev->args, tev->nargs,
+				buf, &prologue_cnt,
+				BPF_MAXINSNS - orig_insns_cnt);
+	if (err) {
+		const char *title;
+
+		title = bpf_program__title(prog, false);
+		if (!title)
+			title = "[unknown]";
+
+		pr_debug("Failed to generate prologue for program %s\n",
+			 title);
+		return err;
+	}
+
+	memcpy(&buf[prologue_cnt], orig_insns,
+	       sizeof(struct bpf_insn) * orig_insns_cnt);
+
+	res->new_insn_ptr = buf;
+	res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
+	res->pfd = NULL;
+	return 0;
+
+errout:
+	pr_debug("Internal error in preproc_gen_prologue\n");
+	return -BPF_LOADER_ERRNO__PROLOGUE;
+}
+
+/*
+ * compare_tev_args is reflexive, transitive and antisymmetric.
+ * I could prove it, but this margin is too narrow to contain the proof.
+ */
+static int compare_tev_args(const void *ptev1, const void *ptev2)
+{
+	int i, ret;
+	const struct probe_trace_event *tev1 =
+		*(const struct probe_trace_event **)ptev1;
+	const struct probe_trace_event *tev2 =
+		*(const struct probe_trace_event **)ptev2;
+
+	ret = tev2->nargs - tev1->nargs;
+	if (ret)
+		return ret;
+
+	for (i = 0; i < tev1->nargs; i++) {
+		struct probe_trace_arg *arg1, *arg2;
+		struct probe_trace_arg_ref *ref1, *ref2;
+
+		arg1 = &tev1->args[i];
+		arg2 = &tev2->args[i];
+
+		ret = strcmp(arg1->value, arg2->value);
+		if (ret)
+			return ret;
+
+		ref1 = arg1->ref;
+		ref2 = arg2->ref;
+
+		while (ref1 && ref2) {
+			ret = ref2->offset - ref1->offset;
+			if (ret)
+				return ret;
+
+			ref1 = ref1->next;
+			ref2 = ref2->next;
+		}
+
+		if (ref1 || ref2)
+			return ref2 ? 1 : -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Assign a type number to each tev in a pev.
+ * mapping is an array with the same number of slots as tevs in that pev.
+ * nr_types will be set to the number of types.
+ */
+static int map_prologue(struct perf_probe_event *pev, int *mapping,
+			int *nr_types)
+{
+	int i, type = 0;
+	struct probe_trace_event **ptevs;
+
+	size_t array_sz = sizeof(*ptevs) * pev->ntevs;
+
+	ptevs = malloc(array_sz);
+	if (!ptevs) {
+		pr_debug("No ehough memory: alloc ptevs failed\n");
+		return -ENOMEM;
+	}
+
+	pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
+	for (i = 0; i < pev->ntevs; i++)
+		ptevs[i] = &pev->tevs[i];
+
+	qsort(ptevs, pev->ntevs, sizeof(*ptevs),
+	      compare_tev_args);
+
+	for (i = 0; i < pev->ntevs; i++) {
+		int n;
+
+		n = ptevs[i] - pev->tevs;
+		if (i == 0) {
+			mapping[n] = type;
+			pr_debug("mapping[%d]=%d\n", n, type);
+			continue;
+		}
+
+		if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
+			mapping[n] = type;
+		else
+			mapping[n] = ++type;
+
+		pr_debug("mapping[%d]=%d\n", n, mapping[n]);
+	}
+	free(ptevs);
+	*nr_types = type + 1;
+
+	return 0;
+}
+
+static int hook_load_preprocessor(struct bpf_program *prog)
+{
+	struct perf_probe_event *pev;
+	struct bpf_prog_priv *priv;
+	bool need_prologue = false;
+	int err, i;
+
+	err = bpf_program__get_private(prog, (void **)&priv);
+	if (err || !priv) {
+		pr_debug("Internal error when hook preprocessor\n");
+		return -BPF_LOADER_ERRNO__INTERNAL;
+	}
+
+	pev = &priv->pev;
+	for (i = 0; i < pev->ntevs; i++) {
+		struct probe_trace_event *tev = &pev->tevs[i];
+
+		if (tev->nargs > 0) {
+			need_prologue = true;
+			break;
+		}
+	}
+
+	/*
+	 * Since no tev has an argument, we don't need to generate
+	 * a prologue.
+	 */
+	if (!need_prologue) {
+		priv->need_prologue = false;
+		return 0;
+	}
+
+	priv->need_prologue = true;
+	priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
+	if (!priv->insns_buf) {
+		pr_debug("No enough memory: alloc insns_buf failed\n");
+		return -ENOMEM;
+	}
+
+	priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
+	if (!priv->type_mapping) {
+		pr_debug("No enough memory: alloc type_mapping failed\n");
+		return -ENOMEM;
+	}
+	memset(priv->type_mapping, -1,
+	       sizeof(int) * pev->ntevs);
+
+	err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
+	if (err)
+		return err;
+
+	err = bpf_program__set_prep(prog, priv->nr_types,
+				    preproc_gen_prologue);
+	return err;
+}
+
 int bpf__probe(struct bpf_object *obj)
 {
 	int err = 0;
@@ -231,6 +621,18 @@
 			pr_debug("bpf_probe: failed to apply perf probe events");
 			goto out;
 		}
+
+		/*
+		 * After probing, consider the prologue, which
+		 * adds an argument fetcher to BPF programs.
+		 *
+		 * hook_load_preprocessor() hooks a preprocessor
+		 * to the bpf_program, letting it generate the
+		 * prologue dynamically during loading.
+		 */
+		err = hook_load_preprocessor(prog);
+		if (err)
+			goto out;
 	}
 out:
 	return err < 0 ? err : 0;
@@ -314,7 +716,14 @@
 		for (i = 0; i < pev->ntevs; i++) {
 			tev = &pev->tevs[i];
 
-			fd = bpf_program__fd(prog);
+			if (priv->need_prologue) {
+				int type = priv->type_mapping[i];
+
+				fd = bpf_program__nth_fd(prog, type);
+			} else {
+				fd = bpf_program__fd(prog);
+			}
+
 			if (fd < 0) {
 				pr_debug("bpf: failed to get file descriptor\n");
 				return fd;
@@ -340,6 +749,10 @@
 	[ERRCODE_OFFSET(EVENTNAME)]	= "No event name found in config string",
 	[ERRCODE_OFFSET(INTERNAL)]	= "BPF loader internal error",
 	[ERRCODE_OFFSET(COMPILE)]	= "Error when compiling BPF scriptlet",
+	[ERRCODE_OFFSET(PROGCONF_TERM)]	= "Invalid program config term in config string",
+	[ERRCODE_OFFSET(PROLOGUE)]	= "Failed to generate prologue",
+	[ERRCODE_OFFSET(PROLOGUE2BIG)]	= "Prologue too big for program",
+	[ERRCODE_OFFSET(PROLOGUEOOB)]	= "Offset out of bound for prologue",
 };
 
 static int
@@ -420,7 +833,11 @@
 			int err, char *buf, size_t size)
 {
 	bpf__strerror_head(err, buf, size);
-	bpf__strerror_entry(EEXIST, "Probe point exist. Try use 'perf probe -d \"*\"'");
+	case BPF_LOADER_ERRNO__PROGCONF_TERM: {
+		scnprintf(buf, size, "%s (add -v to see detail)", emsg);
+		break;
+	}
+	bpf__strerror_entry(EEXIST, "Probe point exists. Try 'perf probe -d \"*\"' and set 'force=yes'");
 	bpf__strerror_entry(EACCES, "You need to be root");
 	bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
 	bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
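
The config string handed to config_bpf_program() is the program's section name; parse_prog_config_kvpair() strips leading "key=value;" terms (exec, module, inlines, force) and whatever remains is passed to the perf-probe parser. A minimal sketch of that split, assuming a hypothetical "exec=/bin/ls;force=yes;sys_write" section name:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Sketch of parse_prog_config_kvpair(): strip "key=value;" pairs off
 * the front of a program title and return the remaining probe string.
 */
static const char *parse_kvpairs(const char *config_str)
{
	char *text = strdup(config_str);
	char *sep, *line;

	if (!text)
		return config_str;

	line = text;
	while ((sep = strchr(line, ';'))) {
		char *equ;

		*sep = '\0';
		equ = strchr(line, '=');
		if (equ) {
			*equ = '\0';
			printf("config: %s = %s\n", line, equ + 1);
		}
		line = sep + 1;
	}

	/* the main probe string starts where the k=v prefix ended */
	config_str += line - text;
	free(text);
	return config_str;
}

int main(void)
{
	/* hypothetical SEC() title of a BPF program */
	puts(parse_kvpairs("exec=/bin/ls;force=yes;sys_write"));
	return 0;
}
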
diff --git a/tools/perf/util/bpf-loader.h b/tools/perf/util/bpf-loader.h
index 9caf3ae..6fdc045 100644
--- a/tools/perf/util/bpf-loader.h
+++ b/tools/perf/util/bpf-loader.h
@@ -20,6 +20,10 @@
 	BPF_LOADER_ERRNO__EVENTNAME,	/* Event name is missing */
 	BPF_LOADER_ERRNO__INTERNAL,	/* BPF loader internal error */
 	BPF_LOADER_ERRNO__COMPILE,	/* Error when compiling BPF scriptlet */
+	BPF_LOADER_ERRNO__PROGCONF_TERM,/* Invalid program config term in config string */
+	BPF_LOADER_ERRNO__PROLOGUE,	/* Failed to generate prologue */
+	BPF_LOADER_ERRNO__PROLOGUE2BIG,	/* Prologue too big for program */
+	BPF_LOADER_ERRNO__PROLOGUEOOB,	/* Offset out of bound for prologue */
 	__BPF_LOADER_ERRNO__END,
 };
 
diff --git a/tools/perf/util/bpf-prologue.c b/tools/perf/util/bpf-prologue.c
new file mode 100644
index 0000000..6cdbee1
--- /dev/null
+++ b/tools/perf/util/bpf-prologue.c
@@ -0,0 +1,455 @@
+/*
+ * bpf-prologue.c
+ *
+ * Copyright (C) 2015 He Kuang <hekuang@huawei.com>
+ * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
+ * Copyright (C) 2015 Huawei Inc.
+ */
+
+#include <bpf/libbpf.h>
+#include "perf.h"
+#include "debug.h"
+#include "bpf-loader.h"
+#include "bpf-prologue.h"
+#include "probe-finder.h"
+#include <dwarf-regs.h>
+#include <linux/filter.h>
+
+#define BPF_REG_SIZE		8
+
+#define JMP_TO_ERROR_CODE	-1
+#define JMP_TO_SUCCESS_CODE	-2
+#define JMP_TO_USER_CODE	-3
+
+struct bpf_insn_pos {
+	struct bpf_insn *begin;
+	struct bpf_insn *end;
+	struct bpf_insn *pos;
+};
+
+static inline int
+pos_get_cnt(struct bpf_insn_pos *pos)
+{
+	return pos->pos - pos->begin;
+}
+
+static int
+append_insn(struct bpf_insn new_insn, struct bpf_insn_pos *pos)
+{
+	if (!pos->pos)
+		return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
+
+	if (pos->pos + 1 >= pos->end) {
+		pr_err("bpf prologue: prologue too long\n");
+		pos->pos = NULL;
+		return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
+	}
+
+	*(pos->pos)++ = new_insn;
+	return 0;
+}
+
+static int
+check_pos(struct bpf_insn_pos *pos)
+{
+	if (!pos->pos || pos->pos >= pos->end)
+		return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
+	return 0;
+}
+
+/* Give it a shorter name */
+#define ins(i, p) append_insn((i), (p))
+
+/*
+ * Given a register name (in 'reg'), generate an instruction to
+ * load that register into eBPF register 'target_reg':
+ *   'ldd target_reg, offset(ctx_reg)', where
+ * ctx_reg is pre-initialized to point to 'struct pt_regs'.
+ */
+static int
+gen_ldx_reg_from_ctx(struct bpf_insn_pos *pos, int ctx_reg,
+		     const char *reg, int target_reg)
+{
+	int offset = regs_query_register_offset(reg);
+
+	if (offset < 0) {
+		pr_err("bpf: prologue: failed to get register %s\n",
+		       reg);
+		return offset;
+	}
+	ins(BPF_LDX_MEM(BPF_DW, target_reg, ctx_reg, offset), pos);
+
+	return check_pos(pos);
+}
+
+/*
+ * Generate a BPF_FUNC_probe_read function call.
+ *
+ * src_base_addr_reg is a register holding base address,
+ * dst_addr_reg is a register holding dest address (on stack),
+ * result is:
+ *
+ *  *[dst_addr_reg] = *([src_base_addr_reg] + offset)
+ *
+ * Arguments of BPF_FUNC_probe_read:
+ *     ARG1: ptr to stack (dest)
+ *     ARG2: size (8)
+ *     ARG3: unsafe ptr (src)
+ */
+static int
+gen_read_mem(struct bpf_insn_pos *pos,
+	     int src_base_addr_reg,
+	     int dst_addr_reg,
+	     long offset)
+{
+	/* mov arg3, src_base_addr_reg */
+	if (src_base_addr_reg != BPF_REG_ARG3)
+		ins(BPF_MOV64_REG(BPF_REG_ARG3, src_base_addr_reg), pos);
+	/* add arg3, #offset */
+	if (offset)
+		ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, offset), pos);
+
+	/* mov arg2, #reg_size */
+	ins(BPF_ALU64_IMM(BPF_MOV, BPF_REG_ARG2, BPF_REG_SIZE), pos);
+
+	/* mov arg1, dst_addr_reg */
+	if (dst_addr_reg != BPF_REG_ARG1)
+		ins(BPF_MOV64_REG(BPF_REG_ARG1, dst_addr_reg), pos);
+
+	/* Call probe_read  */
+	ins(BPF_EMIT_CALL(BPF_FUNC_probe_read), pos);
+	/*
+	 * Error processing: if the read fails, jump to the error
+	 * code; the offset will be relocated to the start of the
+	 * error processing code.
+	 */
+	ins(BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, JMP_TO_ERROR_CODE),
+	    pos);
+
+	return check_pos(pos);
+}
+
+/*
+ * Each arg should be a bare register. Fetch and save them into the
+ * argument registers (r3 - r5).
+ *
+ * BPF_REG_1 should have been initialized with pointer to
+ * 'struct pt_regs'.
+ */
+static int
+gen_prologue_fastpath(struct bpf_insn_pos *pos,
+		      struct probe_trace_arg *args, int nargs)
+{
+	int i, err = 0;
+
+	for (i = 0; i < nargs; i++) {
+		err = gen_ldx_reg_from_ctx(pos, BPF_REG_1, args[i].value,
+					   BPF_PROLOGUE_START_ARG_REG + i);
+		if (err)
+			goto errout;
+	}
+
+	return check_pos(pos);
+errout:
+	return err;
+}
+
+/*
+ * Slow path:
+ *   At least one argument has the form of 'offset($rx)'.
+ *
+ * The following code first stores them onto the stack, then loads them
+ * all into r2 - r5.
+ * Before final loading, the final result should be:
+ *
+ * low address
+ * BPF_REG_FP - 24  ARG3
+ * BPF_REG_FP - 16  ARG2
+ * BPF_REG_FP - 8   ARG1
+ * BPF_REG_FP
+ * high address
+ *
+ * For each argument (described as: offn(...off2(off1(reg)))),
+ * generates following code:
+ *
+ *  r7 <- fp
+ *  r7 <- r7 - stack_offset  // Ideally r7 would be initialized from
+ *                           // fp once before generating args.
+ *                           // However, the verifier won't regard r7
+ *                           // as a stack pointer if it is derived
+ *                           // from any register other than fp.
+ *                           // This is why r7 has to be reset to fp
+ *                           // for each variable.
+ *  r3 <- value of 'reg'-> generated using gen_ldx_reg_from_ctx()
+ *  (r7) <- r3       // skip following instructions for bare reg
+ *  r3 <- r3 + off1  . // skip if off1 == 0
+ *  r2 <- 8           \
+ *  r1 <- r7           |-> generated by gen_read_mem()
+ *  call probe_read    /
+ *  jnei r0, 0, err  ./
+ *  r3 <- (r7)
+ *  r3 <- r3 + off2  . // skip if off2 == 0
+ *  r2 <- 8           \  // r2 may be broken by probe_read, so set again
+ *  r1 <- r7           |-> generated by gen_read_mem()
+ *  call probe_read    /
+ *  jnei r0, 0, err  ./
+ *  ...
+ */
+static int
+gen_prologue_slowpath(struct bpf_insn_pos *pos,
+		      struct probe_trace_arg *args, int nargs)
+{
+	int err, i;
+
+	for (i = 0; i < nargs; i++) {
+		struct probe_trace_arg *arg = &args[i];
+		const char *reg = arg->value;
+		struct probe_trace_arg_ref *ref = NULL;
+		int stack_offset = (i + 1) * -8;
+
+		pr_debug("prologue: fetch arg %d, base reg is %s\n",
+			 i, reg);
+
+		/* value of base register is stored into ARG3 */
+		err = gen_ldx_reg_from_ctx(pos, BPF_REG_CTX, reg,
+					   BPF_REG_ARG3);
+		if (err) {
+			pr_err("prologue: failed to get offset of register %s\n",
+			       reg);
+			goto errout;
+		}
+
+		/* Make r7 the stack pointer. */
+		ins(BPF_MOV64_REG(BPF_REG_7, BPF_REG_FP), pos);
+		/* r7 += stack_offset */
+		ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, stack_offset), pos);
+		/*
+		 * Store r3 (base register) onto the stack to
+		 * ensure fp[offset] is set.
+		 * fp is the only valid base register when storing
+		 * into the stack; we are not allowed to use r7 as
+		 * the base register here.
+		 */
+		ins(BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
+				stack_offset), pos);
+
+		ref = arg->ref;
+		while (ref) {
+			pr_debug("prologue: arg %d: offset %ld\n",
+				 i, ref->offset);
+			err = gen_read_mem(pos, BPF_REG_3, BPF_REG_7,
+					   ref->offset);
+			if (err) {
+				pr_err("prologue: failed to generate probe_read function call\n");
+				goto errout;
+			}
+
+			ref = ref->next;
+			/*
+			 * Load previous result into ARG3. Use
+			 * BPF_REG_FP instead of r7 because the verifier
+			 * allows FP-based addressing only.
+			 */
+			if (ref)
+				ins(BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3,
+						BPF_REG_FP, stack_offset), pos);
+		}
+	}
+
+	/* Final pass: read to registers */
+	for (i = 0; i < nargs; i++)
+		ins(BPF_LDX_MEM(BPF_DW, BPF_PROLOGUE_START_ARG_REG + i,
+				BPF_REG_FP, -BPF_REG_SIZE * (i + 1)), pos);
+
+	ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_SUCCESS_CODE), pos);
+
+	return check_pos(pos);
+errout:
+	return err;
+}
+
+static int
+prologue_relocate(struct bpf_insn_pos *pos, struct bpf_insn *error_code,
+		  struct bpf_insn *success_code, struct bpf_insn *user_code)
+{
+	struct bpf_insn *insn;
+
+	if (check_pos(pos))
+		return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
+
+	for (insn = pos->begin; insn < pos->pos; insn++) {
+		struct bpf_insn *target;
+		u8 class = BPF_CLASS(insn->code);
+		u8 opcode;
+
+		if (class != BPF_JMP)
+			continue;
+		opcode = BPF_OP(insn->code);
+		if (opcode == BPF_CALL)
+			continue;
+
+		switch (insn->off) {
+		case JMP_TO_ERROR_CODE:
+			target = error_code;
+			break;
+		case JMP_TO_SUCCESS_CODE:
+			target = success_code;
+			break;
+		case JMP_TO_USER_CODE:
+			target = user_code;
+			break;
+		default:
+			pr_err("bpf prologue: internal error: relocation failed\n");
+			return -BPF_LOADER_ERRNO__PROLOGUE;
+		}
+
+		insn->off = target - (insn + 1);
+	}
+	return 0;
+}
+
+int bpf__gen_prologue(struct probe_trace_arg *args, int nargs,
+		      struct bpf_insn *new_prog, size_t *new_cnt,
+		      size_t cnt_space)
+{
+	struct bpf_insn *success_code = NULL;
+	struct bpf_insn *error_code = NULL;
+	struct bpf_insn *user_code = NULL;
+	struct bpf_insn_pos pos;
+	bool fastpath = true;
+	int err = 0, i;
+
+	if (!new_prog || !new_cnt)
+		return -EINVAL;
+
+	if (cnt_space > BPF_MAXINSNS)
+		cnt_space = BPF_MAXINSNS;
+
+	pos.begin = new_prog;
+	pos.end = new_prog + cnt_space;
+	pos.pos = new_prog;
+
+	if (!nargs) {
+		ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0),
+		    &pos);
+
+		if (check_pos(&pos))
+			goto errout;
+
+		*new_cnt = pos_get_cnt(&pos);
+		return 0;
+	}
+
+	if (nargs > BPF_PROLOGUE_MAX_ARGS) {
+		pr_warning("bpf: prologue: %d arguments are dropped\n",
+			   nargs - BPF_PROLOGUE_MAX_ARGS);
+		nargs = BPF_PROLOGUE_MAX_ARGS;
+	}
+
+	/* First pass: validation */
+	for (i = 0; i < nargs; i++) {
+		struct probe_trace_arg_ref *ref = args[i].ref;
+
+		if (args[i].value[0] == '@') {
+			/* TODO: fetch global variable */
+			pr_err("bpf: prologue: global %s%+ld not support\n",
+				args[i].value, ref ? ref->offset : 0);
+			return -ENOTSUP;
+		}
+
+		while (ref) {
+			/* fastpath is true if all args have ref == NULL */
+			fastpath = false;
+
+			/*
+			 * Instruction encodes immediate value using
+			 * s32, ref->offset is long. On systems which
+			 * can't fill long in s32, refuse to process if
+			 * ref->offset too large (or small).
+			 */
+#ifdef __LP64__
+#define OFFSET_MAX	((1LL << 31) - 1)
+#define OFFSET_MIN	((1LL << 31) * -1)
+			if (ref->offset > OFFSET_MAX ||
+					ref->offset < OFFSET_MIN) {
+				pr_err("bpf: prologue: offset out of bound: %ld\n",
+				       ref->offset);
+				return -BPF_LOADER_ERRNO__PROLOGUEOOB;
+			}
+#endif
+			ref = ref->next;
+		}
+	}
+	pr_debug("prologue: pass validation\n");
+
+	if (fastpath) {
+		/* If all variables are registers... */
+		pr_debug("prologue: fast path\n");
+		err = gen_prologue_fastpath(&pos, args, nargs);
+		if (err)
+			goto errout;
+	} else {
+		pr_debug("prologue: slow path\n");
+
+		/* Initialization: move ctx to a callee saved register. */
+		ins(BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1), &pos);
+
+		err = gen_prologue_slowpath(&pos, args, nargs);
+		if (err)
+			goto errout;
+		/*
+		 * start of ERROR_CODE (only the slow path needs error code)
+		 *   mov r2 <- 1  // r2 is error number
+		 *   mov r3 <- 0  // r3, r4... should be touched or
+		 *                // verifier would complain
+		 *   mov r4 <- 0
+		 *   ...
+		 *   goto usercode
+		 */
+		error_code = pos.pos;
+		ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 1),
+		    &pos);
+
+		for (i = 0; i < nargs; i++)
+			ins(BPF_ALU64_IMM(BPF_MOV,
+					  BPF_PROLOGUE_START_ARG_REG + i,
+					  0),
+			    &pos);
+		ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_USER_CODE),
+				&pos);
+	}
+
+	/*
+	 * start of SUCCESS_CODE:
+	 *   mov r2 <- 0
+	 *   goto usercode  // skip
+	 */
+	success_code = pos.pos;
+	ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0), &pos);
+
+	/*
+	 * start of USER_CODE:
+	 *   Restore ctx to r1
+	 */
+	user_code = pos.pos;
+	if (!fastpath) {
+		/*
+		 * Only the slow path needs to restore ctx. In the fast
+		 * path, registers are loaded directly from r1.
+		 */
+		ins(BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX), &pos);
+		err = prologue_relocate(&pos, error_code, success_code,
+					user_code);
+		if (err)
+			goto errout;
+	}
+
+	err = check_pos(&pos);
+	if (err)
+		goto errout;
+
+	*new_cnt = pos_get_cnt(&pos);
+	return 0;
+errout:
+	return err;
+}
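
The generated prologue is emitted with placeholder jump offsets (JMP_TO_ERROR_CODE, JMP_TO_SUCCESS_CODE, JMP_TO_USER_CODE), and prologue_relocate() later patches each BPF_JMP's off field to target - (insn + 1), because eBPF jump offsets are relative to the instruction following the jump. A tiny sketch of that arithmetic over hypothetical instruction indexes:

#include <stdio.h>

/*
 * Sketch: an eBPF jump at index i targeting index t needs
 * off = t - (i + 1), since the offset is counted from the
 * instruction *after* the jump, mirroring prologue_relocate().
 */
int main(void)
{
	int jump_idx = 3;    /* hypothetical jnei placed at insn 3   */
	int target_idx = 10; /* hypothetical start of the error code */
	int off = target_idx - (jump_idx + 1);

	printf("insn[%d].off = %d\n", jump_idx, off); /* 6 */
	return 0;
}
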
diff --git a/tools/perf/util/bpf-prologue.h b/tools/perf/util/bpf-prologue.h
new file mode 100644
index 0000000..d94cbea
--- /dev/null
+++ b/tools/perf/util/bpf-prologue.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2015, He Kuang <hekuang@huawei.com>
+ * Copyright (C) 2015, Huawei Inc.
+ */
+#ifndef __BPF_PROLOGUE_H
+#define __BPF_PROLOGUE_H
+
+#include <linux/compiler.h>
+#include <linux/filter.h>
+#include "probe-event.h"
+
+#define BPF_PROLOGUE_MAX_ARGS 3
+#define BPF_PROLOGUE_START_ARG_REG BPF_REG_3
+#define BPF_PROLOGUE_FETCH_RESULT_REG BPF_REG_2
+
+#ifdef HAVE_BPF_PROLOGUE
+int bpf__gen_prologue(struct probe_trace_arg *args, int nargs,
+		      struct bpf_insn *new_prog, size_t *new_cnt,
+		      size_t cnt_space);
+#else
+static inline int
+bpf__gen_prologue(struct probe_trace_arg *args __maybe_unused,
+		  int nargs __maybe_unused,
+		  struct bpf_insn *new_prog __maybe_unused,
+		  size_t *new_cnt,
+		  size_t cnt_space __maybe_unused)
+{
+	if (!new_cnt)
+		return -EINVAL;
+	*new_cnt = 0;
+	return -ENOTSUP;
+}
+#endif
+#endif /* __BPF_PROLOGUE_H */
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 735ad48..564377d 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -44,6 +44,10 @@
 		callchain_param.mode = CHAIN_GRAPH_REL;
 		return 0;
 	}
+	if (!strncmp(value, "folded", strlen(value))) {
+		callchain_param.mode = CHAIN_FOLDED;
+		return 0;
+	}
 	return -1;
 }
 
@@ -79,6 +83,23 @@
 	return -1;
 }
 
+static int parse_callchain_value(const char *value)
+{
+	if (!strncmp(value, "percent", strlen(value))) {
+		callchain_param.value = CCVAL_PERCENT;
+		return 0;
+	}
+	if (!strncmp(value, "period", strlen(value))) {
+		callchain_param.value = CCVAL_PERIOD;
+		return 0;
+	}
+	if (!strncmp(value, "count", strlen(value))) {
+		callchain_param.value = CCVAL_COUNT;
+		return 0;
+	}
+	return -1;
+}
+
 static int
 __parse_callchain_report_opt(const char *arg, bool allow_record_opt)
 {
@@ -102,7 +123,8 @@
 
 		if (!parse_callchain_mode(tok) ||
 		    !parse_callchain_order(tok) ||
-		    !parse_callchain_sort_key(tok)) {
+		    !parse_callchain_sort_key(tok) ||
+		    !parse_callchain_value(tok)) {
 			/* parsing ok - move on to the next */
 			try_stack_size = false;
 			goto next;
@@ -218,6 +240,7 @@
 
 		switch (mode) {
 		case CHAIN_FLAT:
+		case CHAIN_FOLDED:
 			if (rnode->hit < chain->hit)
 				p = &(*p)->rb_left;
 			else
@@ -267,6 +290,7 @@
 sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
 		u64 min_hit, struct callchain_param *param __maybe_unused)
 {
+	*rb_root = RB_ROOT;
 	__sort_chain_flat(rb_root, &root->node, min_hit);
 }
 
@@ -338,6 +362,7 @@
 		param->sort = sort_chain_graph_rel;
 		break;
 	case CHAIN_FLAT:
+	case CHAIN_FOLDED:
 		param->sort = sort_chain_flat;
 		break;
 	case CHAIN_NONE:
@@ -363,6 +388,7 @@
 	}
 	new->parent = parent;
 	INIT_LIST_HEAD(&new->val);
+	INIT_LIST_HEAD(&new->parent_val);
 
 	if (inherit_children) {
 		struct rb_node *n;
@@ -431,6 +457,8 @@
 
 	new->children_hit = 0;
 	new->hit = period;
+	new->children_count = 0;
+	new->count = 1;
 	return new;
 }
 
@@ -478,6 +506,9 @@
 	parent->children_hit = callchain_cumul_hits(new);
 	new->val_nr = parent->val_nr - idx_local;
 	parent->val_nr = idx_local;
+	new->count = parent->count;
+	new->children_count = parent->children_count;
+	parent->children_count = callchain_cumul_counts(new);
 
 	/* create a new child for the new branch if any */
 	if (idx_total < cursor->nr) {
@@ -488,6 +519,8 @@
 
 		parent->hit = 0;
 		parent->children_hit += period;
+		parent->count = 0;
+		parent->children_count += 1;
 
 		node = callchain_cursor_current(cursor);
 		new = add_child(parent, cursor, period);
@@ -510,6 +543,7 @@
 		rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
 	} else {
 		parent->hit = period;
+		parent->count = 1;
 	}
 }
 
@@ -556,6 +590,7 @@
 
 inc_children_hit:
 	root->children_hit += period;
+	root->children_count++;
 }
 
 static int
@@ -608,6 +643,7 @@
 	/* we match 100% of the path, increment the hit */
 	if (matches == root->val_nr && cursor->pos == cursor->nr) {
 		root->hit += period;
+		root->count++;
 		return 0;
 	}
 
@@ -799,12 +835,72 @@
 	return bf;
 }
 
+char *callchain_node__scnprintf_value(struct callchain_node *node,
+				      char *bf, size_t bfsize, u64 total)
+{
+	double percent = 0.0;
+	u64 period = callchain_cumul_hits(node);
+	unsigned count = callchain_cumul_counts(node);
+
+	if (callchain_param.mode == CHAIN_FOLDED) {
+		period = node->hit;
+		count = node->count;
+	}
+
+	switch (callchain_param.value) {
+	case CCVAL_PERIOD:
+		scnprintf(bf, bfsize, "%"PRIu64, period);
+		break;
+	case CCVAL_COUNT:
+		scnprintf(bf, bfsize, "%u", count);
+		break;
+	case CCVAL_PERCENT:
+	default:
+		if (total)
+			percent = period * 100.0 / total;
+		scnprintf(bf, bfsize, "%.2f%%", percent);
+		break;
+	}
+	return bf;
+}
+
+int callchain_node__fprintf_value(struct callchain_node *node,
+				  FILE *fp, u64 total)
+{
+	double percent = 0.0;
+	u64 period = callchain_cumul_hits(node);
+	unsigned count = callchain_cumul_counts(node);
+
+	if (callchain_param.mode == CHAIN_FOLDED) {
+		period = node->hit;
+		count = node->count;
+	}
+
+	switch (callchain_param.value) {
+	case CCVAL_PERIOD:
+		return fprintf(fp, "%"PRIu64, period);
+	case CCVAL_COUNT:
+		return fprintf(fp, "%u", count);
+	case CCVAL_PERCENT:
+	default:
+		if (total)
+			percent = period * 100.0 / total;
+		return percent_color_fprintf(fp, "%.2f%%", percent);
+	}
+	return 0;
+}
+
 static void free_callchain_node(struct callchain_node *node)
 {
 	struct callchain_list *list, *tmp;
 	struct callchain_node *child;
 	struct rb_node *n;
 
+	list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
+		list_del(&list->list);
+		free(list);
+	}
+
 	list_for_each_entry_safe(list, tmp, &node->val, list) {
 		list_del(&list->list);
 		free(list);
@@ -828,3 +924,41 @@
 
 	free_callchain_node(&root->node);
 }
+
+int callchain_node__make_parent_list(struct callchain_node *node)
+{
+	struct callchain_node *parent = node->parent;
+	struct callchain_list *chain, *new;
+	LIST_HEAD(head);
+
+	while (parent) {
+		list_for_each_entry_reverse(chain, &parent->val, list) {
+			new = malloc(sizeof(*new));
+			if (new == NULL)
+				goto out;
+			*new = *chain;
+			new->has_children = false;
+			list_add_tail(&new->list, &head);
+		}
+		parent = parent->parent;
+	}
+
+	list_for_each_entry_safe_reverse(chain, new, &head, list)
+		list_move_tail(&chain->list, &node->parent_val);
+
+	if (!list_empty(&node->parent_val)) {
+		chain = list_first_entry(&node->parent_val, struct callchain_list, list);
+		chain->has_children = rb_prev(&node->rb_node) || rb_next(&node->rb_node);
+
+		chain = list_first_entry(&node->val, struct callchain_list, list);
+		chain->has_children = false;
+	}
+	return 0;
+
+out:
+	list_for_each_entry_safe(chain, new, &head, list) {
+		list_del(&chain->list);
+		free(chain);
+	}
+	return -ENOMEM;
+}
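
callchain_node__scnprintf_value() above picks the per-node figure from callchain_param.value: a percentage of the total, the raw period, or the number of times the chain was recorded, with folded mode using the node's own hit/count rather than the cumulative values. A standalone sketch of that selection, with the period and count passed in directly and hypothetical sample numbers:

#include <stdio.h>

enum chain_value { CCVAL_PERCENT, CCVAL_PERIOD, CCVAL_COUNT };

/* Sketch of the value switch, detached from struct callchain_node. */
static void print_value(enum chain_value v, unsigned long long period,
                        unsigned count, unsigned long long total)
{
	switch (v) {
	case CCVAL_PERIOD:
		printf("%llu\n", period);
		break;
	case CCVAL_COUNT:
		printf("%u\n", count);
		break;
	case CCVAL_PERCENT:
	default:
		printf("%.2f%%\n", total ? period * 100.0 / total : 0.0);
		break;
	}
}

int main(void)
{
	/* hypothetical: period 250000 over 3 samples, total 1000000 */
	print_value(CCVAL_PERCENT, 250000, 3, 1000000); /* 25.00% */
	print_value(CCVAL_PERIOD,  250000, 3, 1000000); /* 250000 */
	print_value(CCVAL_COUNT,   250000, 3, 1000000); /* 3      */
	return 0;
}

Given the parser additions in callchain.c, a command line such as 'perf report -g folded,count' would presumably select folded one-line chains valued by sample count (hypothetical usage; the exact -g syntax depends on the report option plumbing).
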
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index fce8161..8ac8f043 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -24,12 +24,13 @@
 #define CALLCHAIN_RECORD_HELP  CALLCHAIN_HELP RECORD_MODE_HELP RECORD_SIZE_HELP
 
 #define CALLCHAIN_REPORT_HELP						\
-	HELP_PAD "print_type:\tcall graph printing style (graph|flat|fractal|none)\n" \
+	HELP_PAD "print_type:\tcall graph printing style (graph|flat|fractal|folded|none)\n" \
 	HELP_PAD "threshold:\tminimum call graph inclusion threshold (<percent>)\n" \
 	HELP_PAD "print_limit:\tmaximum number of call graph entry (<number>)\n" \
 	HELP_PAD "order:\t\tcall graph order (caller|callee)\n" \
 	HELP_PAD "sort_key:\tcall graph sort key (function|address)\n"	\
-	HELP_PAD "branch:\t\tinclude last branch info to call graph (branch)\n"
+	HELP_PAD "branch:\t\tinclude last branch info to call graph (branch)\n" \
+	HELP_PAD "value:\t\tcall graph value (percent|period|count)\n"
 
 enum perf_call_graph_mode {
 	CALLCHAIN_NONE,
@@ -43,7 +44,8 @@
 	CHAIN_NONE,
 	CHAIN_FLAT,
 	CHAIN_GRAPH_ABS,
-	CHAIN_GRAPH_REL
+	CHAIN_GRAPH_REL,
+	CHAIN_FOLDED,
 };
 
 enum chain_order {
@@ -54,11 +56,14 @@
 struct callchain_node {
 	struct callchain_node	*parent;
 	struct list_head	val;
+	struct list_head	parent_val;
 	struct rb_node		rb_node_in; /* to insert nodes in an rbtree */
 	struct rb_node		rb_node;    /* to sort nodes in an output tree */
 	struct rb_root		rb_root_in; /* input tree of children */
 	struct rb_root		rb_root;    /* sorted output tree of children */
 	unsigned int		val_nr;
+	unsigned int		count;
+	unsigned int		children_count;
 	u64			hit;
 	u64			children_hit;
 };
@@ -78,6 +83,12 @@
 	CCKEY_ADDRESS
 };
 
+enum chain_value {
+	CCVAL_PERCENT,
+	CCVAL_PERIOD,
+	CCVAL_COUNT,
+};
+
 struct callchain_param {
 	bool			enabled;
 	enum perf_call_graph_mode record_mode;
@@ -90,6 +101,7 @@
 	bool			order_set;
 	enum chain_key		key;
 	bool			branch_callstack;
+	enum chain_value	value;
 };
 
 extern struct callchain_param callchain_param;
@@ -131,6 +143,7 @@
 static inline void callchain_init(struct callchain_root *root)
 {
 	INIT_LIST_HEAD(&root->node.val);
+	INIT_LIST_HEAD(&root->node.parent_val);
 
 	root->node.parent = NULL;
 	root->node.hit = 0;
@@ -144,6 +157,11 @@
 	return node->hit + node->children_hit;
 }
 
+static inline unsigned callchain_cumul_counts(struct callchain_node *node)
+{
+	return node->count + node->children_count;
+}
+
 int callchain_register_param(struct callchain_param *param);
 int callchain_append(struct callchain_root *root,
 		     struct callchain_cursor *cursor,
@@ -229,7 +247,12 @@
 
 char *callchain_list__sym_name(struct callchain_list *cl,
 			       char *bf, size_t bfsize, bool show_dso);
+char *callchain_node__scnprintf_value(struct callchain_node *node,
+				      char *bf, size_t bfsize, u64 total);
+int callchain_node__fprintf_value(struct callchain_node *node,
+				  FILE *fp, u64 total);
 
 void free_callchain(struct callchain_root *root);
+int callchain_node__make_parent_list(struct callchain_node *node);
 
 #endif	/* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 425df5c..e8e9a9d 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -1243,6 +1243,8 @@
 	if (dso != NULL) {
 		__dsos__add(dsos, dso);
 		dso__set_basename(dso);
+		/* Put dso here because __dsos__add already got it */
+		dso__put(dso);
 	}
 	return dso;
 }
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index d139219..d1b6c20 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -336,20 +336,12 @@
 
 void perf_evlist__disable(struct perf_evlist *evlist)
 {
-	int cpu, thread;
 	struct perf_evsel *pos;
-	int nr_cpus = cpu_map__nr(evlist->cpus);
-	int nr_threads;
 
-	for (cpu = 0; cpu < nr_cpus; cpu++) {
-		evlist__for_each(evlist, pos) {
-			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
-				continue;
-			nr_threads = perf_evlist__nr_threads(evlist, pos);
-			for (thread = 0; thread < nr_threads; thread++)
-				ioctl(FD(pos, cpu, thread),
-				      PERF_EVENT_IOC_DISABLE, 0);
-		}
+	evlist__for_each(evlist, pos) {
+		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
+			continue;
+		perf_evsel__disable(pos);
 	}
 
 	evlist->enabled = false;
@@ -357,20 +349,12 @@
 
 void perf_evlist__enable(struct perf_evlist *evlist)
 {
-	int cpu, thread;
 	struct perf_evsel *pos;
-	int nr_cpus = cpu_map__nr(evlist->cpus);
-	int nr_threads;
 
-	for (cpu = 0; cpu < nr_cpus; cpu++) {
-		evlist__for_each(evlist, pos) {
-			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
-				continue;
-			nr_threads = perf_evlist__nr_threads(evlist, pos);
-			for (thread = 0; thread < nr_threads; thread++)
-				ioctl(FD(pos, cpu, thread),
-				      PERF_EVENT_IOC_ENABLE, 0);
-		}
+	evlist__for_each(evlist, pos) {
+		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
+			continue;
+		perf_evsel__enable(pos);
 	}
 
 	evlist->enabled = true;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 397fb4e..47f0330 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -981,13 +981,26 @@
 	return -1;
 }
 
-int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
+int perf_evsel__enable(struct perf_evsel *evsel)
 {
+	int nthreads = thread_map__nr(evsel->threads);
+	int ncpus = cpu_map__nr(evsel->cpus);
+
 	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
 				     PERF_EVENT_IOC_ENABLE,
 				     0);
 }
 
+int perf_evsel__disable(struct perf_evsel *evsel)
+{
+	int nthreads = thread_map__nr(evsel->threads);
+	int ncpus = cpu_map__nr(evsel->cpus);
+
+	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
+				     PERF_EVENT_IOC_DISABLE,
+				     0);
+}
+
 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
 {
 	if (ncpus == 0 || nthreads == 0)
@@ -1192,6 +1205,7 @@
 		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
 		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
 		bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
+		bit_name(WEIGHT),
 		{ .name = NULL, }
 	};
 #undef bit_name
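
perf_evsel__enable() and the new perf_evsel__disable() reduce the evlist loops to one call per event; underneath, every per-cpu/per-thread fd is toggled with the PERF_EVENT_IOC_ENABLE/DISABLE ioctls. A self-contained single-fd sketch of those ioctls around a hypothetical instructions counter on the current thread (error handling kept minimal):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;	/* start stopped, enable via ioctl */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload runs here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %lld\n", count);
	close(fd);
	return 0;
}
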
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 0e49bd7..5ded1fc 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -227,7 +227,8 @@
 			      const char *op, const char *filter);
 int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
 			     const char *filter);
-int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads);
+int perf_evsel__enable(struct perf_evsel *evsel);
+int perf_evsel__disable(struct perf_evsel *evsel);
 
 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
 			     struct cpu_map *cpus);
diff --git a/tools/perf/util/exec_cmd.c b/tools/perf/util/exec_cmd.c
index 7adf4ad..1099e92 100644
--- a/tools/perf/util/exec_cmd.c
+++ b/tools/perf/util/exec_cmd.c
@@ -9,17 +9,17 @@
 static const char *argv_exec_path;
 static const char *argv0_path;
 
-const char *system_path(const char *path)
+char *system_path(const char *path)
 {
 	static const char *prefix = PREFIX;
 	struct strbuf d = STRBUF_INIT;
 
 	if (is_absolute_path(path))
-		return path;
+		return strdup(path);
 
 	strbuf_addf(&d, "%s/%s", prefix, path);
 	path = strbuf_detach(&d, NULL);
-	return path;
+	return (char *)path;
 }
 
 const char *perf_extract_argv0_path(const char *argv0)
@@ -52,17 +52,16 @@
 
 
 /* Returns the highest-priority, location to look for perf programs. */
-const char *perf_exec_path(void)
+char *perf_exec_path(void)
 {
-	const char *env;
+	char *env;
 
 	if (argv_exec_path)
-		return argv_exec_path;
+		return strdup(argv_exec_path);
 
 	env = getenv(EXEC_PATH_ENVIRONMENT);
-	if (env && *env) {
-		return env;
-	}
+	if (env && *env)
+		return strdup(env);
 
 	return system_path(PERF_EXEC_PATH);
 }
@@ -83,9 +82,11 @@
 {
 	const char *old_path = getenv("PATH");
 	struct strbuf new_path = STRBUF_INIT;
+	char *tmp = perf_exec_path();
 
-	add_path(&new_path, perf_exec_path());
+	add_path(&new_path, tmp);
 	add_path(&new_path, argv0_path);
+	free(tmp);
 
 	if (old_path)
 		strbuf_addstr(&new_path, old_path);
diff --git a/tools/perf/util/exec_cmd.h b/tools/perf/util/exec_cmd.h
index bc4b915..48b4175 100644
--- a/tools/perf/util/exec_cmd.h
+++ b/tools/perf/util/exec_cmd.h
@@ -3,10 +3,11 @@
 
 extern void perf_set_argv_exec_path(const char *exec_path);
 extern const char *perf_extract_argv0_path(const char *path);
-extern const char *perf_exec_path(void);
 extern void setup_path(void);
 extern int execv_perf_cmd(const char **argv); /* NULL terminated */
 extern int execl_perf_cmd(const char *cmd, ...);
-extern const char *system_path(const char *path);
+/* perf_exec_path and system_path return a malloc'd string; the caller must free it */
+extern char *perf_exec_path(void);
+extern char *system_path(const char *path);
 
 #endif /* __PERF_EXEC_CMD_H */
diff --git a/tools/perf/util/help.c b/tools/perf/util/help.c
index 86c37c4..fa1fc4a 100644
--- a/tools/perf/util/help.c
+++ b/tools/perf/util/help.c
@@ -159,7 +159,7 @@
 		struct cmdnames *other_cmds)
 {
 	const char *env_path = getenv("PATH");
-	const char *exec_path = perf_exec_path();
+	char *exec_path = perf_exec_path();
 
 	if (exec_path) {
 		list_commands_in_dir(main_cmds, exec_path, prefix);
@@ -187,6 +187,7 @@
 		      sizeof(*other_cmds->names), cmdname_compare);
 		uniq(other_cmds);
 	}
+	free(exec_path);
 	exclude_cmds(other_cmds, main_cmds);
 }
 
@@ -203,13 +204,14 @@
 			longest = other_cmds->names[i]->len;
 
 	if (main_cmds->cnt) {
-		const char *exec_path = perf_exec_path();
+		char *exec_path = perf_exec_path();
 		printf("available %s in '%s'\n", title, exec_path);
 		printf("----------------");
 		mput_char('-', strlen(title) + strlen(exec_path));
 		putchar('\n');
 		pretty_print_string_list(main_cmds, longest);
 		putchar('\n');
+		free(exec_path);
 	}
 
 	if (other_cmds->cnt) {
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 4fd37d6..6e8e0ee 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -924,9 +924,6 @@
 	int64_t cmp = 0;
 
 	perf_hpp__for_each_sort_list(fmt) {
-		if (perf_hpp__should_skip(fmt))
-			continue;
-
 		cmp = fmt->cmp(fmt, left, right);
 		if (cmp)
 			break;
@@ -942,9 +939,6 @@
 	int64_t cmp = 0;
 
 	perf_hpp__for_each_sort_list(fmt) {
-		if (perf_hpp__should_skip(fmt))
-			continue;
-
 		cmp = fmt->collapse(fmt, left, right);
 		if (cmp)
 			break;
diff --git a/tools/perf/util/include/linux/string.h b/tools/perf/util/include/linux/string.h
deleted file mode 100644
index 6f19c54..0000000
--- a/tools/perf/util/include/linux/string.h
+++ /dev/null
@@ -1,3 +0,0 @@
-#include <string.h>
-
-void *memdup(const void *src, size_t len);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 8b303ff..bfc289c 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -122,6 +122,7 @@
 
 void machine__exit(struct machine *machine)
 {
+	machine__destroy_kernel_maps(machine);
 	map_groups__exit(&machine->kmaps);
 	dsos__exit(&machine->dsos);
 	machine__exit_vdso(machine);
@@ -560,11 +561,29 @@
 	return 0;
 }
 
+static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
+{
+	const char *dup_filename;
+
+	if (!filename || !dso || !dso->long_name)
+		return;
+	if (dso->long_name[0] != '[')
+		return;
+	if (!strchr(filename, '/'))
+		return;
+
+	dup_filename = strdup(filename);
+	if (!dup_filename)
+		return;
+
+	dso__set_long_name(dso, dup_filename, true);
+}
+
 struct map *machine__findnew_module_map(struct machine *machine, u64 start,
 					const char *filename)
 {
 	struct map *map = NULL;
-	struct dso *dso;
+	struct dso *dso = NULL;
 	struct kmod_path m;
 
 	if (kmod_path__parse_name(&m, filename))
@@ -572,8 +591,15 @@
 
 	map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
 				       m.name);
-	if (map)
+	if (map) {
+		/*
+		 * If the map's dso is an offline module, give dso__load()
+		 * a chance to find the file path of that module by fixing
+		 * long_name.
+		 */
+		dso__adjust_kmod_long_name(map->dso, filename);
 		goto out;
+	}
 
 	dso = machine__findnew_module_dso(machine, &m, filename);
 	if (dso == NULL)
@@ -585,7 +611,11 @@
 
 	map_groups__insert(&machine->kmaps, map);
 
+	/* Put the map here because map_groups__insert already got it */
+	map__put(map);
 out:
+	/* Put the dso here, corresponding to machine__findnew_module_dso */
+	dso__put(dso);
 	free(m.name);
 	return map;
 }
@@ -788,6 +818,7 @@
 				kmap->ref_reloc_sym = NULL;
 		}
 
+		map__put(machine->vmlinux_maps[type]);
 		machine->vmlinux_maps[type] = NULL;
 	}
 }
@@ -1084,11 +1115,14 @@
 	struct dso *kernel = machine__get_kernel(machine);
 	const char *name;
 	u64 addr = machine__get_running_kernel_start(machine, &name);
-	if (!addr)
+	int ret;
+
+	if (!addr || kernel == NULL)
 		return -1;
 
-	if (kernel == NULL ||
-	    __machine__create_kernel_maps(machine, kernel) < 0)
+	ret = __machine__create_kernel_maps(machine, kernel);
+	dso__put(kernel);
+	if (ret < 0)
 		return -1;
 
 	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
@@ -1609,6 +1643,8 @@
 		}
 	}
 
+	if (symbol_conf.hide_unresolved && al.sym == NULL)
+		return 0;
 	return callchain_cursor_append(&callchain_cursor, al.addr, al.map, al.sym);
 }
 
@@ -1863,6 +1899,9 @@
 static int unwind_entry(struct unwind_entry *entry, void *arg)
 {
 	struct callchain_cursor *cursor = arg;
+
+	if (symbol_conf.hide_unresolved && entry->sym == NULL)
+		return 0;
 	return callchain_cursor_append(cursor, entry->ip,
 				       entry->map, entry->sym);
 }
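
Several hunks above plug reference-count leaks around one discipline: a findnew-style helper returns an object carrying a reference the caller owns; a container such as map_groups or dsos takes its own reference on insert; the caller must then drop the one it was handed with the matching put. A schematic sketch with hypothetical obj__get()/obj__put() helpers:

#include <stdlib.h>

struct obj { int refcnt; };

static struct obj *obj__get(struct obj *o)
{
	if (o)
		o->refcnt++;
	return o;
}

static void obj__put(struct obj *o)
{
	if (o && --o->refcnt == 0)
		free(o);
}

static struct obj *findnew(void)	/* returns with refcnt == 1 */
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o)
		o->refcnt = 1;
	return o;
}

static void container_insert(struct obj *o)	/* container takes a ref */
{
	obj__get(o);
}

int main(void)
{
	struct obj *o = findnew();

	if (!o)
		return 1;
	container_insert(o);
	obj__put(o);	/* drop the caller's reference; the container
			 * still holds one and puts it on teardown */
	return 0;
}
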
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index afc6b56..93d9f1c 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -26,8 +26,8 @@
 static inline int is_anon_memory(const char *filename)
 {
 	return !strcmp(filename, "//anon") ||
-	       !strcmp(filename, "/dev/zero (deleted)") ||
-	       !strcmp(filename, "/anon_hugepage (deleted)");
+	       !strncmp(filename, "/dev/zero", sizeof("/dev/zero") - 1) ||
+	       !strncmp(filename, "/anon_hugepage", sizeof("/anon_hugepage") - 1);
 }
 
 static inline int is_no_dso_memory(const char *filename)
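
The strncmp() form matches any filename that starts with the prefix, so "/dev/zero (deleted)" and numbered anon_hugepage variants are still classified as anonymous memory; sizeof on a string literal counts the trailing NUL, hence the "- 1". A standalone demonstration of the idiom (note the match is deliberately loose: anything beginning with the prefix qualifies):

#include <stdio.h>
#include <string.h>

/* Prefix match: sizeof("...") includes the trailing NUL, so subtract 1. */
static int is_anon_memory(const char *filename)
{
	return !strcmp(filename, "//anon") ||
	       !strncmp(filename, "/dev/zero", sizeof("/dev/zero") - 1) ||
	       !strncmp(filename, "/anon_hugepage", sizeof("/anon_hugepage") - 1);
}

int main(void)
{
	const char *names[] = {
		"/dev/zero (deleted)",		/* matches the prefix */
		"/anon_hugepage (deleted)",	/* matches the prefix */
		"/dev/zeroXYZ",			/* also matches: prefix only */
		"/usr/bin/perf",		/* does not match */
	};
	unsigned int i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%-28s -> %d\n", names[i], is_anon_memory(names[i]));
	return 0;
}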
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index e48d9da..6fc8cd7 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -124,6 +124,10 @@
 		.symbol = "dummy",
 		.alias  = "",
 	},
+	[PERF_COUNT_SW_BPF_OUTPUT] = {
+		.symbol = "bpf-output",
+		.alias  = "",
+	},
 };
 
 #define __PERF_EVENT_FIELD(config, name) \
@@ -1879,7 +1883,7 @@
 
 	for (i = 0; i < max; i++, syms++) {
 
-		if (event_glob != NULL &&
+		if (event_glob != NULL && syms->symbol != NULL &&
 		    !(strglobmatch(syms->symbol, event_glob) ||
 		      (syms->alias && strglobmatch(syms->alias, event_glob))))
 			continue;
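
The extra syms->symbol != NULL test matters because the software event table is indexed by the PERF_COUNT_SW_* ids with designated initializers, so any id without an entry leaves a zero-filled hole whose .symbol is NULL, and strglobmatch() would dereference it. A standalone sketch of the hazard and the guard:

#include <stdio.h>

struct event_symbol {
	const char *symbol;
	const char *alias;
};

enum { EV_A, EV_B, EV_C, EV_MAX };

/*
 * Designated initializers: EV_B is deliberately left out, so its slot
 * is zero-filled and its .symbol is NULL.
 */
static struct event_symbol symbols[EV_MAX] = {
	[EV_A] = { .symbol = "alpha", .alias = "" },
	[EV_C] = { .symbol = "gamma", .alias = "" },
};

int main(void)
{
	int i;

	for (i = 0; i < EV_MAX; i++) {
		/* Guard against holes before using the string. */
		if (symbols[i].symbol == NULL)
			continue;
		printf("%d: %s\n", i, symbols[i].symbol);
	}
	return 0;
}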
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 03875f9..93996ec 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -2326,8 +2326,11 @@
 		goto out;
 
 	if (!allow_suffix) {
-		pr_warning("Error: event \"%s\" already exists. "
-			   "(Use -f to force duplicates.)\n", buf);
+		pr_warning("Error: event \"%s\" already exists.\n"
+			   " Hint: Remove existing event by 'perf probe -d'\n"
+			   "       or force duplicates by 'perf probe -f'\n"
+			   "       or set 'force=yes' in BPF source.\n",
+			   buf);
 		ret = -EEXIST;
 		goto out;
 	}
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 05012bb..2be10fb 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -654,6 +654,7 @@
 static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
 {
 	Dwarf_Attribute fb_attr;
+	Dwarf_Frame *frame = NULL;
 	size_t nops;
 	int ret;
 
@@ -686,11 +687,11 @@
 #if _ELFUTILS_PREREQ(0, 142)
 	} else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa &&
 		   pf->cfi != NULL) {
-		Dwarf_Frame *frame;
 		if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 ||
 		    dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) {
 			pr_warning("Failed to get call frame on 0x%jx\n",
 				   (uintmax_t)pf->addr);
+			free(frame);
 			return -ENOENT;
 		}
 #endif
@@ -699,7 +700,8 @@
 	/* Call finder's callback handler */
 	ret = pf->callback(sc_die, pf);
 
-	/* *pf->fb_ops will be cached in libdw. Don't free it. */
+	/* Since *pf->fb_ops can be a part of frame, we should free it here. */
+	free(frame);
 	pf->fb_ops = NULL;
 
 	return ret;
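
With elfutils 0.142+, dwarf_cfi_addrframe() allocates a Dwarf_Frame that the caller owns, and dwarf_frame_cfa() points pf->fb_ops into it, so the frame must stay alive across the callback and be freed exactly once on every exit path; that is what the two free(frame) calls above arrange. A generic standalone sketch of the ownership rule in plain C (get_frame()/frame_ops() are stand-ins, not the libdw API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct frame {
	char ops[16];	/* stand-in for the CFA expression storage */
};

/* Like dwarf_cfi_addrframe(): allocates a frame the caller must free. */
static int get_frame(struct frame **out)
{
	*out = malloc(sizeof(**out));
	if (!*out)
		return -1;
	strcpy((*out)->ops, "cfa-expr");
	return 0;
}

/* Like dwarf_frame_cfa(): returns a pointer INTO the frame. */
static const char *frame_ops(struct frame *f)
{
	return f->ops;
}

int main(void)
{
	struct frame *frame = NULL;
	const char *ops;

	if (get_frame(&frame) != 0)
		return 1;

	ops = frame_ops(frame);
	printf("using %s\n", ops);	/* the callback runs while frame is live */

	free(frame);			/* only free after the last use of ops */
	return 0;
}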
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index fc8781d..7f7e072 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -342,22 +342,6 @@
 	return s;
 }
 
-/**
- * memdup - duplicate region of memory
- * @src: memory region to duplicate
- * @len: memory region length
- */
-void *memdup(const void *src, size_t len)
-{
-	void *p;
-
-	p = malloc(len);
-	if (p)
-		memcpy(p, src, len);
-
-	return p;
-}
-
 char *asprintf_expr_inout_ints(const char *var, bool in, size_t nints, int *ints)
 {
 	/*
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 475d88d..53f1996 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -1042,6 +1042,8 @@
 				}
 				curr_dso->symtab_type = dso->symtab_type;
 				map_groups__insert(kmaps, curr_map);
+				/* kmaps already got it */
+				map__put(curr_map);
 				dsos__add(&map->groups->machine->dsos, curr_dso);
 				dso__set_loaded(curr_dso, map->type);
 			} else
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index cd08027..d51abd2 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1860,24 +1860,44 @@
 	zfree(&vmlinux_path);
 }
 
+static const char * const vmlinux_paths[] = {
+	"vmlinux",
+	"/boot/vmlinux"
+};
+
+static const char * const vmlinux_paths_upd[] = {
+	"/boot/vmlinux-%s",
+	"/usr/lib/debug/boot/vmlinux-%s",
+	"/lib/modules/%s/build/vmlinux",
+	"/usr/lib/debug/lib/modules/%s/vmlinux",
+	"/usr/lib/debug/boot/vmlinux-%s.debug"
+};
+
+static int vmlinux_path__add(const char *new_entry)
+{
+	vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
+	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
+		return -1;
+	++vmlinux_path__nr_entries;
+
+	return 0;
+}
+
 static int vmlinux_path__init(struct perf_env *env)
 {
 	struct utsname uts;
 	char bf[PATH_MAX];
 	char *kernel_version;
+	unsigned int i;
 
-	vmlinux_path = malloc(sizeof(char *) * 6);
+	vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
+			      ARRAY_SIZE(vmlinux_paths_upd)));
 	if (vmlinux_path == NULL)
 		return -1;
 
-	vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux");
-	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
-		goto out_fail;
-	++vmlinux_path__nr_entries;
-	vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux");
-	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
-		goto out_fail;
-	++vmlinux_path__nr_entries;
+	for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
+		if (vmlinux_path__add(vmlinux_paths[i]) < 0)
+			goto out_fail;
 
 	/* only try kernel version if no symfs was given */
 	if (symbol_conf.symfs[0] != 0)
@@ -1892,28 +1912,11 @@
 		kernel_version = uts.release;
 	}
 
-	snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", kernel_version);
-	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
-	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
-		goto out_fail;
-	++vmlinux_path__nr_entries;
-	snprintf(bf, sizeof(bf), "/usr/lib/debug/boot/vmlinux-%s",
-		 kernel_version);
-	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
-	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
-		goto out_fail;
-        ++vmlinux_path__nr_entries;
-	snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", kernel_version);
-	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
-	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
-		goto out_fail;
-	++vmlinux_path__nr_entries;
-	snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux",
-		 kernel_version);
-	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
-	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
-		goto out_fail;
-	++vmlinux_path__nr_entries;
+	for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
+		snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
+		if (vmlinux_path__add(bf) < 0)
+			goto out_fail;
+	}
 
 	return 0;
 
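
Folding the six hand-unrolled strdup() blocks into two tables keeps the fixed names and the printf-style templates in one place, and the malloc size tracks the tables through ARRAY_SIZE, so adding a search path is a one-line change. A standalone sketch of the table-driven pattern, with a local ARRAY_SIZE macro and uts.release standing in for the detected kernel version:

#include <stdio.h>
#include <sys/utsname.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char * const fixed_paths[] = {
	"vmlinux",
	"/boot/vmlinux",
};

/* Templates expanded with the kernel version, as in vmlinux_paths_upd[]. */
static const char * const templ_paths[] = {
	"/boot/vmlinux-%s",
	"/usr/lib/debug/boot/vmlinux-%s",
};

int main(void)
{
	struct utsname uts;
	char bf[4096];
	unsigned int i;

	if (uname(&uts) < 0)
		return 1;

	for (i = 0; i < ARRAY_SIZE(fixed_paths); i++)
		printf("%s\n", fixed_paths[i]);

	for (i = 0; i < ARRAY_SIZE(templ_paths); i++) {
		snprintf(bf, sizeof(bf), templ_paths[i], uts.release);
		printf("%s\n", bf);
	}
	return 0;
}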
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index dcd786e..857f707 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -108,7 +108,8 @@
 			show_hist_headers,
 			branch_callstack,
 			has_filter,
-			show_ref_callgraph;
+			show_ref_callgraph,
+			hide_unresolved;
 	const char	*vmlinux_name,
 			*kallsyms_name,
 			*source_prefix,
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 2dcfe9a..db8142b 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -11,6 +11,7 @@
 #include <linux/types.h>
 #include "event.h"
 #include "perf_regs.h"
+#include "callchain.h"
 
 static char *debuginfo_path;
 
@@ -52,25 +53,28 @@
 	return __report_module(&al, ip, ui);
 }
 
+/*
+ * Store all entries in the entries array;
+ * we will process them after the unwind finishes.
+ */
 static int entry(u64 ip, struct unwind_info *ui)
 
 {
-	struct unwind_entry e;
+	struct unwind_entry *e = &ui->entries[ui->idx++];
 	struct addr_location al;
 
 	if (__report_module(&al, ip, ui))
 		return -1;
 
-	e.ip  = ip;
-	e.map = al.map;
-	e.sym = al.sym;
+	e->ip  = ip;
+	e->map = al.map;
+	e->sym = al.sym;
 
 	pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
 		 al.sym ? al.sym->name : "''",
 		 ip,
 		 al.map ? al.map->map_ip(al.map, ip) : (u64) 0);
-
-	return ui->cb(&e, ui->arg);
+	return 0;
 }
 
 static pid_t next_thread(Dwfl *dwfl, void *arg, void **thread_argp)
@@ -168,7 +172,7 @@
 			struct perf_sample *data,
 			int max_stack)
 {
-	struct unwind_info ui = {
+	struct unwind_info *ui, ui_buf = {
 		.sample		= data,
 		.thread		= thread,
 		.machine	= thread->mg->machine,
@@ -177,35 +181,54 @@
 		.max_stack	= max_stack,
 	};
 	Dwarf_Word ip;
-	int err = -EINVAL;
+	int err = -EINVAL, i;
 
 	if (!data->user_regs.regs)
 		return -EINVAL;
 
-	ui.dwfl = dwfl_begin(&offline_callbacks);
-	if (!ui.dwfl)
+	ui = zalloc(sizeof(ui_buf) + sizeof(ui_buf.entries[0]) * max_stack);
+	if (!ui)
+		return -ENOMEM;
+
+	*ui = ui_buf;
+
+	ui->dwfl = dwfl_begin(&offline_callbacks);
+	if (!ui->dwfl)
 		goto out;
 
 	err = perf_reg_value(&ip, &data->user_regs, PERF_REG_IP);
 	if (err)
 		goto out;
 
-	err = report_module(ip, &ui);
+	err = report_module(ip, ui);
 	if (err)
 		goto out;
 
-	if (!dwfl_attach_state(ui.dwfl, EM_NONE, thread->tid, &callbacks, &ui))
+	if (!dwfl_attach_state(ui->dwfl, EM_NONE, thread->tid, &callbacks, ui))
 		goto out;
 
-	err = dwfl_getthread_frames(ui.dwfl, thread->tid, frame_callback, &ui);
+	err = dwfl_getthread_frames(ui->dwfl, thread->tid, frame_callback, ui);
 
-	if (err && !ui.max_stack)
+	if (err && !ui->max_stack)
 		err = 0;
 
+	/*
+	 * Display what we got based on the configured order.
+	 */
+	for (i = 0; i < ui->idx && !err; i++) {
+		int j = i;
+
+		if (callchain_param.order == ORDER_CALLER)
+			j = ui->idx - i - 1;
+
+		err = ui->entries[j].ip ? ui->cb(&ui->entries[j], ui->arg) : 0;
+	}
+
  out:
 	if (err)
 		pr_debug("unwind: failed with '%s'\n", dwfl_errmsg(-1));
 
-	dwfl_end(ui.dwfl);
+	dwfl_end(ui->dwfl);
+	free(ui);
 	return 0;
 }
diff --git a/tools/perf/util/unwind-libdw.h b/tools/perf/util/unwind-libdw.h
index 417a142..5832866 100644
--- a/tools/perf/util/unwind-libdw.h
+++ b/tools/perf/util/unwind-libdw.h
@@ -16,6 +16,8 @@
 	unwind_entry_cb_t	cb;
 	void			*arg;
 	int			max_stack;
+	int			idx;
+	struct unwind_entry	entries[];
 };
 
 #endif /* __PERF_UNWIND_LIBDW_H */
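
entries[] is a C99 flexible array member: it adds nothing to sizeof(struct unwind_info), so the unwind-libdw.c hunk above can allocate the header and max_stack entries in a single zalloc() of sizeof(ui_buf) + sizeof(ui_buf.entries[0]) * max_stack. A standalone sketch of the pattern, with stand-in info/entry types and calloc() in place of the tools' zalloc():

#include <stdio.h>
#include <stdlib.h>

struct entry {
	unsigned long ip;
};

struct info {
	int idx;
	struct entry entries[];	/* flexible array member, adds 0 to sizeof */
};

/* calloc-based stand-in for the tools' zalloc(). */
static struct info *info_new(int max)
{
	return calloc(1, sizeof(struct info) + max * sizeof(struct entry));
}

int main(void)
{
	int max = 8;
	struct info *ui = info_new(max);

	if (!ui)
		return 1;

	ui->entries[ui->idx++].ip = 0x400123UL;
	printf("%d entries, first ip %#lx\n", ui->idx, ui->entries[0].ip);
	free(ui);
	return 0;
}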
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index c83832b..3c258a0 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -614,23 +614,48 @@
 static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
 		       void *arg, int max_stack)
 {
+	u64 val;
+	unw_word_t ips[max_stack];
 	unw_addr_space_t addr_space;
 	unw_cursor_t c;
-	int ret;
+	int ret, i = 0;
 
-	addr_space = thread__priv(ui->thread);
-	if (addr_space == NULL)
-		return -1;
-
-	ret = unw_init_remote(&c, addr_space, ui);
+	ret = perf_reg_value(&val, &ui->sample->user_regs, PERF_REG_IP);
 	if (ret)
-		display_error(ret);
+		return ret;
 
-	while (!ret && (unw_step(&c) > 0) && max_stack--) {
-		unw_word_t ip;
+	ips[i++] = (unw_word_t) val;
 
-		unw_get_reg(&c, UNW_REG_IP, &ip);
-		ret = ip ? entry(ip, ui->thread, cb, arg) : 0;
+	/*
+	 * If we need more than one entry, do the actual
+	 * DWARF unwind.
+	 */
+	if (max_stack - 1 > 0) {
+		addr_space = thread__priv(ui->thread);
+		if (addr_space == NULL)
+			return -1;
+
+		ret = unw_init_remote(&c, addr_space, ui);
+		if (ret)
+			display_error(ret);
+
+		while (!ret && (unw_step(&c) > 0) && i < max_stack) {
+			unw_get_reg(&c, UNW_REG_IP, &ips[i]);
+			++i;
+		}
+
+		max_stack = i;
+	}
+
+	/*
+	 * Display what we got based on the configured order.
+	 */
+	for (i = 0; i < max_stack && !ret; i++) {
+		int j = i;
+
+		if (callchain_param.order == ORDER_CALLER)
+			j = max_stack - i - 1;
+		ret = ips[j] ? entry(ips[j], ui->thread, cb, arg) : 0;
 	}
 
 	return ret;
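
Collecting every instruction pointer into ips[] first and replaying them afterwards lets a single loop serve both orders: ORDER_CALLEE walks from index 0 (innermost frame first), while ORDER_CALLER mirrors the index with j = max_stack - i - 1. A standalone sketch of the replay loop, with replay() as an illustrative helper:

#include <stdio.h>

enum order { ORDER_CALLEE, ORDER_CALLER };

static void replay(const unsigned long *ips, int n, enum order order)
{
	int i;

	for (i = 0; i < n; i++) {
		int j = i;

		if (order == ORDER_CALLER)
			j = n - i - 1;	/* mirror: outermost frame first */
		printf("%#lx\n", ips[j]);
	}
}

int main(void)
{
	unsigned long ips[] = { 0x1000, 0x2000, 0x3000 };

	puts("callee order:");
	replay(ips, 3, ORDER_CALLEE);
	puts("caller order:");
	replay(ips, 3, ORDER_CALLER);
	return 0;
}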
@@ -640,24 +665,17 @@
 			struct thread *thread,
 			struct perf_sample *data, int max_stack)
 {
-	u64 ip;
 	struct unwind_info ui = {
 		.sample       = data,
 		.thread       = thread,
 		.machine      = thread->mg->machine,
 	};
-	int ret;
 
 	if (!data->user_regs.regs)
 		return -EINVAL;
 
-	ret = perf_reg_value(&ip, &data->user_regs, PERF_REG_IP);
-	if (ret)
-		return ret;
+	if (max_stack <= 0)
+		return -EINVAL;
 
-	ret = entry(ip, thread, cb, arg);
-	if (ret)
-		return -ENOMEM;
-
-	return --max_stack > 0 ? get_entries(&ui, cb, arg, max_stack) : 0;
+	return get_entries(&ui, cb, arg, max_stack);
 }
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 47b1e36..75759ae 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -21,7 +21,8 @@
 	.mode	= CHAIN_GRAPH_ABS,
 	.min_percent = 0.5,
 	.order  = ORDER_CALLEE,
-	.key	= CCKEY_FUNCTION
+	.key	= CCKEY_FUNCTION,
+	.value	= CCVAL_PERCENT,
 };
 
 /*