Merge branches 'x86/mce' and 'x86/urgent' into perf/mce

Merge reason: Put all MCE changes into this branch, as we are
              queueing up a dependent patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index f1363b7..227a72d 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -108,6 +108,8 @@
 #define K8_MCE_THRESHOLD_BANK_5    (MCE_THRESHOLD_BASE + 5 * 9)
 #define K8_MCE_THRESHOLD_DRAM_ECC  (MCE_THRESHOLD_BANK_4 + 0)
 
+extern struct atomic_notifier_head x86_mce_decoder_chain;
+
 #ifdef __KERNEL__
 
 #include <linux/percpu.h>
@@ -213,6 +215,5 @@
 void intel_init_thermal(struct cpuinfo_x86 *c);
 
 void mce_log_therm_throt_event(__u64 status);
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_MCE_H */
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index ad7ce3f..8d9f854 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -28,9 +28,20 @@
  */
 #define ARCH_PERFMON_EVENT_MASK				    0xffff
 
+/*
+ * filter mask to validate fixed counter events.
+ * the following filters disqualify for fixed counters:
+ *  - inv
+ *  - edge
+ *  - cnt-mask
+ *  The other filters are supported by fixed counters.
+ *  The any-thread option is supported starting with v3.
+ */
+#define ARCH_PERFMON_EVENT_FILTER_MASK			0xff840000
+
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		      0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 		 0
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX			 0
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
 		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
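A rough sketch of the intent behind the new mask (the helper name below is hypothetical; the check itself mirrors the fixed_mode_idx() change further down): an event that sets any cnt-mask, inv or edge bit cannot be scheduled on a fixed-purpose counter.

    /*
     * PERFEVTSEL bits covered by ARCH_PERFMON_EVENT_FILTER_MASK (0xff840000):
     *   bits 31-24  counter-mask (cmask)
     *   bit  23     invert (inv)
     *   bit  18     edge detect
     * i.e. 0xff000000 | 0x00800000 | 0x00040000
     */
    static inline bool event_fits_fixed_counter(u64 config)
    {
            return !(config & ARCH_PERFMON_EVENT_FILTER_MASK);
    }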
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 721a77c..0fb9dc5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -46,6 +46,9 @@
 
 #include "mce-internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/mce.h>
+
 int mce_disabled __read_mostly;
 
 #define MISC_MCELOG_MINOR	227
@@ -85,18 +88,26 @@
 static DEFINE_PER_CPU(struct mce, mces_seen);
 static int			cpu_missing;
 
-static void default_decode_mce(struct mce *m)
+/*
+ * CPU/chipset specific EDAC code can register a notifier call here to print
+ * MCE errors in a human-readable form.
+ */
+ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
+EXPORT_SYMBOL_GPL(x86_mce_decoder_chain);
+
+static int default_decode_mce(struct notifier_block *nb, unsigned long val,
+			       void *data)
 {
 	pr_emerg("No human readable MCE decoding support on this CPU type.\n");
 	pr_emerg("Run the message through 'mcelog --ascii' to decode.\n");
+
+	return NOTIFY_STOP;
 }
 
-/*
- * CPU/chipset specific EDAC code can register a callback here to print
- * MCE errors in a human-readable form:
- */
-void (*x86_mce_decode_callback)(struct mce *m) = default_decode_mce;
-EXPORT_SYMBOL(x86_mce_decode_callback);
+static struct notifier_block mce_dec_nb = {
+	.notifier_call = default_decode_mce,
+	.priority      = -1,
+};
 
 /* MCA banks polled by the period polling timer for corrected events */
 DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
@@ -141,6 +152,9 @@
 {
 	unsigned next, entry;
 
+	/* Emit the trace record: */
+	trace_mce_record(mce);
+
 	mce->finished = 0;
 	wmb();
 	for (;;) {
@@ -204,9 +218,9 @@
 
 	/*
 	 * Print out human-readable details about the MCE error,
-	 * (if the CPU has an implementation for that):
+	 * (if the CPU has an implementation for that)
 	 */
-	x86_mce_decode_callback(m);
+	atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
 }
 
 static void print_mce_head(void)
@@ -1421,6 +1435,9 @@
 	mce_cpu_features(c);
 	mce_init_timer();
 	INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
+
+	if (raw_smp_processor_id() == 0)
+		atomic_notifier_chain_register(&x86_mce_decoder_chain, &mce_dec_nb);
 }
 
 /*
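With the x86_mce_decode_callback function pointer replaced by an atomic notifier chain, decoders hook in like any other notifier user. Because the fallback default_decode_mce is registered at priority -1, a decoder left at the default priority 0 runs first, and returning NOTIFY_STOP keeps the generic "run mcelog --ascii" hint from being printed. A minimal sketch of such a consumer (the mydecoder_* names are hypothetical; the chain, struct mce argument and return value follow this patch):

    #include <linux/kernel.h>
    #include <linux/notifier.h>
    #include <asm/mce.h>

    static int mydecoder_notify(struct notifier_block *nb,
                                unsigned long val, void *data)
    {
            struct mce *m = data;

            pr_emerg("MC%d: status 0x%016llx addr 0x%016llx\n",
                     m->bank, m->status, m->addr);

            /* we handled it, skip lower-priority decoders */
            return NOTIFY_STOP;
    }

    static struct notifier_block mydecoder_nb = {
            .notifier_call  = mydecoder_notify,
    };

    /* in module init/exit: */
    atomic_notifier_chain_register(&x86_mce_decoder_chain, &mydecoder_nb);
    atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &mydecoder_nb);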
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index b5801c3..2e20bca 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -77,6 +77,18 @@
 	struct debug_store	*ds;
 };
 
+struct event_constraint {
+	unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+	int		code;
+};
+
+#define EVENT_CONSTRAINT(c, m) { .code = (c), .idxmsk[0] = (m) }
+#define EVENT_CONSTRAINT_END  { .code = 0, .idxmsk[0] = 0 }
+
+#define for_each_event_constraint(e, c) \
+	for ((e) = (c); (e)->idxmsk[0]; (e)++)
+
+
 /*
  * struct x86_pmu - generic x86 pmu
  */
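The event_constraint tables added below are keyed on the raw event code, with idxmsk[0] naming which generic counters the event may use; EVENT_CONSTRAINT_END works as a sentinel because for_each_event_constraint() stops at the first entry whose counter mask is zero. Worked expansions (values taken from the tables below):

    /* EVENT_CONSTRAINT(0xcb, 0x1) expands to: */
    { .code = 0xcb, .idxmsk[0] = 0x1 },     /* MEM_LOAD_RETIRED: counter 0 only */

    /* EVENT_CONSTRAINT(0x12, 0x2) expands to: */
    { .code = 0x12, .idxmsk[0] = 0x2 },     /* MUL: counter 1 only */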
@@ -102,6 +114,8 @@
 	u64		intel_ctrl;
 	void		(*enable_bts)(u64 config);
 	void		(*disable_bts)(void);
+	int		(*get_event_idx)(struct cpu_hw_events *cpuc,
+					 struct hw_perf_event *hwc);
 };
 
 static struct x86_pmu x86_pmu __read_mostly;
@@ -110,6 +124,8 @@
 	.enabled = 1,
 };
 
+static const struct event_constraint *event_constraints;
+
 /*
  * Not sure about some of these
  */
@@ -155,6 +171,16 @@
 	return hw_event & P6_EVNTSEL_MASK;
 }
 
+static const struct event_constraint intel_p6_event_constraints[] =
+{
+	EVENT_CONSTRAINT(0xc1, 0x1),	/* FLOPS */
+	EVENT_CONSTRAINT(0x10, 0x1),	/* FP_COMP_OPS_EXE */
+	EVENT_CONSTRAINT(0x11, 0x1),	/* FP_ASSIST */
+	EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
+	EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
+	EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
+	EVENT_CONSTRAINT_END
+};
 
 /*
  * Intel PerfMon v3. Used on Core2 and later.
@@ -170,6 +196,35 @@
   [PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
 };
 
+static const struct event_constraint intel_core_event_constraints[] =
+{
+	EVENT_CONSTRAINT(0x10, 0x1),	/* FP_COMP_OPS_EXE */
+	EVENT_CONSTRAINT(0x11, 0x2),	/* FP_ASSIST */
+	EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
+	EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
+	EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
+	EVENT_CONSTRAINT(0x18, 0x1),	/* IDLE_DURING_DIV */
+	EVENT_CONSTRAINT(0x19, 0x2),	/* DELAYED_BYPASS */
+	EVENT_CONSTRAINT(0xa1, 0x1),	/* RS_UOPS_DISPATCH_CYCLES */
+	EVENT_CONSTRAINT(0xcb, 0x1),	/* MEM_LOAD_RETIRED */
+	EVENT_CONSTRAINT_END
+};
+
+static const struct event_constraint intel_nehalem_event_constraints[] =
+{
+	EVENT_CONSTRAINT(0x40, 0x3),	/* L1D_CACHE_LD */
+	EVENT_CONSTRAINT(0x41, 0x3),	/* L1D_CACHE_ST */
+	EVENT_CONSTRAINT(0x42, 0x3),	/* L1D_CACHE_LOCK */
+	EVENT_CONSTRAINT(0x43, 0x3),	/* L1D_ALL_REF */
+	EVENT_CONSTRAINT(0x4e, 0x3),	/* L1D_PREFETCH */
+	EVENT_CONSTRAINT(0x4c, 0x3),	/* LOAD_HIT_PRE */
+	EVENT_CONSTRAINT(0x51, 0x3),	/* L1D */
+	EVENT_CONSTRAINT(0x52, 0x3),	/* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
+	EVENT_CONSTRAINT(0x53, 0x3),	/* L1D_CACHE_LOCK_FB_HIT */
+	EVENT_CONSTRAINT(0xc5, 0x3),	/* CACHE_LOCK_CYCLES */
+	EVENT_CONSTRAINT_END
+};
+
 static u64 intel_pmu_event_map(int hw_event)
 {
 	return intel_perfmon_event_map[hw_event];
@@ -469,7 +524,7 @@
 #define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
 #define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
 #define CORE_EVNTSEL_INV_MASK		0x00800000ULL
-#define CORE_EVNTSEL_REG_MASK	0xFF000000ULL
+#define CORE_EVNTSEL_REG_MASK		0xFF000000ULL
 
 #define CORE_EVNTSEL_MASK		\
 	(CORE_EVNTSEL_EVENT_MASK |	\
@@ -932,6 +987,8 @@
 	 */
 	hwc->config = ARCH_PERFMON_EVENTSEL_INT;
 
+	hwc->idx = -1;
+
 	/*
 	 * Count user and OS events unless requested not to.
 	 */
@@ -1334,8 +1391,7 @@
 		x86_pmu_enable_event(hwc, idx);
 }
 
-static int
-fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
+static int fixed_mode_idx(struct hw_perf_event *hwc)
 {
 	unsigned int hw_event;
 
@@ -1349,6 +1405,12 @@
 	if (!x86_pmu.num_events_fixed)
 		return -1;
 
+	/*
+	 * fixed counters do not take all possible filters
+	 */
+	if (hwc->config & ARCH_PERFMON_EVENT_FILTER_MASK)
+		return -1;
+
 	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
 		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
 	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
@@ -1360,22 +1422,57 @@
 }
 
 /*
- * Find a PMC slot for the freshly enabled / scheduled in event:
+ * generic counter allocator: get next free counter
  */
-static int x86_pmu_enable(struct perf_event *event)
+static int
+gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct hw_perf_event *hwc = &event->hw;
 	int idx;
 
-	idx = fixed_mode_idx(event, hwc);
+	idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events);
+	return idx == x86_pmu.num_events ? -1 : idx;
+}
+
+/*
+ * intel-specific counter allocator: check event constraints
+ */
+static int
+intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
+{
+	const struct event_constraint *event_constraint;
+	int i, code;
+
+	if (!event_constraints)
+		goto skip;
+
+	code = hwc->config & CORE_EVNTSEL_EVENT_MASK;
+
+	for_each_event_constraint(event_constraint, event_constraints) {
+		if (code == event_constraint->code) {
+			for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) {
+				if (!test_and_set_bit(i, cpuc->used_mask))
+					return i;
+			}
+			return -1;
+		}
+	}
+skip:
+	return gen_get_event_idx(cpuc, hwc);
+}
+
+static int
+x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
+{
+	int idx;
+
+	idx = fixed_mode_idx(hwc);
 	if (idx == X86_PMC_IDX_FIXED_BTS) {
 		/* BTS is already occupied. */
 		if (test_and_set_bit(idx, cpuc->used_mask))
 			return -EAGAIN;
 
 		hwc->config_base	= 0;
-		hwc->event_base	= 0;
+		hwc->event_base		= 0;
 		hwc->idx		= idx;
 	} else if (idx >= 0) {
 		/*
@@ -1396,20 +1493,35 @@
 	} else {
 		idx = hwc->idx;
 		/* Try to get the previous generic event again */
-		if (test_and_set_bit(idx, cpuc->used_mask)) {
+		if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) {
 try_generic:
-			idx = find_first_zero_bit(cpuc->used_mask,
-						  x86_pmu.num_events);
-			if (idx == x86_pmu.num_events)
+			idx = x86_pmu.get_event_idx(cpuc, hwc);
+			if (idx == -1)
 				return -EAGAIN;
 
 			set_bit(idx, cpuc->used_mask);
 			hwc->idx = idx;
 		}
-		hwc->config_base  = x86_pmu.eventsel;
-		hwc->event_base = x86_pmu.perfctr;
+		hwc->config_base = x86_pmu.eventsel;
+		hwc->event_base  = x86_pmu.perfctr;
 	}
 
+	return idx;
+}
+
+/*
+ * Find a PMC slot for the freshly enabled / scheduled in event:
+ */
+static int x86_pmu_enable(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx;
+
+	idx = x86_schedule_event(cpuc, hwc);
+	if (idx < 0)
+		return idx;
+
 	perf_events_lapic_init();
 
 	x86_pmu.disable(hwc, idx);
@@ -1877,6 +1989,7 @@
 	 */
 	.event_bits		= 32,
 	.event_mask		= (1ULL << 32) - 1,
+	.get_event_idx		= intel_get_event_idx,
 };
 
 static struct x86_pmu intel_pmu = {
@@ -1900,6 +2013,7 @@
 	.max_period		= (1ULL << 31) - 1,
 	.enable_bts		= intel_pmu_enable_bts,
 	.disable_bts		= intel_pmu_disable_bts,
+	.get_event_idx		= intel_get_event_idx,
 };
 
 static struct x86_pmu amd_pmu = {
@@ -1920,6 +2034,7 @@
 	.apic			= 1,
 	/* use highest bit to detect overflow */
 	.max_period		= (1ULL << 47) - 1,
+	.get_event_idx		= gen_get_event_idx,
 };
 
 static int p6_pmu_init(void)
@@ -1932,10 +2047,12 @@
 	case 7:
 	case 8:
 	case 11: /* Pentium III */
+		event_constraints = intel_p6_event_constraints;
 		break;
 	case 9:
 	case 13:
 		/* Pentium M */
+		event_constraints = intel_p6_event_constraints;
 		break;
 	default:
 		pr_cont("unsupported p6 CPU model %d ",
@@ -2007,12 +2124,14 @@
 		       sizeof(hw_cache_event_ids));
 
 		pr_cont("Core2 events, ");
+		event_constraints = intel_core_event_constraints;
 		break;
 	default:
 	case 26:
 		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
+		event_constraints = intel_nehalem_event_constraints;
 		pr_cont("Nehalem/Corei7 events, ");
 		break;
 	case 28:
@@ -2105,11 +2224,47 @@
 	.unthrottle	= x86_pmu_unthrottle,
 };
 
+static int
+validate_event(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+	struct hw_perf_event fake_event = event->hw;
+
+	if (event->pmu != &pmu)
+		return 0;
+
+	return x86_schedule_event(cpuc, &fake_event);
+}
+
+static int validate_group(struct perf_event *event)
+{
+	struct perf_event *sibling, *leader = event->group_leader;
+	struct cpu_hw_events fake_pmu;
+
+	memset(&fake_pmu, 0, sizeof(fake_pmu));
+
+	if (!validate_event(&fake_pmu, leader))
+		return -ENOSPC;
+
+	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+		if (!validate_event(&fake_pmu, sibling))
+			return -ENOSPC;
+	}
+
+	if (!validate_event(&fake_pmu, event))
+		return -ENOSPC;
+
+	return 0;
+}
+
 const struct pmu *hw_perf_event_init(struct perf_event *event)
 {
 	int err;
 
 	err = __hw_perf_event_init(event);
+	if (!err) {
+		if (event->group_leader != event)
+			err = validate_group(event);
+	}
 	if (err) {
 		if (event->destroy)
 			event->destroy(event);
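Putting the pieces together, a short walk-through of how intel_get_event_idx() behaves on a Core2, using codes from the intel_core_event_constraints table above:

    /*
     * cpuc->used_mask = 0
     *
     * event A, config 0xcb (MEM_LOAD_RETIRED):
     *      matches constraint { 0xcb, 0x1 }, counter 0 is free
     *      -> returns 0, used_mask = 0x1
     *
     * event B, config 0xcb again:
     *      same constraint, counter 0 already taken, no other bit in idxmsk
     *      -> returns -1, x86_schedule_event() fails with -EAGAIN
     *
     * event C, config 0x3c (unhalted core cycles, unconstrained):
     *      no table entry -> falls through to gen_get_event_idx()
     *      -> first free generic counter, e.g. counter 1
     */

validate_group() dry-runs the same scheduling logic against a zeroed fake cpu_hw_events whenever an event is added to a group, so a group that can never be scheduled together is rejected at creation time instead of silently never counting.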
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
index 713ed7d..689cc6a 100644
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -3,7 +3,6 @@
 
 static bool report_gart_errors;
 static void (*nb_bus_decoder)(int node_id, struct err_regs *regs);
-static void (*orig_mce_callback)(struct mce *m);
 
 void amd_report_gart_errors(bool v)
 {
@@ -363,8 +362,10 @@
 		pr_warning("Huh? Unknown MCE error 0x%x\n", ec);
 }
 
-static void amd_decode_mce(struct mce *m)
+static int amd_decode_mce(struct notifier_block *nb, unsigned long val,
+			   void *data)
 {
+	struct mce *m = (struct mce *)data;
 	struct err_regs regs;
 	int node, ecc;
 
@@ -420,20 +421,22 @@
 	}
 
 	amd_decode_err_code(m->status & 0xffff);
+
+	return NOTIFY_STOP;
 }
 
+static struct notifier_block amd_mce_dec_nb = {
+	.notifier_call	= amd_decode_mce,
+};
+
 static int __init mce_amd_init(void)
 {
 	/*
 	 * We can decode MCEs for Opteron and later CPUs:
 	 */
 	if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
-	    (boot_cpu_data.x86 >= 0xf)) {
-		/* safe the default decode mce callback */
-		orig_mce_callback = x86_mce_decode_callback;
-
-		x86_mce_decode_callback = amd_decode_mce;
-	}
+	    (boot_cpu_data.x86 >= 0xf))
+		atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
 
 	return 0;
 }
@@ -442,7 +445,7 @@
 #ifdef MODULE
 static void __exit mce_amd_exit(void)
 {
-	x86_mce_decode_callback = orig_mce_callback;
+	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb);
 }
 
 MODULE_DESCRIPTION("AMD MCE decoder");
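Since amd_mce_dec_nb leaves .priority at its zero-initialized default, it sits ahead of the priority -1 fallback registered in mce.c, and its NOTIFY_STOP return ends the chain walk. The effective order after both registrations is roughly:

    /*
     * x86_mce_decoder_chain:
     *   amd_decode_mce      (priority  0)  decodes and returns NOTIFY_STOP
     *   default_decode_mce  (priority -1)  only reached when no decoder
     *                                      above it stopped the chain
     */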
diff --git a/include/trace/events/mce.h b/include/trace/events/mce.h
new file mode 100644
index 0000000..7eee778
--- /dev/null
+++ b/include/trace/events/mce.h
@@ -0,0 +1,69 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mce
+
+#if !defined(_TRACE_MCE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MCE_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+#include <asm/mce.h>
+
+TRACE_EVENT(mce_record,
+
+	TP_PROTO(struct mce *m),
+
+	TP_ARGS(m),
+
+	TP_STRUCT__entry(
+		__field(	u64,		mcgcap		)
+		__field(	u64,		mcgstatus	)
+		__field(	u8,		bank		)
+		__field(	u64,		status		)
+		__field(	u64,		addr		)
+		__field(	u64,		misc		)
+		__field(	u64,		ip		)
+		__field(	u8,		cs		)
+		__field(	u64,		tsc		)
+		__field(	u64,		walltime	)
+		__field(	u32,		cpu		)
+		__field(	u32,		cpuid		)
+		__field(	u32,		apicid		)
+		__field(	u32,		socketid	)
+		__field(	u8,		cpuvendor	)
+	),
+
+	TP_fast_assign(
+		__entry->mcgcap		= m->mcgcap;
+		__entry->mcgstatus	= m->mcgstatus;
+		__entry->bank		= m->bank;
+		__entry->status		= m->status;
+		__entry->addr		= m->addr;
+		__entry->misc		= m->misc;
+		__entry->ip		= m->ip;
+		__entry->cs		= m->cs;
+		__entry->tsc		= m->tsc;
+		__entry->walltime	= m->time;
+		__entry->cpu		= m->extcpu;
+		__entry->cpuid		= m->cpuid;
+		__entry->apicid		= m->apicid;
+		__entry->socketid	= m->socketid;
+		__entry->cpuvendor	= m->cpuvendor;
+	),
+
+	TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, ADDR/MISC: %016Lx/%016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PROCESSOR: %u:%x, TIME: %llu, SOCKET: %u, APIC: %x",
+		__entry->cpu,
+		__entry->mcgcap, __entry->mcgstatus,
+		__entry->bank, __entry->status,
+		__entry->addr, __entry->misc,
+		__entry->cs, __entry->ip,
+		__entry->tsc,
+		__entry->cpuvendor, __entry->cpuid,
+		__entry->walltime,
+		__entry->socketid,
+		__entry->apicid)
+);
+
+#endif /* _TRACE_MCE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
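Besides showing up as an ftrace event (mce:mce_record), the TRACE_EVENT() above also generates an in-kernel tracepoint, which mce_log() fires via the trace_mce_record() call added earlier in this series. A rough sketch of attaching a probe from other kernel code, assuming the register/unregister helpers that DECLARE_TRACE generates for this kernel generation (probe signature mirrors TP_PROTO):

    #include <linux/kernel.h>
    #include <trace/events/mce.h>

    static void my_mce_probe(struct mce *m)
    {
            pr_debug("mce_record: bank %d status %016llx\n",
                     m->bank, (unsigned long long)m->status);
    }

    /* in module init/exit: */
    register_trace_mce_record(my_mce_probe);
    unregister_trace_mce_record(my_mce_probe);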
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index cc0d966..c9bbcab 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -120,9 +120,10 @@
 #undef __field
 #define __field(type, item)					\
 	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
-			       "offset:%u;\tsize:%u;\n",		\
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
 			       (unsigned int)offsetof(typeof(field), item), \
-			       (unsigned int)sizeof(field.item));	\
+			       (unsigned int)sizeof(field.item),	\
+			       (unsigned int)is_signed_type(type));	\
 	if (!ret)							\
 		return 0;
 
@@ -132,19 +133,21 @@
 #undef __array
 #define __array(type, item, len)						\
 	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t"	\
-			       "offset:%u;\tsize:%u;\n",		\
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
 			       (unsigned int)offsetof(typeof(field), item), \
-			       (unsigned int)sizeof(field.item));	\
+			       (unsigned int)sizeof(field.item),	\
+			       (unsigned int)is_signed_type(type));	\
 	if (!ret)							\
 		return 0;
 
 #undef __dynamic_array
 #define __dynamic_array(type, item, len)				       \
 	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
-			       "offset:%u;\tsize:%u;\n",		       \
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	       \
 			       (unsigned int)offsetof(typeof(field),	       \
 					__data_loc_##item),		       \
-			       (unsigned int)sizeof(field.__data_loc_##item)); \
+			       (unsigned int)sizeof(field.__data_loc_##item), \
+			       (unsigned int)is_signed_type(type));	\
 	if (!ret)							       \
 		return 0;
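The new "signed:%u" column is filled in by is_signed_type(), which is essentially a compile-time sign check along the lines of the sketch below (the exact spelling in the tracing headers is an assumption here); the visible effect is one extra attribute per field line in the event format files:

    /* rough shape of the helper */
    #define is_signed_type(type)    (((type)(-1)) < (type)1)

    /* illustrative resulting format lines (offsets depend on the event): */
    field:int pid;  offset:4;       size:4; signed:1;
    field:unsigned char flags;      offset:2;       size:1; signed:0;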
 
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d4ff019..e43c928 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -397,18 +397,21 @@
 	int ret;
 
 	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
-			       "offset:0;\tsize:%u;\n",
-			       (unsigned int)sizeof(field.time_stamp));
+			       "offset:0;\tsize:%u;\tsigned:%u;\n",
+			       (unsigned int)sizeof(field.time_stamp),
+			       (unsigned int)is_signed_type(u64));
 
 	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
-			       "offset:%u;\tsize:%u;\n",
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), commit),
-			       (unsigned int)sizeof(field.commit));
+			       (unsigned int)sizeof(field.commit),
+			       (unsigned int)is_signed_type(long));
 
 	ret = trace_seq_printf(s, "\tfield: char data;\t"
-			       "offset:%u;\tsize:%u;\n",
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), data),
-			       (unsigned int)BUF_PAGE_SIZE);
+			       (unsigned int)BUF_PAGE_SIZE,
+			       (unsigned int)is_signed_type(char));
 
 	return ret;
 }
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index d128f65..cf3cabf 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -507,7 +507,7 @@
 #define FIELD(type, name)						\
 	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
 	#type, "common_" #name, offsetof(typeof(field), name),		\
-		sizeof(field.name)
+		sizeof(field.name), is_signed_type(type)
 
 static int trace_write_header(struct trace_seq *s)
 {
@@ -515,17 +515,17 @@
 
 	/* struct trace_entry */
 	return trace_seq_printf(s,
-				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
-				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
-				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
-				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
-				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
-				"\n",
-				FIELD(unsigned short, type),
-				FIELD(unsigned char, flags),
-				FIELD(unsigned char, preempt_count),
-				FIELD(int, pid),
-				FIELD(int, lock_depth));
+			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
+			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
+			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
+			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
+			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
+			"\n",
+			FIELD(unsigned short, type),
+			FIELD(unsigned char, flags),
+			FIELD(unsigned char, preempt_count),
+			FIELD(int, pid),
+			FIELD(int, lock_depth));
 }
 
 static ssize_t
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 9753fcc..31da218 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -66,44 +66,47 @@
 #undef __field
 #define __field(type, item)						\
 	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
-			       "offset:%zu;\tsize:%zu;\n",		\
+			       "offset:%zu;\tsize:%zu;\tsigned:%u;\n",	\
 			       offsetof(typeof(field), item),		\
-			       sizeof(field.item));			\
+			       sizeof(field.item), is_signed_type(type)); \
 	if (!ret)							\
 		return 0;
 
 #undef __field_desc
 #define __field_desc(type, container, item)				\
 	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
-			       "offset:%zu;\tsize:%zu;\n",		\
+			       "offset:%zu;\tsize:%zu;\tsigned:%u;\n",	\
 			       offsetof(typeof(field), container.item),	\
-			       sizeof(field.container.item));		\
+			       sizeof(field.container.item),		\
+			       is_signed_type(type));			\
 	if (!ret)							\
 		return 0;
 
 #undef __array
 #define __array(type, item, len)					\
 	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
-			       "offset:%zu;\tsize:%zu;\n",		\
-			       offsetof(typeof(field), item),	\
-			       sizeof(field.item));		\
+			       "offset:%zu;\tsize:%zu;\tsigned:%u;\n",	\
+			       offsetof(typeof(field), item),		\
+			       sizeof(field.item), is_signed_type(type)); \
 	if (!ret)							\
 		return 0;
 
 #undef __array_desc
 #define __array_desc(type, container, item, len)			\
 	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
-			       "offset:%zu;\tsize:%zu;\n",		\
+			       "offset:%zu;\tsize:%zu;\tsigned:%u;\n",	\
 			       offsetof(typeof(field), container.item),	\
-			       sizeof(field.container.item));		\
+			       sizeof(field.container.item),		\
+			       is_signed_type(type));			\
 	if (!ret)							\
 		return 0;
 
 #undef __dynamic_array
 #define __dynamic_array(type, item)					\
 	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
-			       "offset:%zu;\tsize:0;\n",		\
-			       offsetof(typeof(field), item));		\
+			       "offset:%zu;\tsize:0;\tsigned:%u;\n",	\
+			       offsetof(typeof(field), item),		\
+			       is_signed_type(type));			\
 	if (!ret)							\
 		return 0;
 
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 527e17e..d99abc4 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -103,7 +103,8 @@
 #define SYSCALL_FIELD(type, name)					\
 	sizeof(type) != sizeof(trace.name) ?				\
 		__bad_type_size() :					\
-		#type, #name, offsetof(typeof(trace), name), sizeof(trace.name)
+		#type, #name, offsetof(typeof(trace), name),		\
+		sizeof(trace.name), is_signed_type(type)
 
 int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
 {
@@ -120,7 +121,8 @@
 	if (!entry)
 		return 0;
 
-	ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
+	ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
+			       "\tsigned:%u;\n",
 			       SYSCALL_FIELD(int, nr));
 	if (!ret)
 		return 0;
@@ -130,8 +132,10 @@
 				        entry->args[i]);
 		if (!ret)
 			return 0;
-		ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;\n", offset,
-				       sizeof(unsigned long));
+		ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;"
+				       "\tsigned:%u;\n", offset,
+				       sizeof(unsigned long),
+				       is_signed_type(unsigned long));
 		if (!ret)
 			return 0;
 		offset += sizeof(unsigned long);
@@ -163,8 +167,10 @@
 	struct syscall_trace_exit trace;
 
 	ret = trace_seq_printf(s,
-			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
-			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
+			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
+			       "\tsigned:%u;\n"
+			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
+			       "\tsigned:%u;\n",
 			       SYSCALL_FIELD(int, nr),
 			       SYSCALL_FIELD(long, ret));
 	if (!ret)
@@ -212,7 +218,7 @@
 	if (ret)
 		return ret;
 
-	ret = trace_define_field(call, SYSCALL_FIELD(long, ret), 0,
+	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
 				 FILTER_OTHER);
 
 	return ret;
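With is_signed_type() folded into SYSCALL_FIELD(), the macro now yields five values per field instead of four, which is also why the literal 0 in the trace_define_field() call above could be dropped. Roughly, SYSCALL_FIELD(int, nr) expands to:

    "int", "nr", offsetof(typeof(trace), nr), sizeof(trace.nr),
    is_signed_type(int)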
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 742a32e..106c150 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -330,6 +330,7 @@
 LIB_H += ../../include/linux/list.h
 LIB_H += util/include/linux/list.h
 LIB_H += perf.h
+LIB_H += util/event.h
 LIB_H += util/types.h
 LIB_H += util/levenshtein.h
 LIB_H += util/parse-options.h
@@ -343,9 +344,12 @@
 LIB_H += util/run-command.h
 LIB_H += util/sigchain.h
 LIB_H += util/symbol.h
-LIB_H += util/module.h
 LIB_H += util/color.h
 LIB_H += util/values.h
+LIB_H += util/sort.h
+LIB_H += util/hist.h
+LIB_H += util/thread.h
+LIB_H += util/data_map.h
 
 LIB_OBJS += util/abspath.o
 LIB_OBJS += util/alias.o
@@ -368,7 +372,6 @@
 LIB_OBJS += util/wrapper.o
 LIB_OBJS += util/sigchain.o
 LIB_OBJS += util/symbol.o
-LIB_OBJS += util/module.o
 LIB_OBJS += util/color.o
 LIB_OBJS += util/pager.o
 LIB_OBJS += util/header.o
@@ -381,6 +384,9 @@
 LIB_OBJS += util/trace-event-read.o
 LIB_OBJS += util/trace-event-info.o
 LIB_OBJS += util/svghelper.o
+LIB_OBJS += util/sort.o
+LIB_OBJS += util/hist.o
+LIB_OBJS += util/data_map.o
 
 BUILTIN_OBJS += builtin-annotate.o
 BUILTIN_OBJS += builtin-help.o
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 1ec7416..8c84320 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -22,15 +22,13 @@
 #include "util/parse-options.h"
 #include "util/parse-events.h"
 #include "util/thread.h"
+#include "util/sort.h"
+#include "util/hist.h"
 
 static char		const *input_name = "perf.data";
 
-static char		default_sort_order[] = "comm,symbol";
-static char		*sort_order = default_sort_order;
-
 static int		force;
 static int		input;
-static int		show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
 
 static int		full_paths;
 
@@ -49,248 +47,6 @@
 	char		*path;
 };
 
-/*
- * histogram, sorted on item, collects counts
- */
-
-static struct rb_root hist;
-
-struct hist_entry {
-	struct rb_node	 rb_node;
-
-	struct thread	 *thread;
-	struct map	 *map;
-	struct dso	 *dso;
-	struct symbol	 *sym;
-	u64	 ip;
-	char		 level;
-
-	uint32_t	 count;
-};
-
-/*
- * configurable sorting bits
- */
-
-struct sort_entry {
-	struct list_head list;
-
-	const char *header;
-
-	int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
-	int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
-	size_t	(*print)(FILE *fp, struct hist_entry *);
-};
-
-/* --sort pid */
-
-static int64_t
-sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
-{
-	return right->thread->pid - left->thread->pid;
-}
-
-static size_t
-sort__thread_print(FILE *fp, struct hist_entry *self)
-{
-	return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid);
-}
-
-static struct sort_entry sort_thread = {
-	.header = "         Command:  Pid",
-	.cmp	= sort__thread_cmp,
-	.print	= sort__thread_print,
-};
-
-/* --sort comm */
-
-static int64_t
-sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
-{
-	return right->thread->pid - left->thread->pid;
-}
-
-static int64_t
-sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
-{
-	char *comm_l = left->thread->comm;
-	char *comm_r = right->thread->comm;
-
-	if (!comm_l || !comm_r) {
-		if (!comm_l && !comm_r)
-			return 0;
-		else if (!comm_l)
-			return -1;
-		else
-			return 1;
-	}
-
-	return strcmp(comm_l, comm_r);
-}
-
-static size_t
-sort__comm_print(FILE *fp, struct hist_entry *self)
-{
-	return fprintf(fp, "%16s", self->thread->comm);
-}
-
-static struct sort_entry sort_comm = {
-	.header		= "         Command",
-	.cmp		= sort__comm_cmp,
-	.collapse	= sort__comm_collapse,
-	.print		= sort__comm_print,
-};
-
-/* --sort dso */
-
-static int64_t
-sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
-{
-	struct dso *dso_l = left->dso;
-	struct dso *dso_r = right->dso;
-
-	if (!dso_l || !dso_r) {
-		if (!dso_l && !dso_r)
-			return 0;
-		else if (!dso_l)
-			return -1;
-		else
-			return 1;
-	}
-
-	return strcmp(dso_l->name, dso_r->name);
-}
-
-static size_t
-sort__dso_print(FILE *fp, struct hist_entry *self)
-{
-	if (self->dso)
-		return fprintf(fp, "%-25s", self->dso->name);
-
-	return fprintf(fp, "%016llx         ", (u64)self->ip);
-}
-
-static struct sort_entry sort_dso = {
-	.header = "Shared Object            ",
-	.cmp	= sort__dso_cmp,
-	.print	= sort__dso_print,
-};
-
-/* --sort symbol */
-
-static int64_t
-sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
-{
-	u64 ip_l, ip_r;
-
-	if (left->sym == right->sym)
-		return 0;
-
-	ip_l = left->sym ? left->sym->start : left->ip;
-	ip_r = right->sym ? right->sym->start : right->ip;
-
-	return (int64_t)(ip_r - ip_l);
-}
-
-static size_t
-sort__sym_print(FILE *fp, struct hist_entry *self)
-{
-	size_t ret = 0;
-
-	if (verbose)
-		ret += fprintf(fp, "%#018llx  ", (u64)self->ip);
-
-	if (self->sym) {
-		ret += fprintf(fp, "[%c] %s",
-			self->dso == kernel_dso ? 'k' : '.', self->sym->name);
-	} else {
-		ret += fprintf(fp, "%#016llx", (u64)self->ip);
-	}
-
-	return ret;
-}
-
-static struct sort_entry sort_sym = {
-	.header = "Symbol",
-	.cmp	= sort__sym_cmp,
-	.print	= sort__sym_print,
-};
-
-static int sort__need_collapse = 0;
-
-struct sort_dimension {
-	const char		*name;
-	struct sort_entry	*entry;
-	int			taken;
-};
-
-static struct sort_dimension sort_dimensions[] = {
-	{ .name = "pid",	.entry = &sort_thread,	},
-	{ .name = "comm",	.entry = &sort_comm,	},
-	{ .name = "dso",	.entry = &sort_dso,	},
-	{ .name = "symbol",	.entry = &sort_sym,	},
-};
-
-static LIST_HEAD(hist_entry__sort_list);
-
-static int sort_dimension__add(char *tok)
-{
-	unsigned int i;
-
-	for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
-		struct sort_dimension *sd = &sort_dimensions[i];
-
-		if (sd->taken)
-			continue;
-
-		if (strncasecmp(tok, sd->name, strlen(tok)))
-			continue;
-
-		if (sd->entry->collapse)
-			sort__need_collapse = 1;
-
-		list_add_tail(&sd->entry->list, &hist_entry__sort_list);
-		sd->taken = 1;
-
-		return 0;
-	}
-
-	return -ESRCH;
-}
-
-static int64_t
-hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
-{
-	struct sort_entry *se;
-	int64_t cmp = 0;
-
-	list_for_each_entry(se, &hist_entry__sort_list, list) {
-		cmp = se->cmp(left, right);
-		if (cmp)
-			break;
-	}
-
-	return cmp;
-}
-
-static int64_t
-hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
-{
-	struct sort_entry *se;
-	int64_t cmp = 0;
-
-	list_for_each_entry(se, &hist_entry__sort_list, list) {
-		int64_t (*f)(struct hist_entry *, struct hist_entry *);
-
-		f = se->collapse ?: se->cmp;
-
-		cmp = f(left, right);
-		if (cmp)
-			break;
-	}
-
-	return cmp;
-}
 
 /*
  * collect histogram counts
@@ -306,6 +62,7 @@
 		return;
 
 	sym_size = sym->end - sym->start;
+	ip = he->map->map_ip(he->map, ip);
 	offset = ip - sym->start;
 
 	if (offset >= sym_size)
@@ -322,171 +79,27 @@
 			sym->hist[offset]);
 }
 
-static int
-hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
-		struct symbol *sym, u64 ip, char level)
+static int hist_entry__add(struct thread *thread, struct map *map,
+			   struct symbol *sym, u64 ip, u64 count, char level)
 {
-	struct rb_node **p = &hist.rb_node;
-	struct rb_node *parent = NULL;
-	struct hist_entry *he;
-	struct hist_entry entry = {
-		.thread	= thread,
-		.map	= map,
-		.dso	= dso,
-		.sym	= sym,
-		.ip	= ip,
-		.level	= level,
-		.count	= 1,
-	};
-	int cmp;
-
-	while (*p != NULL) {
-		parent = *p;
-		he = rb_entry(parent, struct hist_entry, rb_node);
-
-		cmp = hist_entry__cmp(&entry, he);
-
-		if (!cmp) {
-			hist_hit(he, ip);
-
-			return 0;
-		}
-
-		if (cmp < 0)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-
-	he = malloc(sizeof(*he));
-	if (!he)
+	bool hit;
+	struct hist_entry *he = __hist_entry__add(thread, map, sym, NULL, ip,
+						  count, level, &hit);
+	if (he == NULL)
 		return -ENOMEM;
-	*he = entry;
-	rb_link_node(&he->rb_node, parent, p);
-	rb_insert_color(&he->rb_node, &hist);
-
+	if (hit)
+		hist_hit(he, ip);
 	return 0;
 }
 
-static void hist_entry__free(struct hist_entry *he)
-{
-	free(he);
-}
-
-/*
- * collapse the histogram
- */
-
-static struct rb_root collapse_hists;
-
-static void collapse__insert_entry(struct hist_entry *he)
-{
-	struct rb_node **p = &collapse_hists.rb_node;
-	struct rb_node *parent = NULL;
-	struct hist_entry *iter;
-	int64_t cmp;
-
-	while (*p != NULL) {
-		parent = *p;
-		iter = rb_entry(parent, struct hist_entry, rb_node);
-
-		cmp = hist_entry__collapse(iter, he);
-
-		if (!cmp) {
-			iter->count += he->count;
-			hist_entry__free(he);
-			return;
-		}
-
-		if (cmp < 0)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-
-	rb_link_node(&he->rb_node, parent, p);
-	rb_insert_color(&he->rb_node, &collapse_hists);
-}
-
-static void collapse__resort(void)
-{
-	struct rb_node *next;
-	struct hist_entry *n;
-
-	if (!sort__need_collapse)
-		return;
-
-	next = rb_first(&hist);
-	while (next) {
-		n = rb_entry(next, struct hist_entry, rb_node);
-		next = rb_next(&n->rb_node);
-
-		rb_erase(&n->rb_node, &hist);
-		collapse__insert_entry(n);
-	}
-}
-
-/*
- * reverse the map, sort on count.
- */
-
-static struct rb_root output_hists;
-
-static void output__insert_entry(struct hist_entry *he)
-{
-	struct rb_node **p = &output_hists.rb_node;
-	struct rb_node *parent = NULL;
-	struct hist_entry *iter;
-
-	while (*p != NULL) {
-		parent = *p;
-		iter = rb_entry(parent, struct hist_entry, rb_node);
-
-		if (he->count > iter->count)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-
-	rb_link_node(&he->rb_node, parent, p);
-	rb_insert_color(&he->rb_node, &output_hists);
-}
-
-static void output__resort(void)
-{
-	struct rb_node *next;
-	struct hist_entry *n;
-	struct rb_root *tree = &hist;
-
-	if (sort__need_collapse)
-		tree = &collapse_hists;
-
-	next = rb_first(tree);
-
-	while (next) {
-		n = rb_entry(next, struct hist_entry, rb_node);
-		next = rb_next(&n->rb_node);
-
-		rb_erase(&n->rb_node, tree);
-		output__insert_entry(n);
-	}
-}
-
-static unsigned long total = 0,
-		     total_mmap = 0,
-		     total_comm = 0,
-		     total_fork = 0,
-		     total_unknown = 0;
-
 static int
 process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 {
 	char level;
-	int show = 0;
-	struct dso *dso = NULL;
 	struct thread *thread;
 	u64 ip = event->ip.ip;
 	struct map *map = NULL;
+	struct symbol *sym = NULL;
 
 	thread = threads__findnew(event->ip.pid, &threads, &last_match);
 
@@ -506,51 +119,44 @@
 	}
 
 	if (event->header.misc & PERF_RECORD_MISC_KERNEL) {
-		show = SHOW_KERNEL;
 		level = 'k';
-
-		dso = kernel_dso;
-
-		dump_printf(" ...... dso: %s\n", dso->name);
-
+		sym = kernel_maps__find_symbol(ip, &map);
+		dump_printf(" ...... dso: %s\n",
+			    map ? map->dso->long_name : "<not found>");
 	} else if (event->header.misc & PERF_RECORD_MISC_USER) {
-
-		show = SHOW_USER;
 		level = '.';
-
 		map = thread__find_map(thread, ip);
 		if (map != NULL) {
+got_map:
 			ip = map->map_ip(map, ip);
-			dso = map->dso;
+			sym = map->dso->find_symbol(map->dso, ip);
 		} else {
 			/*
 			 * If this is outside of all known maps,
 			 * and is a negative address, try to look it
 			 * up in the kernel dso, as it might be a
-			 * vsyscall (which executes in user-mode):
+			 * vsyscall or vdso (which executes in user-mode).
+			 *
+			 * XXX This is nasty, we should have a symbol list in
+			 * the "[vdso]" dso, but for now lets use the old
+			 * trick of looking in the whole kernel symbol list.
 			 */
-			if ((long long)ip < 0)
-				dso = kernel_dso;
+			if ((long long)ip < 0) {
+				map = kernel_map;
+				goto got_map;
+			}
 		}
-		dump_printf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
-
+		dump_printf(" ...... dso: %s\n",
+			    map ? map->dso->long_name : "<not found>");
 	} else {
-		show = SHOW_HV;
 		level = 'H';
 		dump_printf(" ...... dso: [hypervisor]\n");
 	}
 
-	if (show & show_mask) {
-		struct symbol *sym = NULL;
-
-		if (dso)
-			sym = dso->find_symbol(dso, ip);
-
-		if (hist_entry__add(thread, map, dso, sym, ip, level)) {
-			fprintf(stderr,
-		"problem incrementing symbol count, skipping event\n");
-			return -1;
-		}
+	if (hist_entry__add(thread, map, sym, ip, 1, level)) {
+		fprintf(stderr, "problem incrementing symbol count, "
+				"skipping event\n");
+		return -1;
 	}
 	total++;
 
@@ -666,7 +272,7 @@
 }
 
 static int
-parse_line(FILE *file, struct symbol *sym, u64 start, u64 len)
+parse_line(FILE *file, struct symbol *sym, u64 len)
 {
 	char *line = NULL, *tmp, *tmp2;
 	static const char *prev_line;
@@ -716,7 +322,7 @@
 		const char *color;
 		struct sym_ext *sym_ext = sym->priv;
 
-		offset = line_ip - start;
+		offset = line_ip - sym->start;
 		if (offset < len)
 			hits = sym->hist[offset];
 
@@ -795,7 +401,7 @@
 
 /* Get the filename:line for the colored entries */
 static void
-get_source_line(struct symbol *sym, u64 start, int len, const char *filename)
+get_source_line(struct symbol *sym, int len, const char *filename)
 {
 	int i;
 	char cmd[PATH_MAX * 2];
@@ -820,7 +426,7 @@
 		if (sym_ext[i].percent <= 0.5)
 			continue;
 
-		offset = start + i;
+		offset = sym->start + i;
 		sprintf(cmd, "addr2line -e %s %016llx", filename, offset);
 		fp = popen(cmd, "r");
 		if (!fp)
@@ -872,31 +478,23 @@
 
 static void annotate_sym(struct dso *dso, struct symbol *sym)
 {
-	const char *filename = dso->name, *d_filename;
-	u64 start, end, len;
+	const char *filename = dso->long_name, *d_filename;
+	u64 len;
 	char command[PATH_MAX*2];
 	FILE *file;
 
 	if (!filename)
 		return;
-	if (sym->module)
-		filename = sym->module->path;
-	else if (dso == kernel_dso)
-		filename = vmlinux_name;
 
-	start = sym->obj_start;
-	if (!start)
-		start = sym->start;
 	if (full_paths)
 		d_filename = filename;
 	else
 		d_filename = basename(filename);
 
-	end = start + sym->end - sym->start + 1;
 	len = sym->end - sym->start;
 
 	if (print_line) {
-		get_source_line(sym, start, len, filename);
+		get_source_line(sym, len, filename);
 		print_summary(filename);
 	}
 
@@ -905,10 +503,11 @@
 	printf("------------------------------------------------\n");
 
 	if (verbose >= 2)
-		printf("annotating [%p] %30s : [%p] %30s\n", dso, dso->name, sym, sym->name);
+		printf("annotating [%p] %30s : [%p] %30s\n",
+		       dso, dso->long_name, sym, sym->name);
 
 	sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s",
-			(u64)start, (u64)end, filename, filename);
+		sym->start, sym->end, filename, filename);
 
 	if (verbose >= 3)
 		printf("doing: %s\n", command);
@@ -918,7 +517,7 @@
 		return;
 
 	while (!feof(file)) {
-		if (parse_line(file, sym, start, len) < 0)
+		if (parse_line(file, sym, len) < 0)
 			break;
 	}
 
@@ -1059,14 +658,14 @@
 	if (dump_trace)
 		return 0;
 
-	if (verbose >= 3)
+	if (verbose > 3)
 		threads__fprintf(stdout, &threads);
 
-	if (verbose >= 2)
+	if (verbose > 2)
 		dsos__fprintf(stdout);
 
 	collapse__resort();
-	output__resort();
+	output__resort(total);
 
 	find_annotations();
 
@@ -1139,5 +738,11 @@
 
 	setup_pager();
 
+	if (field_sep && *field_sep == '.') {
+		fputs("'.' is the only non valid --field-separator argument\n",
+				stderr);
+		exit(129);
+	}
+
 	return __cmd_annotate();
 }
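The histogram and sort machinery removed here moves into the shared util/hist.c, util/sort.c and util/data_map.c objects added to the Makefile above; builtin-annotate now feeds a shared __hist_entry__add() and keeps only its annotation-specific per-instruction accounting. A hedged reconstruction of the shared interface, inferred from the call sites in this patch rather than taken from the new headers themselves:

    /* util/hist.h (shape inferred from the callers above) */
    struct hist_entry *__hist_entry__add(struct thread *thread, struct map *map,
                                         struct symbol *sym, struct symbol *parent,
                                         u64 ip, u64 count, char level, bool *hit);

    void collapse__resort(void);
    void output__resort(u64 total_samples);

    /*
     * *hit is set when an existing entry was found; the callers then either
     * bump the per-instruction histogram (annotate) or he->count (report).
     */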
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 3eeef33..4e3a374 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -17,7 +17,6 @@
 #include "util/header.h"
 #include "util/event.h"
 #include "util/debug.h"
-#include "util/trace-event.h"
 
 #include <unistd.h>
 #include <sched.h>
@@ -27,45 +26,45 @@
 
 static int			fd[MAX_NR_CPUS][MAX_COUNTERS];
 
-static long			default_interval		= 100000;
+static long			default_interval		=      0;
 
-static int			nr_cpus				= 0;
+static int			nr_cpus				=      0;
 static unsigned int		page_size;
-static unsigned int		mmap_pages			= 128;
-static int			freq				= 0;
+static unsigned int		mmap_pages			=    128;
+static int			freq				=   1000;
 static int			output;
 static const char		*output_name			= "perf.data";
-static int			group				= 0;
-static unsigned int		realtime_prio			= 0;
-static int			raw_samples			= 0;
-static int			system_wide			= 0;
-static int			profile_cpu			= -1;
-static pid_t			target_pid			= -1;
-static pid_t			child_pid			= -1;
-static int			inherit				= 1;
-static int			force				= 0;
-static int			append_file			= 0;
-static int			call_graph			= 0;
-static int			inherit_stat			= 0;
-static int			no_samples			= 0;
-static int			sample_address			= 0;
-static int			multiplex			= 0;
-static int			multiplex_fd			= -1;
+static int			group				=      0;
+static unsigned int		realtime_prio			=      0;
+static int			raw_samples			=      0;
+static int			system_wide			=      0;
+static int			profile_cpu			=     -1;
+static pid_t			target_pid			=     -1;
+static pid_t			child_pid			=     -1;
+static int			inherit				=      1;
+static int			force				=      0;
+static int			append_file			=      0;
+static int			call_graph			=      0;
+static int			inherit_stat			=      0;
+static int			no_samples			=      0;
+static int			sample_address			=      0;
+static int			multiplex			=      0;
+static int			multiplex_fd			=     -1;
 
-static long			samples;
+static long			samples				=      0;
 static struct timeval		last_read;
 static struct timeval		this_read;
 
-static u64			bytes_written;
+static u64			bytes_written			=      0;
 
 static struct pollfd		event_array[MAX_NR_CPUS * MAX_COUNTERS];
 
-static int			nr_poll;
-static int			nr_cpu;
+static int			nr_poll				=      0;
+static int			nr_cpu				=      0;
 
-static int			file_new = 1;
+static int			file_new			=      1;
 
-struct perf_header		*header;
+struct perf_header		*header				=   NULL;
 
 struct mmap_data {
 	int			counter;
@@ -566,17 +565,17 @@
 	else
 		header = perf_header__new();
 
-
 	if (raw_samples) {
-		read_tracing_data(attrs, nr_counters);
+		perf_header__set_trace_info();
 	} else {
 		for (i = 0; i < nr_counters; i++) {
 			if (attrs[i].sample_type & PERF_SAMPLE_RAW) {
-				read_tracing_data(attrs, nr_counters);
+				perf_header__set_trace_info();
 				break;
 			}
 		}
 	}
+
 	atexit(atexit_header);
 
 	if (!system_wide) {
@@ -731,6 +730,18 @@
 		attrs[0].config = PERF_COUNT_HW_CPU_CYCLES;
 	}
 
+	/*
+	 * User specified count overrides default frequency.
+	 */
+	if (default_interval)
+		freq = 0;
+	else if (freq) {
+		default_interval = freq;
+	} else {
+		fprintf(stderr, "frequency and count are zero, aborting\n");
+		exit(EXIT_FAILURE);
+	}
+
 	for (counter = 0; counter < nr_counters; counter++) {
 		if (attrs[counter].sample_period)
 			continue;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 19669c2..f57a23b 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -26,20 +26,18 @@
 #include "util/parse-options.h"
 #include "util/parse-events.h"
 
+#include "util/data_map.h"
 #include "util/thread.h"
+#include "util/sort.h"
+#include "util/hist.h"
 
 static char		const *input_name = "perf.data";
 
-static char		default_sort_order[] = "comm,dso,symbol";
-static char		*sort_order = default_sort_order;
 static char		*dso_list_str, *comm_list_str, *sym_list_str,
 			*col_width_list_str;
 static struct strlist	*dso_list, *comm_list, *sym_list;
-static char		*field_sep;
 
 static int		force;
-static int		input;
-static int		show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
 
 static int		full_paths;
 static int		show_nr_samples;
@@ -50,21 +48,11 @@
 static char		default_pretty_printing_style[] = "normal";
 static char		*pretty_printing_style = default_pretty_printing_style;
 
-static unsigned long	page_size;
-static unsigned long	mmap_window = 32;
-
-static char		default_parent_pattern[] = "^sys_|^do_page_fault";
-static char		*parent_pattern = default_parent_pattern;
-static regex_t		parent_regex;
-
 static int		exclude_other = 1;
 
 static char		callchain_default_opt[] = "fractal,0.5";
 
-static int		callchain;
-
-static char		__cwd[PATH_MAX];
-static char		*cwd = __cwd;
+static char		*cwd;
 static int		cwdlen;
 
 static struct rb_root	threads;
@@ -72,346 +60,8 @@
 
 static struct perf_header *header;
 
-static
-struct callchain_param	callchain_param = {
-	.mode	= CHAIN_GRAPH_REL,
-	.min_percent = 0.5
-};
-
 static u64		sample_type;
 
-static int repsep_fprintf(FILE *fp, const char *fmt, ...)
-{
-	int n;
-	va_list ap;
-
-	va_start(ap, fmt);
-	if (!field_sep)
-		n = vfprintf(fp, fmt, ap);
-	else {
-		char *bf = NULL;
-		n = vasprintf(&bf, fmt, ap);
-		if (n > 0) {
-			char *sep = bf;
-
-			while (1) {
-				sep = strchr(sep, *field_sep);
-				if (sep == NULL)
-					break;
-				*sep = '.';
-			}
-		}
-		fputs(bf, fp);
-		free(bf);
-	}
-	va_end(ap);
-	return n;
-}
-
-static unsigned int dsos__col_width,
-		    comms__col_width,
-		    threads__col_width;
-
-/*
- * histogram, sorted on item, collects counts
- */
-
-static struct rb_root hist;
-
-struct hist_entry {
-	struct rb_node		rb_node;
-
-	struct thread		*thread;
-	struct map		*map;
-	struct dso		*dso;
-	struct symbol		*sym;
-	struct symbol		*parent;
-	u64			ip;
-	char			level;
-	struct callchain_node	callchain;
-	struct rb_root		sorted_chain;
-
-	u64			count;
-};
-
-/*
- * configurable sorting bits
- */
-
-struct sort_entry {
-	struct list_head list;
-
-	const char *header;
-
-	int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
-	int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
-	size_t	(*print)(FILE *fp, struct hist_entry *, unsigned int width);
-	unsigned int *width;
-	bool	elide;
-};
-
-static int64_t cmp_null(void *l, void *r)
-{
-	if (!l && !r)
-		return 0;
-	else if (!l)
-		return -1;
-	else
-		return 1;
-}
-
-/* --sort pid */
-
-static int64_t
-sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
-{
-	return right->thread->pid - left->thread->pid;
-}
-
-static size_t
-sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width)
-{
-	return repsep_fprintf(fp, "%*s:%5d", width - 6,
-			      self->thread->comm ?: "", self->thread->pid);
-}
-
-static struct sort_entry sort_thread = {
-	.header = "Command:  Pid",
-	.cmp	= sort__thread_cmp,
-	.print	= sort__thread_print,
-	.width	= &threads__col_width,
-};
-
-/* --sort comm */
-
-static int64_t
-sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
-{
-	return right->thread->pid - left->thread->pid;
-}
-
-static int64_t
-sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
-{
-	char *comm_l = left->thread->comm;
-	char *comm_r = right->thread->comm;
-
-	if (!comm_l || !comm_r)
-		return cmp_null(comm_l, comm_r);
-
-	return strcmp(comm_l, comm_r);
-}
-
-static size_t
-sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width)
-{
-	return repsep_fprintf(fp, "%*s", width, self->thread->comm);
-}
-
-static struct sort_entry sort_comm = {
-	.header		= "Command",
-	.cmp		= sort__comm_cmp,
-	.collapse	= sort__comm_collapse,
-	.print		= sort__comm_print,
-	.width		= &comms__col_width,
-};
-
-/* --sort dso */
-
-static int64_t
-sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
-{
-	struct dso *dso_l = left->dso;
-	struct dso *dso_r = right->dso;
-
-	if (!dso_l || !dso_r)
-		return cmp_null(dso_l, dso_r);
-
-	return strcmp(dso_l->name, dso_r->name);
-}
-
-static size_t
-sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width)
-{
-	if (self->dso)
-		return repsep_fprintf(fp, "%-*s", width, self->dso->name);
-
-	return repsep_fprintf(fp, "%*llx", width, (u64)self->ip);
-}
-
-static struct sort_entry sort_dso = {
-	.header = "Shared Object",
-	.cmp	= sort__dso_cmp,
-	.print	= sort__dso_print,
-	.width	= &dsos__col_width,
-};
-
-/* --sort symbol */
-
-static int64_t
-sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
-{
-	u64 ip_l, ip_r;
-
-	if (left->sym == right->sym)
-		return 0;
-
-	ip_l = left->sym ? left->sym->start : left->ip;
-	ip_r = right->sym ? right->sym->start : right->ip;
-
-	return (int64_t)(ip_r - ip_l);
-}
-
-static size_t
-sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used)
-{
-	size_t ret = 0;
-
-	if (verbose)
-		ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip,
-				      dso__symtab_origin(self->dso));
-
-	ret += repsep_fprintf(fp, "[%c] ", self->level);
-	if (self->sym) {
-		ret += repsep_fprintf(fp, "%s", self->sym->name);
-
-		if (self->sym->module)
-			ret += repsep_fprintf(fp, "\t[%s]",
-					     self->sym->module->name);
-	} else {
-		ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip);
-	}
-
-	return ret;
-}
-
-static struct sort_entry sort_sym = {
-	.header = "Symbol",
-	.cmp	= sort__sym_cmp,
-	.print	= sort__sym_print,
-};
-
-/* --sort parent */
-
-static int64_t
-sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
-{
-	struct symbol *sym_l = left->parent;
-	struct symbol *sym_r = right->parent;
-
-	if (!sym_l || !sym_r)
-		return cmp_null(sym_l, sym_r);
-
-	return strcmp(sym_l->name, sym_r->name);
-}
-
-static size_t
-sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width)
-{
-	return repsep_fprintf(fp, "%-*s", width,
-			      self->parent ? self->parent->name : "[other]");
-}
-
-static unsigned int parent_symbol__col_width;
-
-static struct sort_entry sort_parent = {
-	.header = "Parent symbol",
-	.cmp	= sort__parent_cmp,
-	.print	= sort__parent_print,
-	.width	= &parent_symbol__col_width,
-};
-
-static int sort__need_collapse = 0;
-static int sort__has_parent = 0;
-
-struct sort_dimension {
-	const char		*name;
-	struct sort_entry	*entry;
-	int			taken;
-};
-
-static struct sort_dimension sort_dimensions[] = {
-	{ .name = "pid",	.entry = &sort_thread,	},
-	{ .name = "comm",	.entry = &sort_comm,	},
-	{ .name = "dso",	.entry = &sort_dso,	},
-	{ .name = "symbol",	.entry = &sort_sym,	},
-	{ .name = "parent",	.entry = &sort_parent,	},
-};
-
-static LIST_HEAD(hist_entry__sort_list);
-
-static int sort_dimension__add(const char *tok)
-{
-	unsigned int i;
-
-	for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
-		struct sort_dimension *sd = &sort_dimensions[i];
-
-		if (sd->taken)
-			continue;
-
-		if (strncasecmp(tok, sd->name, strlen(tok)))
-			continue;
-
-		if (sd->entry->collapse)
-			sort__need_collapse = 1;
-
-		if (sd->entry == &sort_parent) {
-			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
-			if (ret) {
-				char err[BUFSIZ];
-
-				regerror(ret, &parent_regex, err, sizeof(err));
-				fprintf(stderr, "Invalid regex: %s\n%s",
-					parent_pattern, err);
-				exit(-1);
-			}
-			sort__has_parent = 1;
-		}
-
-		list_add_tail(&sd->entry->list, &hist_entry__sort_list);
-		sd->taken = 1;
-
-		return 0;
-	}
-
-	return -ESRCH;
-}
-
-static int64_t
-hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
-{
-	struct sort_entry *se;
-	int64_t cmp = 0;
-
-	list_for_each_entry(se, &hist_entry__sort_list, list) {
-		cmp = se->cmp(left, right);
-		if (cmp)
-			break;
-	}
-
-	return cmp;
-}
-
-static int64_t
-hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
-{
-	struct sort_entry *se;
-	int64_t cmp = 0;
-
-	list_for_each_entry(se, &hist_entry__sort_list, list) {
-		int64_t (*f)(struct hist_entry *, struct hist_entry *);
-
-		f = se->collapse ?: se->cmp;
-
-		cmp = f(left, right);
-		if (cmp)
-			break;
-	}
-
-	return cmp;
-}
-
 static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask)
 {
 	int i;
@@ -610,7 +260,6 @@
 	return ret;
 }
 
-
 static size_t
 hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
 {
@@ -695,22 +344,17 @@
 
 
 static struct symbol *
-resolve_symbol(struct thread *thread, struct map **mapp,
-	       struct dso **dsop, u64 *ipp)
+resolve_symbol(struct thread *thread, struct map **mapp, u64 *ipp)
 {
-	struct dso *dso = dsop ? *dsop : NULL;
 	struct map *map = mapp ? *mapp : NULL;
 	u64 ip = *ipp;
 
-	if (!thread)
-		return NULL;
-
-	if (dso)
-		goto got_dso;
-
 	if (map)
 		goto got_map;
 
+	if (!thread)
+		return NULL;
+
 	map = thread__find_map(thread, ip);
 	if (map != NULL) {
 		/*
@@ -725,29 +369,26 @@
 			*mapp = map;
 got_map:
 		ip = map->map_ip(map, ip);
-
-		dso = map->dso;
 	} else {
 		/*
 		 * If this is outside of all known maps,
 		 * and is a negative address, try to look it
 		 * up in the kernel dso, as it might be a
-		 * vsyscall (which executes in user-mode):
+		 * vsyscall or vdso (which executes in user-mode).
+		 *
+		 * XXX This is nasty, we should have a symbol list in
+		 * the "[vdso]" dso, but for now lets use the old
+		 * trick of looking in the whole kernel symbol list.
 		 */
 		if ((long long)ip < 0)
-		dso = kernel_dso;
+			return kernel_maps__find_symbol(ip, mapp);
 	}
-	dump_printf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
+	dump_printf(" ...... dso: %s\n",
+		    map ? map->dso->long_name : "<not found>");
 	dump_printf(" ...... map: %Lx -> %Lx\n", *ipp, ip);
 	*ipp  = ip;
 
-	if (dsop)
-		*dsop = dso;
-
-	if (!dso)
-		return NULL;
-got_dso:
-	return dso->find_symbol(dso, ip);
+	return map ? map->dso->find_symbol(map->dso, ip) : NULL;
 }
 
 static int call__match(struct symbol *sym)
@@ -758,9 +399,9 @@
 	return 0;
 }
 
-static struct symbol **
-resolve_callchain(struct thread *thread, struct map *map __used,
-		    struct ip_callchain *chain, struct hist_entry *entry)
+static struct symbol **resolve_callchain(struct thread *thread, struct map *map,
+					 struct ip_callchain *chain,
+					 struct symbol **parent)
 {
 	u64 context = PERF_CONTEXT_MAX;
 	struct symbol **syms = NULL;
@@ -776,8 +417,7 @@
 
 	for (i = 0; i < chain->nr; i++) {
 		u64 ip = chain->ips[i];
-		struct dso *dso = NULL;
-		struct symbol *sym;
+		struct symbol *sym = NULL;
 
 		if (ip >= PERF_CONTEXT_MAX) {
 			context = ip;
@@ -786,21 +426,18 @@
 
 		switch (context) {
 		case PERF_CONTEXT_HV:
-			dso = hypervisor_dso;
 			break;
 		case PERF_CONTEXT_KERNEL:
-			dso = kernel_dso;
+			sym = kernel_maps__find_symbol(ip, &map);
 			break;
 		default:
+			sym = resolve_symbol(thread, &map, &ip);
 			break;
 		}
 
-		sym = resolve_symbol(thread, NULL, &dso, &ip);
-
 		if (sym) {
-			if (sort__has_parent && call__match(sym) &&
-			    !entry->parent)
-				entry->parent = sym;
+			if (sort__has_parent && !*parent && call__match(sym))
+				*parent = sym;
 			if (!callchain)
 				break;
 			syms[i] = sym;
@@ -815,177 +452,35 @@
  */
 
 static int
-hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
+hist_entry__add(struct thread *thread, struct map *map,
 		struct symbol *sym, u64 ip, struct ip_callchain *chain,
 		char level, u64 count)
 {
-	struct rb_node **p = &hist.rb_node;
-	struct rb_node *parent = NULL;
+	struct symbol **syms = NULL, *parent = NULL;
+	bool hit;
 	struct hist_entry *he;
-	struct symbol **syms = NULL;
-	struct hist_entry entry = {
-		.thread	= thread,
-		.map	= map,
-		.dso	= dso,
-		.sym	= sym,
-		.ip	= ip,
-		.level	= level,
-		.count	= count,
-		.parent = NULL,
-		.sorted_chain = RB_ROOT
-	};
-	int cmp;
 
 	if ((sort__has_parent || callchain) && chain)
-		syms = resolve_callchain(thread, map, chain, &entry);
+		syms = resolve_callchain(thread, map, chain, &parent);
 
-	while (*p != NULL) {
-		parent = *p;
-		he = rb_entry(parent, struct hist_entry, rb_node);
-
-		cmp = hist_entry__cmp(&entry, he);
-
-		if (!cmp) {
-			he->count += count;
-			if (callchain) {
-				append_chain(&he->callchain, chain, syms);
-				free(syms);
-			}
-			return 0;
-		}
-
-		if (cmp < 0)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-
-	he = malloc(sizeof(*he));
-	if (!he)
+	he = __hist_entry__add(thread, map, sym, parent,
+			       ip, count, level, &hit);
+	if (he == NULL)
 		return -ENOMEM;
-	*he = entry;
+
+	if (hit)
+		he->count += count;
+
 	if (callchain) {
-		callchain_init(&he->callchain);
+		if (!hit)
+			callchain_init(&he->callchain);
 		append_chain(&he->callchain, chain, syms);
 		free(syms);
 	}
-	rb_link_node(&he->rb_node, parent, p);
-	rb_insert_color(&he->rb_node, &hist);
 
 	return 0;
 }
 
-static void hist_entry__free(struct hist_entry *he)
-{
-	free(he);
-}
-
-/*
- * collapse the histogram
- */
-
-static struct rb_root collapse_hists;
-
-static void collapse__insert_entry(struct hist_entry *he)
-{
-	struct rb_node **p = &collapse_hists.rb_node;
-	struct rb_node *parent = NULL;
-	struct hist_entry *iter;
-	int64_t cmp;
-
-	while (*p != NULL) {
-		parent = *p;
-		iter = rb_entry(parent, struct hist_entry, rb_node);
-
-		cmp = hist_entry__collapse(iter, he);
-
-		if (!cmp) {
-			iter->count += he->count;
-			hist_entry__free(he);
-			return;
-		}
-
-		if (cmp < 0)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-
-	rb_link_node(&he->rb_node, parent, p);
-	rb_insert_color(&he->rb_node, &collapse_hists);
-}
-
-static void collapse__resort(void)
-{
-	struct rb_node *next;
-	struct hist_entry *n;
-
-	if (!sort__need_collapse)
-		return;
-
-	next = rb_first(&hist);
-	while (next) {
-		n = rb_entry(next, struct hist_entry, rb_node);
-		next = rb_next(&n->rb_node);
-
-		rb_erase(&n->rb_node, &hist);
-		collapse__insert_entry(n);
-	}
-}
-
-/*
- * reverse the map, sort on count.
- */
-
-static struct rb_root output_hists;
-
-static void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits)
-{
-	struct rb_node **p = &output_hists.rb_node;
-	struct rb_node *parent = NULL;
-	struct hist_entry *iter;
-
-	if (callchain)
-		callchain_param.sort(&he->sorted_chain, &he->callchain,
-				      min_callchain_hits, &callchain_param);
-
-	while (*p != NULL) {
-		parent = *p;
-		iter = rb_entry(parent, struct hist_entry, rb_node);
-
-		if (he->count > iter->count)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-
-	rb_link_node(&he->rb_node, parent, p);
-	rb_insert_color(&he->rb_node, &output_hists);
-}
-
-static void output__resort(u64 total_samples)
-{
-	struct rb_node *next;
-	struct hist_entry *n;
-	struct rb_root *tree = &hist;
-	u64 min_callchain_hits;
-
-	min_callchain_hits = total_samples * (callchain_param.min_percent / 100);
-
-	if (sort__need_collapse)
-		tree = &collapse_hists;
-
-	next = rb_first(tree);
-
-	while (next) {
-		n = rb_entry(next, struct hist_entry, rb_node);
-		next = rb_next(&n->rb_node);
-
-		rb_erase(&n->rb_node, tree);
-		output__insert_entry(n, min_callchain_hits);
-	}
-}
-
 static size_t output__fprintf(FILE *fp, u64 total_samples)
 {
 	struct hist_entry *pos;
@@ -1080,13 +575,6 @@
 	return ret;
 }
 
-static unsigned long total = 0,
-		     total_mmap = 0,
-		     total_comm = 0,
-		     total_fork = 0,
-		     total_unknown = 0,
-		     total_lost = 0;
-
 static int validate_chain(struct ip_callchain *chain, event_t *event)
 {
 	unsigned int chain_size;
@@ -1104,8 +592,7 @@
 process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 {
 	char level;
-	int show = 0;
-	struct dso *dso = NULL;
+	struct symbol *sym = NULL;
 	struct thread *thread;
 	u64 ip = event->ip.ip;
 	u64 period = 1;
@@ -1161,42 +648,35 @@
 	cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 
 	if (cpumode == PERF_RECORD_MISC_KERNEL) {
-		show = SHOW_KERNEL;
 		level = 'k';
-
-		dso = kernel_dso;
-
-		dump_printf(" ...... dso: %s\n", dso->name);
-
+		sym = kernel_maps__find_symbol(ip, &map);
+		dump_printf(" ...... dso: %s\n",
+			    map ? map->dso->long_name : "<not found>");
 	} else if (cpumode == PERF_RECORD_MISC_USER) {
-
-		show = SHOW_USER;
 		level = '.';
+		sym = resolve_symbol(thread, &map, &ip);
 
 	} else {
-		show = SHOW_HV;
 		level = 'H';
-
-		dso = hypervisor_dso;
-
 		dump_printf(" ...... dso: [hypervisor]\n");
 	}
 
-	if (show & show_mask) {
-		struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip);
+	if (dso_list &&
+	    (!map || !map->dso ||
+	     !(strlist__has_entry(dso_list, map->dso->short_name) ||
+	       (map->dso->short_name != map->dso->long_name &&
+		strlist__has_entry(dso_list, map->dso->long_name)))))
+		return 0;
 
-		if (dso_list && (!dso || !dso->name ||
-				 !strlist__has_entry(dso_list, dso->name)))
-			return 0;
+	if (sym_list && sym && !strlist__has_entry(sym_list, sym->name))
+		return 0;
 
-		if (sym_list && (!sym || !strlist__has_entry(sym_list, sym->name)))
-			return 0;
-
-		if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) {
-			eprintf("problem incrementing symbol count, skipping event\n");
-			return -1;
-		}
+	if (hist_entry__add(thread, map, sym, ip,
+			    chain, level, period)) {
+		eprintf("problem incrementing symbol count, skipping event\n");
+		return -1;
 	}
+
 	total += period;
 
 	return 0;
@@ -1331,56 +811,51 @@
 	return 0;
 }
 
-static int
-process_event(event_t *event, unsigned long offset, unsigned long head)
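+/*
+ * Called by the perf.data dispatcher with the header's sample_type
+ * before any events are processed; reject files that lack the
+ * callchain data the selected options need.
+ */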
+static int sample_type_check(u64 type)
 {
-	trace_event(event);
+	sample_type = type;
 
-	switch (event->header.type) {
-	case PERF_RECORD_SAMPLE:
-		return process_sample_event(event, offset, head);
-
-	case PERF_RECORD_MMAP:
-		return process_mmap_event(event, offset, head);
-
-	case PERF_RECORD_COMM:
-		return process_comm_event(event, offset, head);
-
-	case PERF_RECORD_FORK:
-	case PERF_RECORD_EXIT:
-		return process_task_event(event, offset, head);
-
-	case PERF_RECORD_LOST:
-		return process_lost_event(event, offset, head);
-
-	case PERF_RECORD_READ:
-		return process_read_event(event, offset, head);
-
-	/*
-	 * We dont process them right now but they are fine:
-	 */
-
-	case PERF_RECORD_THROTTLE:
-	case PERF_RECORD_UNTHROTTLE:
-		return 0;
-
-	default:
-		return -1;
+	if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
+		if (sort__has_parent) {
+			fprintf(stderr, "selected --sort parent, but no"
+					" callchain data. Did you call"
+					" perf record without -g?\n");
+			return -1;
+		}
+		if (callchain) {
+			fprintf(stderr, "selected -g but no callchain data."
+					" Did you call perf record without"
+					" -g?\n");
+			return -1;
+		}
+	} else if (callchain_param.mode != CHAIN_NONE && !callchain) {
+			callchain = 1;
+			if (register_callchain_param(&callchain_param) < 0) {
+				fprintf(stderr, "Can't register callchain"
+						" params\n");
+				return -1;
+			}
 	}
 
 	return 0;
 }
 
+static struct perf_file_handler file_handler = {
+	.process_sample_event	= process_sample_event,
+	.process_mmap_event	= process_mmap_event,
+	.process_comm_event	= process_comm_event,
+	.process_exit_event	= process_task_event,
+	.process_fork_event	= process_task_event,
+	.process_lost_event	= process_lost_event,
+	.process_read_event	= process_read_event,
+	.sample_type_check	= sample_type_check,
+};
+
 static int __cmd_report(void)
 {
-	int ret, rc = EXIT_FAILURE;
-	unsigned long offset = 0;
-	unsigned long head, shift;
-	struct stat input_stat;
 	struct thread *idle;
-	event_t *event;
-	uint32_t size;
-	char *buf;
+	int ret;
 
 	idle = register_idle_thread(&threads, &last_match);
 	thread__comm_adjust(idle);
@@ -1388,159 +863,27 @@
 	if (show_threads)
 		perf_read_values_init(&show_threads_values);
 
-	input = open(input_name, O_RDONLY);
-	if (input < 0) {
-		fprintf(stderr, " failed to open file: %s", input_name);
-		if (!strcmp(input_name, "perf.data"))
-			fprintf(stderr, "  (try 'perf record' first)");
-		fprintf(stderr, "\n");
-		exit(-1);
-	}
+	register_perf_file_handler(&file_handler);
 
-	ret = fstat(input, &input_stat);
-	if (ret < 0) {
-		perror("failed to stat file");
-		exit(-1);
-	}
-
-	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
-		fprintf(stderr, "file: %s not owned by current user or root\n", input_name);
-		exit(-1);
-	}
-
-	if (!input_stat.st_size) {
-		fprintf(stderr, "zero-sized file, nothing to do!\n");
-		exit(0);
-	}
-
-	header = perf_header__read(input);
-	head = header->data_offset;
-
-	sample_type = perf_header__sample_type(header);
-
-	if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
-		if (sort__has_parent) {
-			fprintf(stderr, "selected --sort parent, but no"
-					" callchain data. Did you call"
-					" perf record without -g?\n");
-			exit(-1);
-		}
-		if (callchain) {
-			fprintf(stderr, "selected -g but no callchain data."
-					" Did you call perf record without"
-					" -g?\n");
-			exit(-1);
-		}
-	} else if (callchain_param.mode != CHAIN_NONE && !callchain) {
-			callchain = 1;
-			if (register_callchain_param(&callchain_param) < 0) {
-				fprintf(stderr, "Can't register callchain"
-						" params\n");
-				exit(-1);
-			}
-	}
-
-	if (load_kernel() < 0) {
-		perror("failed to load kernel symbols");
-		return EXIT_FAILURE;
-	}
-
-	if (!full_paths) {
-		if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
-			perror("failed to get the current directory");
-			return EXIT_FAILURE;
-		}
-		cwdlen = strlen(cwd);
-	} else {
-		cwd = NULL;
-		cwdlen = 0;
-	}
-
-	shift = page_size * (head / page_size);
-	offset += shift;
-	head -= shift;
-
-remap:
-	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
-			   MAP_SHARED, input, offset);
-	if (buf == MAP_FAILED) {
-		perror("failed to mmap file");
-		exit(-1);
-	}
-
-more:
-	event = (event_t *)(buf + head);
-
-	size = event->header.size;
-	if (!size)
-		size = 8;
-
-	if (head + event->header.size >= page_size * mmap_window) {
-		int munmap_ret;
-
-		shift = page_size * (head / page_size);
-
-		munmap_ret = munmap(buf, page_size * mmap_window);
-		assert(munmap_ret == 0);
-
-		offset += shift;
-		head -= shift;
-		goto remap;
-	}
-
-	size = event->header.size;
-
-	dump_printf("\n%p [%p]: event: %d\n",
-			(void *)(offset + head),
-			(void *)(long)event->header.size,
-			event->header.type);
-
-	if (!size || process_event(event, offset, head) < 0) {
-
-		dump_printf("%p [%p]: skipping unknown header type: %d\n",
-			(void *)(offset + head),
-			(void *)(long)(event->header.size),
-			event->header.type);
-
-		total_unknown++;
-
-		/*
-		 * assume we lost track of the stream, check alignment, and
-		 * increment a single u64 in the hope to catch on again 'soon'.
-		 */
-
-		if (unlikely(head & 7))
-			head &= ~7ULL;
-
-		size = 8;
-	}
-
-	head += size;
-
-	if (offset + head >= header->data_offset + header->data_size)
-		goto done;
-
-	if (offset + head < (unsigned long)input_stat.st_size)
-		goto more;
-
-done:
-	rc = EXIT_SUCCESS;
-	close(input);
+	ret = mmap_dispatch_perf_file(&header, input_name, force, full_paths,
+				      &cwdlen, &cwd);
+	if (ret)
+		return ret;
 
 	dump_printf("      IP events: %10ld\n", total);
 	dump_printf("    mmap events: %10ld\n", total_mmap);
 	dump_printf("    comm events: %10ld\n", total_comm);
 	dump_printf("    fork events: %10ld\n", total_fork);
 	dump_printf("    lost events: %10ld\n", total_lost);
-	dump_printf(" unknown events: %10ld\n", total_unknown);
+	dump_printf(" unknown events: %10ld\n", file_handler.total_unknown);
 
 	if (dump_trace)
 		return 0;
 
-	if (verbose >= 3)
+	if (verbose > 3)
 		threads__fprintf(stdout, &threads);
 
-	if (verbose >= 2)
+	if (verbose > 2)
 		dsos__fprintf(stdout);
 
 	collapse__resort();
@@ -1550,7 +893,7 @@
 	if (show_threads)
 		perf_read_values_destroy(&show_threads_values);
 
-	return rc;
+	return ret;
 }
 
 static int
@@ -1606,7 +949,8 @@
 	return 0;
 }
 
-static const char * const report_usage[] = {
+const char * const report_usage[] = {
 	"perf report [<options>] <command>",
 	NULL
 };
@@ -1692,8 +1036,6 @@
 {
 	symbol__init();
 
-	page_size = getpagesize();
-
 	argc = parse_options(argc, argv, options, report_usage, 0);
 
 	setup_sorting();
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index ce2d5be..62585b2 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -11,6 +11,7 @@
 #include "util/trace-event.h"
 
 #include "util/debug.h"
+#include "util/data_map.h"
 
 #include <sys/types.h>
 #include <sys/prctl.h>
@@ -20,9 +21,6 @@
 #include <math.h>
 
 static char			const *input_name = "perf.data";
-static int			input;
-static unsigned long		page_size;
-static unsigned long		mmap_window = 32;
 
 static unsigned long		total_comm = 0;
 
@@ -35,6 +33,11 @@
 static char			default_sort_order[] = "avg, max, switch, runtime";
 static char			*sort_order = default_sort_order;
 
+static int			profile_cpu = -1;
+
+static char			*cwd;
+static int			cwdlen;
+
 #define PR_SET_NAME		15               /* Set process name */
 #define MAX_CPUS		4096
 
@@ -74,6 +77,7 @@
 	SCHED_EVENT_RUN,
 	SCHED_EVENT_SLEEP,
 	SCHED_EVENT_WAKEUP,
+	SCHED_EVENT_MIGRATION,
 };
 
 struct sched_atom {
@@ -398,6 +402,8 @@
 				ret = sem_post(atom->wait_sem);
 			BUG_ON(ret);
 			break;
+		case SCHED_EVENT_MIGRATION:
+			break;
 		default:
 			BUG_ON(1);
 	}
@@ -637,7 +643,7 @@
 {
 	struct thread *thread;
 
-	thread = threads__findnew(event->comm.pid, &threads, &last_match);
+	thread = threads__findnew(event->comm.tid, &threads, &last_match);
 
 	dump_printf("%p [%p]: perf_event_comm: %s:%d\n",
 		(void *)(offset + head),
@@ -745,6 +751,22 @@
 	u32 child_pid;
 };
 
+struct trace_migrate_task_event {
+	u32 size;
+
+	u16 common_type;
+	u8 common_flags;
+	u8 common_preempt_count;
+	u32 common_pid;
+	u32 common_tgid;
+
+	char comm[16];
+	u32 pid;
+
+	u32 prio;
+	u32 cpu;
+};
+
 struct trace_sched_handler {
 	void (*switch_event)(struct trace_switch_event *,
 			     struct event *,
@@ -769,6 +791,12 @@
 			   int cpu,
 			   u64 timestamp,
 			   struct thread *thread);
+
+	void (*migrate_task_event)(struct trace_migrate_task_event *,
+			   struct event *,
+			   int cpu,
+			   u64 timestamp,
+			   struct thread *thread);
 };
 
 
@@ -1139,7 +1167,12 @@
 
 	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
 
-	if (atom->state != THREAD_SLEEPING)
+	/*
+	 * You WILL be missing events if you've recorded only
+	 * one CPU, or are only looking at one, so don't
+	 * make useless noise.
+	 */
+	if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
 		nr_state_machine_bugs++;
 
 	nr_timestamps++;
@@ -1152,11 +1185,51 @@
 	atom->wake_up_time = timestamp;
 }
 
+static void
+latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
+		     struct event *__event __used,
+		     int cpu __used,
+		     u64 timestamp,
+		     struct thread *thread __used)
+{
+	struct work_atoms *atoms;
+	struct work_atom *atom;
+	struct thread *migrant;
+
+	/*
+	 * Only need to worry about migration when profiling one CPU.
+	 */
+	if (profile_cpu == -1)
+		return;
+
+	migrant = threads__findnew(migrate_task_event->pid, &threads, &last_match);
+	atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
+	if (!atoms) {
+		thread_atoms_insert(migrant);
+		register_pid(migrant->pid, migrant->comm);
+		atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
+		if (!atoms)
+			die("migration-event: Internal tree error");
+		add_sched_out_event(atoms, 'R', timestamp);
+	}
+
+	BUG_ON(list_empty(&atoms->work_list));
+
+	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
+	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
+
+	nr_timestamps++;
+
+	if (atom->sched_out_time > timestamp)
+		nr_unordered_timestamps++;
+}
+
 static struct trace_sched_handler lat_ops  = {
 	.wakeup_event		= latency_wakeup_event,
 	.switch_event		= latency_switch_event,
 	.runtime_event		= latency_runtime_event,
 	.fork_event		= latency_fork_event,
+	.migrate_task_event	= latency_migrate_task_event,
 };
 
 static void output_lat_thread(struct work_atoms *work_list)
@@ -1517,6 +1590,26 @@
 }
 
 static void
+process_sched_migrate_task_event(struct raw_event_sample *raw,
+			   struct event *event,
+			   int cpu __used,
+			   u64 timestamp __used,
+			   struct thread *thread __used)
+{
+	struct trace_migrate_task_event migrate_task_event;
+
+	FILL_COMMON_FIELDS(migrate_task_event, event, raw->data);
+
+	FILL_ARRAY(migrate_task_event, comm, event, raw->data);
+	FILL_FIELD(migrate_task_event, pid, event, raw->data);
+	FILL_FIELD(migrate_task_event, prio, event, raw->data);
+	FILL_FIELD(migrate_task_event, cpu, event, raw->data);
+
+	if (trace_handler->migrate_task_event)
+		trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread);
+}
+
+static void
 process_raw_event(event_t *raw_event __used, void *more_data,
 		  int cpu, u64 timestamp, struct thread *thread)
 {
@@ -1539,21 +1632,22 @@
 		process_sched_fork_event(raw, event, cpu, timestamp, thread);
 	if (!strcmp(event->name, "sched_process_exit"))
 		process_sched_exit_event(event, cpu, timestamp, thread);
+	if (!strcmp(event->name, "sched_migrate_task"))
+		process_sched_migrate_task_event(raw, event, cpu, timestamp, thread);
 }
 
 static int
 process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 {
-	char level;
-	int show = 0;
-	struct dso *dso = NULL;
 	struct thread *thread;
 	u64 ip = event->ip.ip;
 	u64 timestamp = -1;
 	u32 cpu = -1;
 	u64 period = 1;
 	void *more_data = event->ip.__more_data;
-	int cpumode;
+
+	if (!(sample_type & PERF_SAMPLE_RAW))
+		return 0;
 
 	thread = threads__findnew(event->ip.pid, &threads, &last_match);
 
@@ -1589,161 +1683,52 @@
 		return -1;
 	}
 
-	cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+	if (profile_cpu != -1 && profile_cpu != (int) cpu)
+		return 0;
 
-	if (cpumode == PERF_RECORD_MISC_KERNEL) {
-		show = SHOW_KERNEL;
-		level = 'k';
-
-		dso = kernel_dso;
-
-		dump_printf(" ...... dso: %s\n", dso->name);
-
-	} else if (cpumode == PERF_RECORD_MISC_USER) {
-
-		show = SHOW_USER;
-		level = '.';
-
-	} else {
-		show = SHOW_HV;
-		level = 'H';
-
-		dso = hypervisor_dso;
-
-		dump_printf(" ...... dso: [hypervisor]\n");
-	}
-
-	if (sample_type & PERF_SAMPLE_RAW)
-		process_raw_event(event, more_data, cpu, timestamp, thread);
+	process_raw_event(event, more_data, cpu, timestamp, thread);
 
 	return 0;
 }
 
 static int
-process_event(event_t *event, unsigned long offset, unsigned long head)
+process_lost_event(event_t *event __used,
+		   unsigned long offset __used,
+		   unsigned long head __used)
 {
-	trace_event(event);
+	nr_lost_chunks++;
+	nr_lost_events += event->lost.lost;
 
-	nr_events++;
-	switch (event->header.type) {
-	case PERF_RECORD_MMAP:
-		return 0;
-	case PERF_RECORD_LOST:
-		nr_lost_chunks++;
-		nr_lost_events += event->lost.lost;
-		return 0;
+	return 0;
+}
 
-	case PERF_RECORD_COMM:
-		return process_comm_event(event, offset, head);
+static int sample_type_check(u64 type)
+{
+	sample_type = type;
 
-	case PERF_RECORD_EXIT ... PERF_RECORD_READ:
-		return 0;
-
-	case PERF_RECORD_SAMPLE:
-		return process_sample_event(event, offset, head);
-
-	case PERF_RECORD_MAX:
-	default:
+	if (!(sample_type & PERF_SAMPLE_RAW)) {
+		fprintf(stderr,
+			"No trace sample to read. Did you call perf record "
+			"without -R?\n");
 		return -1;
 	}
 
 	return 0;
 }
 
+static struct perf_file_handler file_handler = {
+	.process_sample_event	= process_sample_event,
+	.process_comm_event	= process_comm_event,
+	.process_lost_event	= process_lost_event,
+	.sample_type_check	= sample_type_check,
+};
+
 static int read_events(void)
 {
-	int ret, rc = EXIT_FAILURE;
-	unsigned long offset = 0;
-	unsigned long head = 0;
-	struct stat perf_stat;
-	event_t *event;
-	uint32_t size;
-	char *buf;
-
-	trace_report();
 	register_idle_thread(&threads, &last_match);
+	register_perf_file_handler(&file_handler);
 
-	input = open(input_name, O_RDONLY);
-	if (input < 0) {
-		perror("failed to open file");
-		exit(-1);
-	}
-
-	ret = fstat(input, &perf_stat);
-	if (ret < 0) {
-		perror("failed to stat file");
-		exit(-1);
-	}
-
-	if (!perf_stat.st_size) {
-		fprintf(stderr, "zero-sized file, nothing to do!\n");
-		exit(0);
-	}
-	header = perf_header__read(input);
-	head = header->data_offset;
-	sample_type = perf_header__sample_type(header);
-
-	if (!(sample_type & PERF_SAMPLE_RAW))
-		die("No trace sample to read. Did you call perf record "
-		    "without -R?");
-
-	if (load_kernel() < 0) {
-		perror("failed to load kernel symbols");
-		return EXIT_FAILURE;
-	}
-
-remap:
-	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
-			   MAP_SHARED, input, offset);
-	if (buf == MAP_FAILED) {
-		perror("failed to mmap file");
-		exit(-1);
-	}
-
-more:
-	event = (event_t *)(buf + head);
-
-	size = event->header.size;
-	if (!size)
-		size = 8;
-
-	if (head + event->header.size >= page_size * mmap_window) {
-		unsigned long shift = page_size * (head / page_size);
-		int res;
-
-		res = munmap(buf, page_size * mmap_window);
-		assert(res == 0);
-
-		offset += shift;
-		head -= shift;
-		goto remap;
-	}
-
-	size = event->header.size;
-
-
-	if (!size || process_event(event, offset, head) < 0) {
-
-		/*
-		 * assume we lost track of the stream, check alignment, and
-		 * increment a single u64 in the hope to catch on again 'soon'.
-		 */
-
-		if (unlikely(head & 7))
-			head &= ~7ULL;
-
-		size = 8;
-	}
-
-	head += size;
-
-	if (offset + head < (unsigned long)perf_stat.st_size)
-		goto more;
-
-	rc = EXIT_SUCCESS;
-	close(input);
-
-	return rc;
+	return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd);
 }
 
 static void print_bad_events(void)
@@ -1883,6 +1868,8 @@
 		   "sort by key(s): runtime, switch, avg, max"),
 	OPT_BOOLEAN('v', "verbose", &verbose,
 		    "be more verbose (show symbol address, etc)"),
+	OPT_INTEGER('C', "CPU", &profile_cpu,
+		    "CPU to profile on"),
 	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
 		    "dump raw trace in ASCII"),
 	OPT_END()
@@ -1961,7 +1948,6 @@
 int cmd_sched(int argc, const char **argv, const char *prefix __used)
 {
 	symbol__init();
-	page_size = getpagesize();
 
 	argc = parse_options(argc, argv, sched_options, sched_usage,
 			     PARSE_OPT_STOP_AT_NON_OPTION);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 37512e9..c0f69e8 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -22,6 +22,7 @@
 
 #include "util/symbol.h"
 #include "util/color.h"
+#include "util/thread.h"
 #include "util/util.h"
 #include <linux/rbtree.h>
 #include "util/parse-options.h"
@@ -54,26 +55,26 @@
 
 static int			fd[MAX_NR_CPUS][MAX_COUNTERS];
 
-static int			system_wide			=  0;
+static int			system_wide			=      0;
 
-static int			default_interval		= 100000;
+static int			default_interval		=      0;
 
-static int			count_filter			=  5;
-static int			print_entries			= 15;
+static int			count_filter			=      5;
+static int			print_entries			=     15;
 
-static int			target_pid			= -1;
-static int			inherit				=  0;
-static int			profile_cpu			= -1;
-static int			nr_cpus				=  0;
-static unsigned int		realtime_prio			=  0;
-static int			group				=  0;
+static int			target_pid			=     -1;
+static int			inherit				=      0;
+static int			profile_cpu			=     -1;
+static int			nr_cpus				=      0;
+static unsigned int		realtime_prio			=      0;
+static int			group				=      0;
 static unsigned int		page_size;
-static unsigned int		mmap_pages			= 16;
-static int			freq				=  0;
+static unsigned int		mmap_pages			=     16;
+static int			freq				=   1000; /* 1 KHz */
 
-static int			delay_secs			=  2;
-static int			zero;
-static int			dump_symtab;
+static int			delay_secs			=      2;
+static int			zero                            =      0;
+static int			dump_symtab                     =      0;
 
 /*
  * Source
@@ -86,19 +87,16 @@
 	struct source_line	*next;
 };
 
-static char			*sym_filter			=  NULL;
-struct sym_entry		*sym_filter_entry		=  NULL;
-static int			sym_pcnt_filter			=  5;
-static int			sym_counter			=  0;
-static int			display_weighted		= -1;
+static char			*sym_filter			=   NULL;
+struct sym_entry		*sym_filter_entry		=   NULL;
+static int			sym_pcnt_filter			=      5;
+static int			sym_counter			=      0;
+static int			display_weighted		=     -1;
 
 /*
  * Symbols
  */
 
-static u64			min_ip;
-static u64			max_ip = -1ll;
-
 struct sym_entry {
 	struct rb_node		rb_node;
 	struct list_head	node;
@@ -106,6 +104,7 @@
 	unsigned long		snap_count;
 	double			weight;
 	int			skip;
+	struct map		*map;
 	struct source_line	*source;
 	struct source_line	*lines;
 	struct source_line	**lines_tail;
@@ -119,12 +118,11 @@
 static void parse_source(struct sym_entry *syme)
 {
 	struct symbol *sym;
-	struct module *module;
-	struct section *section = NULL;
+	struct map *map;
 	FILE *file;
 	char command[PATH_MAX*2];
-	const char *path = vmlinux_name;
-	u64 start, end, len;
+	const char *path;
+	u64 len;
 
 	if (!syme)
 		return;
@@ -135,27 +133,15 @@
 	}
 
 	sym = (struct symbol *)(syme + 1);
-	module = sym->module;
+	map = syme->map;
+	path = map->dso->long_name;
 
-	if (module)
-		path = module->path;
-	if (!path)
-		return;
-
-	start = sym->obj_start;
-	if (!start)
-		start = sym->start;
-
-	if (module) {
-		section = module->sections->find_section(module->sections, ".text");
-		if (section)
-			start -= section->vma;
-	}
-
-	end = start + sym->end - sym->start + 1;
 	len = sym->end - sym->start;
 
-	sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s", start, end, path);
+	sprintf(command,
+		"objdump --start-address=0x%016Lx "
+			 "--stop-address=0x%016Lx -dS %s",
+		sym->start, sym->end, path);
 
 	file = popen(command, "r");
 	if (!file)
@@ -187,13 +173,11 @@
 
 		if (strlen(src->line)>8 && src->line[8] == ':') {
 			src->eip = strtoull(src->line, NULL, 16);
-			if (section)
-				src->eip += section->vma;
+			src->eip += map->start;
 		}
 		if (strlen(src->line)>8 && src->line[16] == ':') {
 			src->eip = strtoull(src->line, NULL, 16);
-			if (section)
-				src->eip += section->vma;
+			src->eip += map->start;
 		}
 	}
 	pclose(file);
@@ -245,16 +229,9 @@
 	struct symbol *symbol = (struct symbol *)(syme + 1);
 	struct source_line *line;
 	char pattern[PATH_MAX];
-	char *idx;
 
 	sprintf(pattern, "<%s>:", symbol->name);
 
-	if (symbol->module) {
-		idx = strstr(pattern, "\t");
-		if (idx)
-			*idx = 0;
-	}
-
 	pthread_mutex_lock(&syme->source_lock);
 	for (line = syme->lines; line; line = line->next) {
 		if (strstr(line->line, pattern)) {
@@ -516,8 +493,8 @@
 		if (verbose)
 			printf(" - %016llx", sym->start);
 		printf(" : %s", sym->name);
-		if (sym->module)
-			printf("\t[%s]", sym->module->name);
+		if (syme->map->dso->name[0] == '[')
+			printf(" \t%s", syme->map->dso->name);
 		printf("\n");
 	}
 }
@@ -788,7 +765,7 @@
 	NULL
 };
 
-static int symbol_filter(struct dso *self, struct symbol *sym)
+static int symbol_filter(struct map *map, struct symbol *sym)
 {
 	struct sym_entry *syme;
 	const char *name = sym->name;
@@ -810,7 +787,8 @@
 	    strstr(name, "_text_end"))
 		return 1;
 
-	syme = dso__sym_priv(self, sym);
+	syme = dso__sym_priv(map->dso, sym);
+	syme->map = map;
 	pthread_mutex_init(&syme->source_lock, NULL);
 	if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter))
 		sym_filter_entry = syme;
@@ -827,34 +805,14 @@
 
 static int parse_symbols(void)
 {
-	struct rb_node *node;
-	struct symbol  *sym;
-	int use_modules = vmlinux_name ? 1 : 0;
-
-	kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry));
-	if (kernel_dso == NULL)
+	if (dsos__load_kernel(vmlinux_name, sizeof(struct sym_entry),
+			      symbol_filter, verbose, 1) <= 0)
 		return -1;
 
-	if (dso__load_kernel(kernel_dso, vmlinux_name, symbol_filter, verbose, use_modules) <= 0)
-		goto out_delete_dso;
-
-	node = rb_first(&kernel_dso->syms);
-	sym = rb_entry(node, struct symbol, rb_node);
-	min_ip = sym->start;
-
-	node = rb_last(&kernel_dso->syms);
-	sym = rb_entry(node, struct symbol, rb_node);
-	max_ip = sym->end;
-
 	if (dump_symtab)
-		dso__fprintf(kernel_dso, stderr);
+		dsos__fprintf(stderr);
 
 	return 0;
-
-out_delete_dso:
-	dso__delete(kernel_dso);
-	kernel_dso = NULL;
-	return -1;
 }
 
 /*
@@ -862,10 +820,11 @@
  */
 static void record_ip(u64 ip, int counter)
 {
-	struct symbol *sym = dso__find_symbol(kernel_dso, ip);
+	struct map *map;
+	struct symbol *sym = kernel_maps__find_symbol(ip, &map);
 
 	if (sym != NULL) {
-		struct sym_entry *syme = dso__sym_priv(kernel_dso, sym);
+		struct sym_entry *syme = dso__sym_priv(map->dso, sym);
 
 		if (!syme->skip) {
 			syme->count[counter]++;
@@ -1016,7 +975,13 @@
 	attr = attrs + counter;
 
 	attr->sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
-	attr->freq		= freq;
+
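+	/*
+	 * Frequency mode: let the kernel pick the sampling period to hit
+	 * 'freq' samples/sec, and include the period in each sample.
+	 */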
+	if (freq) {
+		attr->sample_type	|= PERF_SAMPLE_PERIOD;
+		attr->freq		= 1;
+		attr->sample_freq	= freq;
+	}
+
 	attr->inherit		= (cpu < 0) && inherit;
 
 try_again:
@@ -1171,11 +1136,6 @@
 	if (argc)
 		usage_with_options(top_usage, options);
 
-	if (freq) {
-		default_interval = freq;
-		freq = 1;
-	}
-
 	/* CPU and PID are mutually exclusive */
 	if (target_pid != -1 && profile_cpu != -1) {
 		printf("WARNING: PID switch overriding CPU\n");
@@ -1192,6 +1152,19 @@
 	parse_symbols();
 	parse_source(sym_filter_entry);
 
+
+	 * User specified count overrides default frequency.
+	 */
+	if (default_interval)
+		freq = 0;
+	else if (freq) {
+		default_interval = freq;
+	} else {
+		fprintf(stderr, "frequency and count are zero, aborting\n");
+		exit(EXIT_FAILURE);
+	}
+
 	/*
 	 * Fill in the ones not specifically initialized via -c:
 	 */
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 0c5e4f7..fb3f3c2 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -12,11 +12,9 @@
 #include "util/debug.h"
 
 #include "util/trace-event.h"
+#include "util/data_map.h"
 
 static char		const *input_name = "perf.data";
-static int		input;
-static unsigned long	page_size;
-static unsigned long	mmap_window = 32;
 
 static unsigned long	total = 0;
 static unsigned long	total_comm = 0;
@@ -27,6 +25,9 @@
 static struct perf_header *header;
 static u64		sample_type;
 
+static char		*cwd;
+static int		cwdlen;
+
 
 static int
 process_comm_event(event_t *event, unsigned long offset, unsigned long head)
@@ -53,16 +54,12 @@
 static int
 process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 {
-	char level;
-	int show = 0;
-	struct dso *dso = NULL;
 	struct thread *thread;
 	u64 ip = event->ip.ip;
 	u64 timestamp = -1;
 	u32 cpu = -1;
 	u64 period = 1;
 	void *more_data = event->ip.__more_data;
-	int cpumode;
 
 	thread = threads__findnew(event->ip.pid, &threads, &last_match);
 
@@ -98,30 +95,6 @@
 		return -1;
 	}
 
-	cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
-
-	if (cpumode == PERF_RECORD_MISC_KERNEL) {
-		show = SHOW_KERNEL;
-		level = 'k';
-
-		dso = kernel_dso;
-
-		dump_printf(" ...... dso: %s\n", dso->name);
-
-	} else if (cpumode == PERF_RECORD_MISC_USER) {
-
-		show = SHOW_USER;
-		level = '.';
-
-	} else {
-		show = SHOW_HV;
-		level = 'H';
-
-		dso = hypervisor_dso;
-
-		dump_printf(" ...... dso: [hypervisor]\n");
-	}
-
 	if (sample_type & PERF_SAMPLE_RAW) {
 		struct {
 			u32 size;
@@ -140,121 +113,32 @@
 	return 0;
 }
 
-static int
-process_event(event_t *event, unsigned long offset, unsigned long head)
+static int sample_type_check(u64 type)
 {
-	trace_event(event);
+	sample_type = type;
 
-	switch (event->header.type) {
-	case PERF_RECORD_MMAP ... PERF_RECORD_LOST:
-		return 0;
-
-	case PERF_RECORD_COMM:
-		return process_comm_event(event, offset, head);
-
-	case PERF_RECORD_EXIT ... PERF_RECORD_READ:
-		return 0;
-
-	case PERF_RECORD_SAMPLE:
-		return process_sample_event(event, offset, head);
-
-	case PERF_RECORD_MAX:
-	default:
+	if (!(sample_type & PERF_SAMPLE_RAW)) {
+		fprintf(stderr,
+			"No trace sample to read. Did you call perf record "
+			"without -R?\n");
 		return -1;
 	}
 
 	return 0;
 }
 
+static struct perf_file_handler file_handler = {
+	.process_sample_event	= process_sample_event,
+	.process_comm_event	= process_comm_event,
+	.sample_type_check	= sample_type_check,
+};
+
 static int __cmd_trace(void)
 {
-	int ret, rc = EXIT_FAILURE;
-	unsigned long offset = 0;
-	unsigned long head = 0;
-	struct stat perf_stat;
-	event_t *event;
-	uint32_t size;
-	char *buf;
-
-	trace_report();
 	register_idle_thread(&threads, &last_match);
+	register_perf_file_handler(&file_handler);
 
-	input = open(input_name, O_RDONLY);
-	if (input < 0) {
-		perror("failed to open file");
-		exit(-1);
-	}
-
-	ret = fstat(input, &perf_stat);
-	if (ret < 0) {
-		perror("failed to stat file");
-		exit(-1);
-	}
-
-	if (!perf_stat.st_size) {
-		fprintf(stderr, "zero-sized file, nothing to do!\n");
-		exit(0);
-	}
-	header = perf_header__read(input);
-	head = header->data_offset;
-	sample_type = perf_header__sample_type(header);
-
-	if (!(sample_type & PERF_SAMPLE_RAW))
-		die("No trace sample to read. Did you call perf record "
-		    "without -R?");
-
-	if (load_kernel() < 0) {
-		perror("failed to load kernel symbols");
-		return EXIT_FAILURE;
-	}
-
-remap:
-	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
-			   MAP_SHARED, input, offset);
-	if (buf == MAP_FAILED) {
-		perror("failed to mmap file");
-		exit(-1);
-	}
-
-more:
-	event = (event_t *)(buf + head);
-
-	if (head + event->header.size >= page_size * mmap_window) {
-		unsigned long shift = page_size * (head / page_size);
-		int res;
-
-		res = munmap(buf, page_size * mmap_window);
-		assert(res == 0);
-
-		offset += shift;
-		head -= shift;
-		goto remap;
-	}
-
-	size = event->header.size;
-
-	if (!size || process_event(event, offset, head) < 0) {
-
-		/*
-		 * assume we lost track of the stream, check alignment, and
-		 * increment a single u64 in the hope to catch on again 'soon'.
-		 */
-
-		if (unlikely(head & 7))
-			head &= ~7ULL;
-
-		size = 8;
-	}
-
-	head += size;
-
-	if (offset + head < (unsigned long)perf_stat.st_size)
-		goto more;
-
-	rc = EXIT_SUCCESS;
-	close(input);
-
-	return rc;
+	return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd);
 }
 
 static const char * const annotate_usage[] = {
@@ -273,7 +157,6 @@
 int cmd_trace(int argc, const char **argv, const char *prefix __used)
 {
 	symbol__init();
-	page_size = getpagesize();
 
 	argc = parse_options(argc, argv, options, annotate_usage, 0);
 	if (argc) {
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 6f8ea9d..f26172c 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -1,5 +1,5 @@
-#ifndef CACHE_H
-#define CACHE_H
+#ifndef __PERF_CACHE_H
+#define __PERF_CACHE_H
 
 #include "util.h"
 #include "strbuf.h"
@@ -117,4 +117,4 @@
 
 extern size_t strlcpy(char *dest, const char *src, size_t size);
 
-#endif /* CACHE_H */
+#endif /* __PERF_CACHE_H */
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 43cf3ea..ad4626d 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -58,4 +58,4 @@
 int register_callchain_param(struct callchain_param *param);
 void append_chain(struct callchain_node *root, struct ip_callchain *chain,
 		  struct symbol **syms);
-#endif
+#endif	/* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
index 58d5975..24e8809 100644
--- a/tools/perf/util/color.h
+++ b/tools/perf/util/color.h
@@ -1,5 +1,5 @@
-#ifndef COLOR_H
-#define COLOR_H
+#ifndef __PERF_COLOR_H
+#define __PERF_COLOR_H
 
 /* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */
 #define COLOR_MAXLEN 24
@@ -39,4 +39,4 @@
 int percent_color_fprintf(FILE *fp, const char *fmt, double percent);
 const char *get_percent_color(double percent);
 
-#endif /* COLOR_H */
+#endif /* __PERF_COLOR_H */
diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c
new file mode 100644
index 0000000..242b055
--- /dev/null
+++ b/tools/perf/util/data_map.c
@@ -0,0 +1,222 @@
+#include "data_map.h"
+#include "symbol.h"
+#include "util.h"
+#include "debug.h"
+
+
+static struct perf_file_handler *curr_handler;
+static unsigned long	mmap_window = 32;
+static char		__cwd[PATH_MAX];
+
+static int
+process_event_stub(event_t *event __used,
+		   unsigned long offset __used,
+		   unsigned long head __used)
+{
+	return 0;
+}
+
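+/*
+ * Install the command's callbacks; any handler left NULL is replaced
+ * with a no-op stub so process_event() can dispatch unconditionally.
+ */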
+void register_perf_file_handler(struct perf_file_handler *handler)
+{
+	if (!handler->process_sample_event)
+		handler->process_sample_event = process_event_stub;
+	if (!handler->process_mmap_event)
+		handler->process_mmap_event = process_event_stub;
+	if (!handler->process_comm_event)
+		handler->process_comm_event = process_event_stub;
+	if (!handler->process_fork_event)
+		handler->process_fork_event = process_event_stub;
+	if (!handler->process_exit_event)
+		handler->process_exit_event = process_event_stub;
+	if (!handler->process_lost_event)
+		handler->process_lost_event = process_event_stub;
+	if (!handler->process_read_event)
+		handler->process_read_event = process_event_stub;
+	if (!handler->process_throttle_event)
+		handler->process_throttle_event = process_event_stub;
+	if (!handler->process_unthrottle_event)
+		handler->process_unthrottle_event = process_event_stub;
+
+	curr_handler = handler;
+}
+
+static int
+process_event(event_t *event, unsigned long offset, unsigned long head)
+{
+	trace_event(event);
+
+	switch (event->header.type) {
+	case PERF_RECORD_SAMPLE:
+		return curr_handler->process_sample_event(event, offset, head);
+	case PERF_RECORD_MMAP:
+		return curr_handler->process_mmap_event(event, offset, head);
+	case PERF_RECORD_COMM:
+		return curr_handler->process_comm_event(event, offset, head);
+	case PERF_RECORD_FORK:
+		return curr_handler->process_fork_event(event, offset, head);
+	case PERF_RECORD_EXIT:
+		return curr_handler->process_exit_event(event, offset, head);
+	case PERF_RECORD_LOST:
+		return curr_handler->process_lost_event(event, offset, head);
+	case PERF_RECORD_READ:
+		return curr_handler->process_read_event(event, offset, head);
+	case PERF_RECORD_THROTTLE:
+		return curr_handler->process_throttle_event(event, offset, head);
+	case PERF_RECORD_UNTHROTTLE:
+		return curr_handler->process_unthrottle_event(event, offset, head);
+	default:
+		curr_handler->total_unknown++;
+		return -1;
+	}
+}
+
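+/*
+ * Open and sanity-check the perf.data file, read its header, then mmap
+ * the data in sliding windows and feed each record to the registered
+ * handler.
+ */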
+int mmap_dispatch_perf_file(struct perf_header **pheader,
+			    const char *input_name,
+			    int force,
+			    int full_paths,
+			    int *cwdlen,
+			    char **cwd)
+{
+	int ret, rc = EXIT_FAILURE;
+	struct perf_header *header;
+	unsigned long head, shift;
+	unsigned long offset = 0;
+	struct stat input_stat;
+	size_t	page_size;
+	u64 sample_type;
+	event_t *event;
+	uint32_t size;
+	int input;
+	char *buf;
+
+	if (!curr_handler)
+		die("Forgot to register perf file handler");
+
+	page_size = getpagesize();
+
+	input = open(input_name, O_RDONLY);
+	if (input < 0) {
+		fprintf(stderr, " failed to open file: %s", input_name);
+		if (!strcmp(input_name, "perf.data"))
+			fprintf(stderr, "  (try 'perf record' first)");
+		fprintf(stderr, "\n");
+		exit(-1);
+	}
+
+	ret = fstat(input, &input_stat);
+	if (ret < 0) {
+		perror("failed to stat file");
+		exit(-1);
+	}
+
+	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
+		fprintf(stderr, "file: %s not owned by current user or root\n",
+			input_name);
+		exit(-1);
+	}
+
+	if (!input_stat.st_size) {
+		fprintf(stderr, "zero-sized file, nothing to do!\n");
+		exit(0);
+	}
+
+	*pheader = perf_header__read(input);
+	header = *pheader;
+	head = header->data_offset;
+
+	sample_type = perf_header__sample_type(header);
+
+	if (curr_handler->sample_type_check)
+		if (curr_handler->sample_type_check(sample_type) < 0)
+			exit(-1);
+
+	if (load_kernel() < 0) {
+		perror("failed to load kernel symbols");
+		return EXIT_FAILURE;
+	}
+
+	if (!full_paths) {
+		if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
+			perror("failed to get the current directory");
+			return EXIT_FAILURE;
+		}
+		*cwd = __cwd;
+		*cwdlen = strlen(*cwd);
+	} else {
+		*cwd = NULL;
+		*cwdlen = 0;
+	}
+
+	shift = page_size * (head / page_size);
+	offset += shift;
+	head -= shift;
+
+remap:
+	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
+			   MAP_SHARED, input, offset);
+	if (buf == MAP_FAILED) {
+		perror("failed to mmap file");
+		exit(-1);
+	}
+
+more:
+	event = (event_t *)(buf + head);
+
+	size = event->header.size;
+	if (!size)
+		size = 8;
+
+	if (head + event->header.size >= page_size * mmap_window) {
+		int munmap_ret;
+
+		shift = page_size * (head / page_size);
+
+		munmap_ret = munmap(buf, page_size * mmap_window);
+		assert(munmap_ret == 0);
+
+		offset += shift;
+		head -= shift;
+		goto remap;
+	}
+
+	size = event->header.size;
+
+	dump_printf("\n%p [%p]: event: %d\n",
+			(void *)(offset + head),
+			(void *)(long)event->header.size,
+			event->header.type);
+
+	if (!size || process_event(event, offset, head) < 0) {
+
+		dump_printf("%p [%p]: skipping unknown header type: %d\n",
+			(void *)(offset + head),
+			(void *)(long)(event->header.size),
+			event->header.type);
+
+		/*
+		 * assume we lost track of the stream, check alignment, and
+		 * increment a single u64 in the hope to catch on again 'soon'.
+		 */
+
+		if (unlikely(head & 7))
+			head &= ~7ULL;
+
+		size = 8;
+	}
+
+	head += size;
+
+	if (offset + head >= header->data_offset + header->data_size)
+		goto done;
+
+	if (offset + head < (unsigned long)input_stat.st_size)
+		goto more;
+
+done:
+	rc = EXIT_SUCCESS;
+	close(input);
+
+	return rc;
+}
diff --git a/tools/perf/util/data_map.h b/tools/perf/util/data_map.h
new file mode 100644
index 0000000..716d105
--- /dev/null
+++ b/tools/perf/util/data_map.h
@@ -0,0 +1,31 @@
+#ifndef __PERF_DATAMAP_H
+#define __PERF_DATAMAP_H
+
+#include "event.h"
+#include "header.h"
+
+typedef int (*event_type_handler_t)(event_t *, unsigned long, unsigned long);
+
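+/*
+ * Per-record-type callbacks a builtin registers via
+ * register_perf_file_handler() before calling mmap_dispatch_perf_file().
+ * Unset callbacks default to a no-op stub.
+ */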
+struct perf_file_handler {
+	event_type_handler_t	process_sample_event;
+	event_type_handler_t	process_mmap_event;
+	event_type_handler_t	process_comm_event;
+	event_type_handler_t	process_fork_event;
+	event_type_handler_t	process_exit_event;
+	event_type_handler_t	process_lost_event;
+	event_type_handler_t	process_read_event;
+	event_type_handler_t	process_throttle_event;
+	event_type_handler_t	process_unthrottle_event;
+	int			(*sample_type_check)(u64 sample_type);
+	unsigned long		total_unknown;
+};
+
+void register_perf_file_handler(struct perf_file_handler *handler);
+int mmap_dispatch_perf_file(struct perf_header **pheader,
+			    const char *input_name,
+			    int force,
+			    int full_paths,
+			    int *cwdlen,
+			    char **cwd);
+
+#endif /* __PERF_DATAMAP_H */
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index 437eea5..02d1fa1 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -1,4 +1,6 @@
 /* For debugging general purposes */
+#ifndef __PERF_DEBUG_H
+#define __PERF_DEBUG_H
 
 extern int verbose;
 extern int dump_trace;
@@ -6,3 +8,5 @@
 int eprintf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
 int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
 void trace_event(event_t *event);
+
+#endif	/* __PERF_DEBUG_H */
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 2c9c26d6..c2e62be 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -1,14 +1,10 @@
 #ifndef __PERF_RECORD_H
 #define __PERF_RECORD_H
+
 #include "../perf.h"
 #include "util.h"
 #include <linux/list.h>
-
-enum {
-	SHOW_KERNEL	= 1,
-	SHOW_USER	= 2,
-	SHOW_HV		= 4,
-};
+#include <linux/rbtree.h>
 
 /*
  * PERF_SAMPLE_IP | PERF_SAMPLE_TID | *
@@ -78,7 +74,10 @@
 } event_t;
 
 struct map {
-	struct list_head	node;
+	union {
+		struct rb_node	rb_node;
+		struct list_head node;
+	};
 	u64			start;
 	u64			end;
 	u64			pgoff;
@@ -101,4 +100,4 @@
 int map__overlap(struct map *l, struct map *r);
 size_t map__fprintf(struct map *self, FILE *fp);
 
-#endif
+#endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/exec_cmd.h b/tools/perf/util/exec_cmd.h
index effe25e..31647ac 100644
--- a/tools/perf/util/exec_cmd.h
+++ b/tools/perf/util/exec_cmd.h
@@ -1,5 +1,5 @@
-#ifndef PERF_EXEC_CMD_H
-#define PERF_EXEC_CMD_H
+#ifndef __PERF_EXEC_CMD_H
+#define __PERF_EXEC_CMD_H
 
 extern void perf_set_argv_exec_path(const char *exec_path);
 extern const char *perf_extract_argv0_path(const char *path);
@@ -10,4 +10,4 @@
 extern int execl_perf_cmd(const char *cmd, ...);
 extern const char *system_path(const char *path);
 
-#endif /* PERF_EXEC_CMD_H */
+#endif /* __PERF_EXEC_CMD_H */
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index e306857..9aae360 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -5,6 +5,8 @@
 
 #include "util.h"
 #include "header.h"
+#include "../perf.h"
+#include "trace-event.h"
 
 /*
  * Create new perf.data header attribute:
@@ -62,6 +64,8 @@
 
 	self->data_offset = 0;
 	self->data_size = 0;
+	self->trace_info_offset = 0;
+	self->trace_info_size = 0;
 
 	return self;
 }
@@ -145,8 +149,16 @@
 	struct perf_file_section	attrs;
 	struct perf_file_section	data;
 	struct perf_file_section	event_types;
+	struct perf_file_section	trace_info;
 };
 
+static int trace_info;
+
+void perf_header__set_trace_info(void)
+{
+	trace_info = 1;
+}
+
 static void do_write(int fd, void *buf, size_t size)
 {
 	while (size) {
@@ -198,6 +210,23 @@
 	if (events)
 		do_write(fd, events, self->event_size);
 
+	if (trace_info) {
+		static int trace_info_written;
+
+		/*
+		 * Write it only once
+		 */
+		if (!trace_info_written) {
+			self->trace_info_offset = lseek(fd, 0, SEEK_CUR);
+			read_tracing_data(fd, attrs, nr_counters);
+			self->trace_info_size = lseek(fd, 0, SEEK_CUR) -
+						self->trace_info_offset;
+			trace_info_written = 1;
+		} else {
+			lseek(fd, self->trace_info_offset +
+				self->trace_info_size, SEEK_SET);
+		}
+	}
 
 	self->data_offset = lseek(fd, 0, SEEK_CUR);
 
@@ -217,6 +246,10 @@
 			.offset = self->event_offset,
 			.size	= self->event_size,
 		},
+		.trace_info = {
+			.offset = self->trace_info_offset,
+			.size = self->trace_info_size,
+		},
 	};
 
 	lseek(fd, 0, SEEK_SET);
@@ -254,10 +287,16 @@
 	do_read(fd, &f_header, sizeof(f_header));
 
 	if (f_header.magic	!= PERF_MAGIC		||
-	    f_header.size	!= sizeof(f_header)	||
 	    f_header.attr_size	!= sizeof(f_attr))
 		die("incompatible file format");
 
+	if (f_header.size != sizeof(f_header)) {
+		/* Support the previous format */
+		if (f_header.size == offsetof(typeof(f_header), trace_info))
+			f_header.trace_info.size = 0;
+		else
+			die("incompatible file format");
+	}
 	nr_attrs = f_header.attrs.size / sizeof(f_attr);
 	lseek(fd, f_header.attrs.offset, SEEK_SET);
 
@@ -290,6 +329,15 @@
 		do_read(fd, events, f_header.event_types.size);
 		event_count =  f_header.event_types.size / sizeof(struct perf_trace_event_type);
 	}
+
+	self->trace_info_offset = f_header.trace_info.offset;
+	self->trace_info_size = f_header.trace_info.size;
+
+	if (self->trace_info_size) {
+		lseek(fd, self->trace_info_offset, SEEK_SET);
+		trace_report(fd);
+	}
+
 	self->event_offset = f_header.event_types.offset;
 	self->event_size   = f_header.event_types.size;
 
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index a0761bc..30aee51 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -1,5 +1,5 @@
-#ifndef _PERF_HEADER_H
-#define _PERF_HEADER_H
+#ifndef __PERF_HEADER_H
+#define __PERF_HEADER_H
 
 #include "../../../include/linux/perf_event.h"
 #include <sys/types.h>
@@ -21,6 +21,8 @@
 	u64 data_size;
 	u64 event_offset;
 	u64 event_size;
+	u64 trace_info_offset;
+	u64 trace_info_size;
 };
 
 struct perf_header *perf_header__read(int fd);
@@ -40,8 +42,8 @@
 u64 perf_header__sample_type(struct perf_header *header);
 struct perf_event_attr *
 perf_header__find_attr(u64 id, struct perf_header *header);
-
+void perf_header__set_trace_info(void);
 
 struct perf_header *perf_header__new(void);
 
-#endif /* _PERF_HEADER_H */
+#endif /* __PERF_HEADER_H */
diff --git a/tools/perf/util/help.h b/tools/perf/util/help.h
index 7128783..7f5c6de 100644
--- a/tools/perf/util/help.h
+++ b/tools/perf/util/help.h
@@ -1,5 +1,5 @@
-#ifndef HELP_H
-#define HELP_H
+#ifndef __PERF_HELP_H
+#define __PERF_HELP_H
 
 struct cmdnames {
 	size_t alloc;
@@ -26,4 +26,4 @@
 void list_commands(const char *title, struct cmdnames *main_cmds,
 		   struct cmdnames *other_cmds);
 
-#endif /* HELP_H */
+#endif /* __PERF_HELP_H */
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
new file mode 100644
index 0000000..7393a02
--- /dev/null
+++ b/tools/perf/util/hist.c
@@ -0,0 +1,210 @@
+#include "hist.h"
+
+struct rb_root hist;
+struct rb_root collapse_hists;
+struct rb_root output_hists;
+int callchain;
+
+struct callchain_param	callchain_param = {
+	.mode	= CHAIN_GRAPH_REL,
+	.min_percent = 0.5
+};
+
+unsigned long total;
+unsigned long total_mmap;
+unsigned long total_comm;
+unsigned long total_fork;
+unsigned long total_unknown;
+unsigned long total_lost;
+
+/*
+ * histogram, sorted on item, collects counts
+ */
+
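+/*
+ * Find a matching entry in the histogram tree: on a hit, set *hit and
+ * return the existing entry; otherwise allocate, insert and return a
+ * new one (NULL on allocation failure).
+ */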
+struct hist_entry *__hist_entry__add(struct thread *thread, struct map *map,
+				     struct symbol *sym,
+				     struct symbol *sym_parent,
+				     u64 ip, u64 count, char level, bool *hit)
+{
+	struct rb_node **p = &hist.rb_node;
+	struct rb_node *parent = NULL;
+	struct hist_entry *he;
+	struct hist_entry entry = {
+		.thread	= thread,
+		.map	= map,
+		.sym	= sym,
+		.ip	= ip,
+		.level	= level,
+		.count	= count,
+		.parent = sym_parent,
+	};
+	int cmp;
+
+	while (*p != NULL) {
+		parent = *p;
+		he = rb_entry(parent, struct hist_entry, rb_node);
+
+		cmp = hist_entry__cmp(&entry, he);
+
+		if (!cmp) {
+			*hit = true;
+			return he;
+		}
+
+		if (cmp < 0)
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
+	}
+
+	he = malloc(sizeof(*he));
+	if (!he)
+		return NULL;
+	*he = entry;
+	rb_link_node(&he->rb_node, parent, p);
+	rb_insert_color(&he->rb_node, &hist);
+	*hit = false;
+	return he;
+}
+
+int64_t
+hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
+{
+	struct sort_entry *se;
+	int64_t cmp = 0;
+
+	list_for_each_entry(se, &hist_entry__sort_list, list) {
+		cmp = se->cmp(left, right);
+		if (cmp)
+			break;
+	}
+
+	return cmp;
+}
+
+int64_t
+hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
+{
+	struct sort_entry *se;
+	int64_t cmp = 0;
+
+	list_for_each_entry(se, &hist_entry__sort_list, list) {
+		int64_t (*f)(struct hist_entry *, struct hist_entry *);
+
+		f = se->collapse ?: se->cmp;
+
+		cmp = f(left, right);
+		if (cmp)
+			break;
+	}
+
+	return cmp;
+}
+
+void hist_entry__free(struct hist_entry *he)
+{
+	free(he);
+}
+
+/*
+ * collapse the histogram
+ */
+
+void collapse__insert_entry(struct hist_entry *he)
+{
+	struct rb_node **p = &collapse_hists.rb_node;
+	struct rb_node *parent = NULL;
+	struct hist_entry *iter;
+	int64_t cmp;
+
+	while (*p != NULL) {
+		parent = *p;
+		iter = rb_entry(parent, struct hist_entry, rb_node);
+
+		cmp = hist_entry__collapse(iter, he);
+
+		if (!cmp) {
+			iter->count += he->count;
+			hist_entry__free(he);
+			return;
+		}
+
+		if (cmp < 0)
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
+	}
+
+	rb_link_node(&he->rb_node, parent, p);
+	rb_insert_color(&he->rb_node, &collapse_hists);
+}
+
+void collapse__resort(void)
+{
+	struct rb_node *next;
+	struct hist_entry *n;
+
+	if (!sort__need_collapse)
+		return;
+
+	next = rb_first(&hist);
+	while (next) {
+		n = rb_entry(next, struct hist_entry, rb_node);
+		next = rb_next(&n->rb_node);
+
+		rb_erase(&n->rb_node, &hist);
+		collapse__insert_entry(n);
+	}
+}
+
+/*
+ * reverse the map, sort on count.
+ */
+
+void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits)
+{
+	struct rb_node **p = &output_hists.rb_node;
+	struct rb_node *parent = NULL;
+	struct hist_entry *iter;
+
+	if (callchain)
+		callchain_param.sort(&he->sorted_chain, &he->callchain,
+				      min_callchain_hits, &callchain_param);
+
+	while (*p != NULL) {
+		parent = *p;
+		iter = rb_entry(parent, struct hist_entry, rb_node);
+
+		if (he->count > iter->count)
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
+	}
+
+	rb_link_node(&he->rb_node, parent, p);
+	rb_insert_color(&he->rb_node, &output_hists);
+}
+
+void output__resort(u64 total_samples)
+{
+	struct rb_node *next;
+	struct hist_entry *n;
+	struct rb_root *tree = &hist;
+	u64 min_callchain_hits;
+
+	min_callchain_hits =
+		total_samples * (callchain_param.min_percent / 100);
+
+	if (sort__need_collapse)
+		tree = &collapse_hists;
+
+	next = rb_first(tree);
+
+	while (next) {
+		n = rb_entry(next, struct hist_entry, rb_node);
+		next = rb_next(&n->rb_node);
+
+		rb_erase(&n->rb_node, tree);
+		output__insert_entry(n, min_callchain_hits);
+	}
+}
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
new file mode 100644
index 0000000..ac2149c
--- /dev/null
+++ b/tools/perf/util/hist.h
@@ -0,0 +1,50 @@
+#ifndef __PERF_HIST_H
+#define __PERF_HIST_H
+#include "../builtin.h"
+
+#include "util.h"
+
+#include "color.h"
+#include <linux/list.h>
+#include "cache.h"
+#include <linux/rbtree.h>
+#include "symbol.h"
+#include "string.h"
+#include "callchain.h"
+#include "strlist.h"
+#include "values.h"
+
+#include "../perf.h"
+#include "debug.h"
+#include "header.h"
+
+#include "parse-options.h"
+#include "parse-events.h"
+
+#include "thread.h"
+#include "sort.h"
+
+extern struct rb_root hist;
+extern struct rb_root collapse_hists;
+extern struct rb_root output_hists;
+extern int callchain;
+extern struct callchain_param callchain_param;
+extern unsigned long total;
+extern unsigned long total_mmap;
+extern unsigned long total_comm;
+extern unsigned long total_fork;
+extern unsigned long total_unknown;
+extern unsigned long total_lost;
+
+struct hist_entry *__hist_entry__add(struct thread *thread, struct map *map,
+				     struct symbol *sym, struct symbol *parent,
+				     u64 ip, u64 count, char level, bool *hit);
+extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *);
+extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *);
+extern void hist_entry__free(struct hist_entry *);
+extern void collapse__insert_entry(struct hist_entry *);
+extern void collapse__resort(void);
+extern void output__insert_entry(struct hist_entry *, u64);
+extern void output__resort(u64);
+
+#endif	/* __PERF_HIST_H */
diff --git a/tools/perf/util/levenshtein.h b/tools/perf/util/levenshtein.h
index 0173abe..b0fcb6d 100644
--- a/tools/perf/util/levenshtein.h
+++ b/tools/perf/util/levenshtein.h
@@ -1,8 +1,8 @@
-#ifndef LEVENSHTEIN_H
-#define LEVENSHTEIN_H
+#ifndef __PERF_LEVENSHTEIN_H
+#define __PERF_LEVENSHTEIN_H
 
 int levenshtein(const char *string1, const char *string2,
 	int swap_penalty, int substition_penalty,
 	int insertion_penalty, int deletion_penalty);
 
-#endif
+#endif /* __PERF_LEVENSHTEIN_H */
diff --git a/tools/perf/util/module.c b/tools/perf/util/module.c
deleted file mode 100644
index 0d8c85d..0000000
--- a/tools/perf/util/module.c
+++ /dev/null
@@ -1,545 +0,0 @@
-#include "util.h"
-#include "../perf.h"
-#include "string.h"
-#include "module.h"
-
-#include <libelf.h>
-#include <libgen.h>
-#include <gelf.h>
-#include <elf.h>
-#include <dirent.h>
-#include <sys/utsname.h>
-
-static unsigned int crc32(const char *p, unsigned int len)
-{
-	int i;
-	unsigned int crc = 0;
-
-	while (len--) {
-		crc ^= *p++;
-		for (i = 0; i < 8; i++)
-			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
-	}
-	return crc;
-}
-
-/* module section methods */
-
-struct sec_dso *sec_dso__new_dso(const char *name)
-{
-	struct sec_dso *self = malloc(sizeof(*self) + strlen(name) + 1);
-
-	if (self != NULL) {
-		strcpy(self->name, name);
-		self->secs = RB_ROOT;
-		self->find_section = sec_dso__find_section;
-	}
-
-	return self;
-}
-
-static void sec_dso__delete_section(struct section *self)
-{
-	free(((void *)self));
-}
-
-void sec_dso__delete_sections(struct sec_dso *self)
-{
-	struct section *pos;
-	struct rb_node *next = rb_first(&self->secs);
-
-	while (next) {
-		pos = rb_entry(next, struct section, rb_node);
-		next = rb_next(&pos->rb_node);
-		rb_erase(&pos->rb_node, &self->secs);
-		sec_dso__delete_section(pos);
-	}
-}
-
-void sec_dso__delete_self(struct sec_dso *self)
-{
-	sec_dso__delete_sections(self);
-	free(self);
-}
-
-static void sec_dso__insert_section(struct sec_dso *self, struct section *sec)
-{
-	struct rb_node **p = &self->secs.rb_node;
-	struct rb_node *parent = NULL;
-	const u64 hash = sec->hash;
-	struct section *s;
-
-	while (*p != NULL) {
-		parent = *p;
-		s = rb_entry(parent, struct section, rb_node);
-		if (hash < s->hash)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-	rb_link_node(&sec->rb_node, parent, p);
-	rb_insert_color(&sec->rb_node, &self->secs);
-}
-
-struct section *sec_dso__find_section(struct sec_dso *self, const char *name)
-{
-	struct rb_node *n;
-	u64 hash;
-	int len;
-
-	if (self == NULL)
-		return NULL;
-
-	len = strlen(name);
-	hash = crc32(name, len);
-
-	n = self->secs.rb_node;
-
-	while (n) {
-		struct section *s = rb_entry(n, struct section, rb_node);
-
-		if (hash < s->hash)
-			n = n->rb_left;
-		else if (hash > s->hash)
-			n = n->rb_right;
-		else {
-			if (!strcmp(name, s->name))
-				return s;
-			else
-				n = rb_next(&s->rb_node);
-		}
-	}
-
-	return NULL;
-}
-
-static size_t sec_dso__fprintf_section(struct section *self, FILE *fp)
-{
-	return fprintf(fp, "name:%s vma:%llx path:%s\n",
-		       self->name, self->vma, self->path);
-}
-
-size_t sec_dso__fprintf(struct sec_dso *self, FILE *fp)
-{
-	size_t ret = fprintf(fp, "dso: %s\n", self->name);
-
-	struct rb_node *nd;
-	for (nd = rb_first(&self->secs); nd; nd = rb_next(nd)) {
-		struct section *pos = rb_entry(nd, struct section, rb_node);
-		ret += sec_dso__fprintf_section(pos, fp);
-	}
-
-	return ret;
-}
-
-static struct section *section__new(const char *name, const char *path)
-{
-	struct section *self = calloc(1, sizeof(*self));
-
-	if (!self)
-		goto out_failure;
-
-	self->name = calloc(1, strlen(name) + 1);
-	if (!self->name)
-		goto out_failure;
-
-	self->path = calloc(1, strlen(path) + 1);
-	if (!self->path)
-		goto out_failure;
-
-	strcpy(self->name, name);
-	strcpy(self->path, path);
-	self->hash = crc32(self->name, strlen(name));
-
-	return self;
-
-out_failure:
-	if (self) {
-		if (self->name)
-			free(self->name);
-		if (self->path)
-			free(self->path);
-		free(self);
-	}
-
-	return NULL;
-}
-
-/* module methods */
-
-struct mod_dso *mod_dso__new_dso(const char *name)
-{
-	struct mod_dso *self = malloc(sizeof(*self) + strlen(name) + 1);
-
-	if (self != NULL) {
-		strcpy(self->name, name);
-		self->mods = RB_ROOT;
-		self->find_module = mod_dso__find_module;
-	}
-
-	return self;
-}
-
-static void mod_dso__delete_module(struct module *self)
-{
-	free(((void *)self));
-}
-
-void mod_dso__delete_modules(struct mod_dso *self)
-{
-	struct module *pos;
-	struct rb_node *next = rb_first(&self->mods);
-
-	while (next) {
-		pos = rb_entry(next, struct module, rb_node);
-		next = rb_next(&pos->rb_node);
-		rb_erase(&pos->rb_node, &self->mods);
-		mod_dso__delete_module(pos);
-	}
-}
-
-void mod_dso__delete_self(struct mod_dso *self)
-{
-	mod_dso__delete_modules(self);
-	free(self);
-}
-
-static void mod_dso__insert_module(struct mod_dso *self, struct module *mod)
-{
-	struct rb_node **p = &self->mods.rb_node;
-	struct rb_node *parent = NULL;
-	const u64 hash = mod->hash;
-	struct module *m;
-
-	while (*p != NULL) {
-		parent = *p;
-		m = rb_entry(parent, struct module, rb_node);
-		if (hash < m->hash)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-	rb_link_node(&mod->rb_node, parent, p);
-	rb_insert_color(&mod->rb_node, &self->mods);
-}
-
-struct module *mod_dso__find_module(struct mod_dso *self, const char *name)
-{
-	struct rb_node *n;
-	u64 hash;
-	int len;
-
-	if (self == NULL)
-		return NULL;
-
-	len = strlen(name);
-	hash = crc32(name, len);
-
-	n = self->mods.rb_node;
-
-	while (n) {
-		struct module *m = rb_entry(n, struct module, rb_node);
-
-		if (hash < m->hash)
-			n = n->rb_left;
-		else if (hash > m->hash)
-			n = n->rb_right;
-		else {
-			if (!strcmp(name, m->name))
-				return m;
-			else
-				n = rb_next(&m->rb_node);
-		}
-	}
-
-	return NULL;
-}
-
-static size_t mod_dso__fprintf_module(struct module *self, FILE *fp)
-{
-	return fprintf(fp, "name:%s path:%s\n", self->name, self->path);
-}
-
-size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp)
-{
-	struct rb_node *nd;
-	size_t ret;
-
-	ret = fprintf(fp, "dso: %s\n", self->name);
-
-	for (nd = rb_first(&self->mods); nd; nd = rb_next(nd)) {
-		struct module *pos = rb_entry(nd, struct module, rb_node);
-
-		ret += mod_dso__fprintf_module(pos, fp);
-	}
-
-	return ret;
-}
-
-static struct module *module__new(const char *name, const char *path)
-{
-	struct module *self = calloc(1, sizeof(*self));
-
-	if (!self)
-		goto out_failure;
-
-	self->name = calloc(1, strlen(name) + 1);
-	if (!self->name)
-		goto out_failure;
-
-	self->path = calloc(1, strlen(path) + 1);
-	if (!self->path)
-		goto out_failure;
-
-	strcpy(self->name, name);
-	strcpy(self->path, path);
-	self->hash = crc32(self->name, strlen(name));
-
-	return self;
-
-out_failure:
-	if (self) {
-		if (self->name)
-			free(self->name);
-		if (self->path)
-			free(self->path);
-		free(self);
-	}
-
-	return NULL;
-}
-
-static int mod_dso__load_sections(struct module *mod)
-{
-	int count = 0, path_len;
-	struct dirent *entry;
-	char *line = NULL;
-	char *dir_path;
-	DIR *dir;
-	size_t n;
-
-	path_len = strlen("/sys/module/");
-	path_len += strlen(mod->name);
-	path_len += strlen("/sections/");
-
-	dir_path = calloc(1, path_len + 1);
-	if (dir_path == NULL)
-		goto out_failure;
-
-	strcat(dir_path, "/sys/module/");
-	strcat(dir_path, mod->name);
-	strcat(dir_path, "/sections/");
-
-	dir = opendir(dir_path);
-	if (dir == NULL)
-		goto out_free;
-
-	while ((entry = readdir(dir))) {
-		struct section *section;
-		char *path, *vma;
-		int line_len;
-		FILE *file;
-
-		if (!strcmp(".", entry->d_name) || !strcmp("..", entry->d_name))
-			continue;
-
-		path = calloc(1, path_len + strlen(entry->d_name) + 1);
-		if (path == NULL)
-			break;
-		strcat(path, dir_path);
-		strcat(path, entry->d_name);
-
-		file = fopen(path, "r");
-		if (file == NULL) {
-			free(path);
-			break;
-		}
-
-		line_len = getline(&line, &n, file);
-		if (line_len < 0) {
-			free(path);
-			fclose(file);
-			break;
-		}
-
-		if (!line) {
-			free(path);
-			fclose(file);
-			break;
-		}
-
-		line[--line_len] = '\0'; /* \n */
-
-		vma = strstr(line, "0x");
-		if (!vma) {
-			free(path);
-			fclose(file);
-			break;
-		}
-		vma += 2;
-
-		section = section__new(entry->d_name, path);
-		if (!section) {
-			fprintf(stderr, "load_sections: allocation error\n");
-			free(path);
-			fclose(file);
-			break;
-		}
-
-		hex2u64(vma, &section->vma);
-		sec_dso__insert_section(mod->sections, section);
-
-		free(path);
-		fclose(file);
-		count++;
-	}
-
-	closedir(dir);
-	free(line);
-	free(dir_path);
-
-	return count;
-
-out_free:
-	free(dir_path);
-
-out_failure:
-	return count;
-}
-
-static int mod_dso__load_module_paths(struct mod_dso *self)
-{
-	struct utsname uts;
-	int count = 0, len, err = -1;
-	char *line = NULL;
-	FILE *file;
-	char *dpath, *dir;
-	size_t n;
-
-	if (uname(&uts) < 0)
-		return err;
-
-	len = strlen("/lib/modules/");
-	len += strlen(uts.release);
-	len += strlen("/modules.dep");
-
-	dpath = calloc(1, len + 1);
-	if (dpath == NULL)
-		return err;
-
-	strcat(dpath, "/lib/modules/");
-	strcat(dpath, uts.release);
-	strcat(dpath, "/modules.dep");
-
-	file = fopen(dpath, "r");
-	if (file == NULL)
-		goto out_failure;
-
-	dir = dirname(dpath);
-	if (!dir)
-		goto out_failure;
-	strcat(dir, "/");
-
-	while (!feof(file)) {
-		struct module *module;
-		char *name, *path, *tmp;
-		FILE *modfile;
-		int line_len;
-
-		line_len = getline(&line, &n, file);
-		if (line_len < 0)
-			break;
-
-		if (!line)
-			break;
-
-		line[--line_len] = '\0'; /* \n */
-
-		path = strchr(line, ':');
-		if (!path)
-			break;
-		*path = '\0';
-
-		path = strdup(line);
-		if (!path)
-			break;
-
-		if (!strstr(path, dir)) {
-			if (strncmp(path, "kernel/", 7))
-				break;
-
-			free(path);
-			path = calloc(1, strlen(dir) + strlen(line) + 1);
-			if (!path)
-				break;
-			strcat(path, dir);
-			strcat(path, line);
-		}
-
-		modfile = fopen(path, "r");
-		if (modfile == NULL)
-			break;
-		fclose(modfile);
-
-		name = strdup(path);
-		if (!name)
-			break;
-
-		name = strtok(name, "/");
-		tmp = name;
-
-		while (tmp) {
-			tmp = strtok(NULL, "/");
-			if (tmp)
-				name = tmp;
-		}
-
-		name = strsep(&name, ".");
-		if (!name)
-			break;
-
-		/* Quirk: replace '-' with '_' in all modules */
-		for (len = strlen(name); len; len--) {
-			if (*(name+len) == '-')
-				*(name+len) = '_';
-		}
-
-		module = module__new(name, path);
-		if (!module)
-			break;
-		mod_dso__insert_module(self, module);
-
-		module->sections = sec_dso__new_dso("sections");
-		if (!module->sections)
-			break;
-
-		module->active = mod_dso__load_sections(module);
-
-		if (module->active > 0)
-			count++;
-	}
-
-	if (feof(file))
-		err = count;
-	else
-		fprintf(stderr, "load_module_paths: modules.dep parsing failure!\n");
-
-out_failure:
-	if (dpath)
-		free(dpath);
-	if (file)
-		fclose(file);
-	if (line)
-		free(line);
-
-	return err;
-}
-
-int mod_dso__load_modules(struct mod_dso *dso)
-{
-	int err;
-
-	err = mod_dso__load_module_paths(dso);
-
-	return err;
-}
diff --git a/tools/perf/util/module.h b/tools/perf/util/module.h
deleted file mode 100644
index 8a592ef..0000000
--- a/tools/perf/util/module.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef _PERF_MODULE_
-#define _PERF_MODULE_ 1
-
-#include <linux/types.h>
-#include "../types.h"
-#include <linux/list.h>
-#include <linux/rbtree.h>
-
-struct section {
-	struct rb_node	rb_node;
-	u64		hash;
-	u64		vma;
-	char		*name;
-	char		*path;
-};
-
-struct sec_dso {
-	struct list_head node;
-	struct rb_root	 secs;
-	struct section    *(*find_section)(struct sec_dso *, const char *name);
-	char		 name[0];
-};
-
-struct module {
-	struct rb_node	rb_node;
-	u64		hash;
-	char		*name;
-	char		*path;
-	struct sec_dso	*sections;
-	int		active;
-};
-
-struct mod_dso {
-	struct list_head node;
-	struct rb_root	 mods;
-	struct module    *(*find_module)(struct mod_dso *, const char *name);
-	char		 name[0];
-};
-
-struct sec_dso *sec_dso__new_dso(const char *name);
-void sec_dso__delete_sections(struct sec_dso *self);
-void sec_dso__delete_self(struct sec_dso *self);
-size_t sec_dso__fprintf(struct sec_dso *self, FILE *fp);
-struct section *sec_dso__find_section(struct sec_dso *self, const char *name);
-
-struct mod_dso *mod_dso__new_dso(const char *name);
-void mod_dso__delete_modules(struct mod_dso *self);
-void mod_dso__delete_self(struct mod_dso *self);
-size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp);
-struct module *mod_dso__find_module(struct mod_dso *self, const char *name);
-int mod_dso__load_modules(struct mod_dso *dso);
-
-#endif /* _PERF_MODULE_ */
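Note on the removed module.c: it resolved per-module section load addresses by reading /sys/module/<name>/sections/<section>, each of which contains a single "0x<address>" line, and kept the results in an rb-tree hashed by section name. The equivalent information is now taken from /proc/modules and from the .ko ELF section headers in symbol.c. Below is a minimal standalone sketch of that sysfs lookup, kept only for reference; the module and section names are examples and error handling is reduced to the bare minimum.

/* Sketch of the sysfs section-address lookup the removed code performed. */
#include <stdio.h>
#include <stdlib.h>

static unsigned long long section_vma(const char *mod, const char *sec)
{
	char path[256], buf[64];
	unsigned long long vma = 0;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/module/%s/sections/%s", mod, sec);
	f = fopen(path, "r");
	if (f == NULL)
		return 0;
	if (fgets(buf, sizeof(buf), f))		/* file holds "0x<addr>\n" */
		vma = strtoull(buf, NULL, 16);
	fclose(f);
	return vma;
}

int main(void)
{
	printf("%#llx\n", section_vma("snd", ".text"));	/* example names */
	return 0;
}
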
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 30c6081..8626a43 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -1,5 +1,5 @@
-#ifndef _PARSE_EVENTS_H
-#define _PARSE_EVENTS_H
+#ifndef __PERF_PARSE_EVENTS_H
+#define __PERF_PARSE_EVENTS_H
 /*
  * Parse symbolic events/counts passed in as options:
  */
@@ -31,4 +31,4 @@
 extern int valid_debugfs_mount(const char *debugfs);
 
 
-#endif /* _PARSE_EVENTS_H */
+#endif /* __PERF_PARSE_EVENTS_H */
diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h
index 2ee248f..948805a 100644
--- a/tools/perf/util/parse-options.h
+++ b/tools/perf/util/parse-options.h
@@ -1,5 +1,5 @@
-#ifndef PARSE_OPTIONS_H
-#define PARSE_OPTIONS_H
+#ifndef __PERF_PARSE_OPTIONS_H
+#define __PERF_PARSE_OPTIONS_H
 
 enum parse_opt_type {
 	/* special types */
@@ -174,4 +174,4 @@
 
 extern const char *parse_options_fix_filename(const char *prefix, const char *file);
 
-#endif
+#endif /* __PERF_PARSE_OPTIONS_H */
diff --git a/tools/perf/util/quote.h b/tools/perf/util/quote.h
index a5454a1..b6a0197 100644
--- a/tools/perf/util/quote.h
+++ b/tools/perf/util/quote.h
@@ -1,5 +1,5 @@
-#ifndef QUOTE_H
-#define QUOTE_H
+#ifndef __PERF_QUOTE_H
+#define __PERF_QUOTE_H
 
 #include <stddef.h>
 #include <stdio.h>
@@ -65,4 +65,4 @@
 extern void python_quote_print(FILE *stream, const char *src);
 extern void tcl_quote_print(FILE *stream, const char *src);
 
-#endif
+#endif /* __PERF_QUOTE_H */
diff --git a/tools/perf/util/run-command.h b/tools/perf/util/run-command.h
index cc1837d..d790287 100644
--- a/tools/perf/util/run-command.h
+++ b/tools/perf/util/run-command.h
@@ -1,5 +1,5 @@
-#ifndef RUN_COMMAND_H
-#define RUN_COMMAND_H
+#ifndef __PERF_RUN_COMMAND_H
+#define __PERF_RUN_COMMAND_H
 
 enum {
 	ERR_RUN_COMMAND_FORK = 10000,
@@ -85,4 +85,4 @@
 int start_async(struct async *async);
 int finish_async(struct async *async);
 
-#endif
+#endif /* __PERF_RUN_COMMAND_H */
diff --git a/tools/perf/util/sigchain.h b/tools/perf/util/sigchain.h
index 618083bc..1a53c11 100644
--- a/tools/perf/util/sigchain.h
+++ b/tools/perf/util/sigchain.h
@@ -1,5 +1,5 @@
-#ifndef SIGCHAIN_H
-#define SIGCHAIN_H
+#ifndef __PERF_SIGCHAIN_H
+#define __PERF_SIGCHAIN_H
 
 typedef void (*sigchain_fun)(int);
 
@@ -8,4 +8,4 @@
 
 void sigchain_push_common(sigchain_fun f);
 
-#endif /* SIGCHAIN_H */
+#endif /* __PERF_SIGCHAIN_H */
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
new file mode 100644
index 0000000..40c9acd
--- /dev/null
+++ b/tools/perf/util/sort.c
@@ -0,0 +1,276 @@
+#include "sort.h"
+
+regex_t		parent_regex;
+char		default_parent_pattern[] = "^sys_|^do_page_fault";
+char		*parent_pattern = default_parent_pattern;
+char		default_sort_order[] = "comm,dso,symbol";
+char		*sort_order = default_sort_order;
+int sort__need_collapse = 0;
+int sort__has_parent = 0;
+
+unsigned int dsos__col_width;
+unsigned int comms__col_width;
+unsigned int threads__col_width;
+static unsigned int parent_symbol__col_width;
+char *field_sep;
+
+LIST_HEAD(hist_entry__sort_list);
+
+struct sort_entry sort_thread = {
+	.header = "Command:  Pid",
+	.cmp	= sort__thread_cmp,
+	.print	= sort__thread_print,
+	.width	= &threads__col_width,
+};
+
+struct sort_entry sort_comm = {
+	.header		= "Command",
+	.cmp		= sort__comm_cmp,
+	.collapse	= sort__comm_collapse,
+	.print		= sort__comm_print,
+	.width		= &comms__col_width,
+};
+
+struct sort_entry sort_dso = {
+	.header = "Shared Object",
+	.cmp	= sort__dso_cmp,
+	.print	= sort__dso_print,
+	.width	= &dsos__col_width,
+};
+
+struct sort_entry sort_sym = {
+	.header = "Symbol",
+	.cmp	= sort__sym_cmp,
+	.print	= sort__sym_print,
+};
+
+struct sort_entry sort_parent = {
+	.header = "Parent symbol",
+	.cmp	= sort__parent_cmp,
+	.print	= sort__parent_print,
+	.width	= &parent_symbol__col_width,
+};
+
+struct sort_dimension {
+	const char		*name;
+	struct sort_entry	*entry;
+	int			taken;
+};
+
+static struct sort_dimension sort_dimensions[] = {
+	{ .name = "pid",	.entry = &sort_thread,	},
+	{ .name = "comm",	.entry = &sort_comm,	},
+	{ .name = "dso",	.entry = &sort_dso,	},
+	{ .name = "symbol",	.entry = &sort_sym,	},
+	{ .name = "parent",	.entry = &sort_parent,	},
+};
+
+int64_t cmp_null(void *l, void *r)
+{
+	if (!l && !r)
+		return 0;
+	else if (!l)
+		return -1;
+	else
+		return 1;
+}
+
+/* --sort pid */
+
+int64_t
+sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+	return right->thread->pid - left->thread->pid;
+}
+
+int repsep_fprintf(FILE *fp, const char *fmt, ...)
+{
+	int n;
+	va_list ap;
+
+	va_start(ap, fmt);
+	if (!field_sep)
+		n = vfprintf(fp, fmt, ap);
+	else {
+		char *bf = NULL;
+		n = vasprintf(&bf, fmt, ap);
+		if (n > 0) {
+			char *sep = bf;
+
+			while (1) {
+				sep = strchr(sep, *field_sep);
+				if (sep == NULL)
+					break;
+				*sep = '.';
+			}
+		}
+		fputs(bf, fp);
+		free(bf);
+	}
+	va_end(ap);
+	return n;
+}
+
+size_t
+sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width)
+{
+	return repsep_fprintf(fp, "%*s:%5d", width - 6,
+			      self->thread->comm ?: "", self->thread->pid);
+}
+
+size_t
+sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width)
+{
+	return repsep_fprintf(fp, "%*s", width, self->thread->comm);
+}
+
+/* --sort dso */
+
+int64_t
+sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+	struct dso *dso_l = left->map ? left->map->dso : NULL;
+	struct dso *dso_r = right->map ? right->map->dso : NULL;
+	const char *dso_name_l, *dso_name_r;
+
+	if (!dso_l || !dso_r)
+		return cmp_null(dso_l, dso_r);
+
+	if (verbose) {
+		dso_name_l = dso_l->long_name;
+		dso_name_r = dso_r->long_name;
+	} else {
+		dso_name_l = dso_l->short_name;
+		dso_name_r = dso_r->short_name;
+	}
+
+	return strcmp(dso_name_l, dso_name_r);
+}
+
+size_t
+sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width)
+{
+	if (self->map && self->map->dso) {
+		const char *dso_name = !verbose ? self->map->dso->short_name :
+						  self->map->dso->long_name;
+		return repsep_fprintf(fp, "%-*s", width, dso_name);
+	}
+
+	return repsep_fprintf(fp, "%*llx", width, (u64)self->ip);
+}
+
+/* --sort symbol */
+
+int64_t
+sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+	u64 ip_l, ip_r;
+
+	if (left->sym == right->sym)
+		return 0;
+
+	ip_l = left->sym ? left->sym->start : left->ip;
+	ip_r = right->sym ? right->sym->start : right->ip;
+
+	return (int64_t)(ip_r - ip_l);
+}
+
+
+size_t
+sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used)
+{
+	size_t ret = 0;
+
+	if (verbose) {
+		char o = self->map ? dso__symtab_origin(self->map->dso) : '!';
+		ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip, o);
+	}
+
+	ret += repsep_fprintf(fp, "[%c] ", self->level);
+	if (self->sym)
+		ret += repsep_fprintf(fp, "%s", self->sym->name);
+	else
+		ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip);
+
+	return ret;
+}
+
+/* --sort comm */
+
+int64_t
+sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+	return right->thread->pid - left->thread->pid;
+}
+
+int64_t
+sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
+{
+	char *comm_l = left->thread->comm;
+	char *comm_r = right->thread->comm;
+
+	if (!comm_l || !comm_r)
+		return cmp_null(comm_l, comm_r);
+
+	return strcmp(comm_l, comm_r);
+}
+
+/* --sort parent */
+
+int64_t
+sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+	struct symbol *sym_l = left->parent;
+	struct symbol *sym_r = right->parent;
+
+	if (!sym_l || !sym_r)
+		return cmp_null(sym_l, sym_r);
+
+	return strcmp(sym_l->name, sym_r->name);
+}
+
+size_t
+sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width)
+{
+	return repsep_fprintf(fp, "%-*s", width,
+			      self->parent ? self->parent->name : "[other]");
+}
+
+int sort_dimension__add(const char *tok)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
+		struct sort_dimension *sd = &sort_dimensions[i];
+
+		if (sd->taken)
+			continue;
+
+		if (strncasecmp(tok, sd->name, strlen(tok)))
+			continue;
+
+		if (sd->entry->collapse)
+			sort__need_collapse = 1;
+
+		if (sd->entry == &sort_parent) {
+			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
+			if (ret) {
+				char err[BUFSIZ];
+
+				regerror(ret, &parent_regex, err, sizeof(err));
+				fprintf(stderr, "Invalid regex: %s\n%s",
+					parent_pattern, err);
+				exit(-1);
+			}
+			sort__has_parent = 1;
+		}
+
+		list_add_tail(&sd->entry->list, &hist_entry__sort_list);
+		sd->taken = 1;
+
+		return 0;
+	}
+
+	return -ESRCH;
+}
+
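sort_dimension__add() above is the single registration point for --sort keys: the token is prefix-matched against the dimension table with strncasecmp(), a matching entry is appended to hist_entry__sort_list, sort__need_collapse is set when the entry has a collapse method, and the parent regex is compiled on demand. The standalone sketch below imitates only the table lookup and the comma-separated tokenization a caller would perform; it is a simplified stand-in, not the perf code itself.

/* Simplified, standalone stand-in for sort_dimension__add() plus a driver
 * that splits a --sort style string; dimension names mirror the table above. */
#include <stdio.h>
#include <string.h>
#include <strings.h>

static const char *dimensions[] = { "pid", "comm", "dso", "symbol", "parent" };

static int dimension__add(const char *tok)
{
	size_t i;

	for (i = 0; i < sizeof(dimensions) / sizeof(dimensions[0]); i++)
		if (!strncasecmp(tok, dimensions[i], strlen(tok))) {
			printf("registered: %s\n", dimensions[i]);
			return 0;
		}
	return -1;
}

int main(void)
{
	char order[] = "comm,dso,symbol";	/* default_sort_order above */
	char *tok, *saveptr = NULL;

	for (tok = strtok_r(order, ",", &saveptr); tok;
	     tok = strtok_r(NULL, ",", &saveptr))
		if (dimension__add(tok) < 0)
			fprintf(stderr, "Unknown sort key: '%s'\n", tok);
	return 0;
}
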
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
new file mode 100644
index 0000000..13806d7
--- /dev/null
+++ b/tools/perf/util/sort.h
@@ -0,0 +1,90 @@
+#ifndef __PERF_SORT_H
+#define __PERF_SORT_H
+#include "../builtin.h"
+
+#include "util.h"
+
+#include "color.h"
+#include <linux/list.h>
+#include "cache.h"
+#include <linux/rbtree.h>
+#include "symbol.h"
+#include "string.h"
+#include "callchain.h"
+#include "strlist.h"
+#include "values.h"
+
+#include "../perf.h"
+#include "debug.h"
+#include "header.h"
+
+#include "parse-options.h"
+#include "parse-events.h"
+
+#include "thread.h"
+#include "sort.h"
+
+extern regex_t parent_regex;
+extern char *sort_order;
+extern char default_parent_pattern[];
+extern char *parent_pattern;
+extern char default_sort_order[];
+extern int sort__need_collapse;
+extern int sort__has_parent;
+extern char *field_sep;
+extern struct sort_entry sort_comm;
+extern struct sort_entry sort_dso;
+extern struct sort_entry sort_sym;
+extern struct sort_entry sort_parent;
+extern unsigned int dsos__col_width;
+extern unsigned int comms__col_width;
+extern unsigned int threads__col_width;
+
+struct hist_entry {
+	struct rb_node		rb_node;
+	u64			count;
+	struct thread		*thread;
+	struct map		*map;
+	struct symbol		*sym;
+	u64			ip;
+	char			level;
+	struct symbol		*parent;
+	struct callchain_node	callchain;
+	struct rb_root		sorted_chain;
+};
+
+/*
+ * configurable sorting bits
+ */
+
+struct sort_entry {
+	struct list_head list;
+
+	const char *header;
+
+	int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
+	int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
+	size_t	(*print)(FILE *fp, struct hist_entry *, unsigned int width);
+	unsigned int *width;
+	bool	elide;
+};
+
+extern struct sort_entry sort_thread;
+extern struct list_head hist_entry__sort_list;
+
+extern int repsep_fprintf(FILE *fp, const char *fmt, ...);
+extern size_t sort__thread_print(FILE *, struct hist_entry *, unsigned int);
+extern size_t sort__comm_print(FILE *, struct hist_entry *, unsigned int);
+extern size_t sort__dso_print(FILE *, struct hist_entry *, unsigned int);
+extern size_t sort__sym_print(FILE *, struct hist_entry *, unsigned int __used);
+extern int64_t cmp_null(void *, void *);
+extern int64_t sort__thread_cmp(struct hist_entry *, struct hist_entry *);
+extern int64_t sort__comm_cmp(struct hist_entry *, struct hist_entry *);
+extern int64_t sort__comm_collapse(struct hist_entry *, struct hist_entry *);
+extern int64_t sort__dso_cmp(struct hist_entry *, struct hist_entry *);
+extern int64_t sort__sym_cmp(struct hist_entry *, struct hist_entry *);
+extern int64_t sort__parent_cmp(struct hist_entry *, struct hist_entry *);
+extern size_t sort__parent_print(FILE *, struct hist_entry *, unsigned int);
+extern int sort_dimension__add(const char *);
+
+#endif	/* __PERF_SORT_H */
diff --git a/tools/perf/util/strbuf.h b/tools/perf/util/strbuf.h
index d2aa86c..a3d121d 100644
--- a/tools/perf/util/strbuf.h
+++ b/tools/perf/util/strbuf.h
@@ -1,5 +1,5 @@
-#ifndef STRBUF_H
-#define STRBUF_H
+#ifndef __PERF_STRBUF_H
+#define __PERF_STRBUF_H
 
 /*
  * Strbuf's can be use in many ways: as a byte array, or to store arbitrary
@@ -134,4 +134,4 @@
 extern int strbuf_branchname(struct strbuf *sb, const char *name);
 extern int strbuf_check_branch_ref(struct strbuf *sb, const char *name);
 
-#endif /* STRBUF_H */
+#endif /* __PERF_STRBUF_H */
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index c93eca9..04743d3 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -1,3 +1,4 @@
+#include <string.h>
 #include "string.h"
 
 static int hex(char ch)
@@ -32,3 +33,13 @@
 
 	return p - ptr;
 }
+
+char *strxfrchar(char *s, char from, char to)
+{
+	char *p = s;
+
+	while ((p = strchr(p, from)) != NULL)
+		*p++ = to;
+
+	return s;
+}
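strxfrchar() replaces every occurrence of one character with another in place; it is used in symbol.c below to turn '-' into '_' in module names so they match the names seen in kallsyms. A self-contained usage example, with the implementation repeated so it builds on its own; the sample name is made up.

/* Example use of strxfrchar(); implementation copied from above. */
#include <stdio.h>
#include <string.h>

static char *strxfrchar(char *s, char from, char to)
{
	char *p = s;

	while ((p = strchr(p, from)) != NULL)
		*p++ = to;

	return s;
}

int main(void)
{
	char name[] = "[snd-hda-intel]";	/* made-up module name */

	strxfrchar(name, '-', '_');
	puts(name);				/* prints "[snd_hda_intel]" */
	return 0;
}
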
diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h
index bf39dfa..2c84bf6 100644
--- a/tools/perf/util/string.h
+++ b/tools/perf/util/string.h
@@ -1,11 +1,12 @@
-#ifndef _PERF_STRING_H_
-#define _PERF_STRING_H_
+#ifndef __PERF_STRING_H
+#define __PERF_STRING_H
 
 #include "types.h"
 
 int hex2u64(const char *ptr, u64 *val);
+char *strxfrchar(char *s, char from, char to);
 
 #define _STR(x) #x
 #define STR(x) _STR(x)
 
-#endif
+#endif /* __PERF_STRING_H */
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
index 921818e..cb46593 100644
--- a/tools/perf/util/strlist.h
+++ b/tools/perf/util/strlist.h
@@ -1,5 +1,5 @@
-#ifndef STRLIST_H_
-#define STRLIST_H_
+#ifndef __PERF_STRLIST_H
+#define __PERF_STRLIST_H
 
 #include <linux/rbtree.h>
 #include <stdbool.h>
@@ -36,4 +36,4 @@
 }
 
 int strlist__parse_list(struct strlist *self, const char *s);
-#endif /* STRLIST_H_ */
+#endif /* __PERF_STRLIST_H */
diff --git a/tools/perf/util/svghelper.h b/tools/perf/util/svghelper.h
index cd93195..e078198 100644
--- a/tools/perf/util/svghelper.h
+++ b/tools/perf/util/svghelper.h
@@ -1,5 +1,5 @@
-#ifndef _INCLUDE_GUARD_SVG_HELPER_
-#define _INCLUDE_GUARD_SVG_HELPER_
+#ifndef __PERF_SVGHELPER_H
+#define __PERF_SVGHELPER_H
 
 #include "types.h"
 
@@ -25,4 +25,4 @@
 
 extern int svg_page_width;
 
-#endif
+#endif /* __PERF_SVGHELPER_H */
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 47ea060..faa84f5 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -2,12 +2,14 @@
 #include "../perf.h"
 #include "string.h"
 #include "symbol.h"
+#include "thread.h"
 
 #include "debug.h"
 
 #include <libelf.h>
 #include <gelf.h>
 #include <elf.h>
+#include <sys/utsname.h>
 
 const char *sym_hist_filter;
 
@@ -18,12 +20,65 @@
 	DSO__ORIG_UBUNTU,
 	DSO__ORIG_BUILDID,
 	DSO__ORIG_DSO,
+	DSO__ORIG_KMODULE,
 	DSO__ORIG_NOT_FOUND,
 };
 
-static struct symbol *symbol__new(u64 start, u64 len,
-				  const char *name, unsigned int priv_size,
-				  u64 obj_start, int v)
+static void dsos__add(struct dso *dso);
+static struct dso *dsos__find(const char *name);
+static struct map *map__new2(u64 start, struct dso *dso);
+static void kernel_maps__insert(struct map *map);
+
+static struct rb_root kernel_maps;
+
+static void dso__fixup_sym_end(struct dso *self)
+{
+	struct rb_node *nd, *prevnd = rb_first(&self->syms);
+	struct symbol *curr, *prev;
+
+	if (prevnd == NULL)
+		return;
+
+	curr = rb_entry(prevnd, struct symbol, rb_node);
+
+	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
+		prev = curr;
+		curr = rb_entry(nd, struct symbol, rb_node);
+
+		if (prev->end == prev->start)
+			prev->end = curr->start - 1;
+	}
+
+	/* Last entry */
+	if (curr->end == curr->start)
+		curr->end = roundup(curr->start, 4096);
+}
+
+static void kernel_maps__fixup_end(void)
+{
+	struct map *prev, *curr;
+	struct rb_node *nd, *prevnd = rb_first(&kernel_maps);
+
+	if (prevnd == NULL)
+		return;
+
+	curr = rb_entry(prevnd, struct map, rb_node);
+
+	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
+		prev = curr;
+		curr = rb_entry(nd, struct map, rb_node);
+		prev->end = curr->start - 1;
+	}
+
+	nd = rb_last(&curr->dso->syms);
+	if (nd) {
+		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
+		curr->end = sym->end;
+	}
+}
+
+static struct symbol *symbol__new(u64 start, u64 len, const char *name,
+				  unsigned int priv_size, int v)
 {
 	size_t namelen = strlen(name) + 1;
 	struct symbol *self = calloc(1, priv_size + sizeof(*self) + namelen);
@@ -31,11 +86,10 @@
 	if (!self)
 		return NULL;
 
-	if (v >= 2)
-		printf("new symbol: %016Lx [%08lx]: %s, hist: %p, obj_start: %p\n",
-			(u64)start, (unsigned long)len, name, self->hist, (void *)(unsigned long)obj_start);
+	if (v > 2)
+		printf("new symbol: %016Lx [%08lx]: %s, hist: %p\n",
+			start, (unsigned long)len, name, self->hist);
 
-	self->obj_start= obj_start;
 	self->hist = NULL;
 	self->hist_sum = 0;
 
@@ -60,12 +114,8 @@
 
 static size_t symbol__fprintf(struct symbol *self, FILE *fp)
 {
-	if (!self->module)
-		return fprintf(fp, " %llx-%llx %s\n",
+	return fprintf(fp, " %llx-%llx %s\n",
 		       self->start, self->end, self->name);
-	else
-		return fprintf(fp, " %llx-%llx %s \t[%s]\n",
-		       self->start, self->end, self->name, self->module->name);
 }
 
 struct dso *dso__new(const char *name, unsigned int sym_priv_size)
@@ -74,6 +124,8 @@
 
 	if (self != NULL) {
 		strcpy(self->name, name);
+		self->long_name = self->name;
+		self->short_name = self->name;
 		self->syms = RB_ROOT;
 		self->sym_priv_size = sym_priv_size;
 		self->find_symbol = dso__find_symbol;
@@ -100,6 +152,8 @@
 void dso__delete(struct dso *self)
 {
 	dso__delete_symbols(self);
+	if (self->long_name != self->name)
+		free(self->long_name);
 	free(self);
 }
 
@@ -147,7 +201,7 @@
 
 size_t dso__fprintf(struct dso *self, FILE *fp)
 {
-	size_t ret = fprintf(fp, "dso: %s\n", self->name);
+	size_t ret = fprintf(fp, "dso: %s\n", self->short_name);
 
 	struct rb_node *nd;
 	for (nd = rb_first(&self->syms); nd; nd = rb_next(nd)) {
@@ -158,13 +212,16 @@
 	return ret;
 }
 
-static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int v)
+/*
+ * Loads the function entries in /proc/kallsyms into kernel_map->dso,
+ * so that we can in the next step set the symbol ->end address and then
+ * call kernel_maps__split_kallsyms.
+ */
+static int kernel_maps__load_all_kallsyms(int v)
 {
-	struct rb_node *nd, *prevnd;
 	char *line = NULL;
 	size_t n;
 	FILE *file = fopen("/proc/kallsyms", "r");
-	int count = 0;
 
 	if (file == NULL)
 		goto out_failure;
@@ -174,6 +231,7 @@
 		struct symbol *sym;
 		int line_len, len;
 		char symbol_type;
+		char *symbol_name;
 
 		line_len = getline(&line, &n, file);
 		if (line_len < 0)
@@ -196,44 +254,24 @@
 		 */
 		if (symbol_type != 'T' && symbol_type != 'W')
 			continue;
+
+		symbol_name = line + len + 2;
 		/*
-		 * Well fix up the end later, when we have all sorted.
+		 * Will fix up the end later, when we have all symbols sorted.
 		 */
-		sym = symbol__new(start, 0xdead, line + len + 2,
-				  self->sym_priv_size, 0, v);
+		sym = symbol__new(start, 0, symbol_name,
+				  kernel_map->dso->sym_priv_size, v);
 
 		if (sym == NULL)
 			goto out_delete_line;
 
-		if (filter && filter(self, sym))
-			symbol__delete(sym, self->sym_priv_size);
-		else {
-			dso__insert_symbol(self, sym);
-			count++;
-		}
-	}
-
-	/*
-	 * Now that we have all sorted out, just set the ->end of all
-	 * symbols
-	 */
-	prevnd = rb_first(&self->syms);
-
-	if (prevnd == NULL)
-		goto out_delete_line;
-
-	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
-		struct symbol *prev = rb_entry(prevnd, struct symbol, rb_node),
-			      *curr = rb_entry(nd, struct symbol, rb_node);
-
-		prev->end = curr->start - 1;
-		prevnd = nd;
+		dso__insert_symbol(kernel_map->dso, sym);
 	}
 
 	free(line);
 	fclose(file);
 
-	return count;
+	return 0;
 
 out_delete_line:
 	free(line);
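kernel_maps__load_all_kallsyms() above consumes /proc/kallsyms lines of the form "<hex address> <type> <name>", where the name may carry a "\t[module]" suffix; the per-module split is done afterwards by kernel_maps__split_kallsyms(). The standalone snippet below only illustrates that line layout, with a fabricated module suffix; the real parser uses hex2u64() and explicit length checks rather than sscanf().

/* Standalone illustration of the /proc/kallsyms line format read above. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[] = "ffffffff81000000 T _text\t[made_up_module]";
	unsigned long long addr;
	char type, name[128], *module;

	if (sscanf(line, "%llx %c %127s", &addr, &type, name) != 3)
		return 1;

	module = strchr(line, '\t');	/* optional "\t[module]" suffix */
	printf("addr=%#llx type=%c name=%s module=%s\n",
	       addr, type, name, module ? module + 1 : "[kernel]");
	return 0;
}
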
@@ -241,14 +279,125 @@
 	return -1;
 }
 
-static int dso__load_perf_map(struct dso *self, symbol_filter_t filter, int v)
+/*
+ * Split the symbols into maps, making sure there are no overlaps, i.e. the
+ * kernel range is broken into several maps, named [kernel].N, as we don't have
+ * the original ELF section names that vmlinux has.
+ */
+static int kernel_maps__split_kallsyms(symbol_filter_t filter, int use_modules)
+{
+	struct map *map = kernel_map;
+	struct symbol *pos;
+	int count = 0;
+	struct rb_node *next = rb_first(&kernel_map->dso->syms);
+	int kernel_range = 0;
+
+	while (next) {
+		char *module;
+
+		pos = rb_entry(next, struct symbol, rb_node);
+		next = rb_next(&pos->rb_node);
+
+		module = strchr(pos->name, '\t');
+		if (module) {
+			if (!use_modules)
+				goto delete_symbol;
+
+			*module++ = '\0';
+
+			if (strcmp(map->dso->name, module)) {
+				map = kernel_maps__find_by_dso_name(module);
+				if (!map) {
+					fputs("/proc/{kallsyms,modules} "
+					      "inconsistency!\n", stderr);
+					return -1;
+				}
+			}
+			/*
+			 * So that we look just like we get from .ko files,
+			 * i.e. not prelinked, relative to map->start.
+			 */
+			pos->start = map->map_ip(map, pos->start);
+			pos->end   = map->map_ip(map, pos->end);
+		} else if (map != kernel_map) {
+			char dso_name[PATH_MAX];
+			struct dso *dso;
+
+			snprintf(dso_name, sizeof(dso_name), "[kernel].%d",
+				 kernel_range++);
+
+			dso = dso__new(dso_name,
+				       kernel_map->dso->sym_priv_size);
+			if (dso == NULL)
+				return -1;
+
+			map = map__new2(pos->start, dso);
+			if (map == NULL) {
+				dso__delete(dso);
+				return -1;
+			}
+
+			map->map_ip = vdso__map_ip;
+			kernel_maps__insert(map);
+			++kernel_range;
+		}
+
+		if (filter && filter(map, pos)) {
+delete_symbol:
+			rb_erase(&pos->rb_node, &kernel_map->dso->syms);
+			symbol__delete(pos, kernel_map->dso->sym_priv_size);
+		} else {
+			if (map != kernel_map) {
+				rb_erase(&pos->rb_node, &kernel_map->dso->syms);
+				dso__insert_symbol(map->dso, pos);
+			}
+			count++;
+		}
+	}
+
+	return count;
+}
+
+
+static int kernel_maps__load_kallsyms(symbol_filter_t filter,
+				      int use_modules, int v)
+{
+	if (kernel_maps__load_all_kallsyms(v))
+		return -1;
+
+	dso__fixup_sym_end(kernel_map->dso);
+
+	return kernel_maps__split_kallsyms(filter, use_modules);
+}
+
+static size_t kernel_maps__fprintf(FILE *fp, int v)
+{
+	size_t printed = fprintf(stderr, "Kernel maps:\n");
+	struct rb_node *nd;
+
+	for (nd = rb_first(&kernel_maps); nd; nd = rb_next(nd)) {
+		struct map *pos = rb_entry(nd, struct map, rb_node);
+
+		printed += fprintf(fp, "Map:");
+		printed += map__fprintf(pos, fp);
+		if (v > 1) {
+			printed += dso__fprintf(pos->dso, fp);
+			printed += fprintf(fp, "--\n");
+		}
+	}
+
+	return printed + fprintf(stderr, "END kernel maps\n");
+}
+
+static int dso__load_perf_map(struct dso *self, struct map *map,
+			      symbol_filter_t filter, int v)
 {
 	char *line = NULL;
 	size_t n;
 	FILE *file;
 	int nr_syms = 0;
 
-	file = fopen(self->name, "r");
+	file = fopen(self->long_name, "r");
 	if (file == NULL)
 		goto out_failure;
 
@@ -279,12 +428,12 @@
 			continue;
 
 		sym = symbol__new(start, size, line + len,
-				  self->sym_priv_size, start, v);
+				  self->sym_priv_size, v);
 
 		if (sym == NULL)
 			goto out_delete_line;
 
-		if (filter && filter(self, sym))
+		if (filter && filter(map, sym))
 			symbol__delete(sym, self->sym_priv_size);
 		else {
 			dso__insert_symbol(self, sym);
@@ -409,7 +558,7 @@
 	Elf *elf;
 	int nr = 0, symidx, fd, err = 0;
 
-	fd = open(self->name, O_RDONLY);
+	fd = open(self->long_name, O_RDONLY);
 	if (fd < 0)
 		goto out;
 
@@ -477,7 +626,7 @@
 				 "%s@plt", elf_sym__name(&sym, symstrs));
 
 			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
-					sympltname, self->sym_priv_size, 0, v);
+					sympltname, self->sym_priv_size, v);
 			if (!f)
 				goto out_elf_end;
 
@@ -495,7 +644,7 @@
 				 "%s@plt", elf_sym__name(&sym, symstrs));
 
 			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
-					sympltname, self->sym_priv_size, 0, v);
+					sympltname, self->sym_priv_size, v);
 			if (!f)
 				goto out_elf_end;
 
@@ -514,13 +663,17 @@
 		return nr;
 out:
 	fprintf(stderr, "%s: problems reading %s PLT info.\n",
-		__func__, self->name);
+		__func__, self->long_name);
 	return 0;
 }
 
-static int dso__load_sym(struct dso *self, int fd, const char *name,
-			 symbol_filter_t filter, int v, struct module *mod)
+static int dso__load_sym(struct dso *self, struct map *map, const char *name,
+			 int fd, symbol_filter_t filter, int kernel,
+			 int kmodule, int v)
 {
+	struct map *curr_map = map;
+	struct dso *curr_dso = self;
+	size_t dso_name_len = strlen(self->short_name);
 	Elf_Data *symstrs, *secstrs;
 	uint32_t nr_syms;
 	int err = -1;
@@ -531,7 +684,7 @@
 	GElf_Sym sym;
 	Elf_Scn *sec, *sec_strndx;
 	Elf *elf;
-	int nr = 0, kernel = !strcmp("[kernel]", self->name);
+	int nr = 0;
 
 	elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
 	if (elf == NULL) {
@@ -587,9 +740,7 @@
 	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
 		struct symbol *f;
 		const char *elf_name;
-		char *demangled;
-		u64 obj_start;
-		struct section *section = NULL;
+		char *demangled = NULL;
 		int is_label = elf_sym__is_label(&sym);
 		const char *section_name;
 
@@ -605,52 +756,85 @@
 		if (is_label && !elf_sec__is_text(&shdr, secstrs))
 			continue;
 
+		elf_name = elf_sym__name(&sym, symstrs);
 		section_name = elf_sec__name(&shdr, secstrs);
-		obj_start = sym.st_value;
 
-		if (self->adjust_symbols) {
-			if (v >= 2)
+		if (kernel || kmodule) {
+			char dso_name[PATH_MAX];
+
+			if (strcmp(section_name,
+				   curr_dso->short_name + dso_name_len) == 0)
+				goto new_symbol;
+
+			if (strcmp(section_name, ".text") == 0) {
+				curr_map = map;
+				curr_dso = self;
+				goto new_symbol;
+			}
+
+			snprintf(dso_name, sizeof(dso_name),
+				 "%s%s", self->short_name, section_name);
+
+			curr_map = kernel_maps__find_by_dso_name(dso_name);
+			if (curr_map == NULL) {
+				u64 start = sym.st_value;
+
+				if (kmodule)
+					start += map->start + shdr.sh_offset;
+
+				curr_dso = dso__new(dso_name, self->sym_priv_size);
+				if (curr_dso == NULL)
+					goto out_elf_end;
+				curr_map = map__new2(start, curr_dso);
+				if (curr_map == NULL) {
+					dso__delete(curr_dso);
+					goto out_elf_end;
+				}
+				curr_map->map_ip = vdso__map_ip;
+				curr_dso->origin = DSO__ORIG_KERNEL;
+				kernel_maps__insert(curr_map);
+				dsos__add(curr_dso);
+			} else
+				curr_dso = curr_map->dso;
+
+			goto new_symbol;
+		}
+
+		if (curr_dso->adjust_symbols) {
+			if (v > 2)
 				printf("adjusting symbol: st_value: %Lx sh_addr: %Lx sh_offset: %Lx\n",
 					(u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset);
 
 			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
 		}
-
-		if (mod) {
-			section = mod->sections->find_section(mod->sections, section_name);
-			if (section)
-				sym.st_value += section->vma;
-			else {
-				fprintf(stderr, "dso__load_sym() module %s lookup of %s failed\n",
-					mod->name, section_name);
-				goto out_elf_end;
-			}
-		}
 		/*
 		 * We need to figure out if the object was created from C++ sources
 		 * DWARF DW_compile_unit has this, but we don't always have access
 		 * to it...
 		 */
-		elf_name = elf_sym__name(&sym, symstrs);
 		demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI);
 		if (demangled != NULL)
 			elf_name = demangled;
-
+new_symbol:
 		f = symbol__new(sym.st_value, sym.st_size, elf_name,
-				self->sym_priv_size, obj_start, v);
+				curr_dso->sym_priv_size, v);
 		free(demangled);
 		if (!f)
 			goto out_elf_end;
 
-		if (filter && filter(self, f))
-			symbol__delete(f, self->sym_priv_size);
+		if (filter && filter(curr_map, f))
+			symbol__delete(f, curr_dso->sym_priv_size);
 		else {
-			f->module = mod;
-			dso__insert_symbol(self, f);
+			dso__insert_symbol(curr_dso, f);
 			nr++;
 		}
 	}
 
+	/*
+	 * For misannotated, zeroed, ASM function sizes.
+	 */
+	if (nr > 0)
+		dso__fixup_sym_end(self);
 	err = nr;
 out_elf_end:
 	elf_end(elf);
@@ -670,7 +854,7 @@
 	char *build_id = NULL, *bid;
 	unsigned char *raw;
 	Elf *elf;
-	int fd = open(self->name, O_RDONLY);
+	int fd = open(self->long_name, O_RDONLY);
 
 	if (fd < 0)
 		goto out;
@@ -679,7 +863,7 @@
 	if (elf == NULL) {
 		if (v)
 			fprintf(stderr, "%s: cannot read %s ELF file.\n",
-				__func__, self->name);
+				__func__, self->long_name);
 		goto out_close;
 	}
 
@@ -708,7 +892,7 @@
 		bid += 2;
 	}
 	if (v >= 2)
-		printf("%s(%s): %s\n", __func__, self->name, build_id);
+		printf("%s(%s): %s\n", __func__, self->long_name, build_id);
 out_elf_end:
 	elf_end(elf);
 out_close:
@@ -726,6 +910,7 @@
 		[DSO__ORIG_UBUNTU] =   'u',
 		[DSO__ORIG_BUILDID] =  'b',
 		[DSO__ORIG_DSO] =      'd',
+		[DSO__ORIG_KMODULE] =  'K',
 	};
 
 	if (self == NULL || self->origin == DSO__ORIG_NOT_FOUND)
@@ -733,7 +918,7 @@
 	return origin[self->origin];
 }
 
-int dso__load(struct dso *self, symbol_filter_t filter, int v)
+int dso__load(struct dso *self, struct map *map, symbol_filter_t filter, int v)
 {
 	int size = PATH_MAX;
 	char *name = malloc(size), *build_id = NULL;
@@ -746,7 +931,7 @@
 	self->adjust_symbols = 0;
 
 	if (strncmp(self->name, "/tmp/perf-", 10) == 0) {
-		ret = dso__load_perf_map(self, filter, v);
+		ret = dso__load_perf_map(self, map, filter, v);
 		self->origin = ret > 0 ? DSO__ORIG_JAVA_JIT :
 					 DSO__ORIG_NOT_FOUND;
 		return ret;
@@ -759,10 +944,12 @@
 		self->origin++;
 		switch (self->origin) {
 		case DSO__ORIG_FEDORA:
-			snprintf(name, size, "/usr/lib/debug%s.debug", self->name);
+			snprintf(name, size, "/usr/lib/debug%s.debug",
+				 self->long_name);
 			break;
 		case DSO__ORIG_UBUNTU:
-			snprintf(name, size, "/usr/lib/debug%s", self->name);
+			snprintf(name, size, "/usr/lib/debug%s",
+				 self->long_name);
 			break;
 		case DSO__ORIG_BUILDID:
 			build_id = dso__read_build_id(self, v);
@@ -776,7 +963,7 @@
 			self->origin++;
 			/* Fall thru */
 		case DSO__ORIG_DSO:
-			snprintf(name, size, "%s", self->name);
+			snprintf(name, size, "%s", self->long_name);
 			break;
 
 		default:
@@ -786,7 +973,7 @@
 		fd = open(name, O_RDONLY);
 	} while (fd < 0);
 
-	ret = dso__load_sym(self, fd, name, filter, v, NULL);
+	ret = dso__load_sym(self, map, name, fd, filter, 0, 0, v);
 	close(fd);
 
 	/*
@@ -807,89 +994,243 @@
 	return ret;
 }
 
-static int dso__load_module(struct dso *self, struct mod_dso *mods, const char *name,
-			     symbol_filter_t filter, int v)
+struct map *kernel_map;
+
+static void kernel_maps__insert(struct map *map)
 {
-	struct module *mod = mod_dso__find_module(mods, name);
-	int err = 0, fd;
+	maps__insert(&kernel_maps, map);
+}
 
-	if (mod == NULL || !mod->active)
+struct symbol *kernel_maps__find_symbol(u64 ip, struct map **mapp)
+{
+	struct map *map = maps__find(&kernel_maps, ip);
+
+	if (mapp)
+		*mapp = map;
+
+	if (map) {
+		ip = map->map_ip(map, ip);
+		return map->dso->find_symbol(map->dso, ip);
+	}
+
+	return NULL;
+}
+
+struct map *kernel_maps__find_by_dso_name(const char *name)
+{
+	struct rb_node *nd;
+
+	for (nd = rb_first(&kernel_maps); nd; nd = rb_next(nd)) {
+		struct map *map = rb_entry(nd, struct map, rb_node);
+
+		if (map->dso && strcmp(map->dso->name, name) == 0)
+			return map;
+	}
+
+	return NULL;
+}
+
+static int dso__load_module_sym(struct dso *self, struct map *map,
+				symbol_filter_t filter, int v)
+{
+	int err = 0, fd = open(self->long_name, O_RDONLY);
+
+	if (fd < 0) {
+		if (v)
+			fprintf(stderr, "%s: cannot open %s\n",
+				__func__, self->long_name);
 		return err;
+	}
 
-	fd = open(mod->path, O_RDONLY);
-
-	if (fd < 0)
-		return err;
-
-	err = dso__load_sym(self, fd, name, filter, v, mod);
+	err = dso__load_sym(self, map, self->long_name, fd, filter, 0, 1, v);
 	close(fd);
 
 	return err;
 }
 
-int dso__load_modules(struct dso *self, symbol_filter_t filter, int v)
+static int dsos__load_modules_sym_dir(char *dirname,
+				      symbol_filter_t filter, int v)
 {
-	struct mod_dso *mods = mod_dso__new_dso("modules");
-	struct module *pos;
-	struct rb_node *next;
-	int err, count = 0;
+	struct dirent *dent;
+	int nr_symbols = 0, err;
+	DIR *dir = opendir(dirname);
 
-	err = mod_dso__load_modules(mods);
-
-	if (err <= 0)
-		return err;
-
-	/*
-	 * Iterate over modules, and load active symbols.
-	 */
-	next = rb_first(&mods->mods);
-	while (next) {
-		pos = rb_entry(next, struct module, rb_node);
-		err = dso__load_module(self, mods, pos->name, filter, v);
-
-		if (err < 0)
-			break;
-
-		next = rb_next(&pos->rb_node);
-		count += err;
+	if (!dir) {
+		if (v)
+			fprintf(stderr, "%s: cannot open %s dir\n", __func__,
+				dirname);
+		return -1;
 	}
 
-	if (err < 0) {
-		mod_dso__delete_modules(mods);
-		mod_dso__delete_self(mods);
-		return err;
-	}
+	while ((dent = readdir(dir)) != NULL) {
+		char path[PATH_MAX];
 
-	return count;
-}
+		if (dent->d_type == DT_DIR) {
+			if (!strcmp(dent->d_name, ".") ||
+			    !strcmp(dent->d_name, ".."))
+				continue;
 
-static inline void dso__fill_symbol_holes(struct dso *self)
-{
-	struct symbol *prev = NULL;
-	struct rb_node *nd;
+			snprintf(path, sizeof(path), "%s/%s",
+				 dirname, dent->d_name);
+			err = dsos__load_modules_sym_dir(path, filter, v);
+			if (err < 0)
+				goto failure;
+		} else {
+			char *dot = strrchr(dent->d_name, '.'),
+			     dso_name[PATH_MAX];
+			struct map *map;
+			struct rb_node *last;
 
-	for (nd = rb_last(&self->syms); nd; nd = rb_prev(nd)) {
-		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
+			if (dot == NULL || strcmp(dot, ".ko"))
+				continue;
+			snprintf(dso_name, sizeof(dso_name), "[%.*s]",
+				 (int)(dot - dent->d_name), dent->d_name);
 
-		if (prev) {
-			u64 hole = 0;
-			int alias = pos->start == prev->start;
+			strxfrchar(dso_name, '-', '_');
+			map = kernel_maps__find_by_dso_name(dso_name);
+			if (map == NULL)
+				continue;
 
-			if (!alias)
-				hole = prev->start - pos->end - 1;
+			snprintf(path, sizeof(path), "%s/%s",
+				 dirname, dent->d_name);
 
-			if (hole || alias) {
-				if (alias)
-					pos->end = prev->end;
-				else if (hole)
-					pos->end = prev->start - 1;
+			map->dso->long_name = strdup(path);
+			if (map->dso->long_name == NULL)
+				goto failure;
+
+			err = dso__load_module_sym(map->dso, map, filter, v);
+			if (err < 0)
+				goto failure;
+			last = rb_last(&map->dso->syms);
+			if (last) {
+				struct symbol *sym;
+				/*
+				 * We do this here as well, even having the
+				 * symbol size found in the symtab because
+				 * misannotated ASM symbols may have the size
+				 * set to zero.
+				 */
+				dso__fixup_sym_end(map->dso);
+
+				sym = rb_entry(last, struct symbol, rb_node);
+				map->end = map->start + sym->end;
 			}
 		}
-		prev = pos;
+		nr_symbols += err;
 	}
+
+	return nr_symbols;
+failure:
+	closedir(dir);
+	return -1;
 }
 
-static int dso__load_vmlinux(struct dso *self, const char *vmlinux,
+static int dsos__load_modules_sym(symbol_filter_t filter, int v)
+{
+	struct utsname uts;
+	char modules_path[PATH_MAX];
+
+	if (uname(&uts) < 0)
+		return -1;
+
+	snprintf(modules_path, sizeof(modules_path), "/lib/modules/%s/kernel",
+		 uts.release);
+
+	return dsos__load_modules_sym_dir(modules_path, filter, v);
+}
+
+/*
+ * Constructor variant for modules (where we know from /proc/modules where
+ * they are loaded) and for vmlinux, where only after we load all the
+ * symbols we'll know where it starts and ends.
+ */
+static struct map *map__new2(u64 start, struct dso *dso)
+{
+	struct map *self = malloc(sizeof(*self));
+
+	if (self != NULL) {
+		self->start = start;
+		/*
+		 * Will be filled after we load all the symbols
+		 */
+		self->end = 0;
+
+		self->pgoff = 0;
+		self->dso = dso;
+		self->map_ip = map__map_ip;
+		RB_CLEAR_NODE(&self->rb_node);
+	}
+	return self;
+}
+
+static int dsos__load_modules(unsigned int sym_priv_size)
+{
+	char *line = NULL;
+	size_t n;
+	FILE *file = fopen("/proc/modules", "r");
+	struct map *map;
+
+	if (file == NULL)
+		return -1;
+
+	while (!feof(file)) {
+		char name[PATH_MAX];
+		u64 start;
+		struct dso *dso;
+		char *sep;
+		int line_len;
+
+		line_len = getline(&line, &n, file);
+		if (line_len < 0)
+			break;
+
+		if (!line)
+			goto out_failure;
+
+		line[--line_len] = '\0'; /* \n */
+
+		sep = strrchr(line, 'x');
+		if (sep == NULL)
+			continue;
+
+		hex2u64(sep + 1, &start);
+
+		sep = strchr(line, ' ');
+		if (sep == NULL)
+			continue;
+
+		*sep = '\0';
+
+		snprintf(name, sizeof(name), "[%s]", line);
+		dso = dso__new(name, sym_priv_size);
+
+		if (dso == NULL)
+			goto out_delete_line;
+
+		map = map__new2(start, dso);
+		if (map == NULL) {
+			dso__delete(dso);
+			goto out_delete_line;
+		}
+
+		dso->origin = DSO__ORIG_KMODULE;
+		kernel_maps__insert(map);
+		dsos__add(dso);
+	}
+
+	free(line);
+	fclose(file);
+
+	return 0;
+
+out_delete_line:
+	free(line);
+out_failure:
+	return -1;
+}
+
+static int dso__load_vmlinux(struct dso *self, struct map *map,
+			     const char *vmlinux,
 			     symbol_filter_t filter, int v)
 {
 	int err, fd = open(vmlinux, O_RDONLY);
@@ -897,47 +1238,82 @@
 	if (fd < 0)
 		return -1;
 
-	err = dso__load_sym(self, fd, vmlinux, filter, v, NULL);
-
-	if (err > 0)
-		dso__fill_symbol_holes(self);
+	err = dso__load_sym(self, map, self->long_name, fd, filter, 1, 0, v);
 
 	close(fd);
 
 	return err;
 }
 
-int dso__load_kernel(struct dso *self, const char *vmlinux,
-		     symbol_filter_t filter, int v, int use_modules)
+int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size,
+		      symbol_filter_t filter, int v, int use_modules)
 {
 	int err = -1;
+	struct dso *dso = dso__new(vmlinux, sym_priv_size);
+
+	if (dso == NULL)
+		return -1;
+
+	dso->short_name = "[kernel]";
+	kernel_map = map__new2(0, dso);
+	if (kernel_map == NULL)
+		goto out_delete_dso;
+
+	kernel_map->map_ip = vdso__map_ip;
+
+	if (use_modules && dsos__load_modules(sym_priv_size) < 0) {
+		fprintf(stderr, "Failed to load list of modules in use! "
+				"Continuing...\n");
+		use_modules = 0;
+	}
 
 	if (vmlinux) {
-		err = dso__load_vmlinux(self, vmlinux, filter, v);
+		err = dso__load_vmlinux(dso, kernel_map, vmlinux, filter, v);
 		if (err > 0 && use_modules) {
-			int syms = dso__load_modules(self, filter, v);
+			int syms = dsos__load_modules_sym(filter, v);
 
-			if (syms < 0) {
-				fprintf(stderr, "dso__load_modules failed!\n");
-				return syms;
-			}
-			err += syms;
+			if (syms < 0)
+				fprintf(stderr, "Failed to read module symbols!"
+					" Continuing...\n");
+			else
+				err += syms;
 		}
 	}
 
 	if (err <= 0)
-		err = dso__load_kallsyms(self, filter, v);
+		err = kernel_maps__load_kallsyms(filter, use_modules, v);
 
-	if (err > 0)
-		self->origin = DSO__ORIG_KERNEL;
+	if (err > 0) {
+		struct rb_node *node = rb_first(&dso->syms);
+		struct symbol *sym = rb_entry(node, struct symbol, rb_node);
+
+		kernel_map->start = sym->start;
+		node = rb_last(&dso->syms);
+		sym = rb_entry(node, struct symbol, rb_node);
+		kernel_map->end = sym->end;
+
+		dso->origin = DSO__ORIG_KERNEL;
+		kernel_maps__insert(kernel_map);
+		/*
+		 * Now that we have all the symbols sorted out, just set the
+		 * ->end of all maps:
+		 */
+		kernel_maps__fixup_end();
+		dsos__add(dso);
+
+		if (v > 0)
+			kernel_maps__fprintf(stderr, v);
+	}
 
 	return err;
+
+out_delete_dso:
+	dso__delete(dso);
+	return -1;
 }
 
 LIST_HEAD(dsos);
-struct dso	*kernel_dso;
 struct dso	*vdso;
-struct dso	*hypervisor_dso;
 
 const char	*vmlinux_name = "vmlinux";
 int		modules;
@@ -969,7 +1345,7 @@
 	if (!dso)
 		goto out_delete_dso;
 
-	nr = dso__load(dso, NULL, verbose);
+	nr = dso__load(dso, NULL, NULL, verbose);
 	if (nr < 0) {
 		eprintf("Failed to open: %s\n", name);
 		goto out_delete_dso;
@@ -994,43 +1370,20 @@
 		dso__fprintf(pos, fp);
 }
 
-static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
-{
-	return dso__find_symbol(dso, ip);
-}
-
 int load_kernel(void)
 {
-	int err;
-
-	kernel_dso = dso__new("[kernel]", 0);
-	if (!kernel_dso)
+	if (dsos__load_kernel(vmlinux_name, 0, NULL, verbose, modules) <= 0)
 		return -1;
 
-	err = dso__load_kernel(kernel_dso, vmlinux_name, NULL, verbose, modules);
-	if (err <= 0) {
-		dso__delete(kernel_dso);
-		kernel_dso = NULL;
-	} else
-		dsos__add(kernel_dso);
-
 	vdso = dso__new("[vdso]", 0);
 	if (!vdso)
 		return -1;
 
-	vdso->find_symbol = vdso__find_symbol;
-
 	dsos__add(vdso);
 
-	hypervisor_dso = dso__new("[hypervisor]", 0);
-	if (!hypervisor_dso)
-		return -1;
-	dsos__add(hypervisor_dso);
-
-	return err;
+	return 0;
 }
 
-
 void symbol__init(void)
 {
 	elf_version(EV_CURRENT);
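dsos__load_modules() above derives one map per loaded module from /proc/modules: the first field is the module name (stored as "[name]") and the last "0x..." field, found via strrchr(), is the load address. A standalone parse of one fabricated line using the same approach:

/* Standalone illustration of the /proc/modules parsing done above;
 * the sample line is fabricated. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char line[] = "e1000e 212992 0 - Live 0xffffffffa0070000";
	unsigned long long start = 0;
	char *sep;

	sep = strrchr(line, 'x');		/* last "0x..." is the base */
	if (sep)
		start = strtoull(sep + 1, NULL, 16);

	sep = strchr(line, ' ');		/* first field is the name */
	if (sep)
		*sep = '\0';

	printf("[%s] loaded at %#llx\n", line, start);
	return 0;
}
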
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 6e84907..2e4522e 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -1,11 +1,10 @@
-#ifndef _PERF_SYMBOL_
-#define _PERF_SYMBOL_ 1
+#ifndef __PERF_SYMBOL
+#define __PERF_SYMBOL 1
 
 #include <linux/types.h>
 #include "types.h"
 #include <linux/list.h>
 #include <linux/rbtree.h>
-#include "module.h"
 #include "event.h"
 
 #ifdef HAVE_CPLUS_DEMANGLE
@@ -36,10 +35,8 @@
 	struct rb_node	rb_node;
 	u64		start;
 	u64		end;
-	u64		obj_start;
 	u64		hist_sum;
 	u64		*hist;
-	struct module	*module;
 	void		*priv;
 	char		name[0];
 };
@@ -52,12 +49,14 @@
 	unsigned char	 adjust_symbols;
 	unsigned char	 slen_calculated;
 	unsigned char	 origin;
+	const char	 *short_name;
+	char		 *long_name;
 	char		 name[0];
 };
 
 extern const char *sym_hist_filter;
 
-typedef int (*symbol_filter_t)(struct dso *self, struct symbol *sym);
+typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
 
 struct dso *dso__new(const char *name, unsigned int sym_priv_size);
 void dso__delete(struct dso *self);
@@ -69,10 +68,10 @@
 
 struct symbol *dso__find_symbol(struct dso *self, u64 ip);
 
-int dso__load_kernel(struct dso *self, const char *vmlinux,
-		     symbol_filter_t filter, int verbose, int modules);
-int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose);
-int dso__load(struct dso *self, symbol_filter_t filter, int verbose);
+int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size,
+		      symbol_filter_t filter, int verbose, int modules);
+int dso__load(struct dso *self, struct map *map, symbol_filter_t filter,
+	      int verbose);
 struct dso *dsos__findnew(const char *name);
 void dsos__fprintf(FILE *fp);
 
@@ -84,9 +83,8 @@
 void symbol__init(void);
 
 extern struct list_head dsos;
-extern struct dso *kernel_dso;
+extern struct map *kernel_map;
 extern struct dso *vdso;
-extern struct dso *hypervisor_dso;
 extern const char *vmlinux_name;
 extern int   modules;
-#endif /* _PERF_SYMBOL_ */
+#endif /* __PERF_SYMBOL */
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 45efb5d..3b56aeb 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -15,7 +15,8 @@
 		self->comm = malloc(32);
 		if (self->comm)
 			snprintf(self->comm, 32, ":%d", self->pid);
-		INIT_LIST_HEAD(&self->maps);
+		self->maps = RB_ROOT;
+		INIT_LIST_HEAD(&self->removed_maps);
 	}
 
 	return self;
@@ -31,10 +32,19 @@
 
 static size_t thread__fprintf(struct thread *self, FILE *fp)
 {
+	struct rb_node *nd;
 	struct map *pos;
-	size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
+	size_t ret = fprintf(fp, "Thread %d %s\nCurrent maps:\n",
+			     self->pid, self->comm);
 
-	list_for_each_entry(pos, &self->maps, node)
+	for (nd = rb_first(&self->maps); nd; nd = rb_next(nd)) {
+		pos = rb_entry(nd, struct map, rb_node);
+		ret += map__fprintf(pos, fp);
+	}
+
+	ret += fprintf(fp, "Removed maps:\n");
+
+	list_for_each_entry(pos, &self->removed_maps, node)
 		ret += map__fprintf(pos, fp);
 
 	return ret;
@@ -93,42 +103,82 @@
 	return thread;
 }
 
-void thread__insert_map(struct thread *self, struct map *map)
+static void thread__remove_overlappings(struct thread *self, struct map *map)
 {
-	struct map *pos, *tmp;
+	struct rb_node *next = rb_first(&self->maps);
 
-	list_for_each_entry_safe(pos, tmp, &self->maps, node) {
-		if (map__overlap(pos, map)) {
-			if (verbose >= 2) {
-				printf("overlapping maps:\n");
-				map__fprintf(map, stdout);
-				map__fprintf(pos, stdout);
-			}
+	while (next) {
+		struct map *pos = rb_entry(next, struct map, rb_node);
+		next = rb_next(&pos->rb_node);
 
-			if (map->start <= pos->start && map->end > pos->start)
-				pos->start = map->end;
+		if (!map__overlap(pos, map))
+			continue;
 
-			if (map->end >= pos->end && map->start < pos->end)
-				pos->end = map->start;
-
-			if (verbose >= 2) {
-				printf("after collision:\n");
-				map__fprintf(pos, stdout);
-			}
-
-			if (pos->start >= pos->end) {
-				list_del_init(&pos->node);
-				free(pos);
-			}
+		if (verbose >= 2) {
+			printf("overlapping maps:\n");
+			map__fprintf(map, stdout);
+			map__fprintf(pos, stdout);
 		}
+
+		rb_erase(&pos->rb_node, &self->maps);
+		/*
+		 * We may have references to this map, for instance in some
+		 * hist_entry instances, so just move them to a separate
+		 * list.
+		 */
+		list_add_tail(&pos->node, &self->removed_maps);
+	}
+}
+
+void maps__insert(struct rb_root *maps, struct map *map)
+{
+	struct rb_node **p = &maps->rb_node;
+	struct rb_node *parent = NULL;
+	const u64 ip = map->start;
+	struct map *m;
+
+	while (*p != NULL) {
+		parent = *p;
+		m = rb_entry(parent, struct map, rb_node);
+		if (ip < m->start)
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
 	}
 
-	list_add_tail(&map->node, &self->maps);
+	rb_link_node(&map->rb_node, parent, p);
+	rb_insert_color(&map->rb_node, maps);
+}
+
+struct map *maps__find(struct rb_root *maps, u64 ip)
+{
+	struct rb_node **p = &maps->rb_node;
+	struct rb_node *parent = NULL;
+	struct map *m;
+
+	while (*p != NULL) {
+		parent = *p;
+		m = rb_entry(parent, struct map, rb_node);
+		if (ip < m->start)
+			p = &(*p)->rb_left;
+		else if (ip > m->end)
+			p = &(*p)->rb_right;
+		else
+			return m;
+	}
+
+	return NULL;
+}
+
+void thread__insert_map(struct thread *self, struct map *map)
+{
+	thread__remove_overlappings(self, map);
+	maps__insert(&self->maps, map);
 }
 
 int thread__fork(struct thread *self, struct thread *parent)
 {
-	struct map *map;
+	struct rb_node *nd;
 
 	if (self->comm)
 		free(self->comm);
@@ -136,7 +186,8 @@
 	if (!self->comm)
 		return -ENOMEM;
 
-	list_for_each_entry(map, &parent->maps, node) {
+	for (nd = rb_first(&parent->maps); nd; nd = rb_next(nd)) {
+		struct map *map = rb_entry(nd, struct map, rb_node);
 		struct map *new = map__clone(map);
 		if (!new)
 			return -ENOMEM;
@@ -146,20 +197,6 @@
 	return 0;
 }
 
-struct map *thread__find_map(struct thread *self, u64 ip)
-{
-	struct map *pos;
-
-	if (self == NULL)
-		return NULL;
-
-	list_for_each_entry(pos, &self->maps, node)
-		if (ip >= pos->start && ip <= pos->end)
-			return pos;
-
-	return NULL;
-}
-
 size_t threads__fprintf(FILE *fp, struct rb_root *threads)
 {
 	size_t ret = 0;
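Thread maps are now kept in an rb-tree keyed by map->start, and maps__find() walks it comparing the ip against each map's [start, end] range; thread__find_map() becomes a trivial wrapper (see thread.h below). The standalone sketch that follows mimics the same containment lookup over a sorted array instead of an rb-tree; the address ranges are made up.

/* Simplified stand-in for maps__find(): binary search of [start, end]
 * ranges sorted by start, the ordering the rb-tree provides. */
#include <stdio.h>

struct range { unsigned long long start, end; const char *name; };

static const struct range *ranges__find(const struct range *r, int n,
					unsigned long long ip)
{
	int lo = 0, hi = n - 1;

	while (lo <= hi) {
		int mid = (lo + hi) / 2;

		if (ip < r[mid].start)
			hi = mid - 1;
		else if (ip > r[mid].end)
			lo = mid + 1;
		else
			return &r[mid];
	}
	return NULL;
}

int main(void)
{
	static const struct range maps[] = {
		{ 0x400000ULL,		0x4fffffULL,		"a.out" },
		{ 0x7f0000000000ULL,	0x7f00001fffffULL,	"libc"  },
	};
	const struct range *m = ranges__find(maps, 2, 0x7f0000000010ULL);

	puts(m ? m->name : "not found");
	return 0;
}
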
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 32aea3c..845d9b6 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -1,11 +1,14 @@
+#ifndef __PERF_THREAD_H
+#define __PERF_THREAD_H
+
 #include <linux/rbtree.h>
-#include <linux/list.h>
 #include <unistd.h>
 #include "symbol.h"
 
 struct thread {
 	struct rb_node		rb_node;
-	struct list_head	maps;
+	struct rb_root		maps;
+	struct list_head	removed_maps;
 	pid_t			pid;
 	char			shortname[3];
 	char			*comm;
@@ -18,5 +21,17 @@
 register_idle_thread(struct rb_root *threads, struct thread **last_match);
 void thread__insert_map(struct thread *self, struct map *map);
 int thread__fork(struct thread *self, struct thread *parent);
-struct map *thread__find_map(struct thread *self, u64 ip);
 size_t threads__fprintf(FILE *fp, struct rb_root *threads);
+
+void maps__insert(struct rb_root *maps, struct map *map);
+struct map *maps__find(struct rb_root *maps, u64 ip);
+
+struct symbol *kernel_maps__find_symbol(const u64 ip, struct map **mapp);
+struct map *kernel_maps__find_by_dso_name(const char *name);
+
+static inline struct map *thread__find_map(struct thread *self, u64 ip)
+{
+	return self ? maps__find(&self->maps, ip) : NULL;
+}
+
+#endif	/* __PERF_THREAD_H */
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index af4b057..831052d 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -496,14 +496,12 @@
 
 	return path.next;
 }
-void read_tracing_data(struct perf_event_attr *pattrs, int nb_events)
+void read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events)
 {
 	char buf[BUFSIZ];
 	struct tracepoint_path *tps;
 
-	output_fd = open(output_file, O_WRONLY | O_CREAT | O_TRUNC | O_LARGEFILE, 0644);
-	if (output_fd < 0)
-		die("creating file '%s'", output_file);
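+	/* The caller now opens the output file; write the tracing data to its fd. */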
+	output_fd = fd;
 
 	buf[0] = 23;
 	buf[1] = 8;
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 55c9659..eef60df 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -721,6 +721,24 @@
 	return -1;
 }
 
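+/*
+ * An array of char/u8/s8 elements is printed as a string.
+ */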
+static int field_is_string(struct format_field *field)
+{
+	if ((field->flags & FIELD_IS_ARRAY) &&
+	    (strstr(field->type, "char") || strstr(field->type, "u8") ||
+	     strstr(field->type, "s8")))
+		return 1;
+
+	return 0;
+}
+
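+/*
+ * A "__data_loc" field refers to variable-length data stored elsewhere in
+ * the record; its type string starts with "__data_loc".
+ */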
+static int field_is_dynamic(struct format_field *field)
+{
+	if (!strncmp(field->type, "__data_loc", 10))
+		return 1;
+
+	return 0;
+}
+
 static int event_read_fields(struct event *event, struct format_field **fields)
 {
 	struct format_field *field = NULL;
@@ -865,6 +883,12 @@
 			free(brackets);
 		}
 
+		if (field_is_string(field)) {
+			field->flags |= FIELD_IS_STRING;
+			if (field_is_dynamic(field))
+				field->flags |= FIELD_IS_DYNAMIC;
+		}
+
 		if (test_type_token(type, token,  EVENT_OP, (char *)";"))
 			goto fail;
 		free_token(token);
@@ -897,6 +921,21 @@
 		if (read_expected(EVENT_OP, (char *)";") < 0)
 			goto fail_expect;
 
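+		/*
+		 * The field description also carries a "signed:<0|1>;"
+		 * attribute; record the signedness in the field's flags.
+		 */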
+		if (read_expected(EVENT_ITEM, (char *)"signed") < 0)
+			goto fail_expect;
+
+		if (read_expected(EVENT_OP, (char *)":") < 0)
+			goto fail_expect;
+
+		if (read_expect_type(EVENT_ITEM, &token))
+			goto fail;
+		if (strtoul(token, NULL, 0))
+			field->flags |= FIELD_IS_SIGNED;
+		free_token(token);
+
+		if (read_expected(EVENT_OP, (char *)";") < 0)
+			goto fail_expect;
+
 		if (read_expect_type(EVENT_NEWLINE, &token) < 0)
 			goto fail;
 		free_token(token);
@@ -2845,6 +2884,15 @@
 	free_token(token);
 	if (read_expected(EVENT_OP, (char *)";") < 0)
 		return;
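+	/* Accept and skip the "signed:<0|1>;" attribute here. */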
+	if (read_expected(EVENT_ITEM, (char *)"signed") < 0)
+		return;
+	if (read_expected(EVENT_OP, (char *)":") < 0)
+		return;
+	if (read_expect_type(EVENT_ITEM, &token) < 0)
+		return;
+	free_token(token);
+	if (read_expected(EVENT_OP, (char *)";") < 0)
+		return;
 	if (read_expect_type(EVENT_NEWLINE, &token) < 0)
 		return;
 	free_token(token);
@@ -2926,7 +2974,7 @@
 	return 0;
 }
 
-int parse_event_file(char *buf, unsigned long size, char *system__unused __unused)
+int parse_event_file(char *buf, unsigned long size, char *sys)
 {
 	struct event *event;
 	int ret;
@@ -2953,6 +3001,8 @@
 	if (ret < 0)
 		die("failed to read event print fmt");
 
+	event->system = strdup(sys);
+
 #define PRINT_ARGS 0
 	if (PRINT_ARGS && event->print_fmt.args)
 		print_args(event->print_fmt.args);
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index 1b5c847..44292e0 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -458,9 +458,8 @@
 	return data;
 }
 
-void trace_report(void)
+void trace_report(int fd)
 {
-	const char *input_file = "trace.info";
 	char buf[BUFSIZ];
 	char test[] = { 23, 8, 68 };
 	char *version;
@@ -468,9 +467,7 @@
 	int show_funcs = 0;
 	int show_printk = 0;
 
-	input_fd = open(input_file, O_RDONLY);
-	if (input_fd < 0)
-		die("opening '%s'\n", input_file);
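+	/* The caller now opens the trace data file; read from the fd it passes in. */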
+	input_fd = fd;
 
 	read_or_die(buf, 3);
 	if (memcmp(buf, test, 3) != 0)
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 693f815..da77e07 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -1,5 +1,5 @@
-#ifndef _TRACE_EVENTS_H
-#define _TRACE_EVENTS_H
+#ifndef __PERF_TRACE_EVENTS_H
+#define __PERF_TRACE_EVENTS_H
 
 #include "parse-events.h"
 
@@ -26,6 +26,9 @@
 enum format_flags {
 	FIELD_IS_ARRAY		= 1,
 	FIELD_IS_POINTER	= 2,
+	FIELD_IS_SIGNED		= 4,
+	FIELD_IS_STRING		= 8,
+	FIELD_IS_DYNAMIC	= 16,
 };
 
 struct format_field {
@@ -132,6 +135,7 @@
 	int			flags;
 	struct format		format;
 	struct print_fmt	print_fmt;
+	char			*system;
 };
 
 enum {
@@ -154,7 +158,7 @@
 
 void parse_set_info(int nr_cpus, int long_sz);
 
-void trace_report(void);
+void trace_report(int fd);
 
 void *malloc_or_die(unsigned int size);
 
@@ -166,7 +170,7 @@
 void print_printk(void);
 
 int parse_ftrace_file(char *buf, unsigned long size);
-int parse_event_file(char *buf, unsigned long size, char *system);
+int parse_event_file(char *buf, unsigned long size, char *sys);
 void print_event(int cpu, void *data, int size, unsigned long long nsecs,
 		  char *comm);
 
@@ -240,6 +244,6 @@
 raw_field_value(struct event *event, const char *name, void *data);
 void *raw_field_ptr(struct event *event, const char *name, void *data);
 
-void read_tracing_data(struct perf_event_attr *pattrs, int nb_events);
+void read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events);
 
-#endif /* _TRACE_EVENTS_H */
+#endif /* __PERF_TRACE_EVENTS_H */
diff --git a/tools/perf/util/types.h b/tools/perf/util/types.h
index 5e75f90..7d6b833 100644
--- a/tools/perf/util/types.h
+++ b/tools/perf/util/types.h
@@ -1,5 +1,5 @@
-#ifndef _PERF_TYPES_H
-#define _PERF_TYPES_H
+#ifndef __PERF_TYPES_H
+#define __PERF_TYPES_H
 
 /*
  * We define u64 as unsigned long long for every architecture
@@ -14,4 +14,4 @@
 typedef unsigned char	   u8;
 typedef signed char	   s8;
 
-#endif /* _PERF_TYPES_H */
+#endif /* __PERF_TYPES_H */
diff --git a/tools/perf/util/values.h b/tools/perf/util/values.h
index cadf8cf..2fa967e 100644
--- a/tools/perf/util/values.h
+++ b/tools/perf/util/values.h
@@ -1,5 +1,5 @@
-#ifndef _PERF_VALUES_H
-#define _PERF_VALUES_H
+#ifndef __PERF_VALUES_H
+#define __PERF_VALUES_H
 
 #include "types.h"
 
@@ -24,4 +24,4 @@
 void perf_read_values_display(FILE *fp, struct perf_read_values *values,
 			      int raw);
 
-#endif /* _PERF_VALUES_H */
+#endif /* __PERF_VALUES_H */