Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
new file mode 100644
index 0000000..55720dc
--- /dev/null
+++ b/drivers/oprofile/buffer_sync.c
@@ -0,0 +1,547 @@
+/**
+ * @file buffer_sync.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ *
+ * This is the core of the buffer management. Each
+ * CPU buffer is processed and entered into the
+ * global event buffer. Such processing is necessary
+ * in several circumstances, mentioned below.
+ *
+ * The processing does the job of converting the
+ * transitory EIP value into a persistent dentry/offset
+ * value that the profiler can record at its leisure.
+ *
+ * See fs/dcookies.c for a description of the dentry/offset
+ * objects.
+ */
+
+#include <linux/mm.h>
+#include <linux/workqueue.h>
+#include <linux/notifier.h>
+#include <linux/dcookies.h>
+#include <linux/profile.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+ 
+#include "oprofile_stats.h"
+#include "event_buffer.h"
+#include "cpu_buffer.h"
+#include "buffer_sync.h"
+ 
+static LIST_HEAD(dying_tasks);
+static LIST_HEAD(dead_tasks);
+static cpumask_t marked_cpus = CPU_MASK_NONE;
+static DEFINE_SPINLOCK(task_mortuary);
+static void process_task_mortuary(void);
+
+
+/* Take ownership of the task struct and place it on the
+ * list for processing. Only after two full buffer syncs
+ * does the task eventually get freed, because by then
+ * we are sure we will not reference it again.
+ */
+static int task_free_notify(struct notifier_block * self, unsigned long val, void * data)
+{
+	struct task_struct * task = data;
+	spin_lock(&task_mortuary);
+	list_add(&task->tasks, &dying_tasks);
+	spin_unlock(&task_mortuary);
+	return NOTIFY_OK;
+}
+
+
+/* The task is on its way out. A sync of the buffer means we can catch
+ * any remaining samples for this task.
+ */
+static int task_exit_notify(struct notifier_block * self, unsigned long val, void * data)
+{
+	/* To avoid latency problems, we only process the current CPU,
+	 * hoping that most samples for the task are on this CPU
+	 */
+	sync_buffer(_smp_processor_id());
+  	return 0;
+}
+
+
+/* The task is about to try a do_munmap(). We peek at what it's going to
+ * do, and if it's an executable region, process the samples first, so
+ * we don't lose any. This does not have to be exact, it's a QoI issue
+ * only.
+ */
+static int munmap_notify(struct notifier_block * self, unsigned long val, void * data)
+{
+	unsigned long addr = (unsigned long)data;
+	struct mm_struct * mm = current->mm;
+	struct vm_area_struct * mpnt;
+
+	down_read(&mm->mmap_sem);
+
+	mpnt = find_vma(mm, addr);
+	if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
+		up_read(&mm->mmap_sem);
+		/* To avoid latency problems, we only process the current CPU,
+		 * hoping that most samples for the task are on this CPU
+		 */
+		sync_buffer(_smp_processor_id());
+		return 0;
+	}
+
+	up_read(&mm->mmap_sem);
+	return 0;
+}
+
+ 
+/* We need to be told about new modules so we don't attribute to a previously
+ * loaded module, or drop the samples on the floor.
+ */
+static int module_load_notify(struct notifier_block * self, unsigned long val, void * data)
+{
+#ifdef CONFIG_MODULES
+	if (val != MODULE_STATE_COMING)
+		return 0;
+
+	/* FIXME: should we process all CPU buffers ? */
+	down(&buffer_sem);
+	add_event_entry(ESCAPE_CODE);
+	add_event_entry(MODULE_LOADED_CODE);
+	up(&buffer_sem);
+#endif
+	return 0;
+}
+
+ 
+static struct notifier_block task_free_nb = {
+	.notifier_call	= task_free_notify,
+};
+
+static struct notifier_block task_exit_nb = {
+	.notifier_call	= task_exit_notify,
+};
+
+static struct notifier_block munmap_nb = {
+	.notifier_call	= munmap_notify,
+};
+
+static struct notifier_block module_load_nb = {
+	.notifier_call = module_load_notify,
+};
+
+ 
+static void end_sync(void)
+{
+	end_cpu_work();
+	/* make sure we don't leak task structs */
+	process_task_mortuary();
+	process_task_mortuary();
+}
+
+
+int sync_start(void)
+{
+	int err;
+
+	start_cpu_work();
+
+	err = task_handoff_register(&task_free_nb);
+	if (err)
+		goto out1;
+	err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
+	if (err)
+		goto out2;
+	err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
+	if (err)
+		goto out3;
+	err = register_module_notifier(&module_load_nb);
+	if (err)
+		goto out4;
+
+out:
+	return err;
+out4:
+	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
+out3:
+	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
+out2:
+	task_handoff_unregister(&task_free_nb);
+out1:
+	end_sync();
+	goto out;
+}
+
+
+void sync_stop(void)
+{
+	unregister_module_notifier(&module_load_nb);
+	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
+	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
+	task_handoff_unregister(&task_free_nb);
+	end_sync();
+}
+
+ 
+/* Optimisation. We can manage without taking the dcookie sem
+ * because we cannot reach this code without at least one
+ * dcookie user still being registered (namely, the reader
+ * of the event buffer). */
+static inline unsigned long fast_get_dcookie(struct dentry * dentry,
+	struct vfsmount * vfsmnt)
+{
+	unsigned long cookie;
+ 
+	if (dentry->d_cookie)
+		return (unsigned long)dentry;
+	get_dcookie(dentry, vfsmnt, &cookie);
+	return cookie;
+}
+
+ 
+/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
+ * which corresponds loosely to "application name". This is
+ * not strictly necessary but allows oprofile to associate
+ * shared-library samples with particular applications
+ */
+static unsigned long get_exec_dcookie(struct mm_struct * mm)
+{
+	unsigned long cookie = 0;
+	struct vm_area_struct * vma;
+ 
+	if (!mm)
+		goto out;
+ 
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		if (!vma->vm_file)
+			continue;
+		if (!(vma->vm_flags & VM_EXECUTABLE))
+			continue;
+		cookie = fast_get_dcookie(vma->vm_file->f_dentry,
+			vma->vm_file->f_vfsmnt);
+		break;
+	}
+
+out:
+	return cookie;
+}
+
+
+/* Convert the EIP value of a sample into a persistent dentry/offset
+ * pair that can then be added to the global event buffer. We make
+ * sure to do this lookup before a mm->mmap modification happens so
+ * we don't lose track.
+ */
+static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, off_t * offset)
+{
+	unsigned long cookie = 0;
+	struct vm_area_struct * vma;
+
+	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
+ 
+		if (!vma->vm_file)
+			continue;
+
+		if (addr < vma->vm_start || addr >= vma->vm_end)
+			continue;
+
+		cookie = fast_get_dcookie(vma->vm_file->f_dentry,
+			vma->vm_file->f_vfsmnt);
+		*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr - vma->vm_start; 
+		break;
+	}
+
+	return cookie;
+}
+
+
+static unsigned long last_cookie = ~0UL;
+ 
+static void add_cpu_switch(int i)
+{
+	add_event_entry(ESCAPE_CODE);
+	add_event_entry(CPU_SWITCH_CODE);
+	add_event_entry(i);
+	last_cookie = ~0UL;
+}
+
+static void add_kernel_ctx_switch(unsigned int in_kernel)
+{
+	add_event_entry(ESCAPE_CODE);
+	if (in_kernel)
+		add_event_entry(KERNEL_ENTER_SWITCH_CODE); 
+	else
+		add_event_entry(KERNEL_EXIT_SWITCH_CODE); 
+}
+ 
+static void
+add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
+{
+	add_event_entry(ESCAPE_CODE);
+	add_event_entry(CTX_SWITCH_CODE); 
+	add_event_entry(task->pid);
+	add_event_entry(cookie);
+	/* Another code for daemon back-compat */
+	add_event_entry(ESCAPE_CODE);
+	add_event_entry(CTX_TGID_CODE);
+	add_event_entry(task->tgid);
+}
+
+ 
+static void add_cookie_switch(unsigned long cookie)
+{
+	add_event_entry(ESCAPE_CODE);
+	add_event_entry(COOKIE_SWITCH_CODE);
+	add_event_entry(cookie);
+}
+
+ 
+static void add_trace_begin(void)
+{
+	add_event_entry(ESCAPE_CODE);
+	add_event_entry(TRACE_BEGIN_CODE);
+}
+
+
+static void add_sample_entry(unsigned long offset, unsigned long event)
+{
+	add_event_entry(offset);
+	add_event_entry(event);
+}
+
+
+static int add_us_sample(struct mm_struct * mm, struct op_sample * s)
+{
+	unsigned long cookie;
+	off_t offset;
+ 
+ 	cookie = lookup_dcookie(mm, s->eip, &offset);
+ 
+	if (!cookie) {
+		atomic_inc(&oprofile_stats.sample_lost_no_mapping);
+		return 0;
+	}
+
+	if (cookie != last_cookie) {
+		add_cookie_switch(cookie);
+		last_cookie = cookie;
+	}
+
+	add_sample_entry(offset, s->event);
+
+	return 1;
+}
+
+ 
+/* Add a sample to the global event buffer. If possible the
+ * sample is converted into a persistent dentry/offset pair
+ * for later lookup from userspace.
+ */
+static int
+add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
+{
+	if (in_kernel) {
+		add_sample_entry(s->eip, s->event);
+		return 1;
+	} else if (mm) {
+		return add_us_sample(mm, s);
+	} else {
+		atomic_inc(&oprofile_stats.sample_lost_no_mm);
+	}
+	return 0;
+}
+ 
+
+static void release_mm(struct mm_struct * mm)
+{
+	if (!mm)
+		return;
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+}
+
+
+static struct mm_struct * take_tasks_mm(struct task_struct * task)
+{
+	struct mm_struct * mm = get_task_mm(task);
+	if (mm)
+		down_read(&mm->mmap_sem);
+	return mm;
+}
+
+
+static inline int is_code(unsigned long val)
+{
+	return val == ESCAPE_CODE;
+}
+ 
+
+/* "acquire" as many cpu buffer slots as we can */
+static unsigned long get_slots(struct oprofile_cpu_buffer * b)
+{
+	unsigned long head = b->head_pos;
+	unsigned long tail = b->tail_pos;
+
+	/*
+	 * Subtle. This resets the persistent last_task
+	 * and in_kernel values used for switching notes.
+	 * BUT, there is a small window between reading
+	 * head_pos, and this call, that means samples
+	 * can appear at the new head position, but not
+	 * be prefixed with the notes for switching
+	 * kernel mode or a task switch. This small hole
+	 * can lead to mis-attribution or samples where
+	 * we don't know if it's in the kernel or not,
+	 * at the start of an event buffer.
+	 */
+	cpu_buffer_reset(b);
+
+	if (head >= tail)
+		return head - tail;
+
+	return head + (b->buffer_size - tail);
+}
+
+
+static void increment_tail(struct oprofile_cpu_buffer * b)
+{
+	unsigned long new_tail = b->tail_pos + 1;
+
+	rmb();
+
+	if (new_tail < b->buffer_size)
+		b->tail_pos = new_tail;
+	else
+		b->tail_pos = 0;
+}
+
+
+/* Move tasks along towards death. Any tasks on dead_tasks
+ * will definitely have no remaining references in any
+ * CPU buffers at this point, because we use two lists,
+ * and to have reached the list, it must have gone through
+ * one full sync already.
+ */
+static void process_task_mortuary(void)
+{
+	struct list_head * pos;
+	struct list_head * pos2;
+	struct task_struct * task;
+
+	spin_lock(&task_mortuary);
+
+	list_for_each_safe(pos, pos2, &dead_tasks) {
+		task = list_entry(pos, struct task_struct, tasks);
+		list_del(&task->tasks);
+		free_task(task);
+	}
+
+	list_for_each_safe(pos, pos2, &dying_tasks) {
+		task = list_entry(pos, struct task_struct, tasks);
+		list_del(&task->tasks);
+		list_add_tail(&task->tasks, &dead_tasks);
+	}
+
+	spin_unlock(&task_mortuary);
+}
+
+
+static void mark_done(int cpu)
+{
+	int i;
+
+	cpu_set(cpu, marked_cpus);
+
+	for_each_online_cpu(i) {
+		if (!cpu_isset(i, marked_cpus))
+			return;
+	}
+
+	/* All CPUs have been processed at least once,
+	 * we can process the mortuary once
+	 */
+	process_task_mortuary();
+
+	cpus_clear(marked_cpus);
+}
+
+
+/* FIXME: this is not sufficient if we implement syscall barrier backtrace
+ * traversal: the code switches to sb_sample_start at the first kernel
+ * enter/exit switch, so we need a fifth state and some special handling
+ * in sync_buffer() */
+typedef enum {
+	sb_bt_ignore = -2,
+	sb_buffer_start,
+	sb_bt_start,
+	sb_sample_start,
+} sync_buffer_state;
+
+/* Sync one of the CPU's buffers into the global event buffer.
+ * Here we need to go through each batch of samples punctuated
+ * by context switch notes, taking the task's mmap_sem and doing
+ * a lookup in task->mm->mmap to convert the EIP into a dcookie/offset
+ * value.
+ */
+void sync_buffer(int cpu)
+{
+	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[cpu];
+	struct mm_struct *mm = NULL;
+	struct task_struct * new;
+	unsigned long cookie = 0;
+	int in_kernel = 1;
+	unsigned int i;
+	sync_buffer_state state = sb_buffer_start;
+	unsigned long available;
+
+	down(&buffer_sem);
+ 
+	add_cpu_switch(cpu);
+
+	/* Remember, only we can modify tail_pos */
+
+	available = get_slots(cpu_buf);
+
+	for (i = 0; i < available; ++i) {
+		struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];
+ 
+		if (is_code(s->eip)) {
+			if (s->event <= CPU_IS_KERNEL) {
+				/* kernel/userspace switch */
+				in_kernel = s->event;
+				if (state == sb_buffer_start)
+					state = sb_sample_start;
+				add_kernel_ctx_switch(s->event);
+			} else if (s->event == CPU_TRACE_BEGIN) {
+				state = sb_bt_start;
+				add_trace_begin();
+			} else {
+				struct mm_struct * oldmm = mm;
+
+				/* userspace context switch */
+				new = (struct task_struct *)s->event;
+
+				release_mm(oldmm);
+				mm = take_tasks_mm(new);
+				if (mm != oldmm)
+					cookie = get_exec_dcookie(mm);
+				add_user_ctx_switch(new, cookie);
+			}
+		} else {
+			if (state >= sb_bt_start &&
+			    !add_sample(mm, s, in_kernel)) {
+				if (state == sb_bt_start) {
+					state = sb_bt_ignore;
+					atomic_inc(&oprofile_stats.bt_lost_no_mapping);
+				}
+			}
+		}
+
+		increment_tail(cpu_buf);
+	}
+	release_mm(mm);
+
+	mark_done(cpu);
+
+	up(&buffer_sem);
+}
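
The mortuary handling above is effectively a two-stage deferred free: a task parked on dying_tasks is only promoted to dead_tasks after one complete sync pass, and only freed after a second, by which point no CPU buffer can still hold a stale pointer to it. A minimal stand-alone sketch of that pattern (illustrative names, not kernel APIs) looks roughly like this:

/* Sketch of the two-stage "mortuary" pattern used above: an object retired
 * on pass N is promoted on pass N+1 and only freed on pass N+2, so any stale
 * pointer seen during one full pass has expired before the free.
 * Names are illustrative only. */
#include <stdlib.h>

struct node {
	struct node *next;
	void *payload;
};

static struct node *dying;	/* newly retired objects */
static struct node *dead;	/* survived one full pass */

static void retire(struct node *n)
{
	n->next = dying;
	dying = n;
}

/* called once per complete sync pass */
static void process_mortuary(void)
{
	struct node *n, *next;

	/* everything on "dead" has survived a full pass: safe to free */
	for (n = dead; n; n = next) {
		next = n->next;
		free(n);
	}

	/* promote the current "dying" list; it will be freed next pass */
	dead = dying;
	dying = NULL;
}

int main(void)
{
	retire(calloc(1, sizeof(struct node)));
	process_mortuary();	/* promotes it */
	process_mortuary();	/* frees it */
	return 0;
}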
diff --git a/drivers/oprofile/buffer_sync.h b/drivers/oprofile/buffer_sync.h
new file mode 100644
index 0000000..08866f6
--- /dev/null
+++ b/drivers/oprofile/buffer_sync.h
@@ -0,0 +1,22 @@
+/**
+ * @file buffer_sync.h
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#ifndef OPROFILE_BUFFER_SYNC_H
+#define OPROFILE_BUFFER_SYNC_H
+ 
+/* add the necessary profiling hooks */
+int sync_start(void);
+
+/* remove the hooks */
+void sync_stop(void);
+ 
+/* sync the given CPU's buffer */
+void sync_buffer(int cpu);
+
+#endif /* OPROFILE_BUFFER_SYNC_H */
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
new file mode 100644
index 0000000..e9b1772
--- /dev/null
+++ b/drivers/oprofile/cpu_buffer.c
@@ -0,0 +1,307 @@
+/**
+ * @file cpu_buffer.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ *
+ * Each CPU has a local buffer that stores PC value/event
+ * pairs. We also log context switches when we notice them.
+ * Eventually each CPU's buffer is processed into the global
+ * event buffer by sync_buffer().
+ *
+ * We use a local buffer for two reasons: an NMI or similar
+ * interrupt cannot synchronise, and high sampling rates
+ * would lead to catastrophic global synchronisation if
+ * a global buffer were used.
+ */
+
+#include <linux/sched.h>
+#include <linux/oprofile.h>
+#include <linux/vmalloc.h>
+#include <linux/errno.h>
+ 
+#include "event_buffer.h"
+#include "cpu_buffer.h"
+#include "buffer_sync.h"
+#include "oprof.h"
+
+struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;
+
+static void wq_sync_buffer(void *);
+
+#define DEFAULT_TIMER_EXPIRE (HZ / 10)
+static int work_enabled;
+
+void free_cpu_buffers(void)
+{
+	int i;
+ 
+	for_each_online_cpu(i) {
+		vfree(cpu_buffer[i].buffer);
+	}
+}
+ 
+ 
+int alloc_cpu_buffers(void)
+{
+	int i;
+ 
+	unsigned long buffer_size = fs_cpu_buffer_size;
+ 
+	for_each_online_cpu(i) {
+		struct oprofile_cpu_buffer * b = &cpu_buffer[i];
+ 
+		b->buffer = vmalloc(sizeof(struct op_sample) * buffer_size);
+		if (!b->buffer)
+			goto fail;
+ 
+		b->last_task = NULL;
+		b->last_is_kernel = -1;
+		b->tracing = 0;
+		b->buffer_size = buffer_size;
+		b->tail_pos = 0;
+		b->head_pos = 0;
+		b->sample_received = 0;
+		b->sample_lost_overflow = 0;
+		b->cpu = i;
+		INIT_WORK(&b->work, wq_sync_buffer, b);
+	}
+	return 0;
+
+fail:
+	free_cpu_buffers();
+	return -ENOMEM;
+}
+ 
+
+void start_cpu_work(void)
+{
+	int i;
+
+	work_enabled = 1;
+
+	for_each_online_cpu(i) {
+		struct oprofile_cpu_buffer * b = &cpu_buffer[i];
+
+		/*
+		 * Spread the work by 1 jiffy per cpu so they don't all
+		 * fire at once.
+		 */
+		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
+	}
+}
+
+
+void end_cpu_work(void)
+{
+	int i;
+
+	work_enabled = 0;
+
+	for_each_online_cpu(i) {
+		struct oprofile_cpu_buffer * b = &cpu_buffer[i];
+
+		cancel_delayed_work(&b->work);
+	}
+
+	flush_scheduled_work();
+}
+
+
+/* Resets the cpu buffer to a sane state. */
+void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
+{
+	/* reset these to invalid values; the next sample
+	 * collected will populate the buffer with proper
+	 * values to initialize the buffer
+	 */
+	cpu_buf->last_is_kernel = -1;
+	cpu_buf->last_task = NULL;
+}
+
+
+/* compute number of available slots in cpu_buffer queue */
+static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
+{
+	unsigned long head = b->head_pos;
+	unsigned long tail = b->tail_pos;
+
+	if (tail > head)
+		return (tail - head) - 1;
+
+	return tail + (b->buffer_size - head) - 1;
+}
+
+
+static void increment_head(struct oprofile_cpu_buffer * b)
+{
+	unsigned long new_head = b->head_pos + 1;
+
+	/* Ensure anything written to the slot before we
+	 * increment is visible */
+	wmb();
+
+	if (new_head < b->buffer_size)
+		b->head_pos = new_head;
+	else
+		b->head_pos = 0;
+}
+
+
+
+
+inline static void
+add_sample(struct oprofile_cpu_buffer * cpu_buf,
+           unsigned long pc, unsigned long event)
+{
+	struct op_sample * entry = &cpu_buf->buffer[cpu_buf->head_pos];
+	entry->eip = pc;
+	entry->event = event;
+	increment_head(cpu_buf);
+}
+
+
+inline static void
+add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
+{
+	add_sample(buffer, ESCAPE_CODE, value);
+}
+
+
+/* This must be safe from any context. It's safe writing here
+ * because of the head/tail separation of the writer and reader
+ * of the CPU buffer.
+ *
+ * is_kernel is needed because on some architectures you cannot
+ * tell if you are in kernel or user space simply by looking at
+ * pc. We tag this in the buffer by generating kernel enter/exit
+ * events whenever is_kernel changes
+ */
+static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
+		      int is_kernel, unsigned long event)
+{
+	struct task_struct * task;
+
+	cpu_buf->sample_received++;
+
+	if (nr_available_slots(cpu_buf) < 3) {
+		cpu_buf->sample_lost_overflow++;
+		return 0;
+	}
+
+	is_kernel = !!is_kernel;
+
+	task = current;
+
+	/* notice a switch from user->kernel or vice versa */
+	if (cpu_buf->last_is_kernel != is_kernel) {
+		cpu_buf->last_is_kernel = is_kernel;
+		add_code(cpu_buf, is_kernel);
+	}
+
+	/* notice a task switch */
+	if (cpu_buf->last_task != task) {
+		cpu_buf->last_task = task;
+		add_code(cpu_buf, (unsigned long)task);
+	}
+ 
+	add_sample(cpu_buf, pc, event);
+	return 1;
+}
+
+static int oprofile_begin_trace(struct oprofile_cpu_buffer * cpu_buf)
+{
+	if (nr_available_slots(cpu_buf) < 4) {
+		cpu_buf->sample_lost_overflow++;
+		return 0;
+	}
+
+	add_code(cpu_buf, CPU_TRACE_BEGIN);
+	cpu_buf->tracing = 1;
+	return 1;
+}
+
+
+static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf)
+{
+	cpu_buf->tracing = 0;
+}
+
+
+void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
+{
+	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
+	unsigned long pc = profile_pc(regs);
+	int is_kernel = !user_mode(regs);
+
+	if (!backtrace_depth) {
+		log_sample(cpu_buf, pc, is_kernel, event);
+		return;
+	}
+
+	if (!oprofile_begin_trace(cpu_buf))
+		return;
+
+	/* if log_sample() fails we can't backtrace since we lost the source
+	 * of this event */
+	if (log_sample(cpu_buf, pc, is_kernel, event))
+		oprofile_ops.backtrace(regs, backtrace_depth);
+	oprofile_end_trace(cpu_buf);
+}
+
+
+void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
+{
+	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
+	log_sample(cpu_buf, pc, is_kernel, event);
+}
+
+
+void oprofile_add_trace(unsigned long pc)
+{
+	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
+
+	if (!cpu_buf->tracing)
+		return;
+
+	if (nr_available_slots(cpu_buf) < 1) {
+		cpu_buf->tracing = 0;
+		cpu_buf->sample_lost_overflow++;
+		return;
+	}
+
+	/* a broken frame can give an eip with the same value as an escape
+	 * code; abort the trace if we get it */
+	if (pc == ESCAPE_CODE) {
+		cpu_buf->tracing = 0;
+		cpu_buf->backtrace_aborted++;
+		return;
+	}
+
+	add_sample(cpu_buf, pc, 0);
+}
+
+
+
+/*
+ * This serves to avoid cpu buffer overflow, and makes sure
+ * the task mortuary progresses
+ *
+ * By using schedule_delayed_work_on and then schedule_delayed_work
+ * we guarantee this will stay on the correct cpu
+ */
+static void wq_sync_buffer(void * data)
+{
+	struct oprofile_cpu_buffer * b = data;
+	if (b->cpu != smp_processor_id()) {
+		printk("WQ on CPU%d, prefer CPU%d\n",
+		       smp_processor_id(), b->cpu);
+	}
+	sync_buffer(b->cpu);
+
+	/* don't re-add the work if we're shutting down */
+	if (work_enabled)
+		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
+}
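
The per-CPU buffer above is a single-producer/single-consumer ring: interrupt context only ever advances head_pos (increment_head(), with a wmb() so slot contents are visible before the index moves), while sync_buffer() only ever advances tail_pos. A stripped-down userspace sketch of the same index arithmetic, with illustrative names and no memory barriers, might be:

/* Minimal SPSC ring using the same head/tail arithmetic as
 * nr_available_slots()/get_slots() above.  Illustrative only: no barriers,
 * so this is not safe across CPUs as written. */
#include <stdio.h>

#define RING_SIZE 8

struct ring {
	unsigned long head;	/* next slot the producer writes */
	unsigned long tail;	/* next slot the consumer reads  */
	unsigned long slot[RING_SIZE];
};

/* slots the producer may still fill without overwriting unread data */
static unsigned long ring_space(const struct ring *r)
{
	if (r->tail > r->head)
		return (r->tail - r->head) - 1;
	return r->tail + (RING_SIZE - r->head) - 1;
}

/* entries the consumer may drain right now */
static unsigned long ring_used(const struct ring *r)
{
	if (r->head >= r->tail)
		return r->head - r->tail;
	return r->head + (RING_SIZE - r->tail);
}

static int ring_put(struct ring *r, unsigned long v)
{
	if (!ring_space(r))
		return 0;	/* would overflow: drop, like sample_lost_overflow */
	r->slot[r->head] = v;
	r->head = (r->head + 1) % RING_SIZE;
	return 1;
}

static int ring_get(struct ring *r, unsigned long *v)
{
	if (!ring_used(r))
		return 0;
	*v = r->slot[r->tail];
	r->tail = (r->tail + 1) % RING_SIZE;
	return 1;
}

int main(void)
{
	struct ring r = { 0, 0, { 0 } };
	unsigned long v;

	ring_put(&r, 42);
	while (ring_get(&r, &v))
		printf("%lu\n", v);
	return 0;
}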
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
new file mode 100644
index 0000000..09abb80
--- /dev/null
+++ b/drivers/oprofile/cpu_buffer.h
@@ -0,0 +1,57 @@
+/**
+ * @file cpu_buffer.h
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#ifndef OPROFILE_CPU_BUFFER_H
+#define OPROFILE_CPU_BUFFER_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/cache.h>
+ 
+struct task_struct;
+ 
+int alloc_cpu_buffers(void);
+void free_cpu_buffers(void);
+
+void start_cpu_work(void);
+void end_cpu_work(void);
+
+/* CPU buffer is composed of such entries (which are
+ * also used for context switch notes)
+ */
+struct op_sample {
+	unsigned long eip;
+	unsigned long event;
+};
+ 
+struct oprofile_cpu_buffer {
+	volatile unsigned long head_pos;
+	volatile unsigned long tail_pos;
+	unsigned long buffer_size;
+	struct task_struct * last_task;
+	int last_is_kernel;
+	int tracing;
+	struct op_sample * buffer;
+	unsigned long sample_received;
+	unsigned long sample_lost_overflow;
+	unsigned long backtrace_aborted;
+	int cpu;
+	struct work_struct work;
+} ____cacheline_aligned;
+
+extern struct oprofile_cpu_buffer cpu_buffer[];
+
+void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
+
+/* transient events for the CPU buffer -> event buffer */
+#define CPU_IS_KERNEL 1
+#define CPU_TRACE_BEGIN 2
+
+#endif /* OPROFILE_CPU_BUFFER_H */
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
new file mode 100644
index 0000000..166bca7
--- /dev/null
+++ b/drivers/oprofile/event_buffer.c
@@ -0,0 +1,187 @@
+/**
+ * @file event_buffer.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ *
+ * This is the global event buffer that the user-space
+ * daemon reads from. The event buffer is an untyped array
+ * of unsigned longs. Entries are prefixed by the
+ * escape value ESCAPE_CODE followed by an identifying code.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/oprofile.h>
+#include <linux/sched.h>
+#include <linux/dcookies.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+ 
+#include "oprof.h"
+#include "event_buffer.h"
+#include "oprofile_stats.h"
+
+DECLARE_MUTEX(buffer_sem);
+ 
+static unsigned long buffer_opened;
+static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
+static unsigned long * event_buffer;
+static unsigned long buffer_size;
+static unsigned long buffer_watershed;
+static size_t buffer_pos;
+/* atomic_t because wait_event checks it outside of buffer_sem */
+static atomic_t buffer_ready = ATOMIC_INIT(0);
+
+/* Add an entry to the event buffer. When we
+ * get near to the end we wake up the process
+ * sleeping on the read() of the file.
+ */
+void add_event_entry(unsigned long value)
+{
+	if (buffer_pos == buffer_size) {
+		atomic_inc(&oprofile_stats.event_lost_overflow);
+		return;
+	}
+
+	event_buffer[buffer_pos] = value;
+	if (++buffer_pos == buffer_size - buffer_watershed) {
+		atomic_set(&buffer_ready, 1);
+		wake_up(&buffer_wait);
+	}
+}
+
+
+/* Wake up the waiting process if any. This happens
+ * on "echo 0 >/dev/oprofile/enable" so the daemon
+ * processes the data remaining in the event buffer.
+ */
+void wake_up_buffer_waiter(void)
+{
+	down(&buffer_sem);
+	atomic_set(&buffer_ready, 1);
+	wake_up(&buffer_wait);
+	up(&buffer_sem);
+}
+
+ 
+int alloc_event_buffer(void)
+{
+	int err = -ENOMEM;
+
+	spin_lock(&oprofilefs_lock);
+	buffer_size = fs_buffer_size;
+	buffer_watershed = fs_buffer_watershed;
+	spin_unlock(&oprofilefs_lock);
+ 
+	if (buffer_watershed >= buffer_size)
+		return -EINVAL;
+ 
+	event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
+	if (!event_buffer)
+		goto out; 
+
+	err = 0;
+out:
+	return err;
+}
+
+
+void free_event_buffer(void)
+{
+	vfree(event_buffer);
+}
+
+ 
+static int event_buffer_open(struct inode * inode, struct file * file)
+{
+	int err = -EPERM;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (test_and_set_bit(0, &buffer_opened))
+		return -EBUSY;
+
+	/* Register as a user of dcookies
+	 * to ensure they persist for the lifetime of
+	 * the open event file
+	 */
+	err = -EINVAL;
+	file->private_data = dcookie_register();
+	if (!file->private_data)
+		goto out;
+		 
+	if ((err = oprofile_setup()))
+		goto fail;
+
+	/* NB: the actual start happens from userspace
+	 * echo 1 >/dev/oprofile/enable
+	 */
+ 
+	return 0;
+
+fail:
+	dcookie_unregister(file->private_data);
+out:
+	clear_bit(0, &buffer_opened);
+	return err;
+}
+
+
+static int event_buffer_release(struct inode * inode, struct file * file)
+{
+	oprofile_stop();
+	oprofile_shutdown();
+	dcookie_unregister(file->private_data);
+	buffer_pos = 0;
+	atomic_set(&buffer_ready, 0);
+	clear_bit(0, &buffer_opened);
+	return 0;
+}
+
+
+static ssize_t event_buffer_read(struct file * file, char __user * buf,
+				 size_t count, loff_t * offset)
+{
+	int retval = -EINVAL;
+	size_t const max = buffer_size * sizeof(unsigned long);
+
+	/* handling partial reads is more trouble than it's worth */
+	if (count != max || *offset)
+		return -EINVAL;
+
+	wait_event_interruptible(buffer_wait, atomic_read(&buffer_ready));
+
+	if (signal_pending(current))
+		return -EINTR;
+
+	/* can't currently happen */
+	if (!atomic_read(&buffer_ready))
+		return -EAGAIN;
+
+	down(&buffer_sem);
+
+	atomic_set(&buffer_ready, 0);
+
+	retval = -EFAULT;
+
+	count = buffer_pos * sizeof(unsigned long);
+ 
+	if (copy_to_user(buf, event_buffer, count))
+		goto out;
+
+	retval = count;
+	buffer_pos = 0;
+ 
+out:
+	up(&buffer_sem);
+	return retval;
+}
+ 
+struct file_operations event_buffer_fops = {
+	.open		= event_buffer_open,
+	.release	= event_buffer_release,
+	.read		= event_buffer_read,
+};
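
On the other side of these fops, the daemon drains the event buffer with a single full-sized read() on the "buffer" file and then walks a flat array of unsigned longs in which ESCAPE_CODE-prefixed records are interleaved with (offset, event) sample pairs. A rough userspace sketch of that loop follows; the mount point, the 131072-entry default size, and the skipped per-code payload decoding are assumptions or simplifications, not part of the patch:

/* Hedged userspace sketch: one full-sized read (the kernel side rejects
 * partial reads), then a walk over the unsigned long stream.  Records such
 * as CTX_SWITCH_CODE carry extra payload words that a real daemon must
 * decode per code; that is omitted here. */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>

#define ESCAPE_CODE	(~0UL)

int main(void)
{
	/* matches the default fs_buffer_size; configurable via buffer_size */
	const size_t nentries = 131072;
	unsigned long *buf = malloc(nentries * sizeof(unsigned long));
	int fd = open("/dev/oprofile/buffer", O_RDONLY);
	ssize_t n;
	size_t nused, i;

	if (fd < 0 || !buf)
		return 1;

	n = read(fd, buf, nentries * sizeof(unsigned long));
	if (n < 0)
		return 1;

	nused = (size_t)n / sizeof(unsigned long);
	for (i = 0; i + 1 < nused; ) {
		if (buf[i] == ESCAPE_CODE) {
			/* buf[i + 1] is one of the *_CODE values from
			 * event_buffer.h; payload words follow for some codes */
			printf("escape, code=%lu\n", buf[i + 1]);
			i += 2;
		} else {
			/* ordinary sample: dcookie offset, then event value */
			printf("sample offset=%#lx event=%lu\n", buf[i], buf[i + 1]);
			i += 2;
		}
	}
	free(buf);
	close(fd);
	return 0;
}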
diff --git a/drivers/oprofile/event_buffer.h b/drivers/oprofile/event_buffer.h
new file mode 100644
index 0000000..442aaad
--- /dev/null
+++ b/drivers/oprofile/event_buffer.h
@@ -0,0 +1,48 @@
+/**
+ * @file event_buffer.h
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#ifndef EVENT_BUFFER_H
+#define EVENT_BUFFER_H
+
+#include <linux/types.h> 
+#include <asm/semaphore.h>
+ 
+int alloc_event_buffer(void);
+
+void free_event_buffer(void);
+ 
+/* wake up the process sleeping on the event file */
+void wake_up_buffer_waiter(void);
+ 
+/* Each escaped entry is prefixed by ESCAPE_CODE
+ * then one of the following codes, then the
+ * relevant data.
+ */
+#define ESCAPE_CODE			~0UL
+#define CTX_SWITCH_CODE 		1
+#define CPU_SWITCH_CODE 		2
+#define COOKIE_SWITCH_CODE 		3
+#define KERNEL_ENTER_SWITCH_CODE	4
+#define KERNEL_EXIT_SWITCH_CODE		5
+#define MODULE_LOADED_CODE		6
+#define CTX_TGID_CODE			7
+#define TRACE_BEGIN_CODE		8
+#define TRACE_END_CODE			9
+ 
+/* add data to the event buffer */
+void add_event_entry(unsigned long data);
+ 
+extern struct file_operations event_buffer_fops;
+ 
+/* mutex between sync_cpu_buffers() and the
+ * file reading code.
+ */
+extern struct semaphore buffer_sem;
+ 
+#endif /* EVENT_BUFFER_H */
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
new file mode 100644
index 0000000..b3f1cd6
--- /dev/null
+++ b/drivers/oprofile/oprof.c
@@ -0,0 +1,188 @@
+/**
+ * @file oprof.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/oprofile.h>
+#include <linux/moduleparam.h>
+#include <asm/semaphore.h>
+
+#include "oprof.h"
+#include "event_buffer.h"
+#include "cpu_buffer.h"
+#include "buffer_sync.h"
+#include "oprofile_stats.h"
+ 
+struct oprofile_operations oprofile_ops;
+
+unsigned long oprofile_started;
+unsigned long backtrace_depth;
+static unsigned long is_setup;
+static DECLARE_MUTEX(start_sem);
+
+/* timer
+   0 - use performance monitoring hardware if available
+   1 - use the timer int mechanism regardless
+ */
+static int timer = 0;
+
+int oprofile_setup(void)
+{
+	int err;
+ 
+	down(&start_sem);
+
+	if ((err = alloc_cpu_buffers()))
+		goto out;
+
+	if ((err = alloc_event_buffer()))
+		goto out1;
+ 
+	if (oprofile_ops.setup && (err = oprofile_ops.setup()))
+		goto out2;
+ 
+	/* Note: even though this starts some of the
+	 * profiling overhead early, it's necessary to
+	 * prevent us from missing task deaths and eventually
+	 * oopsing when trying to process the event buffer.
+	 */
+	if ((err = sync_start()))
+		goto out3;
+
+	is_setup = 1;
+	up(&start_sem);
+	return 0;
+ 
+out3:
+	if (oprofile_ops.shutdown)
+		oprofile_ops.shutdown();
+out2:
+	free_event_buffer();
+out1:
+	free_cpu_buffers();
+out:
+	up(&start_sem);
+	return err;
+}
+
+
+/* Actually start profiling (echo 1>/dev/oprofile/enable) */
+int oprofile_start(void)
+{
+	int err = -EINVAL;
+ 
+	down(&start_sem);
+ 
+	if (!is_setup)
+		goto out;
+
+	err = 0; 
+ 
+	if (oprofile_started)
+		goto out;
+ 
+	oprofile_reset_stats();
+
+	if ((err = oprofile_ops.start()))
+		goto out;
+
+	oprofile_started = 1;
+out:
+	up(&start_sem); 
+	return err;
+}
+
+ 
+/* echo 0>/dev/oprofile/enable */
+void oprofile_stop(void)
+{
+	down(&start_sem);
+	if (!oprofile_started)
+		goto out;
+	oprofile_ops.stop();
+	oprofile_started = 0;
+	/* wake up the daemon to read what remains */
+	wake_up_buffer_waiter();
+out:
+	up(&start_sem);
+}
+
+
+void oprofile_shutdown(void)
+{
+	down(&start_sem);
+	sync_stop();
+	if (oprofile_ops.shutdown)
+		oprofile_ops.shutdown();
+	is_setup = 0;
+	free_event_buffer();
+	free_cpu_buffers();
+	up(&start_sem);
+}
+
+
+int oprofile_set_backtrace(unsigned long val)
+{
+	int err = 0;
+
+	down(&start_sem);
+
+	if (oprofile_started) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	if (!oprofile_ops.backtrace) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	backtrace_depth = val;
+
+out:
+	up(&start_sem);
+	return err;
+}
+
+static int __init oprofile_init(void)
+{
+	int err;
+
+	err = oprofile_arch_init(&oprofile_ops);
+
+	if (err < 0 || timer) {
+		printk(KERN_INFO "oprofile: using timer interrupt.\n");
+		oprofile_timer_init(&oprofile_ops);
+	}
+
+	err = oprofilefs_register();
+	if (err)
+		oprofile_arch_exit();
+
+	return err;
+}
+
+
+static void __exit oprofile_exit(void)
+{
+	oprofilefs_unregister();
+	oprofile_arch_exit();
+}
+
+ 
+module_init(oprofile_init);
+module_exit(oprofile_exit);
+
+module_param_named(timer, timer, int, 0644);
+MODULE_PARM_DESC(timer, "force use of timer interrupt");
+ 
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Levon <levon@movementarian.org>");
+MODULE_DESCRIPTION("OProfile system profiler");
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
new file mode 100644
index 0000000..1832365
--- /dev/null
+++ b/drivers/oprofile/oprof.h
@@ -0,0 +1,39 @@
+/**
+ * @file oprof.h
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#ifndef OPROF_H
+#define OPROF_H
+
+int oprofile_setup(void);
+void oprofile_shutdown(void); 
+
+int oprofilefs_register(void);
+void oprofilefs_unregister(void);
+
+int oprofile_start(void);
+void oprofile_stop(void);
+
+struct oprofile_operations;
+ 
+extern unsigned long fs_buffer_size;
+extern unsigned long fs_cpu_buffer_size;
+extern unsigned long fs_buffer_watershed;
+extern struct oprofile_operations oprofile_ops;
+extern unsigned long oprofile_started;
+extern unsigned long backtrace_depth;
+ 
+struct super_block;
+struct dentry;
+
+void oprofile_create_files(struct super_block * sb, struct dentry * root);
+void oprofile_timer_init(struct oprofile_operations * ops);
+
+int oprofile_set_backtrace(unsigned long depth);
+ 
+#endif /* OPROF_H */
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
new file mode 100644
index 0000000..9abedea
--- /dev/null
+++ b/drivers/oprofile/oprofile_files.c
@@ -0,0 +1,135 @@
+/**
+ * @file oprofile_files.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#include <linux/fs.h>
+#include <linux/oprofile.h>
+
+#include "event_buffer.h"
+#include "oprofile_stats.h"
+#include "oprof.h"
+ 
+unsigned long fs_buffer_size = 131072;
+unsigned long fs_cpu_buffer_size = 8192;
+unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
+
+static ssize_t depth_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
+{
+	return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset);
+}
+
+
+static ssize_t depth_write(struct file * file, char const __user * buf, size_t count, loff_t * offset)
+{
+	unsigned long val;
+	int retval;
+
+	if (*offset)
+		return -EINVAL;
+
+	retval = oprofilefs_ulong_from_user(&val, buf, count);
+	if (retval)
+		return retval;
+
+	retval = oprofile_set_backtrace(val);
+
+	if (retval)
+		return retval;
+	return count;
+}
+
+
+static struct file_operations depth_fops = {
+	.read		= depth_read,
+	.write		= depth_write
+};
+
+ 
+static ssize_t pointer_size_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
+{
+	return oprofilefs_ulong_to_user(sizeof(void *), buf, count, offset);
+}
+
+
+static struct file_operations pointer_size_fops = {
+	.read		= pointer_size_read,
+};
+
+
+static ssize_t cpu_type_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
+{
+	return oprofilefs_str_to_user(oprofile_ops.cpu_type, buf, count, offset);
+}
+ 
+ 
+static struct file_operations cpu_type_fops = {
+	.read		= cpu_type_read,
+};
+ 
+ 
+static ssize_t enable_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
+{
+	return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
+}
+
+
+static ssize_t enable_write(struct file * file, char const __user * buf, size_t count, loff_t * offset)
+{
+	unsigned long val;
+	int retval;
+
+	if (*offset)
+		return -EINVAL;
+
+	retval = oprofilefs_ulong_from_user(&val, buf, count);
+	if (retval)
+		return retval;
+ 
+	if (val)
+		retval = oprofile_start();
+	else
+		oprofile_stop();
+
+	if (retval)
+		return retval;
+	return count;
+}
+
+ 
+static struct file_operations enable_fops = {
+	.read		= enable_read,
+	.write		= enable_write,
+};
+
+
+static ssize_t dump_write(struct file * file, char const __user * buf, size_t count, loff_t * offset)
+{
+	wake_up_buffer_waiter();
+	return count;
+}
+
+
+static struct file_operations dump_fops = {
+	.write		= dump_write,
+};
+ 
+void oprofile_create_files(struct super_block * sb, struct dentry * root)
+{
+	oprofilefs_create_file(sb, root, "enable", &enable_fops);
+	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
+	oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
+	oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
+	oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
+	oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size);
+	oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops); 
+	oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
+	oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
+	oprofile_create_stats_files(sb, root);
+	if (oprofile_ops.create_files)
+		oprofile_ops.create_files(sb, root);
+}
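
Putting the control files above together, a userspace controller first opens "buffer" (which is what triggers oprofile_setup()), then writes to "enable" to start and stop the run; the shell equivalent is the "echo 1 >/dev/oprofile/enable" mentioned in the comments. A hedged C sketch, assuming oprofilefs is mounted at /dev/oprofile and omitting error handling:

/* Illustrative control sequence against the files created above; this is a
 * sketch of daemon behaviour, not code from this patch. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static void write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	if (fd >= 0) {
		write(fd, val, strlen(val));
		close(fd);
	}
}

int main(void)
{
	int buf_fd;

	/* sizes take effect when the event buffer is next set up */
	write_str("/dev/oprofile/buffer_size", "131072");
	write_str("/dev/oprofile/cpu_buffer_size", "8192");

	/* opening "buffer" performs oprofile_setup(); profiling itself only
	 * starts once 1 is written to "enable" */
	buf_fd = open("/dev/oprofile/buffer", O_RDONLY);
	write_str("/dev/oprofile/enable", "1");

	/* ... read(buf_fd, ...) loop as sketched after event_buffer.c ... */

	write_str("/dev/oprofile/enable", "0");	/* stop; wakes the reader */
	close(buf_fd);
	return 0;
}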
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
new file mode 100644
index 0000000..e94b1e4
--- /dev/null
+++ b/drivers/oprofile/oprofile_stats.c
@@ -0,0 +1,74 @@
+/**
+ * @file oprofile_stats.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon
+ */
+
+#include <linux/oprofile.h>
+#include <linux/smp.h>
+#include <linux/cpumask.h>
+#include <linux/threads.h>
+ 
+#include "oprofile_stats.h"
+#include "cpu_buffer.h"
+ 
+struct oprofile_stat_struct oprofile_stats;
+ 
+void oprofile_reset_stats(void)
+{
+	struct oprofile_cpu_buffer * cpu_buf; 
+	int i;
+ 
+	for_each_cpu(i) {
+		cpu_buf = &cpu_buffer[i]; 
+		cpu_buf->sample_received = 0;
+		cpu_buf->sample_lost_overflow = 0;
+	}
+ 
+	atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
+	atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
+	atomic_set(&oprofile_stats.event_lost_overflow, 0);
+}
+
+
+void oprofile_create_stats_files(struct super_block * sb, struct dentry * root)
+{
+	struct oprofile_cpu_buffer * cpu_buf;
+	struct dentry * cpudir;
+	struct dentry * dir;
+	char buf[10];
+	int i;
+
+	dir = oprofilefs_mkdir(sb, root, "stats");
+	if (!dir)
+		return;
+
+	for_each_cpu(i) {
+		cpu_buf = &cpu_buffer[i]; 
+		snprintf(buf, 10, "cpu%d", i);
+		cpudir = oprofilefs_mkdir(sb, dir, buf);
+ 
+		/* Strictly speaking access to these ulongs is racy,
+		 * but we can't simply lock them, and they are
+		 * informational only.
+		 */
+		oprofilefs_create_ro_ulong(sb, cpudir, "sample_received",
+			&cpu_buf->sample_received);
+		oprofilefs_create_ro_ulong(sb, cpudir, "sample_lost_overflow",
+			&cpu_buf->sample_lost_overflow);
+		oprofilefs_create_ro_ulong(sb, cpudir, "backtrace_aborted",
+			&cpu_buf->backtrace_aborted);
+	}
+ 
+	oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm",
+		&oprofile_stats.sample_lost_no_mm);
+	oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mapping",
+		&oprofile_stats.sample_lost_no_mapping);
+	oprofilefs_create_ro_atomic(sb, dir, "event_lost_overflow",
+		&oprofile_stats.event_lost_overflow);
+	oprofilefs_create_ro_atomic(sb, dir, "bt_lost_no_mapping",
+		&oprofile_stats.bt_lost_no_mapping);
+}
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
new file mode 100644
index 0000000..6d755a6
--- /dev/null
+++ b/drivers/oprofile/oprofile_stats.h
@@ -0,0 +1,33 @@
+/**
+ * @file oprofile_stats.h
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon
+ */
+
+#ifndef OPROFILE_STATS_H
+#define OPROFILE_STATS_H
+
+#include <asm/atomic.h>
+ 
+struct oprofile_stat_struct {
+	atomic_t sample_lost_no_mm;
+	atomic_t sample_lost_no_mapping;
+	atomic_t bt_lost_no_mapping;
+	atomic_t event_lost_overflow;
+};
+
+extern struct oprofile_stat_struct oprofile_stats;
+ 
+/* reset all stats to zero */
+void oprofile_reset_stats(void);
+ 
+struct super_block;
+struct dentry;
+ 
+/* create the stats/ dir */
+void oprofile_create_stats_files(struct super_block * sb, struct dentry * root);
+
+#endif /* OPROFILE_STATS_H */
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
new file mode 100644
index 0000000..d6bae69
--- /dev/null
+++ b/drivers/oprofile/oprofilefs.c
@@ -0,0 +1,299 @@
+/**
+ * @file oprofilefs.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon
+ *
+ * A simple filesystem for configuration and
+ * access of oprofile.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/oprofile.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <asm/uaccess.h>
+
+#include "oprof.h"
+
+#define OPROFILEFS_MAGIC 0x6f70726f
+
+DEFINE_SPINLOCK(oprofilefs_lock);
+
+static struct inode * oprofilefs_get_inode(struct super_block * sb, int mode)
+{
+	struct inode * inode = new_inode(sb);
+
+	if (inode) {
+		inode->i_mode = mode;
+		inode->i_uid = 0;
+		inode->i_gid = 0;
+		inode->i_blksize = PAGE_CACHE_SIZE;
+		inode->i_blocks = 0;
+		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	}
+	return inode;
+}
+
+
+static struct super_operations s_ops = {
+	.statfs		= simple_statfs,
+	.drop_inode 	= generic_delete_inode,
+};
+
+
+ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset)
+{
+	return simple_read_from_buffer(buf, count, offset, str, strlen(str));
+}
+
+
+#define TMPBUFSIZE 50
+
+ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset)
+{
+	char tmpbuf[TMPBUFSIZE];
+	size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
+	if (maxlen > TMPBUFSIZE)
+		maxlen = TMPBUFSIZE;
+	return simple_read_from_buffer(buf, count, offset, tmpbuf, maxlen);
+}
+
+
+int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count)
+{
+	char tmpbuf[TMPBUFSIZE];
+
+	if (!count)
+		return 0;
+
+	if (count > TMPBUFSIZE - 1)
+		return -EINVAL;
+
+	memset(tmpbuf, 0x0, TMPBUFSIZE);
+
+	if (copy_from_user(tmpbuf, buf, count))
+		return -EFAULT;
+
+	spin_lock(&oprofilefs_lock);
+	*val = simple_strtoul(tmpbuf, NULL, 0);
+	spin_unlock(&oprofilefs_lock);
+	return 0;
+}
+
+
+static ssize_t ulong_read_file(struct file * file, char __user * buf, size_t count, loff_t * offset)
+{
+	unsigned long * val = file->private_data;
+	return oprofilefs_ulong_to_user(*val, buf, count, offset);
+}
+
+
+static ssize_t ulong_write_file(struct file * file, char const __user * buf, size_t count, loff_t * offset)
+{
+	unsigned long * value = file->private_data;
+	int retval;
+
+	if (*offset)
+		return -EINVAL;
+
+	retval = oprofilefs_ulong_from_user(value, buf, count);
+
+	if (retval)
+		return retval;
+	return count;
+}
+
+
+static int default_open(struct inode * inode, struct file * filp)
+{
+	if (inode->u.generic_ip)
+		filp->private_data = inode->u.generic_ip;
+	return 0;
+}
+
+
+static struct file_operations ulong_fops = {
+	.read		= ulong_read_file,
+	.write		= ulong_write_file,
+	.open		= default_open,
+};
+
+
+static struct file_operations ulong_ro_fops = {
+	.read		= ulong_read_file,
+	.open		= default_open,
+};
+
+
+static struct dentry * __oprofilefs_create_file(struct super_block * sb,
+	struct dentry * root, char const * name, struct file_operations * fops,
+	int perm)
+{
+	struct dentry * dentry;
+	struct inode * inode;
+
+	dentry = d_alloc_name(root, name);
+	if (!dentry)
+		return NULL;
+	inode = oprofilefs_get_inode(sb, S_IFREG | perm);
+	if (!inode) {
+		dput(dentry);
+		return NULL;
+	}
+	inode->i_fop = fops;
+	d_add(dentry, inode);
+	return dentry;
+}
+
+
+int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
+	char const * name, unsigned long * val)
+{
+	struct dentry * d = __oprofilefs_create_file(sb, root, name,
+						     &ulong_fops, 0644);
+	if (!d)
+		return -EFAULT;
+
+	d->d_inode->u.generic_ip = val;
+	return 0;
+}
+
+
+int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
+	char const * name, unsigned long * val)
+{
+	struct dentry * d = __oprofilefs_create_file(sb, root, name,
+						     &ulong_ro_fops, 0444);
+	if (!d)
+		return -EFAULT;
+
+	d->d_inode->u.generic_ip = val;
+	return 0;
+}
+
+
+static ssize_t atomic_read_file(struct file * file, char __user * buf, size_t count, loff_t * offset)
+{
+	atomic_t * val = file->private_data;
+	return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
+}
+ 
+
+static struct file_operations atomic_ro_fops = {
+	.read		= atomic_read_file,
+	.open		= default_open,
+};
+ 
+
+int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
+	char const * name, atomic_t * val)
+{
+	struct dentry * d = __oprofilefs_create_file(sb, root, name,
+						     &atomic_ro_fops, 0444);
+	if (!d)
+		return -EFAULT;
+
+	d->d_inode->u.generic_ip = val;
+	return 0;
+}
+
+ 
+int oprofilefs_create_file(struct super_block * sb, struct dentry * root,
+	char const * name, struct file_operations * fops)
+{
+	if (!__oprofilefs_create_file(sb, root, name, fops, 0644))
+		return -EFAULT;
+	return 0;
+}
+
+
+int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root,
+	char const * name, struct file_operations * fops, int perm)
+{
+	if (!__oprofilefs_create_file(sb, root, name, fops, perm))
+		return -EFAULT;
+	return 0;
+}
+
+
+struct dentry * oprofilefs_mkdir(struct super_block * sb,
+	struct dentry * root, char const * name)
+{
+	struct dentry * dentry;
+	struct inode * inode;
+
+	dentry = d_alloc_name(root, name);
+	if (!dentry)
+		return NULL;
+	inode = oprofilefs_get_inode(sb, S_IFDIR | 0755);
+	if (!inode) {
+		dput(dentry);
+		return NULL;
+	}
+	inode->i_op = &simple_dir_inode_operations;
+	inode->i_fop = &simple_dir_operations;
+	d_add(dentry, inode);
+	return dentry;
+}
+
+
+static int oprofilefs_fill_super(struct super_block * sb, void * data, int silent)
+{
+	struct inode * root_inode;
+	struct dentry * root_dentry;
+
+	sb->s_blocksize = PAGE_CACHE_SIZE;
+	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_magic = OPROFILEFS_MAGIC;
+	sb->s_op = &s_ops;
+	sb->s_time_gran = 1;
+
+	root_inode = oprofilefs_get_inode(sb, S_IFDIR | 0755);
+	if (!root_inode)
+		return -ENOMEM;
+	root_inode->i_op = &simple_dir_inode_operations;
+	root_inode->i_fop = &simple_dir_operations;
+	root_dentry = d_alloc_root(root_inode);
+	if (!root_dentry) {
+		iput(root_inode);
+		return -ENOMEM;
+	}
+
+	sb->s_root = root_dentry;
+
+	oprofile_create_files(sb, root_dentry);
+
+	// FIXME: verify kill_litter_super removes our dentries
+	return 0;
+}
+
+
+static struct super_block *oprofilefs_get_sb(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data)
+{
+	return get_sb_single(fs_type, flags, data, oprofilefs_fill_super);
+}
+
+
+static struct file_system_type oprofilefs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "oprofilefs",
+	.get_sb		= oprofilefs_get_sb,
+	.kill_sb	= kill_litter_super,
+};
+
+
+int __init oprofilefs_register(void)
+{
+	return register_filesystem(&oprofilefs_type);
+}
+
+
+void __exit oprofilefs_unregister(void)
+{
+	unregister_filesystem(&oprofilefs_type);
+}
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
new file mode 100644
index 0000000..710a45f
--- /dev/null
+++ b/drivers/oprofile/timer_int.c
@@ -0,0 +1,46 @@
+/**
+ * @file timer_int.c
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+#include <linux/smp.h>
+#include <linux/oprofile.h>
+#include <linux/profile.h>
+#include <linux/init.h>
+#include <asm/ptrace.h>
+
+#include "oprof.h"
+
+static int timer_notify(struct pt_regs *regs)
+{
+ 	oprofile_add_sample(regs, 0);
+	return 0;
+}
+
+static int timer_start(void)
+{
+	return register_timer_hook(timer_notify);
+}
+
+
+static void timer_stop(void)
+{
+	unregister_timer_hook(timer_notify);
+}
+
+
+void __init oprofile_timer_init(struct oprofile_operations * ops)
+{
+	ops->create_files = NULL;
+	ops->setup = NULL;
+	ops->shutdown = NULL;
+	ops->start = timer_start;
+	ops->stop = timer_stop;
+	ops->cpu_type = "timer";
+}