[POWERPC] spufs: add context switch notification log

There are userspace instrumentation tools that need to monitor SPU
context switches. This patch adds a new file, 'switch_log', to each
spufs context directory that can be used for this purpose.

Context switch-in and switch-out events, as well as exits from
spu_run, are logged once the file has been opened for the first time
and can then be read from it.
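
As a rough illustration (not part of this patch), a userspace reader
could look like the sketch below.  The spufs mount point and the
context directory name are made up for the example; only the
'switch_log' file name and the record format written by
switch_log_sprint() (timestamp, spu_id, type, val, timebase) come
from this patch.

/*
 * Example switch_log reader, for illustration only.  The path assumes
 * spufs is mounted at /spu and the context directory is "ctx0"; both
 * names are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned int sec, nsec, type, val;
	unsigned long long timebase;
	int spu_id;
	FILE *f = fopen("/spu/ctx0/switch_log", "r");

	if (!f)
		return 1;

	/* each read blocks until the next switch event has been logged */
	while (fscanf(f, "%u.%u %d %u %u %llu", &sec, &nsec, &spu_id,
		      &type, &val, &timebase) == 6)
		printf("spu %d: event %u val %u at %u.%09us tb %llu\n",
		       spu_id, type, val, sec, nsec, timebase);

	fclose(f);
	return 0;
}

The file also supports poll() and O_NONBLOCK reads (which return
-EAGAIN when the log is empty), so a tool can multiplex it with other
file descriptors.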

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index ba791e9..b5c1505 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -2387,6 +2387,171 @@
 	.release	= single_release,
 };
 
+static inline int spufs_switch_log_used(struct spu_context *ctx)
+{
+	return (ctx->switch_log->head - ctx->switch_log->tail) %
+		SWITCH_LOG_BUFSIZE;
+}
+
+static inline int spufs_switch_log_avail(struct spu_context *ctx)
+{
+	return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
+}
+
+static int spufs_switch_log_open(struct inode *inode, struct file *file)
+{
+	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+
+	/*
+	 * We (ab-)use the mapping_lock here because it serves a similar
+	 * purpose for synchronizing open/close elsewhere.  Maybe it should
+	 * be renamed eventually.
+	 */
+	mutex_lock(&ctx->mapping_lock);
+	if (ctx->switch_log) {
+		spin_lock(&ctx->switch_log->lock);
+		ctx->switch_log->head = 0;
+		ctx->switch_log->tail = 0;
+		spin_unlock(&ctx->switch_log->lock);
+	} else {
+		/*
+		 * We allocate the switch log data structures on first open.
+		 * They will never be freed because we assume a context will
+		 * be traced until it goes away.
+		 */
+		ctx->switch_log = kzalloc(sizeof(struct switch_log) +
+			SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
+			GFP_KERNEL);
+		if (!ctx->switch_log)
+			goto out;
+		spin_lock_init(&ctx->switch_log->lock);
+		init_waitqueue_head(&ctx->switch_log->wait);
+	}
+	mutex_unlock(&ctx->mapping_lock);
+
+	return 0;
+ out:
+	mutex_unlock(&ctx->mapping_lock);
+	return -ENOMEM;
+}
+
+static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
+{
+	struct switch_log_entry *p;
+
+	p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;
+
+	return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
+			(unsigned int) p->tstamp.tv_sec,
+			(unsigned int) p->tstamp.tv_nsec,
+			p->spu_id,
+			(unsigned int) p->type,
+			(unsigned int) p->val,
+			(unsigned long long) p->timebase);
+}
+
+static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
+			     size_t len, loff_t *ppos)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+	int error = 0, cnt = 0;
+
+	if (!buf)
+		return -EINVAL;
+
+	while (cnt < len) {
+		char tbuf[128];
+		int width;
+
+		if (file->f_flags & O_NONBLOCK) {
+			if (spufs_switch_log_used(ctx) <= 0)
+				return cnt ? cnt : -EAGAIN;
+		} else {
+			/* Wait for data in buffer */
+			error = wait_event_interruptible(ctx->switch_log->wait,
+					spufs_switch_log_used(ctx) > 0);
+			if (error)
+				break;
+		}
+
+		spin_lock(&ctx->switch_log->lock);
+		if (ctx->switch_log->head == ctx->switch_log->tail) {
+			/* raced with another reader; log is empty again */
+			spin_unlock(&ctx->switch_log->lock);
+			continue;
+		}
+
+		width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
+		if (cnt + width <= len) {
+			ctx->switch_log->tail =
+				(ctx->switch_log->tail + 1) %
+				 SWITCH_LOG_BUFSIZE;
+		}
+
+		spin_unlock(&ctx->switch_log->lock);
+
+		/*
+		 * If the record does not fit in the space remaining in
+		 * the user buffer, return the partial read so far.
+		 */
+		if (cnt + width > len)
+			break;
+
+		error = copy_to_user(buf + cnt, tbuf, width) ? -EFAULT : 0;
+		if (error)
+			break;
+		cnt += width;
+	}
+
+	return cnt == 0 ? error : cnt;
+}
+
+static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+	unsigned int mask = 0;
+
+	poll_wait(file, &ctx->switch_log->wait, wait);
+
+	if (spufs_switch_log_used(ctx) > 0)
+		mask |= POLLIN;
+
+	return mask;
+}
+
+static const struct file_operations spufs_switch_log_fops = {
+	.owner	= THIS_MODULE,
+	.open	= spufs_switch_log_open,
+	.read	= spufs_switch_log_read,
+	.poll	= spufs_switch_log_poll,
+};
+
+void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
+		u32 type, u32 val)
+{
+	if (!ctx->switch_log)
+		return;
+
+	spin_lock(&ctx->switch_log->lock);
+	if (spufs_switch_log_avail(ctx) > 1) {
+		struct switch_log_entry *p;
+
+		p = ctx->switch_log->log + ctx->switch_log->head;
+		ktime_get_ts(&p->tstamp);
+		p->timebase = get_tb();
+		p->spu_id = spu ? spu->number : -1;
+		p->type = type;
+		p->val = val;
+
+		ctx->switch_log->head =
+			(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
+	}
+	spin_unlock(&ctx->switch_log->lock);
+
+	wake_up(&ctx->switch_log->wait);
+}
 
 struct tree_descr spufs_dir_contents[] = {
 	{ "capabilities", &spufs_caps_fops, 0444, },
@@ -2423,6 +2588,7 @@
 	{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
 	{ "tid", &spufs_tid_fops, 0444, },
 	{ "stat", &spufs_stat_fops, 0444, },
+	{ "switch_log", &spufs_switch_log_fops, 0444, },
 	{},
 };