[PATCH] Generic HID layer - code split

The "big" split of the USB HID code into generic HID code and
USB-transport-specific HID handling.

Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
Cc: Dmitry Torokhov <dmitry.torokhov@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
new file mode 100644
index 0000000..689ae16
--- /dev/null
+++ b/drivers/hid/hid-core.c
@@ -0,0 +1,940 @@
+/*
+ *  USB HID support for Linux
+ *
+ *  Copyright (c) 1999 Andreas Gal
+ *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
+ *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
+ *  Copyright (c) 2006 Jiri Kosina
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/smp_lock.h>
+#include <linux/spinlock.h>
+#include <asm/unaligned.h>
+#include <asm/byteorder.h>
+#include <linux/input.h>
+#include <linux/wait.h>
+
+#undef DEBUG
+#undef DEBUG_DATA
+
+#include <linux/usb.h>
+
+#include <linux/hid.h>
+#include <linux/hiddev.h>
+
+/*
+ * Version Information
+ */
+
+#define DRIVER_VERSION "v2.6"
+#define DRIVER_AUTHOR "Andreas Gal, Vojtech Pavlik"
+#define DRIVER_DESC "USB HID core driver"
+#define DRIVER_LICENSE "GPL"
+
+/*
+ * Module parameters.
+ */
+
+static unsigned int hid_mousepoll_interval;
+module_param_named(mousepoll, hid_mousepoll_interval, uint, 0644);
+MODULE_PARM_DESC(mousepoll, "Polling interval of mice");
+
+/*
+ * Register a new report for a device.
+ */
+
+static struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id)
+{
+	struct hid_report_enum *report_enum = device->report_enum + type;
+	struct hid_report *report;
+
+	if (report_enum->report_id_hash[id])
+		return report_enum->report_id_hash[id];
+
+	if (!(report = kzalloc(sizeof(struct hid_report), GFP_KERNEL)))
+		return NULL;
+
+	if (id != 0)
+		report_enum->numbered = 1;
+
+	report->id = id;
+	report->type = type;
+	report->size = 0;
+	report->device = device;
+	report_enum->report_id_hash[id] = report;
+
+	list_add_tail(&report->list, &report_enum->report_list);
+
+	return report;
+}
+
+/*
+ * Register a new field for this report.
+ */
+
+static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values)
+{
+	struct hid_field *field;
+
+	if (report->maxfield == HID_MAX_FIELDS) {
+		dbg("too many fields in report");
+		return NULL;
+	}
+
+	if (!(field = kzalloc(sizeof(struct hid_field) + usages * sizeof(struct hid_usage)
+		+ values * sizeof(unsigned), GFP_KERNEL)))
+		return NULL;
+
+	field->index = report->maxfield++;
+	report->field[field->index] = field;
+	field->usage = (struct hid_usage *)(field + 1);
+	field->value = (unsigned *)(field->usage + usages);
+	field->report = report;
+
+	return field;
+}
+
+/*
+ * Open a collection. The type/usage is pushed on the stack.
+ */
+
+static int open_collection(struct hid_parser *parser, unsigned type)
+{
+	struct hid_collection *collection;
+	unsigned usage;
+
+	usage = parser->local.usage[0];
+
+	if (parser->collection_stack_ptr == HID_COLLECTION_STACK_SIZE) {
+		dbg("collection stack overflow");
+		return -1;
+	}
+
+	if (parser->device->maxcollection == parser->device->collection_size) {
+		collection = kmalloc(sizeof(struct hid_collection) *
+				parser->device->collection_size * 2, GFP_KERNEL);
+		if (collection == NULL) {
+			dbg("failed to reallocate collection array");
+			return -1;
+		}
+		memcpy(collection, parser->device->collection,
+			sizeof(struct hid_collection) *
+			parser->device->collection_size);
+		memset(collection + parser->device->collection_size, 0,
+			sizeof(struct hid_collection) *
+			parser->device->collection_size);
+		kfree(parser->device->collection);
+		parser->device->collection = collection;
+		parser->device->collection_size *= 2;
+	}
+
+	parser->collection_stack[parser->collection_stack_ptr++] =
+		parser->device->maxcollection;
+
+	collection = parser->device->collection +
+		parser->device->maxcollection++;
+	collection->type = type;
+	collection->usage = usage;
+	collection->level = parser->collection_stack_ptr - 1;
+
+	if (type == HID_COLLECTION_APPLICATION)
+		parser->device->maxapplication++;
+
+	return 0;
+}
+
+/*
+ * Close a collection.
+ */
+
+static int close_collection(struct hid_parser *parser)
+{
+	if (!parser->collection_stack_ptr) {
+		dbg("collection stack underflow");
+		return -1;
+	}
+	parser->collection_stack_ptr--;
+	return 0;
+}
+
+/*
+ * Climb up the stack, search for the specified collection type
+ * and return the usage.
+ */
+
+static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
+{
+	int n;
+	for (n = parser->collection_stack_ptr - 1; n >= 0; n--)
+		if (parser->device->collection[parser->collection_stack[n]].type == type)
+			return parser->device->collection[parser->collection_stack[n]].usage;
+	return 0; /* we know nothing about this usage type */
+}
+
+/*
+ * Add a usage to the temporary parser table.
+ */
+
+static int hid_add_usage(struct hid_parser *parser, unsigned usage)
+{
+	if (parser->local.usage_index >= HID_MAX_USAGES) {
+		dbg("usage index exceeded");
+		return -1;
+	}
+	parser->local.usage[parser->local.usage_index] = usage;
+	parser->local.collection_index[parser->local.usage_index] =
+		parser->collection_stack_ptr ?
+		parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
+	parser->local.usage_index++;
+	return 0;
+}
+
+/*
+ * Register a new field for this report.
+ */
+
+static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
+{
+	struct hid_report *report;
+	struct hid_field *field;
+	int usages;
+	unsigned offset;
+	int i;
+
+	if (!(report = hid_register_report(parser->device, report_type, parser->global.report_id))) {
+		dbg("hid_register_report failed");
+		return -1;
+	}
+
+	if (parser->global.logical_maximum < parser->global.logical_minimum) {
+		dbg("logical range invalid %d %d", parser->global.logical_minimum, parser->global.logical_maximum);
+		return -1;
+	}
+
+	offset = report->size;
+	report->size += parser->global.report_size * parser->global.report_count;
+
+	if (!parser->local.usage_index) /* Ignore padding fields */
+		return 0;
+
+	usages = max_t(int, parser->local.usage_index, parser->global.report_count);
+
+	if ((field = hid_register_field(report, usages, parser->global.report_count)) == NULL)
+		return 0;
+
+	field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
+	field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
+	field->application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
+
+	for (i = 0; i < usages; i++) {
+		int j = i;
+		/* Duplicate the last usage we parsed if we have excess values */
+		if (i >= parser->local.usage_index)
+			j = parser->local.usage_index - 1;
+		field->usage[i].hid = parser->local.usage[j];
+		field->usage[i].collection_index =
+			parser->local.collection_index[j];
+	}
+
+	field->maxusage = usages;
+	field->flags = flags;
+	field->report_offset = offset;
+	field->report_type = report_type;
+	field->report_size = parser->global.report_size;
+	field->report_count = parser->global.report_count;
+	field->logical_minimum = parser->global.logical_minimum;
+	field->logical_maximum = parser->global.logical_maximum;
+	field->physical_minimum = parser->global.physical_minimum;
+	field->physical_maximum = parser->global.physical_maximum;
+	field->unit_exponent = parser->global.unit_exponent;
+	field->unit = parser->global.unit;
+
+	return 0;
+}
+
+/*
+ * Read data value from item.
+ */
+
+static u32 item_udata(struct hid_item *item)
+{
+	switch (item->size) {
+		case 1: return item->data.u8;
+		case 2: return item->data.u16;
+		case 4: return item->data.u32;
+	}
+	return 0;
+}
+
+static s32 item_sdata(struct hid_item *item)
+{
+	switch (item->size) {
+		case 1: return item->data.s8;
+		case 2: return item->data.s16;
+		case 4: return item->data.s32;
+	}
+	return 0;
+}
+
+/*
+ * Process a global item.
+ */
+
+static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
+{
+	switch (item->tag) {
+
+		case HID_GLOBAL_ITEM_TAG_PUSH:
+
+			if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
+				dbg("global environment stack overflow");
+				return -1;
+			}
+
+			memcpy(parser->global_stack + parser->global_stack_ptr++,
+				&parser->global, sizeof(struct hid_global));
+			return 0;
+
+		case HID_GLOBAL_ITEM_TAG_POP:
+
+			if (!parser->global_stack_ptr) {
+				dbg("global environment stack underflow");
+				return -1;
+			}
+
+			memcpy(&parser->global, parser->global_stack + --parser->global_stack_ptr,
+				sizeof(struct hid_global));
+			return 0;
+
+		case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
+			parser->global.usage_page = item_udata(item);
+			return 0;
+
+		case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
+			parser->global.logical_minimum = item_sdata(item);
+			return 0;
+
+		case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
+			if (parser->global.logical_minimum < 0)
+				parser->global.logical_maximum = item_sdata(item);
+			else
+				parser->global.logical_maximum = item_udata(item);
+			return 0;
+
+		case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
+			parser->global.physical_minimum = item_sdata(item);
+			return 0;
+
+		case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
+			if (parser->global.physical_minimum < 0)
+				parser->global.physical_maximum = item_sdata(item);
+			else
+				parser->global.physical_maximum = item_udata(item);
+			return 0;
+
+		case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
+			parser->global.unit_exponent = item_sdata(item);
+			return 0;
+
+		case HID_GLOBAL_ITEM_TAG_UNIT:
+			parser->global.unit = item_udata(item);
+			return 0;
+
+		case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
+			if ((parser->global.report_size = item_udata(item)) > 32) {
+				dbg("invalid report_size %d", parser->global.report_size);
+				return -1;
+			}
+			return 0;
+
+		case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
+			if ((parser->global.report_count = item_udata(item)) > HID_MAX_USAGES) {
+				dbg("invalid report_count %d", parser->global.report_count);
+				return -1;
+			}
+			return 0;
+
+		case HID_GLOBAL_ITEM_TAG_REPORT_ID:
+			if ((parser->global.report_id = item_udata(item)) == 0) {
+				dbg("report_id 0 is invalid");
+				return -1;
+			}
+			return 0;
+
+		default:
+			dbg("unknown global tag 0x%x", item->tag);
+			return -1;
+	}
+}
+
+/*
+ * Process a local item.
+ */
+
+static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
+{
+	__u32 data;
+	unsigned n;
+
+	if (item->size == 0) {
+		dbg("item data expected for local item");
+		return -1;
+	}
+
+	data = item_udata(item);
+
+	switch (item->tag) {
+
+		case HID_LOCAL_ITEM_TAG_DELIMITER:
+
+			if (data) {
+				/*
+				 * We treat items before the first delimiter
+				 * as global to all usage sets (branch 0).
+				 * At the moment we process only these global
+				 * items and the first delimiter set.
+				 */
+				if (parser->local.delimiter_depth != 0) {
+					dbg("nested delimiters");
+					return -1;
+				}
+				parser->local.delimiter_depth++;
+				parser->local.delimiter_branch++;
+			} else {
+				if (parser->local.delimiter_depth < 1) {
+					dbg("bogus close delimiter");
+					return -1;
+				}
+				parser->local.delimiter_depth--;
+			}
+			return 1;
+
+		case HID_LOCAL_ITEM_TAG_USAGE:
+
+			if (parser->local.delimiter_branch > 1) {
+				dbg("alternative usage ignored");
+				return 0;
+			}
+
+			if (item->size <= 2)
+				data = (parser->global.usage_page << 16) + data;
+
+			return hid_add_usage(parser, data);
+
+		case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
+
+			if (parser->local.delimiter_branch > 1) {
+				dbg("alternative usage ignored");
+				return 0;
+			}
+
+			if (item->size <= 2)
+				data = (parser->global.usage_page << 16) + data;
+
+			parser->local.usage_minimum = data;
+			return 0;
+
+		case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:
+
+			if (parser->local.delimiter_branch > 1) {
+				dbg("alternative usage ignored");
+				return 0;
+			}
+
+			if (item->size <= 2)
+				data = (parser->global.usage_page << 16) + data;
+
+			for (n = parser->local.usage_minimum; n <= data; n++)
+				if (hid_add_usage(parser, n)) {
+					dbg("hid_add_usage failed\n");
+					return -1;
+				}
+			return 0;
+
+		default:
+
+			dbg("unknown local item tag 0x%x", item->tag);
+			return 0;
+	}
+	return 0;
+}
+
+/*
+ * Process a main item.
+ */
+
+static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
+{
+	__u32 data;
+	int ret;
+
+	data = item_udata(item);
+
+	switch (item->tag) {
+		case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
+			ret = open_collection(parser, data & 0xff);
+			break;
+		case HID_MAIN_ITEM_TAG_END_COLLECTION:
+			ret = close_collection(parser);
+			break;
+		case HID_MAIN_ITEM_TAG_INPUT:
+			ret = hid_add_field(parser, HID_INPUT_REPORT, data);
+			break;
+		case HID_MAIN_ITEM_TAG_OUTPUT:
+			ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
+			break;
+		case HID_MAIN_ITEM_TAG_FEATURE:
+			ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
+			break;
+		default:
+			dbg("unknown main item tag 0x%x", item->tag);
+			ret = 0;
+	}
+
+	memset(&parser->local, 0, sizeof(parser->local));	/* Reset the local parser environment */
+
+	return ret;
+}
+
+/*
+ * Process a reserved item.
+ */
+
+static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
+{
+	dbg("reserved item type, tag 0x%x", item->tag);
+	return 0;
+}
+
+/*
+ * Free a report and all registered fields. The field->usage and
+ * field->value tables are allocated behind the field, so we need
+ * only to free(field) itself.
+ */
+
+static void hid_free_report(struct hid_report *report)
+{
+	unsigned n;
+
+	for (n = 0; n < report->maxfield; n++)
+		kfree(report->field[n]);
+	kfree(report);
+}
+
+/*
+ * Free a device structure, all reports, and all fields.
+ */
+
+static void hid_free_device(struct hid_device *device)
+{
+	unsigned i,j;
+
+	for (i = 0; i < HID_REPORT_TYPES; i++) {
+		struct hid_report_enum *report_enum = device->report_enum + i;
+
+		for (j = 0; j < 256; j++) {
+			struct hid_report *report = report_enum->report_id_hash[j];
+			if (report)
+				hid_free_report(report);
+		}
+	}
+
+	kfree(device->rdesc);
+	kfree(device);
+}
+
+/*
+ * Fetch a report description item from the data stream. We support long
+ * items, though they are not used yet.
+ */
+
+static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item)
+{
+	u8 b;
+
+	if ((end - start) <= 0)
+		return NULL;
+
+	b = *start++;
+
+	item->type = (b >> 2) & 3;
+	item->tag  = (b >> 4) & 15;
+
+	if (item->tag == HID_ITEM_TAG_LONG) {
+
+		item->format = HID_ITEM_FORMAT_LONG;
+
+		if ((end - start) < 2)
+			return NULL;
+
+		item->size = *start++;
+		item->tag  = *start++;
+
+		if ((end - start) < item->size)
+			return NULL;
+
+		item->data.longdata = start;
+		start += item->size;
+		return start;
+	}
+
+	item->format = HID_ITEM_FORMAT_SHORT;
+	item->size = b & 3;
+
+	switch (item->size) {
+
+		case 0:
+			return start;
+
+		case 1:
+			if ((end - start) < 1)
+				return NULL;
+			item->data.u8 = *start++;
+			return start;
+
+		case 2:
+			if ((end - start) < 2)
+				return NULL;
+			item->data.u16 = le16_to_cpu(get_unaligned((__le16*)start));
+			start = (__u8 *)((__le16 *)start + 1);
+			return start;
+
+		case 3:
+			item->size++;
+			if ((end - start) < 4)
+				return NULL;
+			item->data.u32 = le32_to_cpu(get_unaligned((__le32*)start));
+			start = (__u8 *)((__le32 *)start + 1);
+			return start;
+	}
+
+	return NULL;
+}
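+
+/*
+ * Worked example (illustrative, not taken from any real descriptor):
+ * the short-item byte sequence 0x85 0x01 has prefix b = 0x85, so
+ * size = b & 3 = 1, type = (b >> 2) & 3 = 1 (global) and
+ * tag = (b >> 4) & 15 = 8; the following byte 0x01 lands in
+ * item->data.u8, and hid_parser_global() later interprets the item as
+ * Report ID 1. Long items (tag 15) carry their own size and tag bytes
+ * instead.
+ */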
+
+/*
+ * Parse a report description into a hid_device structure. Reports are
+ * enumerated, fields are attached to these reports.
+ */
+
+static struct hid_device *hid_parse_report(__u8 *start, unsigned size)
+{
+	struct hid_device *device;
+	struct hid_parser *parser;
+	struct hid_item item;
+	__u8 *end;
+	unsigned i;
+	static int (*dispatch_type[])(struct hid_parser *parser,
+				      struct hid_item *item) = {
+		hid_parser_main,
+		hid_parser_global,
+		hid_parser_local,
+		hid_parser_reserved
+	};
+
+	if (!(device = kzalloc(sizeof(struct hid_device), GFP_KERNEL)))
+		return NULL;
+
+	if (!(device->collection = kzalloc(sizeof(struct hid_collection) *
+				   HID_DEFAULT_NUM_COLLECTIONS, GFP_KERNEL))) {
+		kfree(device);
+		return NULL;
+	}
+	device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
+
+	for (i = 0; i < HID_REPORT_TYPES; i++)
+		INIT_LIST_HEAD(&device->report_enum[i].report_list);
+
+	if (!(device->rdesc = kmalloc(size, GFP_KERNEL))) {
+		kfree(device->collection);
+		kfree(device);
+		return NULL;
+	}
+	memcpy(device->rdesc, start, size);
+	device->rsize = size;
+
+	if (!(parser = kzalloc(sizeof(struct hid_parser), GFP_KERNEL))) {
+		kfree(device->rdesc);
+		kfree(device->collection);
+		kfree(device);
+		return NULL;
+	}
+	parser->device = device;
+
+	end = start + size;
+	while ((start = fetch_item(start, end, &item)) != NULL) {
+
+		if (item.format != HID_ITEM_FORMAT_SHORT) {
+			dbg("unexpected long global item");
+			kfree(device->collection);
+			hid_free_device(device);
+			kfree(parser);
+			return NULL;
+		}
+
+		if (dispatch_type[item.type](parser, &item)) {
+			dbg("item %u %u %u %u parsing failed\n",
+				item.format, (unsigned)item.size, (unsigned)item.type, (unsigned)item.tag);
+			kfree(device->collection);
+			hid_free_device(device);
+			kfree(parser);
+			return NULL;
+		}
+
+		if (start == end) {
+			if (parser->collection_stack_ptr) {
+				dbg("unbalanced collection at end of report description");
+				kfree(device->collection);
+				hid_free_device(device);
+				kfree(parser);
+				return NULL;
+			}
+			if (parser->local.delimiter_depth) {
+				dbg("unbalanced delimiter at end of report description");
+				kfree(device->collection);
+				hid_free_device(device);
+				kfree(parser);
+				return NULL;
+			}
+			kfree(parser);
+			return device;
+		}
+	}
+
+	dbg("item fetching failed at offset %d\n", (int)(end - start));
+	kfree(device->collection);
+	hid_free_device(device);
+	kfree(parser);
+	return NULL;
+}
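+
+/*
+ * Illustrative walk (hypothetical fragment of a mouse descriptor): the
+ * bytes 0x05 0x01 (Usage Page: Generic Desktop), 0x09 0x02 (Usage:
+ * Mouse), 0xa1 0x01 (Collection: Application), ..., 0xc0 (End
+ * Collection) are fetched one item at a time and dispatched via
+ * dispatch_type[item.type] to hid_parser_global, hid_parser_local and
+ * hid_parser_main respectively.
+ */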
+
+/*
+ * Convert a signed n-bit integer to a signed 32-bit integer. The common
+ * widths are handled by the compiler; the odd ones have to be done by
+ * hand.
+ */
+
+static s32 snto32(__u32 value, unsigned n)
+{
+	switch (n) {
+		case 8:  return ((__s8)value);
+		case 16: return ((__s16)value);
+		case 32: return ((__s32)value);
+	}
+	return value & (1 << (n - 1)) ? value | (-1 << n) : value;
+}
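+
+/*
+ * Illustrative values: snto32(0x1f, 5) sign-extends bit 4 and yields -1,
+ * while snto32(0x0f, 5) has bit 4 clear and yields 15.
+ */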
+
+/*
+ * Convert a signed 32-bit integer to a signed n-bit integer.
+ */
+
+static u32 s32ton(__s32 value, unsigned n)
+{
+	s32 a = value >> (n - 1);
+	if (a && a != -1)
+		return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
+	return value & ((1 << n) - 1);
+}
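+
+/*
+ * Illustrative values: s32ton(-1, 5) fits into a signed 5-bit field and
+ * yields 0x1f, while s32ton(20, 5) overflows it and is clamped to 15.
+ */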
+
+/*
+ * Extract/implement a data field from/to a little endian report (bit array).
+ *
+ * Code sort-of follows HID spec:
+ *     http://www.usb.org/developers/devclass_docs/HID1_11.pdf
+ *
+ * While the USB HID spec allows unlimited length bit fields in "report
+ * descriptors", most devices never use more than 16 bits.
+ * One model of UPS is claimed to report "LINEV" as a 32-bit field.
+ * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
+ */
+
+static __inline__ __u32 extract(__u8 *report, unsigned offset, unsigned n)
+{
+	u64 x;
+
+	WARN_ON(n > 32);
+
+	report += offset >> 3;  /* adjust byte index */
+	offset &= 7;		/* now only need bit offset into one byte */
+	x = get_unaligned((u64 *) report);
+	x = le64_to_cpu(x);
+	x = (x >> offset) & ((1ULL << n) - 1);	/* extract bit field */
+	return (u32) x;
+}
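+
+/*
+ * Worked example (illustrative): with report bytes { 0x34, 0x12, ... },
+ * extract(report, 4, 8) stays in byte 0 (4 >> 3 == 0), keeps a bit
+ * offset of 4, reads a little endian 64-bit word from the buffer and
+ * returns (0x...1234 >> 4) & 0xff == 0x23. Note that a full 64-bit word
+ * is always read, so the buffer has to be large enough for that.
+ */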
+
+/*
+ * "implement" : set bits in a little endian bit stream.
+ * Same concepts as "extract" (see comments above).
+ * The data mangled in the bit stream remains in little endian
+ * order the whole time. It makes more sense to talk about
+ * endianness of register values by considering a register
+ * a "cached" copy of the little endian bit stream.
+ */
+static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u32 value)
+{
+	u64 x;
+	u64 m = (1ULL << n) - 1;
+
+	WARN_ON(n > 32);
+
+	WARN_ON(value > m);
+	value &= m;
+
+	report += offset >> 3;
+	offset &= 7;
+
+	x = get_unaligned((u64 *)report);
+	x &= cpu_to_le64(~(m << offset));
+	x |= cpu_to_le64(((u64) value) << offset);
+	put_unaligned(x, (u64 *) report);
+}
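+
+/*
+ * Worked example (illustrative): starting from a zeroed buffer,
+ * implement(report, 4, 8, 0xab) clears the eight bits at bit offset 4
+ * and ORs in 0xab there, leaving report[0] == 0xb0 and
+ * report[1] == 0x0a in the little endian stream.
+ */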
+
+/*
+ * Search an array for a value. Returns 0 if the value is found,
+ * -1 otherwise.
+ */
+
+static __inline__ int search(__s32 *array, __s32 value, unsigned n)
+{
+	while (n--) {
+		if (*array++ == value)
+			return 0;
+	}
+	return -1;
+}
+
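+/*
+ * Deliver a single parsed usage/value pair to the handlers that have
+ * claimed the device (hidinput and/or hiddev).
+ */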
+static void hid_process_event(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value, int interrupt)
+{
+	hid_dump_input(usage, value);
+	if (hid->claimed & HID_CLAIMED_INPUT)
+		hidinput_hid_event(hid, field, usage, value);
+	if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt)
+		hiddev_hid_event(hid, field, usage, value);
+}
+
+/*
+ * Analyse a received field, and fetch the data from it. The field
+ * content is stored for next report processing (we do differential
+ * reporting to the layer).
+ */
+
+static void hid_input_field(struct hid_device *hid, struct hid_field *field, __u8 *data, int interrupt)
+{
+	unsigned n;
+	unsigned count = field->report_count;
+	unsigned offset = field->report_offset;
+	unsigned size = field->report_size;
+	__s32 min = field->logical_minimum;
+	__s32 max = field->logical_maximum;
+	__s32 *value;
+
+	if (!(value = kmalloc(sizeof(__s32) * count, GFP_ATOMIC)))
+		return;
+
+	for (n = 0; n < count; n++) {
+
+		value[n] = min < 0 ? snto32(extract(data, offset + n * size, size), size) :
+				     extract(data, offset + n * size, size);
+
+		/* Ignore report if ErrorRollOver */
+		if (!(field->flags & HID_MAIN_ITEM_VARIABLE)
+		    && value[n] >= min && value[n] <= max
+		    && field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
+			goto exit;
+	}
+
+	for (n = 0; n < count; n++) {
+
+		if (HID_MAIN_ITEM_VARIABLE & field->flags) {
+			hid_process_event(hid, field, &field->usage[n], value[n], interrupt);
+			continue;
+		}
+
+		if (field->value[n] >= min && field->value[n] <= max
+			&& field->usage[field->value[n] - min].hid
+			&& search(value, field->value[n], count))
+				hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt);
+
+		if (value[n] >= min && value[n] <= max
+			&& field->usage[value[n] - min].hid
+			&& search(field->value, value[n], count))
+				hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt);
+	}
+
+	memcpy(field->value, value, count * sizeof(__s32));
+exit:
+	kfree(value);
+}
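+
+/*
+ * Illustrative example of the differential (array field) path above,
+ * assuming a keyboard-style array with logical_minimum 0 and a usage
+ * mapped to each code: if the previously stored values were
+ * { 0x04, 0x05, 0, ... } and the new report carries { 0x04, 0x06, 0, ... },
+ * the usage selected by 0x05 is reported with value 0 (release), the one
+ * selected by 0x06 with value 1 (press), and 0x04 generates no event
+ * because it appears in both the old and the new set.
+ */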
+
+
+/*
+ * Output the field into the report.
+ */
+
+static void hid_output_field(struct hid_field *field, __u8 *data)
+{
+	unsigned count = field->report_count;
+	unsigned offset = field->report_offset;
+	unsigned size = field->report_size;
+	unsigned n;
+
+	for (n = 0; n < count; n++) {
+		if (field->logical_minimum < 0)	/* signed values */
+			implement(data, offset + n * size, size, s32ton(field->value[n], size));
+		else				/* unsigned values */
+			implement(data, offset + n * size, size, field->value[n]);
+	}
+}
+
+/*
+ * Create a report.
+ */
+
+static void hid_output_report(struct hid_report *report, __u8 *data)
+{
+	unsigned n;
+
+	if (report->id > 0)
+		*data++ = report->id;
+
+	for (n = 0; n < report->maxfield; n++)
+		hid_output_field(report->field[n], data);
+}
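+
+/*
+ * Note for callers, derived from the code above: the destination buffer
+ * has to cover ((report->size + 7) >> 3) bytes of field data, plus one
+ * leading byte for the report ID when report->id is nonzero; since
+ * implement() works in 64-bit read-modify-write steps, the buffer needs
+ * to be sized with that in mind.
+ */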
+
+/*
+ * Set a field value. The report this field belongs to has to be
+ * created and transferred to the device, to set this value in the
+ * device.
+ */
+
+int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
+{
+	unsigned size = field->report_size;
+
+	hid_dump_input(field->usage + offset, value);
+
+	if (offset >= field->report_count) {
+		dbg("offset (%d) exceeds report_count (%d)", offset, field->report_count);
+		hid_dump_field(field, 8);
+		return -1;
+	}
+	if (field->logical_minimum < 0) {
+		if (value != snto32(s32ton(value, size), size)) {
+			dbg("value %d is out of range", value);
+			return -1;
+		}
+	}
+	field->value[offset] = value;
+	return 0;
+}
+