blob: 348ee7c27a07e416391327230f4828c2001b49d2 [file] [log] [blame]
Greg Kroah-Hartmand5d19032014-08-11 19:03:20 +08001/*
2 * Greybus gbuf handling
3 *
4 * Copyright 2014 Google Inc.
5 *
6 * Released under the GPLv2 only.
7 */
8
9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11#include <linux/types.h>
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/kernel.h>
Greg Kroah-Hartmana39879f2014-09-06 16:57:36 -070015#include <linux/kref.h>
Greg Kroah-Hartmand5d19032014-08-11 19:03:20 +080016#include <linux/device.h>
Greg Kroah-Hartmana39879f2014-09-06 16:57:36 -070017#include <linux/slab.h>
Greg Kroah-Hartmand5d19032014-08-11 19:03:20 +080018
19#include "greybus.h"
20
/* Slab cache for struct gbuf "heads"; the data buffers are allocated
 * separately by the host-controller driver (alloc_gbuf_data). */
static struct kmem_cache *gbuf_head_cache;
22
/**
 * greybus_alloc_gbuf - allocate a greybus buffer
 *
 * @connection: greybus connection the buffer will travel over
 * @complete: callback invoked when the gbuf is finished with
 * @size: size of the data buffer to allocate
 * @outbound: true if the buffer carries outgoing data
 * @gfp_mask: allocation mask
 * @context: opaque context stored in the gbuf for the driver's use
 *
 * Returns a new gbuf holding its initial reference, or NULL if either
 * the head or the host-controller data allocation fails.  Release with
 * greybus_free_gbuf().
 *
 * TODO: someday it will be nice to handle DMA, but for now, due to the
 * architecture we are stuck with, the greybus core has to allocate the buffer
 * that the driver can then fill up with the data to be sent out.  Curse
 * hardware designers for this issue...
 */
struct gbuf *greybus_alloc_gbuf(struct gb_connection *connection,
				gbuf_complete_t complete,
				unsigned int size,
				bool outbound,
				gfp_t gfp_mask,
				void *context)
{
	struct gbuf *gbuf;
	int retval;

	gbuf = kmem_cache_zalloc(gbuf_head_cache, gfp_mask);
	if (!gbuf)
		return NULL;

	kref_init(&gbuf->kref);		/* caller owns the initial reference */
	gbuf->connection = connection;
	gbuf->outbound = outbound;
	gbuf->complete = complete;
	gbuf->context = context;

	/* Host controller specific allocation for the actual buffer */
	retval = connection->hd->driver->alloc_gbuf_data(gbuf, size, gfp_mask);
	if (retval) {
		kmem_cache_free(gbuf_head_cache, gbuf);
		return NULL;
	}

	return gbuf;
}
EXPORT_SYMBOL_GPL(greybus_alloc_gbuf);
68
/* Serializes kref_get() in greybus_get_gbuf() against the final put
 * (kref_put_mutex) in greybus_free_gbuf(). */
static DEFINE_MUTEX(gbuf_mutex);
70
/* kref release callback: tear down a gbuf once its last reference is gone. */
static void free_gbuf(struct kref *kref)
{
	struct gbuf *gbuf = container_of(kref, struct gbuf, kref);

	/* Let the host-controller driver release the data buffer it allocated */
	gbuf->connection->hd->driver->free_gbuf_data(gbuf);

	kmem_cache_free(gbuf_head_cache, gbuf);
	/*
	 * We are entered with gbuf_mutex held — kref_put_mutex() in
	 * greybus_free_gbuf() acquires it before invoking us on the
	 * final put — so drop it here on the way out.
	 */
	mutex_unlock(&gbuf_mutex);
}
80
/* Drop one reference to a gbuf; frees it when the count hits zero. */
void greybus_free_gbuf(struct gbuf *gbuf)
{
	/*
	 * Drop the reference count and get out of here.  On the final put,
	 * kref_put_mutex() takes gbuf_mutex and calls free_gbuf(), which
	 * releases the mutex itself.
	 */
	kref_put_mutex(&gbuf->kref, free_gbuf, &gbuf_mutex);
}
EXPORT_SYMBOL_GPL(greybus_free_gbuf);
87
/* Take an additional reference on a gbuf; returns the gbuf for chaining. */
struct gbuf *greybus_get_gbuf(struct gbuf *gbuf)
{
	/* Hold the mutex so we cannot race with the final put in free_gbuf() */
	mutex_lock(&gbuf_mutex);
	kref_get(&gbuf->kref);
	mutex_unlock(&gbuf_mutex);
	return gbuf;
}
EXPORT_SYMBOL_GPL(greybus_get_gbuf);
96
Greg Kroah-Hartmanf036e052014-09-19 19:13:33 -070097int greybus_submit_gbuf(struct gbuf *gbuf, gfp_t gfp_mask)
Greg Kroah-Hartmand5d19032014-08-11 19:03:20 +080098{
Alex Elder6eb3f4b2014-10-06 06:53:10 -050099 struct greybus_host_device *hd = gbuf->connection->hd;
100
Alex Elder61418b92014-10-16 06:35:29 -0500101 return hd->driver->submit_gbuf(gbuf, gfp_mask);
Greg Kroah-Hartmand5d19032014-08-11 19:03:20 +0800102}
103
/*
 * Cancel an in-flight gbuf.  Not yet implemented: always fails.
 * NOTE(review): -ENOMEM looks like a placeholder error code here — a
 * real implementation would likely return 0 or a cancellation status.
 */
int greybus_kill_gbuf(struct gbuf *gbuf)
{
	// FIXME - implement
	return -ENOMEM;
}
Greg Kroah-Hartman9c8d3af2014-09-13 11:09:35 -0700109
#define MAX_CPORTS	1024	/* number of slots in the static handler table */

/* Per-cport completion registration, filled in by gb_register_cport_complete() */
struct gb_cport_handler {
	gbuf_complete_t handler;	/* completion callback; NULL slot = free */
	u16 cport_id;			/* id this entry was registered for */
	struct gb_module *gmod;		/* module that registered the handler */
	void *context;			/* opaque pointer handed back to the driver */
};

static struct gb_cport_handler cport_handler[MAX_CPORTS];
// FIXME - use a lock for this list of handlers, but really, for now we don't
// need it, we don't have a dynamic system...
121
Alex Eldere1e9dbd2014-10-01 21:54:11 -0500122int gb_register_cport_complete(struct gb_module *gmod,
123 gbuf_complete_t handler,
124 u16 cport_id,
Greg Kroah-Hartman80e04f02014-09-13 18:20:54 -0700125 void *context)
126{
Alex Elder0db32a62014-09-24 05:16:14 -0500127 if (cport_handler[cport_id].handler)
Greg Kroah-Hartman80e04f02014-09-13 18:20:54 -0700128 return -EINVAL;
Alex Elder0db32a62014-09-24 05:16:14 -0500129 cport_handler[cport_id].context = context;
130 cport_handler[cport_id].gmod = gmod;
Alex Elder1cfc6672014-09-30 19:25:21 -0500131 cport_handler[cport_id].cport_id = cport_id;
Alex Elder0db32a62014-09-24 05:16:14 -0500132 cport_handler[cport_id].handler = handler;
Greg Kroah-Hartman80e04f02014-09-13 18:20:54 -0700133 return 0;
134}
135
Alex Elder1cfc6672014-09-30 19:25:21 -0500136void gb_deregister_cport_complete(u16 cport_id)
Greg Kroah-Hartman80e04f02014-09-13 18:20:54 -0700137{
Alex Elder0db32a62014-09-24 05:16:14 -0500138 cport_handler[cport_id].handler = NULL;
Greg Kroah-Hartman80e04f02014-09-13 18:20:54 -0700139}
140
Alex Elder1cfc6672014-09-30 19:25:21 -0500141void greybus_cport_in(struct greybus_host_device *hd, u16 cport_id,
142 u8 *data, size_t length)
Greg Kroah-Hartman80e04f02014-09-13 18:20:54 -0700143{
Alex Elder00d2e752014-10-06 06:53:09 -0500144 struct gb_connection *connection;
145
146 connection = gb_hd_connection_find(hd, cport_id);
147 if (!connection) {
148 dev_err(hd->parent,
149 "nonexistent connection (%zu bytes dropped)\n", length);
150 return;
151 }
Alex Elderd90c25b2014-10-16 06:35:33 -0500152 gb_connection_operation_recv(connection, data, length);
Greg Kroah-Hartman80e04f02014-09-13 18:20:54 -0700153}
Alex Elder0db32a62014-09-24 05:16:14 -0500154EXPORT_SYMBOL_GPL(greybus_cport_in);
Greg Kroah-Hartman80e04f02014-09-13 18:20:54 -0700155
/* Can be called in interrupt context, do the work and get out of here */
void greybus_gbuf_finished(struct gbuf *gbuf)
{
	/* Hand the completed buffer to the callback supplied at alloc time */
	gbuf->complete(gbuf);
}
EXPORT_SYMBOL_GPL(greybus_gbuf_finished);
162
Greg Kroah-Hartman45f36782014-09-14 11:40:35 -0700163int gb_gbuf_init(void)
Greg Kroah-Hartman80e04f02014-09-13 18:20:54 -0700164{
Greg Kroah-Hartman3e7736e2014-09-21 17:34:28 -0700165 gbuf_head_cache = kmem_cache_create("gbuf_head_cache",
166 sizeof(struct gbuf), 0, 0, NULL);
Greg Kroah-Hartman80e04f02014-09-13 18:20:54 -0700167 return 0;
168}
169
/* Tear down the gbuf head cache created by gb_gbuf_init(). */
void gb_gbuf_exit(void)
{
	kmem_cache_destroy(gbuf_head_cache);
}
Greg Kroah-Hartman80e04f02014-09-13 18:20:54 -0700173}