/*
 * Greybus gbuf handling
 *
 * Copyright 2014 Google Inc.
 *
 * Released under the GPLv2 only.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "greybus.h"

static void cport_process_event(struct work_struct *work);

static struct kmem_cache *gbuf_head_cache;

/* Workqueue to handle Greybus buffer completions. */
static struct workqueue_struct *gbuf_workqueue;

/**
 * greybus_alloc_gbuf - allocate a greybus buffer
 *
 * @connection: connection the buffer will be used on
 * @complete: callback invoked when the gbuf has been processed
 * @size: size of the buffer
 * @outbound: true if the buffer will carry data to be sent out,
 *	      false for a buffer receiving incoming data
 * @gfp_mask: allocation mask
 * @context: context added to the gbuf by the driver
 *
 * TODO: someday it will be nice to handle DMA, but for now, due to the
 * architecture we are stuck with, the greybus core has to allocate the buffer
 * that the driver can then fill up with the data to be sent out.  Curse
 * hardware designers for this issue...
 */
struct gbuf *greybus_alloc_gbuf(struct gb_connection *connection,
				gbuf_complete_t complete,
				unsigned int size,
				bool outbound,
				gfp_t gfp_mask,
				void *context)
{
	struct gbuf *gbuf;
	int retval;

	gbuf = kmem_cache_zalloc(gbuf_head_cache, gfp_mask);
	if (!gbuf)
		return NULL;

	kref_init(&gbuf->kref);
	gbuf->connection = connection;
	INIT_WORK(&gbuf->event, cport_process_event);
	gbuf->outbound = outbound;
	gbuf->complete = complete;
	gbuf->context = context;

	/* Host controller specific allocation for the actual buffer */
	retval = connection->hd->driver->alloc_gbuf_data(gbuf, size, gfp_mask);
	if (retval) {
		kmem_cache_free(gbuf_head_cache, gbuf);
		return NULL;
	}

	return gbuf;
}
EXPORT_SYMBOL_GPL(greybus_alloc_gbuf);

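/*
 * Illustrative sketch only (not part of this file's code): the expected
 * life cycle of an outbound buffer, using the helpers in this file.  The
 * names "connection", "payload", "len", "my_complete" and "my_context"
 * are placeholders for whatever a protocol driver would provide.
 *
 *	struct gbuf *gbuf;
 *	int retval;
 *
 *	gbuf = greybus_alloc_gbuf(connection, my_complete, len,
 *				  true, GFP_KERNEL, my_context);
 *	if (!gbuf)
 *		return -ENOMEM;
 *
 *	memcpy(gbuf->transfer_buffer, payload, len);
 *	retval = greybus_submit_gbuf(gbuf, GFP_KERNEL);
 *
 * Once the host controller driver reports the transfer done via
 * greybus_gbuf_finished(), my_complete() runs from the gbuf workqueue
 * and the reference taken at allocation time is dropped.
 */
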
static DEFINE_MUTEX(gbuf_mutex);

static void free_gbuf(struct kref *kref)
{
	struct gbuf *gbuf = container_of(kref, struct gbuf, kref);

	gbuf->connection->hd->driver->free_gbuf_data(gbuf);

	kmem_cache_free(gbuf_head_cache, gbuf);
	mutex_unlock(&gbuf_mutex);
}

void greybus_free_gbuf(struct gbuf *gbuf)
{
	/* drop the reference count and get out of here */
	kref_put_mutex(&gbuf->kref, free_gbuf, &gbuf_mutex);
}
EXPORT_SYMBOL_GPL(greybus_free_gbuf);

struct gbuf *greybus_get_gbuf(struct gbuf *gbuf)
{
	mutex_lock(&gbuf_mutex);
	kref_get(&gbuf->kref);
	mutex_unlock(&gbuf_mutex);
	return gbuf;
}
EXPORT_SYMBOL_GPL(greybus_get_gbuf);

int greybus_submit_gbuf(struct gbuf *gbuf, gfp_t gfp_mask)
{
	struct greybus_host_device *hd = gbuf->connection->hd;

	return hd->driver->submit_gbuf(gbuf, hd, gfp_mask);
}

int greybus_kill_gbuf(struct gbuf *gbuf)
{
	// FIXME - implement
	return -ENOMEM;
}

static void cport_process_event(struct work_struct *work)
{
	struct gbuf *gbuf = container_of(work, struct gbuf, event);

	/* Call the completion handler, then drop our reference */
	gbuf->complete(gbuf);
	greybus_put_gbuf(gbuf);
}

#define MAX_CPORTS 1024
struct gb_cport_handler {
	gbuf_complete_t handler;
	u16 cport_id;
	struct gb_module *gmod;
	void *context;
};

static struct gb_cport_handler cport_handler[MAX_CPORTS];
// FIXME - use a lock for this list of handlers, but really, for now we don't
// need it, we don't have a dynamic system...

int gb_register_cport_complete(struct gb_module *gmod,
			       gbuf_complete_t handler,
			       u16 cport_id,
			       void *context)
{
	if (cport_handler[cport_id].handler)
		return -EINVAL;
	cport_handler[cport_id].context = context;
	cport_handler[cport_id].gmod = gmod;
	cport_handler[cport_id].cport_id = cport_id;
	cport_handler[cport_id].handler = handler;
	return 0;
}

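/*
 * Illustrative sketch only: how a protocol driver would hook up a cport
 * completion handler.  "gmod", "my_cport_handler", "MY_CPORT_ID" and
 * "my_data" are placeholder names, not definitions from this file.
 *
 *	retval = gb_register_cport_complete(gmod, my_cport_handler,
 *					    MY_CPORT_ID, my_data);
 *	if (retval)
 *		return retval;
 *	...
 *	gb_deregister_cport_complete(MY_CPORT_ID);
 *
 * While registered, the handler is invoked (via the gbuf workqueue) with
 * the gbuf that greybus_cport_in() below builds for incoming data on
 * that cport.
 */
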
void gb_deregister_cport_complete(u16 cport_id)
{
	cport_handler[cport_id].handler = NULL;
}

void greybus_cport_in(struct greybus_host_device *hd, u16 cport_id,
		      u8 *data, size_t length)
{
	struct gb_cport_handler *ch;
	struct gbuf *gbuf;
	struct gb_connection *connection;

	connection = gb_hd_connection_find(hd, cport_id);
	if (!connection) {
		dev_err(hd->parent,
			"nonexistent connection (%zu bytes dropped)\n", length);
		return;
	}

	/* first check to see if we have a cport handler for this cport */
	ch = &cport_handler[cport_id];
	if (!ch->handler) {
		/* Ugh, drop the data on the floor, after logging it... */
		dev_err(hd->parent,
			"Received data for cport %d, but no handler!\n",
			cport_id);
		return;
	}

	gbuf = greybus_alloc_gbuf(connection, ch->handler, length, false,
				  GFP_ATOMIC, ch->context);
	if (!gbuf) {
		/* Again, something bad went wrong, log it... */
		pr_err("can't allocate gbuf???\n");
		return;
	}

	/*
	 * FIXME:
	 * Very dumb copy data method for now; if this is slow (odds are it
	 * will be), we should move to a model where the hd "owns" all
	 * buffers, but we want something up and working first for now.
	 */
	memcpy(gbuf->transfer_buffer, data, length);
	gbuf->actual_length = length;

	queue_work(gbuf_workqueue, &gbuf->event);
}
EXPORT_SYMBOL_GPL(greybus_cport_in);

/* Can be called in interrupt context, do the work and get out of here */
void greybus_gbuf_finished(struct gbuf *gbuf)
{
	queue_work(gbuf_workqueue, &gbuf->event);
}
EXPORT_SYMBOL_GPL(greybus_gbuf_finished);

int gb_gbuf_init(void)
{
	gbuf_workqueue = alloc_workqueue("greybus_gbuf", 0, 1);
	if (!gbuf_workqueue)
		return -ENOMEM;

	gbuf_head_cache = kmem_cache_create("gbuf_head_cache",
					    sizeof(struct gbuf), 0, 0, NULL);
	if (!gbuf_head_cache) {
		destroy_workqueue(gbuf_workqueue);
		return -ENOMEM;
	}
	return 0;
}

void gb_gbuf_exit(void)
{
	destroy_workqueue(gbuf_workqueue);
	kmem_cache_destroy(gbuf_head_cache);
}