/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps two global lists, dma_device_list and dma_client_list.
 * Both of these are protected by a mutex, dma_list_mutex.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * after the device is registered; it is simply set up by the driver.
 *
 * Each client is responsible for keeping track of the channels it uses. See
 * the definition of dma_event_callback in dmaengine.h.
 *
 * Each device has a kref, which is initialized to 1 when the device is
 * registered. A kref_get is done for each channel device registered; when
 * the channel device is released, the corresponding kref_put is done in its
 * release method. Every time one of the device's channels is allocated to a
 * client, a kref_get occurs. When the channel is freed, the corresponding
 * kref_put happens. The device's release function does a completion, so
 * unregister_device does a device_unregister on each channel, a kref_put
 * for the first reference, then waits on the completion for all other
 * references to finish.
 *
 * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
 * with a kref and a per_cpu local_t. A dma_chan_get is called when a client
 * signals that it wants to use a channel, and dma_chan_put is called when
 * a channel is removed or a client using it is unregistered. A client can
 * take extra references per outstanding transaction, as is the case with
 * the NET DMA client. The release function does a kref_put on the device.
 * -ChrisL, DanW
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static LIST_HEAD(dma_client_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->memcpy_count;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
				      char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->bytes_transferred;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);

	return sprintf(buf, "%d\n", chan->client_count);
}

static struct device_attribute dma_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void dma_async_device_cleanup(struct kref *kref);

static void dma_dev_release(struct device *dev)
{
	struct dma_chan *chan = to_dma_chan(dev);
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_attrs	= dma_attrs,
	.dev_release	= dma_dev_release,
};
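
/*
 * Note: with the "dma" class above and the "dma%dchan%d" device names set
 * up in dma_async_device_register() below, these read-only attributes are
 * expected to appear per channel under sysfs, for example (paths
 * illustrative):
 *
 *	/sys/class/dma/dma0chan0/memcpy_count
 *	/sys/class/dma/dma0chan0/bytes_transferred
 *	/sys/class/dma/dma0chan0/in_use
 */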

/* --- client and device registration --- */

#define dma_chan_satisfies_mask(chan, mask) \
	__dma_chan_satisfies_mask((chan), &(mask))
static int
__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan, NULL);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

/**
 * dma_client_chan_alloc - try to allocate channels to a client
 * @client: &dma_client
 *
 * Called with dma_list_mutex held.
 */
static void dma_client_chan_alloc(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;
	enum dma_state_client ack;

	/* Find a channel */
	list_for_each_entry(device, &dma_device_list, global_node) {
		/* Does the client require a specific DMA controller? */
		if (client->slave && client->slave->dma_dev
				&& client->slave->dma_dev != device->dev)
			continue;

		list_for_each_entry(chan, &device->channels, device_node) {
			if (!dma_chan_satisfies_mask(chan, client->cap_mask))
				continue;
			if (!chan->client_count)
				continue;
			ack = client->event_callback(client, chan,
						     DMA_RESOURCE_AVAILABLE);

			/* we are done once this client rejects
			 * an available resource
			 */
			if (ack == DMA_NAK)
				return;
		}
	}
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_chan_cleanup - release a DMA channel's resources
 * @kref: kernel reference structure that contains the DMA channel device
 */
void dma_chan_cleanup(struct kref *kref)
{
	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}
EXPORT_SYMBOL(dma_chan_cleanup);

static void dma_chan_free_rcu(struct rcu_head *rcu)
{
	struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);

	kref_put(&chan->refcount, dma_chan_cleanup);
}

static void dma_chan_release(struct dma_chan *chan)
{
	call_rcu(&chan->rcu, dma_chan_free_rcu);
}

/**
 * dma_clients_notify_available - broadcast available channels to the clients
 */
static void dma_clients_notify_available(void)
{
	struct dma_client *client;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node)
		dma_client_chan_alloc(client);

	mutex_unlock(&dma_list_mutex);
}

/**
 * dma_async_client_register - register a &dma_client
 * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
 */
void dma_async_client_register(struct dma_client *client)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	/* validate client data */
	BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
		!client->slave);

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node)
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_init(&device->global_node);
				break;
			} else if (err)
				pr_err("dmaengine: failed to get %s: (%d)\n",
				       dev_name(&chan->dev), err);
		}

	list_add_tail(&client->global_node, &dma_client_list);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_register);
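
/*
 * Illustrative sketch (not built): a minimal client of this interface,
 * assuming the struct dma_client / dma_event_callback API declared in
 * dmaengine.h at this point in time.  The names example_event,
 * example_client and example_init are hypothetical; DMA_ACK/DMA_DUP and
 * dma_cap_zero()/dma_cap_set() are assumed from dmaengine.h, everything
 * else appears in this file.  The callback only needs to accept newly
 * available channels here; dma_client_chan_alloc() above stops offering
 * channels once the client returns DMA_NAK.  A matching
 * dma_async_client_unregister() call belongs in the module exit path.
 *
 *	static enum dma_state_client
 *	example_event(struct dma_client *client, struct dma_chan *chan,
 *		      enum dma_state state)
 *	{
 *		return state == DMA_RESOURCE_AVAILABLE ? DMA_ACK : DMA_DUP;
 *	}
 *
 *	static struct dma_client example_client = {
 *		.event_callback = example_event,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		dma_cap_zero(example_client.cap_mask);
 *		dma_cap_set(DMA_MEMCPY, example_client.cap_mask);
 *		dma_async_client_register(&example_client);
 *		dma_async_client_chan_request(&example_client);
 *		return 0;
 *	}
 */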

/**
 * dma_async_client_unregister - unregister a client and free the &dma_client
 * @client: &dma_client to free
 *
 * Force frees any allocated DMA channels, frees the &dma_client memory
 */
void dma_async_client_unregister(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;

	if (!client)
		return;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node)
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);

	list_del(&client->global_node);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_unregister);

/**
 * dma_async_client_chan_request - send the client all available channels
 * that satisfy its capability mask
 * @client - requester
 */
void dma_async_client_chan_request(struct dma_client *client)
{
	mutex_lock(&dma_list_mutex);
	dma_client_chan_alloc(client);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_chan_request);

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	static int id;
	int chancnt = 0, rc;
	struct dma_chan *chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
		!device->device_prep_dma_zero_sum);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_prep_slave_sg);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_terminate_all);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_is_tx_complete);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	init_completion(&device->done);
	kref_init(&device->refcount);

	mutex_lock(&dma_list_mutex);
	device->dev_id = id++;
	mutex_unlock(&dma_list_mutex);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			continue;

		chan->chan_id = chancnt++;
		chan->dev.class = &dma_devclass;
		chan->dev.parent = device->dev;
		dev_set_name(&chan->dev, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev);
		if (rc) {
			chancnt--;
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		/* One for the channel, one for the class device */
		kref_get(&device->refcount);
		kref_get(&device->refcount);
		kref_init(&chan->refcount);
		chan->client_count = 0;
		chan->slow_ref = 0;
		INIT_RCU_HEAD(&chan->rcu);
	}

	mutex_lock(&dma_list_mutex);
	if (dmaengine_ref_count)
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail(&device->global_node, &dma_device_list);
	mutex_unlock(&dma_list_mutex);

	dma_clients_notify_available();

	return 0;

err_out:
	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		kref_put(&device->refcount, dma_async_device_cleanup);
		device_unregister(&chan->dev);
		chancnt--;
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

/**
 * dma_async_device_cleanup - function called when all references are released
 * @kref: kernel reference object
 */
static void dma_async_device_cleanup(struct kref *kref)
{
	struct dma_device *device;

	device = container_of(kref, struct dma_device, refcount);
	complete(&device->done);
}

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del(&device->global_node);
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		device_unregister(&chan->dev);
		dma_chan_release(chan);
	}

	kref_put(&device->refcount, dma_async_device_cleanup);
	wait_for_completion(&device->done);
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
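
/*
 * Illustrative sketch (not built): a synchronous copy built from the
 * exported helpers above.  "chan", "dst", "src" and "len" are assumed to
 * be supplied by the caller, with "chan" obtained via the client
 * interface.  A negative cookie means descriptor allocation failed
 * (-ENOMEM above); dma_sync_wait() polls the cookie and gives up after
 * five seconds.
 *
 *	dma_cookie_t cookie;
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	if (cookie < 0)
 *		return cookie;
 *	if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		return -EIO;
 *	return 0;
 */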

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 *
 * This routine assumes that tx was obtained from a call to async_memcpy,
 * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
 * and submitted). Walking the parent chain is only meant to cover for DMA
 * drivers that do not implement the DMA_INTERRUPT capability and may race with
 * the driver's descriptor cleanup routine.
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	enum dma_status status;
	struct dma_async_tx_descriptor *iter;
	struct dma_async_tx_descriptor *parent;

	if (!tx)
		return DMA_SUCCESS;

	WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
		  " %s\n", __func__, dev_name(&tx->chan->dev));

	/* poll through the dependency chain, return when tx is complete */
	do {
		iter = tx;

		/* find the root of the unsubmitted dependency chain */
		do {
			parent = iter->parent;
			if (!parent)
				break;
			else
				iter = parent;
		} while (parent);

		/* there is a small window for ->parent == NULL and
		 * ->cookie == -EBUSY
		 */
		while (iter->cookie == -EBUSY)
			cpu_relax();

		status = dma_sync_wait(iter->chan, iter->cookie);
	} while (status == DMA_IN_PROGRESS || (iter != tx));

	return status;
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = tx->next;
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		spin_lock_bh(&dep->lock);
		dep->parent = NULL;
		dep_next = dep->next;
		if (dep_next && dep_next->chan == chan)
			dep->next = NULL; /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		spin_unlock_bh(&dep->lock);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
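
/*
 * Illustrative sketch (not built): the expected call site in a DMA
 * driver's descriptor clean-up path.  "desc" and its embedded "txd"
 * descriptor are hypothetical driver-private names; the pattern of
 * running the completion callback and then kicking off dependent
 * transactions follows the in-tree users of dma_run_dependencies().
 *
 *	if (desc->txd.callback)
 *		desc->txd.callback(desc->txd.callback_param);
 *	dma_run_dependencies(&desc->txd);
 */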

static int __init dma_bus_init(void)
{
	mutex_init(&dma_list_mutex);
	return class_register(&dma_devclass);
}
subsys_initcall(dma_bus_init);