/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps two global lists, dma_device_list and dma_client_list.
 * Both of these are protected by a mutex, dma_list_mutex.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is simply set up by the driver.
 *
 * Each client is responsible for keeping track of the channels it uses. See
 * the definition of dma_event_callback in dmaengine.h.
 *
 * Each device has a kref, which is initialized to 1 when the device is
 * registered. A kref_get is done for each device registered. When the
 * device is released, the corresponding kref_put is done in the release
 * method. Every time one of the device's channels is allocated to a client,
 * a kref_get occurs. When the channel is freed, the corresponding kref_put
 * happens. The device's release function does a completion, so
 * unregister_device does a remove event, device_unregister, a kref_put
 * for the first reference, then waits on the completion for all other
 * references to finish.
 *
 * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
 * with a kref and a per_cpu local_t. A dma_chan_get is called when a client
 * signals that it wants to use a channel, and dma_chan_put is called when
 * a channel is removed or a client using it is unregistered. A client can
 * take extra references per outstanding transaction, as is the case with
 * the NET DMA client. The release function does a kref_put on the device.
 * -ChrisL, DanW
 */
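
/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * a minimal dma_client built on the callback model described above.  It
 * assumes the dma_client and dma_event_callback definitions from
 * <linux/dmaengine.h> of this era; "my_client" and "my_event" are
 * hypothetical names.
 *
 *	static enum dma_state_client
 *	my_event(struct dma_client *client, struct dma_chan *chan,
 *		 enum dma_state state)
 *	{
 *		// DMA_ACK takes the channel; the client now tracks it
 *		if (state == DMA_RESOURCE_AVAILABLE)
 *			return DMA_ACK;
 *		// ignore events this client does not care about
 *		return DMA_DUP;
 *	}
 *
 *	static struct dma_client my_client = {
 *		.event_callback	= my_event,
 *	};
 *
 *	dma_cap_zero(my_client.cap_mask);
 *	dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
 *	dma_async_client_register(&my_client);
 *	...
 *	dma_async_client_unregister(&my_client);
 */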

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static LIST_HEAD(dma_client_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->memcpy_count;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
				      char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->bytes_transferred;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);

	return sprintf(buf, "%d\n", chan->client_count);
}

static struct device_attribute dma_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void dma_async_device_cleanup(struct kref *kref);

static void dma_dev_release(struct device *dev)
{
	struct dma_chan *chan = to_dma_chan(dev);
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_attrs	= dma_attrs,
	.dev_release	= dma_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan: channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan: channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan, NULL);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan: channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

/**
 * dma_client_chan_alloc - try to allocate channels to a client
 * @client: &dma_client
 *
 * Called with dma_list_mutex held.
 */
static void dma_client_chan_alloc(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;
	enum dma_state_client ack;

	/* Find a channel */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		/* Does the client require a specific DMA controller? */
		if (client->slave && client->slave->dma_dev
				&& client->slave->dma_dev != device->dev)
			continue;
		if (!dma_device_satisfies_mask(device, client->cap_mask))
			continue;

		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			ack = client->event_callback(client, chan,
						     DMA_RESOURCE_AVAILABLE);

			/* we are done once this client rejects
			 * an available resource
			 */
			if (ack == DMA_NAK)
				return;
		}
	}
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_chan_cleanup - release a DMA channel's resources
 * @kref: kernel reference structure that contains the DMA channel device
 */
void dma_chan_cleanup(struct kref *kref)
{
	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}
EXPORT_SYMBOL(dma_chan_cleanup);

static void dma_chan_free_rcu(struct rcu_head *rcu)
{
	struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);

	kref_put(&chan->refcount, dma_chan_cleanup);
}

static void dma_chan_release(struct dma_chan *chan)
{
	call_rcu(&chan->rcu, dma_chan_free_rcu);
}

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan: associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine: initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
subsys_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	struct dma_chan *chan;
	int cpu;

	WARN_ONCE(dmaengine_ref_count == 0,
		  "client called %s without a reference", __func__);

	cpu = get_cpu();
	chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
	put_cpu();

	return chan;
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	WARN_ONCE(dmaengine_ref_count == 0,
		  "client called %s without a reference", __func__);

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);
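
/*
 * Illustrative sketch (editor's addition): the fast-path lookup pattern the
 * two helpers above serve.  A client that already holds a dmaengine
 * reference (for example via dma_async_client_register) grabs the per-cpu
 * channel for an operation type and falls back to the CPU when no channel
 * is available.  "do_copy" and its arguments are hypothetical names.
 *
 *	static dma_cookie_t do_copy(void *dst, void *src, size_t len)
 *	{
 *		struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
 *
 *		if (!chan) {
 *			// no offload engine available, copy synchronously
 *			memcpy(dst, src, len);
 *			return 0;
 *		}
 *		return dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	}
 *
 * followed at some convenient batching point by:
 *
 *	dma_issue_pending_all();
 */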

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied.  Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;
	struct dma_chan *min = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min)
				min = chan;
			else if (chan->table_count < min->table_count)
				min = chan;

			if (n-- == 0) {
				ret = chan;
				break; /* done */
			}
		}
		if (ret)
			break; /* done */
	}

	if (!ret)
		ret = min;

	if (ret)
		ret->table_count++;

	return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;
	int n;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	n = 0;
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			if (num_possible_cpus() > 1)
				chan = nth_chan(cap, n++);
			else
				chan = nth_chan(cap, -1);

			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev)
{
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dev_name(&chan->dev));
			continue;
		}
		ret = chan;
		break;
	}

	return ret;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	enum dma_state_client ack;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device);
		if (!chan)
			continue;

		if (fn)
			ack = fn(chan, fn_param);
		else
			ack = DMA_ACK;

		if (ack == DMA_ACK) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it.  We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n", __func__,
					 dev_name(&chan->dev));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_err("dmaengine: failed to get %s: (%d)\n",
				       dev_name(&chan->dev), err);
			else
				break;
		} else if (ack == DMA_DUP) {
			pr_debug("%s: %s filter said DMA_DUP\n",
				 __func__, dev_name(&chan->dev));
		} else if (ack == DMA_NAK) {
			pr_debug("%s: %s filter said DMA_NAK\n",
				 __func__, dev_name(&chan->dev));
			break;
		} else
			WARN_ONCE(1, "filter_fn: unknown response?\n");
		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
		 chan ? dev_name(&chan->dev) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
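
/*
 * Illustrative sketch (editor's addition): exclusive channel allocation via
 * the dma_request_channel() wrapper declared alongside
 * __dma_request_channel() in <linux/dmaengine.h>.  The filter function and
 * the "my_dev" cookie it matches against are hypothetical; a NULL filter
 * accepts the first capable channel.
 *
 *	static enum dma_state_client
 *	my_filter(struct dma_chan *chan, void *param)
 *	{
 *		// only accept channels provided by a specific device
 *		return chan->device->dev == param ? DMA_ACK : DMA_DUP;
 *	}
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dev);
 *	if (chan) {
 *		// the channel is now private to this caller
 *		...
 *		dma_release_channel(chan);
 *	}
 */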

/**
 * dma_clients_notify_available - broadcast available channels to the clients
 */
static void dma_clients_notify_available(void)
{
	struct dma_client *client;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node)
		dma_client_chan_alloc(client);

	mutex_unlock(&dma_list_mutex);
}

/**
 * dma_async_client_register - register a &dma_client
 * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
 */
void dma_async_client_register(struct dma_client *client)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	/* validate client data */
	BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
		!client->slave);

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_err("dmaengine: failed to get %s: (%d)\n",
				       dev_name(&chan->dev), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	list_add_tail(&client->global_node, &dma_client_list);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_register);

/**
 * dma_async_client_unregister - unregister a client and free the &dma_client
 * @client: &dma_client to free
 *
 * Force frees any allocated DMA channels, frees the &dma_client memory
 */
void dma_async_client_unregister(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;

	if (!client)
		return;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}

	list_del(&client->global_node);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_unregister);

/**
 * dma_async_client_chan_request - send all available channels to the
 *   client that satisfy the capability mask
 * @client: requester
 */
void dma_async_client_chan_request(struct dma_client *client)
{
	mutex_lock(&dma_list_mutex);
	dma_client_chan_alloc(client);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_chan_request);

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	static int id;
	int chancnt = 0, rc;
	struct dma_chan* chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
		!device->device_prep_dma_zero_sum);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_prep_slave_sg);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_terminate_all);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_is_tx_complete);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	init_completion(&device->done);
	kref_init(&device->refcount);

	mutex_lock(&dma_list_mutex);
	device->dev_id = id++;
	mutex_unlock(&dma_list_mutex);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			continue;

		chan->chan_id = chancnt++;
		chan->dev.class = &dma_devclass;
		chan->dev.parent = device->dev;
		dev_set_name(&chan->dev, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev);
		if (rc) {
			chancnt--;
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		/* One for the channel, one for the class device */
		kref_get(&device->refcount);
		kref_get(&device->refcount);
		kref_init(&chan->refcount);
		chan->client_count = 0;
		chan->slow_ref = 0;
		INIT_RCU_HEAD(&chan->rcu);
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	dma_clients_notify_available();

	return 0;

err_out:
	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		kref_put(&device->refcount, dma_async_device_cleanup);
		device_unregister(&chan->dev);
		chancnt--;
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

/**
 * dma_async_device_cleanup - function called when all references are released
 * @kref: kernel reference object
 */
static void dma_async_device_cleanup(struct kref *kref)
{
	struct dma_device *device;

	device = container_of(kref, struct dma_device, refcount);
	complete(&device->done);
}

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		device_unregister(&chan->dev);
		dma_chan_release(chan);
	}

	kref_put(&device->refcount, dma_async_device_cleanup);
	wait_for_completion(&device->done);
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			    void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
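
/*
 * Illustrative sketch (editor's addition): synchronously offloading one copy
 * with the helper above.  The buffers are assumed to be DMA-able kernel
 * memory (see the mapping rules in the kerneldoc), and "chan" is assumed to
 * come from dma_find_channel() or dma_request_channel().  dma_sync_wait()
 * issues the pending work and spins until the cookie completes.
 *
 *	dma_cookie_t cookie;
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	if (cookie < 0) {
 *		// descriptor allocation failed, fall back to the CPU
 *		memcpy(dst, src, len);
 *	} else if (dma_sync_wait(chan, cookie) != DMA_SUCCESS) {
 *		pr_err("offloaded copy failed\n");
 *	}
 */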

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			   unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 *
 * This routine assumes that tx was obtained from a call to async_memcpy,
 * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
 * and submitted). Walking the parent chain is only meant to cover for DMA
 * drivers that do not implement the DMA_INTERRUPT capability and may race with
 * the driver's descriptor cleanup routine.
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	enum dma_status status;
	struct dma_async_tx_descriptor *iter;
	struct dma_async_tx_descriptor *parent;

	if (!tx)
		return DMA_SUCCESS;

	WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
		  " %s\n", __func__, dev_name(&tx->chan->dev));

	/* poll through the dependency chain, return when tx is complete */
	do {
		iter = tx;

		/* find the root of the unsubmitted dependency chain */
		do {
			parent = iter->parent;
			if (!parent)
				break;
			else
				iter = parent;
		} while (parent);

		/* there is a small window for ->parent == NULL and
		 * ->cookie == -EBUSY
		 */
		while (iter->cookie == -EBUSY)
			cpu_relax();

		status = dma_sync_wait(iter->chan, iter->cookie);
	} while (status == DMA_IN_PROGRESS || (iter != tx));

	return status;
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = tx->next;
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		spin_lock_bh(&dep->lock);
		dep->parent = NULL;
		dep_next = dep->next;
		if (dep_next && dep_next->chan == chan)
			dep->next = NULL; /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		spin_unlock_bh(&dep->lock);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
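
/*
 * Illustrative sketch (editor's addition): how a DMA driver's descriptor
 * cleanup path is typically expected to use the helper above.  "my_desc",
 * "my_cleanup_descriptor", and the txd member are hypothetical stand-ins
 * for a driver's private descriptor type.
 *
 *	static void my_cleanup_descriptor(struct my_desc *desc)
 *	{
 *		struct dma_async_tx_descriptor *tx = &desc->txd;
 *
 *		if (tx->callback)
 *			tx->callback(tx->callback_param);
 *
 *		// kick off any async_tx operations that were chained
 *		// behind this one, possibly on another channel
 *		dma_run_dependencies(tx);
 *	}
 */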

static int __init dma_bus_init(void)
{
	mutex_init(&dma_list_mutex);
	return class_register(&dma_devclass);
}
subsys_initcall(dma_bus_init);