/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast
 * operation, the code goes to great lengths to avoid additional overhead,
 * such as locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or, if it needs an exclusive channel, it can call
 * dma_request_channel(). Once a channel is allocated, a reference is taken
 * against its corresponding driver to prevent its removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is only set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details.
 */
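
/*
 * Illustrative usage sketch (not part of this file): an opportunistic
 * client of the interface described above.  The dst/src buffers and the
 * surrounding module context are assumptions of the example; error
 * handling is abbreviated.
 *
 *      dmaengine_get();
 *      chan = dma_find_channel(DMA_MEMCPY);
 *      if (chan) {
 *              cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *              dma_sync_wait(chan, cookie);
 *      } else {
 *              memcpy(dst, src, len);
 *      }
 *      dmaengine_put();
 */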

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev: device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        return chan_dev->chan;
}

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int i;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->memcpy_count;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
                                      char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int i;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->bytes_transferred;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan)
                err = sprintf(buf, "%d\n", chan->client_count);
        else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static struct device_attribute dma_attrs[] = {
        __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
        __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
        __ATTR(in_use, S_IRUGO, show_in_use, NULL),
        __ATTR_NULL
};

static void chan_dev_release(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        if (atomic_dec_and_test(chan_dev->idr_ref)) {
                mutex_lock(&dma_list_mutex);
                idr_remove(&dma_idr, chan_dev->dev_id);
                mutex_unlock(&dma_list_mutex);
                kfree(chan_dev->idr_ref);
        }
        kfree(chan_dev);
}

static struct class dma_devclass = {
        .name           = "dma",
        .dev_attrs      = dma_attrs,
        .dev_release    = chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
        __dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
        dma_cap_mask_t has;

        bitmap_and(has.bits, want->bits, device->cap_mask.bits,
                DMA_TX_TYPE_END);
        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
        return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan: channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
        struct module *owner = dma_chan_to_owner(chan);

        while (chan->client_count < dmaengine_ref_count) {
                __module_get(owner);
                chan->client_count++;
        }
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan: channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
        int err = -ENODEV;
        struct module *owner = dma_chan_to_owner(chan);

        if (chan->client_count) {
                __module_get(owner);
                err = 0;
        } else if (try_module_get(owner))
                err = 0;

        if (err == 0)
                chan->client_count++;

        /* allocate upon first client reference */
        if (chan->client_count == 1 && err == 0) {
                int desc_cnt = chan->device->device_alloc_chan_resources(chan);

                if (desc_cnt < 0) {
                        err = desc_cnt;
                        chan->client_count = 0;
                        module_put(owner);
                } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
                        balance_ref_count(chan);
        }

        return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan: channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
        if (!chan->client_count)
                return; /* this channel failed alloc_chan_resources */
        chan->client_count--;
        module_put(dma_chan_to_owner(chan));
        if (chan->client_count == 0)
                chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status;
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        dma_async_issue_pending(chan);
        do {
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        printk(KERN_ERR "dma_sync_wait_timeout!\n");
                        return DMA_ERROR;
                }
        } while (status == DMA_IN_PROGRESS);

        return status;
}
EXPORT_SYMBOL(dma_sync_wait);
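
/*
 * Usage sketch for dma_sync_wait() (illustrative; 'chan' and 'cookie'
 * come from a previously submitted descriptor and are assumptions of
 * the example):
 *
 *      if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *              pr_err("copy failed or timed out\n");
 *
 * Note that this helper busy-waits for up to five seconds, so it is only
 * suitable for setup, teardown, and test paths, not for fast paths.
 */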

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan: associated channel for this entry
 */
struct dma_chan_tbl_ent {
        struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
        enum dma_transaction_type cap;
        int err = 0;

        bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

        /* 'interrupt', 'private', and 'slave' are channel capabilities,
         * but are not associated with an operation so they do not need
         * an entry in the channel_table
         */
        clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
        clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
        clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

        for_each_dma_cap_mask(cap, dma_cap_mask_all) {
                channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
                if (!channel_table[cap]) {
                        err = -ENOMEM;
                        break;
                }
        }

        if (err) {
                pr_err("dmaengine: initialization failure\n");
                for_each_dma_cap_mask(cap, dma_cap_mask_all)
                        if (channel_table[cap])
                                free_percpu(channel_table[cap]);
        }

        return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
        return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);
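
/*
 * Illustrative note: dma_find_channel() is only valid between
 * dmaengine_get() and dmaengine_put(); no per-call reference is taken
 * because dmaengine_get() already pinned every public channel.  A caller
 * might do (sketch):
 *
 *      struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
 *
 *      if (!chan)
 *              ... fall back to a synchronous CPU implementation ...
 */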

/*
 * net_dma_find_channel - find a channel for net_dma
 * net_dma has alignment requirements
 */
struct dma_chan *net_dma_find_channel(void)
{
        struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
        if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
                return NULL;

        return chan;
}
EXPORT_SYMBOL(net_dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        rcu_read_lock();
        list_for_each_entry_rcu(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        if (chan->client_count)
                                device->device_issue_pending(chan);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);
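
/*
 * Illustrative note: submitted descriptors do not necessarily start until
 * the channel is kicked.  A typical client therefore does (sketch, using
 * helpers from include/linux/dmaengine.h):
 *
 *      cookie = tx->tx_submit(tx);
 *      dma_async_issue_pending(chan);
 *      status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *
 * dma_issue_pending_all() performs the middle step for every public
 * channel with active clients, on behalf of polling users such as the
 * network stack.
 */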

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied.  Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
        struct dma_device *device;
        struct dma_chan *chan;
        struct dma_chan *ret = NULL;
        struct dma_chan *min = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (!dma_has_cap(cap, device->cap_mask) ||
                    dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        if (!chan->client_count)
                                continue;
                        if (!min)
                                min = chan;
                        else if (chan->table_count < min->table_count)
                                min = chan;

                        if (n-- == 0) {
                                ret = chan;
                                break; /* done */
                        }
                }
                if (ret)
                        break; /* done */
        }

        if (!ret)
                ret = min;

        if (ret)
                ret->table_count++;

        return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
        struct dma_chan *chan;
        struct dma_device *device;
        int cpu;
        int cap;
        int n;

        /* undo the last distribution */
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_possible_cpu(cpu)
                        per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        chan->table_count = 0;
        }

        /* don't populate the channel_table if no clients are available */
        if (!dmaengine_ref_count)
                return;

        /* redistribute available channels */
        n = 0;
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_online_cpu(cpu) {
                        if (num_possible_cpus() > 1)
                                chan = nth_chan(cap, n++);
                        else
                                chan = nth_chan(cap, -1);

                        per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
                }
}

static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
                                          dma_filter_fn fn, void *fn_param)
{
        struct dma_chan *chan;

        if (!__dma_device_satisfies_mask(dev, mask)) {
                pr_debug("%s: wrong capabilities\n", __func__);
                return NULL;
        }
        /* devices with multiple channels need special handling as we need to
         * ensure that all channels are either private or public.
         */
        if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
                list_for_each_entry(chan, &dev->channels, device_node) {
                        /* some channels are already publicly allocated */
                        if (chan->client_count)
                                return NULL;
                }

        list_for_each_entry(chan, &dev->channels, device_node) {
                if (chan->client_count) {
                        pr_debug("%s: %s busy\n",
                                 __func__, dma_chan_name(chan));
                        continue;
                }
                if (fn && !fn(chan, fn_param)) {
                        pr_debug("%s: %s filter said false\n",
                                 __func__, dma_chan_name(chan));
                        continue;
                }
                return chan;
        }

        return NULL;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to accept or reject available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan = NULL;
        int err;

        /* Find a channel */
        mutex_lock(&dma_list_mutex);
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                chan = private_candidate(mask, device, fn, fn_param);
                if (chan) {
                        /* Found a suitable channel, try to grab, prep, and
                         * return it.  We first set DMA_PRIVATE to disable
                         * balance_ref_count as this channel will not be
                         * published in the general-purpose allocator
                         */
                        dma_cap_set(DMA_PRIVATE, device->cap_mask);
                        device->privatecnt++;
                        err = dma_chan_get(chan);

                        if (err == -ENODEV) {
                                pr_debug("%s: %s module removed\n", __func__,
                                         dma_chan_name(chan));
                                list_del_rcu(&device->global_node);
                        } else if (err)
                                pr_debug("dmaengine: failed to get %s: (%d)\n",
                                         dma_chan_name(chan), err);
                        else
                                break;
                        if (--device->privatecnt == 0)
                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);
                        chan = NULL;
                }
        }
        mutex_unlock(&dma_list_mutex);

        pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
                 chan ? dma_chan_name(chan) : NULL);

        return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
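
/*
 * Illustrative sketch of the exclusive path (the filter function and
 * 'my_dev' pointer are assumptions of the example; dma_request_channel()
 * is the include/linux/dmaengine.h wrapper that passes the mask by
 * reference):
 *
 *      static bool filter(struct dma_chan *chan, void *param)
 *      {
 *              return chan->device->dev == param;
 *      }
 *
 *      dma_cap_mask_t mask;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_MEMCPY, mask);
 *      chan = dma_request_channel(mask, filter, my_dev);
 *      if (chan)
 *              ... use chan exclusively, then dma_release_channel(chan) ...
 */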

void dma_release_channel(struct dma_chan *chan)
{
        mutex_lock(&dma_list_mutex);
        WARN_ONCE(chan->client_count != 1,
                  "chan reference count %d != 1\n", chan->client_count);
        dma_chan_put(chan);
        /* drop PRIVATE cap enabled by __dma_request_channel() */
        if (--chan->device->privatecnt == 0)
                dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count++;

        /* try to grab channels */
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        err = dma_chan_get(chan);
                        if (err == -ENODEV) {
                                /* module removed before we could use it */
                                list_del_rcu(&device->global_node);
                                break;
                        } else if (err)
                                pr_err("dmaengine: failed to get %s: (%d)\n",
                                       dma_chan_name(chan), err);
                }
        }

        /* if this is the first reference and there were channels
         * waiting we need to rebalance to get those channels
         * incorporated into the channel table
         */
        if (dmaengine_ref_count == 1)
                dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count--;
        BUG_ON(dmaengine_ref_count < 0);
        /* drop channel references */
        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        dma_chan_put(chan);
        }
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
        /* A device that satisfies this test has channels that will never cause
         * an async_tx channel switch event as all possible operation types can
         * be handled.
         */
        #ifdef CONFIG_ASYNC_TX_DMA
        if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
                return false;
        #endif

        #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
        if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
                return false;
        #endif

        #if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
        if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
                return false;
        #endif

        #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
        if (!dma_has_cap(DMA_XOR, device->cap_mask))
                return false;

        #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
        if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
                return false;
        #endif
        #endif

        #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
        if (!dma_has_cap(DMA_PQ, device->cap_mask))
                return false;

        #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
        if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
                return false;
        #endif
        #endif

        return true;
}

static int get_dma_id(struct dma_device *device)
{
        int rc;

 idr_retry:
        if (!idr_pre_get(&dma_idr, GFP_KERNEL))
                return -ENOMEM;
        mutex_lock(&dma_list_mutex);
        rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
        mutex_unlock(&dma_list_mutex);
        if (rc == -EAGAIN)
                goto idr_retry;
        else if (rc != 0)
                return rc;

        return 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
        int chancnt = 0, rc;
        struct dma_chan *chan;
        atomic_t *idr_ref;

        if (!device)
                return -ENODEV;

        /* validate device routines */
        BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
                !device->device_prep_dma_memcpy);
        BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
                !device->device_prep_dma_xor);
        BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
                !device->device_prep_dma_xor_val);
        BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
                !device->device_prep_dma_pq);
        BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
                !device->device_prep_dma_pq_val);
        BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
                !device->device_prep_dma_memset);
        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
                !device->device_prep_dma_interrupt);
        BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
                !device->device_prep_dma_sg);
        BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
                !device->device_prep_dma_cyclic);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
                !device->device_control);
        BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
                !device->device_prep_interleaved_dma);

        BUG_ON(!device->device_alloc_chan_resources);
        BUG_ON(!device->device_free_chan_resources);
        BUG_ON(!device->device_tx_status);
        BUG_ON(!device->device_issue_pending);
        BUG_ON(!device->dev);

        /* note: this only matters in the
         * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
         */
        if (device_has_all_tx_types(device))
                dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

        idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
        if (!idr_ref)
                return -ENOMEM;
        rc = get_dma_id(device);
        if (rc != 0) {
                kfree(idr_ref);
                return rc;
        }

        atomic_set(idr_ref, 0);

        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
                rc = -ENOMEM;
                chan->local = alloc_percpu(typeof(*chan->local));
                if (chan->local == NULL)
                        goto err_out;
                chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
                if (chan->dev == NULL) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        goto err_out;
                }

                chan->chan_id = chancnt++;
                chan->dev->device.class = &dma_devclass;
                chan->dev->device.parent = device->dev;
                chan->dev->chan = chan;
                chan->dev->idr_ref = idr_ref;
                chan->dev->dev_id = device->dev_id;
                atomic_inc(idr_ref);
                dev_set_name(&chan->dev->device, "dma%dchan%d",
                             device->dev_id, chan->chan_id);

                rc = device_register(&chan->dev->device);
                if (rc) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        kfree(chan->dev);
                        atomic_dec(idr_ref);
                        goto err_out;
                }
                chan->client_count = 0;
        }
        device->chancnt = chancnt;

        mutex_lock(&dma_list_mutex);
        /* take references on public channels */
        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
                list_for_each_entry(chan, &device->channels, device_node) {
                        /* if clients are already waiting for channels we need
                         * to take references on their behalf
                         */
                        if (dma_chan_get(chan) == -ENODEV) {
                                /* note we can only get here for the first
                                 * channel as the remaining channels are
                                 * guaranteed to get a reference
                                 */
                                rc = -ENODEV;
                                mutex_unlock(&dma_list_mutex);
                                goto err_out;
                        }
                }
        list_add_tail_rcu(&device->global_node, &dma_device_list);
        if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                device->privatecnt++;   /* Always private */
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        return 0;

err_out:
        /* if we never registered a channel just release the idr */
        if (atomic_read(idr_ref) == 0) {
                mutex_lock(&dma_list_mutex);
                idr_remove(&dma_idr, device->dev_id);
                mutex_unlock(&dma_list_mutex);
                kfree(idr_ref);
                return rc;
        }

        list_for_each_entry(chan, &device->channels, device_node) {
                if (chan->local == NULL)
                        continue;
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
        return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
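
/*
 * Illustrative provider-side sketch (the my_* and mydev names are
 * assumptions of the example): a driver fills in its capabilities and the
 * mandatory callbacks checked by the BUG_ON()s above, populates
 * device->channels, and then registers:
 *
 *      dma_cap_set(DMA_MEMCPY, mydev->common.cap_mask);
 *      mydev->common.device_alloc_chan_resources = my_alloc_chan_resources;
 *      mydev->common.device_free_chan_resources = my_free_chan_resources;
 *      mydev->common.device_prep_dma_memcpy = my_prep_memcpy;
 *      mydev->common.device_tx_status = my_tx_status;
 *      mydev->common.device_issue_pending = my_issue_pending;
 *      mydev->common.dev = &pdev->dev;
 *      err = dma_async_device_register(&mydev->common);
 */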

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it from being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        list_del_rcu(&device->global_node);
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        list_for_each_entry(chan, &device->channels, device_node) {
                WARN_ONCE(chan->client_count,
                          "%s called while %d clients hold a reference\n",
                          __func__, chan->client_count);
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
                            void *src, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        unsigned long flags;

        dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
        dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
        flags = DMA_CTRL_ACK |
                DMA_COMPL_SRC_UNMAP_SINGLE |
                DMA_COMPL_DEST_UNMAP_SINGLE;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        preempt_disable();
        __this_cpu_add(chan->local->bytes_transferred, len);
        __this_cpu_inc(chan->local->memcpy_count);
        preempt_enable();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
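
/*
 * Usage sketch (illustrative; 'dst', 'src' and 'len' are assumptions of
 * the example).  A negative cookie is an error; otherwise completion can
 * be polled with dma_async_is_tx_complete() or dma_sync_wait():
 *
 *      cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *      if (dma_submit_error(cookie))
 *              memcpy(dst, src, len);
 *      else
 *              dma_sync_wait(chan, cookie);
 */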

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
                           unsigned int offset, void *kdata, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        unsigned long flags;

        dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
        flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        preempt_disable();
        __this_cpu_add(chan->local->bytes_transferred, len);
        __this_cpu_inc(chan->local->memcpy_count);
        preempt_enable();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
        unsigned int dest_off, struct page *src_pg, unsigned int src_off,
        size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        unsigned long flags;

        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
                                DMA_FROM_DEVICE);
        flags = DMA_CTRL_ACK;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

        if (!tx) {
                dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        preempt_disable();
        __this_cpu_add(chan->local->bytes_transferred, len);
        __this_cpu_inc(chan->local->memcpy_count);
        preempt_enable();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
        struct dma_chan *chan)
{
        tx->chan = chan;
        #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
        spin_lock_init(&tx->lock);
        #endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        if (!tx)
                return DMA_SUCCESS;

        while (tx->cookie == -EBUSY) {
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        pr_err("%s timeout waiting for descriptor submission\n",
                               __func__);
                        return DMA_ERROR;
                }
                cpu_relax();
        }
        return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *      (start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
        struct dma_async_tx_descriptor *dep = txd_next(tx);
        struct dma_async_tx_descriptor *dep_next;
        struct dma_chan *chan;

        if (!dep)
                return;

        /* we'll submit tx->next now, so clear the link */
        txd_clear_next(tx);
        chan = dep->chan;

        /* keep submitting up until a channel switch is detected
         * in that case we will be called again as a result of
         * processing the interrupt from async_tx_channel_switch
         */
        for (; dep; dep = dep_next) {
                txd_lock(dep);
                txd_clear_parent(dep);
                dep_next = txd_next(dep);
                if (dep_next && dep_next->chan == chan)
                        txd_clear_next(dep); /* ->next will be submitted */
                else
                        dep_next = NULL; /* submit current dep and terminate */
                txd_unlock(dep);

                dep->tx_submit(dep);
        }

        chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
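
/*
 * Illustrative sketch: a driver's descriptor cleanup path typically runs
 * the client callback and then kicks any dependent operations (the
 * 'desc' structure embedding a dma_async_tx_descriptor 'txd' is an
 * assumption of the example):
 *
 *      if (desc->txd.callback)
 *              desc->txd.callback(desc->txd.callback_param);
 *      dma_run_dependencies(&desc->txd);
 */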

static int __init dma_bus_init(void)
{
        return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);