/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can call
 * dma_request_channel(). Once a channel is allocated a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details.
 */
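
/*
 * Usage sketch (editorial illustration, not part of the original source):
 * a client that only wants opportunistic memcpy offload can pair
 * dmaengine_get()/dmaengine_put() with dma_find_channel() as below.  The
 * function name is hypothetical and error handling is kept minimal.
 *
 *	static void example_copy(void *dst, void *src, size_t len)
 *	{
 *		struct dma_chan *chan;
 *		dma_cookie_t cookie = -ENXIO;
 *
 *		dmaengine_get();			// register interest in channels
 *		chan = dma_find_channel(DMA_MEMCPY);	// may be NULL if none available
 *		if (chan)
 *			cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *		if (cookie < 0)
 *			memcpy(dst, src, len);		// fall back to a cpu copy
 *		else
 *			dma_sync_wait(chan, cookie);	// spin until the copy completes
 *		dmaengine_put();
 *	}
 */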

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
static struct idr dma_idr;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
				      char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static struct device_attribute dma_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_attrs	= dma_attrs,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine: initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied.  Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;
	struct dma_chan *min = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min)
				min = chan;
			else if (chan->table_count < min->table_count)
				min = chan;

			if (n-- == 0) {
				ret = chan;
				break; /* done */
			}
		}
		if (ret)
			break; /* done */
	}

	if (!ret)
		ret = min;

	if (ret)
		ret->table_count++;

	return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;
	int n;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	n = 0;
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			if (num_possible_cpus() > 1)
				chan = nth_chan(cap, n++);
			else
				chan = nth_chan(cap, -1);

			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it.  We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n", __func__,
					 dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_err("dmaengine: failed to get %s: (%d)\n",
				       dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
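
/*
 * Usage sketch (editorial illustration, not part of the original source):
 * a peripheral driver that needs an exclusive slave channel can build a
 * capability mask and supply a filter callback to dma_request_channel().
 * The filter, the function names, and the match token are hypothetical.
 *
 *	static bool example_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;	// match a specific DMA controller
 *	}
 *
 *	static struct dma_chan *example_grab_channel(struct device *dma_dev)
 *	{
 *		dma_cap_mask_t mask;
 *
 *		dma_cap_zero(mask);
 *		dma_cap_set(DMA_SLAVE, mask);
 *		return dma_request_channel(mask, example_filter, dma_dev);
 *	}
 *
 * The channel is returned to the general pool with dma_release_channel()
 * when the driver is done with it.
 */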

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_err("dmaengine: failed to get %s: (%d)\n",
				       dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
	if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

 idr_retry:
	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
		return -ENOMEM;
	mutex_lock(&dma_list_mutex);
	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
	mutex_unlock(&dma_list_mutex);
	if (rc == -EAGAIN)
		goto idr_retry;
	else if (rc != 0)
		return rc;

	return 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_prep_slave_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_control);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
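
/*
 * Provider-side sketch (editorial illustration, not part of the original
 * source): a minimal memcpy-only driver would populate the mandatory
 * dma_device callbacks checked above before registering.  The foo_* names
 * and the foo_dma wrapper struct (holding a dma_device and one dma_chan)
 * are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_dma *fd = devm_kzalloc(&pdev->dev, sizeof(*fd), GFP_KERNEL);
 *
 *		if (!fd)
 *			return -ENOMEM;
 *		dma_cap_set(DMA_MEMCPY, fd->dma.cap_mask);
 *		fd->dma.dev = &pdev->dev;
 *		fd->dma.device_alloc_chan_resources = foo_alloc_chan_resources;
 *		fd->dma.device_free_chan_resources = foo_free_chan_resources;
 *		fd->dma.device_prep_dma_memcpy = foo_prep_memcpy;
 *		fd->dma.device_tx_status = foo_tx_status;
 *		fd->dma.device_issue_pending = foo_issue_pending;
 *		INIT_LIST_HEAD(&fd->dma.channels);
 *		fd->chan.device = &fd->dma;
 *		list_add_tail(&fd->chan.device_node, &fd->dma.channels);
 *		return dma_async_device_register(&fd->dma);
 *	}
 */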

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			    void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK |
		DMA_COMPL_SRC_UNMAP_SINGLE |
		DMA_COMPL_DEST_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
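
/*
 * Usage sketch (editorial illustration, not part of the original source):
 * callers that cannot afford to spin in dma_sync_wait() can issue the copy
 * and poll for completion from a context of their choosing.  The function
 * name is hypothetical.
 *
 *	static bool example_poll_copy(struct dma_chan *chan, void *dst, void *src,
 *				      size_t len)
 *	{
 *		dma_cookie_t cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *
 *		if (cookie < 0)
 *			return false;
 *		dma_async_issue_pending(chan);		// kick the hardware
 *		// ... do other work, then check back later ...
 *		return dma_async_is_tx_complete(chan, cookie, NULL, NULL) == DMA_SUCCESS;
 *	}
 */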

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			   unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_SUCCESS;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	idr_init(&dma_idr);
	mutex_init(&dma_list_mutex);
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);