/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Due to the fact we are accelerating what is already a relatively fast
 * operation, the code goes to great lengths to avoid additional overhead,
 * such as locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can call
 * dma_request_channel(). Once a channel is allocated a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is simply set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details
 */
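
/*
 * Example (illustrative sketch only, not part of the original file): a
 * typical opportunistic client of the public channel pool follows roughly
 * this pattern; identifiers such as "dst", "src" and "len" are placeholders.
 *
 *	dmaengine_get();			 (register interest in channels)
 *	chan = dma_find_channel(DMA_MEMCPY);	 (per-cpu lookup, may be NULL)
 *	if (chan)
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	...
 *	dmaengine_put();			 (drop interest, allow unload)
 */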

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
				      char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static struct device_attribute dma_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_attrs	= dma_attrs,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/*
 * net_dma_find_channel - find a channel for net_dma
 * net_dma has alignment requirements
 */
struct dma_chan *net_dma_find_channel(void)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
		return NULL;

	return chan;
}
EXPORT_SYMBOL(net_dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied. Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;
	struct dma_chan *min = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min)
				min = chan;
			else if (chan->table_count < min->table_count)
				min = chan;

			if (n-- == 0) {
				ret = chan;
				break; /* done */
			}
		}
		if (ret)
			break; /* done */
	}

	if (!ret)
		ret = min;

	if (ret)
		ret->table_count++;

	return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;
	int n;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	n = 0;
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			if (num_possible_cpus() > 1)
				chan = nth_chan(cap, n++);
			else
				chan = nth_chan(cap, -1);

			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_get_slave_channel - try to get a specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		err = dma_chan_get(chan);
		if (err)
			pr_debug("%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it. We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n",
					 __func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
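
/*
 * Example (illustrative sketch only): exclusive allocation through the
 * dma_request_channel() wrapper with an optional filter callback.  The
 * filter name "my_filter" and its parameter are placeholders.
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;  (match a specific provider)
 *	}
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dev);
 *	if (chan) {
 *		...
 *		dma_release_channel(chan);
 *	}
 */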

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 */
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name)
{
	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		return of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (ACPI_HANDLE(dev))
		return acpi_dma_request_slave_chan_by_name(dev, name);

	return NULL;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
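
/*
 * Example (illustrative sketch only): a slave driver requesting its channel
 * by name; "tx" is a placeholder that has to match a device-tree "dma-names"
 * entry or an ACPI FixedDMA mapping for the client device.
 *
 *	chan = dma_request_slave_channel(&pdev->dev, "tx");
 *	if (!chan)
 *		return -ENODEV;	 (fall back to PIO, defer, etc.)
 */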

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

	mutex_lock(&dma_list_mutex);

	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
	if (rc >= 0)
		device->dev_id = rc;

	mutex_unlock(&dma_list_mutex);
	return rc < 0 ? rc : 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan* chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_control);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
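
/*
 * Example (illustrative sketch only): the minimum a provider driver fills in
 * before registering.  The "foo_*" callback names and "dd"/"pdev" variables
 * are placeholders; a real driver also adds its channels to dd->channels
 * before this call.
 *
 *	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 *	dd->device_alloc_chan_resources = foo_alloc_chan_resources;
 *	dd->device_free_chan_resources = foo_free_chan_resources;
 *	dd->device_prep_dma_memcpy = foo_prep_dma_memcpy;
 *	dd->device_tx_status = foo_tx_status;
 *	dd->device_issue_pending = foo_issue_pending;
 *	dd->dev = &pdev->dev;
 *	ret = dma_async_device_register(dd);
 */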

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
Randy Dunlap65088712006-07-03 19:45:31 -0700876 */
877void dma_async_device_unregister(struct dma_device *device)
Chris Leechc13c8262006-05-23 17:18:44 -0700878{
879 struct dma_chan *chan;
Chris Leechc13c8262006-05-23 17:18:44 -0700880
881 mutex_lock(&dma_list_mutex);
Dan Williams2ba05622009-01-06 11:38:14 -0700882 list_del_rcu(&device->global_node);
Dan Williamsbec08512009-01-06 11:38:14 -0700883 dma_channel_rebalance();
Chris Leechc13c8262006-05-23 17:18:44 -0700884 mutex_unlock(&dma_list_mutex);
885
886 list_for_each_entry(chan, &device->channels, device_node) {
Dan Williams6f49a572009-01-06 11:38:14 -0700887 WARN_ONCE(chan->client_count,
888 "%s called while %d clients hold a reference\n",
889 __func__, chan->client_count);
Dan Williams41d5e592009-01-06 11:38:21 -0700890 mutex_lock(&dma_list_mutex);
891 chan->dev->chan = NULL;
892 mutex_unlock(&dma_list_mutex);
893 device_unregister(&chan->dev->device);
Anatolij Gustschinadef4772010-01-26 10:26:06 +0100894 free_percpu(chan->local);
Chris Leechc13c8262006-05-23 17:18:44 -0700895 }
Chris Leechc13c8262006-05-23 17:18:44 -0700896}
David Brownell765e3d82007-03-16 13:38:05 -0800897EXPORT_SYMBOL(dma_async_device_unregister);
Chris Leechc13c8262006-05-23 17:18:44 -0700898
Dan Williams7405f742007-01-02 11:10:43 -0700899/**
900 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
901 * @chan: DMA channel to offload copy to
902 * @dest: destination address (virtual)
903 * @src: source address (virtual)
904 * @len: length
905 *
906 * Both @dest and @src must be mappable to a bus address according to the
907 * DMA mapping API rules for streaming mappings.
908 * Both @dest and @src must stay memory resident (kernel memory or locked
909 * user space pages).
910 */
911dma_cookie_t
912dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
913 void *src, size_t len)
914{
915 struct dma_device *dev = chan->device;
916 struct dma_async_tx_descriptor *tx;
Dan Williams00367312008-02-02 19:49:57 -0700917 dma_addr_t dma_dest, dma_src;
Dan Williams7405f742007-01-02 11:10:43 -0700918 dma_cookie_t cookie;
Maciej Sosnowski4f005db2009-04-23 12:31:51 +0200919 unsigned long flags;
Dan Williams7405f742007-01-02 11:10:43 -0700920
Dan Williams00367312008-02-02 19:49:57 -0700921 dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
922 dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
Maciej Sosnowski4f005db2009-04-23 12:31:51 +0200923 flags = DMA_CTRL_ACK |
924 DMA_COMPL_SRC_UNMAP_SINGLE |
925 DMA_COMPL_DEST_UNMAP_SINGLE;
926 tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
Dan Williams00367312008-02-02 19:49:57 -0700927
928 if (!tx) {
929 dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
930 dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
Dan Williams7405f742007-01-02 11:10:43 -0700931 return -ENOMEM;
Dan Williams00367312008-02-02 19:49:57 -0700932 }
Dan Williams7405f742007-01-02 11:10:43 -0700933
Dan Williams7405f742007-01-02 11:10:43 -0700934 tx->callback = NULL;
Dan Williams7405f742007-01-02 11:10:43 -0700935 cookie = tx->tx_submit(tx);
936
Christoph Lametere7dcaa42009-10-03 19:48:23 +0900937 preempt_disable();
938 __this_cpu_add(chan->local->bytes_transferred, len);
939 __this_cpu_inc(chan->local->memcpy_count);
940 preempt_enable();
Dan Williams7405f742007-01-02 11:10:43 -0700941
942 return cookie;
943}
944EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
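
/*
 * Example (illustrative sketch only): offloading a copy and spinning on its
 * completion with dma_sync_wait(), which issues pending work itself.  Error
 * handling is elided and the buffers are assumed to satisfy the DMA mapping
 * rules documented above.
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		return -EIO;
 */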

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_SUCCESS;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);