/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast
 * operation, the code goes to great lengths to avoid additional overhead,
 * such as locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or, if it needs an exclusive channel, it can call
 * dma_request_channel(). Once a channel is allocated, a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details.
 */
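
/*
 * Illustrative sketch (not part of this file's logic) of the two client
 * usage models described above, assuming a client that only needs a
 * best-effort memcpy channel. Error handling is elided; all calls shown
 * are real dmaengine API.
 *
 *	// opportunistic, shared channel usage
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		... submit transactions, dma_async_issue_pending(chan) ...
 *	dmaengine_put();
 *
 *	// exclusive channel usage
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (chan)
 *		... use the channel exclusively ...
 *	dma_release_channel(chan);
 */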

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		goto out;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

out:
	chan->client_count++;
	return 0;

err_out:
	module_put(owner);
	return ret;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;
	module_put(dma_chan_to_owner(chan));

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
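
/*
 * Illustrative sketch: a client that has submitted a descriptor can poll
 * for completion with dma_sync_wait(), which issues pending work itself.
 * The dst/src/len names below are placeholders for the caller's own DMA
 * mappings; only the calls shown are real dmaengine API.
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
 *	cookie = dmaengine_submit(tx);
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		... handle timeout or error ...
 */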

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);
	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
		return -ENXIO;

	/*
	 * Check whether the device uses the generic slave capability
	 * reporting; if not, it does not support any kind of slave
	 * capability reporting.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->residue_granularity = device->residue_granularity;

	caps->cmd_pause = !!device->device_pause;
	caps->cmd_terminate = !!device->device_terminate_all;

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);
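
/*
 * Illustrative sketch: a client can use dma_get_slave_caps() to check what a
 * candidate slave channel supports before configuring it. The surrounding
 * caller context is hypothetical; the calls and fields shown are real.
 *
 *	struct dma_slave_caps caps;
 *
 *	if (dma_get_slave_caps(chan, &caps) == 0 &&
 *	    (caps.directions & BIT(DMA_DEV_TO_MEM)) &&
 *	    caps.cmd_terminate)
 *		... channel can do device-to-memory and terminate_all ...
 */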

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_get_slave_channel - try to get a specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		err = dma_chan_get(chan);
		if (err)
			pr_debug("%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	int err;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = private_candidate(&mask, device, NULL, NULL);
	if (chan) {
		err = dma_chan_get(chan);
		if (err) {
			pr_debug("%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
		}
	}

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
587
Zhangfei Gao7bb587f2013-06-28 20:39:12 +0800588/**
Daniel Mack6b9019a2013-08-14 18:35:03 +0200589 * __dma_request_channel - try to allocate an exclusive channel
Dan Williams59b5ec22009-01-06 11:38:15 -0700590 * @mask: capabilities that the channel must satisfy
591 * @fn: optional callback to disposition available channels
592 * @fn_param: opaque parameter to pass to dma_filter_fn
Stephen Warren0ad7c002013-11-26 10:04:22 -0700593 *
594 * Returns pointer to appropriate DMA channel on success or NULL.
Dan Williams59b5ec22009-01-06 11:38:15 -0700595 */
Lars-Peter Clausena53e28d2013-03-25 13:23:52 +0100596struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
597 dma_filter_fn fn, void *fn_param)
Dan Williams59b5ec22009-01-06 11:38:15 -0700598{
599 struct dma_device *device, *_d;
600 struct dma_chan *chan = NULL;
Dan Williams59b5ec22009-01-06 11:38:15 -0700601 int err;
602
603 /* Find a channel */
604 mutex_lock(&dma_list_mutex);
605 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
Dan Williamse2346672009-01-06 11:38:21 -0700606 chan = private_candidate(mask, device, fn, fn_param);
607 if (chan) {
Dan Williams59b5ec22009-01-06 11:38:15 -0700608 /* Found a suitable channel, try to grab, prep, and
609 * return it. We first set DMA_PRIVATE to disable
610 * balance_ref_count as this channel will not be
611 * published in the general-purpose allocator
612 */
613 dma_cap_set(DMA_PRIVATE, device->cap_mask);
Atsushi Nemoto0f571512009-03-06 20:07:14 +0900614 device->privatecnt++;
Dan Williams59b5ec22009-01-06 11:38:15 -0700615 err = dma_chan_get(chan);
616
617 if (err == -ENODEV) {
Joe Perches63433252012-07-18 09:51:28 -0700618 pr_debug("%s: %s module removed\n",
619 __func__, dma_chan_name(chan));
Dan Williams59b5ec22009-01-06 11:38:15 -0700620 list_del_rcu(&device->global_node);
621 } else if (err)
Fabio Estevamd8b53482012-02-21 12:51:59 -0200622 pr_debug("%s: failed to get %s: (%d)\n",
Joe Perches63433252012-07-18 09:51:28 -0700623 __func__, dma_chan_name(chan), err);
Dan Williams59b5ec22009-01-06 11:38:15 -0700624 else
625 break;
Atsushi Nemoto0f571512009-03-06 20:07:14 +0900626 if (--device->privatecnt == 0)
627 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
Dan Williamse2346672009-01-06 11:38:21 -0700628 chan = NULL;
629 }
Dan Williams59b5ec22009-01-06 11:38:15 -0700630 }
631 mutex_unlock(&dma_list_mutex);
632
Joe Perches63433252012-07-18 09:51:28 -0700633 pr_debug("%s: %s (%s)\n",
634 __func__,
635 chan ? "success" : "fail",
Dan Williams41d5e592009-01-06 11:38:21 -0700636 chan ? dma_chan_name(chan) : NULL);
Dan Williams59b5ec22009-01-06 11:38:15 -0700637
638 return chan;
639}
640EXPORT_SYMBOL_GPL(__dma_request_channel);
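
/*
 * Illustrative sketch: typical use of the filter callback through the
 * dma_request_channel() wrapper. my_filter() and my_match_data are
 * hypothetical stand-ins for a platform-specific match; the remaining
 * calls are real dmaengine API.
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;	// e.g. match a provider
 *	}
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, my_match_data);
 *	if (!chan)
 *		... fall back or defer probing ...
 */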

/**
 * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
						  const char *name)
{
	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		return of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (ACPI_HANDLE(dev))
		return acpi_dma_request_slave_chan_by_name(dev, name);

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
	if (IS_ERR(ch))
		return NULL;
	return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
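
/*
 * Illustrative sketch: a typical slave driver probe path, assuming the
 * channel is named "rx" in DT or ACPI. "rx", fifo_phys_addr and the config
 * values are placeholders; the calls themselves are real dmaengine API.
 *
 *	chan = dma_request_slave_channel_reason(&pdev->dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	// may be -EPROBE_DEFER
 *
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.direction = DMA_DEV_TO_MEM;
 *	cfg.src_addr = fifo_phys_addr;
 *	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 *	dmaengine_slave_config(chan, &cfg);
 */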

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

	mutex_lock(&dma_list_mutex);

	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
	if (rc >= 0)
		device->dev_id = rc;

	mutex_unlock(&dma_list_mutex);
	return rc < 0 ? rc : 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
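
/*
 * Illustrative sketch of the provider side, assuming a driver that offers a
 * single memcpy-capable channel. The my_* names are hypothetical; the
 * dma_device/dma_chan fields and dma_async_device_register() are real.
 *
 *	dma_cap_set(DMA_MEMCPY, my_dma_dev->cap_mask);
 *	my_dma_dev->dev = &pdev->dev;
 *	my_dma_dev->device_prep_dma_memcpy = my_prep_memcpy;
 *	my_dma_dev->device_issue_pending = my_issue_pending;
 *	my_dma_dev->device_tx_status = my_tx_status;
 *
 *	INIT_LIST_HEAD(&my_dma_dev->channels);
 *	my_chan->device = my_dma_dev;
 *	list_add_tail(&my_chan->device_node, &my_dma_dev->channels);
 *
 *	ret = dma_async_device_register(my_dma_dev);
 */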

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
	#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
	default:
		BUG();
		return NULL;
	}
}

static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		if (p->pool)
			mempool_destroy(p->pool);
		p->pool = NULL;
		if (p->cache)
			kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);