/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps two global lists, dma_device_list and dma_client_list.
 * Both of these are protected by a mutex, dma_list_mutex.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is only set up by the driver.
 *
 * Each client has a channels list; it is only modified under the client->lock
 * and in an RCU callback, so it is safe to read under rcu_read_lock().
 *
 * Each device has a kref, which is initialized to 1 when the device is
 * registered. A kref_put is done for each class_device registered. When the
 * class_device is released, the corresponding kref_put is done in the release
 * method. Every time one of the device's channels is allocated to a client,
 * a kref_get occurs. When the channel is freed, the corresponding kref_put
 * happens. The device's release function signals a completion, so
 * unregister_device issues a remove event, calls class_device_unregister,
 * does a kref_put for the first reference, then waits on the completion for
 * all other references to finish.
 *
 * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
 * with a kref and a per_cpu local_t. A single reference is taken on an
 * ADDED event and dropped on a REMOVE event. The net DMA client takes an
 * extra reference per outstanding transaction. The release function does a
 * kref_put on the device. -ChrisL
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static LIST_HEAD(dma_client_list);

/* --- sysfs implementation --- */

static ssize_t show_memcpy_count(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->memcpy_count;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->bytes_transferred;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_in_use(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);

	return sprintf(buf, "%d\n", (chan->client ? 1 : 0));
}

static struct class_device_attribute dma_class_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void dma_async_device_cleanup(struct kref *kref);

static void dma_class_dev_release(struct class_device *cd)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}

static struct class dma_devclass = {
	.name = "dma",
	.class_dev_attrs = dma_class_attrs,
	.release = dma_class_dev_release,
};

/* --- client and device registration --- */

/**
 * dma_client_chan_alloc - try to allocate a channel to a client
 * @client: &dma_client
 *
 * Called with dma_list_mutex held.
 */
static struct dma_chan *dma_client_chan_alloc(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;
	unsigned long flags;
	int desc;	/* allocated descriptor count */

	/* Find a channel, any DMA engine will do */
	list_for_each_entry(device, &dma_device_list, global_node) {
		list_for_each_entry(chan, &device->channels, device_node) {
			if (chan->client)
				continue;

			desc = chan->device->device_alloc_chan_resources(chan);
			if (desc >= 0) {
				kref_get(&device->refcount);
				kref_init(&chan->refcount);
				chan->slow_ref = 0;
				INIT_RCU_HEAD(&chan->rcu);
				chan->client = client;
				spin_lock_irqsave(&client->lock, flags);
				list_add_tail_rcu(&chan->client_node,
						  &client->channels);
				spin_unlock_irqrestore(&client->lock, flags);
				return chan;
			}
		}
	}

	return NULL;
}

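/**
 * dma_sync_wait - spin-wait for a submitted transaction to complete
 * @chan: DMA channel the transaction was submitted to
 * @cookie: transaction identifier returned by ->tx_submit()
 *
 * Issues any pending descriptors on @chan and then busy-polls the cookie's
 * status, giving up with DMA_ERROR after roughly five seconds.
 */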
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_chan_cleanup - release a DMA channel's resources
 * @kref: kernel reference structure that contains the DMA channel device
 */
void dma_chan_cleanup(struct kref *kref)
{
	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
	chan->device->device_free_chan_resources(chan);
	chan->client = NULL;
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}
EXPORT_SYMBOL(dma_chan_cleanup);

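/*
 * RCU callback for the "bigref" teardown started in dma_client_chan_free():
 * the kref was biased by 0x7FFFFFFF there, so subtracting that bias minus
 * the per-cpu reference counts folds any outstanding per-cpu references
 * into the kref before the final kref_put() of the channel's own reference.
 */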
static void dma_chan_free_rcu(struct rcu_head *rcu)
{
	struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
	int bias = 0x7FFFFFFF;
	int i;
	for_each_possible_cpu(i)
		bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);
	atomic_sub(bias, &chan->refcount.refcount);
	kref_put(&chan->refcount, dma_chan_cleanup);
}

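/*
 * Take the channel out of fast-path per-cpu reference counting: bias the
 * kref so it cannot drop to zero early, mark the channel slow_ref so new
 * references go through the kref, and defer the final accounting to an RCU
 * callback so concurrent rcu_read_lock() readers can finish first.
 */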
static void dma_client_chan_free(struct dma_chan *chan)
{
	atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
	chan->slow_ref = 1;
	call_rcu(&chan->rcu, dma_chan_free_rcu);
}

/**
 * dma_chans_rebalance - reallocate channels to clients
 *
 * When the number of DMA channels in the system changes,
 * channels need to be rebalanced among clients.
 */
static void dma_chans_rebalance(void)
{
	struct dma_client *client;
	struct dma_chan *chan;
	unsigned long flags;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node) {
		while (client->chans_desired > client->chan_count) {
			chan = dma_client_chan_alloc(client);
			if (!chan)
				break;
			client->chan_count++;
			client->event_callback(client,
					       chan,
					       DMA_RESOURCE_ADDED);
		}
		while (client->chans_desired < client->chan_count) {
			spin_lock_irqsave(&client->lock, flags);
			chan = list_entry(client->channels.next,
					  struct dma_chan,
					  client_node);
			list_del_rcu(&chan->client_node);
			spin_unlock_irqrestore(&client->lock, flags);
			client->chan_count--;
			client->event_callback(client,
					       chan,
					       DMA_RESOURCE_REMOVED);
			dma_client_chan_free(chan);
		}
	}

	mutex_unlock(&dma_list_mutex);
}

/**
 * dma_async_client_register - allocate and register a &dma_client
 * @event_callback: callback for notification of channel addition/removal
 */
struct dma_client *dma_async_client_register(dma_event_callback event_callback)
{
	struct dma_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return NULL;

	INIT_LIST_HEAD(&client->channels);
	spin_lock_init(&client->lock);
	client->chans_desired = 0;
	client->chan_count = 0;
	client->event_callback = event_callback;

	mutex_lock(&dma_list_mutex);
	list_add_tail(&client->global_node, &dma_client_list);
	mutex_unlock(&dma_list_mutex);

	return client;
}
EXPORT_SYMBOL(dma_async_client_register);

/**
 * dma_async_client_unregister - unregister a client and free the &dma_client
 * @client: &dma_client to free
 *
 * Forcibly frees any allocated DMA channels and frees the &dma_client memory.
 */
void dma_async_client_unregister(struct dma_client *client)
{
	struct dma_chan *chan;

	if (!client)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(chan, &client->channels, client_node)
		dma_client_chan_free(chan);
	rcu_read_unlock();

	mutex_lock(&dma_list_mutex);
	list_del(&client->global_node);
	mutex_unlock(&dma_list_mutex);

	kfree(client);
	dma_chans_rebalance();
}
EXPORT_SYMBOL(dma_async_client_unregister);

/**
 * dma_async_client_chan_request - request DMA channels
 * @client: &dma_client
 * @number: count of DMA channels requested
 *
 * Clients call dma_async_client_chan_request() to specify how many
 * DMA channels they need; a count of 0 frees all currently allocated
 * channels. The resulting allocations/frees are indicated to the client
 * via the event callback.
 */
void dma_async_client_chan_request(struct dma_client *client,
				   unsigned int number)
{
	client->chans_desired = number;
	dma_chans_rebalance();
}
EXPORT_SYMBOL(dma_async_client_chan_request);
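
/*
 * Minimal client usage sketch (illustrative only, not part of this file):
 * register a callback, request channels, and receive them through
 * DMA_RESOURCE_ADDED events. Names prefixed "my_" are hypothetical; the
 * callback signature is assumed to match dma_event_callback in
 * <linux/dmaengine.h>.
 *
 *	static void my_dma_event(struct dma_client *client,
 *				 struct dma_chan *chan, enum dma_event event)
 *	{
 *		if (event == DMA_RESOURCE_ADDED)
 *			my_save_channel(chan);
 *		else if (event == DMA_RESOURCE_REMOVED)
 *			my_forget_channel(chan);
 *	}
 *
 *	my_client = dma_async_client_register(my_dma_event);
 *	dma_async_client_chan_request(my_client, 1);
 */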

/**
 * dma_async_device_register - register a DMA device and its channels
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	static int id;
	int chancnt = 0, rc;
	struct dma_chan *chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
		!device->device_prep_dma_zero_sum);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_dependency_added);
	BUG_ON(!device->device_is_tx_complete);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	init_completion(&device->done);
	kref_init(&device->refcount);
	device->dev_id = id++;

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			continue;

		chan->chan_id = chancnt++;
		chan->class_dev.class = &dma_devclass;
		chan->class_dev.dev = NULL;
		snprintf(chan->class_dev.class_id, BUS_ID_SIZE, "dma%dchan%d",
			 device->dev_id, chan->chan_id);

		rc = class_device_register(&chan->class_dev);
		if (rc) {
			chancnt--;
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		kref_get(&device->refcount);
	}

	mutex_lock(&dma_list_mutex);
	list_add_tail(&device->global_node, &dma_device_list);
	mutex_unlock(&dma_list_mutex);

	dma_chans_rebalance();

	return 0;

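	/*
	 * Unwind a partial registration: only channels whose per-cpu
	 * counters were allocated above took a device reference and
	 * registered a class_device, so skip any with a NULL chan->local.
	 */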
399err_out:
400 list_for_each_entry(chan, &device->channels, device_node) {
401 if (chan->local == NULL)
402 continue;
403 kref_put(&device->refcount, dma_async_device_cleanup);
404 class_device_unregister(&chan->class_dev);
405 chancnt--;
406 free_percpu(chan->local);
407 }
408 return rc;
Chris Leechc13c8262006-05-23 17:18:44 -0700409}
David Brownell765e3d82007-03-16 13:38:05 -0800410EXPORT_SYMBOL(dma_async_device_register);
Chris Leechc13c8262006-05-23 17:18:44 -0700411
/**
 * dma_async_device_cleanup - function called when all references are released
 * @kref: kernel reference object
 */
static void dma_async_device_cleanup(struct kref *kref)
{
	struct dma_device *device;

	device = container_of(kref, struct dma_device, refcount);
	complete(&device->done);
}

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;
	unsigned long flags;

	mutex_lock(&dma_list_mutex);
	list_del(&device->global_node);
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->client) {
			spin_lock_irqsave(&chan->client->lock, flags);
			list_del(&chan->client_node);
			chan->client->chan_count--;
			spin_unlock_irqrestore(&chan->client->lock, flags);
			chan->client->event_callback(chan->client,
						     chan,
						     DMA_RESOURCE_REMOVED);
			dma_client_chan_free(chan);
		}
		class_device_unregister(&chan->class_dev);
	}
	dma_chans_rebalance();

	kref_put(&device->refcount, dma_async_device_cleanup);
	wait_for_completion(&device->done);
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			    void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int cpu;

	tx = dev->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;

	tx->ack = 1;
	tx->callback = NULL;
	addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	tx->tx_set_src(addr, tx, 0);
	addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	tx->tx_set_dest(addr, tx, 0);
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
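
/*
 * Minimal usage sketch (illustrative only): submit an offloaded copy on a
 * channel obtained via the client callback and spin until it completes.
 * Assumes the enum dma_status values from <linux/dmaengine.h>; error
 * handling is elided.
 *
 *	dma_cookie_t cookie;
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (cookie < 0)
 *		return -ENOMEM;
 *	if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		return -EIO;
 */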

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			   unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int cpu;

	tx = dev->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;

	tx->ack = 1;
	tx->callback = NULL;
	addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	tx->tx_set_src(addr, tx, 0);
	addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	tx->tx_set_dest(addr, tx, 0);
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int cpu;

	tx = dev->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;

	tx->ack = 1;
	tx->callback = NULL;
	addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	tx->tx_set_src(addr, tx, 0);
	addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
	tx->tx_set_dest(addr, tx, 0);
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

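/**
 * dma_async_tx_descriptor_init - initialize the common fields of a descriptor
 * @tx: descriptor to initialize
 * @chan: channel that will own the descriptor
 */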
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	spin_lock_init(&tx->lock);
	INIT_LIST_HEAD(&tx->depend_node);
	INIT_LIST_HEAD(&tx->depend_list);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

static int __init dma_bus_init(void)
{
	mutex_init(&dma_list_mutex);
	return class_register(&dma_devclass);
}
subsys_initcall(dma_bus_init);